2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.cache.cache_errors import CacheError
53 from portage.data import secpass
54 from portage.elog.messages import eerror
55 from portage.util import normalize_path as normpath
56 from portage.util import cmp_sort_key, writemsg, writemsg_level
57 from portage.sets import load_default_config, SETPREFIX
58 from portage.sets.base import InternalPackageSet
60 from itertools import chain, izip
63 import cPickle as pickle
68 from cStringIO import StringIO
70 from StringIO import StringIO
72 class stdout_spinner(object):
# Terminal progress spinner with several display modes: "twirl" (rotating
# bar), "scroll" (marquee of humorous messages), "basic" (dots), and
# "quiet" (no output).  NOTE(review): this is an elided, line-numbered,
# indentation-stripped dump — interior statements are missing from view,
# so only comments were added.
74 "Gentoo Rocks ("+platform.system()+")",
75 "Thank you for using Gentoo. :)",
76 "Are you actually trying to read this?",
77 "How many times have you stared at this?",
78 "We are generating the cache right now",
79 "You are paying too much attention.",
80 "A theory is better than its explanation.",
81 "Phasers locked on target, Captain.",
82 "Thrashing is just virtual crashing.",
83 "To be is to program.",
84 "Real Users hate Real Programmers.",
85 "When all else fails, read the instructions.",
86 "Functionality breeds Contempt.",
87 "The future lies ahead.",
88 "3.1415926535897932384626433832795028841971694",
89 "Sometimes insanity is the only alternative.",
90 "Inaccuracy saves a world of explanation.",
# Repeating "/-\|" pattern used by update_twirl; backslashes are escaped.
93 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default mode is the twirl animation; a scroll message is picked
# pseudo-randomly from the current time.
97 self.update = self.update_twirl
98 self.scroll_sequence = self.scroll_msgs[
99 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between tty writes (rate limiting, see _return_early).
101 self.min_display_latency = 0.05
103 def _return_early(self):
# Rate limiter shared by the update_* methods.
105 Flushing output to the tty too frequently wastes cpu time. Therefore,
106 each update* method should return without doing any output when this
109 cur_time = time.time()
110 if cur_time - self.last_update < self.min_display_latency:
112 self.last_update = cur_time
115 def update_basic(self):
# Dot-based progress: writes a dot roughly every 100 updates.
116 self.spinpos = (self.spinpos + 1) % 500
117 if self._return_early():
119 if (self.spinpos % 100) == 0:
120 if self.spinpos == 0:
121 sys.stdout.write(". ")
123 sys.stdout.write(".")
126 def update_scroll(self):
# Marquee: scrolls scroll_sequence right (green) then back left
# (darkgreen); spinpos cycles over twice the message length.
127 if self._return_early():
129 if(self.spinpos >= len(self.scroll_sequence)):
130 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
131 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
133 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
135 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
137 def update_twirl(self):
# Rotating-bar animation using twirl_sequence.
138 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
139 if self._return_early():
141 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used when --quiet/--nospinner is in effect (body elided).
144 def update_quiet(self):
147 def userquery(prompt, responses=None, colours=None):
148 """Displays a prompt and a set of responses, then waits for a response
149 which is checked against the responses and the first to match is
150 returned. An empty response will match the first value in responses. The
151 input buffer is *not* cleared prior to the prompt!
154 responses: a List of Strings.
155 colours: a List of Functions taking and returning a String, used to
156 process the responses for display. Typically these will be functions
157 like red() but could be e.g. lambda x: "DisplayString".
158 If responses is omitted, defaults to ["Yes", "No"], [green, red].
159 If only colours is omitted, defaults to [bold, ...].
161 Returns a member of the List responses. (If called without optional
162 arguments, returns "Yes" or "No".)
163 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
# NOTE(review): elided dump — interior lines (e.g. the prompt print and
# loop framing) are missing from view; comments only.
165 if responses is None:
166 responses = ["Yes", "No"]
# Default colour functions come from the PROMPT_CHOICE_* colour classes.
168 create_color_func("PROMPT_CHOICE_DEFAULT"),
169 create_color_func("PROMPT_CHOICE_OTHER")
171 elif colours is None:
# Cycle/truncate the colour list so it is exactly as long as responses.
173 colours=(colours*len(responses))[:len(responses)]
# Python 2 raw_input(); choices are shown slash-separated and colourized.
177 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
178 for key in responses:
179 # An empty response will match the first value in responses.
# Case-insensitive prefix match against each allowed response.
180 if response.upper()==key[:len(response)].upper():
182 print "Sorry, response '%s' not understood." % response,
# Ctrl-C/EOF is handled here (conversion to SystemExit per the docstring).
183 except (EOFError, KeyboardInterrupt):
# Valid emerge action verbs.  NOTE(review): elided dump — additional
# entries and the closing brackets of these literals are missing from view.
187 actions = frozenset([
188 "clean", "config", "depclean",
189 "info", "list-sets", "metadata",
190 "prune", "regen", "search",
191 "sync", "unmerge", "version",
# Long-form command-line options (container's opening line elided).
194 "--ask", "--alphabetical",
195 "--buildpkg", "--buildpkgonly",
196 "--changelog", "--columns",
201 "--fetchonly", "--fetch-all-uri",
202 "--getbinpkg", "--getbinpkgonly",
203 "--help", "--ignore-default-opts",
207 "--nodeps", "--noreplace",
208 "--nospinner", "--oneshot",
209 "--onlydeps", "--pretend",
210 "--quiet", "--resume",
211 "--searchdesc", "--selective",
215 "--usepkg", "--usepkgonly",
# Single-letter short option -> long option mapping (opening line elided).
222 "b":"--buildpkg", "B":"--buildpkgonly",
223 "c":"--clean", "C":"--unmerge",
224 "d":"--debug", "D":"--deep",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
229 "k":"--usepkg", "K":"--usepkgonly",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps", "O":"--nodeps",
233 "p":"--pretend", "P":"--prune",
235 "s":"--search", "S":"--searchdesc",
238 "v":"--verbose", "V":"--version"
241 def emergelog(xterm_titles, mystr, short_msg=None):
# Append a timestamped message to /var/log/emerge.log, optionally echoing
# short_msg into the xterm title bar.  Best-effort: I/O and portage errors
# are reported to stderr rather than raised (see the except below).
# NOTE(review): elided dump — interior lines missing; comments only.
242 if xterm_titles and short_msg:
243 if "HOSTNAME" in os.environ:
# Prefix the title message with the host name when available.
244 short_msg = os.environ["HOSTNAME"]+": "+short_msg
245 xtermTitle(short_msg)
247 file_path = "/var/log/emerge.log"
248 mylogfile = open(file_path, "a")
# Ensure the log keeps portage-group ownership/permissions.
249 portage.util.apply_secpass_permissions(file_path,
250 uid=portage.portage_uid, gid=portage.portage_gid,
# Serialize concurrent emerge processes writing to the same log.
254 mylock = portage.locks.lockfile(mylogfile)
255 # seek because we may have gotten held up by the lock.
256 # if so, we may not be positioned at the end of the file.
# Timestamp is integer seconds (first 10 chars of time.time()).
258 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
262 portage.locks.unlockfile(mylock)
264 except (IOError,OSError,portage.exception.PortageException), e:
266 print >> sys.stderr, "emergelog():",e
268 def countdown(secs=5, doing="Starting"):
# Print a Control-C-abortable countdown before an action begins.
# NOTE(review): elided dump — the loop/sleep lines are missing from view.
270 print ">>> Waiting",secs,"seconds before starting..."
271 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# Counts are shown with UNMERGE_WARN colouring; sec+1 implies a
# descending range loop (elided).
275 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
280 # formats a size given in bytes nicely
# Returns a human-readable "N kB" string with thousands separators;
# strings pass through unchanged (basestring check).  NOTE(review):
# elided dump — the return lines and comma-grouping loop header are
# missing from view.
281 def format_size(mysize):
282 if isinstance(mysize, basestring):
284 if 0 != mysize % 1024:
285 # Always round up to the next kB so that it doesn't show 0 kB when
286 # some small file still needs to be fetched.
287 mysize += 1024 - mysize % 1024
# Python 2 integer division: bytes -> whole kB.
288 mystr=str(mysize/1024)
# Insert a comma every three digits (loop framing elided).
292 mystr=mystr[:mycount]+","+mystr[mycount:]
296 def getgccversion(chost):
# Determine the active gcc version, trying in order: gcc-config -c,
# ${CHOST}-gcc -dumpversion, then plain gcc -dumpversion; returns a
# "gcc-X.Y.Z" style string or "[unavailable]".
299 return: the current in-use gcc version
302 gcc_ver_command = 'gcc -dumpversion'
303 gcc_ver_prefix = 'gcc-'
305 gcc_not_found_error = red(
306 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307 "!!! to update the environment of this terminal and possibly\n" +
308 "!!! other terminals also.\n"
# Preferred source: the gcc-config wrapper's current profile.
311 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Fallback 1: CHOST-prefixed compiler, e.g. x86_64-pc-linux-gnu-gcc.
315 mystatus, myoutput = commands.getstatusoutput(
316 chost + "-" + gcc_ver_command)
317 if mystatus == os.EX_OK:
318 return gcc_ver_prefix + myoutput
# Fallback 2: whatever "gcc" is on PATH.
320 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321 if mystatus == os.EX_OK:
322 return gcc_ver_prefix + myoutput
# Nothing worked: warn the user and return a placeholder.
324 portage.writemsg(gcc_not_found_error, noiselevel=-1)
325 return "[unavailable]"
327 def getportageversion(portdir, target_root, profile, chost, vardb):
# Build the "Portage X.Y (profile, gcc, libc, kernel/arch)" banner string
# shown by emerge --info/--version.  NOTE(review): elided dump — some
# try/except framing lines are missing from view.
328 profilever = "unavailable"
# Express the profile as a path relative to $PORTDIR/profiles when it
# lives inside the tree, else fall back to the raw symlink target.
330 realpath = os.path.realpath(profile)
331 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
332 if realpath.startswith(basepath):
333 profilever = realpath[1 + len(basepath):]
336 profilever = "!" + os.readlink(profile)
339 del realpath, basepath
# Collect the installed libc provider(s) from the vdb.
342 libclist = vardb.match("virtual/libc")
343 libclist += vardb.match("virtual/glibc")
344 libclist = portage.util.unique_array(libclist)
346 xs=portage.catpkgsplit(x)
# Multiple providers are comma-joined; each is "pkg-version[-rev]".
348 libcver+=","+"-".join(xs[1:])
350 libcver="-".join(xs[1:])
352 libcver="unavailable"
354 gccver = getgccversion(chost)
355 unameout=platform.release()+" "+platform.machine()
357 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
359 def create_depgraph_params(myopts, myaction):
360 #configure emerge engine parameters
# Translate parsed command-line options/action into the set of depgraph
# behaviour flags documented below.  Returns a set of parameter names.
362 # self: include _this_ package regardless of if it is merged.
363 # selective: exclude the package if it is merged
364 # recurse: go into the dependencies
365 # deep: go into the dependencies of already merged packages
366 # empty: pretend nothing is merged
367 # complete: completely account for all known dependencies
368 # remove: build graph for use in removing packages
369 myparams = set(["recurse"])
371 if myaction == "remove":
372 myparams.add("remove")
373 myparams.add("complete")
# These options all imply skipping packages that are already merged.
376 if "--update" in myopts or \
377 "--newuse" in myopts or \
378 "--reinstall" in myopts or \
379 "--noreplace" in myopts:
380 myparams.add("selective")
381 if "--emptytree" in myopts:
382 myparams.add("empty")
# --emptytree overrides any selectivity implied above.
383 myparams.discard("selective")
384 if "--nodeps" in myopts:
385 myparams.discard("recurse")
386 if "--deep" in myopts:
388 if "--complete-graph" in myopts:
389 myparams.add("complete")
392 # search functionality
393 class search(object):
# emerge --search implementation: matches package names / descriptions /
# sets across the ebuild, binary, and installed databases via a fake
# portdb facade.  NOTE(review): elided, line-numbered dump — many interior
# statements are missing from view; only comments/docstrings changed.
404 def __init__(self, root_config, spinner, searchdesc,
405 verbose, usepkg, usepkgonly):
406 """Searches the available and installed packages for the supplied search key.
407 The list of available and installed packages is created at object instantiation.
408 This makes successive searches faster."""
409 self.settings = root_config.settings
410 self.vartree = root_config.trees["vartree"]
411 self.spinner = spinner
412 self.verbose = verbose
413 self.searchdesc = searchdesc
414 self.root_config = root_config
415 self.setconfig = root_config.setconfig
416 self.matches = {"pkg" : []}
# self.portdb is a facade whose db methods dispatch to the _-prefixed
# wrappers below, so searches span all configured databases.
421 self.portdb = fake_portdb
422 for attrib in ("aux_get", "cp_all",
423 "xmatch", "findname", "getFetchMap"):
424 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
428 portdb = root_config.trees["porttree"].dbapi
429 bindb = root_config.trees["bintree"].dbapi
430 vardb = root_config.trees["vartree"].dbapi
# _dbs ordering: ebuild tree (unless --usepkgonly), then binary tree
# (when binpkgs are in play), then the installed-package db.
432 if not usepkgonly and portdb._have_root_eclass_dir:
433 self._dbs.append(portdb)
435 if (usepkg or usepkgonly) and bindb.cp_all():
436 self._dbs.append(bindb)
438 self._dbs.append(vardb)
439 self._portdb = portdb
# _cp_all: union of category/package names across all databases (elided).
444 cp_all.update(db.cp_all())
445 return list(sorted(cp_all))
447 def _aux_get(self, *args, **kwargs):
# Try each database in turn; return the first successful aux_get.
450 return db.aux_get(*args, **kwargs)
455 def _findname(self, *args, **kwargs):
457 if db is not self._portdb:
458 # We don't want findname to return anything
459 # unless it's an ebuild in a portage tree.
460 # Otherwise, it's already built and we don't
463 func = getattr(db, "findname", None)
465 value = func(*args, **kwargs)
470 def _getFetchMap(self, *args, **kwargs):
# First db providing getFetchMap with a result wins.
472 func = getattr(db, "getFetchMap", None)
474 value = func(*args, **kwargs)
479 def _visible(self, db, cpv, metadata):
# Wrap cpv+metadata in a Package and delegate to the module-level
# visible() filter; installed packages get type "installed".
480 installed = db is self.vartree.dbapi
481 built = installed or db is not self._portdb
484 pkg_type = "installed"
487 return visible(self.settings,
488 Package(type_name=pkg_type, root_config=self.root_config,
489 cpv=cpv, built=built, installed=installed, metadata=metadata))
491 def _xmatch(self, level, atom):
493 This method does not expand old-style virtuals because it
494 is restricted to returning matches for a single ${CATEGORY}/${PN}
495 and old-style virtual matches unreliable for that when querying
496 multiple package databases. If necessary, old-style virtuals
497 can be performed on atoms prior to calling this method.
499 cp = portage.dep_getkey(atom)
500 if level == "match-all":
# Union of matches from every db; dbs without xmatch use match().
503 if hasattr(db, "xmatch"):
504 matches.update(db.xmatch(level, atom))
506 matches.update(db.match(atom))
507 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508 db._cpv_sort_ascending(result)
509 elif level == "match-visible":
512 if hasattr(db, "xmatch"):
513 matches.update(db.xmatch(level, atom))
# Without xmatch support, filter matches through _visible manually.
515 db_keys = list(db._aux_cache_keys)
516 for cpv in db.match(atom):
517 metadata = izip(db_keys,
518 db.aux_get(cpv, db_keys))
519 if not self._visible(db, cpv, metadata):
522 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523 db._cpv_sort_ascending(result)
524 elif level == "bestmatch-visible":
527 if hasattr(db, "xmatch"):
528 cpv = db.xmatch("bestmatch-visible", atom)
529 if not cpv or portage.cpv_getkey(cpv) != cp:
# Keep the overall best candidate across databases.
531 if not result or cpv == portage.best([cpv, result]):
534 db_keys = Package.metadata_keys
535 # break out of this loop with highest visible
536 # match, checked in descending order
537 for cpv in reversed(db.match(atom)):
538 if portage.cpv_getkey(cpv) != cp:
540 metadata = izip(db_keys,
541 db.aux_get(cpv, db_keys))
542 if not self._visible(db, cpv, metadata):
544 if not result or cpv == portage.best([cpv, result]):
# Unknown match level: fail loudly rather than return wrong data.
548 raise NotImplementedError(level)
551 def execute(self,searchkey):
552 """Performs the search for the supplied search key"""
554 self.searchkey=searchkey
555 self.packagematches = []
# "desc" bucket only exists when description searching is enabled.
558 self.matches = {"pkg":[], "desc":[], "set":[]}
561 self.matches = {"pkg":[], "set":[]}
562 print "Searching... ",
# Leading '%' selects regex mode; leading '@' restricts to sets.
565 if self.searchkey.startswith('%'):
567 self.searchkey = self.searchkey[1:]
568 if self.searchkey.startswith('@'):
570 self.searchkey = self.searchkey[1:]
572 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex mode: escape the key so it matches literally.
574 self.searchre=re.compile(re.escape(self.searchkey), re.I)
575 for package in self.portdb.cp_all():
576 self.spinner.update()
# Match against full cat/pkg or just the package name (mode elided).
579 match_string = package[:]
581 match_string = package.split("/")[-1]
584 if self.searchre.search(match_string):
# No visible version -> the package is masked.
585 if not self.portdb.xmatch("match-visible", package):
587 self.matches["pkg"].append([package,masked])
588 elif self.searchdesc: # DESCRIPTION searching
589 full_package = self.portdb.xmatch("bestmatch-visible", package)
591 #no match found; we don't want to query description
592 full_package = portage.best(
593 self.portdb.xmatch("match-all", package))
599 full_desc = self.portdb.aux_get(
600 full_package, ["DESCRIPTION"])[0]
602 print "emerge: search: aux_get() failed, skipping"
604 if self.searchre.search(full_desc):
605 self.matches["desc"].append([full_package,masked])
# Package sets are searched by name and (optionally) description.
607 self.sdict = self.setconfig.getSets()
608 for setname in self.sdict:
609 self.spinner.update()
611 match_string = setname
613 match_string = setname.split("/")[-1]
615 if self.searchre.search(match_string):
616 self.matches["set"].append([setname, False])
617 elif self.searchdesc:
618 if self.searchre.search(
619 self.sdict[setname].getMetadata("DESCRIPTION")):
620 self.matches["set"].append([setname, False])
623 for mtype in self.matches:
624 self.matches[mtype].sort()
625 self.mlen += len(self.matches[mtype])
# Helper path (framing elided): classify a single cp as masked/visible.
628 if not self.portdb.xmatch("match-all", cp):
631 if not self.portdb.xmatch("bestmatch-visible", cp):
633 self.matches["pkg"].append([cp, masked])
# output(): pretty-print collected matches (method def line elided).
637 """Outputs the results of the search."""
638 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
639 print "[ Applications found : "+white(str(self.mlen))+" ]"
641 vardb = self.vartree.dbapi
642 for mtype in self.matches:
643 for match,masked in self.matches[mtype]:
647 full_package = self.portdb.xmatch(
648 "bestmatch-visible", match)
650 #no match found; we don't want to query description
652 full_package = portage.best(
653 self.portdb.xmatch("match-all",match))
654 elif mtype == "desc":
656 match = portage.cpv_getkey(match)
# Set results: print name and set DESCRIPTION metadata.
658 print green("*")+" "+white(match)
659 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
663 desc, homepage, license = self.portdb.aux_get(
664 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
666 print "emerge: search: aux_get() failed, skipping"
# Masked packages are flagged inline.
669 print green("*")+" "+white(match)+" "+red("[ Masked ]")
671 print green("*")+" "+white(match)
672 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
676 mycat = match.split("/")[0]
677 mypkg = match.split("/")[1]
678 mycpv = match + "-" + myversion
679 myebuild = self.portdb.findname(mycpv)
# For ebuilds: compute the download size from the Manifest digests.
681 pkgdir = os.path.dirname(myebuild)
682 from portage import manifest
683 mf = manifest.Manifest(
684 pkgdir, self.settings["DISTDIR"])
686 uri_map = self.portdb.getFetchMap(mycpv)
687 except portage.exception.InvalidDependString, e:
688 file_size_str = "Unknown (%s)" % (e,)
692 mysum[0] = mf.getDistfilesSize(uri_map)
694 file_size_str = "Unknown (missing " + \
695 "digest for %s)" % (e,)
# For binary packages: use the on-disk tbz2 size instead.
700 if db is not vardb and \
701 db.cpv_exists(mycpv):
703 if not myebuild and hasattr(db, "bintree"):
704 myebuild = db.bintree.getname(mycpv)
706 mysum[0] = os.stat(myebuild).st_size
711 if myebuild and file_size_str is None:
# Format bytes as comma-grouped kB (same scheme as format_size).
712 mystr = str(mysum[0] / 1024)
716 mystr = mystr[:mycount] + "," + mystr[mycount:]
717 file_size_str = mystr + " kB"
721 print " ", darkgreen("Latest version available:"),myversion
722 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
725 (darkgreen("Size of files:"), file_size_str)
726 print " ", darkgreen("Homepage:")+" ",homepage
727 print " ", darkgreen("Description:")+" ",desc
728 print " ", darkgreen("License:")+" ",license
733 def getInstallationStatus(self,package):
# Human-readable "Latest version installed:" line for a cat/pkg.
734 installed_package = self.vartree.dep_bestmatch(package)
736 version = self.getVersion(installed_package,search.VERSION_RELEASE)
738 result = darkgreen("Latest version installed:")+" "+version
740 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
743 def getVersion(self,full_package,detail):
# Extract the version (optionally with -rN revision) from a full cpv;
# 'r0' revisions are suppressed.
744 if len(full_package) > 1:
745 package_parts = portage.catpkgsplit(full_package)
746 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747 result = package_parts[2]+ "-" + package_parts[3]
749 result = package_parts[2]
754 class RootConfig(object):
755 """This is used internally by depgraph to track information about a
# pkg_tree_map: package type name -> trees-dict key (dict literal's
# opening/closing lines elided in this dump).
759 "ebuild" : "porttree",
760 "binary" : "bintree",
761 "installed" : "vartree"
# Builds the reverse mapping (tree name -> pkg type); Python 2 iteritems.
765 for k, v in pkg_tree_map.iteritems():
768 def __init__(self, settings, trees, setconfig):
770 self.settings = settings
771 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772 self.root = self.settings["ROOT"]
773 self.setconfig = setconfig
774 if setconfig is None:
777 self.sets = self.setconfig.getSets()
# In-memory dbapi holding only packages that pass visibility filtering.
778 self.visible_pkgs = PackageVirtualDbapi(self.settings)
780 def create_world_atom(pkg, args_set, root_config):
781 """Create a new atom for the world file if one does not exist. If the
782 argument atom is precise enough to identify a specific slot then a slot
783 atom will be returned. Atoms that are in the system set may also be stored
784 in world since system atoms can only match one slot while world atoms can
785 be greedy with respect to slots. Unslotted system packages will not be
# NOTE(review): elided, line-numbered dump — interior statements are
# missing from view; comments only.
788 arg_atom = args_set.findAtomForPackage(pkg)
791 cp = portage.dep_getkey(arg_atom)
793 sets = root_config.sets
794 portdb = root_config.trees["porttree"].dbapi
795 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists, or its single
# SLOT is not the default "0".
796 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
797 for cpv in portdb.match(cp))
798 slotted = len(available_slots) > 1 or \
799 (len(available_slots) == 1 and "0" not in available_slots)
801 # check the vdb in case this is multislot
802 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
803 for cpv in vardb.match(cp))
804 slotted = len(available_slots) > 1 or \
805 (len(available_slots) == 1 and "0" not in available_slots)
806 if slotted and arg_atom != cp:
807 # If the user gave a specific atom, store it as a
808 # slot atom in the world file.
809 slot_atom = pkg.slot_atom
811 # For USE=multislot, there are a couple of cases to
814 # 1) SLOT="0", but the real SLOT spontaneously changed to some
815 # unknown value, so just record an unslotted atom.
817 # 2) SLOT comes from an installed package and there is no
818 # matching SLOT in the portage tree.
820 # Make sure that the slot atom is available in either the
821 # portdb or the vardb, since otherwise the user certainly
822 # doesn't want the SLOT atom recorded in the world file
823 # (case 1 above). If it's only available in the vardb,
824 # the user may be trying to prevent a USE=multislot
825 # package from being removed by --depclean (case 2 above).
828 if not portdb.match(slot_atom):
829 # SLOT seems to come from an installed multislot package
831 # If there is no installed package matching the SLOT atom,
832 # it probably changed SLOT spontaneously due to USE=multislot,
833 # so just record an unslotted atom.
834 if vardb.match(slot_atom):
835 # Now verify that the argument is precise
836 # enough to identify a specific slot.
837 matches = mydb.match(arg_atom)
838 matched_slots = set()
840 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
841 if len(matched_slots) == 1:
842 new_world_atom = slot_atom
844 if new_world_atom == sets["world"].findAtomForPackage(pkg):
845 # Both atoms would be identical, so there's nothing to add.
848 # Unlike world atoms, system atoms are not greedy for slots, so they
849 # can't be safely excluded from world if they are slotted.
850 system_atom = sets["system"].findAtomForPackage(pkg)
852 if not portage.dep_getkey(system_atom).startswith("virtual/"):
854 # System virtuals aren't safe to exclude from world since they can
855 # match multiple old-style virtuals but only one of them will be
856 # pulled in by update or depclean.
857 providers = portdb.mysettings.getvirtuals().get(
858 portage.dep_getkey(system_atom))
859 if providers and len(providers) == 1 and providers[0] == cp:
861 return new_world_atom
# Generator that strips IUSE default markers; flags beginning with
# '+'/'-' are handled by the (elided) branch below.  NOTE(review):
# elided dump — the yield statements are missing from view.
863 def filter_iuse_defaults(iuse):
865 if flag.startswith("+") or flag.startswith("-"):
870 class SlotObject(object):
# Lightweight __slots__-based base: __init__ assigns each declared slot
# from keyword args (defaulting to None); a copy method mirrors them.
# NOTE(review): elided dump — loop framing lines are missing from view.
871 __slots__ = ("__weakref__",)
873 def __init__(self, **kwargs):
# Walk this class and all its bases, collecting every __slots__ entry.
874 classes = [self.__class__]
879 classes.extend(c.__bases__)
880 slots = getattr(c, "__slots__", None)
884 myvalue = kwargs.get(myattr, None)
885 setattr(self, myattr, myvalue)
889 Create a new instance and copy all attributes
890 defined from __slots__ (including those from
893 obj = self.__class__()
# Same class/base traversal as __init__, copying attribute values.
895 classes = [self.__class__]
900 classes.extend(c.__bases__)
901 slots = getattr(c, "__slots__", None)
905 setattr(obj, myattr, getattr(self, myattr))
909 class AbstractDepPriority(SlotObject):
# Base class for dependency priorities; rich comparisons delegate to the
# subclass-provided __int__ so priorities order like plain integers.
910 __slots__ = ("buildtime", "runtime", "runtime_post")
912 def __lt__(self, other):
913 return self.__int__() < other
915 def __le__(self, other):
916 return self.__int__() <= other
918 def __eq__(self, other):
919 return self.__int__() == other
921 def __ne__(self, other):
922 return self.__int__() != other
924 def __gt__(self, other):
925 return self.__int__() > other
927 def __ge__(self, other):
928 return self.__int__() >= other
# copy() delegates to the copy module (method def line elided).
932 return copy.copy(self)
934 class DepPriority(AbstractDepPriority):
# Standard dependency priority; adds satisfied/optional/rebuild state.
# NOTE(review): elided dump — __int__/__str__ bodies largely missing.
936 __slots__ = ("satisfied", "optional", "rebuild")
948 if self.runtime_post:
949 return "runtime_post"
952 class BlockerDepPriority(DepPriority):
# Shared singleton used for all blocker edges (class body elided).
960 BlockerDepPriority.instance = BlockerDepPriority()
962 class UnmergeDepPriority(AbstractDepPriority):
# Priority used when ordering unmerges; the docstring table below maps
# property combinations to integer priorities (rows elided in this dump).
963 __slots__ = ("optional", "satisfied",)
965 Combination of properties Priority Category
970 (none of the above) -2 SOFT
980 if self.runtime_post:
# __str__-style categorisation: anything above SOFT is "hard".
987 myvalue = self.__int__()
988 if myvalue > self.SOFT:
992 class DepPriorityNormalRange(object):
# Maps DepPriority states onto an ordered index for edge-ignoring during
# graph traversal; the _ignore_* classmethods below each answer "may an
# edge of this priority be ignored at this severity level?".
994 DepPriority properties Index Category
998 runtime_post 2 MEDIUM_SOFT
1000 (none of the above) 0 NONE
1008 def _ignore_optional(cls, priority):
# Non-DepPriority instances are never ignorable (elided: return False).
1009 if priority.__class__ is not DepPriority:
1011 return bool(priority.optional)
1014 def _ignore_runtime_post(cls, priority):
1015 if priority.__class__ is not DepPriority:
1017 return bool(priority.optional or priority.runtime_post)
1020 def _ignore_runtime(cls, priority):
1021 if priority.__class__ is not DepPriority:
# Everything except buildtime deps can be ignored at this level.
1023 return not priority.buildtime
1025 ignore_medium = _ignore_runtime
1026 ignore_medium_soft = _ignore_runtime_post
1027 ignore_soft = _ignore_optional
# Lookup table indexed by severity level; set after the class body since
# classmethods can't be sequenced inside it (first slot elided: None).
1029 DepPriorityNormalRange.ignore_priority = (
1031 DepPriorityNormalRange._ignore_optional,
1032 DepPriorityNormalRange._ignore_runtime_post,
1033 DepPriorityNormalRange._ignore_runtime
1036 class DepPrioritySatisfiedRange(object):
# Like DepPriorityNormalRange but distinguishes already-satisfied deps,
# giving a finer-grained ignore ladder (table rows partly elided).
1038 DepPriority Index Category
1040 not satisfied and buildtime HARD
1041 not satisfied and runtime 7 MEDIUM
1042 not satisfied and runtime_post 6 MEDIUM_SOFT
1043 satisfied and buildtime and rebuild 5 SOFT
1044 satisfied and buildtime 4 SOFT
1045 satisfied and runtime 3 SOFT
1046 satisfied and runtime_post 2 SOFT
1048 (none of the above) 0 NONE
1056 def _ignore_optional(cls, priority):
1057 if priority.__class__ is not DepPriority:
1059 return bool(priority.optional)
1062 def _ignore_satisfied_runtime_post(cls, priority):
1063 if priority.__class__ is not DepPriority:
1065 if priority.optional:
# Unsatisfied deps are never ignorable at the "satisfied" levels.
1067 if not priority.satisfied:
1069 return bool(priority.runtime_post)
1072 def _ignore_satisfied_runtime(cls, priority):
1073 if priority.__class__ is not DepPriority:
1075 if priority.optional:
1077 if not priority.satisfied:
1079 return not priority.buildtime
1082 def _ignore_satisfied_buildtime(cls, priority):
1083 if priority.__class__ is not DepPriority:
1085 if priority.optional:
1087 if not priority.satisfied:
# Satisfied buildtime deps are ignorable unless flagged for rebuild.
1089 if priority.buildtime:
1090 return not priority.rebuild
1094 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1095 if priority.__class__ is not DepPriority:
1097 if priority.optional:
1099 return bool(priority.satisfied)
1102 def _ignore_runtime_post(cls, priority):
1103 if priority.__class__ is not DepPriority:
1105 return bool(priority.optional or \
1106 priority.satisfied or \
1107 priority.runtime_post)
1110 def _ignore_runtime(cls, priority):
1111 if priority.__class__ is not DepPriority:
1113 return bool(priority.satisfied or \
1114 not priority.buildtime)
1116 ignore_medium = _ignore_runtime
1117 ignore_medium_soft = _ignore_runtime_post
1118 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Severity-indexed ignore ladder, parallel to DepPriorityNormalRange's
# table (first slot elided: None).
1120 DepPrioritySatisfiedRange.ignore_priority = (
1122 DepPrioritySatisfiedRange._ignore_optional,
1123 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1124 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1125 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1126 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1127 DepPrioritySatisfiedRange._ignore_runtime_post,
1128 DepPrioritySatisfiedRange._ignore_runtime
1131 def _find_deep_system_runtime_deps(graph):
# Depth-first walk from the system-set packages in the graph, following
# only runtime(-post) edges; returns the set of reachable Packages.
# NOTE(review): elided dump — loop framing lines are missing from view.
1132 deep_system_deps = set()
# Seed the stack with non-uninstall Package nodes in the system set.
1135 if not isinstance(node, Package) or \
1136 node.operation == 'uninstall':
1138 if node.root_config.sets['system'].findAtomForPackage(node):
1139 node_stack.append(node)
1141 def ignore_priority(priority):
1143 Ignore non-runtime priorities.
1145 if isinstance(priority, DepPriority) and \
1146 (priority.runtime or priority.runtime_post):
# Standard iterative DFS; skip nodes already collected.
1151 node = node_stack.pop()
1152 if node in deep_system_deps:
1154 deep_system_deps.add(node)
1155 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1156 if not isinstance(child, Package) or \
1157 child.operation == 'uninstall':
1159 node_stack.append(child)
1161 return deep_system_deps
1163 class FakeVartree(portage.vartree):
1164 """This is implements an in-memory copy of a vartree instance that provides
1165 all the interfaces required for use by the depgraph. The vardb is locked
1166 during the constructor call just long enough to read a copy of the
1167 installed package information. This allows the depgraph to do it's
1168 dependency calculations without holding a lock on the vardb. It also
1169 allows things like vardb global updates to be done in memory so that the
1170 user doesn't necessarily need write access to the vardb in cases where
1171 global updates are necessary (updates are performed when necessary if there
1172 is not a matching ebuild in the tree)."""
1173 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1174 self._root_config = root_config
1175 if pkg_cache is None:
1177 real_vartree = root_config.trees["vartree"]
1178 portdb = root_config.trees["porttree"].dbapi
1179 self.root = real_vartree.root
1180 self.settings = real_vartree.settings
1181 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1182 if "_mtime_" not in mykeys:
1183 mykeys.append("_mtime_")
1184 self._db_keys = mykeys
1185 self._pkg_cache = pkg_cache
1186 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1187 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1189 # At least the parent needs to exist for the lock file.
1190 portage.util.ensure_dirs(vdb_path)
1191 except portage.exception.PortageException:
1195 if acquire_lock and os.access(vdb_path, os.W_OK):
1196 vdb_lock = portage.locks.lockdir(vdb_path)
1197 real_dbapi = real_vartree.dbapi
1199 for cpv in real_dbapi.cpv_all():
1200 cache_key = ("installed", self.root, cpv, "nomerge")
1201 pkg = self._pkg_cache.get(cache_key)
1203 metadata = pkg.metadata
1205 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1206 myslot = metadata["SLOT"]
1207 mycp = portage.dep_getkey(cpv)
1208 myslot_atom = "%s:%s" % (mycp, myslot)
1210 mycounter = long(metadata["COUNTER"])
1213 metadata["COUNTER"] = str(mycounter)
1214 other_counter = slot_counters.get(myslot_atom, None)
1215 if other_counter is not None:
1216 if other_counter > mycounter:
1218 slot_counters[myslot_atom] = mycounter
1220 pkg = Package(built=True, cpv=cpv,
1221 installed=True, metadata=metadata,
1222 root_config=root_config, type_name="installed")
1223 self._pkg_cache[pkg] = pkg
1224 self.dbapi.cpv_inject(pkg)
1225 real_dbapi.flush_cache()
1228 portage.locks.unlockdir(vdb_lock)
1229 # Populate the old-style virtuals using the cached values.
1230 if not self.settings.treeVirtuals:
1231 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1232 portage.getCPFromCPV, self.get_all_provides())
1234 # Intialize variables needed for lazy cache pulls of the live ebuild
1235 # metadata. This ensures that the vardb lock is released ASAP, without
1236 # being delayed in case cache generation is triggered.
1237 self._aux_get = self.dbapi.aux_get
1238 self.dbapi.aux_get = self._aux_get_wrapper
1239 self._match = self.dbapi.match
1240 self.dbapi.match = self._match_wrapper
1241 self._aux_get_history = set()
1242 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1243 self._portdb = portdb
1244 self._global_updates = None
1246 def _match_wrapper(self, cpv, use_cache=1):
1248 Make sure the metadata in Package instances gets updated for any
1249 cpv that is returned from a match() call, since the metadata can
1250 be accessed directly from the Package instance instead of via
1253 matches = self._match(cpv, use_cache=use_cache)
1255 if cpv in self._aux_get_history:
1257 self._aux_get_wrapper(cpv, [])
# Lazy aux_get: on first access to a cpv, overlay the live ebuild metadata
# (EAPI/DEPEND/RDEPEND/PDEPEND from the portdb) onto the cached vardb
# entry; if the live ebuild is missing or unusable, fall back to applying
# the profile's global package updates instead. Subsequent calls hit the
# fast path via _aux_get_history.
1260 def _aux_get_wrapper(self, pkg, wants):
1261 if pkg in self._aux_get_history:
1262 return self._aux_get(pkg, wants)
1263 self._aux_get_history.add(pkg)
1265 # Use the live ebuild metadata if possible.
1266 live_metadata = dict(izip(self._portdb_keys,
1267 self._portdb.aux_get(pkg, self._portdb_keys)))
# NOTE(review): the branch structure around the eapi_is_supported check is
# partially elided here (a try/raise or early-bail line is missing between
# 1268 and 1270) — confirm against the full file before editing.
1268 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1270 self.dbapi.aux_update(pkg, live_metadata)
1271 except (KeyError, portage.exception.PortageException):
# Live ebuild unavailable: lazily load the profiles/updates data once,
# then apply any dependency-atom renames to the cached entry.
1272 if self._global_updates is None:
1273 self._global_updates = \
1274 grab_global_updates(self._portdb.porttree_root)
1275 perform_global_updates(
1276 pkg, self.dbapi, self._global_updates)
1277 return self._aux_get(pkg, wants)
# Re-synchronize this fake vardb with the real installed-package database:
# drop entries for uninstalled cpvs, re-validate surviving entries by
# COUNTER/mtime, and inject fresh Package objects for anything new or
# changed. Takes the vdb lock while scanning unless acquire_lock is false.
# NOTE(review): many original lines are elided (try/finally scaffolding,
# several branch bodies between the numbered lines) — treat the visible
# lines as a skeleton, not the complete control flow.
1279 def sync(self, acquire_lock=1):
1281 Call this method to synchronize state with the real vardb
1282 after one or more packages may have been installed or
1285 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1287 # At least the parent needs to exist for the lock file.
1288 portage.util.ensure_dirs(vdb_path)
1289 except portage.exception.PortageException:
# Only lock when the vdb is writable; a read-only user can still sync.
1293 if acquire_lock and os.access(vdb_path, os.W_OK):
1294 vdb_lock = portage.locks.lockdir(vdb_path)
1298 portage.locks.unlockdir(vdb_lock)
1302 real_vardb = self._root_config.trees["vartree"].dbapi
1303 current_cpv_set = frozenset(real_vardb.cpv_all())
1304 pkg_vardb = self.dbapi
1305 aux_get_history = self._aux_get_history
1307 # Remove any packages that have been uninstalled.
1308 for pkg in list(pkg_vardb):
1309 if pkg.cpv not in current_cpv_set:
1310 pkg_vardb.cpv_remove(pkg)
1311 aux_get_history.discard(pkg.cpv)
1313 # Validate counters and timestamps.
1316 validation_keys = ["COUNTER", "_mtime_"]
1317 for cpv in current_cpv_set:
1319 pkg_hash_key = ("installed", root, cpv, "nomerge")
1320 pkg = pkg_vardb.get(pkg_hash_key)
1322 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1324 counter = long(counter)
# A stale COUNTER (or, presumably, mtime — the second condition is
# elided at 1329) means the entry was re-merged; evict and rebuild it.
1328 if counter != pkg.counter or \
1330 pkg_vardb.cpv_remove(pkg)
1331 aux_get_history.discard(pkg.cpv)
1335 pkg = self._pkg(cpv)
# Track the highest COUNTER per slot_atom so the most recently merged
# package in a slot wins; slot_counters is defined in an elided line.
1337 other_counter = slot_counters.get(pkg.slot_atom)
1338 if other_counter is not None:
1339 if other_counter > pkg.counter:
1342 slot_counters[pkg.slot_atom] = pkg.counter
1343 pkg_vardb.cpv_inject(pkg)
1345 real_vardb.flush_cache()
# Build an installed-type Package instance for `cpv` from the real vardb's
# metadata, normalizing COUNTER to a numeric string.
# NOTE(review): the surrounding try/except for the long() conversion and
# the final `return pkg` are elided from this chunk.
1347 def _pkg(self, cpv):
1348 root_config = self._root_config
1349 real_vardb = root_config.trees["vartree"].dbapi
1350 pkg = Package(cpv=cpv, installed=True,
1351 metadata=izip(self._db_keys,
1352 real_vardb.aux_get(cpv, self._db_keys)),
1353 root_config=root_config,
1354 type_name="installed")
1357 mycounter = long(pkg.metadata["COUNTER"])
1360 pkg.metadata["COUNTER"] = str(mycounter)
# Read and parse $portdir/profiles/updates, accumulating all update
# commands (package moves/slotmoves) into a single list.
# NOTE(review): the `try:` line, the DirectoryNotFound handler body, the
# initialization of upd_commands, and the return are elided here.
1364 def grab_global_updates(portdir):
1365 from portage.update import grab_updates, parse_updates
1366 updpath = os.path.join(portdir, "profiles", "updates")
1368 rawupdates = grab_updates(updpath)
1369 except portage.exception.DirectoryNotFound:
1372 for mykey, mystat, mycontent in rawupdates:
# parse_updates also yields errors; they are ignored at this call site.
1373 commands, errors = parse_updates(mycontent)
1374 upd_commands.extend(commands)
# Apply profile update commands to one package's dependency metadata
# (DEPEND/RDEPEND/PDEPEND) in the given dbapi, writing back only the
# entries update_dbentries changed.
# NOTE(review): line 1382 (presumably an `if updates:` guard) is elided.
1377 def perform_global_updates(mycpv, mydb, mycommands):
1378 from portage.update import update_dbentries
1379 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1380 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1381 updates = update_dbentries(mycommands, aux_dict)
1383 mydb.aux_update(mycpv, updates)
# Visibility predicate: runs the package through the standard masking
# checks (SLOT present, accepted CHOST, supported/non-deprecated EAPI,
# keywords, package.mask, profile mask, licenses). Installed packages skip
# the checks that only apply to not-yet-merged ebuilds.
# NOTE(review): every `return True`/`return False` line is elided in this
# chunk, as is the try: around the license check — only the conditions are
# visible here.
1385 def visible(pkgsettings, pkg):
1387 Check if a package is visible. This can raise an InvalidDependString
1388 exception if LICENSE is invalid.
1389 TODO: optionally generate a list of masking reasons
1391 @returns: True if the package is visible, False otherwise.
1393 if not pkg.metadata["SLOT"]:
1395 if not pkg.installed:
1396 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1398 eapi = pkg.metadata["EAPI"]
1399 if not portage.eapi_is_supported(eapi):
1401 if not pkg.installed:
1402 if portage._eapi_is_deprecated(eapi):
1404 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1406 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1408 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1411 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1413 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for a package: delegate to
# portage.getmaskingstatus(), then append CHOST and missing-SLOT reasons
# that the core routine does not report.
# NOTE(review): the final `return mreasons` line is elided from this chunk.
1417 def get_masking_status(pkg, pkgsettings, root_config):
1419 mreasons = portage.getmaskingstatus(
1420 pkg, settings=pkgsettings,
1421 portdb=root_config.trees["porttree"].dbapi)
1423 if not pkg.installed:
1424 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1425 mreasons.append("CHOST: %s" % \
1426 pkg.metadata["CHOST"])
1428 if not pkg.metadata["SLOT"]:
1429 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for `cpv` from `db` and compute its masking reasons.
# Returns (metadata, mreasons); metadata is None-checked for the
# "corruption" case, and unsupported EAPIs short-circuit the full
# masking-status computation.
# NOTE(review): the try/except around the aux_get call and the branch
# scaffolding between 1446 and 1454 are elided in this chunk.
1433 def get_mask_info(root_config, cpv, pkgsettings,
1434 db, pkg_type, built, installed, db_keys):
1437 metadata = dict(izip(db_keys,
1438 db.aux_get(cpv, db_keys)))
# For unbuilt ebuilds, compute USE from current settings rather than
# using any stored value.
1441 if metadata and not built:
1442 pkgsettings.setcpv(cpv, mydb=metadata)
1443 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1444 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1445 if metadata is None:
1446 mreasons = ["corruption"]
1448 eapi = metadata['EAPI']
1451 if not portage.eapi_is_supported(eapi):
1452 mreasons = ['EAPI %s' % eapi]
1454 pkg = Package(type_name=pkg_type, root_config=root_config,
1455 cpv=cpv, built=built, installed=installed, metadata=metadata)
1456 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1457 return metadata, mreasons
# Print a report for a list of masked packages: one line per cpv with its
# masking reasons, plus (deduplicated) package.mask comments and license
# file locations. Returns whether any package was masked by an
# unsupported EAPI, so the caller can print extra advice.
# NOTE(review): several lines are elided (shown_cpvs initialization,
# comment/license printing bodies, and `continue` statements) — the
# visible lines are a skeleton of the loop.
1459 def show_masked_packages(masked_packages):
1460 shown_licenses = set()
1461 shown_comments = set()
1462 # Maybe there is both an ebuild and a binary. Only
1463 # show one of them to avoid redundant appearance.
1465 have_eapi_mask = False
1466 for (root_config, pkgsettings, cpv,
1467 metadata, mreasons) in masked_packages:
1468 if cpv in shown_cpvs:
1471 comment, filename = None, None
1472 if "package.mask" in mreasons:
1473 comment, filename = \
1474 portage.getmaskingreason(
1475 cpv, metadata=metadata,
1476 settings=pkgsettings,
1477 portdb=root_config.trees["porttree"].dbapi,
1478 return_location=True)
1479 missing_licenses = []
1481 if not portage.eapi_is_supported(metadata["EAPI"]):
1482 have_eapi_mask = True
1484 missing_licenses = \
1485 pkgsettings._getMissingLicenses(
1487 except portage.exception.InvalidDependString:
1488 # This will have already been reported
1489 # above via mreasons.
1492 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1493 if comment and comment not in shown_comments:
1496 shown_comments.add(comment)
1497 portdb = root_config.trees["porttree"].dbapi
1498 for l in missing_licenses:
1499 l_path = portdb.findLicensePath(l)
1500 if l in shown_licenses:
1502 msg = ("A copy of the '%s' license" + \
1503 " is located at '%s'.") % (l, l_path)
1506 shown_licenses.add(l)
1507 return have_eapi_mask
# Base class for schedulable items (Package, Blocker, ...): identity is a
# tuple "hash key" supplied by subclasses via _get_hash_key(). Equality,
# hashing, length, indexing, iteration and containment all delegate to
# that key, and the computed hash is memoized in _hash_value.
# NOTE(review): the `def __hash__`, `def __len__`, `def __iter__`,
# `def __str__` header lines and the `return hash_key` of _get_hash_key
# are elided from this chunk; only their bodies are visible.
1509 class Task(SlotObject):
1510 __slots__ = ("_hash_key", "_hash_value")
1512 def _get_hash_key(self):
1513 hash_key = getattr(self, "_hash_key", None)
1514 if hash_key is None:
# Subclasses must populate _hash_key before identity is used.
1515 raise NotImplementedError(self)
1518 def __eq__(self, other):
1519 return self._get_hash_key() == other
1521 def __ne__(self, other):
1522 return self._get_hash_key() != other
1525 hash_value = getattr(self, "_hash_value", None)
1526 if hash_value is None:
1527 self._hash_value = hash(self._get_hash_key())
1528 return self._hash_value
1531 return len(self._get_hash_key())
1533 def __getitem__(self, key):
1534 return self._get_hash_key()[key]
1537 return iter(self._get_hash_key())
1539 def __contains__(self, key):
1540 return key in self._get_hash_key()
1543 return str(self._get_hash_key())
# A blocking atom (!atom dependency) on a given root. Identity is the
# tuple ("blocks", root, atom, eapi); cp is derived from the atom for
# fast category/package lookups.
1545 class Blocker(Task):
# Re-declare __hash__ because defining __eq__ in Task would otherwise
# suppress inherited hashing.
1547 __hash__ = Task.__hash__
1548 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1550 def __init__(self, **kwargs):
1551 Task.__init__(self, **kwargs)
1552 self.cp = portage.dep_getkey(self.atom)
1554 def _get_hash_key(self):
1555 hash_key = getattr(self, "_hash_key", None)
1556 if hash_key is None:
# NOTE(review): the assignment line `self._hash_key = \` (orig 1557)
# is elided; only the tuple expression is visible here.
1558 ("blocks", self.root, self.atom, self.eapi)
1559 return self._hash_key
# A concrete package (ebuild/binary/installed) with cached, parsed
# metadata. Derived attributes (cp, slot_atom, category, pf, cpv_split,
# pv_split) are computed once in __init__; metadata is wrapped so that
# writes to wrapped keys keep the derived attributes in sync. Ordering
# comparisons are only meaningful between packages with the same cp.
# NOTE(review): numerous lines are elided throughout (the metadata_keys
# list header, the _use class header, slot handling around 1582-1586,
# parts of _iuse.__init__, and the return statements of the comparison
# methods) — treat the visible lines as partial.
1561 class Package(Task):
1563 __hash__ = Task.__hash__
1564 __slots__ = ("built", "cpv", "depth",
1565 "installed", "metadata", "onlydeps", "operation",
1566 "root_config", "type_name",
1567 "category", "counter", "cp", "cpv_split",
1568 "inherited", "iuse", "mtime",
1569 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# Metadata keys this class keeps; fetched from the backing dbapi.
1572 "CHOST", "COUNTER", "DEPEND", "EAPI",
1573 "INHERITED", "IUSE", "KEYWORDS",
1574 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1575 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1577 def __init__(self, **kwargs):
1578 Task.__init__(self, **kwargs)
1579 self.root = self.root_config.root
# Wrap metadata so assignments to COUNTER/IUSE/SLOT/USE/etc. update
# the corresponding Package attributes (see _PackageMetadataWrapper).
1580 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1581 self.cp = portage.cpv_getkey(self.cpv)
1584 # Avoid an InvalidAtom exception when creating slot_atom.
1585 # This package instance will be masked due to empty SLOT.
1587 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1588 self.category, self.pf = portage.catsplit(self.cpv)
1589 self.cpv_split = portage.catpkgsplit(self.cpv)
1590 self.pv_split = self.cpv_split[1:]
# Nested helper holding the enabled USE flags as a frozenset.
1594 __slots__ = ("__weakref__", "enabled")
1596 def __init__(self, use):
1597 self.enabled = frozenset(use)
# Nested helper parsing IUSE tokens into enabled (+flag), disabled
# (-flag) and plain sets, with a lazily built matching regex.
1599 class _iuse(object):
1601 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1603 def __init__(self, tokens, iuse_implicit):
1604 self.tokens = tuple(tokens)
1605 self.iuse_implicit = iuse_implicit
1612 enabled.append(x[1:])
1614 disabled.append(x[1:])
1617 self.enabled = frozenset(enabled)
1618 self.disabled = frozenset(disabled)
1619 self.all = frozenset(chain(enabled, disabled, other))
# Lazily compile self.regex on first access; other attributes pass
# straight through to the default lookup.
1621 def __getattribute__(self, name):
1624 return object.__getattribute__(self, "regex")
1625 except AttributeError:
1626 all = object.__getattribute__(self, "all")
1627 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1628 # Escape anything except ".*" which is supposed
1629 # to pass through from _get_implicit_iuse()
1630 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1631 regex = "^(%s)$" % "|".join(regex)
1632 regex = regex.replace("\\.\\*", ".*")
1633 self.regex = re.compile(regex)
1634 return object.__getattribute__(self, name)
# Identity tuple: (type_name, root, cpv, operation); operation
# defaults to "merge" but becomes "nomerge" for onlydeps/installed.
1636 def _get_hash_key(self):
1637 hash_key = getattr(self, "_hash_key", None)
1638 if hash_key is None:
1639 if self.operation is None:
1640 self.operation = "merge"
1641 if self.onlydeps or self.installed:
1642 self.operation = "nomerge"
1644 (self.type_name, self.root, self.cpv, self.operation)
1645 return self._hash_key
# Version-ordering comparisons via portage.pkgcmp on pv_split.
1647 def __lt__(self, other):
1648 if other.cp != self.cp:
1650 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1654 def __le__(self, other):
1655 if other.cp != self.cp:
1657 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1661 def __gt__(self, other):
1662 if other.cp != self.cp:
1664 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1668 def __ge__(self, other):
1669 if other.cp != self.cp:
1671 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Build the full metadata key set for the slot-dict metadata wrapper:
# every auxdb key except the UNUSED_* placeholders and legacy CDEPEND,
# plus everything Package itself tracks.
1675 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1676 if not x.startswith("UNUSED_"))
1677 _all_metadata_keys.discard("CDEPEND")
1678 _all_metadata_keys.update(Package.metadata_keys)
1680 from portage.cache.mappings import slot_dict_class
# A __slots__-based dict class restricted to the keys above; used as the
# base for _PackageMetadataWrapper to keep per-package memory low.
1681 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Metadata dict that keeps its owning Package's derived attributes in
# sync: writes to any key in _wrapped_keys are forwarded to the matching
# _set_<key>() hook, which parses the raw string value and stores it on
# the Package (inherited/iuse/slot/use/counter/mtime).
# NOTE(review): a few lines are elided (self._pkg assignment in __init__,
# the _set_slot body, and the numeric-conversion try/except bodies of
# _set_counter and _set__mtime_).
1683 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1685 Detect metadata updates and synchronize Package attributes.
1688 __slots__ = ("_pkg",)
1689 _wrapped_keys = frozenset(
1690 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1692 def __init__(self, pkg, metadata):
1693 _PackageMetadataWrapperBase.__init__(self)
1695 self.update(metadata)
1697 def __setitem__(self, k, v):
1698 _PackageMetadataWrapperBase.__setitem__(self, k, v)
# Dispatch to _set_counter / _set_iuse / etc. by lowercased key name.
1699 if k in self._wrapped_keys:
1700 getattr(self, "_set_" + k.lower())(k, v)
1702 def _set_inherited(self, k, v):
1703 if isinstance(v, basestring):
1704 v = frozenset(v.split())
1705 self._pkg.inherited = v
1707 def _set_iuse(self, k, v):
1708 self._pkg.iuse = self._pkg._iuse(
1709 v.split(), self._pkg.root_config.iuse_implicit)
1711 def _set_slot(self, k, v):
1714 def _set_use(self, k, v):
1715 self._pkg.use = self._pkg._use(v.split())
1717 def _set_counter(self, k, v):
1718 if isinstance(v, basestring):
1723 self._pkg.counter = v
1725 def _set__mtime_(self, k, v):
1726 if isinstance(v, basestring):
# Runs the "fetch" phase of an ebuild for --fetchonly/--fetch-all-uri
# operation. RESTRICT=fetch packages need a build dir (so pkg_nofetch can
# run and elog messages can be stored); those go through
# _execute_with_builddir(), which uses a private temp PORTAGE_TMPDIR.
# NOTE(review): the `def execute(self):` header and the `def _execute`
# header, the try/finally scaffolding, and several return lines are
# elided in this chunk.
1733 class EbuildFetchonly(SlotObject):
1735 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1738 settings = self.settings
1740 portdb = pkg.root_config.trees["porttree"].dbapi
1741 ebuild_path = portdb.findname(pkg.cpv)
1742 settings.setcpv(pkg)
1743 debug = settings.get("PORTAGE_DEBUG") == "1"
1744 use_cache = 1 # always true
1745 portage.doebuild_environment(ebuild_path, "fetch",
1746 settings["ROOT"], settings, debug, use_cache, portdb)
1747 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1750 rval = self._execute_with_builddir()
1752 rval = portage.doebuild(ebuild_path, "fetch",
1753 settings["ROOT"], settings, debug=debug,
1754 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1755 mydbapi=portdb, tree="porttree")
1757 if rval != os.EX_OK:
1758 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1759 eerror(msg, phase="unpack", key=pkg.cpv)
1763 def _execute_with_builddir(self):
1764 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1765 # ensuring sane $PWD (bug #239560) and storing elog
1766 # messages. Use a private temp directory, in order
1767 # to avoid locking the main one.
1768 settings = self.settings
1769 global_tmpdir = settings["PORTAGE_TMPDIR"]
1770 from tempfile import mkdtemp
1772 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1774 if e.errno != portage.exception.PermissionDenied.errno:
1776 raise portage.exception.PermissionDenied(global_tmpdir)
1777 settings["PORTAGE_TMPDIR"] = private_tmpdir
1778 settings.backup_changes("PORTAGE_TMPDIR")
1780 retval = self._execute()
# Restore the global tmpdir and remove the private one afterwards;
# presumably inside a finally: block (elided here) — confirm in the
# full file.
1782 settings["PORTAGE_TMPDIR"] = global_tmpdir
1783 settings.backup_changes("PORTAGE_TMPDIR")
1784 shutil.rmtree(private_tmpdir)
1788 settings = self.settings
1790 root_config = pkg.root_config
1791 portdb = root_config.trees["porttree"].dbapi
1792 ebuild_path = portdb.findname(pkg.cpv)
1793 debug = settings.get("PORTAGE_DEBUG") == "1"
1794 retval = portage.doebuild(ebuild_path, "fetch",
1795 self.settings["ROOT"], self.settings, debug=debug,
1796 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1797 mydbapi=portdb, tree="porttree")
1799 if retval != os.EX_OK:
1800 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1801 eerror(msg, phase="unpack", key=pkg.cpv)
# Flush any elog messages (e.g. from pkg_nofetch) before returning.
1803 portage.elog.elog_process(self.pkg.cpv, self.settings)
# Namespace of POLLIN/POLLPRI/... constants mirroring the select module,
# with fallback values for platforms whose select module lacks them.
# NOTE(review): the loop header that binds k and v (orig 1814-1815) is
# elided; only the locals() assignment line is visible.
1806 class PollConstants(object):
1809 Provides POLL* constants that are equivalent to those from the
1810 select module, for use by PollSelectAdapter.
1813 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1816 locals()[k] = getattr(select, k, v)
# Base class for asynchronous tasks driven by the scheduler. Tracks
# background/cancelled/returncode state and supports start and exit
# listeners; _start_hook/_wait_hook fire them exactly once. Subclasses
# override the underscore-prefixed hooks (_start, _poll, _wait, _cancel —
# several of whose def headers are elided from this chunk).
1820 class AsynchronousTask(SlotObject):
1822 Subclasses override _wait() and _poll() so that calls
1823 to public methods can be wrapped for implementing
1824 hooks such as exit listener notification.
1826 Sublasses should call self.wait() to notify exit listeners after
1827 the task is complete and self.returncode has been set.
1830 __slots__ = ("background", "cancelled", "returncode") + \
1831 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1835 Start an asynchronous task and then return as soon as possible.
1841 raise NotImplementedError(self)
1844 return self.returncode is None
1851 return self.returncode
1854 if self.returncode is None:
1857 return self.returncode
1860 return self.returncode
1863 self.cancelled = True
1866 def addStartListener(self, f):
1868 The function will be called with one argument, a reference to self.
1870 if self._start_listeners is None:
1871 self._start_listeners = []
1872 self._start_listeners.append(f)
1874 def removeStartListener(self, f):
1875 if self._start_listeners is None:
1877 self._start_listeners.remove(f)
1879 def _start_hook(self):
# Consume the listener list before calling, so each listener fires
# at most once even if a listener re-enters this object.
1880 if self._start_listeners is not None:
1881 start_listeners = self._start_listeners
1882 self._start_listeners = None
1884 for f in start_listeners:
1887 def addExitListener(self, f):
1889 The function will be called with one argument, a reference to self.
1891 if self._exit_listeners is None:
1892 self._exit_listeners = []
1893 self._exit_listeners.append(f)
1895 def removeExitListener(self, f):
# While _wait_hook is running, pending listeners live on
# _exit_listener_stack instead; remove from there in that case.
1896 if self._exit_listeners is None:
1897 if self._exit_listener_stack is not None:
1898 self._exit_listener_stack.remove(f)
1900 self._exit_listeners.remove(f)
1902 def _wait_hook(self):
1904 Call this method after the task completes, just before returning
1905 the returncode from wait() or poll(). This hook is
1906 used to trigger exit listeners when the returncode first
1909 if self.returncode is not None and \
1910 self._exit_listeners is not None:
1912 # This prevents recursion, in case one of the
1913 # exit handlers triggers this method again by
1914 # calling wait(). Use a stack that gives
1915 # removeExitListener() an opportunity to consume
1916 # listeners from the stack, before they can get
1917 # called below. This is necessary because a call
1918 # to one exit listener may result in a call to
1919 # removeExitListener() for another listener on
1920 # the stack. That listener needs to be removed
1921 # from the stack since it would be inconsistent
1922 # to call it after it has been been passed into
1923 # removeExitListener().
1924 self._exit_listener_stack = self._exit_listeners
1925 self._exit_listeners = None
# Reverse so listeners are popped (and called) in add order.
1927 self._exit_listener_stack.reverse()
1928 while self._exit_listener_stack:
1929 self._exit_listener_stack.pop()(self)
# AsynchronousTask that is driven by the scheduler's poll() loop.
# Defines the event masks used when registering file descriptors, and a
# helper to unregister on error/hangup events.
# NOTE(review): additional __slots__ entries, the _registered_events
# continuation line, and the unregister-branch bodies are elided.
1931 class AbstractPollTask(AsynchronousTask):
1933 __slots__ = ("scheduler",) + \
1937 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1938 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1941 def _unregister(self):
1942 raise NotImplementedError(self)
1944 def _unregister_if_appropriate(self, event):
1945 if self._registered:
1946 if event & self._exceptional_events:
1949 elif event & PollConstants.POLLHUP:
# Non-blocking reader that registers each input file with the scheduler,
# accumulates chunks in _read_data as the poll loop fires
# _output_handler, and exposes the result via getvalue().
# NOTE(review): many def headers (_start, _cancel, _wait, getvalue,
# close) and some branch bodies are elided from this chunk.
1953 class PipeReader(AbstractPollTask):
1956 Reads output from one or more files and saves it in memory,
1957 for retrieval via the getvalue() method. This is driven by
1958 the scheduler's poll() loop, so it runs entirely within the
1962 __slots__ = ("input_files",) + \
1963 ("_read_data", "_reg_ids")
1966 self._reg_ids = set()
1967 self._read_data = []
# Switch every input fd to non-blocking mode before registering it.
1968 for k, f in self.input_files.iteritems():
1969 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1970 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1971 self._reg_ids.add(self.scheduler.register(f.fileno(),
1972 self._registered_events, self._output_handler))
1973 self._registered = True
1976 return self._registered
1979 if self.returncode is None:
1981 self.cancelled = True
1985 if self.returncode is not None:
1986 return self.returncode
# Block in the scheduler until all registered fds have been consumed.
1988 if self._registered:
1989 self.scheduler.schedule(self._reg_ids)
1992 self.returncode = os.EX_OK
1993 return self.returncode
1996 """Retrieve the entire contents"""
1997 if sys.hexversion >= 0x3000000:
1998 return bytes().join(self._read_data)
1999 return "".join(self._read_data)
2002 """Free the memory buffer."""
2003 self._read_data = None
2005 def _output_handler(self, fd, event):
2007 if event & PollConstants.POLLIN:
# Find which input file this fd belongs to, then drain a chunk.
2009 for f in self.input_files.itervalues():
2010 if fd == f.fileno():
2013 buf = array.array('B')
2015 buf.fromfile(f, self._bufsize)
2020 self._read_data.append(buf.tostring())
2025 self._unregister_if_appropriate(event)
2026 return self._registered
2028 def _unregister(self):
2030 Unregister from the scheduler and close open files.
2033 self._registered = False
2035 if self._reg_ids is not None:
2036 for reg_id in self._reg_ids:
2037 self.scheduler.unregister(reg_id)
2038 self._reg_ids = None
2040 if self.input_files is not None:
2041 for f in self.input_files.itervalues():
2043 self.input_files = None
# A task composed of a chain of sub-tasks, tracked via _current_task.
# Provides generic exit-handler plumbing (_default_exit, _final_exit,
# _default_final_exit, _start_task) that subclasses wire together to
# advance from one sub-task to the next.
# NOTE(review): several def headers (_cancel, _poll, _wait) and loop
# scaffolding lines are elided from this chunk.
2045 class CompositeTask(AsynchronousTask):
2047 __slots__ = ("scheduler",) + ("_current_task",)
2050 return self._current_task is not None
2053 self.cancelled = True
2054 if self._current_task is not None:
2055 self._current_task.cancel()
2059 This does a loop calling self._current_task.poll()
2060 repeatedly as long as the value of self._current_task
2061 keeps changing. It calls poll() a maximum of one time
2062 for a given self._current_task instance. This is useful
2063 since calling poll() on a task can trigger advance to
2064 the next task could eventually lead to the returncode
2065 being set in cases when polling only a single task would
2066 not have the same effect.
2071 task = self._current_task
2072 if task is None or task is prev:
2073 # don't poll the same task more than once
2078 return self.returncode
2084 task = self._current_task
2086 # don't wait for the same task more than once
2089 # Before the task.wait() method returned, an exit
2090 # listener should have set self._current_task to either
2091 # a different task or None. Something is wrong.
2092 raise AssertionError("self._current_task has not " + \
2093 "changed since calling wait", self, task)
2097 return self.returncode
2099 def _assert_current(self, task):
2101 Raises an AssertionError if the given task is not the
2102 same one as self._current_task. This can be useful
2105 if task is not self._current_task:
2106 raise AssertionError("Unrecognized task: %s" % (task,))
2108 def _default_exit(self, task):
2110 Calls _assert_current() on the given task and then sets the
2111 composite returncode attribute if task.returncode != os.EX_OK.
2112 If the task failed then self._current_task will be set to None.
2113 Subclasses can use this as a generic task exit callback.
2116 @returns: The task.returncode attribute.
2118 self._assert_current(task)
2119 if task.returncode != os.EX_OK:
2120 self.returncode = task.returncode
2121 self._current_task = None
2122 return task.returncode
2124 def _final_exit(self, task):
2126 Assumes that task is the final task of this composite task.
2127 Calls _default_exit() and sets self.returncode to the task's
2128 returncode and sets self._current_task to None.
2130 self._default_exit(task)
2131 self._current_task = None
2132 self.returncode = task.returncode
2133 return self.returncode
2135 def _default_final_exit(self, task):
2137 This calls _final_exit() and then wait().
2139 Subclasses can use this as a generic final task exit callback.
2142 self._final_exit(task)
2145 def _start_task(self, task, exit_handler):
2147 Register exit handler for the given task, set it
2148 as self._current_task, and call task.start().
2150 Subclasses can use this as a generic way to start
2154 task.addExitListener(exit_handler)
2155 self._current_task = task
# CompositeTask that runs queued sub-tasks strictly one after another,
# using a deque as the pending queue. A failing sub-task stops the
# sequence (via _default_exit returning non-EX_OK); the last sub-task
# finishes through _final_exit.
# NOTE(review): the _start and _cancel def headers and the failure-branch
# body in _task_exit_handler are elided from this chunk.
2158 class TaskSequence(CompositeTask):
2160 A collection of tasks that executes sequentially. Each task
2161 must have a addExitListener() method that can be used as
2162 a means to trigger movement from one task to the next.
2165 __slots__ = ("_task_queue",)
2167 def __init__(self, **kwargs):
2168 AsynchronousTask.__init__(self, **kwargs)
2169 self._task_queue = deque()
2171 def add(self, task):
2172 self._task_queue.append(task)
2175 self._start_next_task()
2178 self._task_queue.clear()
2179 CompositeTask.cancel(self)
2181 def _start_next_task(self):
2182 self._start_task(self._task_queue.popleft(),
2183 self._task_exit_handler)
2185 def _task_exit_handler(self, task):
2186 if self._default_exit(task) != os.EX_OK:
2188 elif self._task_queue:
2189 self._start_next_task()
2191 self._final_exit(task)
# AbstractPollTask wrapping a forked child process: polls with
# os.waitpid(WNOHANG), cancels via SIGTERM, and decodes the waitpid
# status into self.returncode. A pipe fd (registered as _reg_id) lets the
# scheduler notice child activity from inside its poll() loop.
# NOTE(review): def headers (_poll, _cancel, _wait, isAlive) and several
# try/except scaffolding lines are elided from this chunk.
2194 class SubProcess(AbstractPollTask):
2196 __slots__ = ("pid",) + \
2197 ("_files", "_reg_id")
2199 # A file descriptor is required for the scheduler to monitor changes from
2200 # inside a poll() loop. When logging is not enabled, create a pipe just to
2201 # serve this purpose alone.
2205 if self.returncode is not None:
2206 return self.returncode
2207 if self.pid is None:
2208 return self.returncode
2209 if self._registered:
2210 return self.returncode
2213 retval = os.waitpid(self.pid, os.WNOHANG)
# ECHILD means the child was already reaped elsewhere; treat any
# other errno as fatal and synthesize a failure status.
2215 if e.errno != errno.ECHILD:
2218 retval = (self.pid, 1)
2220 if retval == (0, 0):
2222 self._set_returncode(retval)
2223 return self.returncode
2228 os.kill(self.pid, signal.SIGTERM)
2230 if e.errno != errno.ESRCH:
2234 self.cancelled = True
2235 if self.pid is not None:
2237 return self.returncode
2240 return self.pid is not None and \
2241 self.returncode is None
2245 if self.returncode is not None:
2246 return self.returncode
# Let the scheduler drive output handling until our fd unregisters.
2248 if self._registered:
2249 self.scheduler.schedule(self._reg_id)
2251 if self.returncode is not None:
2252 return self.returncode
2255 wait_retval = os.waitpid(self.pid, 0)
2257 if e.errno != errno.ECHILD:
2260 self._set_returncode((self.pid, 1))
2262 self._set_returncode(wait_retval)
2264 return self.returncode
2266 def _unregister(self):
2268 Unregister from the scheduler and close open files.
2271 self._registered = False
2273 if self._reg_id is not None:
2274 self.scheduler.unregister(self._reg_id)
2277 if self._files is not None:
2278 for f in self._files.itervalues():
2282 def _set_returncode(self, wait_retval):
# Decode the 16-bit waitpid status: signal deaths are shifted into
# the exit-code byte so callers see a non-zero returncode.
2284 retval = wait_retval[1]
2286 if retval != os.EX_OK:
2288 retval = (retval & 0xff) << 8
2290 retval = retval >> 8
2292 self.returncode = retval
# SubProcess that spawns via portage.process.spawn(). Sets up fd_pipes
# (stdin/stdout/stderr plus a master/slave pipe), optionally tees child
# output to a logfile and/or the parent's stdout via _output_handler, or
# uses _dummy_handler when the pipe exists only for EOF detection.
# NOTE(review): numerous lines are elided throughout (the __slots__
# continuation, the _start def header, try/except scaffolding in the
# output handlers, and the close/unregister epilogues) — treat the
# visible lines as partial.
2294 class SpawnProcess(SubProcess):
2297 Constructor keyword args are passed into portage.process.spawn().
2298 The required "args" keyword argument will be passed as the first
2302 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2303 "uid", "gid", "groups", "umask", "logfile",
2304 "path_lookup", "pre_exec")
2306 __slots__ = ("args",) + \
2309 _file_names = ("log", "process", "stdout")
2310 _files_dict = slot_dict_class(_file_names, prefix="")
2317 if self.fd_pipes is None:
2319 fd_pipes = self.fd_pipes
2320 fd_pipes.setdefault(0, sys.stdin.fileno())
2321 fd_pipes.setdefault(1, sys.stdout.fileno())
2322 fd_pipes.setdefault(2, sys.stderr.fileno())
2324 # flush any pending output
2325 for fd in fd_pipes.itervalues():
2326 if fd == sys.stdout.fileno():
2328 if fd == sys.stderr.fileno():
2331 logfile = self.logfile
2332 self._files = self._files_dict()
# Parent reads the child's output from master_fd (non-blocking).
2335 master_fd, slave_fd = self._pipe(fd_pipes)
2336 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2337 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2340 fd_pipes_orig = fd_pipes.copy()
2342 # TODO: Use job control functions like tcsetpgrp() to control
2343 # access to stdin. Until then, use /dev/null so that any
2344 # attempts to read from stdin will immediately return EOF
2345 # instead of blocking indefinitely.
2346 null_input = open('/dev/null', 'rb')
2347 fd_pipes[0] = null_input.fileno()
2349 fd_pipes[0] = fd_pipes_orig[0]
2351 files.process = os.fdopen(master_fd, 'rb')
2352 if logfile is not None:
# Logging mode: child's stdout/stderr both go through the pipe and
# are appended to the logfile (and echoed unless backgrounded).
2354 fd_pipes[1] = slave_fd
2355 fd_pipes[2] = slave_fd
2357 files.log = open(logfile, mode='ab')
2358 portage.util.apply_secpass_permissions(logfile,
2359 uid=portage.portage_uid, gid=portage.portage_gid,
2362 if not self.background:
2363 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2365 output_handler = self._output_handler
2369 # Create a dummy pipe so the scheduler can monitor
2370 # the process from inside a poll() loop.
2371 fd_pipes[self._dummy_pipe_fd] = slave_fd
2373 fd_pipes[1] = slave_fd
2374 fd_pipes[2] = slave_fd
2375 output_handler = self._dummy_handler
# Gather spawn() kwargs from the matching instance attributes.
2378 for k in self._spawn_kwarg_names:
2379 v = getattr(self, k)
2383 kwargs["fd_pipes"] = fd_pipes
2384 kwargs["returnpid"] = True
2385 kwargs.pop("logfile", None)
2387 self._reg_id = self.scheduler.register(files.process.fileno(),
2388 self._registered_events, output_handler)
2389 self._registered = True
2391 retval = self._spawn(self.args, **kwargs)
2394 if null_input is not None:
# An int return from spawn indicates failure to fork/exec.
2397 if isinstance(retval, int):
2400 self.returncode = retval
2404 self.pid = retval[0]
2405 portage.process.spawned_pids.remove(self.pid)
2407 def _pipe(self, fd_pipes):
2409 @type fd_pipes: dict
2410 @param fd_pipes: pipes from which to copy terminal size if desired.
2414 def _spawn(self, args, **kwargs):
2415 return portage.process.spawn(args, **kwargs)
2417 def _output_handler(self, fd, event):
2419 if event & PollConstants.POLLIN:
2422 buf = array.array('B')
2424 buf.fromfile(files.process, self._bufsize)
2429 if not self.background:
2430 write_successful = False
2434 if not write_successful:
2435 buf.tofile(files.stdout)
2436 write_successful = True
2437 files.stdout.flush()
2440 if e.errno != errno.EAGAIN:
2445 # Avoid a potentially infinite loop. In
2446 # most cases, the failure count is zero
2447 # and it's unlikely to exceed 1.
2450 # This means that a subprocess has put an inherited
2451 # stdio file descriptor (typically stdin) into
2452 # O_NONBLOCK mode. This is not acceptable (see bug
2453 # #264435), so revert it. We need to use a loop
2454 # here since there's a race condition due to
2455 # parallel processes being able to change the
2456 # flags on the inherited file descriptor.
2457 # TODO: When possible, avoid having child processes
2458 # inherit stdio file descriptors from portage
2459 # (maybe it can't be avoided with
2460 # PROPERTIES=interactive).
2461 fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2462 fcntl.fcntl(files.stdout.fileno(),
2463 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2465 buf.tofile(files.log)
2471 self._unregister_if_appropriate(event)
2472 return self._registered
2474 def _dummy_handler(self, fd, event):
2476 This method is mainly interested in detecting EOF, since
2477 the only purpose of the pipe is to allow the scheduler to
2478 monitor the process from inside a poll() loop.
2481 if event & PollConstants.POLLIN:
2483 buf = array.array('B')
2485 buf.fromfile(self._files.process, self._bufsize)
2495 self._unregister_if_appropriate(event)
2496 return self._registered
# SpawnProcess specialization that runs misc-functions.sh phases inside
# an existing ebuild environment, then post-processes the exit status via
# the EBUILD_EXIT_STATUS_FILE mechanism.
# NOTE(review): the _start def header and some setup lines between the
# numbered lines are elided from this chunk.
2498 class MiscFunctionsProcess(SpawnProcess):
2500 Spawns misc-functions.sh with an existing ebuild environment.
2503 __slots__ = ("commands", "phase", "pkg", "settings")
2506 settings = self.settings
2507 settings.pop("EBUILD_PHASE", None)
2508 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2509 misc_sh_binary = os.path.join(portage_bin_path,
2510 os.path.basename(portage.const.MISC_SH_BINARY))
2512 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2513 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before the new phase runs.
2515 portage._doebuild_exit_status_unlink(
2516 settings.get("EBUILD_EXIT_STATUS_FILE"))
2518 SpawnProcess._start(self)
2520 def _spawn(self, args, **kwargs):
2521 settings = self.settings
2522 debug = settings.get("PORTAGE_DEBUG") == "1"
# Run through portage.spawn so the ebuild sandbox/env applies.
2523 return portage.spawn(" ".join(args), settings,
2524 debug=debug, **kwargs)
2526 def _set_returncode(self, wait_retval):
2527 SpawnProcess._set_returncode(self, wait_retval)
2528 self.returncode = portage._doebuild_exit_status_check_and_log(
2529 self.settings, self.phase, self.returncode)
# SpawnProcess that runs the ebuild "fetch" phase as a subprocess (via
# the ebuild binary). In non-prefetch mode it locks and prepares a build
# dir so pkg_nofetch/elog output has somewhere to go; prefetch mode
# deliberately avoids touching the build dir. _pipe() uses a pty when
# attached to a terminal so fetcher progress bars render correctly.
# NOTE(review): the __slots__ continuation, the _start def header, the
# prefetch phase selection, and several try/finally lines are elided from
# this chunk.
2531 class EbuildFetcher(SpawnProcess):
2533 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2538 root_config = self.pkg.root_config
2539 portdb = root_config.trees["porttree"].dbapi
2540 ebuild_path = portdb.findname(self.pkg.cpv)
2541 settings = self.config_pool.allocate()
2542 settings.setcpv(self.pkg)
2544 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2545 # should not be touched since otherwise it could interfere with
2546 # another instance of the same cpv concurrently being built for a
2547 # different $ROOT (currently, builds only cooperate with prefetchers
2548 # that are spawned for the same $ROOT).
2549 if not self.prefetch:
2550 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2551 self._build_dir.lock()
2552 self._build_dir.clean_log()
2553 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2554 if self.logfile is None:
2555 self.logfile = settings.get("PORTAGE_LOG_FILE")
2561 # If any incremental variables have been overridden
2562 # via the environment, those values need to be passed
2563 # along here so that they are correctly considered by
2564 # the config instance in the subproccess.
2565 fetch_env = os.environ.copy()
2567 nocolor = settings.get("NOCOLOR")
2568 if nocolor is not None:
2569 fetch_env["NOCOLOR"] = nocolor
2571 fetch_env["PORTAGE_NICENESS"] = "0"
2573 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2575 ebuild_binary = os.path.join(
2576 settings["PORTAGE_BIN_PATH"], "ebuild")
# `phase` is chosen on an elided line (presumably "fetch" or a
# prefetch variant) — confirm against the full file.
2578 fetch_args = [ebuild_binary, ebuild_path, phase]
2579 debug = settings.get("PORTAGE_DEBUG") == "1"
2581 fetch_args.append("--debug")
2583 self.args = fetch_args
2584 self.env = fetch_env
2585 SpawnProcess._start(self)
2587 def _pipe(self, fd_pipes):
2588 """When appropriate, use a pty so that fetcher progress bars,
2589 like wget has, will work properly."""
2590 if self.background or not sys.stdout.isatty():
2591 # When the output only goes to a log file,
2592 # there's no point in creating a pty.
2594 stdout_pipe = fd_pipes.get(1)
2595 got_pty, master_fd, slave_fd = \
2596 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2597 return (master_fd, slave_fd)
2599 def _set_returncode(self, wait_retval):
2600 SpawnProcess._set_returncode(self, wait_retval)
2601 # Collect elog messages that might have been
2602 # created by the pkg_nofetch phase.
2603 if self._build_dir is not None:
2604 # Skip elog messages for prefetch, in order to avoid duplicates.
2605 if not self.prefetch and self.returncode != os.EX_OK:
# On failure, record an eerror (into the logfile when present) so
# the failure is visible in the elog output.
2607 if self.logfile is not None:
2609 elog_out = open(self.logfile, 'a')
2610 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2611 if self.logfile is not None:
2612 msg += ", Log file:"
2613 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2614 if self.logfile is not None:
2615 eerror(" '%s'" % (self.logfile,),
2616 phase="unpack", key=self.pkg.cpv, out=elog_out)
2617 if elog_out is not None:
2619 if not self.prefetch:
2620 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2621 features = self._build_dir.settings.features
2622 if self.returncode == os.EX_OK:
2623 self._build_dir.clean_log()
# Release the build dir lock and return the settings instance to
# the shared config pool.
2624 self._build_dir.unlock()
2625 self.config_pool.deallocate(self._build_dir.settings)
2626 self._build_dir = None
# EbuildBuildDir: manages locking and log cleanup for a package's
# $PORTAGE_BUILDDIR.  Locks the category dir briefly while creating and
# locking the per-package dir.  NOTE(review): numbered listing with interior
# lines elided; some statements are missing from the bodies below.
2628 class EbuildBuildDir(SlotObject):
2630 __slots__ = ("dir_path", "pkg", "settings",
2631 "locked", "_catdir", "_lock_obj")
2633 def __init__(self, **kwargs):
2634 SlotObject.__init__(self, **kwargs)
# (elided: presumably initializes `locked` / `_lock_obj` defaults)
# lock() docstring fragment (the `def lock(self):` header is elided):
2639 This raises an AlreadyLocked exception if lock() is called
2640 while a lock is already held. In order to avoid this, call
2641 unlock() or check whether the "locked" attribute is True
2642 or False before calling lock().
2644 if self._lock_obj is not None:
2645 raise self.AlreadyLocked((self._lock_obj,))
2647 dir_path = self.dir_path
2648 if dir_path is None:
# No explicit dir_path: derive PORTAGE_BUILDDIR by running the
# doebuild environment setup for this package.
2649 root_config = self.pkg.root_config
2650 portdb = root_config.trees["porttree"].dbapi
2651 ebuild_path = portdb.findname(self.pkg.cpv)
2652 settings = self.settings
2653 settings.setcpv(self.pkg)
2654 debug = settings.get("PORTAGE_DEBUG") == "1"
2655 use_cache = 1 # always true
2656 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2657 self.settings, debug, use_cache, portdb)
2658 dir_path = self.settings["PORTAGE_BUILDDIR"]
2660 catdir = os.path.dirname(dir_path)
2661 self._catdir = catdir
# Ensure the parent of the category dir exists with portage group
# ownership, then lock the category dir while creating/locking the
# package build dir.
2663 portage.util.ensure_dirs(os.path.dirname(catdir),
2664 gid=portage.portage_gid,
2668 catdir_lock = portage.locks.lockdir(catdir)
2669 portage.util.ensure_dirs(catdir,
2670 gid=portage.portage_gid,
2672 self._lock_obj = portage.locks.lockdir(dir_path)
2674 self.locked = self._lock_obj is not None
2675 if catdir_lock is not None:
2676 portage.locks.unlockdir(catdir_lock)
2678 def clean_log(self):
2679 """Discard existing log."""
2680 settings = self.settings
2682 for x in ('.logid', 'temp/build.log'):
# (elided: presumably a try/except around unlink for missing files)
2684 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
# unlock() body (the `def unlock(self):` header is elided): release the
# build dir lock, then opportunistically remove the now-possibly-empty
# category dir while holding its lock.
2689 if self._lock_obj is None:
2692 portage.locks.unlockdir(self._lock_obj)
2693 self._lock_obj = None
2696 catdir = self._catdir
2699 catdir_lock = portage.locks.lockdir(catdir)
# ENOENT/ENOTEMPTY/EEXIST from the rmdir attempt are expected and
# ignored; anything else propagates.
2705 if e.errno not in (errno.ENOENT,
2706 errno.ENOTEMPTY, errno.EEXIST):
2709 portage.locks.unlockdir(catdir_lock)
2711 class AlreadyLocked(portage.exception.PortageException):
# EbuildBuild: CompositeTask that drives a full from-source build of one
# package: optional prefetch wait -> fetch -> build phases (EbuildExecuter)
# -> optional binary packaging (EbuildBinpkg) -> install/merge.
# NOTE(review): numbered listing with interior lines elided; several method
# headers and branch bodies are missing below.
2714 class EbuildBuild(CompositeTask):
2716 __slots__ = ("args_set", "config_pool", "find_blockers",
2717 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2718 "prefetcher", "settings", "world_atom") + \
2719 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# _start (header elided): bind settings to this package and decide
# whether to wait for a running background prefetcher first.
2723 logger = self.logger
2726 settings = self.settings
2727 world_atom = self.world_atom
2728 root_config = pkg.root_config
2731 portdb = root_config.trees[tree].dbapi
2732 settings.setcpv(pkg)
2733 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2734 ebuild_path = portdb.findname(self.pkg.cpv)
2735 self._ebuild_path = ebuild_path
2737 prefetcher = self.prefetcher
2738 if prefetcher is None:
2740 elif not prefetcher.isAlive():
2742 elif prefetcher.poll() is None:
# Prefetcher still running: print a notice and resume via its exit
# listener rather than blocking.
2744 waiting_msg = "Fetching files " + \
2745 "in the background. " + \
2746 "To view fetch progress, run `tail -f " + \
2747 "/var/log/emerge-fetch.log` in another " + \
2749 msg_prefix = colorize("GOOD", " * ")
2750 from textwrap import wrap
2751 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2752 for line in wrap(waiting_msg, 65))
2753 if not self.background:
2754 writemsg(waiting_msg, noiselevel=-1)
2756 self._current_task = prefetcher
2757 prefetcher.addExitListener(self._prefetch_exit)
2760 self._prefetch_exit(prefetcher)
2762 def _prefetch_exit(self, prefetcher):
2766 settings = self.settings
# Pretend/fetchonly path uses the synchronous EbuildFetchonly;
# otherwise an asynchronous EbuildFetcher is started.
2769 fetcher = EbuildFetchonly(
2770 fetch_all=opts.fetch_all_uri,
2771 pkg=pkg, pretend=opts.pretend,
2773 retval = fetcher.execute()
2774 self.returncode = retval
2778 fetcher = EbuildFetcher(config_pool=self.config_pool,
2779 fetchall=opts.fetch_all_uri,
2780 fetchonly=opts.fetchonly,
2781 background=self.background,
2782 pkg=pkg, scheduler=self.scheduler)
2784 self._start_task(fetcher, self._fetch_exit)
2786 def _fetch_exit(self, fetcher):
2790 fetch_failed = False
2792 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2794 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2796 if fetch_failed and fetcher.logfile is not None and \
2797 os.path.exists(fetcher.logfile):
2798 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2800 if not fetch_failed and fetcher.logfile is not None:
2801 # Fetch was successful, so remove the fetch log.
2803 os.unlink(fetcher.logfile)
2807 if fetch_failed or opts.fetchonly:
# (elided: presumably self.wait() and return)
2811 logger = self.logger
2813 pkg_count = self.pkg_count
2814 scheduler = self.scheduler
2815 settings = self.settings
2816 features = settings.features
2817 ebuild_path = self._ebuild_path
2818 system_set = pkg.root_config.sets["system"]
2820 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2821 self._build_dir.lock()
2823 # Cleaning is triggered before the setup
2824 # phase, in portage.doebuild().
2825 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2826 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2827 short_msg = "emerge: (%s of %s) %s Clean" % \
2828 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2829 logger.log(msg, short_msg=short_msg)
2831 #buildsyspkg: Check if we need to _force_ binary package creation
2832 self._issyspkg = "buildsyspkg" in features and \
2833 system_set.findAtomForPackage(pkg) and \
2836 if opts.buildpkg or self._issyspkg:
2838 self._buildpkg = True
2840 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2841 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2842 short_msg = "emerge: (%s of %s) %s Compile" % \
2843 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2844 logger.log(msg, short_msg=short_msg)
# Non-packaging branch (the `else:` itself is elided in this listing):
2847 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2848 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2849 short_msg = "emerge: (%s of %s) %s Compile" % \
2850 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2851 logger.log(msg, short_msg=short_msg)
2853 build = EbuildExecuter(background=self.background, pkg=pkg,
2854 scheduler=scheduler, settings=settings)
2855 self._start_task(build, self._build_exit)
2857 def _unlock_builddir(self):
2858 portage.elog.elog_process(self.pkg.cpv, self.settings)
2859 self._build_dir.unlock()
2861 def _build_exit(self, build):
2862 if self._default_exit(build) != os.EX_OK:
2863 self._unlock_builddir()
2868 buildpkg = self._buildpkg
2871 self._final_exit(build)
# System-package rescue tarball notice (branch condition elided).
2876 msg = ">>> This is a system package, " + \
2877 "let's pack a rescue tarball.\n"
2879 log_path = self.settings.get("PORTAGE_LOG_FILE")
2880 if log_path is not None:
2881 log_file = open(log_path, 'a')
# (elided: presumably log_file.write(msg) and close)
2887 if not self.background:
2888 portage.writemsg_stdout(msg, noiselevel=-1)
2890 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2891 scheduler=self.scheduler, settings=self.settings)
2893 self._start_task(packager, self._buildpkg_exit)
2895 def _buildpkg_exit(self, packager):
# Docstring fragment (quotes elided in this listing):
2897 Released build dir lock when there is a failure or
2898 when in buildpkgonly mode. Otherwise, the lock will
2899 be released when merge() is called.
2902 if self._default_exit(packager) != os.EX_OK:
2903 self._unlock_builddir()
2907 if self.opts.buildpkgonly:
2908 # Need to call "clean" phase for buildpkgonly mode
2909 portage.elog.elog_process(self.pkg.cpv, self.settings)
2911 clean_phase = EbuildPhase(background=self.background,
2912 pkg=self.pkg, phase=phase,
2913 scheduler=self.scheduler, settings=self.settings,
2915 self._start_task(clean_phase, self._clean_exit)
2918 # Continue holding the builddir lock until
2919 # after the package has been installed.
2920 self._current_task = None
2921 self.returncode = packager.returncode
2924 def _clean_exit(self, clean_phase):
2925 if self._final_exit(clean_phase) != os.EX_OK or \
2926 self.opts.buildpkgonly:
2927 self._unlock_builddir()
# install()/merge docstring fragment (the def header is elided):
2932 Install the package and then clean up and release locks.
2933 Only call this after the build has completed successfully
2934 and neither fetchonly nor buildpkgonly mode are enabled.
2937 find_blockers = self.find_blockers
2938 ldpath_mtimes = self.ldpath_mtimes
2939 logger = self.logger
2941 pkg_count = self.pkg_count
2942 settings = self.settings
2943 world_atom = self.world_atom
2944 ebuild_path = self._ebuild_path
2947 merge = EbuildMerge(find_blockers=self.find_blockers,
2948 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2949 pkg_count=pkg_count, pkg_path=ebuild_path,
2950 scheduler=self.scheduler,
2951 settings=settings, tree=tree, world_atom=world_atom)
2953 msg = " === (%s of %s) Merging (%s::%s)" % \
2954 (pkg_count.curval, pkg_count.maxval,
2955 pkg.cpv, ebuild_path)
2956 short_msg = "emerge: (%s of %s) %s Merge" % \
2957 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2958 logger.log(msg, short_msg=short_msg)
# Lock is always released after merge.execute(), success or not
# (try/finally scaffolding elided in this listing — TODO confirm).
2961 rval = merge.execute()
2963 self._unlock_builddir()
# EbuildExecuter: runs the ordered build phases of an ebuild (clean ->
# setup -> unpack -> prepare/configure/compile/test/install) as a chain of
# EbuildPhase tasks.  Setup and unpack go through the scheduler's dedicated
# scheduleSetup/scheduleUnpack hooks so they can be serialized globally.
# NOTE(review): numbered listing with interior lines elided.
2967 class EbuildExecuter(CompositeTask):
2969 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2971 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose ebuilds fetch sources at unpack time (list elided);
# their $DISTDIR access is serialized below.
2973 _live_eclasses = frozenset([
# _start (header elided): begin with a "clean" phase.
2983 self._tree = "porttree"
2986 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2987 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2988 self._start_task(clean_phase, self._clean_phase_exit)
2990 def _clean_phase_exit(self, clean_phase):
2992 if self._default_exit(clean_phase) != os.EX_OK:
# (elided: presumably self.wait() and return)
2997 scheduler = self.scheduler
2998 settings = self.settings
3001 # This initializes PORTAGE_LOG_FILE.
3002 portage.prepare_build_dirs(pkg.root, settings, cleanup)
3004 setup_phase = EbuildPhase(background=self.background,
3005 pkg=pkg, phase="setup", scheduler=scheduler,
3006 settings=settings, tree=self._tree)
3008 setup_phase.addExitListener(self._setup_exit)
3009 self._current_task = setup_phase
3010 self.scheduler.scheduleSetup(setup_phase)
3012 def _setup_exit(self, setup_phase):
3014 if self._default_exit(setup_phase) != os.EX_OK:
3018 unpack_phase = EbuildPhase(background=self.background,
3019 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3020 settings=self.settings, tree=self._tree)
3022 if self._live_eclasses.intersection(self.pkg.inherited):
3023 # Serialize $DISTDIR access for live ebuilds since
3024 # otherwise they can interfere with eachother.
3026 unpack_phase.addExitListener(self._unpack_exit)
3027 self._current_task = unpack_phase
3028 self.scheduler.scheduleUnpack(unpack_phase)
# Non-live branch (the `else:` itself is elided): run unpack directly.
3031 self._start_task(unpack_phase, self._unpack_exit)
3033 def _unpack_exit(self, unpack_phase):
3035 if self._default_exit(unpack_phase) != os.EX_OK:
3039 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3042 phases = self._phases
3043 eapi = pkg.metadata["EAPI"]
3044 if eapi in ("0", "1"):
3045 # skip src_prepare and src_configure
# (elided: presumably phases = phases[2:])
3048 for phase in phases:
3049 ebuild_phases.add(EbuildPhase(background=self.background,
3050 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3051 settings=self.settings, tree=self._tree))
3053 self._start_task(ebuild_phases, self._default_final_exit)
# EbuildMetadataPhase: spawns `doebuild(..., "depend", returnpid=True)` and
# reads the emitted metadata lines back over a non-blocking pipe registered
# with the poll-based scheduler.  NOTE(review): numbered listing with
# interior lines elided (class docstring quotes, some returns, etc.).
3055 class EbuildMetadataPhase(SubProcess):
3058 Asynchronous interface for the ebuild "depend" phase which is
3059 used to extract metadata from the ebuild.
3062 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3063 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3066 _file_names = ("ebuild",)
3067 _files_dict = slot_dict_class(_file_names, prefix="")
# _start (header elided): try to determine EAPI cheaply (GLEP 55 file
# name, then ebuild head parse) before spawning bash.
3071 settings = self.settings
3072 settings.setcpv(self.cpv)
3073 ebuild_path = self.ebuild_path
3076 if 'parse-eapi-glep-55' in settings.features:
3077 pf, eapi = portage._split_ebuild_name_glep55(
3078 os.path.basename(ebuild_path))
3079 if eapi is None and \
3080 'parse-eapi-ebuild-head' in settings.features:
3081 eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3082 mode='r', encoding='utf_8', errors='replace'))
3084 if eapi is not None:
3085 if not portage.eapi_is_supported(eapi):
# Unsupported EAPI: report metadata containing only EAPI and
# finish successfully without spawning a subprocess.
3086 self.metadata_callback(self.cpv, self.ebuild_path,
3087 self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3088 self.returncode = os.EX_OK
# (elided: presumably self.wait() and return)
3092 settings.configdict['pkg']['EAPI'] = eapi
3094 debug = settings.get("PORTAGE_DEBUG") == "1"
3098 if self.fd_pipes is not None:
3099 fd_pipes = self.fd_pipes.copy()
3103 fd_pipes.setdefault(0, sys.stdin.fileno())
3104 fd_pipes.setdefault(1, sys.stdout.fileno())
3105 fd_pipes.setdefault(2, sys.stderr.fileno())
3107 # flush any pending output
3108 for fd in fd_pipes.itervalues():
3109 if fd == sys.stdout.fileno():
# (elided: presumably sys.stdout.flush())
3111 if fd == sys.stderr.fileno():
3114 fd_pipes_orig = fd_pipes.copy()
3115 self._files = self._files_dict()
# Non-blocking read end so the scheduler's poll loop never stalls.
3118 master_fd, slave_fd = os.pipe()
3119 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3120 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3122 fd_pipes[self._metadata_fd] = slave_fd
3124 self._raw_metadata = []
3125 files.ebuild = os.fdopen(master_fd, 'r')
3126 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3127 self._registered_events, self._output_handler)
3128 self._registered = True
3130 retval = portage.doebuild(ebuild_path, "depend",
3131 settings["ROOT"], settings, debug,
3132 mydbapi=self.portdb, tree="porttree",
3133 fd_pipes=fd_pipes, returnpid=True)
3137 if isinstance(retval, int):
3138 # doebuild failed before spawning
3140 self.returncode = retval
# (elided: presumably cleanup and return)
3144 self.pid = retval[0]
# This class tracks the pid itself; remove it from portage's global
# spawned-pid bookkeeping to avoid a double wait.
3145 portage.process.spawned_pids.remove(self.pid)
3147 def _output_handler(self, fd, event):
3149 if event & PollConstants.POLLIN:
3150 self._raw_metadata.append(self._files.ebuild.read())
3151 if not self._raw_metadata[-1]:
# Empty read == EOF (elided: presumably unregister here).
3155 self._unregister_if_appropriate(event)
3156 return self._registered
3158 def _set_returncode(self, wait_retval):
3159 SubProcess._set_returncode(self, wait_retval)
3160 if self.returncode == os.EX_OK:
3161 metadata_lines = "".join(self._raw_metadata).splitlines()
3162 if len(portage.auxdbkeys) != len(metadata_lines):
3163 # Don't trust bash's returncode if the
3164 # number of lines is incorrect.
# (elided: presumably self.returncode = 1 / an error code)
3167 metadata = izip(portage.auxdbkeys, metadata_lines)
3168 self.metadata = self.metadata_callback(self.cpv,
3169 self.ebuild_path, self.repo_path, metadata,
# EbuildProcess: SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild() in a child process, with pty-backed output capture.
# NOTE(review): numbered listing with interior lines elided.
3172 class EbuildProcess(SpawnProcess):
3174 __slots__ = ("phase", "pkg", "settings", "tree")
# _start (header elided):
3177 # Don't open the log file during the clean phase since the
3178 # open file can result in an nfs lock on $T/build.log which
3179 # prevents the clean phase from removing $T.
3180 if self.phase not in ("clean", "cleanrm"):
3181 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3182 SpawnProcess._start(self)
3184 def _pipe(self, fd_pipes):
# Use a pty (when available) so tools that detect a tty behave
# normally; falls back to a plain pipe.
3185 stdout_pipe = fd_pipes.get(1)
3186 got_pty, master_fd, slave_fd = \
3187 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3188 return (master_fd, slave_fd)
3190 def _spawn(self, args, **kwargs):
3192 root_config = self.pkg.root_config
3194 mydbapi = root_config.trees[tree].dbapi
3195 settings = self.settings
3196 ebuild_path = settings["EBUILD"]
3197 debug = settings.get("PORTAGE_DEBUG") == "1"
3199 rval = portage.doebuild(ebuild_path, self.phase,
3200 root_config.root, settings, debug,
3201 mydbapi=mydbapi, tree=tree, **kwargs)
# (elided: presumably return rval)
3205 def _set_returncode(self, wait_retval):
3206 SpawnProcess._set_returncode(self, wait_retval)
3208 if self.phase not in ("clean", "cleanrm"):
3209 self.returncode = portage._doebuild_exit_status_check_and_log(
3210 self.settings, self.phase, self.returncode)
3212 if self.phase == "test" and self.returncode != os.EX_OK and \
3213 "test-fail-continue" in self.settings.features:
# test-fail-continue: a failed src_test does not abort the build.
3214 self.returncode = os.EX_OK
3216 portage._post_phase_userpriv_perms(self.settings)
# EbuildPhase: CompositeTask wrapping one EbuildProcess plus that phase's
# post-phase misc commands (e.g. install-time fixups/QA checks).
# NOTE(review): numbered listing with interior lines elided.
3218 class EbuildPhase(CompositeTask):
3220 __slots__ = ("background", "pkg", "phase",
3221 "scheduler", "settings", "tree")
3223 _post_phase_cmds = portage._post_phase_cmds
# _start (header elided): spawn the actual ebuild phase process.
3227 ebuild_process = EbuildProcess(background=self.background,
3228 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3229 settings=self.settings, tree=self.tree)
3231 self._start_task(ebuild_process, self._ebuild_exit)
3233 def _ebuild_exit(self, ebuild_process):
3235 if self.phase == "install":
# Check the build log for QA problems after src_install.
3237 log_path = self.settings.get("PORTAGE_LOG_FILE")
3239 if self.background and log_path is not None:
3240 log_file = open(log_path, 'a')
3243 portage._check_build_log(self.settings, out=out)
3245 if log_file is not None:
# (elided: presumably log_file.close())
3248 if self._default_exit(ebuild_process) != os.EX_OK:
# (elided: presumably self.wait() and return)
3252 settings = self.settings
3254 if self.phase == "install":
3255 portage._post_src_install_chost_fix(settings)
3256 portage._post_src_install_uid_fix(settings)
3258 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3259 if post_phase_cmds is not None:
3260 post_phase = MiscFunctionsProcess(background=self.background,
3261 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3262 scheduler=self.scheduler, settings=settings)
3263 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: finish with the process's returncode.
3266 self.returncode = ebuild_process.returncode
3267 self._current_task = None
3270 def _post_phase_exit(self, post_phase):
3271 if self._final_exit(post_phase) != os.EX_OK:
3272 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3274 self._current_task = None
# EbuildBinpkg: runs the "package" phase to build a .tbz2 into a pid-unique
# temp file under the bintree pkgdir, then injects it on success.
# NOTE(review): numbered listing with interior lines elided (class
# docstring quotes, __init__/_start headers, etc.).
3278 class EbuildBinpkg(EbuildProcess):
3280 This assumes that src_install() has successfully completed.
3282 __slots__ = ("_binpkg_tmpfile",)
# __init__/_start scaffolding (headers elided):
3285 self.phase = "package"
3286 self.tree = "porttree"
3288 root_config = pkg.root_config
3289 portdb = root_config.trees["porttree"].dbapi
3290 bintree = root_config.trees["bintree"]
3291 ebuild_path = portdb.findname(self.pkg.cpv)
3292 settings = self.settings
3293 debug = settings.get("PORTAGE_DEBUG") == "1"
# Guard against a colliding package file, then point the package
# phase at a temp file named with this pid.
3295 bintree.prevent_collision(pkg.cpv)
3296 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3297 pkg.cpv + ".tbz2." + str(os.getpid()))
3298 self._binpkg_tmpfile = binpkg_tmpfile
3299 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3300 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3303 EbuildProcess._start(self)
# PORTAGE_BINPKG_TMPFILE is removed again after the spawn
# (finally-block scaffolding elided — TODO confirm).
3305 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3307 def _set_returncode(self, wait_retval):
3308 EbuildProcess._set_returncode(self, wait_retval)
3311 bintree = pkg.root_config.trees["bintree"]
3312 binpkg_tmpfile = self._binpkg_tmpfile
3313 if self.returncode == os.EX_OK:
3314 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# EbuildMerge: synchronous wrapper around portage.merge() that installs the
# built image into the live filesystem and logs the result.
# NOTE(review): numbered listing with interior lines elided (execute()
# header and its return, some method bodies).
3316 class EbuildMerge(SlotObject):
3318 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3319 "pkg", "pkg_count", "pkg_path", "pretend",
3320 "scheduler", "settings", "tree", "world_atom")
# execute() body (header elided):
3323 root_config = self.pkg.root_config
3324 settings = self.settings
3325 retval = portage.merge(settings["CATEGORY"],
3326 settings["PF"], settings["D"],
3327 os.path.join(settings["PORTAGE_BUILDDIR"],
3328 "build-info"), root_config.root, settings,
3329 myebuild=settings["EBUILD"],
3330 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3331 vartree=root_config.trees["vartree"],
3332 prev_mtimes=self.ldpath_mtimes,
3333 scheduler=self.scheduler,
3334 blockers=self.find_blockers)
3336 if retval == os.EX_OK:
# Record the merged package in the world file where applicable.
3337 self.world_atom(self.pkg)
3342 def _log_success(self):
3344 pkg_count = self.pkg_count
3345 pkg_path = self.pkg_path
3346 logger = self.logger
3347 if "noclean" not in self.settings.features:
3348 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3349 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3350 logger.log((" === (%s of %s) " + \
3351 "Post-Build Cleaning (%s::%s)") % \
3352 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3353 short_msg=short_msg)
3354 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3355 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# PackageUninstall: AsynchronousTask that unmerges one installed package by
# calling the module-level unmerge() helper synchronously.
# NOTE(review): numbered listing with interior lines elided (_start header,
# try:, and the tail of _writemsg_level).
3357 class PackageUninstall(AsynchronousTask):
3359 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# _start body (header and `try:` elided):
3363 unmerge(self.pkg.root_config, self.opts, "unmerge",
3364 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3365 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3366 writemsg_level=self._writemsg_level)
3367 except UninstallFailure, e:
3368 self.returncode = e.status
3370 self.returncode = os.EX_OK
3373 def _writemsg_level(self, msg, level=0, noiselevel=0):
# Route unmerge output to the log file when one exists; otherwise
# (or additionally, per elided branches) to the console, suppressing
# sub-WARNING messages in background mode.
3375 log_path = self.settings.get("PORTAGE_LOG_FILE")
3376 background = self.background
3378 if log_path is None:
3379 if not (background and level < logging.WARNING):
3380 portage.util.writemsg_level(msg,
3381 level=level, noiselevel=noiselevel)
3384 portage.util.writemsg_level(msg,
3385 level=level, noiselevel=noiselevel)
3387 f = open(log_path, 'a')
# (elided: presumably f.write(msg) and f.close())
# Binpkg: CompositeTask that installs a binary package: optional prefetch
# wait -> BinpkgFetcher -> BinpkgVerifier -> clean -> setup -> extract ->
# merge.  NOTE(review): numbered listing with interior lines elided; method
# headers and branch scaffolding are missing in several places.
3393 class Binpkg(CompositeTask):
3395 __slots__ = ("find_blockers",
3396 "ldpath_mtimes", "logger", "opts",
3397 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3398 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3399 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3401 def _writemsg_level(self, msg, level=0, noiselevel=0):
3403 if not self.background:
3404 portage.util.writemsg_level(msg,
3405 level=level, noiselevel=noiselevel)
3407 log_path = self.settings.get("PORTAGE_LOG_FILE")
3408 if log_path is not None:
3409 f = open(log_path, 'a')
# (elided: presumably f.write(msg) and f.close())
# _start (header elided): derive build/image/info paths and set up the
# doebuild environment for this binary package.
3418 settings = self.settings
3419 settings.setcpv(pkg)
3420 self._tree = "bintree"
3421 self._bintree = self.pkg.root_config.trees[self._tree]
3422 self._verify = not self.opts.pretend
3424 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3425 "portage", pkg.category, pkg.pf)
3426 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3427 pkg=pkg, settings=settings)
3428 self._image_dir = os.path.join(dir_path, "image")
3429 self._infloc = os.path.join(dir_path, "build-info")
3430 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3431 settings["EBUILD"] = self._ebuild_path
3432 debug = settings.get("PORTAGE_DEBUG") == "1"
3433 portage.doebuild_environment(self._ebuild_path, "setup",
3434 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3435 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3437 # The prefetcher has already completed or it
3438 # could be running now. If it's running now,
3439 # wait for it to complete since it holds
3440 # a lock on the file being fetched. The
3441 # portage.locks functions are only designed
3442 # to work between separate processes. Since
3443 # the lock is held by the current process,
3444 # use the scheduler and fetcher methods to
3445 # synchronize with the fetcher.
3446 prefetcher = self.prefetcher
3447 if prefetcher is None:
3449 elif not prefetcher.isAlive():
3451 elif prefetcher.poll() is None:
3453 waiting_msg = ("Fetching '%s' " + \
3454 "in the background. " + \
3455 "To view fetch progress, run `tail -f " + \
3456 "/var/log/emerge-fetch.log` in another " + \
3457 "terminal.") % prefetcher.pkg_path
3458 msg_prefix = colorize("GOOD", " * ")
3459 from textwrap import wrap
3460 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3461 for line in wrap(waiting_msg, 65))
3462 if not self.background:
3463 writemsg(waiting_msg, noiselevel=-1)
3465 self._current_task = prefetcher
3466 prefetcher.addExitListener(self._prefetch_exit)
3469 self._prefetch_exit(prefetcher)
3471 def _prefetch_exit(self, prefetcher):
3474 pkg_count = self.pkg_count
3475 if not (self.opts.pretend or self.opts.fetchonly):
3476 self._build_dir.lock()
3477 # If necessary, discard old log so that we don't
3479 self._build_dir.clean_log()
3480 # Initialze PORTAGE_LOG_FILE.
3481 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3482 fetcher = BinpkgFetcher(background=self.background,
3483 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3484 pretend=self.opts.pretend, scheduler=self.scheduler)
3485 pkg_path = fetcher.pkg_path
3486 self._pkg_path = pkg_path
3488 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3490 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3491 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3492 short_msg = "emerge: (%s of %s) %s Fetch" % \
3493 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3494 self.logger.log(msg, short_msg=short_msg)
3495 self._start_task(fetcher, self._fetcher_exit)
# Local package already present: skip straight to the fetcher-exit
# handler (the `else:`/`return` scaffolding is elided).
3498 self._fetcher_exit(fetcher)
3500 def _fetcher_exit(self, fetcher):
3502 # The fetcher only has a returncode when
3503 # --getbinpkg is enabled.
3504 if fetcher.returncode is not None:
3505 self._fetched_pkg = True
3506 if self._default_exit(fetcher) != os.EX_OK:
3507 self._unlock_builddir()
3511 if self.opts.pretend:
3512 self._current_task = None
3513 self.returncode = os.EX_OK
# (elided: verifier creation is conditional on self._verify)
3521 logfile = self.settings.get("PORTAGE_LOG_FILE")
3522 verifier = BinpkgVerifier(background=self.background,
3523 logfile=logfile, pkg=self.pkg)
3524 self._start_task(verifier, self._verifier_exit)
3527 self._verifier_exit(verifier)
3529 def _verifier_exit(self, verifier):
3530 if verifier is not None and \
3531 self._default_exit(verifier) != os.EX_OK:
3532 self._unlock_builddir()
3536 logger = self.logger
3538 pkg_count = self.pkg_count
3539 pkg_path = self._pkg_path
3541 if self._fetched_pkg:
# Move the freshly fetched file into the bintree proper.
3542 self._bintree.inject(pkg.cpv, filename=pkg_path)
3544 if self.opts.fetchonly:
3545 self._current_task = None
3546 self.returncode = os.EX_OK
3550 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3551 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3552 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3553 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3554 logger.log(msg, short_msg=short_msg)
# Run a clean phase before extracting (`phase` assignment elided —
# presumably "clean"; TODO confirm).
3557 settings = self.settings
3558 ebuild_phase = EbuildPhase(background=self.background,
3559 pkg=pkg, phase=phase, scheduler=self.scheduler,
3560 settings=settings, tree=self._tree)
3562 self._start_task(ebuild_phase, self._clean_exit)
3564 def _clean_exit(self, clean_phase):
3565 if self._default_exit(clean_phase) != os.EX_OK:
3566 self._unlock_builddir()
# _unpack_metadata-style section (header elided): recreate build dirs
# and extract build-info/xpak metadata from the tbz2.
3570 dir_path = self._build_dir.dir_path
3572 infloc = self._infloc
3574 pkg_path = self._pkg_path
3577 for mydir in (dir_path, self._image_dir, infloc):
3578 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3579 gid=portage.data.portage_gid, mode=dir_mode)
3581 # This initializes PORTAGE_LOG_FILE.
3582 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3583 self._writemsg_level(">>> Extracting info\n")
3585 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3586 check_missing_metadata = ("CATEGORY", "PF")
3587 missing_metadata = set()
3588 for k in check_missing_metadata:
3589 v = pkg_xpak.getfile(k)
3591 missing_metadata.add(k)
3593 pkg_xpak.unpackinfo(infloc)
3594 for k in missing_metadata:
# Reconstruct missing CATEGORY/PF entries (value derivation
# elided in this listing).
3602 f = open(os.path.join(infloc, k), 'wb')
3608 # Store the md5sum in the vdb.
3609 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3611 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3615 # This gives bashrc users an opportunity to do various things
3616 # such as remove binary packages after they're installed.
3617 settings = self.settings
3618 settings.setcpv(self.pkg)
3619 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3620 settings.backup_changes("PORTAGE_BINPKG_FILE")
# Run setup via the scheduler's dedicated hook (`phase` assignment
# elided — presumably "setup"; TODO confirm).
3623 setup_phase = EbuildPhase(background=self.background,
3624 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3625 settings=settings, tree=self._tree)
3627 setup_phase.addExitListener(self._setup_exit)
3628 self._current_task = setup_phase
3629 self.scheduler.scheduleSetup(setup_phase)
3631 def _setup_exit(self, setup_phase):
3632 if self._default_exit(setup_phase) != os.EX_OK:
3633 self._unlock_builddir()
3637 extractor = BinpkgExtractorAsync(background=self.background,
3638 image_dir=self._image_dir,
3639 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3640 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3641 self._start_task(extractor, self._extractor_exit)
3643 def _extractor_exit(self, extractor):
3644 if self._final_exit(extractor) != os.EX_OK:
3645 self._unlock_builddir()
3646 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3650 def _unlock_builddir(self):
3651 if self.opts.pretend or self.opts.fetchonly:
# Nothing was locked in pretend/fetchonly mode (return elided).
3653 portage.elog.elog_process(self.pkg.cpv, self.settings)
3654 self._build_dir.unlock()
# install()/merge section (header elided):
3658 # This gives bashrc users an opportunity to do various things
3659 # such as remove binary packages after they're installed.
3660 settings = self.settings
3661 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3662 settings.backup_changes("PORTAGE_BINPKG_FILE")
3664 merge = EbuildMerge(find_blockers=self.find_blockers,
3665 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3666 pkg=self.pkg, pkg_count=self.pkg_count,
3667 pkg_path=self._pkg_path, scheduler=self.scheduler,
3668 settings=settings, tree=self._tree, world_atom=self.world_atom)
# Cleanup always runs after merge.execute() (try/finally scaffolding
# elided in this listing — TODO confirm).
3671 retval = merge.execute()
3673 settings.pop("PORTAGE_BINPKG_FILE", None)
3674 self._unlock_builddir()
# BinpkgFetcher: downloads a remote binary package into the bintree pkgdir
# using the configured FETCHCOMMAND/RESUMECOMMAND, with optional distlocks
# file locking and remote-mtime preservation.  NOTE(review): numbered
# listing with interior lines elided.
3677 class BinpkgFetcher(SpawnProcess):
3679 __slots__ = ("pkg", "pretend",
3680 "locked", "pkg_path", "_lock_obj")
3682 def __init__(self, **kwargs):
3683 SpawnProcess.__init__(self, **kwargs)
# (elided: presumably pkg = self.pkg)
3685 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start (header elided): build the fetch URI and command line, then
# spawn the fetch command.
3693 pretend = self.pretend
3694 bintree = pkg.root_config.trees["bintree"]
3695 settings = bintree.settings
3696 use_locks = "distlocks" in settings.features
3697 pkg_path = self.pkg_path
3700 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# A partial file with a known-bad digest can be resumed; anything
# else pre-existing is removed first.
3703 exists = os.path.exists(pkg_path)
3704 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3705 if not (pretend or resume):
3706 # Remove existing file or broken symlink.
3712 # urljoin doesn't work correctly with
3713 # unrecognized protocols like sftp
3714 if bintree._remote_has_index:
3715 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
# (elided: fallback branch when PATH is unset)
3717 rel_uri = pkg.cpv + ".tbz2"
3718 uri = bintree._remote_base_uri.rstrip("/") + \
3719 "/" + rel_uri.lstrip("/")
# No remote index: construct the URI from PORTAGE_BINHOST.
3721 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3722 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and succeeds.
3725 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3726 self.returncode = os.EX_OK
3730 protocol = urlparse.urlparse(uri)[0]
3731 fcmd_prefix = "FETCHCOMMAND"
3733 fcmd_prefix = "RESUMECOMMAND"
# Prefer a protocol-specific command (e.g. FETCHCOMMAND_HTTP),
# falling back to the generic one.
3734 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3736 fcmd = settings.get(fcmd_prefix)
3739 "DISTDIR" : os.path.dirname(pkg_path),
3741 "FILE" : os.path.basename(pkg_path)
3744 fetch_env = dict(settings.iteritems())
3745 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3746 for x in shlex.split(fcmd)]
3748 if self.fd_pipes is None:
# (elided: presumably fd_pipes = {})
3750 fd_pipes = self.fd_pipes
3752 # Redirect all output to stdout since some fetchers like
3753 # wget pollute stderr (if portage detects a problem then it
3754 # can send it's own message to stderr).
3755 fd_pipes.setdefault(0, sys.stdin.fileno())
3756 fd_pipes.setdefault(1, sys.stdout.fileno())
3757 fd_pipes.setdefault(2, sys.stdout.fileno())
3759 self.args = fetch_args
3760 self.env = fetch_env
3761 SpawnProcess._start(self)
3763 def _set_returncode(self, wait_retval):
3764 SpawnProcess._set_returncode(self, wait_retval)
3765 if self.returncode == os.EX_OK:
3766 # If possible, update the mtime to match the remote package if
3767 # the fetcher didn't already do it automatically.
3768 bintree = self.pkg.root_config.trees["bintree"]
3769 if bintree._remote_has_index:
3770 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3771 if remote_mtime is not None:
# try/except scaffolding around the conversions and utime is
# elided in this listing — TODO confirm.
3773 remote_mtime = long(remote_mtime)
3778 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3782 if remote_mtime != local_mtime:
3784 os.utime(self.pkg_path,
3785 (remote_mtime, remote_mtime))
# lock() docstring fragment (the `def lock(self):` header is elided):
3794 This raises an AlreadyLocked exception if lock() is called
3795 while a lock is already held. In order to avoid this, call
3796 unlock() or check whether the "locked" attribute is True
3797 or False before calling lock().
3799 if self._lock_obj is not None:
3800 raise self.AlreadyLocked((self._lock_obj,))
3802 self._lock_obj = portage.locks.lockfile(
3803 self.pkg_path, wantnewlockfile=1)
# (elided: presumably self.locked = True)
3806 class AlreadyLocked(portage.exception.PortageException):
# unlock() body (header elided):
3810 if self._lock_obj is None:
3812 portage.locks.unlockfile(self._lock_obj)
3813 self._lock_obj = None
# Task that verifies the digests of an already-downloaded binary package via
# bintree.digestCheck(), optionally redirecting all output to a log file when
# running in the background. On digest failure the bad file is renamed out of
# the way with portage._checksum_failure_temp_file().
# NOTE(review): excerpted listing — some intervening source lines (the start()
# def line, try/finally scaffolding, rval initialization, noiselevel kwargs)
# are not shown here.
3816 class BinpkgVerifier(AsynchronousTask):
3817 __slots__ = ("logfile", "pkg",)
3821 Note: Unlike a normal AsynchronousTask.start() method,
3822 this one does all work synchronously. The returncode
3823 attribute will be set before it returns.
3827 root_config = pkg.root_config
3828 bintree = root_config.trees["bintree"]
# Save the real stdout/stderr so they can be restored after an optional
# redirect into the log file below.
3830 stdout_orig = sys.stdout
3831 stderr_orig = sys.stderr
3833 if self.background and self.logfile is not None:
3834 log_file = open(self.logfile, 'a')
3836 if log_file is not None:
3837 sys.stdout = log_file
3838 sys.stderr = log_file
3840 bintree.digestCheck(pkg)
3841 except portage.exception.FileNotFound:
3842 writemsg("!!! Fetching Binary failed " + \
3843 "for '%s'\n" % pkg.cpv, noiselevel=-1)
# DigestException.value carries (filename, reason, got, expected).
3845 except portage.exception.DigestException, e:
3846 writemsg("\n!!! Digest verification failed:\n",
3848 writemsg("!!! %s\n" % e.value[0],
3850 writemsg("!!! Reason: %s\n" % e.value[1],
3852 writemsg("!!! Got: %s\n" % e.value[2],
3854 writemsg("!!! Expected: %s\n" % e.value[3],
3857 if rval != os.EX_OK:
# Move the corrupt file aside so a later fetch can replace it.
3858 pkg_path = bintree.getname(pkg.cpv)
3859 head, tail = os.path.split(pkg_path)
3860 temp_filename = portage._checksum_failure_temp_file(head, tail)
3861 writemsg("File renamed to '%s'\n" % (temp_filename,),
# Always restore the original streams and close the log file.
3864 sys.stdout = stdout_orig
3865 sys.stderr = stderr_orig
3866 if log_file is not None:
3869 self.returncode = rval
# Composite task that pre-fetches a binary package: runs a BinpkgFetcher,
# then a BinpkgVerifier, and on success injects the downloaded file into the
# local binary tree so a later merge can use it directly.
# NOTE(review): excerpted listing — _start def line, wait()/exit scaffolding
# and early-return lines are not shown here.
3872 class BinpkgPrefetcher(CompositeTask):
3874 __slots__ = ("pkg",) + \
3875 ("pkg_path", "_bintree",)
3878 self._bintree = self.pkg.root_config.trees["bintree"]
3879 fetcher = BinpkgFetcher(background=self.background,
3880 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3881 scheduler=self.scheduler)
# Remember where the fetcher will put the file; used by inject() below.
3882 self.pkg_path = fetcher.pkg_path
3883 self._start_task(fetcher, self._fetcher_exit)
3885 def _fetcher_exit(self, fetcher):
3887 if self._default_exit(fetcher) != os.EX_OK:
# Fetch succeeded; chain into digest verification.
3891 verifier = BinpkgVerifier(background=self.background,
3892 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3893 self._start_task(verifier, self._verifier_exit)
3895 def _verifier_exit(self, verifier):
3896 if self._default_exit(verifier) != os.EX_OK:
# Verified OK: register the package file with the bintree index.
3900 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3902 self._current_task = None
3903 self.returncode = os.EX_OK
# Spawns a shell pipeline "bzip2 -dqc pkg | tar -xp -C image_dir" to extract
# a binary package into an image directory asynchronously. Both paths are
# shell-quoted via portage._shell_quote before interpolation.
# NOTE(review): excerpted listing — the _start def line is not shown here.
3906 class BinpkgExtractorAsync(SpawnProcess):
3908 __slots__ = ("image_dir", "pkg", "pkg_path")
3910 _shell_binary = portage.const.BASH_BINARY
3913 self.args = [self._shell_binary, "-c",
3914 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3915 (portage._shell_quote(self.pkg_path),
3916 portage._shell_quote(self.image_dir))]
# Run the pipeline with the package's configured environment.
3918 self.env = self.pkg.root_config.settings.environ()
3919 SpawnProcess._start(self)
# One entry of the merge list: dispatches a package to EbuildBuild (source
# build) or Binpkg (binary install) for the build/fetch phase, and later
# performs the actual merge (or uninstall) via self.merge(). Also emits the
# "(N of M)" progress messages.
# NOTE(review): excerpted listing — def lines for _start/poll/wait/merge and
# several statements (uninstall branch, "%s to %s" preposition setup, early
# returns) are not shown here.
3921 class MergeListItem(CompositeTask):
3924 TODO: For parallel scheduling, everything here needs asynchronous
3925 execution support (start, poll, and wait methods).
3928 __slots__ = ("args_set",
3929 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3930 "find_blockers", "logger", "mtimedb", "pkg",
3931 "pkg_count", "pkg_to_replace", "prefetcher",
3932 "settings", "statusMessage", "world_atom") + \
3938 build_opts = self.build_opts
3941 # uninstall, executed by self.merge()
3942 self.returncode = os.EX_OK
# Pull frequently-used attributes into locals for the code below.
3946 args_set = self.args_set
3947 find_blockers = self.find_blockers
3948 logger = self.logger
3949 mtimedb = self.mtimedb
3950 pkg_count = self.pkg_count
3951 scheduler = self.scheduler
3952 settings = self.settings
3953 world_atom = self.world_atom
3954 ldpath_mtimes = mtimedb["ldpath"]
# Compose the user-facing progress header, e.g.
# "Emerging binary (3 of 10) cat/pkg-1.0".
3956 action_desc = "Emerging"
3958 if pkg.type_name == "binary":
3959 action_desc += " binary"
3961 if build_opts.fetchonly:
3962 action_desc = "Fetching"
3964 msg = "%s (%s of %s) %s" % \
3966 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3967 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3968 colorize("GOOD", pkg.cpv))
# Mention the originating repository when it differs from PORTDIR's.
3970 portdb = pkg.root_config.trees["porttree"].dbapi
3971 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3972 if portdir_repo_name:
3973 pkg_repo_name = pkg.metadata.get("repository")
3974 if pkg_repo_name != portdir_repo_name:
3975 if not pkg_repo_name:
3976 pkg_repo_name = "unknown repo"
3977 msg += " from %s" % pkg_repo_name
3980 msg += " %s %s" % (preposition, pkg.root)
3982 if not build_opts.pretend:
3983 self.statusMessage(msg)
3984 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3985 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch on package type: source builds go through EbuildBuild ...
3987 if pkg.type_name == "ebuild":
3989 build = EbuildBuild(args_set=args_set,
3990 background=self.background,
3991 config_pool=self.config_pool,
3992 find_blockers=find_blockers,
3993 ldpath_mtimes=ldpath_mtimes, logger=logger,
3994 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3995 prefetcher=self.prefetcher, scheduler=scheduler,
3996 settings=settings, world_atom=world_atom)
3998 self._install_task = build
3999 self._start_task(build, self._default_final_exit)
# ... while binary packages go through Binpkg.
4002 elif pkg.type_name == "binary":
4004 binpkg = Binpkg(background=self.background,
4005 find_blockers=find_blockers,
4006 ldpath_mtimes=ldpath_mtimes, logger=logger,
4007 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4008 prefetcher=self.prefetcher, settings=settings,
4009 scheduler=scheduler, world_atom=world_atom)
4011 self._install_task = binpkg
4012 self._start_task(binpkg, self._default_final_exit)
# poll()/wait(): delegate to the underlying install task, then report our
# own returncode.
4016 self._install_task.poll()
4017 return self.returncode
4020 self._install_task.wait()
4021 return self.returncode
# merge(): performs the uninstall (for "uninstall" operations) or delegates
# to the install task's install() method.
4026 build_opts = self.build_opts
4027 find_blockers = self.find_blockers
4028 logger = self.logger
4029 mtimedb = self.mtimedb
4030 pkg_count = self.pkg_count
4031 prefetcher = self.prefetcher
4032 scheduler = self.scheduler
4033 settings = self.settings
4034 world_atom = self.world_atom
4035 ldpath_mtimes = mtimedb["ldpath"]
# Only perform a real uninstall when not in a no-op mode.
4038 if not (build_opts.buildpkgonly or \
4039 build_opts.fetchonly or build_opts.pretend):
4041 uninstall = PackageUninstall(background=self.background,
4042 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4043 pkg=pkg, scheduler=scheduler, settings=settings)
4046 retval = uninstall.wait()
4047 if retval != os.EX_OK:
# Nothing to merge in fetch-only / buildpkg-only modes.
4051 if build_opts.fetchonly or \
4052 build_opts.buildpkgonly:
4053 return self.returncode
4055 retval = self._install_task.install()
# Task wrapper around MergeListItem.merge(): prints the
# "Installing/Uninstalling (N of M) cpv" status line (unless in a no-op
# mode) and stores merge()'s return value as this task's returncode.
# NOTE(review): excerpted listing — the _start def line, the uninstall-vs-
# install branch condition, and the msg assembly line are not shown here.
4058 class PackageMerge(AsynchronousTask):
4060 TODO: Implement asynchronous merge so that the scheduler can
4061 run while a merge is executing.
4064 __slots__ = ("merge",)
4068 pkg = self.merge.pkg
4069 pkg_count = self.merge.pkg_count
4072 action_desc = "Uninstalling"
4073 preposition = "from"
4076 action_desc = "Installing"
4078 counter_str = "(%s of %s) " % \
4079 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4080 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4085 colorize("GOOD", pkg.cpv))
4088 msg += " %s %s" % (preposition, pkg.root)
# Suppress the status line in fetchonly/pretend/buildpkgonly modes.
4090 if not self.merge.build_opts.fetchonly and \
4091 not self.merge.build_opts.pretend and \
4092 not self.merge.build_opts.buildpkgonly:
4093 self.merge.statusMessage(msg)
4095 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages, sets).
# Stores the raw argument string and its RootConfig; __str__ renders the
# raw argument.
# NOTE(review): excerpted listing — the `self.arg = arg` assignment and the
# __str__ def line are not shown here.
4098 class DependencyArg(object):
4099 def __init__(self, arg=None, root_config=None):
4101 self.root_config = root_config
4104 return str(self.arg)
# Dependency argument that wraps a single dependency atom. Ensures the atom
# is a portage.dep.Atom instance and exposes it as a one-element set tuple,
# mirroring the interface of the other DependencyArg subclasses.
# NOTE(review): excerpted listing — the initial `self.atom = atom`
# assignment is not shown here.
4106 class AtomArg(DependencyArg):
4107 def __init__(self, atom=None, **kwargs):
4108 DependencyArg.__init__(self, **kwargs)
4110 if not isinstance(self.atom, portage.dep.Atom):
4111 self.atom = portage.dep.Atom(self.atom)
4112 self.set = (self.atom, )
class PackageArg(DependencyArg):
    """Dependency argument that wraps an already-resolved Package.

    An exact-version atom ("=" + package.cpv) is constructed for the
    package and exposed both as ``atom`` and as the one-element ``set``
    tuple, matching the interface of the other DependencyArg subclasses.
    """

    def __init__(self, package=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.package = package
        exact_atom = portage.dep.Atom("=" + package.cpv)
        self.atom = exact_atom
        self.set = (exact_atom, )
# Dependency argument that wraps a package set (e.g. "@world"); the set name
# is the raw argument with the SETPREFIX stripped.
# NOTE(review): excerpted listing — the `self.set = set` assignment is not
# shown here.
4121 class SetArg(DependencyArg):
4122 def __init__(self, set=None, **kwargs):
4123 DependencyArg.__init__(self, **kwargs)
4125 self.name = self.arg[len(SETPREFIX):]
# Lightweight record describing one dependency edge in the depgraph:
# the atom, whether it is a blocker, its depth, parent, priority and root.
# Defaults: priority falls back to a fresh DepPriority(); depth presumably
# defaults to 0 in the omitted line — TODO confirm.
# NOTE(review): excerpted listing — the body of the final `if` is not shown.
4127 class Dependency(SlotObject):
4128 __slots__ = ("atom", "blocker", "depth",
4129 "parent", "onlydeps", "priority", "root")
4130 def __init__(self, **kwargs):
4131 SlotObject.__init__(self, **kwargs)
4132 if self.priority is None:
4133 self.priority = DepPriority()
4134 if self.depth is None:
# NOTE(review): excerpted listing — several def lines (__init__ tail,
# _load, flush, __iter__ headers) and try/except scaffolding are not
# shown here.
4137 class BlockerCache(portage.cache.mappings.MutableMapping):
4138 """This caches blockers of installed packages so that dep_check does not
4139 have to be done for every single installed package on every invocation of
4140 emerge. The cache is invalidated whenever it is detected that something
4141 has changed that might alter the results of dep_check() calls:
4142 1) the set of installed packages (including COUNTER) has changed
4143 2) the old-style virtuals have changed
# Number of uncached packages to trigger cache update, since
# it's wasteful to update it for every vdb change.
4148 _cache_threshold = 5
# Value type stored in the cache: the package's install COUNTER plus its
# tuple of blocker atoms.
4150 class BlockerData(object):
4152 __slots__ = ("__weakref__", "atoms", "counter")
4154 def __init__(self, counter, atoms):
4155 self.counter = counter
4158 def __init__(self, myroot, vardb):
4160 self._virtuals = vardb.settings.getvirtuals()
# On-disk location: $ROOT/var/cache/edb/vdb_blockers.pickle (CACHE_PATH).
4161 self._cache_filename = os.path.join(myroot,
4162 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4163 self._cache_version = "1"
4164 self._cache_data = None
4165 self._modified = set()
# Load and validate the pickled cache; any corruption invalidates it.
4170 f = open(self._cache_filename, mode='rb')
4171 mypickle = pickle.Unpickler(f)
# Disable find_global to prevent the unpickler from instantiating
# arbitrary classes from untrusted data.
4173 mypickle.find_global = None
4174 except AttributeError:
4175 # TODO: If py3k, override Unpickler.find_class().
4177 self._cache_data = mypickle.load()
4180 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4181 if isinstance(e, pickle.UnpicklingError):
4182 writemsg("!!! Error loading '%s': %s\n" % \
4183 (self._cache_filename, str(e)), noiselevel=-1)
4186 cache_valid = self._cache_data and \
4187 isinstance(self._cache_data, dict) and \
4188 self._cache_data.get("version") == self._cache_version and \
4189 isinstance(self._cache_data.get("blockers"), dict)
4191 # Validate all the atoms and counters so that
4192 # corruption is detected as soon as possible.
4193 invalid_items = set()
4194 for k, v in self._cache_data["blockers"].iteritems():
4195 if not isinstance(k, basestring):
4196 invalid_items.add(k)
4199 if portage.catpkgsplit(k) is None:
4200 invalid_items.add(k)
4202 except portage.exception.InvalidData:
4203 invalid_items.add(k)
# Each value must be a (counter, atoms) tuple.
4205 if not isinstance(v, tuple) or \
4207 invalid_items.add(k)
4210 if not isinstance(counter, (int, long)):
4211 invalid_items.add(k)
4213 if not isinstance(atoms, (list, tuple)):
4214 invalid_items.add(k)
4216 invalid_atom = False
4218 if not isinstance(atom, basestring):
# Every cached atom must be a valid blocker atom ("!...").
4221 if atom[:1] != "!" or \
4222 not portage.isvalidatom(
4223 atom, allow_blockers=True):
4227 invalid_items.add(k)
# Drop any entries that failed validation.
4230 for k in invalid_items:
4231 del self._cache_data["blockers"][k]
4232 if not self._cache_data["blockers"]:
# Cache was missing or invalid: start from an empty structure.
4236 self._cache_data = {"version":self._cache_version}
4237 self._cache_data["blockers"] = {}
4238 self._cache_data["virtuals"] = self._virtuals
4239 self._modified.clear()
4242 """If the current user has permission and the internal blocker cache
4243 been updated, save it to disk and mark it unmodified. This is called
4244 by emerge after it has processed blockers for all installed packages.
4245 Currently, the cache is only written if the user has superuser
4246 privileges (since that's required to obtain a lock), but all users
4247 have read access and benefit from faster blocker lookups (as long as
4248 the entire cache is still valid). The cache is stored as a pickled
4249 dict object with the following format:
4253 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4254 "virtuals" : vardb.settings.getvirtuals()
# Only rewrite the file once enough entries changed (_cache_threshold).
4257 if len(self._modified) >= self._cache_threshold and \
4260 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4261 pickle.dump(self._cache_data, f, protocol=2)
# World-readable so non-root emerge invocations benefit from the cache.
4263 portage.util.apply_secpass_permissions(
4264 self._cache_filename, gid=portage.portage_gid, mode=0644)
4265 except (IOError, OSError), e:
4267 self._modified.clear()
4269 def __setitem__(self, cpv, blocker_data):
4271 Update the cache and mark it as modified for a future call to
4274 @param cpv: Package for which to cache blockers.
4276 @param blocker_data: An object with counter and atoms attributes.
4277 @type blocker_data: BlockerData
# Atoms are stored as plain strings so the pickle stays class-free.
4279 self._cache_data["blockers"][cpv] = \
4280 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4281 self._modified.add(cpv)
4284 if self._cache_data is None:
4285 # triggered by python-trace
4287 return iter(self._cache_data["blockers"])
4289 def __delitem__(self, cpv):
4290 del self._cache_data["blockers"][cpv]
4292 def __getitem__(self, cpv):
4295 @returns: An object with counter and atoms attributes.
4297 return self.BlockerData(*self._cache_data["blockers"][cpv])
# Computes, with the help of BlockerCache, the set of installed packages
# that block (or are blocked by) a new package about to be merged.
# NOTE(review): excerpted listing — try/except scaffolding, some def lines
# and a few loop headers are not shown here.
4299 class BlockerDB(object):
4301 def __init__(self, root_config):
4302 self._root_config = root_config
4303 self._vartree = root_config.trees["vartree"]
4304 self._portdb = root_config.trees["porttree"].dbapi
# Both are created lazily by _get_fake_vartree().
4306 self._dep_check_trees = None
4307 self._fake_vartree = None
4309 def _get_fake_vartree(self, acquire_lock=0):
4310 fake_vartree = self._fake_vartree
4311 if fake_vartree is None:
4312 fake_vartree = FakeVartree(self._root_config,
4313 acquire_lock=acquire_lock)
4314 self._fake_vartree = fake_vartree
# dep_check() gets the fake vartree for both port and var trees so
# lookups are consistent with the in-memory vdb state.
4315 self._dep_check_trees = { self._vartree.root : {
4316 "porttree" : fake_vartree,
4317 "vartree" : fake_vartree,
4320 fake_vartree.sync(acquire_lock=acquire_lock)
# Return installed packages that block new_pkg (in either direction).
4323 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4324 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4325 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4326 settings = self._vartree.settings
# Track cache entries whose packages are no longer installed.
4327 stale_cache = set(blocker_cache)
4328 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4329 dep_check_trees = self._dep_check_trees
4330 vardb = fake_vartree.dbapi
4331 installed_pkgs = list(vardb)
4333 for inst_pkg in installed_pkgs:
4334 stale_cache.discard(inst_pkg.cpv)
4335 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A COUNTER mismatch means the package was reinstalled; recompute.
4336 if cached_blockers is not None and \
4337 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4338 cached_blockers = None
4339 if cached_blockers is not None:
4340 blocker_atoms = cached_blockers.atoms
4342 # Use aux_get() to trigger FakeVartree global
4343 # updates on *DEPEND when appropriate.
4344 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Temporarily relax strict dep checking for installed packages,
# since their metadata may predate current syntax rules.
4346 portage.dep._dep_check_strict = False
4347 success, atoms = portage.dep_check(depstr,
4348 vardb, settings, myuse=inst_pkg.use.enabled,
4349 trees=dep_check_trees, myroot=inst_pkg.root)
4351 portage.dep._dep_check_strict = True
4353 pkg_location = os.path.join(inst_pkg.root,
4354 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4355 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4356 (pkg_location, atoms), noiselevel=-1)
# Only blocker ("!...") atoms are cached.
4359 blocker_atoms = [atom for atom in atoms \
4360 if atom.startswith("!")]
4361 blocker_atoms.sort()
4362 counter = long(inst_pkg.metadata["COUNTER"])
4363 blocker_cache[inst_pkg.cpv] = \
4364 blocker_cache.BlockerData(counter, blocker_atoms)
# Evict entries for packages no longer installed, then persist.
4365 for cpv in stale_cache:
4366 del blocker_cache[cpv]
4367 blocker_cache.flush()
# Map each blocker atom back to the installed packages declaring it.
4369 blocker_parents = digraph()
4371 for pkg in installed_pkgs:
4372 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4373 blocker_atom = blocker_atom.lstrip("!")
4374 blocker_atoms.append(blocker_atom)
4375 blocker_parents.add(blocker_atom, pkg)
4377 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4378 blocking_pkgs = set()
4379 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4380 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4382 # Check for blockers in the other direction.
4383 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4385 portage.dep._dep_check_strict = False
4386 success, atoms = portage.dep_check(depstr,
4387 vardb, settings, myuse=new_pkg.use.enabled,
4388 trees=dep_check_trees, myroot=new_pkg.root)
4390 portage.dep._dep_check_strict = True
4392 # We should never get this far with invalid deps.
4393 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4396 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4399 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4400 for inst_pkg in installed_pkgs:
# EAFP: next() raising StopIteration means no atom matched inst_pkg.
4402 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4403 except (portage.exception.InvalidDependString, StopIteration):
4405 blocking_pkgs.add(inst_pkg)
4407 return blocking_pkgs
# Prints a detailed error explaining an invalid/corrupt dependency string,
# with remediation advice that differs for installed ("nomerge") packages
# versus packages about to be merged.
# NOTE(review): excerpted listing — the msg list initialization and the
# else: line before 4427 are not shown here.
4409 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4411 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4412 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
# parent_node is a (type, root, cpv, status) tuple.
4413 p_type, p_root, p_key, p_status = parent_node
4415 if p_status == "nomerge":
# Installed package: point the user at its vdb *DEPEND files.
4416 category, pf = portage.catsplit(p_key)
4417 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4418 msg.append("Portage is unable to process the dependencies of the ")
4419 msg.append("'%s' package. " % p_key)
4420 msg.append("In order to correct this problem, the package ")
4421 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4422 msg.append("As a temporary workaround, the --nodeps option can ")
4423 msg.append("be used to ignore all dependencies. For reference, ")
4424 msg.append("the problematic dependencies can be found in the ")
4425 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4427 msg.append("This package can not be installed. ")
4428 msg.append("Please notify the '%s' package maintainer " % p_key)
4429 msg.append("about this problem.")
# Wrap the advice to 72 columns and emit everything at ERROR level.
4431 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4432 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
# NOTE(review): excerpted listing — several def lines (clear, copy,
# __iter__, cp_all, cpv_all headers), return statements and cache
# bookkeeping lines are not shown here.
4434 class PackageVirtualDbapi(portage.dbapi):
4436 A dbapi-like interface class that represents the state of the installed
4437 package database as new packages are installed, replacing any packages
4438 that previously existed in the same slot. The main difference between
4439 this class and fakedbapi is that this one uses Package instances
4440 internally (passed in via cpv_inject() and cpv_remove() calls).
4442 def __init__(self, settings):
4443 portage.dbapi.__init__(self)
4444 self.settings = settings
# Caches match() results keyed by the original dep string.
4445 self._match_cache = {}
4451 Remove all packages.
4455 self._cp_map.clear()
4456 self._cpv_map.clear()
# copy(): deep-enough copy — cp lists are sliced so mutations on the
# copy do not affect the original.
4459 obj = PackageVirtualDbapi(self.settings)
4460 obj._match_cache = self._match_cache.copy()
4461 obj._cp_map = self._cp_map.copy()
4462 for k, v in obj._cp_map.iteritems():
4463 obj._cp_map[k] = v[:]
4464 obj._cpv_map = self._cpv_map.copy()
4468 return self._cpv_map.itervalues()
# Membership is identity-based on the stored Package instance for the
# same cpv, not mere cpv equality.
4470 def __contains__(self, item):
4471 existing = self._cpv_map.get(item.cpv)
4472 if existing is not None and \
# get() accepts either a Package-like object (with .cpv) or a
# (type_name, root, cpv, operation) tuple key.
4477 def get(self, item, default=None):
4478 cpv = getattr(item, "cpv", None)
4482 type_name, root, cpv, operation = item
4484 existing = self._cpv_map.get(cpv)
4485 if existing is not None and \
4490 def match_pkgs(self, atom):
4491 return [self._cpv_map[cpv] for cpv in self.match(atom)]
# Invalidate derived caches after the package set changes.
4493 def _clear_cache(self):
4494 if self._categories is not None:
4495 self._categories = None
4496 if self._match_cache:
4497 self._match_cache = {}
4499 def match(self, origdep, use_cache=1):
4500 result = self._match_cache.get(origdep)
4501 if result is not None:
4503 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4504 self._match_cache[origdep] = result
4507 def cpv_exists(self, cpv):
4508 return cpv in self._cpv_map
4510 def cp_list(self, mycp, use_cache=1):
# Reuse the match() cache when its entry is a plain cp result.
4511 cachelist = self._match_cache.get(mycp)
4512 # cp_list() doesn't expand old-style virtuals
4513 if cachelist and cachelist[0].startswith(mycp):
4515 cpv_list = self._cp_map.get(mycp)
4516 if cpv_list is None:
4519 cpv_list = [pkg.cpv for pkg in cpv_list]
4520 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for virtuals, which may be expanded later.
4521 if not (not cpv_list and mycp.startswith("virtual/")):
4522 self._match_cache[mycp] = cpv_list
4526 return list(self._cp_map)
4529 return list(self._cpv_map)
# cpv_inject(): add a Package, displacing any existing package with the
# same cpv or occupying the same slot.
4531 def cpv_inject(self, pkg):
4532 cp_list = self._cp_map.get(pkg.cp)
4535 self._cp_map[pkg.cp] = cp_list
4536 e_pkg = self._cpv_map.get(pkg.cpv)
4537 if e_pkg is not None:
4540 self.cpv_remove(e_pkg)
4541 for e_pkg in cp_list:
4542 if e_pkg.slot_atom == pkg.slot_atom:
4545 self.cpv_remove(e_pkg)
4548 self._cpv_map[pkg.cpv] = pkg
4551 def cpv_remove(self, pkg):
4552 old_pkg = self._cpv_map.get(pkg.cpv)
4555 self._cp_map[pkg.cp].remove(pkg)
4556 del self._cpv_map[pkg.cpv]
# aux_get returns "" for metadata keys the package lacks.
4559 def aux_get(self, cpv, wants):
4560 metadata = self._cpv_map[cpv].metadata
4561 return [metadata.get(x, "") for x in wants]
4563 def aux_update(self, cpv, values):
4564 self._cpv_map[cpv].metadata.update(values)
4567 class depgraph(object):
4569 pkg_tree_map = RootConfig.pkg_tree_map
4571 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# depgraph constructor: builds per-root shadow trees (FakeVartree +
# PackageVirtualDbapi) used for dependency resolution, the db search order
# (dbs), and all the bookkeeping structures (blocker digraphs, conflict
# maps, task caches) used while constructing the merge graph.
# NOTE(review): excerpted listing — several lines (self.trees/self.roots
# initialization, loop headers over vardb, graph_tree definition, dbs list
# creation, _sets initialization, spinner/debug lines) are not shown here.
4573 def __init__(self, settings, trees, myopts, myparams, spinner):
4574 self.settings = settings
4575 self.target_root = settings["ROOT"]
4576 self.myopts = myopts
4577 self.myparams = myparams
4579 if settings.get("PORTAGE_DEBUG", "") == "1":
4581 self.spinner = spinner
4582 self._running_root = trees["/"]["root_config"]
4583 self._opts_no_restart = Scheduler._opts_no_restart
4584 self.pkgsettings = {}
4585 # Maps slot atom to package for each Package added to the graph.
4586 self._slot_pkg_map = {}
4587 # Maps nodes to the reasons they were selected for reinstallation.
4588 self._reinstall_nodes = {}
# Keep the unmodified trees around; self.trees gets FakeVartree shadows.
4591 self._trees_orig = trees
4593 # Contains a filtered view of preferred packages that are selected
4594 # from available repositories.
4595 self._filtered_trees = {}
4596 # Contains installed packages and new packages that have been added
4598 self._graph_trees = {}
4599 # All Package instances
4600 self._pkg_cache = {}
4601 for myroot in trees:
4602 self.trees[myroot] = {}
4603 # Create a RootConfig instance that references
4604 # the FakeVartree instead of the real one.
4605 self.roots[myroot] = RootConfig(
4606 trees[myroot]["vartree"].settings,
4608 trees[myroot]["root_config"].setconfig)
4609 for tree in ("porttree", "bintree"):
4610 self.trees[myroot][tree] = trees[myroot][tree]
# Substitute a FakeVartree sharing this depgraph's package cache.
4611 self.trees[myroot]["vartree"] = \
4612 FakeVartree(trees[myroot]["root_config"],
4613 pkg_cache=self._pkg_cache)
4614 self.pkgsettings[myroot] = portage.config(
4615 clone=self.trees[myroot]["vartree"].settings)
4616 self._slot_pkg_map[myroot] = {}
4617 vardb = self.trees[myroot]["vartree"].dbapi
4618 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4619 "--buildpkgonly" not in self.myopts
4620 # This fakedbapi instance will model the state that the vdb will
4621 # have after new packages have been installed.
4622 fakedb = PackageVirtualDbapi(vardb.settings)
4623 if preload_installed_pkgs:
4625 self.spinner.update()
4626 # This triggers metadata updates via FakeVartree.
4627 vardb.aux_get(pkg.cpv, [])
4628 fakedb.cpv_inject(pkg)
4630 # Now that the vardb state is cached in our FakeVartree,
4631 # we won't be needing the real vartree cache for awhile.
4632 # To make some room on the heap, clear the vardbapi
4634 trees[myroot]["vartree"].dbapi._clear_cache()
4637 self.mydbapi[myroot] = fakedb
4640 graph_tree.dbapi = fakedb
4641 self._graph_trees[myroot] = {}
4642 self._filtered_trees[myroot] = {}
4643 # Substitute the graph tree for the vartree in dep_check() since we
4644 # want atom selections to be consistent with package selections
4645 # that have already been made.
4646 self._graph_trees[myroot]["porttree"] = graph_tree
4647 self._graph_trees[myroot]["vartree"] = graph_tree
4648 def filtered_tree():
4650 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4651 self._filtered_trees[myroot]["porttree"] = filtered_tree
4653 # Passing in graph_tree as the vartree here could lead to better
4654 # atom selections in some cases by causing atoms for packages that
4655 # have been added to the graph to be preferred over other choices.
4656 # However, it can trigger atom selections that result in
4657 # unresolvable direct circular dependencies. For example, this
4658 # happens with gwydion-dylan which depends on either itself or
4659 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4660 # gwydion-dylan-bin needs to be selected in order to avoid
4661 # an unresolvable direct circular dependency.
4663 # To solve the problem described above, pass in "graph_db" so that
4664 # packages that have been added to the graph are distinguishable
4665 # from other available packages and installed packages. Also, pass
4666 # the parent package into self._select_atoms() calls so that
4667 # unresolvable direct circular dependencies can be detected and
4668 # avoided when possible.
4669 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4670 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4673 portdb = self.trees[myroot]["porttree"].dbapi
4674 bindb = self.trees[myroot]["bintree"].dbapi
4675 vardb = self.trees[myroot]["vartree"].dbapi
4676 # (db, pkg_type, built, installed, db_keys)
# Search order: ebuilds (unless --usepkgonly), then binaries (with
# --usepkg), then installed packages.
4677 if "--usepkgonly" not in self.myopts:
4678 db_keys = list(portdb._aux_cache_keys)
4679 dbs.append((portdb, "ebuild", False, False, db_keys))
4680 if "--usepkg" in self.myopts:
4681 db_keys = list(bindb._aux_cache_keys)
4682 dbs.append((bindb, "binary", True, False, db_keys))
4683 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4684 dbs.append((vardb, "installed", True, True, db_keys))
4685 self._filtered_trees[myroot]["dbs"] = dbs
4686 if "--usepkg" in self.myopts:
4687 self.trees[myroot]["bintree"].populate(
4688 "--getbinpkg" in self.myopts,
4689 "--getbinpkgonly" in self.myopts)
4692 self.digraph=portage.digraph()
4693 # contains all sets added to the graph
4695 # contains atoms given as arguments
4696 self._sets["args"] = InternalPackageSet()
4697 # contains all atoms from all sets added to the graph, including
4698 # atoms given as arguments
4699 self._set_atoms = InternalPackageSet()
4700 self._atom_arg_map = {}
4701 # contains all nodes pulled in by self._set_atoms
4702 self._set_nodes = set()
4703 # Contains only Blocker -> Uninstall edges
4704 self._blocker_uninstalls = digraph()
4705 # Contains only Package -> Blocker edges
4706 self._blocker_parents = digraph()
4707 # Contains only irrelevant Package -> Blocker edges
4708 self._irrelevant_blockers = digraph()
4709 # Contains only unsolvable Package -> Blocker edges
4710 self._unsolvable_blockers = digraph()
4711 # Contains all Blocker -> Blocked Package edges
4712 self._blocked_pkgs = digraph()
4713 # Contains world packages that have been protected from
4714 # uninstallation but may not have been added to the graph
4715 # if the graph is not complete yet.
4716 self._blocked_world_pkgs = {}
4717 self._slot_collision_info = {}
4718 # Slot collision nodes are not allowed to block other packages since
4719 # blocker validation is only able to account for one package per slot.
4720 self._slot_collision_nodes = set()
4721 self._parent_atoms = {}
4722 self._slot_conflict_parent_atoms = set()
4723 self._serialized_tasks_cache = None
4724 self._scheduler_graph = None
4725 self._displayed_list = None
4726 self._pprovided_args = []
4727 self._missing_args = []
4728 self._masked_installed = set()
4729 self._unsatisfied_deps_for_display = []
4730 self._unsatisfied_blockers_for_display = None
4731 self._circular_deps_for_display = None
4732 self._dep_stack = []
4733 self._unsatisfied_deps = []
4734 self._initially_unsatisfied_deps = []
4735 self._ignored_deps = []
4736 self._required_set_names = set(["system", "world"])
# Strategy hooks: both currently point at the "highest available"
# selection policies.
4737 self._select_atoms = self._select_atoms_highest_available
4738 self._select_package = self._select_pkg_highest_available
4739 self._highest_pkg_cache = {}
# NOTE(review): excerpted listing — several lines (early return, indent
# definition, max_parents assignment, loop/else scaffolding and the
# formatter end_paragraph calls) are not shown here.
4741 def _show_slot_collision_notice(self):
4742 """Show an informational message advising the user to mask one of
4743 the packages. In some cases it may be possible to resolve this
4744 automatically, but support for backtracking (removal nodes that have
4745 already been selected) will be required in order to handle all possible
4749 if not self._slot_collision_info:
4752 self._show_merge_list()
4755 msg.append("\n!!! Multiple package instances within a single " + \
4756 "package slot have been pulled\n")
4757 msg.append("!!! into the dependency graph, resulting" + \
4758 " in a slot conflict:\n\n")
4760 # Max number of parents shown, to avoid flooding the display.
4762 explanation_columns = 70
4764 for (slot_atom, root), slot_nodes \
4765 in self._slot_collision_info.iteritems():
4766 msg.append(str(slot_atom))
4769 for node in slot_nodes:
4771 msg.append(str(node))
4772 parent_atoms = self._parent_atoms.get(node)
# Build pruned_list in decreasing order of interest, capped at
# max_parents entries:
4775 # Prefer conflict atoms over others.
4776 for parent_atom in parent_atoms:
4777 if len(pruned_list) >= max_parents:
4779 if parent_atom in self._slot_conflict_parent_atoms:
4780 pruned_list.add(parent_atom)
4782 # If this package was pulled in by conflict atoms then
4783 # show those alone since those are the most interesting.
4785 # When generating the pruned list, prefer instances
4786 # of DependencyArg over instances of Package.
4787 for parent_atom in parent_atoms:
4788 if len(pruned_list) >= max_parents:
4790 parent, atom = parent_atom
4791 if isinstance(parent, DependencyArg):
4792 pruned_list.add(parent_atom)
4793 # Prefer Packages instances that themselves have been
4794 # pulled into collision slots.
4795 for parent_atom in parent_atoms:
4796 if len(pruned_list) >= max_parents:
4798 parent, atom = parent_atom
4799 if isinstance(parent, Package) and \
4800 (parent.slot_atom, parent.root) \
4801 in self._slot_collision_info:
4802 pruned_list.add(parent_atom)
# Finally fill any remaining space with arbitrary parents.
4803 for parent_atom in parent_atoms:
4804 if len(pruned_list) >= max_parents:
4806 pruned_list.add(parent_atom)
4807 omitted_parents = len(parent_atoms) - len(pruned_list)
4808 parent_atoms = pruned_list
4809 msg.append(" pulled in by\n")
4810 for parent_atom in parent_atoms:
4811 parent, atom = parent_atom
4812 msg.append(2*indent)
4813 if isinstance(parent,
4814 (PackageArg, AtomArg)):
4815 # For PackageArg and AtomArg types, it's
4816 # redundant to display the atom attribute.
4817 msg.append(str(parent))
4819 # Display the specific atom from SetArg or
4821 msg.append("%s required by %s" % (atom, parent))
4824 msg.append(2*indent)
4825 msg.append("(and %d more)\n" % omitted_parents)
4827 msg.append(" (no parents)\n")
# Append a per-conflict explanation when one can be generated.
4829 explanation = self._slot_conflict_explanation(slot_nodes)
4832 msg.append(indent + "Explanation:\n\n")
4833 for line in textwrap.wrap(explanation, explanation_columns):
4834 msg.append(2*indent + line + "\n")
4837 sys.stderr.write("".join(msg))
# Skip the generic masking advice when every conflict already got a
# specific explanation, or when --quiet is in effect.
4840 explanations_for_all = explanations == len(self._slot_collision_info)
4842 if explanations_for_all or "--quiet" in self.myopts:
4846 msg.append("It may be possible to solve this problem ")
4847 msg.append("by using package.mask to prevent one of ")
4848 msg.append("those packages from being selected. ")
4849 msg.append("However, it is also possible that conflicting ")
4850 msg.append("dependencies exist such that they are impossible to ")
4851 msg.append("satisfy simultaneously. If such a conflict exists in ")
4852 msg.append("the dependencies of two different packages, then those ")
4853 msg.append("packages can not be installed simultaneously.")
# Flow the advice text to 72 columns on stderr.
4855 from formatter import AbstractFormatter, DumbWriter
4856 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4858 f.add_flowing_data(x)
4862 msg.append("For more information, see MASKED PACKAGES ")
4863 msg.append("section in the emerge man page or refer ")
4864 msg.append("to the Gentoo Handbook.")
4866 f.add_flowing_data(x)
def _slot_conflict_explanation(self, slot_nodes):
    """
    Return an explanation string for a two-package slot conflict, or no
    suggestion when the case is not handled.

    When a slot conflict occurs due to USE deps, there are a few
    different cases to consider:

    1) New USE are correctly set but --newuse wasn't requested so an
       installed package with incorrect USE happened to get pulled
       into graph before the new one.

    2) New USE are incorrectly set but an installed package has correct
       USE so it got pulled into the graph, and a new instance also got
       pulled in due to --newuse or an upgrade.

    3) Multiple USE deps exist that can't be satisfied simultaneously,
       and multiple package instances got pulled into the same slot to
       satisfy the conflicting deps.

    Currently, explanations and suggested courses of action are generated
    for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
    """
    # NOTE(review): several control-flow lines (early returns, else
    # branches, and the matched_node assignment) appear to be elided in
    # this copy; confirm against upstream before modifying.
    if len(slot_nodes) != 2:
        # Suggestions are only implemented for
        # conflicts between two packages.
    all_conflict_atoms = self._slot_conflict_parent_atoms
    matched_atoms = None
    unmatched_node = None
    for node in slot_nodes:
        parent_atoms = self._parent_atoms.get(node)
        if not parent_atoms:
            # Normally, there are always parent atoms. If there are
            # none then something unexpected is happening and there's
            # currently no suggestion for this case.
        conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
        for parent_atom in conflict_atoms:
            parent, atom = parent_atom
            # Suggestions are currently only implemented for cases
            # in which all conflict atoms have USE deps.
        if matched_node is not None:
            # If conflict atoms match multiple nodes
            # then there's no suggestion.
        matched_atoms = conflict_atoms
        if unmatched_node is not None:
            # Neither node is matched by conflict atoms, and
            # there is no suggestion for this case.
        unmatched_node = node
    if matched_node is None or unmatched_node is None:
        # This shouldn't happen.
    if unmatched_node.installed and not matched_node.installed and \
        unmatched_node.cpv == matched_node.cpv:
        # If the conflicting packages are the same version then
        # --newuse should be all that's needed. If they are different
        # versions then there's some other problem.
        return "New USE are correctly set, but --newuse wasn't" + \
            " requested, so an installed package with incorrect USE " + \
            "happened to get pulled into the dependency graph. " + \
            "In order to solve " + \
            "this, either specify the --newuse option or explicitly " + \
            " reinstall '%s'." % matched_node.slot_atom
    if matched_node.installed and not unmatched_node.installed:
        # Case 2: list every conflicting USE-dep atom in the message.
        atoms = sorted(set(atom for parent, atom in matched_atoms))
        explanation = ("New USE for '%s' are incorrectly set. " + \
            "In order to solve this, adjust USE to satisfy '%s'") % \
            (matched_node.slot_atom, atoms[0])
        for atom in atoms[1:-1]:
            explanation += ", '%s'" % (atom,)
        explanation += " and '%s'" % (atoms[-1],)
def _process_slot_conflicts(self):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.
    """
    # NOTE(review): a few short lines (e.g. continue statements) appear
    # to be elided in this copy of the loop bodies below.
    for (slot_atom, root), slot_nodes \
        in self._slot_collision_info.iteritems():
        # Union of every parent atom recorded for any package that was
        # pulled into this (slot_atom, root).
        all_parent_atoms = set()
        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if not parent_atoms:
                # No parents recorded for this package.
            all_parent_atoms.update(parent_atoms)
        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if parent_atoms is None:
                parent_atoms = set()
                self._parent_atoms[pkg] = parent_atoms
            for parent_atom in all_parent_atoms:
                if parent_atom in parent_atoms:
                    # Already recorded for this package.
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                parent, atom = parent_atom
                atom_set = InternalPackageSet(
                    initial_atoms=(atom,))
                if atom_set.findAtomForPackage(pkg):
                    parent_atoms.add(parent_atom)
                    self._slot_conflict_parent_atoms.add(parent_atom)
def _reinstall_for_flags(self, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags."""
    # NOTE(review): the return statements of both branches appear to be
    # elided in this copy.
    if "--newuse" in self.myopts:
        # Flags that entered or left IUSE (ignoring profile-forced
        # flags), plus flags whose enabled state changed.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
    elif "changed-use" == self.myopts.get("--reinstall"):
        # Only flags whose enabled state changed (IUSE churn alone does
        # not trigger a reinstall in this mode).
        flags = orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use))
def _create_graph(self, allow_unsatisfied=False):
    """
    Drain self._dep_stack, dispatching each entry to _add_pkg_deps()
    (for Package instances) or _add_dep() and propagating failure.
    """
    # NOTE(review): the loop header and the failure-return lines appear
    # to be elided in this copy.
    dep_stack = self._dep_stack
    self.spinner.update()
    dep = dep_stack.pop()
    if isinstance(dep, Package):
        if not self._add_pkg_deps(dep,
            allow_unsatisfied=allow_unsatisfied):
    if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
def _add_dep(self, dep, allow_unsatisfied=False):
    """
    Resolve a single Dependency: register blockers, select a matching
    package, and hand it to _add_pkg(); record unsatisfied deps for
    later display unless allow_unsatisfied is set.
    """
    # NOTE(review): several lines (a try: header and early returns)
    # appear to be elided in this copy; confirm control flow upstream.
    debug = "--debug" in self.myopts
    buildpkgonly = "--buildpkgonly" in self.myopts
    nodeps = "--nodeps" in self.myopts
    empty = "empty" in self.myparams
    deep = "deep" in self.myparams
    update = "--update" in self.myopts and dep.depth <= 1
    if not buildpkgonly and \
        dep.parent not in self._slot_collision_nodes:
        if dep.parent.onlydeps:
            # It's safe to ignore blockers if the
            # parent is an --onlydeps node.
        # The blocker applies to the root where
        # the parent is or will be installed.
        blocker = Blocker(atom=dep.atom,
            eapi=dep.parent.metadata["EAPI"],
            root=dep.parent.root)
        self._blocker_parents.add(blocker, dep.parent)
    dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
        onlydeps=dep.onlydeps)
    if dep.priority.optional:
        # This could be an unnecessary build-time dep
        # pulled in by --with-bdeps=y.
    if allow_unsatisfied:
        self._unsatisfied_deps.append(dep)
    self._unsatisfied_deps_for_display.append(
        ((dep.root, dep.atom), {"myparent":dep.parent}))
    # In some cases, dep_check will return deps that shouldn't
    # be processed any further, so they are identified and
    # discarded here. Try to discard as few as possible since
    # discarded dependencies reduce the amount of information
    # available for optimization of merge order.
    if dep.priority.satisfied and \
        not dep_pkg.installed and \
        not (existing_node or empty or deep or update):
    if dep.root == self.target_root:
        myarg = self._iter_atoms_for_pkg(dep_pkg).next()
    except StopIteration:
    except portage.exception.InvalidDependString:
        if not dep_pkg.installed:
            # This shouldn't happen since the package
            # should have been masked.
    self._ignored_deps.append(dep)
    if not self._add_pkg(dep_pkg, dep):
def _add_pkg(self, pkg, dep):
    """
    Add a package node (and its parent edges) to the dependency graph,
    detecting slot collisions and queuing the package for recursive
    dependency processing.
    """
    # NOTE(review): this method is partially elided in this copy
    # (several try: headers, continuation lines and returns are absent);
    # confirm the full control flow against upstream before modifying.
    myparent = dep.parent
    priority = dep.priority
    if priority is None:
        priority = DepPriority()
    """
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just
        looking at its deps? Think --onlydeps, we need to ignore
        packages in that case.
    """
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge
    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self.digraph
    # select the correct /var database that we'll be checking against
    vardbapi = self.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self.pkgsettings[pkg.root]
    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString, e:
        if not pkg.installed:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))
    if not pkg.onlydeps:
        if not pkg.installed and \
            "empty" not in self.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True
        existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
        existing_node_matches = pkg.cpv == existing_node.cpv
        if existing_node_matches and \
            pkg != existing_node and \
            dep.atom is not None:
            # Use package set for matching since it will match via
            # PROVIDE when necessary, while match_from_list does not.
            atom_set = InternalPackageSet(initial_atoms=[dep.atom])
            if not atom_set.findAtomForPackage(existing_node):
                existing_node_matches = False
        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self.digraph.addnode(existing_node, myparent,
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))
            # A slot collision has occurred.  Sometimes this coincides
            # with unresolvable blockers, so the slot collision will be
            # shown later if there are no unresolvable blockers.
            self._add_slot_conflict(pkg)
            slot_collision = True
        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
    elif not previously_added:
        self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
        self.mydbapi[pkg.root].cpv_inject(pkg)
        self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
    if not pkg.installed:
        # Allow this package to satisfy old-style virtuals in case it
        # doesn't already. Any pre-existing providers will be preferred
        pkgsettings.setinst(pkg.cpv, pkg.metadata)
        # For consistency, also update the global virtuals.
        settings = self.roots[pkg.root].settings
        settings.setinst(pkg.cpv, pkg.metadata)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))
    self._set_nodes.add(pkg)
    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))
    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)
    """ This section determines whether we go deeper into dependencies or not.
    We want to go deeper on a few occasions:
    Installing package A, we need to make sure package A's deps are met.
    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    """
    dep_stack = self._dep_stack
    if "recurse" not in self.myparams:
    elif pkg.installed and \
        "deep" not in self.myparams:
        # Installed packages are only recursed with --deep.
        dep_stack = self._ignored_deps
    self.spinner.update()
    if not previously_added:
        dep_stack.append(pkg)
def _add_parent_atom(self, pkg, parent_atom):
    """Record parent_atom (a (parent, atom) pair) as a parent of pkg."""
    self._parent_atoms.setdefault(pkg, set()).add(parent_atom)
def _add_slot_conflict(self, pkg):
    """
    Record pkg as a participant in a slot conflict for
    (pkg.slot_atom, pkg.root).

    The conflict set for the slot key is created on first use, seeded
    with the package currently occupying the slot in _slot_pkg_map, and
    pkg itself is always added to it.
    """
    self._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # Bug fix: slot_nodes was used without being initialized on
        # this branch (AttributeError on None), and pkg itself was
        # never recorded in the conflict set.
        slot_nodes = set()
        slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
        self._slot_collision_info[slot_key] = slot_nodes
    slot_nodes.add(pkg)
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """
    Extract DEPEND/RDEPEND/PDEPEND from pkg's metadata, select atoms
    for each, and feed the resulting Dependency objects to _add_dep().
    """
    # NOTE(review): this method is partially elided in this copy
    # (loop/try headers, the deps list assignment, and several returns
    # are absent); confirm against upstream before modifying.
    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams
    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    edepend[k] = metadata[k]
    if not pkg.built and \
        "--buildpkgonly" in self.myopts and \
        "deep" not in self.myparams and \
        "empty" not in self.myparams:
        # With --buildpkgonly and no recursion, runtime deps are
        # irrelevant.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_optional = False
    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            bdeps_optional = True
        # built packages do not have build time dependencies.
        edepend["DEPEND"] = ""
    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""
    root_deps = self.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
        elif root_deps == "rdeps":
            edepend["DEPEND"] = ""
    # (root, depstring, priority) triples processed below.
    (bdeps_root, edepend["DEPEND"],
        self._priority(buildtime=(not bdeps_optional),
        optional=bdeps_optional)),
    (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
    debug = "--debug" in self.myopts
    strict = mytype != "installed"
    for dep_root, dep_string, dep_priority in deps:
        print "Parent: ", jbigkey
        print "Depstring:", dep_string
        print "Priority:", dep_priority
        vardb = self.roots[dep_root].trees["vartree"].dbapi
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=myuse, parent=pkg, strict=strict,
            priority=dep_priority)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(jbigkey, dep_string, str(e))
        print "Candidates:", selected_atoms
        for atom in selected_atoms:
            atom = portage.dep.Atom(atom)
            mypriority = dep_priority.copy()
            if not atom.blocker and vardb.match(atom):
                # Already installed somewhere: mark satisfied.
                mypriority.satisfied = True
            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, depth=depth, parent=pkg,
                priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
            except portage.exception.InvalidAtom, e:
                show_invalid_depstring_notice(
                    pkg, dep_string, str(e))
                if not pkg.installed:
        print "Exiting...", jbigkey
    except portage.exception.AmbiguousPackageName, e:
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        portage.writemsg("    %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            "!!! This binary package cannot be installed: '%s'\n" % \
            mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """
    Build a dependency priority object of the kind appropriate for the
    current operation: UnmergeDepPriority during a "remove" run,
    DepPriority otherwise.  kwargs are forwarded to the constructor.
    """
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # Bug fix: previously this assignment was unconditional, which
        # overwrote the UnmergeDepPriority choice and made that branch
        # dead code.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand an atom that lacks a category by probing every category of
    every configured db for a matching package name.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # NOTE(review): the accumulator initializations and the return
    # statement appear to be elided in this copy.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)
    dbs = self._filtered_trees[root_config.root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):
    for cat in categories:
        deps.append(insert_category_into_atom(
            atom_without_category, cat))
def _have_new_virt(self, root, atom_cp):
    """
    Check the filtered dbs of the given root for packages providing
    atom_cp (new-style virtual check; the return path is elided in
    this copy — presumably truthy when any db has a match).
    """
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    """
    Iterate over the argument atoms (from self._set_atoms) that match
    pkg, skipping atoms redirected to a new-style virtual and atoms
    better satisfied by a visible package in a higher slot.
    """
    # TODO: add multiple $ROOT support
    # NOTE(review): several continue/yield lines appear to be elided in
    # this copy, and the final condition is cut off mid-statement.
    if pkg.root != self.target_root:
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): many structural lines of this method (the main
    # argument loop header, try: headers, several else branches and
    # accumulator initializations) appear to be elided in this copy;
    # confirm the full control flow against upstream before modifying.
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self.myopts
    # Classify each argument by its file extension / shape.
    ext = os.path.splitext(x)[1]
    if not os.path.exists(x):
        # Search PKGDIR for the named tbz2 when the path doesn't exist.
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
    elif os.path.exists(
        os.path.join(pkgsettings["PKGDIR"], x)):
        x = os.path.join(pkgsettings["PKGDIR"], x)
    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
    print "!!! Please ensure the tbz2 exists as specified.\n"
    return 0, myfavorites
    # Binary package (.tbz2) argument: derive the cpv key from the
    # package's CATEGORY plus its file name.
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
        print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
        return 0, myfavorites
    db_keys = list(bindb._aux_cache_keys)
    metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
    pkg = Package(type_name="binary", root_config=root_config,
        cpv=mykey, built=True, metadata=metadata,
    self._pkg_cache[pkg] = pkg
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    elif ext==".ebuild":
        # Raw ebuild path argument: must live inside a valid tree.
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.dep_getkey(mykey)):
            print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
            print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
            print colorize("BAD", "*** page for details.")
            countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        db_keys = list(portdb._aux_cache_keys)
        metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
        pkg = Package(type_name="ebuild", root_config=root_config,
            cpv=mykey, metadata=metadata, onlydeps=onlydeps)
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
        pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
        self._pkg_cache[pkg] = pkg
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    elif x.startswith(os.path.sep):
        # Filesystem path argument: must be under $ROOT; owner lookup
        # is deferred and batched below.
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        # Package set argument (e.g. @world).
        s = x[len(SETPREFIX):]
        raise portage.exception.PackageSetNotFound(s)
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        atom_cp = portage.dep_getkey(atom)
        if vardb.cp_list(atom_cp):
            installed_cp_set.add(atom_cp)
    if len(installed_cp_set) > 1:
        # Prefer the non-virtual category when the expansion is
        # otherwise ambiguous.
        non_virtual_cps = set()
        for atom_cp in installed_cp_set:
            if not atom_cp.startswith("virtual/"):
                non_virtual_cps.add(atom_cp)
        if len(non_virtual_cps) == 1:
            installed_cp_set = non_virtual_cps
    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        installed_cp = iter(installed_cp_set).next()
        expanded_atoms = [atom for atom in expanded_atoms \
            if portage.dep_getkey(atom) == installed_cp]
    if len(expanded_atoms) > 1:
        ambiguous_package_name(x, expanded_atoms, root_config,
            self.spinner, self.myopts)
        return False, myfavorites
    atom = expanded_atoms[0]
    null_atom = insert_category_into_atom(x, "null")
    null_cp = portage.dep_getkey(null_atom)
    cat, atom_pn = portage.catsplit(null_cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # Allow the depgraph to choose which virtual.
    atom = insert_category_into_atom(x, "virtual")
    atom = insert_category_into_atom(x, "null")
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))
    # Resolve queued filesystem paths to their owning packages in a
    # single batched iter_owners() call.
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True
    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot):])
    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:
    portage.writemsg(("\n\n!!! '%s' is not claimed " + \
        "by any package.\n") % lookup_owners[0], noiselevel=-1)
    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = portage.cpv_getkey(cpv)
    atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))
    if "--update" in self.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms
        self._set_args(args)
        greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom):
            AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))
        self._set_args(greedy_args)
        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        revised_greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom,
            blocker_lookahead=True):
            revised_greedy_args.append(
                AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args
    self._set_args(args)
    # De-duplicate favorites while preserving a list result.
    myfavorites = set(myfavorites)
    if isinstance(arg, (AtomArg, PackageArg)):
        myfavorites.add(arg.atom)
    elif isinstance(arg, SetArg):
        myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)
    pprovideddict = pkgsettings.pprovideddict
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    for atom in arg.set:
        self.spinner.update()
        dep = Dependency(atom=atom, onlydeps=onlydeps,
            root=myroot, parent=arg)
        atom_cp = portage.dep_getkey(atom)
        pprovided = pprovideddict.get(portage.dep_getkey(atom))
        if pprovided and portage.match_from_list(atom, pprovided):
            # A provided package has been specified on the command line.
            self._pprovided_args.append((arg, atom))
        if isinstance(arg, PackageArg):
            if not self._add_pkg(arg.package, dep) or \
                not self._create_graph():
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % arg.arg)
                return 0, myfavorites
        portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
            (arg, atom), noiselevel=-1)
        pkg, existing_node = self._select_package(
            myroot, atom, onlydeps=onlydeps)
        if not (isinstance(arg, SetArg) and \
            arg.name in ("system", "world")):
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            return 0, myfavorites
        self._missing_args.append((arg, atom))
        if atom_cp != pkg.cp:
            # For old-style virtuals, we need to repeat the
            # package.provided check against the selected package.
            expanded_atom = atom.replace(atom_cp, pkg.cp)
            pprovided = pprovideddict.get(pkg.cp)
            portage.match_from_list(expanded_atom, pprovided):
                # A provided package has been
                # specified on the command line.
                self._pprovided_args.append((arg, atom))
        if pkg.installed and "selective" not in self.myparams:
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            # Previous behavior was to bail out in this case, but
            # since the dep is satisfied by the installed package,
            # it's more friendly to continue building the graph
            # and just show a warning message. Therefore, only bail
            # out here if the atom is not from either the system or
            if not (isinstance(arg, SetArg) and \
                arg.name in ("system", "world")):
                return 0, myfavorites
        # Add the selected package to the graph as soon as possible
        # so that later dep_check() calls can use it as feedback
        # for making more consistent atom selections.
        if not self._add_pkg(pkg, dep):
            if isinstance(arg, SetArg):
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s from %s\n") % \
            sys.stderr.write(("\n\n!!! Problem resolving " + \
                "dependencies for %s\n") % atom)
            return 0, myfavorites
    except portage.exception.MissingSignature, e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature, e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit, e:
        raise # Needed else can't exit
    except Exception, e:
        print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
        print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites
    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                print "Missing binary for:",xs[2]
    except self._unknown_internal_error:
        return False, myfavorites
    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
# NOTE(review): this listing is fragmented -- the embedded line numbers skip
# (e.g. 5852, 5857, 5859-5860), so some statements are not visible here.
# Comments describe only what the visible lines show.
5851 def _set_args(self, args):
5853 Create the "args" package set from atoms and packages given as
5854 arguments. This method can be called multiple times if necessary.
5855 The package selection cache is automatically invalidated, since
5856 arguments influence package selections.
# Rebuild the "args" set from the given AtomArg/PackageArg instances.
5858 args_set = self._sets["args"]
5861 if not isinstance(arg, (AtomArg, PackageArg)):
5864 if atom in args_set:
# Recompute the union of atoms across all sets, then rebuild the
# (atom, root) -> argument-references mapping from scratch.
5868 self._set_atoms.clear()
5869 self._set_atoms.update(chain(*self._sets.itervalues()))
5870 atom_arg_map = self._atom_arg_map
5871 atom_arg_map.clear()
5873 for atom in arg.set:
5874 atom_key = (atom, arg.root_config.root)
5875 refs = atom_arg_map.get(atom_key)
5878 atom_arg_map[atom_key] = refs
5882 # Invalidate the package selection cache, since
5883 # arguments influence package selections.
5884 self._highest_pkg_cache.clear()
5885 for trees in self._filtered_trees.itervalues():
5886 trees["porttree"].dbapi._clear_cache()
# NOTE(review): fragmented listing -- interior lines are missing (embedded
# numbering skips, e.g. 5898, 5905-5909); comments are limited to what is visible.
5888 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5890 Return a list of slot atoms corresponding to installed slots that
5891 differ from the slot of the highest visible match. When
5892 blocker_lookahead is True, slot atoms that would trigger a blocker
5893 conflict are automatically discarded, potentially allowing automatic
5894 uninstallation of older slots when appropriate.
# Find the highest visible match for the atom; the visible early-return
# handling for "no match" appears truncated here.
5896 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5897 if highest_pkg is None:
5899 vardb = root_config.trees["vartree"].dbapi
# Collect SLOTs of installed packages sharing the cp of the highest match.
5901 for cpv in vardb.match(atom):
5902 # don't mix new virtuals with old virtuals
5903 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5904 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5906 slots.add(highest_pkg.metadata["SLOT"])
5910 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep the selected package only when it is a
# lower version of the same cp as the highest match.
5913 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5914 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5915 if pkg is not None and \
5916 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5917 greedy_pkgs.append(pkg)
5920 if not blocker_lookahead:
5921 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: gather each candidate's blocker atoms from its
# *DEPEND strings so conflicting slot atoms can be discarded below.
5924 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5925 for pkg in greedy_pkgs + [highest_pkg]:
5926 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5928 atoms = self._select_atoms(
5929 pkg.root, dep_str, pkg.use.enabled,
5930 parent=pkg, strict=True)
5931 except portage.exception.InvalidDependString:
5933 blocker_atoms = (x for x in atoms if x.blocker)
5934 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5936 if highest_pkg not in blockers:
5939 # filter packages with invalid deps
5940 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5942 # filter packages that conflict with highest_pkg
5943 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5944 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5945 blockers[pkg].findAtomForPackage(highest_pkg))]
5950 # If two packages conflict, discard the lower version.
5951 discard_pkgs = set()
5952 greedy_pkgs.sort(reverse=True)
# Pairwise conflict check over the version-sorted candidates; the higher
# version (earlier index) wins and the lower one is discarded.
5953 for i in xrange(len(greedy_pkgs) - 1):
5954 pkg1 = greedy_pkgs[i]
5955 if pkg1 in discard_pkgs:
5957 for j in xrange(i + 1, len(greedy_pkgs)):
5958 pkg2 = greedy_pkgs[j]
5959 if pkg2 in discard_pkgs:
5961 if blockers[pkg1].findAtomForPackage(pkg2) or \
5962 blockers[pkg2].findAtomForPackage(pkg1):
5964 discard_pkgs.add(pkg2)
5966 return [pkg.slot_atom for pkg in greedy_pkgs \
5967 if pkg not in discard_pkgs]
5969 def _select_atoms_from_graph(self, *pargs, **kwargs):
5971 Prefer atoms matching packages that have already been
5972 added to the graph or those that are installed and have
5973 not been scheduled for replacement.
# Thin wrapper: force dep_check to resolve against the graph trees
# instead of the default filtered trees, then delegate.
5975 kwargs["trees"] = self._graph_trees
5976 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): fragmented listing -- lines such as 5983, 5992-5994 and 6002
# are missing from view (the try/finally structure around dep_check is
# presumably among them -- confirm against the full file).
5978 def _select_atoms_highest_available(self, root, depstring,
5979 myuse=None, parent=None, strict=True, trees=None, priority=None):
5980 """This will raise InvalidDependString if necessary. If trees is
5981 None then self._filtered_trees is used."""
5982 pkgsettings = self.pkgsettings[root]
5984 trees = self._filtered_trees
5985 if not getattr(priority, "buildtime", False):
5986 # The parent should only be passed to dep_check() for buildtime
5987 # dependencies since that's the only case when it's appropriate
5988 # to trigger the circular dependency avoidance code which uses it.
5989 # It's important not to trigger the same circular dependency
5990 # avoidance code for runtime dependencies since it's not needed
5991 # and it can promote an incorrect package choice.
# Expose the parent package to dep_check via the trees mapping; it is
# popped again after the call (visible below).
5995 if parent is not None:
5996 trees[root]["parent"] = parent
# Temporarily relax global strictness for the dep_check call, restoring
# it afterwards.
5998 portage.dep._dep_check_strict = False
5999 mycheck = portage.dep_check(depstring, None,
6000 pkgsettings, myuse=myuse,
6001 myroot=root, trees=trees)
6003 if parent is not None:
6004 trees[root].pop("parent")
6005 portage.dep._dep_check_strict = True
# On failure mycheck[1] carries the error text; on success it carries
# the selected atoms.
6007 raise portage.exception.InvalidDependString(mycheck[1])
6008 selected_atoms = mycheck[1]
6009 return selected_atoms
# NOTE(review): fragmented listing -- interior lines are missing throughout
# (embedded numbering skips); comments reflect only the visible lines.
# Purpose: print a human-readable explanation of why no package satisfies
# `atom` (masked packages, missing USE/IUSE flags), then walk parent nodes
# to show what pulled the dependency in.
6011 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
6012 atom = portage.dep.Atom(atom)
6013 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-free variant of the atom (keeping the slot) so matching
# below ignores USE deps.
6014 atom_without_use = atom
6016 atom_without_use = portage.dep.remove_slot(atom)
6018 atom_without_use += ":" + atom.slot
6019 atom_without_use = portage.dep.Atom(atom_without_use)
6020 xinfo = '"%s"' % atom
6023 # Discard null/ from failed cpv_expand category expansion.
6024 xinfo = xinfo.replace("null/", "")
6025 masked_packages = []
6027 masked_pkg_instances = set()
6028 missing_licenses = []
6029 have_eapi_mask = False
6030 pkgsettings = self.pkgsettings[root]
6031 implicit_iuse = pkgsettings._get_implicit_iuse()
6032 root_config = self.roots[root]
6033 portdb = self.roots[root].trees["porttree"].dbapi
6034 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidates and classify
# each as masked or missing-USE.
6035 for db, pkg_type, built, installed, db_keys in dbs:
6039 if hasattr(db, "xmatch"):
6040 cpv_list = db.xmatch("match-all", atom_without_use)
6042 cpv_list = db.match(atom_without_use)
6045 for cpv in cpv_list:
6046 metadata, mreasons = get_mask_info(root_config, cpv,
6047 pkgsettings, db, pkg_type, built, installed, db_keys)
6048 if metadata is not None:
6049 pkg = Package(built=built, cpv=cpv,
6050 installed=installed, metadata=metadata,
6051 root_config=root_config)
6052 if pkg.cp != atom.cp:
6053 # A cpv can be returned from dbapi.match() as an
6054 # old-style virtual match even in cases when the
6055 # package does not actually PROVIDE the virtual.
6056 # Filter out any such false matches here.
6057 if not atom_set.findAtomForPackage(pkg):
6060 masked_pkg_instances.add(pkg)
6062 missing_use.append(pkg)
6065 masked_packages.append(
6066 (root_config, pkgsettings, cpv, metadata, mreasons))
# For each candidate lacking required USE flags, compute either the
# missing IUSE entries or the concrete +/- USE changes needed.
6068 missing_use_reasons = []
6069 missing_iuse_reasons = []
6070 for pkg in missing_use:
6071 use = pkg.use.enabled
6072 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6073 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6075 for x in atom.use.required:
6076 if iuse_re.match(x) is None:
6077 missing_iuse.append(x)
6080 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6081 missing_iuse_reasons.append((pkg, mreasons))
6083 need_enable = sorted(atom.use.enabled.difference(use))
6084 need_disable = sorted(atom.use.disabled.intersection(use))
6085 if need_enable or need_disable:
6087 changes.extend(colorize("red", "+" + x) \
6088 for x in need_enable)
6089 changes.extend(colorize("blue", "-" + x) \
6090 for x in need_disable)
6091 mreasons.append("Change USE: %s" % " ".join(changes))
6092 missing_use_reasons.append((pkg, mreasons))
# Only report USE problems for packages that are not otherwise masked.
6094 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6095 in missing_use_reasons if pkg not in masked_pkg_instances]
6097 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6098 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6100 show_missing_use = False
6101 if unmasked_use_reasons:
6102 # Only show the latest version.
6103 show_missing_use = unmasked_use_reasons[:1]
6104 elif unmasked_iuse_reasons:
6105 if missing_use_reasons:
6106 # All packages with required IUSE are masked,
6107 # so display a normal masking message.
6110 show_missing_use = unmasked_iuse_reasons
# Emit one of three reports: USE-flag problems, mask reasons, or a plain
# "no ebuilds to satisfy" message.
6112 if show_missing_use:
6113 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6114 print "!!! One of the following packages is required to complete your request:"
6115 for pkg, mreasons in show_missing_use:
6116 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6118 elif masked_packages:
6120 colorize("BAD", "All ebuilds that could satisfy ") + \
6121 colorize("INFORM", xinfo) + \
6122 colorize("BAD", " have been masked.")
6123 print "!!! One of the following masked packages is required to complete your request:"
6124 have_eapi_mask = show_masked_packages(masked_packages)
6127 msg = ("The current version of portage supports " + \
6128 "EAPI '%s'. You must upgrade to a newer version" + \
6129 " of portage before EAPI masked packages can" + \
6130 " be installed.") % portage.const.EAPI
6131 from textwrap import wrap
6132 for line in wrap(msg, 75):
6137 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6139 # Show parent nodes and the argument that pulled them in.
6140 traversed_nodes = set()
6143 while node is not None:
6144 traversed_nodes.add(node)
6145 msg.append('(dependency required by "%s" [%s])' % \
6146 (colorize('INFORM', str(node.cpv)), node.type_name))
6147 # When traversing to parents, prefer arguments over packages
6148 # since arguments are root nodes. Never traverse the same
6149 # package twice, in order to prevent an infinite loop.
6150 selected_parent = None
6151 for parent in self.digraph.parent_nodes(node):
6152 if isinstance(parent, DependencyArg):
6153 msg.append('(dependency required by "%s" [argument])' % \
6154 (colorize('INFORM', str(parent))))
6155 selected_parent = None
6157 if parent not in traversed_nodes:
6158 selected_parent = parent
6159 node = selected_parent
# NOTE(review): fragmented listing -- lines 6168-6169, 6175, 6177, 6180-6181
# are missing, so the cache-hit unpacking and return paths are not fully
# visible here.
# Caching front-end for _select_pkg_highest_available_imp(), keyed by
# (root, atom, onlydeps).
6165 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6166 cache_key = (root, atom, onlydeps)
6167 ret = self._highest_pkg_cache.get(cache_key)
# Cache hit: if the cached pkg has since been added to the graph for its
# slot, refresh the cached entry to reflect that.
6170 if pkg and not existing:
6171 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6172 if existing and existing == pkg:
6173 # Update the cache to reflect that the
6174 # package has been added to the graph.
6176 self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store the result.
6178 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6179 self._highest_pkg_cache[cache_key] = ret
# Track visible packages (not installed-and-keyword-masked) in the
# per-root visible_pkgs database.
6182 settings = pkg.root_config.settings
6183 if visible(settings, pkg) and not (pkg.installed and \
6184 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6185 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# NOTE(review): fragmented listing -- many interior lines are missing
# (embedded numbering skips, e.g. 6221-6222, 6288-6293, 6461-6462), including
# several loop/conditional bodies. Comments below are limited to what the
# visible lines establish; do not infer control flow across the gaps.
# Purpose: pick the best available package matching `atom` for `root`,
# returning (pkg, existing_node) where existing_node is a graph package
# already occupying the chosen slot (if any).
6188 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6189 root_config = self.roots[root]
6190 pkgsettings = self.pkgsettings[root]
6191 dbs = self._filtered_trees[root]["dbs"]
6192 vardb = self.roots[root].trees["vartree"].dbapi
6193 portdb = self.roots[root].trees["porttree"].dbapi
6194 # List of acceptable packages, ordered by type preference.
6195 matched_packages = []
6196 highest_version = None
6197 if not isinstance(atom, portage.dep.Atom):
6198 atom = portage.dep.Atom(atom)
6200 atom_set = InternalPackageSet(initial_atoms=(atom,))
6201 existing_node = None
# Relevant options/parameters that steer selection below.
6203 usepkgonly = "--usepkgonly" in self.myopts
6204 empty = "empty" in self.myparams
6205 selective = "selective" in self.myparams
6207 noreplace = "--noreplace" in self.myopts
6208 # Behavior of the "selective" parameter depends on
6209 # whether or not a package matches an argument atom.
6210 # If an installed package provides an old-style
6211 # virtual that is no longer provided by an available
6212 # package, the installed package may match an argument
6213 # atom even though none of the available packages do.
6214 # Therefore, "selective" logic does not consider
6215 # whether or not an installed package matches an
6216 # argument atom. It only considers whether or not
6217 # available packages match argument atoms, which is
6218 # represented by the found_available_arg flag.
6219 found_available_arg = False
# Two passes: first preferring nodes already in the graph, then not.
6220 for find_existing_node in True, False:
6223 for db, pkg_type, built, installed, db_keys in dbs:
6226 if installed and not find_existing_node:
6227 want_reinstall = reinstall or empty or \
6228 (found_available_arg and not selective)
6229 if want_reinstall and matched_packages:
6231 if hasattr(db, "xmatch"):
6232 cpv_list = db.xmatch("match-all", atom)
6234 cpv_list = db.match(atom)
6236 # USE=multislot can make an installed package appear as if
6237 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6238 # won't do any good as long as USE=multislot is enabled since
6239 # the newly built package still won't have the expected slot.
6240 # Therefore, assume that such SLOT dependencies are already
6241 # satisfied rather than forcing a rebuild.
6242 if installed and not cpv_list and atom.slot:
6243 for cpv in db.match(atom.cp):
6244 slot_available = False
6245 for other_db, other_type, other_built, \
6246 other_installed, other_keys in dbs:
6249 other_db.aux_get(cpv, ["SLOT"])[0]:
6250 slot_available = True
6254 if not slot_available:
6256 inst_pkg = self._pkg(cpv, "installed",
6257 root_config, installed=installed)
6258 # Remove the slot from the atom and verify that
6259 # the package matches the resulting atom.
6260 atom_without_slot = portage.dep.remove_slot(atom)
6262 atom_without_slot += str(atom.use)
6263 atom_without_slot = portage.dep.Atom(atom_without_slot)
6264 if portage.match_from_list(
6265 atom_without_slot, [inst_pkg]):
6266 cpv_list = [inst_pkg.cpv]
6271 pkg_status = "merge"
6272 if installed or onlydeps:
6273 pkg_status = "nomerge"
# Evaluate each candidate cpv from this db.
6276 for cpv in cpv_list:
6277 # Make --noreplace take precedence over --newuse.
6278 if not installed and noreplace and \
6279 cpv in vardb.match(atom):
6280 # If the installed version is masked, it may
6281 # be necessary to look at lower versions,
6282 # in case there is a visible downgrade.
6284 reinstall_for_flags = None
6285 cache_key = (pkg_type, root, cpv, pkg_status)
6286 calculated_use = True
6287 pkg = self._pkg_cache.get(cache_key)
# Cache miss: build the Package from db metadata.
6289 calculated_use = False
6291 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6294 pkg = Package(built=built, cpv=cpv,
6295 installed=installed, metadata=metadata,
6296 onlydeps=onlydeps, root_config=root_config,
6298 metadata = pkg.metadata
6300 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6301 if not built and ("?" in metadata["LICENSE"] or \
6302 "?" in metadata["PROVIDE"]):
6303 # This is avoided whenever possible because
6304 # it's expensive. It only needs to be done here
6305 # if it has an effect on visibility.
6306 pkgsettings.setcpv(pkg)
6307 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6308 calculated_use = True
6309 self._pkg_cache[pkg] = pkg
6311 if not installed or (built and matched_packages):
6312 # Only enforce visibility on installed packages
6313 # if there is at least one other visible package
6314 # available. By filtering installed masked packages
6315 # here, packages that have been masked since they
6316 # were installed can be automatically downgraded
6317 # to an unmasked version.
6319 if not visible(pkgsettings, pkg):
6321 except portage.exception.InvalidDependString:
6325 # Enable upgrade or downgrade to a version
6326 # with visible KEYWORDS when the installed
6327 # version is masked by KEYWORDS, but never
6328 # reinstall the same exact version only due
6329 # to a KEYWORDS mask.
6330 if built and matched_packages:
6332 different_version = None
6333 for avail_pkg in matched_packages:
6334 if not portage.dep.cpvequal(
6335 pkg.cpv, avail_pkg.cpv):
6336 different_version = avail_pkg
6338 if different_version is not None:
6341 pkgsettings._getMissingKeywords(
6342 pkg.cpv, pkg.metadata):
6345 # If the ebuild no longer exists or it's
6346 # keywords have been dropped, reject built
6347 # instances (installed or binary).
6348 # If --usepkgonly is enabled, assume that
6349 # the ebuild status should be ignored.
6353 pkg.cpv, "ebuild", root_config)
6354 except portage.exception.PackageNotFound:
6357 if not visible(pkgsettings, pkg_eb):
# Lazily compute USE for unbuilt packages if not already done.
6360 if not pkg.built and not calculated_use:
6361 # This is avoided whenever possible because
6363 pkgsettings.setcpv(pkg)
6364 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6366 if pkg.cp != atom.cp:
6367 # A cpv can be returned from dbapi.match() as an
6368 # old-style virtual match even in cases when the
6369 # package does not actually PROVIDE the virtual.
6370 # Filter out any such false matches here.
6371 if not atom_set.findAtomForPackage(pkg):
6375 if root == self.target_root:
6377 # Ebuild USE must have been calculated prior
6378 # to this point, in case atoms have USE deps.
6379 myarg = self._iter_atoms_for_pkg(pkg).next()
6380 except StopIteration:
6382 except portage.exception.InvalidDependString:
6384 # masked by corruption
6386 if not installed and myarg:
6387 found_available_arg = True
# Reject candidates whose USE does not satisfy the atom's USE deps.
6389 if atom.use and not pkg.built:
6390 use = pkg.use.enabled
6391 if atom.use.enabled.difference(use):
6393 if atom.use.disabled.intersection(use):
# Track the highest-versioned visible match for this cp.
6395 if pkg.cp == atom_cp:
6396 if highest_version is None:
6397 highest_version = pkg
6398 elif pkg > highest_version:
6399 highest_version = pkg
6400 # At this point, we've found the highest visible
6401 # match from the current repo. Any lower versions
6402 # from this repo are ignored, so this so the loop
6403 # will always end with a break statement below
6405 if find_existing_node:
6406 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6409 if portage.dep.match_from_list(atom, [e_pkg]):
6410 if highest_version and \
6411 e_pkg.cp == atom_cp and \
6412 e_pkg < highest_version and \
6413 e_pkg.slot_atom != highest_version.slot_atom:
6414 # There is a higher version available in a
6415 # different slot, so this existing node is
6419 matched_packages.append(e_pkg)
6420 existing_node = e_pkg
6422 # Compare built package to current config and
6423 # reject the built package if necessary.
6424 if built and not installed and \
6425 ("--newuse" in self.myopts or \
6426 "--reinstall" in self.myopts):
6427 iuses = pkg.iuse.all
6428 old_use = pkg.use.enabled
6430 pkgsettings.setcpv(myeb)
6432 pkgsettings.setcpv(pkg)
6433 now_use = pkgsettings["PORTAGE_USE"].split()
6434 forced_flags = set()
6435 forced_flags.update(pkgsettings.useforce)
6436 forced_flags.update(pkgsettings.usemask)
6438 if myeb and not usepkgonly:
6439 cur_iuse = myeb.iuse.all
6440 if self._reinstall_for_flags(forced_flags,
6444 # Compare current config to installed package
6445 # and do not reinstall if possible.
6446 if not installed and \
6447 ("--newuse" in self.myopts or \
6448 "--reinstall" in self.myopts) and \
6449 cpv in vardb.match(atom):
6450 pkgsettings.setcpv(pkg)
6451 forced_flags = set()
6452 forced_flags.update(pkgsettings.useforce)
6453 forced_flags.update(pkgsettings.usemask)
6454 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6455 old_iuse = set(filter_iuse_defaults(
6456 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6457 cur_use = pkg.use.enabled
6458 cur_iuse = pkg.iuse.all
6459 reinstall_for_flags = \
6460 self._reinstall_for_flags(
6461 forced_flags, old_use, old_iuse,
6463 if reinstall_for_flags:
6467 matched_packages.append(pkg)
6468 if reinstall_for_flags:
6469 self._reinstall_nodes[pkg] = \
6473 if not matched_packages:
6476 if "--debug" in self.myopts:
6477 for pkg in matched_packages:
6478 portage.writemsg("%s %s\n" % \
6479 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6481 # Filter out any old-style virtual matches if they are
6482 # mixed with new-style virtual matches.
6483 cp = portage.dep_getkey(atom)
6484 if len(matched_packages) > 1 and \
6485 "virtual" == portage.catsplit(cp)[0]:
6486 for pkg in matched_packages:
6489 # Got a new-style virtual, so filter
6490 # out any old-style virtuals.
6491 matched_packages = [pkg for pkg in matched_packages \
# When multiple candidates remain, keep only those equal to the best cpv.
6495 if len(matched_packages) > 1:
6496 bestmatch = portage.best(
6497 [pkg.cpv for pkg in matched_packages])
6498 matched_packages = [pkg for pkg in matched_packages \
6499 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6501 # ordered by type preference ("ebuild" type is the last resort)
6502 return matched_packages[-1], existing_node
6504 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6506 Select packages that have already been added to the graph or
6507 those that are installed and have not been scheduled for
# Match against the graph-backed db; matches are ordered so the last
# entry is the highest version.
6510 graph_db = self._graph_trees[root]["porttree"].dbapi
6511 matches = graph_db.match_pkgs(atom)
6514 pkg = matches[-1] # highest match
# Report whether this slot is already occupied in the graph.
6515 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6516 return pkg, in_graph
# NOTE(review): fragmented listing -- interior lines are missing (e.g.
# 6536-6537, 6577, 6593, 6602), so some early-return and loop bodies are
# not visible here.
6518 def _complete_graph(self):
6520 Add any deep dependencies of required sets (args, system, world) that
6521 have not been pulled into the graph yet. This ensures that the graph
6522 is consistent such that initially satisfied deep dependencies are not
6523 broken in the new graph. Initially unsatisfied dependencies are
6524 irrelevant since we only want to avoid breaking dependencies that are
6527 Since this method can consume enough time to disturb users, it is
6528 currently only enabled by the --complete-graph option.
6530 if "--buildpkgonly" in self.myopts or \
6531 "recurse" not in self.myparams:
6534 if "complete" not in self.myparams:
6535 # Skip this to avoid consuming enough time to disturb users.
6538 # Put the depgraph into a mode that causes it to only
6539 # select packages that have already been added to the
6540 # graph or those that are installed and have not been
6541 # scheduled for replacement. Also, toggle the "deep"
6542 # parameter so that all dependencies are traversed and
6544 self._select_atoms = self._select_atoms_from_graph
6545 self._select_package = self._select_pkg_from_graph
6546 already_deep = "deep" in self.myparams
6547 if not already_deep:
6548 self.myparams.add("deep")
# For each root, determine which required sets still need processing.
6550 for root in self.roots:
6551 required_set_names = self._required_set_names.copy()
6552 if root == self.target_root and \
6553 (already_deep or "empty" in self.myparams):
6554 required_set_names.difference_update(self._sets)
6555 if not required_set_names and not self._ignored_deps:
6557 root_config = self.roots[root]
6558 setconfig = root_config.setconfig
6560 # Reuse existing SetArg instances when available.
6561 for arg in self.digraph.root_nodes():
6562 if not isinstance(arg, SetArg):
6564 if arg.root_config != root_config:
6566 if arg.name in required_set_names:
6568 required_set_names.remove(arg.name)
6569 # Create new SetArg instances only when necessary.
6570 for s in required_set_names:
6571 expanded_set = InternalPackageSet(
6572 initial_atoms=setconfig.getSetAtoms(s))
6573 atom = SETPREFIX + s
6574 args.append(SetArg(arg=atom, set=expanded_set,
6575 root_config=root_config))
6576 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom (and any previously ignored deps) for traversal.
6578 for atom in arg.set:
6579 self._dep_stack.append(
6580 Dependency(atom=atom, root=root, parent=arg))
6581 if self._ignored_deps:
6582 self._dep_stack.extend(self._ignored_deps)
6583 self._ignored_deps = []
6584 if not self._create_graph(allow_unsatisfied=True):
6586 # Check the unsatisfied deps to see if any initially satisfied deps
6587 # will become unsatisfied due to an upgrade. Initially unsatisfied
6588 # deps are irrelevant since we only want to avoid breaking deps
6589 # that are initially satisfied.
6590 while self._unsatisfied_deps:
6591 dep = self._unsatisfied_deps.pop()
6592 matches = vardb.match_pkgs(dep.atom)
6594 self._initially_unsatisfied_deps.append(dep)
6596 # An scheduled installation broke a deep dependency.
6597 # Add the installed package to the graph so that it
6598 # will be appropriately reported as a slot collision
6599 # (possibly solvable via backtracking).
6600 pkg = matches[-1] # highest match
6601 if not self._add_pkg(pkg, dep):
6603 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): fragmented listing -- lines 6608, 6612-6615, 6619, 6624 and
# 6626 are missing, including the docstring delimiters and the cache-hit
# return path.
6607 def _pkg(self, cpv, type_name, root_config, installed=False):
6609 Get a package instance from the cache, or create a new
6610 one if necessary. Raises KeyError from aux_get if it
6611 failures for some reason (package does not exist or is
# Cache lookup keyed by (type_name, root, cpv, operation).
6616 operation = "nomerge"
6617 pkg = self._pkg_cache.get(
6618 (type_name, root_config.root, cpv, operation))
# Cache miss: fetch metadata from the appropriate tree's dbapi using the
# aux-cache key set of the original (unfiltered) tree.
6620 tree_type = self.pkg_tree_map[type_name]
6621 db = root_config.trees[tree_type].dbapi
6622 db_keys = list(self._trees_orig[root_config.root][
6623 tree_type].dbapi._aux_cache_keys)
6625 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
# Raised when aux_get fails (visible except-body; the `except` line
# itself is among the missing lines).
6627 raise portage.exception.PackageNotFound(cpv)
6628 pkg = Package(cpv=cpv, metadata=metadata,
6629 root_config=root_config, installed=installed)
# For ebuilds, compute USE/CHOST from the current config before caching.
6630 if type_name == "ebuild":
6631 settings = self.pkgsettings[root_config.root]
6632 settings.setcpv(pkg)
6633 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6634 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6635 self._pkg_cache[pkg] = pkg
6638 def validate_blockers(self):
6639 """Remove any blockers from the digraph that do not match any of the
6640 packages within the graph. If necessary, create hard deps to ensure
6641 correct merge order such that mutually blocking packages are never
6642 installed simultaneously."""
6644 if "--buildpkgonly" in self.myopts or \
6645 "--nodeps" in self.myopts:
6648 #if "deep" in self.myparams:
6650 # Pull in blockers from all installed packages that haven't already
6651 # been pulled into the depgraph. This is not enabled by default
6652 # due to the performance penalty that is incurred by all the
6653 # additional dep_check calls that are required.
6655 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6656 for myroot in self.trees:
6657 vardb = self.trees[myroot]["vartree"].dbapi
6658 portdb = self.trees[myroot]["porttree"].dbapi
6659 pkgsettings = self.pkgsettings[myroot]
6660 final_db = self.mydbapi[myroot]
6662 blocker_cache = BlockerCache(myroot, vardb)
6663 stale_cache = set(blocker_cache)
6666 stale_cache.discard(cpv)
6667 pkg_in_graph = self.digraph.contains(pkg)
6669 # Check for masked installed packages. Only warn about
6670 # packages that are in the graph in order to avoid warning
6671 # about those that will be automatically uninstalled during
6672 # the merge process or by --depclean.
6674 if pkg_in_graph and not visible(pkgsettings, pkg):
6675 self._masked_installed.add(pkg)
6677 blocker_atoms = None
6683 self._blocker_parents.child_nodes(pkg))
6688 self._irrelevant_blockers.child_nodes(pkg))
6691 if blockers is not None:
6692 blockers = set(str(blocker.atom) \
6693 for blocker in blockers)
6695 # If this node has any blockers, create a "nomerge"
6696 # node for it so that they can be enforced.
6697 self.spinner.update()
6698 blocker_data = blocker_cache.get(cpv)
6699 if blocker_data is not None and \
6700 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6703 # If blocker data from the graph is available, use
6704 # it to validate the cache and update the cache if
6706 if blocker_data is not None and \
6707 blockers is not None:
6708 if not blockers.symmetric_difference(
6709 blocker_data.atoms):
6713 if blocker_data is None and \
6714 blockers is not None:
6715 # Re-use the blockers from the graph.
6716 blocker_atoms = sorted(blockers)
6717 counter = long(pkg.metadata["COUNTER"])
6719 blocker_cache.BlockerData(counter, blocker_atoms)
6720 blocker_cache[pkg.cpv] = blocker_data
6724 blocker_atoms = blocker_data.atoms
6726 # Use aux_get() to trigger FakeVartree global
6727 # updates on *DEPEND when appropriate.
6728 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6729 # It is crucial to pass in final_db here in order to
6730 # optimize dep_check calls by eliminating atoms via
6731 # dep_wordreduce and dep_eval calls.
6733 portage.dep._dep_check_strict = False
6735 success, atoms = portage.dep_check(depstr,
6736 final_db, pkgsettings, myuse=pkg.use.enabled,
6737 trees=self._graph_trees, myroot=myroot)
6738 except Exception, e:
6739 if isinstance(e, SystemExit):
6741 # This is helpful, for example, if a ValueError
6742 # is thrown from cpv_expand due to multiple
6743 # matches (this can happen if an atom lacks a
6745 show_invalid_depstring_notice(
6746 pkg, depstr, str(e))
6750 portage.dep._dep_check_strict = True
6752 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6753 if replacement_pkg and \
6754 replacement_pkg[0].operation == "merge":
6755 # This package is being replaced anyway, so
6756 # ignore invalid dependencies so as not to
6757 # annoy the user too much (otherwise they'd be
6758 # forced to manually unmerge it first).
6760 show_invalid_depstring_notice(pkg, depstr, atoms)
6762 blocker_atoms = [myatom for myatom in atoms \
6763 if myatom.startswith("!")]
6764 blocker_atoms.sort()
6765 counter = long(pkg.metadata["COUNTER"])
6766 blocker_cache[cpv] = \
6767 blocker_cache.BlockerData(counter, blocker_atoms)
6770 for atom in blocker_atoms:
6771 blocker = Blocker(atom=portage.dep.Atom(atom),
6772 eapi=pkg.metadata["EAPI"], root=myroot)
6773 self._blocker_parents.add(blocker, pkg)
6774 except portage.exception.InvalidAtom, e:
6775 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6776 show_invalid_depstring_notice(
6777 pkg, depstr, "Invalid Atom: %s" % (e,))
6779 for cpv in stale_cache:
6780 del blocker_cache[cpv]
6781 blocker_cache.flush()
6784 # Discard any "uninstall" tasks scheduled by previous calls
6785 # to this method, since those tasks may not make sense given
6786 # the current graph state.
6787 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6788 if previous_uninstall_tasks:
6789 self._blocker_uninstalls = digraph()
6790 self.digraph.difference_update(previous_uninstall_tasks)
6792 for blocker in self._blocker_parents.leaf_nodes():
6793 self.spinner.update()
6794 root_config = self.roots[blocker.root]
6795 virtuals = root_config.settings.getvirtuals()
6796 myroot = blocker.root
6797 initial_db = self.trees[myroot]["vartree"].dbapi
6798 final_db = self.mydbapi[myroot]
6800 provider_virtual = False
6801 if blocker.cp in virtuals and \
6802 not self._have_new_virt(blocker.root, blocker.cp):
6803 provider_virtual = True
6805 # Use this to check PROVIDE for each matched package
6807 atom_set = InternalPackageSet(
6808 initial_atoms=[blocker.atom])
6810 if provider_virtual:
6812 for provider_entry in virtuals[blocker.cp]:
6814 portage.dep_getkey(provider_entry)
6815 atoms.append(blocker.atom.replace(
6816 blocker.cp, provider_cp))
6818 atoms = [blocker.atom]
6820 blocked_initial = set()
6822 for pkg in initial_db.match_pkgs(atom):
6823 if atom_set.findAtomForPackage(pkg):
6824 blocked_initial.add(pkg)
6826 blocked_final = set()
6828 for pkg in final_db.match_pkgs(atom):
6829 if atom_set.findAtomForPackage(pkg):
6830 blocked_final.add(pkg)
6832 if not blocked_initial and not blocked_final:
6833 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6834 self._blocker_parents.remove(blocker)
6835 # Discard any parents that don't have any more blockers.
6836 for pkg in parent_pkgs:
6837 self._irrelevant_blockers.add(blocker, pkg)
6838 if not self._blocker_parents.child_nodes(pkg):
6839 self._blocker_parents.remove(pkg)
6841 for parent in self._blocker_parents.parent_nodes(blocker):
6842 unresolved_blocks = False
6843 depends_on_order = set()
6844 for pkg in blocked_initial:
6845 if pkg.slot_atom == parent.slot_atom:
6846 # TODO: Support blocks within slots in cases where it
6847 # might make sense. For example, a new version might
6848 # require that the old version be uninstalled at build
6851 if parent.installed:
6852 # Two currently installed packages conflict with
6853 # eachother. Ignore this case since the damage
6854 # is already done and this would be likely to
6855 # confuse users if displayed like a normal blocker.
6858 self._blocked_pkgs.add(pkg, blocker)
6860 if parent.operation == "merge":
6861 # Maybe the blocked package can be replaced or simply
6862 # unmerged to resolve this block.
6863 depends_on_order.add((pkg, parent))
6865 # None of the above blocker resolutions techniques apply,
6866 # so apparently this one is unresolvable.
6867 unresolved_blocks = True
6868 for pkg in blocked_final:
6869 if pkg.slot_atom == parent.slot_atom:
6870 # TODO: Support blocks within slots.
6872 if parent.operation == "nomerge" and \
6873 pkg.operation == "nomerge":
6874 # This blocker will be handled the next time that a
6875 # merge of either package is triggered.
6878 self._blocked_pkgs.add(pkg, blocker)
6880 # Maybe the blocking package can be
6881 # unmerged to resolve this block.
6882 if parent.operation == "merge" and pkg.installed:
6883 depends_on_order.add((pkg, parent))
6885 elif parent.operation == "nomerge":
6886 depends_on_order.add((parent, pkg))
6888 # None of the above blocker resolutions techniques apply,
6889 # so apparently this one is unresolvable.
6890 unresolved_blocks = True
6892 # Make sure we don't unmerge any package that have been pulled
6894 if not unresolved_blocks and depends_on_order:
6895 for inst_pkg, inst_task in depends_on_order:
6896 if self.digraph.contains(inst_pkg) and \
6897 self.digraph.parent_nodes(inst_pkg):
6898 unresolved_blocks = True
6901 if not unresolved_blocks and depends_on_order:
6902 for inst_pkg, inst_task in depends_on_order:
6903 uninst_task = Package(built=inst_pkg.built,
6904 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6905 metadata=inst_pkg.metadata,
6906 operation="uninstall",
6907 root_config=inst_pkg.root_config,
6908 type_name=inst_pkg.type_name)
6909 self._pkg_cache[uninst_task] = uninst_task
6910 # Enforce correct merge order with a hard dep.
6911 self.digraph.addnode(uninst_task, inst_task,
6912 priority=BlockerDepPriority.instance)
6913 # Count references to this blocker so that it can be
6914 # invalidated after nodes referencing it have been
6916 self._blocker_uninstalls.addnode(uninst_task, blocker)
6917 if not unresolved_blocks and not depends_on_order:
6918 self._irrelevant_blockers.add(blocker, parent)
6919 self._blocker_parents.remove_edge(blocker, parent)
6920 if not self._blocker_parents.parent_nodes(blocker):
6921 self._blocker_parents.remove(blocker)
6922 if not self._blocker_parents.child_nodes(parent):
6923 self._blocker_parents.remove(parent)
6924 if unresolved_blocks:
6925 self._unsolvable_blockers.add(blocker, parent)
# Decide whether unresolved blocker conflicts may be tolerated for this
# run: any of the listed options means no packages will actually be
# merged onto the live system, so blockers cannot do harm.
# NOTE(review): this listing omits the accumulator initialization and
# the break/return lines of this method -- confirm against the
# complete file before editing.
6929 def _accept_blocker_conflicts(self):
6931 for x in ("--buildpkgonly", "--fetchonly",
6932 "--fetch-all-uri", "--nodeps"):
# The presence of any single one of these options is sufficient.
6933 if x in self.myopts:
6938 def _merge_order_bias(self, mygraph):
6940 For optimal leaf node selection, promote deep system runtime deps and
6941 order nodes from highest to lowest overall reference count.
# node_info maps each node to its parent (reverse-dependency) count.
# NOTE(review): the node_info dict initialization is not visible in
# this listing -- confirm against the complete file.
6945 for node in mygraph.order:
6946 node_info[node] = len(mygraph.parent_nodes(node))
6947 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# cmp-style comparator used via cmp_sort_key below.  Structure visible
# here: uninstall operations are special-cased first, then membership
# in deep_system_deps is compared, then reference counts break ties.
# NOTE(review): the return statements inside the nested if blocks are
# omitted from this listing -- confirm the exact ordering intent
# against the complete file.
6949 def cmp_merge_preference(node1, node2):
6951 if node1.operation == 'uninstall':
6952 if node2.operation == 'uninstall':
6956 if node2.operation == 'uninstall':
6957 if node1.operation == 'uninstall':
6961 node1_sys = node1 in deep_system_deps
6962 node2_sys = node2 in deep_system_deps
6963 if node1_sys != node2_sys:
# Equal on all other criteria: a higher parent count sorts earlier.
6968 return node_info[node2] - node_info[node1]
6970 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return the serialized merge list, resolving conflicts and retrying
# serialization until the cache is populated.
# NOTE(review): this listing omits the try: line paired with the
# "except self._serialize_tasks_retry" clause below, the loop continue,
# and the reversed()/return handling at the end -- confirm against the
# complete file.  Also note the parameter name shadows the builtin
# reversed(), which doc-only edits cannot fix.
6972 def altlist(self, reversed=False):
6974 while self._serialized_tasks_cache is None:
6975 self._resolve_conflicts()
6977 self._serialized_tasks_cache, self._scheduler_graph = \
6978 self._serialize_tasks()
6979 except self._serialize_tasks_retry:
# Copy the cached list so callers cannot mutate the cache in place.
6982 retlist = self._serialized_tasks_cache[:]
# Return self._scheduler_graph, building it on demand.
# NOTE(review): the listing omits the line inside the "if" below that
# populates the graph (presumably a self.altlist() call) -- confirm
# against the complete file.
6987 def schedulerGraph(self):
6989 The scheduler graph is identical to the normal one except that
6990 uninstall edges are reversed in specific cases that require
6991 conflicting packages to be temporarily installed simultaneously.
6992 This is intended for use by the Scheduler in it's parallelization
6993 logic. It ensures that temporary simultaneous installation of
6994 conflicting packages is avoided when appropriate (especially for
6995 !!atom blockers), but allowed in specific cases that require it.
6997 Note that this method calls break_refs() which alters the state of
6998 internal Package instances such that this depgraph instance should
6999 not be used to perform any more calculations.
7001 if self._scheduler_graph is None:
# break_refs() severs back-references, so this depgraph instance must
# not be used for further calculations after this call (see docstring).
7003 self.break_refs(self._scheduler_graph.order)
7004 return self._scheduler_graph
7006 def break_refs(self, nodes):
7008 Take a mergelist like that returned from self.altlist() and
7009 break any references that lead back to the depgraph. This is
7010 useful if you want to hold references to packages without
7011 also holding the depgraph on the heap.
# NOTE(review): the "for node in nodes:" loop header is omitted from
# this listing -- the hasattr check below clearly runs once per node;
# confirm against the complete file.
7014 if hasattr(node, "root_config"):
7015 # The FakeVartree references the _package_cache which
7016 # references the depgraph. So that Package instances don't
7017 # hold the depgraph and FakeVartree on the heap, replace
7018 # the RootConfig that references the FakeVartree with the
7019 # original RootConfig instance which references the actual
7021 node.root_config = \
7022 self._trees_orig[node.root_config.root]["root_config"]
7024 def _resolve_conflicts(self):
7025 if not self._complete_graph():
7026 raise self._unknown_internal_error()
7028 if not self.validate_blockers():
7029 raise self._unknown_internal_error()
7031 if self._slot_collision_info:
7032 self._process_slot_conflicts()
# Compute a serialized merge ordering from self.digraph, returning
# (retlist, scheduler_graph).  Repeatedly selects "leaf" nodes from a
# working copy of the graph, progressively loosening dependency
# priority criteria when stuck, and schedules Uninstall tasks to break
# blocker conflicts.  Raises self._serialize_tasks_retry to request a
# re-run with a completed graph, or self._unknown_internal_error on
# unresolvable blocker/slot conflicts or circular dependencies.
# NOTE(review): this listing omits many original lines (blank lines,
# else: branches, break/continue statements, and some assignments such
# as the retlist and asap_nodes initializations) -- consult the
# complete file before making any code change here.
7034 def _serialize_tasks(self):
7036 if "--debug" in self.myopts:
7037 writemsg("\ndigraph:\n\n", noiselevel=-1)
7038 self.digraph.debug_print()
7039 writemsg("\n", noiselevel=-1)
# Work on copies: mygraph is consumed by node selection below, while
# scheduler_graph keeps the full edge set (with uninstall edges
# selectively reversed) for the Scheduler.
7041 scheduler_graph = self.digraph.copy()
7042 mygraph=self.digraph.copy()
7043 # Prune "nomerge" root nodes if nothing depends on them, since
7044 # otherwise they slow down merge order calculation. Don't remove
7045 # non-root nodes since they help optimize merge order in some cases
7046 # such as revdep-rebuild.
7047 removed_nodes = set()
7049 for node in mygraph.root_nodes():
7050 if not isinstance(node, Package) or \
7051 node.installed or node.onlydeps:
7052 removed_nodes.add(node)
7054 self.spinner.update()
7055 mygraph.difference_update(removed_nodes)
7056 if not removed_nodes:
7058 removed_nodes.clear()
7059 self._merge_order_bias(mygraph)
7060 def cmp_circular_bias(n1, n2):
7062 RDEPEND is stronger than PDEPEND and this function
7063 measures such a strength bias within a circular
7064 dependency relationship.
7066 n1_n2_medium = n2 in mygraph.child_nodes(n1,
7067 ignore_priority=priority_range.ignore_medium_soft)
7068 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7069 ignore_priority=priority_range.ignore_medium_soft)
7070 if n1_n2_medium == n2_n1_medium:
7075 myblocker_uninstalls = self._blocker_uninstalls.copy()
7077 # Contains uninstall tasks that have been scheduled to
7078 # occur after overlapping blockers have been installed.
7079 scheduled_uninstalls = set()
7080 # Contains any Uninstall tasks that have been ignored
7081 # in order to avoid the circular deps code path. These
7082 # correspond to blocker conflicts that could not be
7084 ignored_uninstall_tasks = set()
7085 have_uninstall_task = False
7086 complete = "complete" in self.myparams
7089 def get_nodes(**kwargs):
7091 Returns leaf nodes excluding Uninstall instances
7092 since those should be executed as late as possible.
7094 return [node for node in mygraph.leaf_nodes(**kwargs) \
7095 if isinstance(node, Package) and \
7096 (node.operation != "uninstall" or \
7097 node in scheduled_uninstalls)]
7099 # sys-apps/portage needs special treatment if ROOT="/"
7100 running_root = self._running_root.root
7101 from portage.const import PORTAGE_PACKAGE_ATOM
7102 runtime_deps = InternalPackageSet(
7103 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7104 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7105 PORTAGE_PACKAGE_ATOM)
7106 replacement_portage = self.mydbapi[running_root].match_pkgs(
7107 PORTAGE_PACKAGE_ATOM)
# Normalize the match lists to a single package or None.
7110 running_portage = running_portage[0]
7112 running_portage = None
7114 if replacement_portage:
7115 replacement_portage = replacement_portage[0]
7117 replacement_portage = None
7119 if replacement_portage == running_portage:
7120 replacement_portage = None
7122 if replacement_portage is not None:
7123 # update from running_portage to replacement_portage asap
7124 asap_nodes.append(replacement_portage)
7126 if running_portage is not None:
7128 portage_rdepend = self._select_atoms_highest_available(
7129 running_root, running_portage.metadata["RDEPEND"],
7130 myuse=running_portage.use.enabled,
7131 parent=running_portage, strict=False)
7132 except portage.exception.InvalidDependString, e:
7133 portage.writemsg("!!! Invalid RDEPEND in " + \
7134 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7135 (running_root, running_portage.cpv, e), noiselevel=-1)
7137 portage_rdepend = []
# Collect portage's non-blocker runtime deps; used later to protect
# them from inappropriate uninstalls.
7138 runtime_deps.update(atom for atom in portage_rdepend \
7139 if not atom.startswith("!"))
7141 def gather_deps(ignore_priority, mergeable_nodes,
7142 selected_nodes, node):
7144 Recursively gather a group of nodes that RDEPEND on
7145 eachother. This ensures that they are merged as a group
7146 and get their RDEPENDs satisfied as soon as possible.
7148 if node in selected_nodes:
7150 if node not in mergeable_nodes:
7152 if node == replacement_portage and \
7153 mygraph.child_nodes(node,
7154 ignore_priority=priority_range.ignore_medium_soft):
7155 # Make sure that portage always has all of it's
7156 # RDEPENDs installed first.
7158 selected_nodes.add(node)
7159 for child in mygraph.child_nodes(node,
7160 ignore_priority=ignore_priority):
7161 if not gather_deps(ignore_priority,
7162 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally ignore blocker-uninstall edges.
7166 def ignore_uninst_or_med(priority):
7167 if priority is BlockerDepPriority.instance:
7169 return priority_range.ignore_medium(priority)
7171 def ignore_uninst_or_med_soft(priority):
7172 if priority is BlockerDepPriority.instance:
7174 return priority_range.ignore_medium_soft(priority)
7176 tree_mode = "--tree" in self.myopts
7177 # Tracks whether or not the current iteration should prefer asap_nodes
7178 # if available. This is set to False when the previous iteration
7179 # failed to select any nodes. It is reset whenever nodes are
7180 # successfully selected.
7183 # Controls whether or not the current iteration should drop edges that
7184 # are "satisfied" by installed packages, in order to solve circular
7185 # dependencies. The deep runtime dependencies of installed packages are
7186 # not checked in this case (bug #199856), so it must be avoided
7187 # whenever possible.
7188 drop_satisfied = False
7190 # State of variables for successive iterations that loosen the
7191 # criteria for node selection.
7193 # iteration prefer_asap drop_satisfied
7198 # If no nodes are selected on the last iteration, it is due to
7199 # unresolved blockers or circular dependencies.
# Main selection loop: each pass selects at least one node or loosens
# the selection criteria; total failure falls through to the circular
# dependency error at the bottom of the loop.
7201 while not mygraph.empty():
7202 self.spinner.update()
7203 selected_nodes = None
7204 ignore_priority = None
7205 if drop_satisfied or (prefer_asap and asap_nodes):
7206 priority_range = DepPrioritySatisfiedRange
7208 priority_range = DepPriorityNormalRange
7209 if prefer_asap and asap_nodes:
7210 # ASAP nodes are merged before their soft deps. Go ahead and
7211 # select root nodes here if necessary, since it's typical for
7212 # the parent to have been removed from the graph already.
7213 asap_nodes = [node for node in asap_nodes \
7214 if mygraph.contains(node)]
7215 for node in asap_nodes:
7216 if not mygraph.child_nodes(node,
7217 ignore_priority=priority_range.ignore_soft):
7218 selected_nodes = [node]
7219 asap_nodes.remove(node)
7221 if not selected_nodes and \
7222 not (prefer_asap and asap_nodes):
7223 for i in xrange(priority_range.NONE,
7224 priority_range.MEDIUM_SOFT + 1):
7225 ignore_priority = priority_range.ignore_priority[i]
7226 nodes = get_nodes(ignore_priority=ignore_priority)
7228 # If there is a mix of uninstall nodes with other
7229 # types, save the uninstall nodes for later since
7230 # sometimes a merge node will render an uninstall
7231 # node unnecessary (due to occupying the same slot),
7232 # and we want to avoid executing a separate uninstall
7233 # task in that case.
7235 good_uninstalls = []
7236 with_some_uninstalls_excluded = []
7238 if node.operation == "uninstall":
7239 slot_node = self.mydbapi[node.root
7240 ].match_pkgs(node.slot_atom)
7242 slot_node[0].operation == "merge":
7244 good_uninstalls.append(node)
7245 with_some_uninstalls_excluded.append(node)
7247 nodes = good_uninstalls
7248 elif with_some_uninstalls_excluded:
7249 nodes = with_some_uninstalls_excluded
7253 if ignore_priority is None and not tree_mode:
7254 # Greedily pop all of these nodes since no
7255 # relationship has been ignored. This optimization
7256 # destroys --tree output, so it's disabled in tree
7258 selected_nodes = nodes
7260 # For optimal merge order:
7261 # * Only pop one node.
7262 # * Removing a root node (node without a parent)
7263 # will not produce a leaf node, so avoid it.
7264 # * It's normal for a selected uninstall to be a
7265 # root node, so don't check them for parents.
7267 if node.operation == "uninstall" or \
7268 mygraph.parent_nodes(node):
7269 selected_nodes = [node]
7275 if not selected_nodes:
7276 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7278 mergeable_nodes = set(nodes)
7279 if prefer_asap and asap_nodes:
7281 for i in xrange(priority_range.SOFT,
7282 priority_range.MEDIUM_SOFT + 1):
7283 ignore_priority = priority_range.ignore_priority[i]
7285 if not mygraph.parent_nodes(node):
7287 selected_nodes = set()
7288 if gather_deps(ignore_priority,
7289 mergeable_nodes, selected_nodes, node):
7292 selected_nodes = None
7296 if prefer_asap and asap_nodes and not selected_nodes:
7297 # We failed to find any asap nodes to merge, so ignore
7298 # them for the next iteration.
7302 if selected_nodes and ignore_priority is not None:
7303 # Try to merge ignored medium_soft deps as soon as possible
7304 # if they're not satisfied by installed packages.
7305 for node in selected_nodes:
7306 children = set(mygraph.child_nodes(node))
7307 soft = children.difference(
7308 mygraph.child_nodes(node,
7309 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7310 medium_soft = children.difference(
7311 mygraph.child_nodes(node,
7313 DepPrioritySatisfiedRange.ignore_medium_soft))
7314 medium_soft.difference_update(soft)
7315 for child in medium_soft:
7316 if child in selected_nodes:
7318 if child in asap_nodes:
7320 asap_nodes.append(child)
7322 if selected_nodes and len(selected_nodes) > 1:
7323 if not isinstance(selected_nodes, list):
7324 selected_nodes = list(selected_nodes)
7325 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7327 if not selected_nodes and not myblocker_uninstalls.is_empty():
7328 # An Uninstall task needs to be executed in order to
7329 # avoid conflict if possible.
7332 priority_range = DepPrioritySatisfiedRange
7334 priority_range = DepPriorityNormalRange
7336 mergeable_nodes = get_nodes(
7337 ignore_priority=ignore_uninst_or_med)
7339 min_parent_deps = None
7341 for task in myblocker_uninstalls.leaf_nodes():
7342 # Do some sanity checks so that system or world packages
7343 # don't get uninstalled inappropriately here (only really
7344 # necessary when --complete-graph has not been enabled).
7346 if task in ignored_uninstall_tasks:
7349 if task in scheduled_uninstalls:
7350 # It's been scheduled but it hasn't
7351 # been executed yet due to dependence
7352 # on installation of blocking packages.
7355 root_config = self.roots[task.root]
7356 inst_pkg = self._pkg_cache[
7357 ("installed", task.root, task.cpv, "nomerge")]
7359 if self.digraph.contains(inst_pkg):
7362 forbid_overlap = False
7363 heuristic_overlap = False
7364 for blocker in myblocker_uninstalls.parent_nodes(task):
7365 if blocker.eapi in ("0", "1"):
7366 heuristic_overlap = True
7367 elif blocker.atom.blocker.overlap.forbid:
7368 forbid_overlap = True
7370 if forbid_overlap and running_root == task.root:
7373 if heuristic_overlap and running_root == task.root:
7374 # Never uninstall sys-apps/portage or it's essential
7375 # dependencies, except through replacement.
7377 runtime_dep_atoms = \
7378 list(runtime_deps.iterAtomsForPackage(task))
7379 except portage.exception.InvalidDependString, e:
7380 portage.writemsg("!!! Invalid PROVIDE in " + \
7381 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7382 (task.root, task.cpv, e), noiselevel=-1)
7386 # Don't uninstall a runtime dep if it appears
7387 # to be the only suitable one installed.
7389 vardb = root_config.trees["vartree"].dbapi
7390 for atom in runtime_dep_atoms:
7391 other_version = None
7392 for pkg in vardb.match_pkgs(atom):
7393 if pkg.cpv == task.cpv and \
7394 pkg.metadata["COUNTER"] == \
7395 task.metadata["COUNTER"]:
7399 if other_version is None:
7405 # For packages in the system set, don't take
7406 # any chances. If the conflict can't be resolved
7407 # by a normal replacement operation then abort.
7410 for atom in root_config.sets[
7411 "system"].iterAtomsForPackage(task):
7414 except portage.exception.InvalidDependString, e:
7415 portage.writemsg("!!! Invalid PROVIDE in " + \
7416 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7417 (task.root, task.cpv, e), noiselevel=-1)
7423 # Note that the world check isn't always
7424 # necessary since self._complete_graph() will
7425 # add all packages from the system and world sets to the
7426 # graph. This just allows unresolved conflicts to be
7427 # detected as early as possible, which makes it possible
7428 # to avoid calling self._complete_graph() when it is
7429 # unnecessary due to blockers triggering an abortion.
7431 # For packages in the world set, go ahead an uninstall
7432 # when necessary, as long as the atom will be satisfied
7433 # in the final state.
7434 graph_db = self.mydbapi[task.root]
7437 for atom in root_config.sets[
7438 "world"].iterAtomsForPackage(task):
7440 for pkg in graph_db.match_pkgs(atom):
7447 self._blocked_world_pkgs[inst_pkg] = atom
7449 except portage.exception.InvalidDependString, e:
7450 portage.writemsg("!!! Invalid PROVIDE in " + \
7451 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7452 (task.root, task.cpv, e), noiselevel=-1)
7458 # Check the deps of parent nodes to ensure that
7459 # the chosen task produces a leaf node. Maybe
7460 # this can be optimized some more to make the
7461 # best possible choice, but the current algorithm
7462 # is simple and should be near optimal for most
7464 mergeable_parent = False
7466 for parent in mygraph.parent_nodes(task):
7467 parent_deps.update(mygraph.child_nodes(parent,
7468 ignore_priority=priority_range.ignore_medium_soft))
7469 if parent in mergeable_nodes and \
7470 gather_deps(ignore_uninst_or_med_soft,
7471 mergeable_nodes, set(), parent):
7472 mergeable_parent = True
7474 if not mergeable_parent:
# Prefer the uninstall task whose parents have the fewest
# outstanding deps (closest to producing a leaf node).
7477 parent_deps.remove(task)
7478 if min_parent_deps is None or \
7479 len(parent_deps) < min_parent_deps:
7480 min_parent_deps = len(parent_deps)
7483 if uninst_task is not None:
7484 # The uninstall is performed only after blocking
7485 # packages have been merged on top of it. File
7486 # collisions between blocking packages are detected
7487 # and removed from the list of files to be uninstalled.
7488 scheduled_uninstalls.add(uninst_task)
7489 parent_nodes = mygraph.parent_nodes(uninst_task)
7491 # Reverse the parent -> uninstall edges since we want
7492 # to do the uninstall after blocking packages have
7493 # been merged on top of it.
7494 mygraph.remove(uninst_task)
7495 for blocked_pkg in parent_nodes:
7496 mygraph.add(blocked_pkg, uninst_task,
7497 priority=BlockerDepPriority.instance)
7498 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7499 scheduler_graph.add(blocked_pkg, uninst_task,
7500 priority=BlockerDepPriority.instance)
7502 # Reset the state variables for leaf node selection and
7503 # continue trying to select leaf nodes.
7505 drop_satisfied = False
7508 if not selected_nodes:
7509 # Only select root nodes as a last resort. This case should
7510 # only trigger when the graph is nearly empty and the only
7511 # remaining nodes are isolated (no parents or children). Since
7512 # the nodes must be isolated, ignore_priority is not needed.
7513 selected_nodes = get_nodes()
7515 if not selected_nodes and not drop_satisfied:
7516 drop_satisfied = True
7519 if not selected_nodes and not myblocker_uninstalls.is_empty():
7520 # If possible, drop an uninstall task here in order to avoid
7521 # the circular deps code path. The corresponding blocker will
7522 # still be counted as an unresolved conflict.
7524 for node in myblocker_uninstalls.leaf_nodes():
7526 mygraph.remove(node)
7531 ignored_uninstall_tasks.add(node)
7534 if uninst_task is not None:
7535 # Reset the state variables for leaf node selection and
7536 # continue trying to select leaf nodes.
7538 drop_satisfied = False
7541 if not selected_nodes:
7542 self._circular_deps_for_display = mygraph
7543 raise self._unknown_internal_error()
7545 # At this point, we've succeeded in selecting one or more nodes, so
7546 # reset state variables for leaf node selection.
7548 drop_satisfied = False
7550 mygraph.difference_update(selected_nodes)
7552 for node in selected_nodes:
7553 if isinstance(node, Package) and \
7554 node.operation == "nomerge":
7557 # Handle interactions between blockers
7558 # and uninstallation tasks.
7559 solved_blockers = set()
7561 if isinstance(node, Package) and \
7562 "uninstall" == node.operation:
7563 have_uninstall_task = True
7566 vardb = self.trees[node.root]["vartree"].dbapi
7567 previous_cpv = vardb.match(node.slot_atom)
7569 # The package will be replaced by this one, so remove
7570 # the corresponding Uninstall task if necessary.
7571 previous_cpv = previous_cpv[0]
7573 ("installed", node.root, previous_cpv, "uninstall")
7575 mygraph.remove(uninst_task)
7579 if uninst_task is not None and \
7580 uninst_task not in ignored_uninstall_tasks and \
7581 myblocker_uninstalls.contains(uninst_task):
7582 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7583 myblocker_uninstalls.remove(uninst_task)
7584 # Discard any blockers that this Uninstall solves.
7585 for blocker in blocker_nodes:
7586 if not myblocker_uninstalls.child_nodes(blocker):
7587 myblocker_uninstalls.remove(blocker)
7588 solved_blockers.add(blocker)
7590 retlist.append(node)
7592 if (isinstance(node, Package) and \
7593 "uninstall" == node.operation) or \
7594 (uninst_task is not None and \
7595 uninst_task in scheduled_uninstalls):
7596 # Include satisfied blockers in the merge list
7597 # since the user might be interested and also
7598 # it serves as an indicator that blocking packages
7599 # will be temporarily installed simultaneously.
7600 for blocker in solved_blockers:
7601 retlist.append(Blocker(atom=blocker.atom,
7602 root=blocker.root, eapi=blocker.eapi,
7605 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7606 for node in myblocker_uninstalls.root_nodes():
7607 unsolvable_blockers.add(node)
7609 for blocker in unsolvable_blockers:
7610 retlist.append(blocker)
7612 # If any Uninstall tasks need to be executed in order
7613 # to avoid a conflict, complete the graph with any
7614 # dependencies that may have been initially
7615 # neglected (to ensure that unsafe Uninstall tasks
7616 # are properly identified and blocked from execution).
7617 if have_uninstall_task and \
7619 not unsolvable_blockers:
7620 self.myparams.add("complete")
7621 raise self._serialize_tasks_retry("")
# Unresolvable blockers or slot collisions abort serialization, but
# the partial results are cached for display purposes.
7623 if unsolvable_blockers and \
7624 not self._accept_blocker_conflicts():
7625 self._unsatisfied_blockers_for_display = unsolvable_blockers
7626 self._serialized_tasks_cache = retlist[:]
7627 self._scheduler_graph = scheduler_graph
7628 raise self._unknown_internal_error()
7630 if self._slot_collision_info and \
7631 not self._accept_blocker_conflicts():
7632 self._serialized_tasks_cache = retlist[:]
7633 self._scheduler_graph = scheduler_graph
7634 raise self._unknown_internal_error()
7636 return retlist, scheduler_graph
# Display a reduced dependency graph for the circular-dependency error
# case: repeatedly strip root nodes (they cannot be part of a cycle),
# show the remaining nodes with --tree formatting, then dump the raw
# graph and print a hint about disabling USE flags.
# NOTE(review): this listing omits several lines here (e.g. the
# display_order initialization, the loop break, and the noiselevel
# continuation after line 7670) -- confirm against the complete file.
7638 def _show_circular_deps(self, mygraph):
7639 # No leaf nodes are available, so we have a circular
7640 # dependency panic situation. Reduce the noise level to a
7641 # minimum via repeated elimination of root nodes since they
7642 # have no parents and thus can not be part of a cycle.
7644 root_nodes = mygraph.root_nodes(
7645 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7648 mygraph.difference_update(root_nodes)
7649 # Display the USE flags that are enabled on nodes that are part
7650 # of dependency cycles in case that helps the user decide to
7651 # disable some of them.
7653 tempgraph = mygraph.copy()
7654 while not tempgraph.empty():
7655 nodes = tempgraph.leaf_nodes()
7657 node = tempgraph.order[0]
7660 display_order.append(node)
7661 tempgraph.remove(node)
7662 display_order.reverse()
# Force verbose tree output regardless of the user's display options.
7663 self.myopts.pop("--quiet", None)
7664 self.myopts.pop("--verbose", None)
7665 self.myopts["--tree"] = True
7666 portage.writemsg("\n\n", noiselevel=-1)
7667 self.display(display_order)
7668 prefix = colorize("BAD", " * ")
7669 portage.writemsg("\n", noiselevel=-1)
7670 portage.writemsg(prefix + "Error: circular dependencies:\n",
7672 portage.writemsg("\n", noiselevel=-1)
7673 mygraph.debug_print()
7674 portage.writemsg("\n", noiselevel=-1)
7675 portage.writemsg(prefix + "Note that circular dependencies " + \
7676 "can often be avoided by temporarily\n", noiselevel=-1)
7677 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7678 "optional dependencies.\n", noiselevel=-1)
7680 def _show_merge_list(self):
7681 if self._serialized_tasks_cache is not None and \
7682 not (self._displayed_list and \
7683 (self._displayed_list == self._serialized_tasks_cache or \
7684 self._displayed_list == \
7685 list(reversed(self._serialized_tasks_cache)))):
7686 display_list = self._serialized_tasks_cache[:]
7687 if "--tree" in self.myopts:
7688 display_list.reverse()
7689 self.display(display_list)
# Print an error for unsatisfied blockers: show the merge list, then
# list each conflicting package along with a pruned subset of the
# parents that pulled it in, and finally point the user at the blocker
# documentation.
# NOTE(review): this listing omits the initializations of
# conflict_pkgs, pruned_pkgs, msg, indent, max_parents and pruned_list,
# plus several break/continue lines -- confirm against the complete
# file before editing.
7691 def _show_unsatisfied_blockers(self, blockers):
7692 self._show_merge_list()
7693 msg = "Error: The above package list contains " + \
7694 "packages which cannot be installed " + \
7695 "at the same time on the same system."
7696 prefix = colorize("BAD", " * ")
7697 from textwrap import wrap
7698 portage.writemsg("\n", noiselevel=-1)
7699 for line in wrap(msg, 70):
7700 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7702 # Display the conflicting packages along with the packages
7703 # that pulled them in. This is helpful for troubleshooting
7704 # cases in which blockers don't solve automatically and
7705 # the reasons are not apparent from the normal merge list
7709 for blocker in blockers:
7710 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7711 self._blocker_parents.parent_nodes(blocker)):
7712 parent_atoms = self._parent_atoms.get(pkg)
7713 if not parent_atoms:
# Fall back to the world-set atom recorded during serialization.
7714 atom = self._blocked_world_pkgs.get(pkg)
7715 if atom is not None:
7716 parent_atoms = set([("@world", atom)])
7718 conflict_pkgs[pkg] = parent_atoms
7721 # Reduce noise by pruning packages that are only
7722 # pulled in by other conflict packages.
7724 for pkg, parent_atoms in conflict_pkgs.iteritems():
7725 relevant_parent = False
7726 for parent, atom in parent_atoms:
7727 if parent not in conflict_pkgs:
7728 relevant_parent = True
7730 if not relevant_parent:
7731 pruned_pkgs.add(pkg)
7732 for pkg in pruned_pkgs:
7733 del conflict_pkgs[pkg]
7739 # Max number of parents shown, to avoid flooding the display.
7741 for pkg, parent_atoms in conflict_pkgs.iteritems():
7745 # Prefer packages that are not directly involved in a conflict.
7746 for parent_atom in parent_atoms:
7747 if len(pruned_list) >= max_parents:
7749 parent, atom = parent_atom
7750 if parent not in conflict_pkgs:
7751 pruned_list.add(parent_atom)
# Second pass fills any remaining display slots with conflict parents.
7753 for parent_atom in parent_atoms:
7754 if len(pruned_list) >= max_parents:
7756 pruned_list.add(parent_atom)
7758 omitted_parents = len(parent_atoms) - len(pruned_list)
7759 msg.append(indent + "%s pulled in by\n" % pkg)
7761 for parent_atom in pruned_list:
7762 parent, atom = parent_atom
7763 msg.append(2*indent)
7764 if isinstance(parent,
7765 (PackageArg, AtomArg)):
7766 # For PackageArg and AtomArg types, it's
7767 # redundant to display the atom attribute.
7768 msg.append(str(parent))
7770 # Display the specific atom from SetArg or
7772 msg.append("%s required by %s" % (atom, parent))
7776 msg.append(2*indent)
7777 msg.append("(and %d more)\n" % omitted_parents)
7781 sys.stderr.write("".join(msg))
7784 if "--quiet" not in self.myopts:
7785 show_blocker_docs_link()
7787 def display(self, mylist, favorites=[], verbosity=None):
7789 # This is used to prevent display_problems() from
7790 # redundantly displaying this exact same merge list
7791 # again via _show_merge_list().
7792 self._displayed_list = mylist
7794 if verbosity is None:
7795 verbosity = ("--quiet" in self.myopts and 1 or \
7796 "--verbose" in self.myopts and 3 or 2)
7797 favorites_set = InternalPackageSet(favorites)
7798 oneshot = "--oneshot" in self.myopts or \
7799 "--onlydeps" in self.myopts
7800 columns = "--columns" in self.myopts
7805 counters = PackageCounters()
7807 if verbosity == 1 and "--verbose" not in self.myopts:
7808 def create_use_string(*args):
7811 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7813 is_new, reinst_flags,
7814 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7815 alphabetical=("--alphabetical" in self.myopts)):
7823 cur_iuse = set(cur_iuse)
7824 enabled_flags = cur_iuse.intersection(cur_use)
7825 removed_iuse = set(old_iuse).difference(cur_iuse)
7826 any_iuse = cur_iuse.union(old_iuse)
7827 any_iuse = list(any_iuse)
7829 for flag in any_iuse:
7832 reinst_flag = reinst_flags and flag in reinst_flags
7833 if flag in enabled_flags:
7835 if is_new or flag in old_use and \
7836 (all_flags or reinst_flag):
7837 flag_str = red(flag)
7838 elif flag not in old_iuse:
7839 flag_str = yellow(flag) + "%*"
7840 elif flag not in old_use:
7841 flag_str = green(flag) + "*"
7842 elif flag in removed_iuse:
7843 if all_flags or reinst_flag:
7844 flag_str = yellow("-" + flag) + "%"
7847 flag_str = "(" + flag_str + ")"
7848 removed.append(flag_str)
7851 if is_new or flag in old_iuse and \
7852 flag not in old_use and \
7853 (all_flags or reinst_flag):
7854 flag_str = blue("-" + flag)
7855 elif flag not in old_iuse:
7856 flag_str = yellow("-" + flag)
7857 if flag not in iuse_forced:
7859 elif flag in old_use:
7860 flag_str = green("-" + flag) + "*"
7862 if flag in iuse_forced:
7863 flag_str = "(" + flag_str + ")"
7865 enabled.append(flag_str)
7867 disabled.append(flag_str)
7870 ret = " ".join(enabled)
7872 ret = " ".join(enabled + disabled + removed)
7874 ret = '%s="%s" ' % (name, ret)
7877 repo_display = RepoDisplay(self.roots)
7881 mygraph = self.digraph.copy()
7883 # If there are any Uninstall instances, add the corresponding
7884 # blockers to the digraph (useful for --tree display).
7886 executed_uninstalls = set(node for node in mylist \
7887 if isinstance(node, Package) and node.operation == "unmerge")
7889 for uninstall in self._blocker_uninstalls.leaf_nodes():
7890 uninstall_parents = \
7891 self._blocker_uninstalls.parent_nodes(uninstall)
7892 if not uninstall_parents:
7895 # Remove the corresponding "nomerge" node and substitute
7896 # the Uninstall node.
7897 inst_pkg = self._pkg_cache[
7898 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7900 mygraph.remove(inst_pkg)
7905 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7907 inst_pkg_blockers = []
7909 # Break the Package -> Uninstall edges.
7910 mygraph.remove(uninstall)
7912 # Resolution of a package's blockers
7913 # depend on it's own uninstallation.
7914 for blocker in inst_pkg_blockers:
7915 mygraph.add(uninstall, blocker)
7917 # Expand Package -> Uninstall edges into
7918 # Package -> Blocker -> Uninstall edges.
7919 for blocker in uninstall_parents:
7920 mygraph.add(uninstall, blocker)
7921 for parent in self._blocker_parents.parent_nodes(blocker):
7922 if parent != inst_pkg:
7923 mygraph.add(blocker, parent)
7925 # If the uninstall task did not need to be executed because
7926 # of an upgrade, display Blocker -> Upgrade edges since the
7927 # corresponding Blocker -> Uninstall edges will not be shown.
7929 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7930 if upgrade_node is not None and \
7931 uninstall not in executed_uninstalls:
7932 for blocker in uninstall_parents:
7933 mygraph.add(upgrade_node, blocker)
7935 unsatisfied_blockers = []
7940 if isinstance(x, Blocker) and not x.satisfied:
7941 unsatisfied_blockers.append(x)
7944 if "--tree" in self.myopts:
7945 depth = len(tree_nodes)
7946 while depth and graph_key not in \
7947 mygraph.child_nodes(tree_nodes[depth-1]):
7950 tree_nodes = tree_nodes[:depth]
7951 tree_nodes.append(graph_key)
7952 display_list.append((x, depth, True))
7953 shown_edges.add((graph_key, tree_nodes[depth-1]))
7955 traversed_nodes = set() # prevent endless circles
7956 traversed_nodes.add(graph_key)
# Recursively climbs from current_node to a chain of parents in mygraph,
# recording each traversed node in display_list so that --tree output can
# show why a node was pulled in.  Uses shown_edges / traversed_nodes (from
# the enclosing scope) to avoid re-displaying edges and looping forever.
# NOTE(review): this excerpt is line-sampled — original lines such as
# 7958, 7964, 7970, 7975, 7977, 7982, 7986, 7988-7989, 7996 are absent,
# so some branches ("continue" statements, guards) are not visible here.
7957 def add_parents(current_node, ordered):
7959 # Do not traverse to parents if this node is an
7960 # an argument or a direct member of a set that has
7961 # been specified as an argument (system or world).
7962 if current_node not in self._set_nodes:
7963 parent_nodes = mygraph.parent_nodes(current_node)
7965 child_nodes = set(mygraph.child_nodes(current_node))
7966 selected_parent = None
7967 # First, try to avoid a direct cycle.
7968 for node in parent_nodes:
7969 if not isinstance(node, (Blocker, Package)):
7971 if node not in traversed_nodes and \
7972 node not in child_nodes:
7973 edge = (current_node, node)
7974 if edge in shown_edges:
7976 selected_parent = node
# Second pass: if every non-cyclic candidate was rejected, accept a
# parent even when it forms a direct cycle with current_node.
7978 if not selected_parent:
7979 # A direct cycle is unavoidable.
7980 for node in parent_nodes:
7981 if not isinstance(node, (Blocker, Package)):
7983 if node not in traversed_nodes:
7984 edge = (current_node, node)
7985 if edge in shown_edges:
7987 selected_parent = node
# Record the chosen edge and recurse upward before emitting
# current_node, so ancestors appear before descendants.
7990 shown_edges.add((current_node, selected_parent))
7991 traversed_nodes.add(selected_parent)
7992 add_parents(selected_parent, False)
7993 display_list.append((current_node,
7994 len(tree_nodes), ordered))
7995 tree_nodes.append(current_node)
7997 add_parents(graph_key, True)
7999 display_list.append((x, depth, True))
8000 mylist = display_list
8001 for x in unsatisfied_blockers:
8002 mylist.append((x, 0, True))
8004 last_merge_depth = 0
8005 for i in xrange(len(mylist)-1,-1,-1):
8006 graph_key, depth, ordered = mylist[i]
8007 if not ordered and depth == 0 and i > 0 \
8008 and graph_key == mylist[i-1][0] and \
8009 mylist[i-1][1] == 0:
8010 # An ordered node got a consecutive duplicate when the tree was
8014 if ordered and graph_key[-1] != "nomerge":
8015 last_merge_depth = depth
8017 if depth >= last_merge_depth or \
8018 i < len(mylist) - 1 and \
8019 depth >= mylist[i+1][1]:
8022 from portage import flatten
8023 from portage.dep import use_reduce, paren_reduce
8024 # files to fetch list - avoids counting a same file twice
8025 # in size display (verbose mode)
8028 # Use this set to detect when all the "repoadd" strings are "[0]"
8029 # and disable the entire repo display in this case.
8032 for mylist_index in xrange(len(mylist)):
8033 x, depth, ordered = mylist[mylist_index]
8037 portdb = self.trees[myroot]["porttree"].dbapi
8038 bindb = self.trees[myroot]["bintree"].dbapi
8039 vardb = self.trees[myroot]["vartree"].dbapi
8040 vartree = self.trees[myroot]["vartree"]
8041 pkgsettings = self.pkgsettings[myroot]
8044 indent = " " * depth
8046 if isinstance(x, Blocker):
8048 blocker_style = "PKG_BLOCKER_SATISFIED"
8049 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8051 blocker_style = "PKG_BLOCKER"
8052 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8054 counters.blocks += 1
8056 counters.blocks_satisfied += 1
8057 resolved = portage.key_expand(
8058 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8059 if "--columns" in self.myopts and "--quiet" in self.myopts:
8060 addl += " " + colorize(blocker_style, resolved)
8062 addl = "[%s %s] %s%s" % \
8063 (colorize(blocker_style, "blocks"),
8064 addl, indent, colorize(blocker_style, resolved))
8065 block_parents = self._blocker_parents.parent_nodes(x)
8066 block_parents = set([pnode[2] for pnode in block_parents])
8067 block_parents = ", ".join(block_parents)
8069 addl += colorize(blocker_style,
8070 " (\"%s\" is blocking %s)") % \
8071 (str(x.atom).lstrip("!"), block_parents)
8073 addl += colorize(blocker_style,
8074 " (is blocking %s)") % block_parents
8075 if isinstance(x, Blocker) and x.satisfied:
8080 blockers.append(addl)
8083 pkg_merge = ordered and pkg_status == "merge"
8084 if not pkg_merge and pkg_status == "merge":
8085 pkg_status = "nomerge"
8086 built = pkg_type != "ebuild"
8087 installed = pkg_type == "installed"
8089 metadata = pkg.metadata
8091 repo_name = metadata["repository"]
8092 if pkg_type == "ebuild":
8093 ebuild_path = portdb.findname(pkg_key)
8094 if not ebuild_path: # shouldn't happen
8095 raise portage.exception.PackageNotFound(pkg_key)
8096 repo_path_real = os.path.dirname(os.path.dirname(
8097 os.path.dirname(ebuild_path)))
8099 repo_path_real = portdb.getRepositoryPath(repo_name)
8100 pkg_use = list(pkg.use.enabled)
8102 restrict = flatten(use_reduce(paren_reduce(
8103 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8104 except portage.exception.InvalidDependString, e:
8105 if not pkg.installed:
8106 show_invalid_depstring_notice(x,
8107 pkg.metadata["RESTRICT"], str(e))
8111 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8112 "fetch" in restrict:
8115 counters.restrict_fetch += 1
8116 if portdb.fetch_check(pkg_key, pkg_use):
8119 counters.restrict_fetch_satisfied += 1
8121 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8122 #param is used for -u, where you still *do* want to see when something is being upgraded.
8125 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8126 if vardb.cpv_exists(pkg_key):
8127 addl=" "+yellow("R")+fetch+" "
8130 counters.reinst += 1
8131 elif pkg_status == "uninstall":
8132 counters.uninst += 1
8133 # filter out old-style virtual matches
8134 elif installed_versions and \
8135 portage.cpv_getkey(installed_versions[0]) == \
8136 portage.cpv_getkey(pkg_key):
8137 myinslotlist = vardb.match(pkg.slot_atom)
8138 # If this is the first install of a new-style virtual, we
8139 # need to filter out old-style virtual matches.
8140 if myinslotlist and \
8141 portage.cpv_getkey(myinslotlist[0]) != \
8142 portage.cpv_getkey(pkg_key):
8145 myoldbest = myinslotlist[:]
8147 if not portage.dep.cpvequal(pkg_key,
8148 portage.best([pkg_key] + myoldbest)):
8150 addl += turquoise("U")+blue("D")
8152 counters.downgrades += 1
8155 addl += turquoise("U") + " "
8157 counters.upgrades += 1
8159 # New slot, mark it new.
8160 addl = " " + green("NS") + fetch + " "
8161 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8163 counters.newslot += 1
8165 if "--changelog" in self.myopts:
8166 inst_matches = vardb.match(pkg.slot_atom)
8168 changelogs.extend(self.calc_changelog(
8169 portdb.findname(pkg_key),
8170 inst_matches[0], pkg_key))
8172 addl = " " + green("N") + " " + fetch + " "
8181 forced_flags = set()
8182 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8183 forced_flags.update(pkgsettings.useforce)
8184 forced_flags.update(pkgsettings.usemask)
8186 cur_use = [flag for flag in pkg.use.enabled \
8187 if flag in pkg.iuse.all]
8188 cur_iuse = sorted(pkg.iuse.all)
8190 if myoldbest and myinslotlist:
8191 previous_cpv = myoldbest[0]
8193 previous_cpv = pkg.cpv
8194 if vardb.cpv_exists(previous_cpv):
8195 old_iuse, old_use = vardb.aux_get(
8196 previous_cpv, ["IUSE", "USE"])
8197 old_iuse = list(set(
8198 filter_iuse_defaults(old_iuse.split())))
8200 old_use = old_use.split()
8207 old_use = [flag for flag in old_use if flag in old_iuse]
8209 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8211 use_expand.reverse()
8212 use_expand_hidden = \
8213 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
# Splits a flat list of USE flags into per-USE_EXPAND buckets (e.g.
# "video_cards_radeon" -> ret["VIDEO_CARDS"] = ["radeon"]), tracking which
# of them are forced (present in forced_flags from the enclosing scope).
# NOTE(review): excerpt is line-sampled — the parameter list continuation
# (orig. 8216), the ret/forced initialization (8217-8221), myvals.remove
# and the return statements (8227-8228, 8231, 8233+) are not visible here.
8215 def map_to_use_expand(myvals, forcedFlags=False,
8219 for exp in use_expand:
8222 for val in myvals[:]:
8223 if val.startswith(exp.lower()+"_"):
8224 if val in forced_flags:
# Strip the "<exp>_" prefix when storing the expanded value.
8225 forced[exp].add(val[len(exp)+1:])
8226 ret[exp].append(val[len(exp)+1:])
# Whatever did not match any USE_EXPAND prefix stays plain USE.
8229 forced["USE"] = [val for val in myvals \
8230 if val in forced_flags]
# Presumably drops USE_EXPAND_HIDDEN groups when removeHidden is set
# — the loop body is missing from this excerpt; confirm against the
# full source.
8232 for exp in use_expand_hidden:
8238 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8239 # are the only thing that triggered reinstallation.
8240 reinst_flags_map = {}
8241 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8242 reinst_expand_map = None
8243 if reinstall_for_flags:
8244 reinst_flags_map = map_to_use_expand(
8245 list(reinstall_for_flags), removeHidden=False)
8246 for k in list(reinst_flags_map):
8247 if not reinst_flags_map[k]:
8248 del reinst_flags_map[k]
8249 if not reinst_flags_map.get("USE"):
8250 reinst_expand_map = reinst_flags_map.copy()
8251 reinst_expand_map.pop("USE", None)
8252 if reinst_expand_map and \
8253 not set(reinst_expand_map).difference(
8255 use_expand_hidden = \
8256 set(use_expand_hidden).difference(
8259 cur_iuse_map, iuse_forced = \
8260 map_to_use_expand(cur_iuse, forcedFlags=True)
8261 cur_use_map = map_to_use_expand(cur_use)
8262 old_iuse_map = map_to_use_expand(old_iuse)
8263 old_use_map = map_to_use_expand(old_use)
8266 use_expand.insert(0, "USE")
8268 for key in use_expand:
8269 if key in use_expand_hidden:
8271 verboseadd += create_use_string(key.upper(),
8272 cur_iuse_map[key], iuse_forced[key],
8273 cur_use_map[key], old_iuse_map[key],
8274 old_use_map[key], is_new,
8275 reinst_flags_map.get(key))
8280 if pkg_type == "ebuild" and pkg_merge:
8282 myfilesdict = portdb.getfetchsizes(pkg_key,
8283 useflags=pkg_use, debug=self.edebug)
8284 except portage.exception.InvalidDependString, e:
8285 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8286 show_invalid_depstring_notice(x, src_uri, str(e))
8289 if myfilesdict is None:
8290 myfilesdict="[empty/missing/bad digest]"
8292 for myfetchfile in myfilesdict:
8293 if myfetchfile not in myfetchlist:
8294 mysize+=myfilesdict[myfetchfile]
8295 myfetchlist.append(myfetchfile)
8297 counters.totalsize += mysize
8298 verboseadd += format_size(mysize)
8301 # assign index for a previous version in the same slot
8302 has_previous = False
8303 repo_name_prev = None
8304 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8306 slot_matches = vardb.match(slot_atom)
8309 repo_name_prev = vardb.aux_get(slot_matches[0],
8312 # now use the data to generate output
8313 if pkg.installed or not has_previous:
8314 repoadd = repo_display.repoStr(repo_path_real)
8316 repo_path_prev = None
8318 repo_path_prev = portdb.getRepositoryPath(
8320 if repo_path_prev == repo_path_real:
8321 repoadd = repo_display.repoStr(repo_path_real)
8323 repoadd = "%s=>%s" % (
8324 repo_display.repoStr(repo_path_prev),
8325 repo_display.repoStr(repo_path_real))
8327 repoadd_set.add(repoadd)
8329 xs = [portage.cpv_getkey(pkg_key)] + \
8330 list(portage.catpkgsplit(pkg_key)[2:])
8337 if "COLUMNWIDTH" in self.settings:
8339 mywidth = int(self.settings["COLUMNWIDTH"])
8340 except ValueError, e:
8341 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8343 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8344 self.settings["COLUMNWIDTH"], noiselevel=-1)
8346 oldlp = mywidth - 30
8349 # Convert myoldbest from a list to a string.
8353 for pos, key in enumerate(myoldbest):
8354 key = portage.catpkgsplit(key)[2] + \
8355 "-" + portage.catpkgsplit(key)[3]
8356 if key[-3:] == "-r0":
8358 myoldbest[pos] = key
8359 myoldbest = blue("["+", ".join(myoldbest)+"]")
8362 root_config = self.roots[myroot]
8363 system_set = root_config.sets["system"]
8364 world_set = root_config.sets["world"]
8369 pkg_system = system_set.findAtomForPackage(pkg)
8370 pkg_world = world_set.findAtomForPackage(pkg)
8371 if not (oneshot or pkg_world) and \
8372 myroot == self.target_root and \
8373 favorites_set.findAtomForPackage(pkg):
8374 # Maybe it will be added to world now.
8375 if create_world_atom(pkg, favorites_set, root_config):
8377 except portage.exception.InvalidDependString:
8378 # This is reported elsewhere if relevant.
# Colorizes a package string according to the closed-over pkg_status /
# pkg_system / pkg_world state: distinct color classes for merge vs.
# uninstall vs. nomerge, further specialized for system/world members.
# NOTE(review): excerpt is line-sampled — the branch conditions testing
# pkg_merge / pkg_system / pkg_world (orig. 8382-8383, 8385, 8387,
# 8391-8392, 8394, 8396) are missing; only the return statements remain.
8381 def pkgprint(pkg_str):
8384 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8386 return colorize("PKG_MERGE_WORLD", pkg_str)
8388 return colorize("PKG_MERGE", pkg_str)
8389 elif pkg_status == "uninstall":
8390 return colorize("PKG_UNINSTALL", pkg_str)
8393 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8395 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8397 return colorize("PKG_NOMERGE", pkg_str)
8400 properties = flatten(use_reduce(paren_reduce(
8401 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8402 except portage.exception.InvalidDependString, e:
8403 if not pkg.installed:
8404 show_invalid_depstring_notice(pkg,
8405 pkg.metadata["PROPERTIES"], str(e))
8409 interactive = "interactive" in properties
8410 if interactive and pkg.operation == "merge":
8411 addl = colorize("WARN", "I") + addl[1:]
8413 counters.interactive += 1
8418 if "--columns" in self.myopts:
8419 if "--quiet" in self.myopts:
8420 myprint=addl+" "+indent+pkgprint(pkg_cp)
8421 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8422 myprint=myprint+myoldbest
8423 myprint=myprint+darkgreen("to "+x[1])
8427 myprint = "[%s] %s%s" % \
8428 (pkgprint(pkg_status.ljust(13)),
8429 indent, pkgprint(pkg.cp))
8431 myprint = "[%s %s] %s%s" % \
8432 (pkgprint(pkg.type_name), addl,
8433 indent, pkgprint(pkg.cp))
8434 if (newlp-nc_len(myprint)) > 0:
8435 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8436 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8437 if (oldlp-nc_len(myprint)) > 0:
8438 myprint=myprint+" "*(oldlp-nc_len(myprint))
8439 myprint=myprint+myoldbest
8440 myprint += darkgreen("to " + pkg.root)
8443 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8445 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8446 myprint += indent + pkgprint(pkg_key) + " " + \
8447 myoldbest + darkgreen("to " + myroot)
8449 if "--columns" in self.myopts:
8450 if "--quiet" in self.myopts:
8451 myprint=addl+" "+indent+pkgprint(pkg_cp)
8452 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8453 myprint=myprint+myoldbest
8457 myprint = "[%s] %s%s" % \
8458 (pkgprint(pkg_status.ljust(13)),
8459 indent, pkgprint(pkg.cp))
8461 myprint = "[%s %s] %s%s" % \
8462 (pkgprint(pkg.type_name), addl,
8463 indent, pkgprint(pkg.cp))
8464 if (newlp-nc_len(myprint)) > 0:
8465 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8466 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8467 if (oldlp-nc_len(myprint)) > 0:
8468 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8469 myprint += myoldbest
8472 myprint = "[%s] %s%s %s" % \
8473 (pkgprint(pkg_status.ljust(13)),
8474 indent, pkgprint(pkg.cpv),
8477 myprint = "[%s %s] %s%s %s" % \
8478 (pkgprint(pkg_type), addl, indent,
8479 pkgprint(pkg.cpv), myoldbest)
8481 if columns and pkg.operation == "uninstall":
8483 p.append((myprint, verboseadd, repoadd))
8485 if "--tree" not in self.myopts and \
8486 "--quiet" not in self.myopts and \
8487 not self._opts_no_restart.intersection(self.myopts) and \
8488 pkg.root == self._running_root.root and \
8489 portage.match_from_list(
8490 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8491 not vardb.cpv_exists(pkg.cpv) and \
8492 "--quiet" not in self.myopts:
8493 if mylist_index < len(mylist) - 1:
8494 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8495 p.append(colorize("WARN", " then resume the merge."))
8498 show_repos = repoadd_set and repoadd_set != set(["0"])
8501 if isinstance(x, basestring):
8502 out.write("%s\n" % (x,))
8505 myprint, verboseadd, repoadd = x
8508 myprint += " " + verboseadd
8510 if show_repos and repoadd:
8511 myprint += " " + teal("[%s]" % repoadd)
8513 out.write("%s\n" % (myprint,))
8522 sys.stdout.write(str(repo_display))
8524 if "--changelog" in self.myopts:
8526 for revision,text in changelogs:
8527 print bold('*'+revision)
8528 sys.stdout.write(text)
# Public entry point for reporting graph problems (slot collisions, masked
# packages, unsatisfied deps).  Redirects stdout to stderr around
# _display_problems(), then prints unsatisfied deps to real stdout.
8533 def display_problems(self):
8535 Display problems with the dependency graph such as slot collisions.
8536 This is called internally by display() to show the problems _after_
8537 the merge list where it is most likely to be seen, but if display()
8538 is not going to be called then this method should be called explicitly
8539 to ensure that the user is notified of problems with the graph.
8541 All output goes to stderr, except for unsatisfied dependencies which
8542 go to stdout for parsing by programs such as autounmask.
8545 # Note that show_masked_packages() sends it's output to
8546 # stdout, and some programs such as autounmask parse the
8547 # output in cases when emerge bails out. However, when
8548 # show_masked_packages() is called for installed packages
8549 # here, the message is a warning that is more appropriate
8550 # to send to stderr, so temporarily redirect stdout to
8551 # stderr. TODO: Fix output code so there's a cleaner way
8552 # to redirect everything to stderr.
# NOTE(review): orig. lines 8553-8556 and 8559-8563 are missing from this
# excerpt — presumably they save sys.stdout and restore it in a
# try/finally around the call below; confirm against the full source.
8557 sys.stdout = sys.stderr
8558 self._display_problems()
8564 # This goes to stdout for parsing by programs like autounmask.
8565 for pargs, kwargs in self._unsatisfied_deps_for_display:
8566 self._show_unsatisfied_dep(*pargs, **kwargs)
# Internal worker for display_problems(): emits circular-dependency,
# blocker, slot-collision, world-file, package.provided and
# masked-installed-package diagnostics to stderr.
# NOTE(review): excerpt is line-sampled; various guard/else lines
# (e.g. orig. 8572, 8578, 8593-8595, 8608, 8610, 8613, 8620-8621, etc.)
# are missing, so control flow below is only partially visible.
8568 def _display_problems(self):
8569 if self._circular_deps_for_display is not None:
8570 self._show_circular_deps(
8571 self._circular_deps_for_display)
8573 # The user is only notified of a slot conflict if
8574 # there are no unresolvable blocker conflicts.
8575 if self._unsatisfied_blockers_for_display is not None:
8576 self._show_unsatisfied_blockers(
8577 self._unsatisfied_blockers_for_display)
8579 self._show_slot_collision_notice()
8581 # TODO: Add generic support for "set problem" handlers so that
8582 # the below warnings aren't special cases for world only.
# --- world-file problems: flag args that claim world membership but
# --- whose atom is not a direct member of the world set.
8584 if self._missing_args:
8585 world_problems = False
8586 if "world" in self._sets:
8587 # Filter out indirect members of world (from nested sets)
8588 # since only direct members of world are desired here.
8589 world_set = self.roots[self.target_root].sets["world"]
8590 for arg, atom in self._missing_args:
8591 if arg.name == "world" and atom in world_set:
8592 world_problems = True
8596 sys.stderr.write("\n!!! Problems have been " + \
8597 "detected with your world file\n")
8598 sys.stderr.write("!!! Please run " + \
8599 green("emaint --check world")+"\n\n")
# --- missing ebuilds: arguments for which no visible ebuild exists.
8601 if self._missing_args:
8602 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8603 " Ebuilds for the following packages are either all\n")
8604 sys.stderr.write(colorize("BAD", "!!!") + \
8605 " masked or don't exist:\n")
8606 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8607 self._missing_args) + "\n")
# --- package.provided conflicts: group offending (arg, atom) pairs and
# --- remember which referencing sets pulled each one in.
8609 if self._pprovided_args:
8611 for arg, atom in self._pprovided_args:
8612 if isinstance(arg, SetArg):
8614 arg_atom = (atom, atom)
8617 arg_atom = (arg.arg, atom)
8618 refs = arg_refs.setdefault(arg_atom, [])
8619 if parent not in refs:
8622 msg.append(bad("\nWARNING: "))
8623 if len(self._pprovided_args) > 1:
8624 msg.append("Requested packages will not be " + \
8625 "merged because they are listed in\n")
8627 msg.append("A requested package will not be " + \
8628 "merged because it is listed in\n")
8629 msg.append("package.provided:\n\n")
8630 problems_sets = set()
8631 for (arg, atom), refs in arg_refs.iteritems():
8634 problems_sets.update(refs)
8636 ref_string = ", ".join(["'%s'" % name for name in refs])
8637 ref_string = " pulled in by " + ref_string
8638 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8640 if "world" in problems_sets:
8641 msg.append("This problem can be solved in one of the following ways:\n\n")
8642 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8643 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8644 msg.append(" C) Remove offending entries from package.provided.\n\n")
8645 msg.append("The best course of action depends on the reason that an offending\n")
8646 msg.append("package.provided entry exists.\n\n")
8647 sys.stderr.write("".join(msg))
# --- masked installed packages: collect masking reasons and show them.
8649 masked_packages = []
8650 for pkg in self._masked_installed:
8651 root_config = pkg.root_config
8652 pkgsettings = self.pkgsettings[pkg.root]
8653 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8654 masked_packages.append((root_config, pkgsettings,
8655 pkg.cpv, pkg.metadata, mreasons))
8657 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8658 " The following installed packages are masked:\n")
8659 show_masked_packages(masked_packages)
# Returns the ChangeLog entries between the installed version (current)
# and the version about to be merged (next), read from the ChangeLog file
# next to the ebuild.  Entries newer than `next` and older-or-equal to
# `current` are trimmed away.
# NOTE(review): excerpt is line-sampled — early-return bodies (orig. 8665,
# 8671), the generic except handler (8677-8678) and the final return
# statements (8686, 8691-8696) are not visible here.
8663 def calc_changelog(self,ebuildpath,current,next):
8664 if ebuildpath == None or not os.path.exists(ebuildpath):
# Normalize both versions to "pv-r" form and drop a trivial "-r0" suffix
# so they match the tags produced by find_changelog_tags().
8666 current = '-'.join(portage.catpkgsplit(current)[1:])
8667 if current.endswith('-r0'):
8668 current = current[:-3]
8669 next = '-'.join(portage.catpkgsplit(next)[1:])
8670 if next.endswith('-r0'):
8672 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8674 changelog = open(changelogpath).read()
8675 except SystemExit, e:
8676 raise # Needed else can't exit
8679 divisions = self.find_changelog_tags(changelog)
8680 #print 'XX from',current,'to',next
8681 #for div,text in divisions: print 'XX',div
8682 # skip entries for all revisions above the one we are about to emerge
8683 for i in range(len(divisions)):
8684 if divisions[i][0]==next:
8685 divisions = divisions[i:]
8687 # find out how many entries we are going to display
8688 for i in range(len(divisions)):
8689 if divisions[i][0]==current:
8690 divisions = divisions[:i]
8693 # couldnt find the current revision in the list. display nothing
# Splits a ChangeLog text into (release, entry_text) pairs by repeatedly
# matching "* <version> ..." header lines; strips ".ebuild" and "-r0"
# suffixes from the release tag so tags compare cleanly with versions.
# NOTE(review): excerpt is line-sampled — the initialization of divs /
# release (orig. 8698-8700), the loop construct (8702), the break/return
# (8705, 8714) are not visible here.
8697 def find_changelog_tags(self,changelog):
8701 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# No further header found: whatever text remains belongs to the last
# release seen.
8703 if release is not None:
8704 divs.append((release,changelog))
# A header was found: close out the previous release's text...
8706 if release is not None:
8707 divs.append((release,changelog[:match.start()]))
# ...and start accumulating the next one from the new tag.
8708 changelog = changelog[match.end():]
8709 release = match.group(1)
8710 if release.endswith('.ebuild'):
8711 release = release[:-7]
8712 if release.endswith('-r0'):
8713 release = release[:-3]
8715 def saveNomergeFavorites(self):
8716 """Find atoms in favorites that are not in the mergelist and add them
8717 to the world file if necessary."""
# Bail out for pretend-like modes where the world file must not change.
8718 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8719 "--oneshot", "--onlydeps", "--pretend"):
8720 if x in self.myopts:
8722 root_config = self.roots[self.target_root]
8723 world_set = root_config.sets["world"]
# NOTE(review): excerpt is line-sampled — the lock acquisition body
# (orig. 8727-8729), continue statements, and the unlock/cleanup tail
# (8768+) are not visible; presumably lock() is called and world_locked
# set True when the set supports locking — confirm in full source.
8725 world_locked = False
8726 if hasattr(world_set, "lock"):
8730 if hasattr(world_set, "load"):
8731 world_set.load() # maybe it's changed on disk
8733 args_set = self._sets["args"]
8734 portdb = self.trees[self.target_root]["porttree"].dbapi
8735 added_favorites = set()
# Collect world-candidate atoms from "nomerge" nodes in the graph.
8736 for x in self._set_nodes:
8737 pkg_type, root, pkg_key, pkg_status = x
8738 if pkg_status != "nomerge":
8742 myfavkey = create_world_atom(x, args_set, root_config)
8744 if myfavkey in added_favorites:
8746 added_favorites.add(myfavkey)
8747 except portage.exception.InvalidDependString, e:
8748 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8749 (pkg_key, str(e)), noiselevel=-1)
8750 writemsg("!!! see '%s'\n\n" % os.path.join(
8751 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record nested sets (as SETPREFIX-prefixed names) that qualify as
# world candidates, then write everything to the world file.
8754 for k in self._sets:
8755 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8760 all_added.append(SETPREFIX + k)
8761 all_added.extend(added_favorites)
8764 print ">>> Recording %s in \"world\" favorites file..." % \
8765 colorize("INFORM", str(a))
8767 world_set.update(all_added)
# Rebuilds the dependency graph from a saved "resume" structure
# (mergelist + favorites), validating each entry.  Raises
# portage.exception.PackageNotFound for unavailable packages and
# UnsatisfiedResumeDep when dropped/masked tasks invalidate the list.
# NOTE(review): excerpt is line-sampled throughout — early returns for
# malformed resume_data (orig. 8780-8781, 8784-8785), loop headers,
# else branches and the final return (8930 area) are partially missing.
8772 def loadResumeCommand(self, resume_data, skip_masked=True,
8775 Add a resume command to the graph and validate it in the process. This
8776 will raise a PackageNotFound exception if a package is not available.
# Validate the overall shape of the persisted resume data first.
8779 if not isinstance(resume_data, dict):
8782 mergelist = resume_data.get("mergelist")
8783 if not isinstance(mergelist, list):
8786 fakedb = self.mydbapi
8788 serialized_tasks = []
# Each mergelist entry is [pkg_type, root, cpv, action]; anything else
# is skipped / rejected.
8791 if not (isinstance(x, list) and len(x) == 4):
8793 pkg_type, myroot, pkg_key, action = x
8794 if pkg_type not in self.pkg_tree_map:
8796 if action != "merge":
8798 tree_type = self.pkg_tree_map[pkg_type]
8799 mydb = trees[myroot][tree_type].dbapi
8800 db_keys = list(self._trees_orig[myroot][
8801 tree_type].dbapi._aux_cache_keys)
8803 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8805 # It does no exist or it is corrupt.
8806 if action == "uninstall":
8809 # TODO: log these somewhere
8811 raise portage.exception.PackageNotFound(pkg_key)
# Reconstruct a Package instance equivalent to the one originally
# scheduled, including recomputed USE for ebuilds.
8812 installed = action == "uninstall"
8813 built = pkg_type != "ebuild"
8814 root_config = self.roots[myroot]
8815 pkg = Package(built=built, cpv=pkg_key,
8816 installed=installed, metadata=metadata,
8817 operation=action, root_config=root_config,
8819 if pkg_type == "ebuild":
8820 pkgsettings = self.pkgsettings[myroot]
8821 pkgsettings.setcpv(pkg)
8822 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8823 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8824 self._pkg_cache[pkg] = pkg
# Packages that have since become masked are deferred as masked_tasks
# (or queued for display), depending on skip_masked — the branch lines
# are partially missing from this excerpt.
8826 root_config = self.roots[pkg.root]
8827 if "merge" == pkg.operation and \
8828 not visible(root_config.settings, pkg):
8830 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8832 self._unsatisfied_deps_for_display.append(
8833 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8835 fakedb[myroot].cpv_inject(pkg)
8836 serialized_tasks.append(pkg)
8837 self.spinner.update()
8839 if self._unsatisfied_deps_for_display:
8842 if not serialized_tasks or "--nodeps" in self.myopts:
8843 self._serialized_tasks_cache = serialized_tasks
8844 self._scheduler_graph = self.digraph
8846 self._select_package = self._select_pkg_from_graph
8847 self.myparams.add("selective")
8848 # Always traverse deep dependencies in order to account for
8849 # potentially unsatisfied dependencies of installed packages.
8850 # This is necessary for correct --keep-going or --resume operation
8851 # in case a package from a group of circularly dependent packages
8852 # fails. In this case, a package which has recently been installed
8853 # may have an unsatisfied circular dependency (pulled in by
8854 # PDEPEND, for example). So, even though a package is already
8855 # installed, it may not have all of it's dependencies satisfied, so
8856 # it may not be usable. If such a package is in the subgraph of
8857 # deep depenedencies of a scheduled build, that build needs to
8858 # be cancelled. In order for this type of situation to be
8859 # recognized, deep traversal of dependencies is required.
8860 self.myparams.add("deep")
8862 favorites = resume_data.get("favorites")
8863 args_set = self._sets["args"]
8864 if isinstance(favorites, list):
8865 args = self._load_favorites(favorites)
# Re-add each merge task to the graph so edges are rebuilt.
8869 for task in serialized_tasks:
8870 if isinstance(task, Package) and \
8871 task.operation == "merge":
8872 if not self._add_pkg(task, None):
8875 # Packages for argument atoms need to be explicitly
8876 # added via _add_pkg() so that they are included in the
8877 # digraph (needed at least for --tree display).
8879 for atom in arg.set:
8880 pkg, existing_node = self._select_package(
8881 arg.root_config.root, atom)
8882 if existing_node is None and \
8884 if not self._add_pkg(pkg, Dependency(atom=atom,
8885 root=pkg.root, parent=arg)):
8888 # Allow unsatisfied deps here to avoid showing a masking
8889 # message for an unsatisfied dep that isn't necessarily
8891 if not self._create_graph(allow_unsatisfied=True):
# Unsatisfied deps of to-be-merged packages always count; for installed
# packages they only count when reachable from a scheduled merge.
8894 unsatisfied_deps = []
8895 for dep in self._unsatisfied_deps:
8896 if not isinstance(dep.parent, Package):
8898 if dep.parent.operation == "merge":
8899 unsatisfied_deps.append(dep)
8902 # For unsatisfied deps of installed packages, only account for
8903 # them if they are in the subgraph of dependencies of a package
8904 # which is scheduled to be installed.
8905 unsatisfied_install = False
8907 dep_stack = self.digraph.parent_nodes(dep.parent)
8909 node = dep_stack.pop()
8910 if not isinstance(node, Package):
8912 if node.operation == "merge":
8913 unsatisfied_install = True
8915 if node in traversed:
8918 dep_stack.extend(self.digraph.parent_nodes(node))
8920 if unsatisfied_install:
8921 unsatisfied_deps.append(dep)
8923 if masked_tasks or unsatisfied_deps:
8924 # This probably means that a required package
8925 # was dropped via --skipfirst. It makes the
8926 # resume list invalid, so convert it to a
8927 # UnsatisfiedResumeDep exception.
8928 raise self.UnsatisfiedResumeDep(self,
8929 masked_tasks + unsatisfied_deps)
8930 self._serialized_tasks_cache = None
8933 except self._unknown_internal_error:
8938 def _load_favorites(self, favorites):
8940 Use a list of favorites to resume state from a
8941 previous select_files() call. This creates similar
8942 DependencyArg instances to those that would have
8943 been created by the original select_files() call.
8944 This allows Package instances to be matched with
8945 DependencyArg instances during graph creation.
8947 root_config = self.roots[self.target_root]
8948 getSetAtoms = root_config.setconfig.getSetAtoms
8949 sets = root_config.sets
# NOTE(review): excerpt is line-sampled — loop header, several continue
# statements and the SETPREFIX existence check (orig. 8950-8951, 8953,
# 8955, 8958-8961, 8970, 8972, 8975) are not visible here.
8952 if not isinstance(x, basestring):
8954 if x in ("system", "world"):
# Entries written as "@setname" are restored as SetArg instances.
8956 if x.startswith(SETPREFIX):
8957 s = x[len(SETPREFIX):]
8962 # Recursively expand sets so that containment tests in
8963 # self._get_parent_sets() properly match atoms in nested
8964 # sets (like if world contains system).
8965 expanded_set = InternalPackageSet(
8966 initial_atoms=getSetAtoms(s))
8967 self._sets[s] = expanded_set
8968 args.append(SetArg(arg=x, set=expanded_set,
8969 root_config=root_config))
# Plain entries must be valid atoms; restored as AtomArg instances.
8971 if not portage.isvalidatom(x):
8973 args.append(AtomArg(arg=x, atom=x,
8974 root_config=root_config))
8976 self._set_args(args)
# Exception carrying the depgraph plus the list of masked/unsatisfied
# tasks that invalidated a resume list (see loadResumeCommand).
8979 class UnsatisfiedResumeDep(portage.exception.PortageException):
8981 A dependency of a resume list is not installed. This
8982 can occur when a required package is dropped from the
8983 merge list via --skipfirst.
8985 def __init__(self, depgraph, value):
8986 portage.exception.PortageException.__init__(self, value)
# Keep a reference to the graph so callers can inspect/repair it.
8987 self.depgraph = depgraph
# Base class for the depgraph's private control-flow exceptions below;
# value defaults to an empty string for callers that raise bare.
8989 class _internal_exception(portage.exception.PortageException):
8990 def __init__(self, value=""):
8991 portage.exception.PortageException.__init__(self, value)
8993 class _unknown_internal_error(_internal_exception):
8995 Used by the depgraph internally to terminate graph creation.
8996 The specific reason for the failure should have been dumped
8997 to stderr, unfortunately, the exact reason for the failure
# NOTE(review): the remainder of this docstring (orig. 8998+) is missing
# from this excerpt.
# Control-flow signal, not an error: tells the caller to invoke
# _serialize_tasks() again after the graph has been amended.
9001 class _serialize_tasks_retry(_internal_exception):
9003 This is raised by the _serialize_tasks() method when it needs to
9004 be called again for some reason. The only case that it's currently
9005 used for is when neglected dependencies need to be added to the
9006 graph in order to avoid making a potentially unsafe decision.
9009 class _dep_check_composite_db(portage.dbapi):
9011 A dbapi-like interface that is optimized for use in dep_check() calls.
9012 This is built on top of the existing depgraph package selection logic.
9013 Some packages that have been added to the graph may be masked from this
9014 view in order to influence the atom preference selection that occurs
# Binds this view to one depgraph and one ROOT; caches are per-instance.
9017 def __init__(self, depgraph, root):
9018 portage.dbapi.__init__(self)
9019 self._depgraph = depgraph
# NOTE(review): orig. line 9020 (presumably "self._root = root") is
# missing from this excerpt; _root is read by match() below.
9021 self._match_cache = {}
9022 self._cpv_pkg_map = {}
# Drops both memoization caches; call after the graph changes so match()
# results are recomputed.
9024 def _clear_cache(self):
9025 self._match_cache.clear()
9026 self._cpv_pkg_map.clear()
# Memoized atom matching against the depgraph's package selection:
# returns cpvs for the selected package plus, for virtuals, matches from
# other slots so dep_check() can prefer an already-satisfied lower slot.
# NOTE(review): excerpt is line-sampled — cache-hit return, _dep_expand
# details, slot-loop continue/guards (orig. 9030-9033, 9036-9038, 9041,
# 9056, 9058, 9061, 9063, 9067-9068, 9070-9073, 9076) are not visible.
9028 def match(self, atom):
9029 ret = self._match_cache.get(atom)
9034 atom = self._dep_expand(atom)
9035 pkg, existing = self._depgraph._select_package(self._root, atom)
9039 # Return the highest available from select_package() as well as
9040 # any matching slots in the graph db.
9042 slots.add(pkg.metadata["SLOT"])
9043 atom_cp = portage.dep_getkey(atom)
9044 if pkg.cp.startswith("virtual/"):
9045 # For new-style virtual lookahead that occurs inside
9046 # dep_check(), examine all slots. This is needed
9047 # so that newer slots will not unnecessarily be pulled in
9048 # when a satisfying lower slot is already installed. For
9049 # example, if virtual/jdk-1.4 is satisfied via kaffe then
9050 # there's no need to pull in a newer slot to satisfy a
9051 # virtual/jdk dependency.
9052 for db, pkg_type, built, installed, db_keys in \
9053 self._depgraph._filtered_trees[self._root]["dbs"]:
9054 for cpv in db.match(atom):
9055 if portage.cpv_getkey(cpv) != pkg.cp:
9057 slots.add(db.aux_get(cpv, ["SLOT"])[0])
# Record the primary selection if visible, then resolve each remaining
# slot via _select_package and record those too.
9059 if self._visible(pkg):
9060 self._cpv_pkg_map[pkg.cpv] = pkg
9062 slots.remove(pkg.metadata["SLOT"])
9064 slot_atom = "%s:%s" % (atom_cp, slots.pop())
9065 pkg, existing = self._depgraph._select_package(
9066 self._root, slot_atom)
9069 if not self._visible(pkg):
9071 self._cpv_pkg_map[pkg.cpv] = pkg
# Sort ascending and memoize under the original (pre-expansion) atom.
9074 self._cpv_sort_ascending(ret)
9075 self._match_cache[orig_atom] = ret
9078 def _visible(self, pkg):
9079 if pkg.installed and "selective" not in self._depgraph.myparams:
9081 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9082 except (StopIteration, portage.exception.InvalidDependString):
9089 self._depgraph.pkgsettings[pkg.root], pkg):
9091 except portage.exception.InvalidDependString:
9093 in_graph = self._depgraph._slot_pkg_map[
9094 self._root].get(pkg.slot_atom)
9095 if in_graph is None:
9096 # Mask choices for packages which are not the highest visible
9097 # version within their slot (since they usually trigger slot
9099 highest_visible, in_graph = self._depgraph._select_package(
9100 self._root, pkg.slot_atom)
9101 if pkg != highest_visible:
9103 elif in_graph != pkg:
9104 # Mask choices for packages that would trigger a slot
9105 # conflict with a previously selected package.
9109 def _dep_expand(self, atom):
9111 This is only needed for old installed packages that may
9112 contain atoms that are not fully qualified with a specific
9113 category. Emulate the cpv_expand() function that's used by
9114 dbapi.match() in cases like this. If there are multiple
9115 matches, it's often due to a new-style virtual that has
9116 been added, so try to filter those out to avoid raising
9119 root_config = self._depgraph.roots[self._root]
9121 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9122 if len(expanded_atoms) > 1:
9123 non_virtual_atoms = []
9124 for x in expanded_atoms:
9125 if not portage.dep_getkey(x).startswith("virtual/"):
9126 non_virtual_atoms.append(x)
9127 if len(non_virtual_atoms) == 1:
9128 expanded_atoms = non_virtual_atoms
9129 if len(expanded_atoms) > 1:
9130 # compatible with portage.cpv_expand()
9131 raise portage.exception.AmbiguousPackageName(
9132 [portage.dep_getkey(x) for x in expanded_atoms])
9134 atom = expanded_atoms[0]
9136 null_atom = insert_category_into_atom(atom, "null")
9137 null_cp = portage.dep_getkey(null_atom)
9138 cat, atom_pn = portage.catsplit(null_cp)
9139 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9141 # Allow the resolver to choose which virtual.
9142 atom = insert_category_into_atom(atom, "virtual")
9144 atom = insert_category_into_atom(atom, "null")
9147 def aux_get(self, cpv, wants):
9148 metadata = self._cpv_pkg_map[cpv].metadata
9149 return [metadata.get(x, "") for x in wants]
# NOTE(review): subsampled listing — embedded original line numbers with
# gaps mark elided lines. Code kept verbatim; comments added only.
9151 class RepoDisplay(object):
# Collects the set of repository paths (PORTDIR plus PORTDIR_OVERLAY
# entries) across all roots, and assigns short display indices to them.
9152 def __init__(self, roots):
9153 self._shown_repos = {}
9154 self._unknown_repo = False
9156 for root_config in roots.itervalues():
9157 portdir = root_config.settings.get("PORTDIR")
9159 repo_paths.add(portdir)
9160 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9162 repo_paths.update(overlays.split())
9163 repo_paths = list(repo_paths)
9164 self._repo_paths = repo_paths
# Resolve symlinks once, so that repoStr() can match realpath inputs.
9165 self._repo_paths_real = [ os.path.realpath(repo_path) \
9166 for repo_path in repo_paths ]
9168 # pre-allocate index for PORTDIR so that it always has index 0.
9169 for root_config in roots.itervalues():
9170 portdb = root_config.trees["porttree"].dbapi
9171 portdir = portdb.porttree_root
9173 self.repoStr(portdir)
# repoStr(repo_path_real): map a realpath to its short display index,
# assigning a new index on first use. A path that cannot be resolved
# marks _unknown_repo so __str__ can print the "[?]" legend.
9175 def repoStr(self, repo_path_real):
9178 real_index = self._repo_paths_real.index(repo_path_real)
9179 if real_index == -1:
9181 self._unknown_repo = True
9183 shown_repos = self._shown_repos
9184 repo_paths = self._repo_paths
9185 repo_path = repo_paths[real_index]
9186 index = shown_repos.get(repo_path)
9188 index = len(shown_repos)
9189 shown_repos[repo_path] = index
# __str__ (def line elided in this listing): render the legend of shown
# repositories in index order, plus the "[?]" note when any repository
# could not be determined.
9195 shown_repos = self._shown_repos
9196 unknown_repo = self._unknown_repo
9197 if shown_repos or self._unknown_repo:
9198 output.append("Portage tree and overlays:\n")
9199 show_repo_paths = list(shown_repos)
9200 for repo_path, repo_index in shown_repos.iteritems():
9201 show_repo_paths[repo_index] = repo_path
9203 for index, repo_path in enumerate(show_repo_paths):
9204 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9206 output.append(" "+teal("[?]") + \
9207 " indicates that the source repository could not be determined\n")
9208 return "".join(output)
# NOTE(review): subsampled listing — embedded original line numbers with
# gaps mark elided lines (including the __init__ and __str__ def lines).
# Code kept verbatim; comments added only.
9210 class PackageCounters(object):
# Tally counters for the merge-list summary line (upgrades, downgrades,
# new, reinstalls, blocks, fetch restrictions, interactive packages, ...).
9220 self.blocks_satisfied = 0
9222 self.restrict_fetch = 0
9223 self.restrict_fetch_satisfied = 0
9224 self.interactive = 0
# __str__ (def line elided): build the human-readable totals line, e.g.
# "Total: N packages (X upgrades, Y new, ...), Size of downloads: ...".
9227 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9230 myoutput.append("Total: %s package" % total_installs)
# Manual English pluralization throughout: append "s" when count != 1.
9231 if total_installs != 1:
9232 myoutput.append("s")
9233 if total_installs != 0:
9234 myoutput.append(" (")
9235 if self.upgrades > 0:
9236 details.append("%s upgrade" % self.upgrades)
9237 if self.upgrades > 1:
9239 if self.downgrades > 0:
9240 details.append("%s downgrade" % self.downgrades)
9241 if self.downgrades > 1:
9244 details.append("%s new" % self.new)
9245 if self.newslot > 0:
9246 details.append("%s in new slot" % self.newslot)
9247 if self.newslot > 1:
9250 details.append("%s reinstall" % self.reinst)
9254 details.append("%s uninstall" % self.uninst)
9257 if self.interactive > 0:
9258 details.append("%s %s" % (self.interactive,
9259 colorize("WARN", "interactive")))
9260 myoutput.append(", ".join(details))
9261 if total_installs != 0:
9262 myoutput.append(")")
9263 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
# Fetch-restriction summary; unsatisfied counts are highlighted via bad().
9264 if self.restrict_fetch:
9265 myoutput.append("\nFetch Restriction: %s package" % \
9266 self.restrict_fetch)
9267 if self.restrict_fetch > 1:
9268 myoutput.append("s")
9269 if self.restrict_fetch_satisfied < self.restrict_fetch:
9270 myoutput.append(bad(" (%s unsatisfied)") % \
9271 (self.restrict_fetch - self.restrict_fetch_satisfied))
# Blocker-conflict summary, same pattern as above.
9273 myoutput.append("\nConflict: %s block" % \
9276 myoutput.append("s")
9277 if self.blocks_satisfied < self.blocks:
9278 myoutput.append(bad(" (%s unsatisfied)") % \
9279 (self.blocks - self.blocks_satisfied))
9280 return "".join(myoutput)
# NOTE(review): subsampled listing — gaps in the embedded numbering mark
# elided lines (decorators, docstring quotes, early returns).
9282 class UseFlagDisplay(object):
# Lightweight record for one USE flag: its name, whether it is enabled,
# and whether it is forced. __slots__ keeps many instances cheap.
9284 __slots__ = ('name', 'enabled', 'forced')
9286 def __init__(self, name, enabled, forced):
9288 self.enabled = enabled
9289 self.forced = forced
# cmp-style comparator: order flags purely by name, mixing enabled and
# disabled flags together. Returns -1/0/1 via the boolean-subtraction idiom.
9302 def _cmp_combined(a, b):
9304 Sort by name, combining enabled and disabled flags.
9306 return (a.name > b.name) - (a.name < b.name)
9308 sort_combined = cmp_sort_key(_cmp_combined)
# cmp-style comparator: enabled flags sort before disabled ones (the
# elided line presumably returns enabled_diff when nonzero — TODO confirm),
# then by name within each group.
9311 def _cmp_separated(a, b):
9313 Sort by name, separating enabled flags from disabled flags.
9315 enabled_diff = b.enabled - a.enabled
9318 return (a.name > b.name) - (a.name < b.name)
9320 sort_separated = cmp_sort_key(_cmp_separated)
# NOTE(review): subsampled listing — gaps in the embedded numbering mark
# elided lines (the __init__ def, raise statements, try lines, returns).
9323 class PollSelectAdapter(PollConstants):
9326 Use select to emulate a poll object, for
9327 systems that don't support poll().
# _registered maps fd -> eventmask; _select_args caches the three
# select() list arguments and is invalidated (set to None) on changes.
9331 self._registered = {}
9332 self._select_args = [[], [], []]
# register(fd[, eventmask]): record interest in fd. Despite accepting a
# full eventmask, only POLLIN is honored by poll() below.
9334 def register(self, fd, *args):
9336 Only POLLIN is currently supported!
9340 "register expected at most 2 arguments, got " + \
9341 repr(1 + len(args)))
9343 eventmask = PollConstants.POLLIN | \
9344 PollConstants.POLLPRI | PollConstants.POLLOUT
9348 self._registered[fd] = eventmask
9349 self._select_args = None
9351 def unregister(self, fd):
9352 self._select_args = None
9353 del self._registered[fd]
# poll([timeout]): emulate poll() via select(); reports only POLLIN.
9355 def poll(self, *args):
9358 "poll expected at most 2 arguments, got " + \
9359 repr(1 + len(args)))
9365 select_args = self._select_args
9366 if select_args is None:
# Rebuild the cached select() argument list from registered fds.
9367 select_args = [self._registered.keys(), [], []]
9369 if timeout is not None:
# Copy before appending a timeout so the cached list stays 3 elements.
9370 select_args = select_args[:]
9371 # Translate poll() timeout args to select() timeout args:
9373 # | units | value(s) for indefinite block
9374 # ---------|--------------|------------------------------
9375 # poll | milliseconds | omitted, negative, or None
9376 # ---------|--------------|------------------------------
9377 # select | seconds | omitted
9378 # ---------|--------------|------------------------------
9380 if timeout is not None and timeout < 0:
9382 if timeout is not None:
# NOTE(review): under Python 2 integer division, timeout / 1000
# truncates — e.g. a 500 ms timeout becomes 0 (non-blocking).
# Consider timeout / 1000.0; left as-is since surrounding control
# flow is elided in this listing — TODO confirm against full source.
9383 select_args.append(timeout / 1000)
9385 select_events = select.select(*select_args)
9387 for fd in select_events[0]:
9388 poll_events.append((fd, PollConstants.POLLIN))
# NOTE(review): subsampled listing — gaps in the embedded numbering mark
# elided lines (the schedule() and clear() def lines, some returns).
9391 class SequentialTaskQueue(SlotObject):
# FIFO queue of asynchronous tasks, started up to max_jobs at a time
# (max_jobs is True means unlimited — see the while condition below).
9393 __slots__ = ("max_jobs", "running_tasks") + \
9394 ("_dirty", "_scheduling", "_task_queue")
9396 def __init__(self, **kwargs):
9397 SlotObject.__init__(self, **kwargs)
# deque gives O(1) popleft/appendleft for FIFO plus front-insertion.
9398 self._task_queue = deque()
9399 self.running_tasks = set()
9400 if self.max_jobs is None:
9404 def add(self, task):
9405 self._task_queue.append(task)
# addFront: jump the queue — the task will be started before queued ones.
9408 def addFront(self, task):
9409 self._task_queue.appendleft(task)
# schedule() body (def line elided): start queued tasks while capacity
# remains; guarded against re-entrance via the _scheduling flag.
9420 if self._scheduling:
9421 # Ignore any recursive schedule() calls triggered via
9422 # self._task_exit().
9425 self._scheduling = True
9427 task_queue = self._task_queue
9428 running_tasks = self.running_tasks
9429 max_jobs = self.max_jobs
9430 state_changed = False
9432 while task_queue and \
9433 (max_jobs is True or len(running_tasks) < max_jobs):
9434 task = task_queue.popleft()
9435 cancelled = getattr(task, "cancelled", None)
9437 running_tasks.add(task)
# The exit listener is how finished tasks are pruned (see _task_exit).
9438 task.addExitListener(self._task_exit)
9440 state_changed = True
9443 self._scheduling = False
9445 return state_changed
9447 def _task_exit(self, task):
9449 Since we can always rely on exit listeners being called, the set of
9450 running tasks is always pruned automatically and there is never any need
9451 to actively prune it.
9453 self.running_tasks.remove(task)
9454 if self._task_queue:
# clear() body (def line elided): discard queued tasks and detach
# this queue's exit listener from everything still running.
9458 self._task_queue.clear()
9459 running_tasks = self.running_tasks
9460 while running_tasks:
9461 task = running_tasks.pop()
9462 task.removeExitListener(self._task_exit)
# Truthiness/length reflect both queued and running tasks.
9466 def __nonzero__(self):
9467 return bool(self._task_queue or self.running_tasks)
9470 return len(self._task_queue) + len(self.running_tasks)
# Module-level memo for can_poll_device(); None means "not yet probed".
9472 _can_poll_device = None
# NOTE(review): subsampled listing — the try/except lines around the
# open() and poll-object creation are elided here.
9474 def can_poll_device():
9476 Test if it's possible to use poll() on a device such as a pty. This
9477 is known to fail on Darwin.
9479 @returns: True if poll() on a device succeeds, False otherwise.
9482 global _can_poll_device
# Return the memoized result after the first probe.
9483 if _can_poll_device is not None:
9484 return _can_poll_device
9486 if not hasattr(select, "poll"):
9487 _can_poll_device = False
9488 return _can_poll_device
9491 dev_null = open('/dev/null', 'rb')
9493 _can_poll_device = False
9494 return _can_poll_device
# Probe: register a device fd and see whether poll() flags it POLLNVAL.
9497 p.register(dev_null.fileno(), PollConstants.POLLIN)
9499 invalid_request = False
9500 for f, event in p.poll():
9501 if event & PollConstants.POLLNVAL:
9502 invalid_request = True
9506 _can_poll_device = not invalid_request
9507 return _can_poll_device
def create_poll_instance():
	"""
	Return an instance of select.poll, falling back to a
	PollSelectAdapter when there is no poll() implementation
	or it is broken somehow (e.g. on devices under Darwin).
	"""
	# can_poll_device() memoizes a runtime probe of poll() on /dev/null.
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
# Prefer the native os.getloadavg() when the platform provides it.
9519 getloadavg = getattr(os, "getloadavg", None)
9520 if getloadavg is None:
# NOTE(review): subsampled listing — the fallback function's def line and
# its try/except lines are elided here.
9523 Uses /proc/loadavg to emulate os.getloadavg().
9524 Raises OSError if the load average was unobtainable.
9527 loadavg_str = open('/proc/loadavg').readline()
9529 # getloadavg() is only supposed to raise OSError, so convert
9530 raise OSError('unknown')
# /proc/loadavg starts with the 1/5/15 minute averages.
9531 loadavg_split = loadavg_str.split()
9532 if len(loadavg_split) < 3:
9533 raise OSError('unknown')
9537 loadavg_floats.append(float(loadavg_split[i]))
# Any parse failure is also converted to the documented OSError.
9539 raise OSError('unknown')
9540 return tuple(loadavg_floats)
# NOTE(review): subsampled listing — embedded original line numbers with
# gaps mark elided lines (the __init__ def, try lines, returns).
# Code kept verbatim; comments added only.
9542 class PollScheduler(object):
# Minimal interface object handed to tasks: register/schedule/unregister.
9544 class _sched_iface_class(SlotObject):
9545 __slots__ = ("register", "schedule", "unregister")
# __init__ body (def line elided): event queue, fd -> handler maps, a
# monotonically increasing handler id, and the poll object itself.
9549 self._max_load = None
9551 self._poll_event_queue = []
9552 self._poll_event_handlers = {}
9553 self._poll_event_handler_ids = {}
9554 # Increment id for each new handler.
9555 self._event_handler_id = 0
9556 self._poll_obj = create_poll_instance()
9557 self._scheduling = False
9559 def _schedule(self):
9561 Calls _schedule_tasks() and automatically returns early from
9562 any recursive calls to this method that the _schedule_tasks()
9563 call might trigger. This makes _schedule() safe to call from
9564 inside exit listeners.
9566 if self._scheduling:
9568 self._scheduling = True
9570 return self._schedule_tasks()
9572 self._scheduling = False
# Subclass hook: number of currently running jobs (body elided here).
9574 def _running_job_count(self):
# _can_add_job: enforce both the --jobs limit and the --load-average
# limit (the latter only once at least one job is already running).
9577 def _can_add_job(self):
9578 max_jobs = self._max_jobs
9579 max_load = self._max_load
9581 if self._max_jobs is not True and \
9582 self._running_job_count() >= self._max_jobs:
9585 if max_load is not None and \
9586 (max_jobs is True or max_jobs > 1) and \
9587 self._running_job_count() >= 1:
9589 avg1, avg5, avg15 = getloadavg()
# Gate on the 1-minute average only.
9593 if avg1 >= max_load:
9598 def _poll(self, timeout=None):
9600 All poll() calls pass through here. The poll events
9601 are added directly to self._poll_event_queue.
9602 In order to avoid endless blocking, this raises
9603 StopIteration if timeout is None and there are
9604 no file descriptors to poll.
9606 if not self._poll_event_handlers:
9608 if timeout is None and \
9609 not self._poll_event_handlers:
9610 raise StopIteration(
9611 "timeout is None and there are no poll() event handlers")
9613 # The following error is known to occur with Linux kernel versions
9616 # select.error: (4, 'Interrupted system call')
9618 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9619 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9620 # without any events.
9623 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9625 except select.error, e:
9626 writemsg_level("\n!!! select error: %s\n" % (e,),
9627 level=logging.ERROR, noiselevel=-1)
9629 if timeout is not None:
9632 def _next_poll_event(self, timeout=None):
9634 Since the _schedule_wait() loop is called by event
9635 handlers from _poll_loop(), maintain a central event
9636 queue for both of them to share events from a single
9637 poll() call. In order to avoid endless blocking, this
9638 raises StopIteration if timeout is None and there are
9639 no file descriptors to poll.
9641 if not self._poll_event_queue:
9643 return self._poll_event_queue.pop()
# _poll_loop: dispatch events to handlers until none remain registered.
9645 def _poll_loop(self):
9647 event_handlers = self._poll_event_handlers
9648 event_handled = False
9651 while event_handlers:
9652 f, event = self._next_poll_event()
9653 handler, reg_id = event_handlers[f]
9655 event_handled = True
9656 except StopIteration:
9657 event_handled = True
# Guard against spinning without making progress.
9659 if not event_handled:
9660 raise AssertionError("tight loop")
9662 def _schedule_yield(self):
9664 Schedule for a short period of time chosen by the scheduler based
9665 on internal state. Synchronous tasks should call this periodically
9666 in order to allow the scheduler to service pending poll events. The
9667 scheduler will call poll() exactly once, without blocking, and any
9668 resulting poll events will be serviced.
9670 event_handlers = self._poll_event_handlers
9673 if not event_handlers:
9674 return bool(events_handled)
9676 if not self._poll_event_queue:
9680 while event_handlers and self._poll_event_queue:
9681 f, event = self._next_poll_event()
9682 handler, reg_id = event_handlers[f]
9685 except StopIteration:
9688 return bool(events_handled)
9690 def _register(self, f, eventmask, handler):
9693 @return: A unique registration id, for use in schedule() or
# A given fd may only have one handler at a time.
9696 if f in self._poll_event_handlers:
9697 raise AssertionError("fd %d is already registered" % f)
9698 self._event_handler_id += 1
9699 reg_id = self._event_handler_id
9700 self._poll_event_handler_ids[reg_id] = f
9701 self._poll_event_handlers[f] = (handler, reg_id)
9702 self._poll_obj.register(f, eventmask)
# _unregister: reverse of _register — drop both map entries and the
# underlying poll registration.
9705 def _unregister(self, reg_id):
9706 f = self._poll_event_handler_ids[reg_id]
9707 self._poll_obj.unregister(f)
9708 del self._poll_event_handlers[f]
9709 del self._poll_event_handler_ids[reg_id]
9711 def _schedule_wait(self, wait_ids):
9713 Schedule until wait_id is not longer registered
9716 @param wait_id: a task id to wait for
9718 event_handlers = self._poll_event_handlers
9719 handler_ids = self._poll_event_handler_ids
9720 event_handled = False
# Accept either a single id or a collection of ids.
9722 if isinstance(wait_ids, int):
9723 wait_ids = frozenset([wait_ids])
9726 while wait_ids.intersection(handler_ids):
9727 f, event = self._next_poll_event()
9728 handler, reg_id = event_handlers[f]
9730 event_handled = True
9731 except StopIteration:
9732 event_handled = True
9734 return event_handled
# NOTE(review): subsampled listing — gaps in the embedded numbering mark
# elided lines (the add()/run() def lines, returns, loop bodies).
9736 class QueueScheduler(PollScheduler):
9739 Add instances of SequentialTaskQueue and then call run(). The
9740 run() method returns when no tasks remain.
9743 def __init__(self, max_jobs=None, max_load=None):
9744 PollScheduler.__init__(self)
9746 if max_jobs is None:
9749 self._max_jobs = max_jobs
9750 self._max_load = max_load
# Expose the restricted scheduling interface to tasks.
9751 self.sched_iface = self._sched_iface_class(
9752 register=self._register,
9753 schedule=self._schedule_wait,
9754 unregister=self._unregister)
9757 self._schedule_listeners = []
# add(q) body (def line elided): track another SequentialTaskQueue.
9760 self._queues.append(q)
9762 def remove(self, q):
9763 self._queues.remove(q)
# run() body (def line elided): keep scheduling until no queue can start
# anything, then drain remaining running jobs.
9767 while self._schedule():
9770 while self._running_job_count():
9773 def _schedule_tasks(self):
9776 @returns: True if there may be remaining tasks to schedule,
# Start as many jobs as current capacity allows, stopping when a pass
# fails to start anything new.
9779 while self._can_add_job():
9780 n = self._max_jobs - self._running_job_count()
9784 if not self._start_next_job(n):
9787 for q in self._queues:
# Aggregate running-task counts across all managed queues.
9792 def _running_job_count(self):
9794 for q in self._queues:
9795 job_count += len(q.running_tasks)
9796 self._jobs = job_count
# _start_next_job(n): ask each queue to schedule, counting how many new
# tasks actually started, until n starts have been observed.
9799 def _start_next_job(self, n=1):
9801 for q in self._queues:
9802 initial_job_count = len(q.running_tasks)
9804 final_job_count = len(q.running_tasks)
9805 if final_job_count > initial_job_count:
9806 started_count += (final_job_count - initial_job_count)
9807 if started_count >= n:
9809 return started_count
class TaskScheduler(object):
	"""
	A simple way to handle scheduling of AsynchronousTask instances:
	add tasks and call run(). The run() method returns when no tasks
	remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# A single SequentialTaskQueue driven by a QueueScheduler gives
		# serial or parallel execution depending on max_jobs.
		queue = SequentialTaskQueue(max_jobs=max_jobs)
		scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		scheduler.add(queue)
		self._queue = queue
		self._scheduler = scheduler
		# Delegate the scheduling interface and run() directly to the
		# underlying scheduler.
		self.sched_iface = scheduler.sched_iface
		self.run = scheduler.run

	def add(self, task):
		self._queue.add(task)
# NOTE(review): subsampled listing — embedded original line numbers with
# gaps mark elided lines (dict literals, try lines, several def lines).
# Code kept verbatim; comments added only.
9829 class JobStatusDisplay(object):
# Attributes whose assignment triggers _property_change() (see
# __setattr__ below), so the status line refreshes when they change.
9831 _bound_properties = ("curval", "failed", "running")
9832 _jobs_column_width = 48
9834 # Don't update the display unless at least this much
9835 # time has passed, in units of seconds.
9836 _min_display_latency = 2
# Fallback terminal control codes (literal entries elided here) used when
# termcap lookup is unavailable; keys mapped via _termcap_name_map.
9838 _default_term_codes = {
9844 _termcap_name_map = {
9845 'carriage_return' : 'cr',
9850 def __init__(self, out=sys.stdout, quiet=False):
# object.__setattr__ is used throughout because this class overrides
# __setattr__ to intercept bound-property changes.
9851 object.__setattr__(self, "out", out)
9852 object.__setattr__(self, "quiet", quiet)
9853 object.__setattr__(self, "maxval", 0)
9854 object.__setattr__(self, "merges", 0)
9855 object.__setattr__(self, "_changed", False)
9856 object.__setattr__(self, "_displayed", False)
9857 object.__setattr__(self, "_last_display_time", 0)
9858 object.__setattr__(self, "width", 80)
9861 isatty = hasattr(out, "isatty") and out.isatty()
9862 object.__setattr__(self, "_isatty", isatty)
# Fall back to the default codes when not a tty or termcap init fails.
9863 if not isatty or not self._init_term():
9865 for k, capname in self._termcap_name_map.iteritems():
9866 term_codes[k] = self._default_term_codes[capname]
9867 object.__setattr__(self, "_term_codes", term_codes)
# Normalize byte codes to unicode so later string concatenation works.
9868 encoding = sys.getdefaultencoding()
9869 for k, v in self._term_codes.items():
9870 if not isinstance(v, basestring):
9871 self._term_codes[k] = v.decode(encoding, 'replace')
9873 def _init_term(self):
9875 Initialize term control codes.
9877 @returns: True if term codes were successfully initialized,
9881 term_type = os.environ.get("TERM", "vt100")
9887 curses.setupterm(term_type, self.out.fileno())
9888 tigetstr = curses.tigetstr
9889 except curses.error:
9894 if tigetstr is None:
9898 for k, capname in self._termcap_name_map.iteritems():
9899 code = tigetstr(capname)
# Missing capabilities fall back to the defaults.
9901 code = self._default_term_codes[capname]
9902 term_codes[k] = code
9903 object.__setattr__(self, "_term_codes", term_codes)
9906 def _format_msg(self, msg):
9907 return ">>> %s" % msg
# _erase() body (def line elided): return to column 0 and clear the line.
9911 self._term_codes['carriage_return'] + \
9912 self._term_codes['clr_eol'])
9914 self._displayed = False
9916 def _display(self, line):
9917 self.out.write(line)
9919 self._displayed = True
# _update: on a tty, redraw in place; otherwise append a plain line.
9921 def _update(self, msg):
9924 if not self._isatty:
9925 out.write(self._format_msg(msg) + self._term_codes['newline'])
9927 self._displayed = True
9933 self._display(self._format_msg(msg))
9935 def displayMessage(self, msg):
9937 was_displayed = self._displayed
9939 if self._isatty and self._displayed:
9942 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9944 self._displayed = False
9947 self._changed = True
# reset-style body (def line elided): zero the bound counters and, if
# something was displayed, terminate the status line.
9953 for name in self._bound_properties:
9954 object.__setattr__(self, name, 0)
9957 self.out.write(self._term_codes['newline'])
9959 self._displayed = False
9961 def __setattr__(self, name, value):
9962 old_value = getattr(self, name)
# Skip redundant assignments so unchanged values never trigger redraws.
9963 if value == old_value:
9965 object.__setattr__(self, name, value)
9966 if name in self._bound_properties:
9967 self._property_change(name, old_value, value)
9969 def _property_change(self, name, old_value, new_value):
9970 self._changed = True
# _load_avg_str: format the load averages with a precision based on
# magnitude (digit computation elided here).
9973 def _load_avg_str(self):
9988 return ", ".join(("%%.%df" % digits ) % x for x in avg)
# display() body (def line elided): rate-limited refresh.
9992 Display status on stdout, but only if something has
9993 changed since the last call.
9999 current_time = time.time()
10000 time_delta = current_time - self._last_display_time
10001 if self._displayed and \
10003 if not self._isatty:
10005 if time_delta < self._min_display_latency:
10008 self._last_display_time = current_time
10009 self._changed = False
10010 self._display_status()
10012 def _display_status(self):
10013 # Don't use len(self._completed_tasks) here since that also
10014 # can include uninstall tasks.
10015 curval_str = str(self.curval)
10016 maxval_str = str(self.maxval)
10017 running_str = str(self.running)
10018 failed_str = str(self.failed)
10019 load_avg_str = self._load_avg_str()
# Render twice in parallel: color_output gets console codes, while
# plain_output (via write_listener) receives the uncolored text used
# for width measurement and truncation below.
10021 color_output = StringIO()
10022 plain_output = StringIO()
10023 style_file = portage.output.ConsoleStyleFile(color_output)
10024 style_file.write_listener = plain_output
10025 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
10026 style_writer.style_listener = style_file.new_styles
10027 f = formatter.AbstractFormatter(style_writer)
10029 number_style = "INFORM"
10030 f.add_literal_data("Jobs: ")
10031 f.push_style(number_style)
10032 f.add_literal_data(curval_str)
10034 f.add_literal_data(" of ")
10035 f.push_style(number_style)
10036 f.add_literal_data(maxval_str)
10038 f.add_literal_data(" complete")
10041 f.add_literal_data(", ")
10042 f.push_style(number_style)
10043 f.add_literal_data(running_str)
10045 f.add_literal_data(" running")
10048 f.add_literal_data(", ")
10049 f.push_style(number_style)
10050 f.add_literal_data(failed_str)
10052 f.add_literal_data(" failed")
# Pad the jobs column so the load average aligns.
10054 padding = self._jobs_column_width - len(plain_output.getvalue())
10056 f.add_literal_data(padding * " ")
10058 f.add_literal_data("Load avg: ")
10059 f.add_literal_data(load_avg_str)
10061 # Truncate to fit width, to avoid making the terminal scroll if the
10062 # line overflows (happens when the load average is large).
10063 plain_output = plain_output.getvalue()
10064 if self._isatty and len(plain_output) > self.width:
10065 # Use plain_output here since it's easier to truncate
10066 # properly than the color output which contains console
10068 self._update(plain_output[:self.width])
10070 self._update(color_output.getvalue())
# Mirror the (whitespace-normalized) status into the xterm title.
10072 xtermTitle(" ".join(plain_output.split()))
# NOTE(review): subsampled listing — gaps in the embedded numbering mark
# elided lines (counter initializers and the display() def line).
10074 class ProgressHandler(object):
10075 def __init__(self):
# Rate limiter state: last refresh timestamp and minimum interval (s).
10078 self._last_update = 0
10079 self.min_latency = 0.2
# onProgress: record the latest progress and refresh the display at
# most once per min_latency seconds.
10081 def onProgress(self, maxval, curval):
10082 self.maxval = maxval
10083 self.curval = curval
10084 cur_time = time.time()
10085 if cur_time - self._last_update >= self.min_latency:
10086 self._last_update = cur_time
# display() (def line elided) is abstract: subclasses must override.
10090 raise NotImplementedError(self)
10092 class Scheduler(PollScheduler):
10094 _opts_ignore_blockers = \
10095 frozenset(["--buildpkgonly",
10096 "--fetchonly", "--fetch-all-uri",
10097 "--nodeps", "--pretend"])
10099 _opts_no_background = \
10100 frozenset(["--pretend",
10101 "--fetchonly", "--fetch-all-uri"])
10103 _opts_no_restart = frozenset(["--buildpkgonly",
10104 "--fetchonly", "--fetch-all-uri", "--pretend"])
10106 _bad_resume_opts = set(["--ask", "--changelog",
10107 "--resume", "--skipfirst"])
10109 _fetch_log = "/var/log/emerge-fetch.log"
10111 class _iface_class(SlotObject):
10112 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10113 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10114 "scheduleSetup", "scheduleUnpack", "scheduleYield",
10117 class _fetch_iface_class(SlotObject):
10118 __slots__ = ("log_file", "schedule")
10120 _task_queues_class = slot_dict_class(
10121 ("merge", "jobs", "fetch", "unpack"), prefix="")
10123 class _build_opts_class(SlotObject):
10124 __slots__ = ("buildpkg", "buildpkgonly",
10125 "fetch_all_uri", "fetchonly", "pretend")
10127 class _binpkg_opts_class(SlotObject):
10128 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10130 class _pkg_count_class(SlotObject):
10131 __slots__ = ("curval", "maxval")
10133 class _emerge_log_class(SlotObject):
10134 __slots__ = ("xterm_titles",)
10136 def log(self, *pargs, **kwargs):
10137 if not self.xterm_titles:
10138 # Avoid interference with the scheduler's status display.
10139 kwargs.pop("short_msg", None)
10140 emergelog(self.xterm_titles, *pargs, **kwargs)
10142 class _failed_pkg(SlotObject):
10143 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10145 class _ConfigPool(object):
10146 """Interface for a task to temporarily allocate a config
10147 instance from a pool. This allows a task to be constructed
10148 long before the config instance actually becomes needed, like
10149 when prefetchers are constructed for the whole merge list."""
10150 __slots__ = ("_root", "_allocate", "_deallocate")
10151 def __init__(self, root, allocate, deallocate):
10153 self._allocate = allocate
10154 self._deallocate = deallocate
10155 def allocate(self):
10156 return self._allocate(self._root)
10157 def deallocate(self, settings):
10158 self._deallocate(settings)
10160 class _unknown_internal_error(portage.exception.PortageException):
10162 Used internally to terminate scheduling. The specific reason for
10163 the failure should have been dumped to stderr.
10165 def __init__(self, value=""):
10166 portage.exception.PortageException.__init__(self, value)
10168 def __init__(self, settings, trees, mtimedb, myopts,
10169 spinner, mergelist, favorites, digraph):
10170 PollScheduler.__init__(self)
10171 self.settings = settings
10172 self.target_root = settings["ROOT"]
10174 self.myopts = myopts
10175 self._spinner = spinner
10176 self._mtimedb = mtimedb
10177 self._mergelist = mergelist
10178 self._favorites = favorites
10179 self._args_set = InternalPackageSet(favorites)
10180 self._build_opts = self._build_opts_class()
10181 for k in self._build_opts.__slots__:
10182 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10183 self._binpkg_opts = self._binpkg_opts_class()
10184 for k in self._binpkg_opts.__slots__:
10185 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10188 self._logger = self._emerge_log_class()
10189 self._task_queues = self._task_queues_class()
10190 for k in self._task_queues.allowed_keys:
10191 setattr(self._task_queues, k,
10192 SequentialTaskQueue())
10194 # Holds merges that will wait to be executed when no builds are
10195 # executing. This is useful for system packages since dependencies
10196 # on system packages are frequently unspecified.
10197 self._merge_wait_queue = []
10198 # Holds merges that have been transfered from the merge_wait_queue to
10199 # the actual merge queue. They are removed from this list upon
10200 # completion. Other packages can start building only when this list is
10202 self._merge_wait_scheduled = []
10204 # Holds system packages and their deep runtime dependencies. Before
10205 # being merged, these packages go to merge_wait_queue, to be merged
10206 # when no other packages are building.
10207 self._deep_system_deps = set()
10209 # Holds packages to merge which will satisfy currently unsatisfied
10210 # deep runtime dependencies of system packages. If this is not empty
10211 # then no parallel builds will be spawned until it is empty. This
10212 # minimizes the possibility that a build will fail due to the system
10213 # being in a fragile state. For example, see bug #259954.
10214 self._unsatisfied_system_deps = set()
10216 self._status_display = JobStatusDisplay()
10217 self._max_load = myopts.get("--load-average")
10218 max_jobs = myopts.get("--jobs")
10219 if max_jobs is None:
10221 self._set_max_jobs(max_jobs)
10223 # The root where the currently running
10224 # portage instance is installed.
10225 self._running_root = trees["/"]["root_config"]
10227 if settings.get("PORTAGE_DEBUG", "") == "1":
10229 self.pkgsettings = {}
10230 self._config_pool = {}
10231 self._blocker_db = {}
10233 self._config_pool[root] = []
10234 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10236 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10237 schedule=self._schedule_fetch)
10238 self._sched_iface = self._iface_class(
10239 dblinkEbuildPhase=self._dblink_ebuild_phase,
10240 dblinkDisplayMerge=self._dblink_display_merge,
10241 dblinkElog=self._dblink_elog,
10242 dblinkEmergeLog=self._dblink_emerge_log,
10243 fetch=fetch_iface, register=self._register,
10244 schedule=self._schedule_wait,
10245 scheduleSetup=self._schedule_setup,
10246 scheduleUnpack=self._schedule_unpack,
10247 scheduleYield=self._schedule_yield,
10248 unregister=self._unregister)
10250 self._prefetchers = weakref.WeakValueDictionary()
10251 self._pkg_queue = []
10252 self._completed_tasks = set()
10254 self._failed_pkgs = []
10255 self._failed_pkgs_all = []
10256 self._failed_pkgs_die_msgs = []
10257 self._post_mod_echo_msgs = []
10258 self._parallel_fetch = False
10259 merge_count = len([x for x in mergelist \
10260 if isinstance(x, Package) and x.operation == "merge"])
10261 self._pkg_count = self._pkg_count_class(
10262 curval=0, maxval=merge_count)
10263 self._status_display.maxval = self._pkg_count.maxval
10265 # The load average takes some time to respond when new
10266 # jobs are added, so we need to limit the rate of adding
10268 self._job_delay_max = 10
10269 self._job_delay_factor = 1.0
10270 self._job_delay_exp = 1.5
10271 self._previous_job_start_time = None
10273 self._set_digraph(digraph)
10275 # This is used to memoize the _choose_pkg() result when
10276 # no packages can be chosen until one of the existing
10278 self._choose_pkg_return_early = False
10280 features = self.settings.features
10281 if "parallel-fetch" in features and \
10282 not ("--pretend" in self.myopts or \
10283 "--fetch-all-uri" in self.myopts or \
10284 "--fetchonly" in self.myopts):
10285 if "distlocks" not in features:
10286 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10287 portage.writemsg(red("!!!")+" parallel-fetching " + \
10288 "requires the distlocks feature enabled"+"\n",
10290 portage.writemsg(red("!!!")+" you have it disabled, " + \
10291 "thus parallel-fetching is being disabled"+"\n",
10293 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10294 elif len(mergelist) > 1:
10295 self._parallel_fetch = True
10297 if self._parallel_fetch:
10298 # clear out existing fetch log if it exists
10300 open(self._fetch_log, 'w')
10301 except EnvironmentError:
10304 self._running_portage = None
10305 portage_match = self._running_root.trees["vartree"].dbapi.match(
10306 portage.const.PORTAGE_PACKAGE_ATOM)
10308 cpv = portage_match.pop()
10309 self._running_portage = self._pkg(cpv, "installed",
10310 self._running_root, installed=True)
	def _poll(self, timeout=None):
		# Delegate polling to the PollScheduler base class.
		# NOTE(review): a line between the def and this call is not
		# visible in this view -- confirm against the full source.
		PollScheduler._poll(self, timeout=timeout)
10316 def _set_max_jobs(self, max_jobs):
10317 self._max_jobs = max_jobs
10318 self._task_queues.jobs.max_jobs = max_jobs
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@returns: True if background mode is enabled, False otherwise.
		"""
		# Background mode requires parallel jobs or --quiet, and is
		# vetoed by any option in _opts_no_background.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		# Interactive packages need a terminal, so force output to stdio
		# and a single job when any are present.
		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			# NOTE(review): initialization of msg is not visible in
			# this view -- confirm against the full source.
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				if pkg.root != "/":
					pkg_str += " for " + pkg.root
				msg.append(pkg_str)
			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)
		# The status display stays quiet when not in background mode,
		# or when --quiet was given without --verbose.
		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)
		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
	def _get_interactive_tasks(self):
		"""
		Return the list of merge tasks whose PROPERTIES include
		"interactive".
		"""
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		interactive_tasks = []
		for task in self._mergelist:
			# Only ebuild/binary merges can be interactive.
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
			# NOTE(review): a continue and a try: opener are not visible
			# in this view -- confirm against the full source.
			properties = flatten(use_reduce(paren_reduce(
				task.metadata["PROPERTIES"]), uselist=task.use.enabled))
			except portage.exception.InvalidDependString, e:
				# A malformed PROPERTIES string is an internal error since
				# it should have been caught during dependency calculation.
				show_invalid_depstring_notice(task,
					task.metadata["PROPERTIES"], str(e))
				raise self._unknown_internal_error()
			if "interactive" in properties:
				interactive_tasks.append(task)
		return interactive_tasks
	def _set_digraph(self, digraph):
		"""
		Install the dependency graph, or discard it when it will not
		be used (no deps, or no parallelism).
		"""
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# The graph is only needed for parallel scheduling decisions.
			self._digraph = None
			# NOTE(review): an early return appears to be missing here
			# in this view -- confirm against the full source.
		self._digraph = digraph
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
10395 def _find_system_deps(self):
10397 Find system packages and their deep runtime dependencies. Before being
10398 merged, these packages go to merge_wait_queue, to be merged when no
10399 other packages are building.
10401 deep_system_deps = self._deep_system_deps
10402 deep_system_deps.clear()
10403 deep_system_deps.update(
10404 _find_deep_system_runtime_deps(self._digraph))
10405 deep_system_deps.difference_update([pkg for pkg in \
10406 deep_system_deps if pkg.operation != "merge"])
	def _prune_digraph(self):
		"""
		Prune any root nodes that are irrelevant.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		# NOTE(review): the enclosing loop of this pruning pass is not
		# visible in this view -- confirm against the full source.
		for node in graph.root_nodes():
			# Root nodes that are not packages, are installed no-ops, or
			# have already completed contribute nothing to scheduling.
			if not isinstance(node, Package) or \
				(node.installed and node.operation == "nomerge") or \
				node in completed_tasks:
				removed_nodes.add(node)
		graph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
	def _prevent_builddir_collisions(self):
		"""
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of its build dir locks from
		the main process.
		"""
		# cpv_map maps a cpv to the packages already seen with that cpv;
		# NOTE(review): its initialization is not visible in this view.
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
			# Serialize same-cpv merges via buildtime edges.
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# First positional argument, when given, is the failure status.
			# NOTE(review): a guard for empty pargs and a class-level
			# default are not visible in this view -- confirm.
			self.status = pargs[0]
10463 def _schedule_fetch(self, fetcher):
10465 Schedule a fetcher on the fetch queue, in order to
10466 serialize access to the fetch log.
10468 self._task_queues.fetch.addFront(fetcher)
10470 def _schedule_setup(self, setup_phase):
10472 Schedule a setup phase on the merge queue, in order to
10473 serialize unsandboxed access to the live filesystem.
10475 self._task_queues.merge.addFront(setup_phase)
10478 def _schedule_unpack(self, unpack_phase):
10480 Schedule an unpack phase on the unpack queue, in order
10481 to serialize $DISTDIR access for live ebuilds.
10483 self._task_queues.unpack.add(unpack_phase)
10485 def _find_blockers(self, new_pkg):
10487 Returns a callable which should be called only when
10488 the vdb lock has been acquired.
10490 def get_blockers():
10491 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10492 return get_blockers
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		"""
		Collect dblink instances for installed packages that block
		new_pkg. Caller is responsible for vdb locking unless
		acquire_lock is set.
		"""
		if self._opts_ignore_blockers.intersection(self.myopts):
		# NOTE(review): the early return and a gc.collect() call are not
		# visible in this view -- confirm against the full source.
		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).
		blocker_db = self._blocker_db[new_pkg.root]
		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# Same-slot and same-cpv packages are replaced, not blocked;
			# NOTE(review): the continue statements are not visible here.
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))
		return blocker_dblinks
10522 def _dblink_pkg(self, pkg_dblink):
10523 cpv = pkg_dblink.mycpv
10524 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10525 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10526 installed = type_name == "installed"
10527 return self._pkg(cpv, type_name, root_config, installed=installed)
	def _append_to_log_path(self, log_path, msg):
		# Append msg to the given log file.
		# NOTE(review): the write/close (likely try/finally) logic is not
		# visible in this view -- confirm against the full source.
		f = open(log_path, 'a')
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		"""
		elog callback for dblink: route elog messages either to the
		terminal or, in background mode, to the package's log file.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background
		# NOTE(review): initialization of log_file/out and the message
		# loop are not visible in this view -- confirm.
		if background and log_path is not None:
			log_file = open(log_path, 'a')
		func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
		if log_file is not None:
	def _dblink_emerge_log(self, msg):
		# Forward emerge-log messages from dblink to the scheduler's logger.
		self._logger.log(msg)
	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		"""
		Display callback for dblink: show the message on the terminal
		and/or append it to the package's log file.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background
		if log_path is None:
			# No log file: print unless we're in background mode and the
			# message is below warning level.
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
		# NOTE(review): an else: branch with a background check appears
		# to be missing here in this view -- confirm.
		portage.util.writemsg_level(msg,
			level=level, noiselevel=noiselevel)
		self._append_to_log_path(log_path, msg)
10571 def _dblink_ebuild_phase(self,
10572 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10574 Using this callback for merge phases allows the scheduler
10575 to run while these phases execute asynchronously, and allows
10576 the scheduler control output handling.
10579 scheduler = self._sched_iface
10580 settings = pkg_dblink.settings
10581 pkg = self._dblink_pkg(pkg_dblink)
10582 background = self._background
10583 log_path = settings.get("PORTAGE_LOG_FILE")
10585 ebuild_phase = EbuildPhase(background=background,
10586 pkg=pkg, phase=phase, scheduler=scheduler,
10587 settings=settings, tree=pkg_dblink.treetype)
10588 ebuild_phase.start()
10589 ebuild_phase.wait()
10591 return ebuild_phase.returncode
	def _generate_digests(self):
		"""
		Generate digests if necessary for --digests or FEATURES=digest.
		In order to avoid interference, this must be done before parallel
		tasks are started.
		"""
		# Nothing to digest when only fetching.
		if '--fetchonly' in self.myopts:
		digest = '--digest' in self.myopts
		for pkgsettings in self.pkgsettings.itervalues():
			if 'digest' in pkgsettings.features:
		# NOTE(review): early returns and digest flag updates are not
		# visible in this view -- confirm against the full source.
		for x in self._mergelist:
			# Only ebuilds that will actually be merged need digests.
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
			pkgsettings = self.pkgsettings[x.root]
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv)
			if not ebuild_path:
				# NOTE(review): the writemsg_level call opener for this
				# error message is not visible here.
				"!!! Could not locate ebuild for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not portage.digestgen([], pkgsettings, myportdb=portdb):
				"!!! Unable to generate manifest for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:
		# NOTE(review): the early return for the guard above is not
		# visible in this view -- confirm against the full source.
		shown_verifying_msg = False
		# Clone per-root configs with PORTAGE_QUIET so digestcheck does
		# not spam the terminal.
		quiet_settings = {}
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config
		for x in self._mergelist:
			# Only ebuilds have manifests to check.
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":
			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")
			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
10673 def _add_prefetchers(self):
10675 if not self._parallel_fetch:
10678 if self._parallel_fetch:
10679 self._status_msg("Starting parallel fetch")
10681 prefetchers = self._prefetchers
10682 getbinpkg = "--getbinpkg" in self.myopts
10684 # In order to avoid "waiting for lock" messages
10685 # at the beginning, which annoy users, never
10686 # spawn a prefetcher for the first package.
10687 for pkg in self._mergelist[1:]:
10688 prefetcher = self._create_prefetcher(pkg)
10689 if prefetcher is not None:
10690 self._task_queues.fetch.add(prefetcher)
10691 prefetchers[pkg] = prefetcher
	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable
		"""
		# NOTE(review): initialization of prefetcher to None and the
		# return statement are not visible in this view -- confirm.
		if not isinstance(pkg, Package):
		elif pkg.type_name == "ebuild":
			# Fetch-only ebuild run, logging to the shared fetch log.
			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)
		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):
			# Remote binary package download.
			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)
	def _is_restart_scheduled(self):
		"""
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.
		@returns: True if a restart is scheduled, False otherwise.
		"""
		if self._opts_no_restart.intersection(self.myopts):
		# NOTE(review): the return statements for both branches and the
		# loop result are not visible in this view -- confirm.
		mergelist = self._mergelist
		for i, pkg in enumerate(mergelist):
			# A portage upgrade anywhere but last in the list forces a
			# restart after it is merged.
			if self._is_restart_necessary(pkg) and \
				i != len(mergelist) - 1:
	def _is_restart_necessary(self, pkg):
		"""
		@return: True if merging the given package
			requires restart, False otherwise.
		"""
		# Figure out if we need a restart.
		# NOTE(review): the fall-through return statements are not
		# visible in this view -- confirm against the full source.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			if self._running_portage:
				# Only a version change requires a restart.
				return pkg.cpv != self._running_portage.cpv
	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.
		"""
		# NOTE(review): early returns for the three guards below are not
		# visible in this view -- confirm against the full source.
		if self._opts_no_restart.intersection(self.myopts):
		if not self._is_restart_necessary(pkg):
		if pkg == self._mergelist[-1]:
		self._main_loop_cleanup()
		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts
		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
			"portage version.")
		# Drop the just-merged package from the resume list before exec.
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				# NOTE(review): the flag-vs-valued-option branch between
				# these two appends is not visible here -- confirm.
				mynewargv.append(myopt)
				mynewargv.append(myopt +"="+ str(myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
		# NOTE(review): this is the body of the merge() method; its def
		# line and many statements are elided in this view -- confirm
		# structure against the full source before relying on it.
		if "--resume" in self.myopts:
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")
		self._save_resume_list()
		self._background = self._background_mode()
		except self._unknown_internal_error:
		# Prepare per-root settings before merging begins.
		for root in self.trees:
			root_config = self.trees[root]["root_config"]
			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()
			if self._background:
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()
			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)
		rval = self._generate_digests()
		if rval != os.EX_OK:
		rval = self._check_manifests()
		if rval != os.EX_OK:
		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs
		# --keep-going retry loop: drop failed packages from the resume
		# list and recalculate until nothing is left or a stop condition
		# is hit.  NOTE(review): loop header elided in this view.
		rval = self._merge()
		if rval == os.EX_OK or fetchonly or not keep_going:
		if "resume" not in mtimedb:
		mergelist = self._mtimedb["resume"].get("mergelist")
		if not failed_pkgs:
		for failed_pkg in failed_pkgs:
			mergelist.remove(list(failed_pkg.pkg))
		self._failed_pkgs_all.extend(failed_pkgs)
		if not self._calc_resume_list():
		clear_caches(self.trees)
		if not self._mergelist:
		self._save_resume_list()
		self._pkg_count.curval = 0
		self._pkg_count.maxval = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._status_display.maxval = self._pkg_count.maxval
		self._logger.log(" *** Finished. Cleaning up...")
		self._failed_pkgs_all.extend(failed_pkgs)
		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show its
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			build_dir = failed_pkg.build_dir
			log_paths = [failed_pkg.build_log]
			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				log_file = open(log_path)
			if log_file is not None:
				for line in log_file:
					writemsg_level(line, noiselevel=-1)
				failure_log_shown = True
		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()
		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:
			# Replay captured die messages, grouped per package and phase.
			printer = portage.output.EOutput()
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))
		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
		# Summarize all failures when more than one package failed, or
		# --keep-going accumulated failures.
		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and "--keep-going" in self.myopts):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
				# NOTE(review): an else: branch appears elided here.
				msg = "The following package has " + \
					"failed to build or install:"
			prefix = bad(" * ")
			writemsg(prefix + "\n", noiselevel=-1)
			from textwrap import wrap
			for line in wrap(msg, 72):
				writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)
			for failed_pkg in self._failed_pkgs_all:
				writemsg("%s\t%s\n" % (prefix,
					colorize("INFORM", str(failed_pkg.pkg))),
				writemsg(prefix + "\n", noiselevel=-1)
	def _elog_listener(self, mysettings, key, logentries, fulltext):
		# Capture ERROR-level elog entries so they can be shown again
		# after the merge list finishes.
		errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
		# NOTE(review): a guard for empty errors is not visible in this
		# view -- confirm against the full source.
		self._failed_pkgs_die_msgs.append(
			(mysettings, key, errors))
	def _locate_failure_log(self, failed_pkg):
		"""
		Return the path of the failed package's build log, when one
		exists and is non-empty.
		"""
		build_dir = failed_pkg.build_dir
		log_paths = [failed_pkg.build_log]
		for log_path in log_paths:
			# NOTE(review): existence guards, error handling and the
			# return statements are not visible in this view -- confirm.
			log_size = os.stat(log_path).st_size
	def _add_packages(self):
		# Seed the scheduler's package queue from the merge list;
		# blockers are tracked separately.
		pkg_queue = self._pkg_queue
		for pkg in self._mergelist:
			if isinstance(pkg, Package):
				pkg_queue.append(pkg)
			elif isinstance(pkg, Blocker):
			# NOTE(review): the Blocker branch body is not visible in
			# this view -- confirm against the full source.
	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
		"""
		graph = self._digraph
		pkg = merge.merge.pkg
		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root != '/':
		# NOTE(review): graph guard and early return are not visible in
		# this view -- confirm against the full source.
		completed_tasks = self._completed_tasks
		unsatisfied = self._unsatisfied_system_deps
		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			# NOTE(review): the return values of this predicate are not
			# visible in this view -- confirm.
			if isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post):
		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for child in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
			if child.operation == 'merge' and \
				child not in completed_tasks:
				unsatisfied.add(child)
11058 def _merge_wait_exit_handler(self, task):
11059 self._merge_wait_scheduled.remove(task)
11060 self._merge_exit(task)
	def _merge_exit(self, merge):
		# Common completion path for PackageMerge tasks.
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		# Count only successful merges of new packages toward progress.
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
		# NOTE(review): a trailing statement (likely a reschedule call)
		# is not visible in this view -- confirm against the full source.
	def _do_merge_exit(self, merge):
		"""
		Record the outcome of a completed merge: track failures, mark
		tasks complete, and keep the resume list current.
		"""
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")
			# NOTE(review): at least one keyword argument of _failed_pkg
			# is not visible in this view -- confirm.
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
			self._status_display.failed = len(self._failed_pkgs)
			# NOTE(review): an early return after the failure path is not
			# visible in this view -- confirm.
		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			uninst_hash_key = \
				("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)
		self._restart_if_necessary(pkg)
		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
	def _build_exit(self, build):
		"""
		Exit listener for build tasks: on success queue the merge
		(deferring deep system deps), on failure record the failure.
		"""
		if build.returncode == os.EX_OK:
			merge = PackageMerge(merge=build)
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			# NOTE(review): an else: appears elided here; the following
			# three lines schedule an ordinary merge -- confirm.
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
			# NOTE(review): the failure branch header (else/elif) is not
			# visible in this view -- confirm against the full source.
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
			self._status_display.failed = len(self._failed_pkgs)
		self._deallocate_config(build.settings)
		self._status_display.running = self._jobs
	def _extract_exit(self, build):
		# Binary package extraction completes the same way a build does.
		self._build_exit(build)
11144 def _task_complete(self, pkg):
11145 self._completed_tasks.add(pkg)
11146 self._unsatisfied_system_deps.discard(pkg)
11147 self._choose_pkg_return_early = False
		# NOTE(review): this is the body of the _merge() method; its def
		# line and try/finally structure are elided in this view.
		self._add_prefetchers()
		self._add_packages()
		pkg_queue = self._pkg_queue
		failed_pkgs = self._failed_pkgs
		# Silence lock messages and install the elog listener while the
		# main loop runs; both are restored below.
		portage.locks._quiet = self._background
		portage.elog._emerge_elog_listener = self._elog_listener
		self._main_loop_cleanup()
		portage.locks._quiet = False
		portage.elog._emerge_elog_listener = None
		# Report the last failure's exit code.
		rval = failed_pkgs[-1].returncode
11170 def _main_loop_cleanup(self):
11171 del self._pkg_queue[:]
11172 self._completed_tasks.clear()
11173 self._deep_system_deps.clear()
11174 self._unsatisfied_system_deps.clear()
11175 self._choose_pkg_return_early = False
11176 self._status_display.reset()
11177 self._digraph = None
11178 self._task_queues.fetch.clear()
	def _choose_pkg(self):
		"""
		Choose a task that has all it's dependencies satisfied.
		"""
		if self._choose_pkg_return_early:
		# NOTE(review): the early return and chosen_pkg initialization
		# are not visible in this view -- confirm against the full source.
		if self._digraph is None:
			# Without a graph, FIFO order is used; memoize the early
			# return while jobs are running under --nodeps parallelism.
			if (self._jobs or self._task_queues.merge) and \
				not ("--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)):
				self._choose_pkg_return_early = True
			return self._pkg_queue.pop(0)
		if not (self._jobs or self._task_queues.merge):
			# Nothing running, so no dependency conflicts are possible.
			return self._pkg_queue.pop(0)
		self._prune_digraph()
		# Pick the first queued package that does not depend on any
		# scheduled merge; packages later in the queue are ignored.
		later = set(self._pkg_queue)
		for pkg in self._pkg_queue:
			if not self._dependent_on_scheduled_merges(pkg, later):
		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)
		if chosen_pkg is None:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			self._choose_pkg_return_early = True
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given packages deep dependencies
		to see if it contains any scheduled merges.
		@param pkg: a package to check dependencies for
		@param later: packages for which dependence should be ignored
			since they will be merged later than pkg anyway and therefore
			delaying the merge of pkg will not result in a more optimal
			merge order
		@returns: True if the package is dependent, False otherwise.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks
		# Depth-first traversal over pkg's dependency subgraph.
		# NOTE(review): the while loop header, the result flag, continue,
		# break and return statements are not visible in this view.
		traversed_nodes = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		node_stack = direct_deps
		direct_deps = frozenset(direct_deps)
		node = node_stack.pop()
		if node in traversed_nodes:
		traversed_nodes.add(node)
		# A node is harmless when it is an installed no-op, an indirect
		# uninstall, already completed, or scheduled later anyway.
		if not ((node.installed and node.operation == "nomerge") or \
			(node.operation == "uninstall" and \
			node not in direct_deps) or \
			node in completed_tasks or \
		node_stack.extend(graph.child_nodes(node))
	def _allocate_config(self, root):
		"""
		Allocate a unique config instance for a task in order
		to prevent interference between parallel tasks.
		"""
		# Reuse a pooled instance when available; otherwise clone.
		# NOTE(review): an else: between these two assignments appears
		# elided in this view -- confirm against the full source.
		if self._config_pool[root]:
			temp_settings = self._config_pool[root].pop()
			temp_settings = portage.config(clone=self.pkgsettings[root])
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
		# performance reasons, call it here to make sure all settings from the
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
		temp_settings.reload()
		temp_settings.reset()
		return temp_settings
11275 def _deallocate_config(self, settings):
11276 self._config_pool[settings["ROOT"]].append(settings)
	def _main_loop(self):
		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)
		merge_queue = self._task_queues.merge
		# Drive scheduling until nothing remains to do, polling for
		# events while jobs are outstanding.
		# NOTE(review): the poll/drain bodies of these conditionals are
		# not visible in this view -- confirm against the full source.
		while self._schedule():
			if self._poll_event_handlers:
		if not (self._jobs or merge_queue):
			if self._poll_event_handlers:
11299 def _keep_scheduling(self):
11300 return bool(self._pkg_queue and \
11301 not (self._failed_pkgs and not self._build_opts.fetchonly))
	def _schedule_tasks(self):
		"""
		One scheduling pass: flush waiting merges when idle, start new
		tasks, and decide whether scheduling should continue.
		"""
		# When the number of jobs drops to zero, process all waiting merges.
		if not self._jobs and self._merge_wait_queue:
			for task in self._merge_wait_queue:
				task.addExitListener(self._merge_wait_exit_handler)
				self._task_queues.merge.add(task)
			self._status_display.merges = len(self._task_queues.merge)
			self._merge_wait_scheduled.extend(self._merge_wait_queue)
			del self._merge_wait_queue[:]
		self._schedule_tasks_imp()
		self._status_display.display()
		# NOTE(review): the state-change accumulation around this queue
		# loop is not visible in this view -- confirm.
		for q in self._task_queues.values():
		# Cancel prefetchers if they're the only reason
		# the main poll loop is still running.
		if self._failed_pkgs and not self._build_opts.fetchonly and \
			not (self._jobs or self._task_queues.merge) and \
			self._task_queues.fetch:
			self._task_queues.fetch.clear()
			self._schedule_tasks_imp()
			self._status_display.display()
		return self._keep_scheduling()
	def _job_delay(self):
		"""
		@returns: True if job scheduling should be delayed, False otherwise.
		"""
		# Rate-limit job starts while under a load-average constraint,
		# backing off exponentially with the number of running jobs.
		# NOTE(review): the return statements are not visible in this
		# view -- confirm against the full source.
		if self._jobs and self._max_load is not None:
			current_time = time.time()
			delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
			if delay > self._job_delay_max:
				delay = self._job_delay_max
			if (current_time - self._previous_job_start_time) < delay:
	def _schedule_tasks_imp(self):
		"""
		@returns: True if state changed, False otherwise.
		"""
		# NOTE(review): the enclosing loop, state_change accumulator and
		# several branch headers are not visible in this view -- confirm
		# structure against the full source.
		if not self._keep_scheduling():
			return bool(state_change)
		# Hold off when a choice was memoized, waiting merges are
		# scheduled, system deps are unsatisfied, or limits apply.
		if self._choose_pkg_return_early or \
			self._merge_wait_scheduled or \
			(self._jobs and self._unsatisfied_system_deps) or \
			not self._can_add_job() or \
			return bool(state_change)
		pkg = self._choose_pkg()
		return bool(state_change)
		if not pkg.installed:
			self._pkg_count.curval += 1
		task = self._task(pkg)
		# Installed packages go straight to the merge queue.
		merge = PackageMerge(merge=task)
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)
		# Binary packages are extracted on the jobs queue.
		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._extract_exit)
		self._task_queues.jobs.add(task)
		# Ebuilds are built on the jobs queue.
		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._build_exit)
		self._task_queues.jobs.add(task)
		return bool(state_change)
	def _task(self, pkg):
		"""
		Build a MergeListItem for the given package, resolving the
		installed package it will replace (if any).
		"""
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = vardb.match(pkg.slot_atom)
			# NOTE(review): a guard for an empty match list appears
			# elided here -- confirm against the full source.
			previous_cpv = previous_cpv.pop()
			pkg_to_replace = self._pkg(previous_cpv,
				"installed", pkg.root_config, installed=True)
		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)
		# NOTE(review): the return of task is not visible in this view.
11434 def _failed_pkg_msg(self, failed_pkg, action, preposition):
11435 pkg = failed_pkg.pkg
11436 msg = "%s to %s %s" % \
11437 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11438 if pkg.root != "/":
11439 msg += " %s %s" % (preposition, pkg.root)
11441 log_path = self._locate_failure_log(failed_pkg)
11442 if log_path is not None:
11443 msg += ", Log file:"
11444 self._status_msg(msg)
11446 if log_path is not None:
11447 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the responsibility of generating \r and \n control characters,
		to guarantee that lines are created or erased when necessary and
		appropriately.

		@type msg: str
		@param msg: a brief status message (no newlines allowed)
		"""
		# In non-background mode, start a fresh line first so the message
		# is not appended to whatever is currently on the status line.
		if not self._background:
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
	def _save_resume_list(self):
		"""
		Record the current merge list in mtimedb["resume"] so that an
		interrupted operation can be resumed with --resume.

		Do this before verifying the ebuild Manifests since it might
		be possible for the user to use --resume --skipfirst get past
		a non-essential package with a broken digest.
		"""
		mtimedb = self._mtimedb
		# Only actual merges are resumable; "uninstall" and "nomerge"
		# entries are dropped. Each Package is stored as a plain list
		# so that it serializes cleanly into the mtime database.
		mtimedb["resume"]["mergelist"] = [list(x) \
			for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"]
11477 def _calc_resume_list(self):
11479 Use the current resume list to calculate a new one,
11480 dropping any packages with unsatisfied deps.
11482 @returns: True if successful, False otherwise.
11484 print colorize("GOOD", "*** Resuming merge...")
11486 if self._show_list():
11487 if "--tree" in self.myopts:
11488 portage.writemsg_stdout("\n" + \
11489 darkgreen("These are the packages that " + \
11490 "would be merged, in reverse order:\n\n"))
11493 portage.writemsg_stdout("\n" + \
11494 darkgreen("These are the packages that " + \
11495 "would be merged, in order:\n\n"))
11497 show_spinner = "--quiet" not in self.myopts and \
11498 "--nodeps" not in self.myopts
11501 print "Calculating dependencies ",
11503 myparams = create_depgraph_params(self.myopts, None)
11507 success, mydepgraph, dropped_tasks = resume_depgraph(
11508 self.settings, self.trees, self._mtimedb, self.myopts,
11509 myparams, self._spinner)
11510 except depgraph.UnsatisfiedResumeDep, exc:
11511 # rename variable to avoid python-3.0 error:
11512 # SyntaxError: can not delete variable 'e' referenced in nested
11515 mydepgraph = e.depgraph
11516 dropped_tasks = set()
11519 print "\b\b... done!"
11522 def unsatisfied_resume_dep_msg():
11523 mydepgraph.display_problems()
11524 out = portage.output.EOutput()
11525 out.eerror("One or more packages are either masked or " + \
11526 "have missing dependencies:")
11529 show_parents = set()
11530 for dep in e.value:
11531 if dep.parent in show_parents:
11533 show_parents.add(dep.parent)
11534 if dep.atom is None:
11535 out.eerror(indent + "Masked package:")
11536 out.eerror(2 * indent + str(dep.parent))
11539 out.eerror(indent + str(dep.atom) + " pulled in by:")
11540 out.eerror(2 * indent + str(dep.parent))
11542 msg = "The resume list contains packages " + \
11543 "that are either masked or have " + \
11544 "unsatisfied dependencies. " + \
11545 "Please restart/continue " + \
11546 "the operation manually, or use --skipfirst " + \
11547 "to skip the first package in the list and " + \
11548 "any other packages that may be " + \
11549 "masked or have missing dependencies."
11550 for line in textwrap.wrap(msg, 72):
11552 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11555 if success and self._show_list():
11556 mylist = mydepgraph.altlist()
11558 if "--tree" in self.myopts:
11560 mydepgraph.display(mylist, favorites=self._favorites)
11563 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11565 mydepgraph.display_problems()
11567 mylist = mydepgraph.altlist()
11568 mydepgraph.break_refs(mylist)
11569 mydepgraph.break_refs(dropped_tasks)
11570 self._mergelist = mylist
11571 self._set_digraph(mydepgraph.schedulerGraph())
11574 for task in dropped_tasks:
11575 if not (isinstance(task, Package) and task.operation == "merge"):
11578 msg = "emerge --keep-going:" + \
11580 if pkg.root != "/":
11581 msg += " for %s" % (pkg.root,)
11582 msg += " dropped due to unsatisfied dependency."
11583 for line in textwrap.wrap(msg, msg_width):
11584 eerror(line, phase="other", key=pkg.cpv)
11585 settings = self.pkgsettings[pkg.root]
11586 # Ensure that log collection from $T is disabled inside
11587 # elog_process(), since any logs that might exist are
11589 settings.pop("T", None)
11590 portage.elog.elog_process(pkg.cpv, settings)
11591 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11595 def _show_list(self):
11596 myopts = self.myopts
11597 if "--quiet" not in myopts and \
11598 ("--ask" in myopts or "--tree" in myopts or \
11599 "--verbose" in myopts):
11603 def _world_atom(self, pkg):
11605 Add the package to the world file, but only if
11606 it's supposed to be added. Otherwise, do nothing.
11609 if set(("--buildpkgonly", "--fetchonly",
11611 "--oneshot", "--onlydeps",
11612 "--pretend")).intersection(self.myopts):
11615 if pkg.root != self.target_root:
11618 args_set = self._args_set
11619 if not args_set.findAtomForPackage(pkg):
11622 logger = self._logger
11623 pkg_count = self._pkg_count
11624 root_config = pkg.root_config
11625 world_set = root_config.sets["world"]
11626 world_locked = False
11627 if hasattr(world_set, "lock"):
11629 world_locked = True
11632 if hasattr(world_set, "load"):
11633 world_set.load() # maybe it's changed on disk
11635 atom = create_world_atom(pkg, args_set, root_config)
11637 if hasattr(world_set, "add"):
11638 self._status_msg(('Recording %s in "world" ' + \
11639 'favorites file...') % atom)
11640 logger.log(" === (%s of %s) Updating world file (%s)" % \
11641 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11642 world_set.add(atom)
11644 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11645 (atom,), level=logging.WARN, noiselevel=-1)
11650 def _pkg(self, cpv, type_name, root_config, installed=False):
11652 Get a package instance from the cache, or create a new
11653 one if necessary. Raises KeyError from aux_get if it
11654 failures for some reason (package does not exist or is
11657 operation = "merge"
11659 operation = "nomerge"
11661 if self._digraph is not None:
11662 # Reuse existing instance when available.
11663 pkg = self._digraph.get(
11664 (type_name, root_config.root, cpv, operation))
11665 if pkg is not None:
11668 tree_type = depgraph.pkg_tree_map[type_name]
11669 db = root_config.trees[tree_type].dbapi
11670 db_keys = list(self.trees[root_config.root][
11671 tree_type].dbapi._aux_cache_keys)
11672 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11673 pkg = Package(cpv=cpv, metadata=metadata,
11674 root_config=root_config, installed=installed)
11675 if type_name == "ebuild":
11676 settings = self.pkgsettings[root_config.root]
11677 settings.setcpv(pkg)
11678 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11679 pkg.metadata['CHOST'] = settings.get('CHOST', '')
11683 class MetadataRegen(PollScheduler):
11685 def __init__(self, portdb, cp_iter=None, consumer=None,
11686 max_jobs=None, max_load=None):
11687 PollScheduler.__init__(self)
11688 self._portdb = portdb
11689 self._global_cleanse = False
11690 if cp_iter is None:
11691 cp_iter = self._iter_every_cp()
11692 # We can globally cleanse stale cache only if we
11693 # iterate over every single cp.
11694 self._global_cleanse = True
11695 self._cp_iter = cp_iter
11696 self._consumer = consumer
11698 if max_jobs is None:
11701 self._max_jobs = max_jobs
11702 self._max_load = max_load
11703 self._sched_iface = self._sched_iface_class(
11704 register=self._register,
11705 schedule=self._schedule_wait,
11706 unregister=self._unregister)
11708 self._valid_pkgs = set()
11709 self._cp_set = set()
11710 self._process_iter = self._iter_metadata_processes()
11711 self.returncode = os.EX_OK
11712 self._error_count = 0
11714 def _iter_every_cp(self):
11715 every_cp = self._portdb.cp_all()
11716 every_cp.sort(reverse=True)
11719 yield every_cp.pop()
11723 def _iter_metadata_processes(self):
11724 portdb = self._portdb
11725 valid_pkgs = self._valid_pkgs
11726 cp_set = self._cp_set
11727 consumer = self._consumer
11729 for cp in self._cp_iter:
11731 portage.writemsg_stdout("Processing %s\n" % cp)
11732 cpv_list = portdb.cp_list(cp)
11733 for cpv in cpv_list:
11734 valid_pkgs.add(cpv)
11735 ebuild_path, repo_path = portdb.findname2(cpv)
11736 metadata, st, emtime = portdb._pull_valid_cache(
11737 cpv, ebuild_path, repo_path)
11738 if metadata is not None:
11739 if consumer is not None:
11740 consumer(cpv, ebuild_path,
11741 repo_path, metadata)
11744 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11745 ebuild_mtime=emtime,
11746 metadata_callback=portdb._metadata_callback,
11747 portdb=portdb, repo_path=repo_path,
11748 settings=portdb.doebuild_settings)
11752 portdb = self._portdb
11753 from portage.cache.cache_errors import CacheError
11756 while self._schedule():
11762 if self._global_cleanse:
11763 for mytree in portdb.porttrees:
11765 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11766 except CacheError, e:
11767 portage.writemsg("Error listing cache entries for " + \
11768 "'%s': %s, continuing...\n" % (mytree, e),
11774 cp_set = self._cp_set
11775 cpv_getkey = portage.cpv_getkey
11776 for mytree in portdb.porttrees:
11778 dead_nodes[mytree] = set(cpv for cpv in \
11779 portdb.auxdb[mytree].iterkeys() \
11780 if cpv_getkey(cpv) in cp_set)
11781 except CacheError, e:
11782 portage.writemsg("Error listing cache entries for " + \
11783 "'%s': %s, continuing...\n" % (mytree, e),
11790 for y in self._valid_pkgs:
11791 for mytree in portdb.porttrees:
11792 if portdb.findname2(y, mytree=mytree)[0]:
11793 dead_nodes[mytree].discard(y)
11795 for mytree, nodes in dead_nodes.iteritems():
11796 auxdb = portdb.auxdb[mytree]
11800 except (KeyError, CacheError):
11803 def _schedule_tasks(self):
11806 @returns: True if there may be remaining tasks to schedule,
11809 while self._can_add_job():
11811 metadata_process = self._process_iter.next()
11812 except StopIteration:
11816 metadata_process.scheduler = self._sched_iface
11817 metadata_process.addExitListener(self._metadata_exit)
11818 metadata_process.start()
11821 def _metadata_exit(self, metadata_process):
11823 if metadata_process.returncode != os.EX_OK:
11824 self.returncode = 1
11825 self._error_count += 1
11826 self._valid_pkgs.discard(metadata_process.cpv)
11827 portage.writemsg("Error processing %s, continuing...\n" % \
11828 (metadata_process.cpv,), noiselevel=-1)
11830 if self._consumer is not None:
11831 # On failure, still notify the consumer (in this case the metadata
11832 # argument is None).
11833 self._consumer(metadata_process.cpv,
11834 metadata_process.ebuild_path,
11835 metadata_process.repo_path,
11836 metadata_process.metadata)
11840 class UninstallFailure(portage.exception.PortageException):
11842 An instance of this class is raised by unmerge() when
11843 an uninstallation fails.
11846 def __init__(self, *pargs):
11847 portage.exception.PortageException.__init__(self, pargs)
11849 self.status = pargs[0]
11851 def unmerge(root_config, myopts, unmerge_action,
11852 unmerge_files, ldpath_mtimes, autoclean=0,
11853 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11854 scheduler=None, writemsg_level=portage.util.writemsg_level):
11857 clean_world = myopts.get('--deselect') != 'n'
11858 quiet = "--quiet" in myopts
11859 settings = root_config.settings
11860 sets = root_config.sets
11861 vartree = root_config.trees["vartree"]
11862 candidate_catpkgs=[]
11864 xterm_titles = "notitles" not in settings.features
11865 out = portage.output.EOutput()
11867 db_keys = list(vartree.dbapi._aux_cache_keys)
11870 pkg = pkg_cache.get(cpv)
11872 pkg = Package(cpv=cpv, installed=True,
11873 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11874 root_config=root_config,
11875 type_name="installed")
11876 pkg_cache[cpv] = pkg
11879 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11881 # At least the parent needs to exist for the lock file.
11882 portage.util.ensure_dirs(vdb_path)
11883 except portage.exception.PortageException:
11887 if os.access(vdb_path, os.W_OK):
11888 vdb_lock = portage.locks.lockdir(vdb_path)
11889 realsyslist = sets["system"].getAtoms()
11891 for x in realsyslist:
11892 mycp = portage.dep_getkey(x)
11893 if mycp in settings.getvirtuals():
11895 for provider in settings.getvirtuals()[mycp]:
11896 if vartree.dbapi.match(provider):
11897 providers.append(provider)
11898 if len(providers) == 1:
11899 syslist.extend(providers)
11901 syslist.append(mycp)
11903 mysettings = portage.config(clone=settings)
11905 if not unmerge_files:
11906 if unmerge_action == "unmerge":
11908 print bold("emerge unmerge") + " can only be used with specific package names"
11914 localtree = vartree
11915 # process all arguments and add all
11916 # valid db entries to candidate_catpkgs
11918 if not unmerge_files:
11919 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11921 #we've got command-line arguments
11922 if not unmerge_files:
11923 print "\nNo packages to unmerge have been provided.\n"
11925 for x in unmerge_files:
11926 arg_parts = x.split('/')
11927 if x[0] not in [".","/"] and \
11928 arg_parts[-1][-7:] != ".ebuild":
11929 #possible cat/pkg or dep; treat as such
11930 candidate_catpkgs.append(x)
11931 elif unmerge_action in ["prune","clean"]:
11932 print "\n!!! Prune and clean do not accept individual" + \
11933 " ebuilds as arguments;\n skipping.\n"
11936 # it appears that the user is specifying an installed
11937 # ebuild and we're in "unmerge" mode, so it's ok.
11938 if not os.path.exists(x):
11939 print "\n!!! The path '"+x+"' doesn't exist.\n"
11942 absx = os.path.abspath(x)
11943 sp_absx = absx.split("/")
11944 if sp_absx[-1][-7:] == ".ebuild":
11946 absx = "/".join(sp_absx)
11948 sp_absx_len = len(sp_absx)
11950 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11951 vdb_len = len(vdb_path)
11953 sp_vdb = vdb_path.split("/")
11954 sp_vdb_len = len(sp_vdb)
11956 if not os.path.exists(absx+"/CONTENTS"):
11957 print "!!! Not a valid db dir: "+str(absx)
11960 if sp_absx_len <= sp_vdb_len:
11961 # The Path is shorter... so it can't be inside the vdb.
11964 print "\n!!!",x,"cannot be inside "+ \
11965 vdb_path+"; aborting.\n"
11968 for idx in range(0,sp_vdb_len):
11969 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11972 print "\n!!!", x, "is not inside "+\
11973 vdb_path+"; aborting.\n"
11976 print "="+"/".join(sp_absx[sp_vdb_len:])
11977 candidate_catpkgs.append(
11978 "="+"/".join(sp_absx[sp_vdb_len:]))
11981 if (not "--quiet" in myopts):
11983 if settings["ROOT"] != "/":
11984 writemsg_level(darkgreen(newline+ \
11985 ">>> Using system located in ROOT tree %s\n" % \
11988 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11989 not ("--quiet" in myopts):
11990 writemsg_level(darkgreen(newline+\
11991 ">>> These are the packages that would be unmerged:\n"))
11993 # Preservation of order is required for --depclean and --prune so
11994 # that dependencies are respected. Use all_selected to eliminate
11995 # duplicate packages since the same package may be selected by
11998 all_selected = set()
11999 for x in candidate_catpkgs:
12000 # cycle through all our candidate deps and determine
12001 # what will and will not get unmerged
12003 mymatch = vartree.dbapi.match(x)
12004 except portage.exception.AmbiguousPackageName, errpkgs:
12005 print "\n\n!!! The short ebuild name \"" + \
12006 x + "\" is ambiguous. Please specify"
12007 print "!!! one of the following fully-qualified " + \
12008 "ebuild names instead:\n"
12009 for i in errpkgs[0]:
12010 print " " + green(i)
12014 if not mymatch and x[0] not in "<>=~":
12015 mymatch = localtree.dep_match(x)
12017 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
12018 (x, unmerge_action), noiselevel=-1)
12022 {"protected": set(), "selected": set(), "omitted": set()})
12023 mykey = len(pkgmap) - 1
12024 if unmerge_action=="unmerge":
12026 if y not in all_selected:
12027 pkgmap[mykey]["selected"].add(y)
12028 all_selected.add(y)
12029 elif unmerge_action == "prune":
12030 if len(mymatch) == 1:
12032 best_version = mymatch[0]
12033 best_slot = vartree.getslot(best_version)
12034 best_counter = vartree.dbapi.cpv_counter(best_version)
12035 for mypkg in mymatch[1:]:
12036 myslot = vartree.getslot(mypkg)
12037 mycounter = vartree.dbapi.cpv_counter(mypkg)
12038 if (myslot == best_slot and mycounter > best_counter) or \
12039 mypkg == portage.best([mypkg, best_version]):
12040 if myslot == best_slot:
12041 if mycounter < best_counter:
12042 # On slot collision, keep the one with the
12043 # highest counter since it is the most
12044 # recently installed.
12046 best_version = mypkg
12048 best_counter = mycounter
12049 pkgmap[mykey]["protected"].add(best_version)
12050 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12051 if mypkg != best_version and mypkg not in all_selected)
12052 all_selected.update(pkgmap[mykey]["selected"])
12054 # unmerge_action == "clean"
12056 for mypkg in mymatch:
12057 if unmerge_action == "clean":
12058 myslot = localtree.getslot(mypkg)
12060 # since we're pruning, we don't care about slots
12061 # and put all the pkgs in together
12063 if myslot not in slotmap:
12064 slotmap[myslot] = {}
12065 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12067 for mypkg in vartree.dbapi.cp_list(
12068 portage.dep_getkey(mymatch[0])):
12069 myslot = vartree.getslot(mypkg)
12070 if myslot not in slotmap:
12071 slotmap[myslot] = {}
12072 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12074 for myslot in slotmap:
12075 counterkeys = slotmap[myslot].keys()
12076 if not counterkeys:
12079 pkgmap[mykey]["protected"].add(
12080 slotmap[myslot][counterkeys[-1]])
12081 del counterkeys[-1]
12083 for counter in counterkeys[:]:
12084 mypkg = slotmap[myslot][counter]
12085 if mypkg not in mymatch:
12086 counterkeys.remove(counter)
12087 pkgmap[mykey]["protected"].add(
12088 slotmap[myslot][counter])
12090 #be pretty and get them in order of merge:
12091 for ckey in counterkeys:
12092 mypkg = slotmap[myslot][ckey]
12093 if mypkg not in all_selected:
12094 pkgmap[mykey]["selected"].add(mypkg)
12095 all_selected.add(mypkg)
12096 # ok, now the last-merged package
12097 # is protected, and the rest are selected
12098 numselected = len(all_selected)
12099 if global_unmerge and not numselected:
12100 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12103 if not numselected:
12104 portage.writemsg_stdout(
12105 "\n>>> No packages selected for removal by " + \
12106 unmerge_action + "\n")
12110 vartree.dbapi.flush_cache()
12111 portage.locks.unlockdir(vdb_lock)
12113 from portage.sets.base import EditablePackageSet
12115 # generate a list of package sets that are directly or indirectly listed in "world",
12116 # as there is no persistent list of "installed" sets
12117 installed_sets = ["world"]
12122 pos = len(installed_sets)
12123 for s in installed_sets[pos - 1:]:
12126 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12129 installed_sets += candidates
12130 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12133 # we don't want to unmerge packages that are still listed in user-editable package sets
12134 # listed in "world" as they would be remerged on the next update of "world" or the
12135 # relevant package sets.
12136 unknown_sets = set()
12137 for cp in xrange(len(pkgmap)):
12138 for cpv in pkgmap[cp]["selected"].copy():
12142 # It could have been uninstalled
12143 # by a concurrent process.
12146 if unmerge_action != "clean" and \
12147 root_config.root == "/" and \
12148 portage.match_from_list(
12149 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12150 msg = ("Not unmerging package %s since there is no valid " + \
12151 "reason for portage to unmerge itself.") % (pkg.cpv,)
12152 for line in textwrap.wrap(msg, 75):
12154 # adjust pkgmap so the display output is correct
12155 pkgmap[cp]["selected"].remove(cpv)
12156 all_selected.remove(cpv)
12157 pkgmap[cp]["protected"].add(cpv)
12161 for s in installed_sets:
12162 # skip sets that the user requested to unmerge, and skip world
12163 # unless we're unmerging a package set (as the package would be
12164 # removed from "world" later on)
12165 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12169 if s in unknown_sets:
12171 unknown_sets.add(s)
12172 out = portage.output.EOutput()
12173 out.eerror(("Unknown set '@%s' in " + \
12174 "%svar/lib/portage/world_sets") % \
12175 (s, root_config.root))
12178 # only check instances of EditablePackageSet as other classes are generally used for
12179 # special purposes and can be ignored here (and are usually generated dynamically, so the
12180 # user can't do much about them anyway)
12181 if isinstance(sets[s], EditablePackageSet):
12183 # This is derived from a snippet of code in the
12184 # depgraph._iter_atoms_for_pkg() method.
12185 for atom in sets[s].iterAtomsForPackage(pkg):
12186 inst_matches = vartree.dbapi.match(atom)
12187 inst_matches.reverse() # descending order
12189 for inst_cpv in inst_matches:
12191 inst_pkg = _pkg(inst_cpv)
12193 # It could have been uninstalled
12194 # by a concurrent process.
12197 if inst_pkg.cp != atom.cp:
12199 if pkg >= inst_pkg:
12200 # This is descending order, and we're not
12201 # interested in any versions <= pkg given.
12203 if pkg.slot_atom != inst_pkg.slot_atom:
12204 higher_slot = inst_pkg
12206 if higher_slot is None:
12210 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12211 #print colorize("WARN", "but still listed in the following package sets:")
12212 #print " %s\n" % ", ".join(parents)
12213 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12214 print colorize("WARN", "still referenced by the following package sets:")
12215 print " %s\n" % ", ".join(parents)
12216 # adjust pkgmap so the display output is correct
12217 pkgmap[cp]["selected"].remove(cpv)
12218 all_selected.remove(cpv)
12219 pkgmap[cp]["protected"].add(cpv)
12223 numselected = len(all_selected)
12224 if not numselected:
12226 "\n>>> No packages selected for removal by " + \
12227 unmerge_action + "\n")
12230 # Unmerge order only matters in some cases
12234 selected = d["selected"]
12237 cp = portage.cpv_getkey(iter(selected).next())
12238 cp_dict = unordered.get(cp)
12239 if cp_dict is None:
12241 unordered[cp] = cp_dict
12244 for k, v in d.iteritems():
12245 cp_dict[k].update(v)
12246 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12248 for x in xrange(len(pkgmap)):
12249 selected = pkgmap[x]["selected"]
12252 for mytype, mylist in pkgmap[x].iteritems():
12253 if mytype == "selected":
12255 mylist.difference_update(all_selected)
12256 cp = portage.cpv_getkey(iter(selected).next())
12257 for y in localtree.dep_match(cp):
12258 if y not in pkgmap[x]["omitted"] and \
12259 y not in pkgmap[x]["selected"] and \
12260 y not in pkgmap[x]["protected"] and \
12261 y not in all_selected:
12262 pkgmap[x]["omitted"].add(y)
12263 if global_unmerge and not pkgmap[x]["selected"]:
12264 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12266 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12267 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12268 "'%s' is part of your system profile.\n" % cp),
12269 level=logging.WARNING, noiselevel=-1)
12270 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12271 "be damaging to your system.\n\n"),
12272 level=logging.WARNING, noiselevel=-1)
12273 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12274 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12275 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12277 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12279 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12280 for mytype in ["selected","protected","omitted"]:
12282 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12283 if pkgmap[x][mytype]:
12284 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12285 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12286 for pn, ver, rev in sorted_pkgs:
12290 myversion = ver + "-" + rev
12291 if mytype == "selected":
12293 colorize("UNMERGE_WARN", myversion + " "),
12297 colorize("GOOD", myversion + " "), noiselevel=-1)
12299 writemsg_level("none ", noiselevel=-1)
12301 writemsg_level("\n", noiselevel=-1)
12303 writemsg_level("\n", noiselevel=-1)
12305 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12306 " packages are slated for removal.\n")
12307 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12308 " and " + colorize("GOOD", "'omitted'") + \
12309 " packages will not be removed.\n\n")
12311 if "--pretend" in myopts:
12312 #we're done... return
12314 if "--ask" in myopts:
12315 if userquery("Would you like to unmerge these packages?")=="No":
12316 # enter pretend mode for correct formatting of results
12317 myopts["--pretend"] = True
12322 #the real unmerging begins, after a short delay....
12323 if clean_delay and not autoclean:
12324 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12326 for x in xrange(len(pkgmap)):
12327 for y in pkgmap[x]["selected"]:
12328 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12329 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12330 mysplit = y.split("/")
12332 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12333 mysettings, unmerge_action not in ["clean","prune"],
12334 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12335 scheduler=scheduler)
12337 if retval != os.EX_OK:
12338 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12340 raise UninstallFailure(retval)
12343 if clean_world and hasattr(sets["world"], "cleanPackage"):
12344 sets["world"].cleanPackage(vartree.dbapi, y)
12345 emergelog(xterm_titles, " >>> unmerge success: "+y)
12346 if clean_world and hasattr(sets["world"], "remove"):
12347 for s in root_config.setconfig.active:
12348 sets["world"].remove(SETPREFIX+s)
12351 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12353 if os.path.exists("/usr/bin/install-info"):
12354 out = portage.output.EOutput()
12359 inforoot=normpath(root+z)
12360 if os.path.isdir(inforoot):
12361 infomtime = long(os.stat(inforoot).st_mtime)
12362 if inforoot not in prev_mtimes or \
12363 prev_mtimes[inforoot] != infomtime:
12364 regen_infodirs.append(inforoot)
12366 if not regen_infodirs:
12367 portage.writemsg_stdout("\n")
12368 out.einfo("GNU info directory index is up-to-date.")
12370 portage.writemsg_stdout("\n")
12371 out.einfo("Regenerating GNU info directory index...")
12373 dir_extensions = ("", ".gz", ".bz2")
12377 for inforoot in regen_infodirs:
12381 if not os.path.isdir(inforoot) or \
12382 not os.access(inforoot, os.W_OK):
12385 file_list = os.listdir(inforoot)
12387 dir_file = os.path.join(inforoot, "dir")
12388 moved_old_dir = False
12389 processed_count = 0
12390 for x in file_list:
12391 if x.startswith(".") or \
12392 os.path.isdir(os.path.join(inforoot, x)):
12394 if x.startswith("dir"):
12396 for ext in dir_extensions:
12397 if x == "dir" + ext or \
12398 x == "dir" + ext + ".old":
12403 if processed_count == 0:
12404 for ext in dir_extensions:
12406 os.rename(dir_file + ext, dir_file + ext + ".old")
12407 moved_old_dir = True
12408 except EnvironmentError, e:
12409 if e.errno != errno.ENOENT:
12412 processed_count += 1
12413 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12414 existsstr="already exists, for file `"
12416 if re.search(existsstr,myso):
12417 # Already exists... Don't increment the count for this.
12419 elif myso[:44]=="install-info: warning: no info dir entry in ":
12420 # This info file doesn't contain a DIR-header: install-info produces this
12421 # (harmless) warning (the --quiet switch doesn't seem to work).
12422 # Don't increment the count for this.
12425 badcount=badcount+1
12426 errmsg += myso + "\n"
12429 if moved_old_dir and not os.path.exists(dir_file):
12430 # We didn't generate a new dir file, so put the old file
12431 # back where it was originally found.
12432 for ext in dir_extensions:
12434 os.rename(dir_file + ext + ".old", dir_file + ext)
12435 except EnvironmentError, e:
12436 if e.errno != errno.ENOENT:
12440 # Clean dir.old cruft so that they don't prevent
12441 # unmerge of otherwise empty directories.
12442 for ext in dir_extensions:
12444 os.unlink(dir_file + ext + ".old")
12445 except EnvironmentError, e:
12446 if e.errno != errno.ENOENT:
12450 #update mtime so we can potentially avoid regenerating.
12451 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12454 out.eerror("Processed %d info files; %d errors." % \
12455 (icount, badcount))
12456 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12459 out.einfo("Processed %d info files." % (icount,))
12462 def display_news_notification(root_config, myopts):
12463 target_root = root_config.root
12464 trees = root_config.trees
12465 settings = trees["vartree"].settings
12466 portdb = trees["porttree"].dbapi
12467 vardb = trees["vartree"].dbapi
12468 NEWS_PATH = os.path.join("metadata", "news")
12469 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12470 newsReaderDisplay = False
12471 update = "--pretend" not in myopts
12473 for repo in portdb.getRepositories():
12474 unreadItems = checkUpdatedNewsItems(
12475 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12477 if not newsReaderDisplay:
12478 newsReaderDisplay = True
12480 print colorize("WARN", " * IMPORTANT:"),
12481 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12484 if newsReaderDisplay:
12485 print colorize("WARN", " *"),
12486 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12489 def display_preserved_libs(vardbapi):
12492 # Ensure the registry is consistent with existing files.
12493 vardbapi.plib_registry.pruneNonExisting()
12495 if vardbapi.plib_registry.hasEntries():
12497 print colorize("WARN", "!!!") + " existing preserved libs:"
12498 plibdata = vardbapi.plib_registry.getPreservedLibs()
12499 linkmap = vardbapi.linkmap
12502 linkmap_broken = False
12506 except portage.exception.CommandNotFound, e:
12507 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12508 level=logging.ERROR, noiselevel=-1)
12510 linkmap_broken = True
12512 search_for_owners = set()
12513 for cpv in plibdata:
12514 internal_plib_keys = set(linkmap._obj_key(f) \
12515 for f in plibdata[cpv])
12516 for f in plibdata[cpv]:
12517 if f in consumer_map:
12520 for c in linkmap.findConsumers(f):
12521 # Filter out any consumers that are also preserved libs
12522 # belonging to the same package as the provider.
12523 if linkmap._obj_key(c) not in internal_plib_keys:
12524 consumers.append(c)
12526 consumer_map[f] = consumers
12527 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12529 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12531 for cpv in plibdata:
12532 print colorize("WARN", ">>>") + " package: %s" % cpv
12534 for f in plibdata[cpv]:
12535 obj_key = linkmap._obj_key(f)
12536 alt_paths = samefile_map.get(obj_key)
12537 if alt_paths is None:
12539 samefile_map[obj_key] = alt_paths
12542 for alt_paths in samefile_map.itervalues():
12543 alt_paths = sorted(alt_paths)
12544 for p in alt_paths:
12545 print colorize("WARN", " * ") + " - %s" % (p,)
12547 consumers = consumer_map.get(f, [])
12548 for c in consumers[:MAX_DISPLAY]:
12549 print colorize("WARN", " * ") + " used by %s (%s)" % \
12550 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12551 if len(consumers) == MAX_DISPLAY + 1:
12552 print colorize("WARN", " * ") + " used by %s (%s)" % \
12553 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12554 for x in owners.get(consumers[MAX_DISPLAY], [])))
12555 elif len(consumers) > MAX_DISPLAY:
12556 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12557 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.

	@returns: True if messages were shown, False otherwise.
	"""
	messages_shown = False
	try:
		from portage.elog import mod_echo
	except ImportError:
		# happens during downgrade to a version without the module
		pass
	else:
		messages_shown = bool(mod_echo._items)
		mod_echo.finalize()
	return messages_shown
12577 def post_emerge(root_config, myopts, mtimedb, retval):
# End-of-merge-session housekeeping: log exit status, flush elog output,
# update info/config files, show news and preserved-libs warnings.
# NOTE(review): the docstring fragment below says this calls
# sys.exit(retval); the exit itself is on lines elided from this view.
12579 Misc. things to run at the end of a merge session.
12582 Update Config Files
12585 Display preserved libs warnings
12588 @param trees: A dictionary mapping each ROOT to it's package databases
12590 @param mtimedb: The mtimeDB to store data needed across merge invocations
12591 @type mtimedb: MtimeDB class instance
12592 @param retval: Emerge's return value
12596 1. Calls sys.exit(retval)
# Resolve the target ROOT's databases and settings from root_config.
12599 target_root = root_config.root
12600 trees = { target_root : root_config.trees }
12601 vardbapi = trees[target_root]["vartree"].dbapi
12602 settings = vardbapi.settings
12603 info_mtimes = mtimedb["info"]
12605 # Load the most current variables from ${ROOT}/etc/profile.env
12608 settings.regenerate()
12611 config_protect = settings.get("CONFIG_PROTECT","").split()
12612 infodirs = settings.get("INFOPATH","").split(":") + \
12613 settings.get("INFODIR","").split(":")
# Record the session's exit status in the emerge log.
12617 if retval == os.EX_OK:
12618 exit_msg = " *** exiting successfully."
12620 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12621 emergelog("notitles" not in settings.features, exit_msg)
12623 _flush_elog_mod_echo()
# If the installed-package counter hash is unchanged, the vdb was not
# modified by this session, so only the news notification is needed.
12625 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12626 if "--pretend" in myopts or (counter_hash is not None and \
12627 counter_hash == vardbapi._counter_hash()):
12628 display_news_notification(root_config, myopts)
12629 # If vdb state has not changed then there's nothing else to do.
12632 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12633 portage.util.ensure_dirs(vdb_path)
# Hold the vdb lock while regenerating GNU info indexes.
12635 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12636 vdb_lock = portage.locks.lockdir(vdb_path)
12640 if "noinfo" not in settings.features:
12641 chk_updated_info_files(target_root,
12642 infodirs, info_mtimes, retval)
12646 portage.locks.unlockdir(vdb_lock)
12648 chk_updated_cfg_files(target_root, config_protect)
12650 display_news_notification(root_config, myopts)
12651 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12652 display_preserved_libs(vardbapi)
12657 def chk_updated_cfg_files(target_root, config_protect):
# Scan every CONFIG_PROTECT path for pending ._cfg????_* update files
# (via an external `find`) and print a summary telling the user how
# many protected config files need updating.
12659 #number of directories with some protect files in them
12661 for x in config_protect:
12662 x = os.path.join(target_root, x.lstrip(os.path.sep))
12663 if not os.access(x, os.W_OK):
12664 # Avoid Permission denied errors generated
12668 mymode = os.lstat(x).st_mode
12671 if stat.S_ISLNK(mymode):
12672 # We want to treat it like a directory if it
12673 # is a symlink to an existing directory.
12675 real_mode = os.stat(x).st_mode
12676 if stat.S_ISDIR(real_mode):
# Directory: recurse, pruning hidden subdirectories.
12680 if stat.S_ISDIR(mymode):
12681 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
# Single file: os.path.split() supplies the (dir, basename)
# tuple for the two %s placeholders below.
12683 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12684 os.path.split(x.rstrip(os.path.sep))
12685 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12686 a = commands.getstatusoutput(mycommand)
12688 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12690 # Show the error message alone, sending stdout to /dev/null.
12691 os.system(mycommand + " 1>/dev/null")
# -print0 output is NUL-delimited, so split on '\0'.
12693 files = a[1].split('\0')
12694 # split always produces an empty string as the last element
12695 if files and not files[-1]:
12699 print "\n"+colorize("WARN", " * IMPORTANT:"),
12700 if stat.S_ISDIR(mymode):
12701 print "%d config files in '%s' need updating." % \
12704 print "config file '%s' needs updating." % x
12707 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12708 " section of the " + bold("emerge")
12709 print " "+yellow("*")+" man page to learn how to update config files."
12711 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
# Thin wrapper around portage.news.NewsManager: counts unread,
# still-relevant GLEP 42 news items for the given repository.
12714 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12715 Returns the number of unread (yet relevent) items.
12717 @param portdb: a portage tree database
12718 @type portdb: pordbapi
12719 @param vardb: an installed package database
12720 @type vardb: vardbapi
12723 @param UNREAD_PATH:
12729 1. The number of unread but relevant news items.
12732 from portage.news import NewsManager
12733 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12734 return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""
	Splice ``category + "/"`` into an atom just before its first word
	character (i.e. after any leading operator such as ``>=`` or ``=``).

	@param atom: a package atom without a category part
	@param category: the category name to insert
	@returns: the categorized atom, or None if the atom contains no
		word character to anchor the insertion.
	"""
	anchor = re.search(r'\w', atom)
	if anchor is None:
		return None
	pos = anchor.start()
	return "%s%s/%s" % (atom[:pos], category, atom[pos:])
def is_valid_package_atom(x):
	"""
	Validate a package atom, allowing the category to be omitted.

	A categoryless atom is made validatable by splicing in a dummy
	"cat/" category before its first word character, then the result
	is checked with portage.isvalidatom().

	@returns: True if x is a valid (possibly categoryless) atom.
	"""
	if "/" not in x:
		anchor = re.search(r'\w', x)
		if anchor:
			# Insert a dummy category so the atom parser accepts it.
			pos = anchor.start()
			x = x[:pos] + "cat/" + x[pos:]
	return portage.isvalidatom(x)
12752 def show_blocker_docs_link():
# Print a pointer to the Gentoo Handbook section on blocked packages.
12754 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12755 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12757 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12760 def show_mask_docs():
12761 print "For more information, see the MASKED PACKAGES section in the emerge"
12762 print "man page or refer to the Gentoo Handbook."
12764 def action_sync(settings, trees, mtimedb, myopts, myaction):
# Synchronize the Portage tree. Dispatches on the SYNC URI scheme
# (rsync:// or cvs://) or an existing .git checkout, then regenerates
# the metadata cache, applies global updates, and reminds the user to
# upgrade portage itself if a newer version became visible.
12765 xterm_titles = "notitles" not in settings.features
12766 emergelog(xterm_titles, " === sync")
12767 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12768 myportdir = portdb.porttree_root
12769 out = portage.output.EOutput()
12771 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Normalize PORTDIR (strip trailing slash) and make sure it exists.
12773 if myportdir[-1]=="/":
12774 myportdir=myportdir[:-1]
12776 st = os.stat(myportdir)
12780 print ">>>",myportdir,"not found, creating it."
12781 os.makedirs(myportdir,0755)
12782 st = os.stat(myportdir)
12785 spawn_kwargs["env"] = settings.environ()
# With FEATURES=usersync and sufficient privileges, run the sync
# command as the owner of PORTDIR rather than as root.
12786 if 'usersync' in settings.features and \
12787 portage.data.secpass >= 2 and \
12788 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12789 st.st_gid != os.getgid() and st.st_mode & 0070):
12791 homedir = pwd.getpwuid(st.st_uid).pw_dir
12795 # Drop privileges when syncing, in order to match
12796 # existing uid/gid settings.
12797 spawn_kwargs["uid"] = st.st_uid
12798 spawn_kwargs["gid"] = st.st_gid
12799 spawn_kwargs["groups"] = [st.st_gid]
12800 spawn_kwargs["env"]["HOME"] = homedir
12802 if not st.st_mode & 0020:
12803 umask = umask | 0020
12804 spawn_kwargs["umask"] = umask
12806 syncuri = settings.get("SYNC", "").strip()
12808 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12809 noiselevel=-1, level=logging.ERROR)
# Detect a VCS checkout inside PORTDIR; rsync over one is refused below.
12812 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12813 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12816 dosyncuri = syncuri
12817 updatecache_flg = False
12818 if myaction == "metadata":
12819 print "skipping sync"
12820 updatecache_flg = True
12821 elif ".git" in vcs_dirs:
12822 # Update existing git repository, and ignore the syncuri. We are
12823 # going to trust the user and assume that the user is in the branch
12824 # that he/she wants updated. We'll let the user manage branches with
12826 if portage.process.find_binary("git") is None:
12827 msg = ["Command not found: git",
12828 "Type \"emerge dev-util/git\" to enable git support."]
12830 writemsg_level("!!! %s\n" % l,
12831 level=logging.ERROR, noiselevel=-1)
12833 msg = ">>> Starting git pull in %s..." % myportdir
12834 emergelog(xterm_titles, msg )
12835 writemsg_level(msg + "\n")
12836 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12837 (portage._shell_quote(myportdir),), **spawn_kwargs)
12838 if exitcode != os.EX_OK:
12839 msg = "!!! git pull error in %s." % myportdir
12840 emergelog(xterm_titles, msg)
12841 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12843 msg = ">>> Git pull in %s successful" % myportdir
12844 emergelog(xterm_titles, msg)
12845 writemsg_level(msg + "\n")
12846 exitcode = git_sync_timestamps(settings, myportdir)
12847 if exitcode == os.EX_OK:
12848 updatecache_flg = True
# --- rsync:// sync path ---
12849 elif syncuri[:8]=="rsync://":
12850 for vcs_dir in vcs_dirs:
12851 writemsg_level(("!!! %s appears to be under revision " + \
12852 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12853 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12855 if not os.path.exists("/usr/bin/rsync"):
12856 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12857 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults unless the user
# supplied PORTAGE_RSYNC_OPTS, which is then validated below.
12862 if settings["PORTAGE_RSYNC_OPTS"] == "":
12863 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12864 rsync_opts.extend([
12865 "--recursive", # Recurse directories
12866 "--links", # Consider symlinks
12867 "--safe-links", # Ignore links outside of tree
12868 "--perms", # Preserve permissions
12869 "--times", # Preserive mod times
12870 "--compress", # Compress the data transmitted
12871 "--force", # Force deletion on non-empty dirs
12872 "--whole-file", # Don't do block transfers, only entire files
12873 "--delete", # Delete files that aren't in the master tree
12874 "--stats", # Show final statistics about what was transfered
12875 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12876 "--exclude=/distfiles", # Exclude distfiles from consideration
12877 "--exclude=/local", # Exclude local from consideration
12878 "--exclude=/packages", # Exclude packages from consideration
12882 # The below validation is not needed when using the above hardcoded
12885 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12887 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12888 for opt in ("--recursive", "--times"):
12889 if opt not in rsync_opts:
12890 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12891 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12892 rsync_opts.append(opt)
12894 for exclude in ("distfiles", "local", "packages"):
12895 opt = "--exclude=/%s" % exclude
12896 if opt not in rsync_opts:
12897 portage.writemsg(yellow("WARNING:") + \
12898 " adding required option %s not included in " % opt + \
12899 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12900 rsync_opts.append(opt)
# Official Gentoo mirrors get a few extra mandatory options.
12902 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12903 def rsync_opt_startswith(opt_prefix):
12904 for x in rsync_opts:
12905 if x.startswith(opt_prefix):
12909 if not rsync_opt_startswith("--timeout="):
12910 rsync_opts.append("--timeout=%d" % mytimeout)
12912 for opt in ("--compress", "--whole-file"):
12913 if opt not in rsync_opts:
12914 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12915 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12916 rsync_opts.append(opt)
12918 if "--quiet" in myopts:
12919 rsync_opts.append("--quiet") # Shut up a lot
12921 rsync_opts.append("--verbose") # Print filelist
12923 if "--verbose" in myopts:
12924 rsync_opts.append("--progress") # Progress meter for each file
12926 if "--debug" in myopts:
12927 rsync_opts.append("--checksum") # Force checksum on all files
12929 # Real local timestamp file.
12930 servertimestampfile = os.path.join(
12931 myportdir, "metadata", "timestamp.chk")
12933 content = portage.util.grabfile(servertimestampfile)
12937 mytimestamp = time.mktime(time.strptime(content[0],
12938 "%a, %d %b %Y %H:%M:%S +0000"))
12939 except (OverflowError, ValueError):
12944 rsync_initial_timeout = \
12945 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12947 rsync_initial_timeout = 15
12950 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12951 except SystemExit, e:
12952 raise # Needed else can't exit
12954 maxretries=3 #default number of retries
# Split the rsync URI into (optional) user, host and (optional) port.
12957 user_name, hostname, port = re.split(
12958 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12961 if user_name is None:
12963 updatecache_flg=True
12964 all_rsync_opts = set(rsync_opts)
12965 extra_rsync_opts = shlex.split(
12966 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12967 all_rsync_opts.update(extra_rsync_opts)
12968 family = socket.AF_INET
12969 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12970 family = socket.AF_INET
12971 elif socket.has_ipv6 and \
12972 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12973 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
12975 SERVER_OUT_OF_DATE = -1
12976 EXCEEDED_MAX_RETRIES = -2
# Resolve all mirror addresses up front and shuffle them so retries
# rotate through different servers.
12982 for addrinfo in socket.getaddrinfo(
12983 hostname, None, family, socket.SOCK_STREAM):
12984 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12985 # IPv6 addresses need to be enclosed in square brackets
12986 ips.append("[%s]" % addrinfo[4][0])
12988 ips.append(addrinfo[4][0])
12989 from random import shuffle
12991 except SystemExit, e:
12992 raise # Needed else can't exit
12993 except Exception, e:
12994 print "Notice:",str(e)
12999 dosyncuri = syncuri.replace(
13000 "//" + user_name + hostname + port + "/",
13001 "//" + user_name + ips[0] + port + "/", 1)
13002 except SystemExit, e:
13003 raise # Needed else can't exit
13004 except Exception, e:
13005 print "Notice:",str(e)
13009 if "--ask" in myopts:
13010 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
13015 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
13016 if "--quiet" not in myopts:
13017 print ">>> Starting rsync with "+dosyncuri+"..."
13019 emergelog(xterm_titles,
13020 ">>> Starting retry %d of %d with %s" % \
13021 (retries,maxretries,dosyncuri))
13022 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
13024 if mytimestamp != 0 and "--quiet" not in myopts:
13025 print ">>> Checking server timestamp ..."
13027 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
13029 if "--debug" in myopts:
13032 exitcode = os.EX_OK
13033 servertimestamp = 0
13034 # Even if there's no timestamp available locally, fetch the
13035 # timestamp anyway as an initial probe to verify that the server is
13036 # responsive. This protects us from hanging indefinitely on a
13037 # connection attempt to an unresponsive server which rsync's
13038 # --timeout option does not prevent.
13040 # Temporary file for remote server timestamp comparison.
13041 from tempfile import mkstemp
13042 fd, tmpservertimestampfile = mkstemp()
13044 mycommand = rsynccommand[:]
13045 mycommand.append(dosyncuri.rstrip("/") + \
13046 "/metadata/timestamp.chk")
13047 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the initial probe, since rsync's --timeout does not
# cover the connection attempt itself.
13051 def timeout_handler(signum, frame):
13052 raise portage.exception.PortageException("timed out")
13053 signal.signal(signal.SIGALRM, timeout_handler)
13054 # Timeout here in case the server is unresponsive. The
13055 # --timeout rsync option doesn't apply to the initial
13056 # connection attempt.
13057 if rsync_initial_timeout:
13058 signal.alarm(rsync_initial_timeout)
13060 mypids.extend(portage.process.spawn(
13061 mycommand, env=settings.environ(), returnpid=True))
13062 exitcode = os.waitpid(mypids[0], 0)[1]
13063 content = portage.grabfile(tmpservertimestampfile)
13065 if rsync_initial_timeout:
13068 os.unlink(tmpservertimestampfile)
13071 except portage.exception.PortageException, e:
# Probe timed out: kill the child if it is still running.
13075 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13076 os.kill(mypids[0], signal.SIGTERM)
13077 os.waitpid(mypids[0], 0)
13078 # This is the same code rsync uses for timeout.
# Convert the raw os.waitpid() status into a shell-style exit code.
13081 if exitcode != os.EX_OK:
13082 if exitcode & 0xff:
13083 exitcode = (exitcode & 0xff) << 8
13085 exitcode = exitcode >> 8
13087 portage.process.spawned_pids.remove(mypids[0])
13090 servertimestamp = time.mktime(time.strptime(
13091 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13092 except (OverflowError, ValueError):
13094 del mycommand, mypids, content
# Compare server vs. local timestamps to decide whether to transfer.
13095 if exitcode == os.EX_OK:
13096 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13097 emergelog(xterm_titles,
13098 ">>> Cancelling sync -- Already current.")
13101 print ">>> Timestamps on the server and in the local repository are the same."
13102 print ">>> Cancelling all further sync action. You are already up to date."
13104 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13108 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13109 emergelog(xterm_titles,
13110 ">>> Server out of date: %s" % dosyncuri)
13113 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13115 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13118 exitcode = SERVER_OUT_OF_DATE
13119 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13121 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13122 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
# rsync exit codes 1,3,4,11,14,20,21 are treated as fatal; others retry.
13123 if exitcode in [0,1,3,4,11,14,20,21]:
13125 elif exitcode in [1,3,4,11,14,20,21]:
13128 # Code 2 indicates protocol incompatibility, which is expected
13129 # for servers with protocol < 29 that don't support
13130 # --prune-empty-directories. Retry for a server that supports
13131 # at least rsync protocol version 29 (>=rsync-2.6.4).
13136 if retries<=maxretries:
13137 print ">>> Retrying..."
13142 updatecache_flg=False
13143 exitcode = EXCEEDED_MAX_RETRIES
13147 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13148 elif exitcode == SERVER_OUT_OF_DATE:
13150 elif exitcode == EXCEEDED_MAX_RETRIES:
13152 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Map well-known rsync failure codes to human-readable advice.
13157 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13158 msg.append("that your SYNC statement is proper.")
13159 msg.append("SYNC=" + settings["SYNC"])
13161 msg.append("Rsync has reported that there is a File IO error. Normally")
13162 msg.append("this means your disk is full, but can be caused by corruption")
13163 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13164 msg.append("and try again after the problem has been fixed.")
13165 msg.append("PORTDIR=" + settings["PORTDIR"])
13167 msg.append("Rsync was killed before it finished.")
13169 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13170 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13171 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13172 msg.append("temporary problem unless complications exist with your network")
13173 msg.append("(and possibly your system's filesystem) configuration.")
# --- cvs:// sync path ---
13177 elif syncuri[:6]=="cvs://":
13178 if not os.path.exists("/usr/bin/cvs"):
13179 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13180 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13182 cvsroot=syncuri[6:]
13183 cvsdir=os.path.dirname(myportdir)
13184 if not os.path.exists(myportdir+"/CVS"):
13186 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13187 if os.path.exists(cvsdir+"/gentoo-x86"):
13188 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13191 os.rmdir(myportdir)
13193 if e.errno != errno.ENOENT:
13195 "!!! existing '%s' directory; exiting.\n" % myportdir)
13198 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13199 print "!!! cvs checkout error; exiting."
13201 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13204 print ">>> Starting cvs update with "+syncuri+"..."
13205 retval = portage.process.spawn_bash(
13206 "cd %s; cvs -z0 -q update -dP" % \
13207 (portage._shell_quote(myportdir),), **spawn_kwargs)
13208 if retval != os.EX_OK:
13210 dosyncuri = syncuri
13212 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13213 noiselevel=-1, level=logging.ERROR)
# --- post-sync processing: metadata cache, global updates, notices ---
13216 if updatecache_flg and \
13217 myaction != "metadata" and \
13218 "metadata-transfer" not in settings.features:
13219 updatecache_flg = False
13221 # Reload the whole config from scratch.
13222 settings, trees, mtimedb = load_emerge_config(trees=trees)
13223 root_config = trees[settings["ROOT"]]["root_config"]
13224 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13226 if updatecache_flg and \
13227 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
13229 # Only update cache for myportdir since that's
13230 # the only one that's been synced here.
13231 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
13233 if portage._global_updates(trees, mtimedb["updates"]):
13235 # Reload the whole config from scratch.
13236 settings, trees, mtimedb = load_emerge_config(trees=trees)
13237 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13238 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one
# so we can recommend 'emerge portage' below if an upgrade exists.
13240 mybestpv = portdb.xmatch("bestmatch-visible",
13241 portage.const.PORTAGE_PACKAGE_ATOM)
13242 mypvs = portage.best(
13243 trees[settings["ROOT"]]["vartree"].dbapi.match(
13244 portage.const.PORTAGE_PACKAGE_ATOM))
13246 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13248 if myaction != "metadata":
# Run the user's /etc/portage/bin/post_sync hook if it is executable.
13249 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13250 retval = portage.process.spawn(
13251 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13252 dosyncuri], env=settings.environ())
13253 if retval != os.EX_OK:
13254 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13256 if(mybestpv != mypvs) and not "--quiet" in myopts:
13258 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13259 print red(" * ")+"that you update portage now, before any other packages are updated."
13261 print red(" * ")+"To update portage, run 'emerge portage' now."
13264 display_news_notification(root_config, myopts)
13267 def git_sync_timestamps(settings, portdir):
# Restore ebuild/eclass mtimes from the metadata cache after a git sync,
# skipping files the working tree has modified relative to HEAD.
13269 Since git doesn't preserve timestamps, synchronize timestamps between
13270 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13271 for a given file as long as the file in the working tree is not modified
13272 (relative to HEAD).
13274 cache_dir = os.path.join(portdir, "metadata", "cache")
13275 if not os.path.isdir(cache_dir):
13277 writemsg_level(">>> Synchronizing timestamps...\n")
13279 from portage.cache.cache_errors import CacheError
# Open the pregenerated metadata cache read-only.
13281 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13282 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13283 except CacheError, e:
13284 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13285 level=logging.ERROR, noiselevel=-1)
13288 ec_dir = os.path.join(portdir, "eclass")
13290 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13291 if f.endswith(".eclass"))
13293 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13294 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified; those keep their
# current timestamps.
13297 args = [portage.const.BASH_BINARY, "-c",
13298 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13299 portage._shell_quote(portdir)]
13301 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13302 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13304 if rval != os.EX_OK:
13307 modified_eclasses = set(ec for ec in ec_names \
13308 if os.path.join("eclass", ec + ".eclass") in modified_files)
13310 updated_ec_mtimes = {}
# Walk every cache entry, validating it before touching any file.
13312 for cpv in cache_db:
13313 cpv_split = portage.catpkgsplit(cpv)
13314 if cpv_split is None:
13315 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13316 level=logging.ERROR, noiselevel=-1)
13319 cat, pn, ver, rev = cpv_split
13320 cat, pf = portage.catsplit(cpv)
13321 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13322 if relative_eb_path in modified_files:
13326 cache_entry = cache_db[cpv]
13327 eb_mtime = cache_entry.get("_mtime_")
13328 ec_mtimes = cache_entry.get("_eclasses_")
13330 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13331 level=logging.ERROR, noiselevel=-1)
13333 except CacheError, e:
13334 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13335 (cpv, e), level=logging.ERROR, noiselevel=-1)
13338 if eb_mtime is None:
13339 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13340 level=logging.ERROR, noiselevel=-1)
13344 eb_mtime = long(eb_mtime)
13346 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13347 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13350 if ec_mtimes is None:
13351 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13352 level=logging.ERROR, noiselevel=-1)
13355 if modified_eclasses.intersection(ec_mtimes):
13358 missing_eclasses = set(ec_mtimes).difference(ec_names)
13359 if missing_eclasses:
13360 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13361 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13365 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): this stores the full os.stat() result, yet it is later
# compared against eb_mtime (an integer) — presumably `.st_mtime` was
# intended; confirm against the complete file before changing.
13367 current_eb_mtime = os.stat(eb_path)
13369 writemsg_level("!!! Missing ebuild: %s\n" % \
13370 (cpv,), level=logging.ERROR, noiselevel=-1)
# Reject entries whose eclass mtimes disagree with eclasses already
# updated for an earlier entry.
13373 inconsistent = False
13374 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13375 updated_mtime = updated_ec_mtimes.get(ec)
13376 if updated_mtime is not None and updated_mtime != ec_mtime:
13377 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13378 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13379 inconsistent = True
# Apply the cached timestamps to the ebuild and its eclasses.
13385 if current_eb_mtime != eb_mtime:
13386 os.utime(eb_path, (eb_mtime, eb_mtime))
13388 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13389 if ec in updated_ec_mtimes:
13391 ec_path = os.path.join(ec_dir, ec + ".eclass")
13392 current_mtime = long(os.stat(ec_path).st_mtime)
13393 if current_mtime != ec_mtime:
13394 os.utime(ec_path, (ec_mtime, ec_mtime))
13395 updated_ec_mtimes[ec] = ec_mtime
13399 def action_metadata(settings, portdb, myopts, porttrees=None):
# Transfer the pregenerated metadata cache (metadata/cache) of each
# porttree into the local depcache, validating eclass data as it goes,
# then prune dead entries. Shows a progress bar on a tty.
13400 if porttrees is None:
13401 porttrees = portdb.porttrees
13402 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
13403 old_umask = os.umask(0002)
13404 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on a depcachedir pointing at a primary system dir.
13405 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13406 "/lib", "/opt", "/proc", "/root", "/sbin",
13407 "/sys", "/tmp", "/usr", "/var"]:
13408 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13409 "ROOT DIRECTORY ON YOUR SYSTEM."
13410 print >> sys.stderr, \
13411 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13413 if not os.path.exists(cachedir):
13414 os.makedirs(cachedir)
13416 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
13417 auxdbkeys = tuple(auxdbkeys)
# Per-tree bundle of source cache, destination cache and eclass db.
13419 class TreeData(object):
13420 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
13421 def __init__(self, dest_db, eclass_db, path, src_db):
13422 self.dest_db = dest_db
13423 self.eclass_db = eclass_db
13425 self.src_db = src_db
13426 self.valid_nodes = set()
13428 porttrees_data = []
13429 for path in porttrees:
13430 src_db = portdb._pregen_auxdb.get(path)
13431 if src_db is None and \
13432 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
13433 src_db = portdb.metadbmodule(
13434 path, 'metadata/cache', auxdbkeys, readonly=True)
13436 src_db.ec = portdb._repo_info[path].eclass_db
13437 except AttributeError:
13440 if src_db is not None:
13441 porttrees_data.append(TreeData(portdb.auxdb[path],
13442 portdb._repo_info[path].eclass_db, path, src_db))
13444 porttrees = [tree_data.path for tree_data in porttrees_data]
# Progress bar setup (only useful when stdout is a terminal).
13446 isatty = sys.stdout.isatty()
13447 quiet = not isatty or '--quiet' in myopts
13450 progressBar = portage.output.TermProgressBar()
13451 progressHandler = ProgressHandler()
13452 onProgress = progressHandler.onProgress
13454 progressBar.set(progressHandler.curval, progressHandler.maxval)
13455 progressHandler.display = display
13456 def sigwinch_handler(signum, frame):
13457 lines, progressBar.term_columns = \
13458 portage.output.get_term_size()
13459 signal.signal(signal.SIGWINCH, sigwinch_handler)
13461 # Temporarily override portdb.porttrees so portdb.cp_all()
13462 # will only return the relevant subset.
13463 portdb_porttrees = portdb.porttrees
13464 portdb.porttrees = porttrees
13466 cp_all = portdb.cp_all()
13468 portdb.porttrees = portdb_porttrees
13471 maxval = len(cp_all)
13472 if onProgress is not None:
13473 onProgress(maxval, curval)
13475 from portage.cache.util import quiet_mirroring
13476 from portage import eapi_is_supported, \
13477 _validate_cache_for_unsupported_eapis
13479 # TODO: Display error messages, but do not interfere with the progress bar.
13481 # 1) erase the progress bar
13482 # 2) show the error message
13483 # 3) redraw the progress bar on a new line
13484 noise = quiet_mirroring()
# Main transfer loop: for every category/package in every tree, copy
# the source cache entry into the destination cache when it differs.
13487 for tree_data in porttrees_data:
13488 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
13489 tree_data.valid_nodes.add(cpv)
13491 src = tree_data.src_db[cpv]
13492 except KeyError, e:
13493 noise.missing_entry(cpv)
13496 except CacheError, ce:
13497 noise.exception(cpv, ce)
13501 eapi = src.get('EAPI')
13504 eapi = eapi.lstrip('-')
13505 eapi_supported = eapi_is_supported(eapi)
13506 if not eapi_supported:
13507 if not _validate_cache_for_unsupported_eapis:
13508 noise.misc(cpv, "unable to validate " + \
13509 "cache for EAPI='%s'" % eapi)
13514 dest = tree_data.dest_db[cpv]
13515 except (KeyError, CacheError):
13518 for d in (src, dest):
13519 if d is not None and d.get('EAPI') in ('', '0'):
# Skip the write only when the existing entry is provably identical.
13522 if dest is not None:
13523 if not (dest['_mtime_'] == src['_mtime_'] and \
13524 tree_data.eclass_db.is_eclass_data_valid(
13525 dest['_eclasses_']) and \
13526 set(dest['_eclasses_']) == set(src['_eclasses_'])):
13529 # We don't want to skip the write unless we're really
13530 # sure that the existing cache is identical, so don't
13531 # trust _mtime_ and _eclasses_ alone.
13532 for k in set(chain(src, dest)).difference(
13533 ('_mtime_', '_eclasses_')):
13534 if dest.get(k, '') != src.get(k, ''):
13538 if dest is not None:
13539 # The existing data is valid and identical,
13540 # so there's no need to overwrite it.
13544 inherited = src.get('INHERITED', '')
13545 eclasses = src.get('_eclasses_')
13546 except CacheError, ce:
13547 noise.exception(cpv, ce)
13551 if eclasses is not None:
13552 if not tree_data.eclass_db.is_eclass_data_valid(
13553 src['_eclasses_']):
13554 noise.eclass_stale(cpv)
13556 inherited = eclasses
13558 inherited = inherited.split()
13560 if tree_data.src_db.complete_eclass_entries and \
13562 noise.corruption(cpv, "missing _eclasses_ field")
13566 # Even if _eclasses_ already exists, replace it with data from
13567 # eclass_cache, in order to insert local eclass paths.
13569 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
13571 # INHERITED contains a non-existent eclass.
13572 noise.eclass_stale(cpv)
13575 if eclasses is None:
13576 noise.eclass_stale(cpv)
13578 src['_eclasses_'] = eclasses
13580 src['_eclasses_'] = {}
# Unsupported EAPIs are stored with a '-' prefix so they are treated
# as masked rather than discarded.
13582 if not eapi_supported:
13584 'EAPI' : '-' + eapi,
13585 '_mtime_' : src['_mtime_'],
13586 '_eclasses_' : src['_eclasses_'],
13590 tree_data.dest_db[cpv] = src
13591 except CacheError, ce:
13592 noise.exception(cpv, ce)
13596 if onProgress is not None:
13597 onProgress(maxval, curval)
13599 if onProgress is not None:
13600 onProgress(maxval, curval)
# Prune destination-cache entries that no longer exist in the tree.
13602 for tree_data in porttrees_data:
13604 dead_nodes = set(tree_data.dest_db.iterkeys())
13605 except CacheError, e:
13606 writemsg_level("Error listing cache entries for " + \
13607 "'%s': %s, continuing...\n" % (tree_data.path, e),
13608 level=logging.ERROR, noiselevel=-1)
13611 dead_nodes.difference_update(tree_data.valid_nodes)
13612 for cpv in dead_nodes:
13614 tree_data.dest_db[cpv]
13615 except (KeyError, CacheError):
13619 # make sure the final progress is displayed
13620 progressHandler.display()
13622 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
13625 os.umask(old_umask)
13627 def action_regen(settings, portdb, max_jobs, max_load):
# Regenerate the metadata cache from ebuilds via MetadataRegen, with
# optional job/load limits; returns MetadataRegen's exit status.
13628 xterm_titles = "notitles" not in settings.features
13629 emergelog(xterm_titles, " === regen")
13630 #regenerate cache entries
13631 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block on it.
13633 os.close(sys.stdin.fileno())
13634 except SystemExit, e:
13635 raise # Needed else can't exit
13640 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13643 portage.writemsg_stdout("done!\n")
13644 return regen.returncode
# Handle `emerge --config <atom>`: run the pkg_config() phase of a single
# installed package, prompting interactively when the atom is ambiguous.
# NOTE(review): decimated excerpt — interior statements are missing from view.
13646 def action_config(settings, trees, myopts, myfiles):
# Exactly one package atom must be supplied on the command line.
13647 if len(myfiles) != 1:
13648 print red("!!! config can only take a single package atom at this time\n")
13650 if not is_valid_package_atom(myfiles[0]):
13651 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13653 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13654 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
13658 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13659 except portage.exception.AmbiguousPackageName, e:
13660 # Multiple matches thrown from cpv_expand
13663 print "No packages found.\n"
13665 elif len(pkgs) > 1:
# With --ask, let the user pick which of the multiple matches to configure.
13666 if "--ask" in myopts:
13668 print "Please select a package to configure:"
13672 options.append(str(idx))
13673 print options[-1]+") "+pkg
13675 options.append("X")
13676 idx = userquery("Selection?", options)
13679 pkg = pkgs[int(idx)-1]
13681 print "The following packages available:"
13684 print "\nPlease use a specific atom or the --ask option."
13690 if "--ask" in myopts:
13691 if userquery("Ready to configure "+pkg+"?") == "No":
13694 print "Configuring pkg..."
13696 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13697 mysettings = portage.config(clone=settings)
13698 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
# Correct string comparison: PORTAGE_DEBUG values are strings.
13699 debug = mysettings.get("PORTAGE_DEBUG") == "1"
13700 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
# NOTE(review): likely bug — settings.get() returns a string, so comparing
# to the int 1 is always False; should probably be == "1" (or reuse the
# `debug` variable computed above) — TODO confirm against upstream.
13702 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13703 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the "clean" phase to tidy the temporary build dir.
13704 if retval == os.EX_OK:
13705 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13706 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Handle `emerge --info [atoms...]`: print portage/system configuration,
# toolchain package versions, selected variables, and — when atoms are
# given — per-package build settings that differ from the current config.
# NOTE(review): decimated excerpt — interior statements are missing from view.
13709 def action_info(settings, trees, myopts, myfiles):
13710 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13711 settings.profile_path, settings["CHOST"],
13712 trees[settings["ROOT"]]["vartree"].dbapi)
13714 header_title = "System Settings"
# Centered banner between two rules of '=' characters.
13716 print header_width * "="
13717 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13718 print header_width * "="
13719 print "System uname: "+platform.platform(aliased=1)
# Tree sync timestamp, read from metadata/timestamp.chk in PORTDIR.
13721 lastSync = portage.grabfile(os.path.join(
13722 settings["PORTDIR"], "metadata", "timestamp.chk"))
13723 print "Timestamp of tree:",
# Report distcc / ccache versions via their CLI tools when available.
13729 output=commands.getstatusoutput("distcc --version")
13731 print str(output[1].split("\n",1)[0]),
13732 if "distcc" in settings.features:
13737 output=commands.getstatusoutput("ccache -V")
13739 print str(output[1].split("\n",1)[0]),
13740 if "ccache" in settings.features:
# Core toolchain packages to report, extended by profiles/info_pkgs.
13745 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13746 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13747 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13748 myvars = portage.util.unique_array(myvars)
13752 if portage.isvalidatom(x):
# NOTE(review): queries trees["/"] rather than trees[settings["ROOT"]] —
# presumably intentional (host toolchain), but worth confirming.
13753 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13754 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13755 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13757 for pn, ver, rev in pkg_matches:
13759 pkgs.append(ver + "-" + rev)
13763 pkgs = ", ".join(pkgs)
13764 print "%-20s %s" % (x+":", pkgs)
13766 print "%-20s %s" % (x+":", "[NOT VALID]")
13768 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# --verbose dumps every settings key; otherwise only the curated list
# below (plus profiles/info_vars additions).
13770 if "--verbose" in myopts:
13771 myvars=settings.keys()
13773 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13774 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13775 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13776 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13778 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13780 myvars = portage.util.unique_array(myvars)
13781 use_expand = settings.get('USE_EXPAND', '').split()
13783 use_expand_hidden = set(
13784 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13785 alphabetical_use = '--alphabetical' in myopts
13786 root_config = trees[settings["ROOT"]]['root_config']
13792 print '%s="%s"' % (x, settings[x])
# Strip USE_EXPAND-prefixed flags out of USE so they are shown under
# their own variables instead.
13794 use = set(settings["USE"].split())
13795 for varname in use_expand:
13796 flag_prefix = varname.lower() + "_"
13797 for f in list(use):
13798 if f.startswith(flag_prefix):
13802 print 'USE="%s"' % " ".join(use),
13803 for varname in use_expand:
13804 myval = settings.get(varname)
13806 print '%s="%s"' % (varname, myval),
13809 unset_vars.append(x)
13811 print "Unset: "+", ".join(unset_vars)
# --debug: report cvs_id_string for every portage submodule that has one.
13814 if "--debug" in myopts:
13815 for x in dir(portage):
13816 module = getattr(portage, x)
13817 if "cvs_id_string" in dir(module):
13818 print "%s: %s" % (str(x), str(module.cvs_id_string))
13820 # See if we can find any packages installed matching the strings
13821 # passed on the command line
13823 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13824 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13826 mypkgs.extend(vardb.match(x))
13828 # If some packages were found...
13830 # Get our global settings (we only print stuff if it varies from
13831 # the current config)
13832 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13833 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13834 auxkeys.append('DEFINED_PHASES')
13836 pkgsettings = portage.config(clone=settings)
13838 for myvar in mydesiredvars:
13839 global_vals[myvar] = set(settings.get(myvar, "").split())
13841 # Loop through each package
13842 # Only print settings if they differ from global settings
13843 header_title = "Package Settings"
13844 print header_width * "="
13845 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13846 print header_width * "="
13847 from portage.output import EOutput
13850 # Get all package specific variables
13851 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13852 pkg = Package(built=True, cpv=cpv,
13853 installed=True, metadata=izip(Package.metadata_keys,
13854 (metadata.get(x, '') for x in Package.metadata_keys)),
13855 root_config=root_config, type_name='installed')
13858 valuesmap[k] = set(metadata[k].split())
13861 for myvar in mydesiredvars:
13862 # If the package variable doesn't match the
13863 # current global variable, something has changed
13864 # so set diff_found so we know to print
13865 if valuesmap[myvar] != global_vals[myvar]:
13866 diff_values[myvar] = valuesmap[myvar]
13868 print "\n%s was built with the following:" % \
13869 colorize("INFORM", str(pkg.cpv))
# Compute the effective/forced flag sets for this cpv so the display can
# mark forced flags and group USE_EXPAND flags under their variables.
13871 pkgsettings.setcpv(pkg)
13872 forced_flags = set(chain(pkgsettings.useforce,
13873 pkgsettings.usemask))
13874 use = set(pkg.use.enabled)
13875 use.discard(pkgsettings.get('ARCH'))
13876 use_expand_flags = set()
13879 for varname in use_expand:
13880 flag_prefix = varname.lower() + "_"
13882 if f.startswith(flag_prefix):
13883 use_expand_flags.add(f)
13884 use_enabled.setdefault(
13885 varname.upper(), []).append(f[len(flag_prefix):])
13887 for f in pkg.iuse.all:
13888 if f.startswith(flag_prefix):
13889 use_expand_flags.add(f)
13891 use_disabled.setdefault(
13892 varname.upper(), []).append(f[len(flag_prefix):])
# Display order: USE first, then the USE_EXPAND variables sorted.
13894 var_order = set(use_enabled)
13895 var_order.update(use_disabled)
13896 var_order = sorted(var_order)
13897 var_order.insert(0, 'USE')
13898 use.difference_update(use_expand_flags)
13899 use_enabled['USE'] = list(use)
13900 use_disabled['USE'] = []
13902 for f in pkg.iuse.all:
13903 if f not in use and \
13904 f not in use_expand_flags:
13905 use_disabled['USE'].append(f)
13907 for varname in var_order:
13908 if varname in use_expand_hidden:
13911 for f in use_enabled.get(varname, []):
13912 flags.append(UseFlagDisplay(f, True, f in forced_flags))
13913 for f in use_disabled.get(varname, []):
13914 flags.append(UseFlagDisplay(f, False, f in forced_flags))
13915 if alphabetical_use:
13916 flags.sort(key=UseFlagDisplay.sort_combined)
13918 flags.sort(key=UseFlagDisplay.sort_separated)
13919 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13922 # If a difference was found, print the info for
13925 # Print package info
13926 for myvar in mydesiredvars:
13927 if myvar in diff_values:
13928 mylist = list(diff_values[myvar])
13930 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Run the ebuild's pkg_info() phase when the package defines one.
13933 if metadata['DEFINED_PHASES']:
13934 if 'info' not in metadata['DEFINED_PHASES'].split():
13937 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13938 ebuildpath = vardb.findname(pkg.cpv)
13939 if not ebuildpath or not os.path.exists(ebuildpath):
13940 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13942 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
# NOTE(review): likely bug — settings.get() returns a string, so == 1 is
# always False (debug never enabled here); should probably be == "1".
13943 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13944 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Handle `emerge --search`: run each search term through the search class
# and print the accumulated results.
# NOTE(review): decimated excerpt — interior statements are missing from view.
13947 def action_search(root_config, myopts, myfiles, spinner):
13949 print "emerge: no search terms provided."
# Search options are derived directly from command-line flags.
13951 searchinstance = search(root_config,
13952 spinner, "--searchdesc" in myopts,
13953 "--quiet" not in myopts, "--usepkg" in myopts,
13954 "--usepkgonly" in myopts)
13955 for mysearch in myfiles:
# Each term may be a regex; re.error from a bad pattern is reported
# per-term rather than aborting the whole search.
13957 searchinstance.execute(mysearch)
13958 except re.error, comment:
13959 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13961 searchinstance.output()
# Common front-end for the uninstall-style actions (clean, unmerge, prune,
# depclean, deselect): validate/expand the given atoms and file paths into
# concrete package atoms, then dispatch to the appropriate handler.
# NOTE(review): decimated excerpt — interior statements are missing from view.
13963 def action_uninstall(settings, trees, ldpath_mtimes,
13964 opts, action, files, spinner):
13966 # For backward compat, some actions do not require leading '='.
13967 ignore_missing_eq = action in ('clean', 'unmerge')
13968 root = settings['ROOT']
13969 vardb = trees[root]['vartree'].dbapi
13973 # Ensure atoms are valid before calling unmerge().
13974 # For backward compat, leading '=' is not required.
13976 if is_valid_package_atom(x) or \
13977 (ignore_missing_eq and is_valid_package_atom('=' + x)):
# dep_expand resolves a short name (e.g. "foo") to a full category/name.
13980 valid_atoms.append(
13981 portage.dep_expand(x, mydb=vardb, settings=settings))
13982 except portage.exception.AmbiguousPackageName, e:
# Ambiguous short name: list the fully-qualified candidates and bail.
13983 msg = "The short ebuild name \"" + x + \
13984 "\" is ambiguous. Please specify " + \
13985 "one of the following " + \
13986 "fully-qualified ebuild names instead:"
13987 for line in textwrap.wrap(msg, 70):
13988 writemsg_level("!!! %s\n" % (line,),
13989 level=logging.ERROR, noiselevel=-1)
13991 writemsg_level(" %s\n" % colorize("INFORM", i),
13992 level=logging.ERROR, noiselevel=-1)
13993 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Absolute paths are resolved to the package that owns the file.
13996 elif x.startswith(os.sep):
13997 if not x.startswith(root):
13998 writemsg_level(("!!! '%s' does not start with" + \
13999 " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
14001 # Queue these up since it's most efficient to handle
14002 # multiple files in a single iter_owners() call.
14003 lookup_owners.append(x)
14007 msg.append("'%s' is not a valid package atom." % (x,))
14008 msg.append("Please check ebuild(5) for full details.")
14009 writemsg_level("".join("!!! %s\n" % line for line in msg),
14010 level=logging.ERROR, noiselevel=-1)
14014 relative_paths = []
14015 search_for_multiple = False
14016 if len(lookup_owners) > 1:
14017 search_for_multiple = True
14019 for x in lookup_owners:
# A directory argument implies potentially many owning packages.
14020 if not search_for_multiple and os.path.isdir(x):
14021 search_for_multiple = True
14022 relative_paths.append(x[len(root):])
14025 for pkg, relative_path in \
14026 vardb._owners.iter_owners(relative_paths):
14027 owners.add(pkg.mycpv)
14028 if not search_for_multiple:
# Build a slot atom for each owner; fall back to a plain cp atom when
# the installed package has no SLOT recorded.
14033 slot = vardb.aux_get(cpv, ['SLOT'])[0]
14035 # portage now masks packages with missing slot, but it's
14036 # possible that one was installed by an older version
14037 atom = portage.cpv_getkey(cpv)
14039 atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
14040 valid_atoms.append(portage.dep.Atom(atom))
14042 writemsg_level(("!!! '%s' is not claimed " + \
14043 "by any package.\n") % lookup_owners[0],
14044 level=logging.WARNING, noiselevel=-1)
14046 if files and not valid_atoms:
# Dispatch by action: unmerge-style actions keep the given atom order.
14049 if action in ('clean', 'unmerge') or \
14050 (action == 'prune' and "--nodeps" in opts):
14051 # When given a list of atoms, unmerge them in the order given.
14052 ordered = action == 'unmerge'
14053 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
14054 valid_atoms, ldpath_mtimes, ordered=ordered)
14056 elif action == 'deselect':
14057 rval = action_deselect(settings, trees, opts, valid_atoms)
14059 rval = action_depclean(settings, trees, ldpath_mtimes,
14060 opts, action, valid_atoms, spinner)
# Handle `emerge --deselect`: remove matching atoms from the "world"
# favorites set, expanding each argument to slot atoms of installed
# packages so slotted entries are matched precisely.
# NOTE(review): decimated excerpt — interior statements are missing from view.
14064 def action_deselect(settings, trees, opts, atoms):
14065 root_config = trees[settings['ROOT']]['root_config']
14066 world_set = root_config.sets['world']
# The world set must support update/replace for deselect to work.
14067 if not hasattr(world_set, 'update'):
14068 writemsg_level("World set does not appear to be mutable.\n",
14069 level=logging.ERROR, noiselevel=-1)
14072 vardb = root_config.trees['vartree'].dbapi
14073 expanded_atoms = set(atoms)
14074 from portage.dep import Atom
# Expand each argument to per-slot atoms of its installed matches.
14076 for cpv in vardb.match(atom):
14077 slot, = vardb.aux_get(cpv, ['SLOT'])
14080 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
14082 pretend = '--pretend' in opts
# Lock the world file while modifying it, unless just pretending.
14084 if not pretend and hasattr(world_set, 'lock'):
14088 discard_atoms = set()
14090 for atom in world_set:
14091 if not isinstance(atom, Atom):
# An argument atom discards a world atom when they intersect, except
# that a slot-qualified argument must not discard a slotless entry.
14094 for arg_atom in expanded_atoms:
14095 if arg_atom.intersects(atom) and \
14096 not (arg_atom.slot and not atom.slot):
14097 discard_atoms.add(atom)
14100 for atom in sorted(discard_atoms):
14101 print ">>> Removing %s from \"world\" favorites file..." % \
14102 colorize("INFORM", str(atom))
14104 if '--ask' in opts:
14105 prompt = "Would you like to remove these " + \
14106 "packages from your world favorites?"
14107 if userquery(prompt) == 'No':
# Rewrite the world set without the discarded atoms.
14110 remaining = set(world_set)
14111 remaining.difference_update(discard_atoms)
14113 world_set.replace(remaining)
14115 print ">>> No matching atoms found in \"world\" favorites file..."
# Handle `emerge --depclean` / `--prune`: build a dependency graph rooted
# at the system and world sets, compute which installed packages nothing
# depends on, guard against breaking shared-library consumers, and unmerge
# the remainder in reverse-dependency (topological) order.
# NOTE(review): decimated excerpt — interior statements are missing from view.
14121 def action_depclean(settings, trees, ldpath_mtimes,
14122 myopts, action, myfiles, spinner):
14123 # Kill packages that aren't explicitly merged or are required as a
14124 # dependency of another package. World file is explicit.
14126 # Global depclean or prune operations are not very safe when there are
14127 # missing dependencies since it's unknown how badly incomplete
14128 # the dependency graph is, and we might accidentally remove packages
14129 # that should have been pulled into the graph. On the other hand, it's
14130 # relatively safe to ignore missing deps when only asked to remove
14131 # specific packages.
14132 allow_missing_deps = len(myfiles) > 0
# Safety-warning text shown before a full (argument-less) depclean.
14135 msg.append("Always study the list of packages to be cleaned for any obvious\n")
14136 msg.append("mistakes. Packages that are part of the world set will always\n")
14137 msg.append("be kept. They can be manually added to this set with\n")
14138 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
14139 msg.append("package.provided (see portage(5)) will be removed by\n")
14140 msg.append("depclean, even if they are part of the world set.\n")
14142 msg.append("As a safety measure, depclean will not remove any packages\n")
14143 msg.append("unless *all* required dependencies have been resolved. As a\n")
14144 msg.append("consequence, it is often necessary to run %s\n" % \
14145 good("`emerge --update"))
14146 msg.append(good("--newuse --deep @system @world`") + \
14147 " prior to depclean.\n")
14149 if action == "depclean" and "--quiet" not in myopts and not myfiles:
14150 portage.writemsg_stdout("\n")
14152 portage.writemsg_stdout(colorize("WARN", " * ") + x)
14154 xterm_titles = "notitles" not in settings.features
14155 myroot = settings["ROOT"]
14156 root_config = trees[myroot]["root_config"]
14157 getSetAtoms = root_config.setconfig.getSetAtoms
14158 vardb = trees[myroot]["vartree"].dbapi
14159 deselect = myopts.get('--deselect') != 'n'
# Load the system and world sets; these are the roots that protect
# packages from removal.
14161 required_set_names = ("system", "world")
14165 for s in required_set_names:
14166 required_sets[s] = InternalPackageSet(
14167 initial_atoms=getSetAtoms(s))
14170 # When removing packages, use a temporary version of world
14171 # which excludes packages that are intended to be eligible for
14173 world_temp_set = required_sets["world"]
14174 system_set = required_sets["system"]
14176 if not system_set or not world_temp_set:
14179 writemsg_level("!!! You have no system list.\n",
14180 level=logging.ERROR, noiselevel=-1)
14182 if not world_temp_set:
14183 writemsg_level("!!! You have no world file.\n",
14184 level=logging.WARNING, noiselevel=-1)
14186 writemsg_level("!!! Proceeding is likely to " + \
14187 "break your installation.\n",
14188 level=logging.WARNING, noiselevel=-1)
14189 if "--pretend" not in myopts:
14190 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
14192 if action == "depclean":
14193 emergelog(xterm_titles, " >>> depclean")
# Argument atoms restrict the removal set to matching packages.
14196 args_set = InternalPackageSet()
14198 args_set.update(myfiles)
14199 matched_packages = False
14202 matched_packages = True
14204 if not matched_packages:
14205 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the "remove"-mode depgraph used to decide what is reachable.
14209 writemsg_level("\nCalculating dependencies ")
14210 resolver_params = create_depgraph_params(myopts, "remove")
14211 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
14212 vardb = resolver.trees[myroot]["vartree"].dbapi
14214 if action == "depclean":
14219 world_temp_set.clear()
14221 # Pull in everything that's installed but not matched
14222 # by an argument atom since we don't want to clean any
14223 # package if something depends on it.
14228 if args_set.findAtomForPackage(pkg) is None:
14229 world_temp_set.add("=" + pkg.cpv)
14231 except portage.exception.InvalidDependString, e:
14232 show_invalid_depstring_notice(pkg,
14233 pkg.metadata["PROVIDE"], str(e))
# Keep the package protected despite its broken PROVIDE string.
14235 world_temp_set.add("=" + pkg.cpv)
14238 elif action == "prune":
14241 world_temp_set.clear()
14243 # Pull in everything that's installed since we don't
14244 # to prune a package if something depends on it.
14245 world_temp_set.update(vardb.cp_all())
14249 # Try to prune everything that's slotted.
14250 for cp in vardb.cp_all():
14251 if len(vardb.cp_list(cp)) > 1:
14254 # Remove atoms from world that match installed packages
14255 # that are also matched by argument atoms, but do not remove
14256 # them if they match the highest installed version.
14259 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
14260 if not pkgs_for_cp or pkg not in pkgs_for_cp:
14261 raise AssertionError("package expected in matches: " + \
14262 "cp = %s, cpv = %s matches = %s" % \
14263 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14265 highest_version = pkgs_for_cp[-1]
14266 if pkg == highest_version:
14267 # pkg is the highest version
14268 world_temp_set.add("=" + pkg.cpv)
14271 if len(pkgs_for_cp) <= 1:
14272 raise AssertionError("more packages expected: " + \
14273 "cp = %s, cpv = %s matches = %s" % \
14274 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14277 if args_set.findAtomForPackage(pkg) is None:
14278 world_temp_set.add("=" + pkg.cpv)
14280 except portage.exception.InvalidDependString, e:
14281 show_invalid_depstring_notice(pkg,
14282 pkg.metadata["PROVIDE"], str(e))
14284 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly temporary) required sets.
14288 for s, package_set in required_sets.iteritems():
14289 set_atom = SETPREFIX + s
14290 set_arg = SetArg(arg=set_atom, set=package_set,
14291 root_config=resolver.roots[myroot])
14292 set_args[s] = set_arg
14293 for atom in set_arg.set:
14294 resolver._dep_stack.append(
14295 Dependency(atom=atom, root=myroot, parent=set_arg))
14296 resolver.digraph.add(set_arg, None)
14298 success = resolver._complete_graph()
14299 writemsg_level("\b\b... done!\n")
14301 resolver.display_problems()
# Report deps that could not be satisfied; abort unless the operation
# was restricted to specific packages (allow_missing_deps).
14306 def unresolved_deps():
14308 unresolvable = set()
14309 for dep in resolver._initially_unsatisfied_deps:
14310 if isinstance(dep.parent, Package) and \
14311 (dep.priority > UnmergeDepPriority.SOFT):
14312 unresolvable.add((dep.atom, dep.parent.cpv))
14314 if not unresolvable:
14317 if unresolvable and not allow_missing_deps:
14318 prefix = bad(" * ")
14320 msg.append("Dependencies could not be completely resolved due to")
14321 msg.append("the following required packages not being installed:")
14323 for atom, parent in unresolvable:
14324 msg.append(" %s pulled in by:" % (atom,))
14325 msg.append(" %s" % (parent,))
14327 msg.append("Have you forgotten to run " + \
14328 good("`emerge --update --newuse --deep @system @world`") + " prior")
14329 msg.append(("to %s? It may be necessary to manually " + \
14330 "uninstall packages that no longer") % action)
14331 msg.append("exist in the portage tree since " + \
14332 "it may not be possible to satisfy their")
14333 msg.append("dependencies. Also, be aware of " + \
14334 "the --with-bdeps option that is documented")
14335 msg.append("in " + good("`man emerge`") + ".")
14336 if action == "prune":
14338 msg.append("If you would like to ignore " + \
14339 "dependencies then use %s." % good("--nodeps"))
14340 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14341 level=logging.ERROR, noiselevel=-1)
14345 if unresolved_deps():
14348 graph = resolver.digraph.copy()
14349 required_pkgs_total = 0
14351 if isinstance(node, Package):
14352 required_pkgs_total += 1
# Show which graph nodes keep a package installed (--verbose reporting).
14354 def show_parents(child_node):
14355 parent_nodes = graph.parent_nodes(child_node)
14356 if not parent_nodes:
14357 # With --prune, the highest version can be pulled in without any
14358 # real parent since all installed packages are pulled in. In that
14359 # case there's nothing to show here.
14362 for node in parent_nodes:
14363 parent_strs.append(str(getattr(node, "cpv", node)))
14366 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
14367 for parent_str in parent_strs:
14368 msg.append(" %s\n" % (parent_str,))
14370 portage.writemsg_stdout("".join(msg), noiselevel=-1)
14372 def cmp_pkg_cpv(pkg1, pkg2):
14373 """Sort Package instances by cpv."""
14374 if pkg1.cpv > pkg2.cpv:
14376 elif pkg1.cpv == pkg2.cpv:
# Packages absent from the dependency graph are candidates for removal.
14381 def create_cleanlist():
14382 pkgs_to_remove = []
14384 if action == "depclean":
14387 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14390 arg_atom = args_set.findAtomForPackage(pkg)
14391 except portage.exception.InvalidDependString:
14392 # this error has already been displayed by now
14396 if pkg not in graph:
14397 pkgs_to_remove.append(pkg)
14398 elif "--verbose" in myopts:
14402 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14403 if pkg not in graph:
14404 pkgs_to_remove.append(pkg)
14405 elif "--verbose" in myopts:
14408 elif action == "prune":
14409 # Prune really uses all installed instead of world. It's not
14410 # a real reverse dependency so don't display it as such.
14411 graph.remove(set_args["world"])
14413 for atom in args_set:
14414 for pkg in vardb.match_pkgs(atom):
14415 if pkg not in graph:
14416 pkgs_to_remove.append(pkg)
14417 elif "--verbose" in myopts:
14420 if not pkgs_to_remove:
14422 ">>> No packages selected for removal by %s\n" % action)
14423 if "--verbose" not in myopts:
14425 ">>> To see reverse dependencies, use %s\n" % \
14427 if action == "prune":
14429 ">>> To ignore dependencies, use %s\n" % \
14432 return pkgs_to_remove
14434 cleanlist = create_cleanlist()
14437 clean_set = set(cleanlist)
14439 # Check if any of these package are the sole providers of libraries
14440 # with consumers that have not been selected for removal. If so, these
14441 # packages and any dependencies need to be added to the graph.
14442 real_vardb = trees[myroot]["vartree"].dbapi
14443 linkmap = real_vardb.linkmap
14444 liblist = linkmap.listLibraryObjects()
14445 consumer_cache = {}
14446 provider_cache = {}
14450 writemsg_level(">>> Checking for lib consumers...\n")
14452 for pkg in cleanlist:
14453 pkg_dblink = real_vardb._dblink(pkg.cpv)
14454 provided_libs = set()
# Which of the system's shared libraries does this package own?
14456 for lib in liblist:
14457 if pkg_dblink.isowner(lib, myroot):
14458 provided_libs.add(lib)
14460 if not provided_libs:
14464 for lib in provided_libs:
14465 lib_consumers = consumer_cache.get(lib)
14466 if lib_consumers is None:
14467 lib_consumers = linkmap.findConsumers(lib)
14468 consumer_cache[lib] = lib_consumers
14470 consumers[lib] = lib_consumers
# Ignore consumers owned by the providing package itself.
14475 for lib, lib_consumers in consumers.items():
14476 for consumer_file in list(lib_consumers):
14477 if pkg_dblink.isowner(consumer_file, myroot):
14478 lib_consumers.remove(consumer_file)
14479 if not lib_consumers:
14485 for lib, lib_consumers in consumers.iteritems():
14487 soname = soname_cache.get(lib)
14489 soname = linkmap.getSoname(lib)
14490 soname_cache[lib] = soname
14492 consumer_providers = []
14493 for lib_consumer in lib_consumers:
# NOTE(review): cache key mismatch — the lookup uses `lib` but the
# store below uses `lib_consumer`; findProviders takes the consumer,
# so the .get() should probably be keyed by lib_consumer. TODO confirm.
14494 providers = provider_cache.get(lib)
14495 if providers is None:
14496 providers = linkmap.findProviders(lib_consumer)
14497 provider_cache[lib_consumer] = providers
14498 if soname not in providers:
14499 # Why does this happen?
14501 consumer_providers.append(
14502 (lib_consumer, providers[soname]))
14504 consumers[lib] = consumer_providers
14506 consumer_map[pkg] = consumers
# Resolve consumer/provider file paths to owning packages in one pass.
14510 search_files = set()
14511 for consumers in consumer_map.itervalues():
14512 for lib, consumer_providers in consumers.iteritems():
14513 for lib_consumer, providers in consumer_providers:
14514 search_files.add(lib_consumer)
14515 search_files.update(providers)
14517 writemsg_level(">>> Assigning files to packages...\n")
14518 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14520 for pkg, consumers in consumer_map.items():
14521 for lib, consumer_providers in consumers.items():
14522 lib_consumers = set()
14524 for lib_consumer, providers in consumer_providers:
14525 owner_set = file_owners.get(lib_consumer)
14526 provider_dblinks = set()
14527 provider_pkgs = set()
14529 if len(providers) > 1:
14530 for provider in providers:
14531 provider_set = file_owners.get(provider)
14532 if provider_set is not None:
14533 provider_dblinks.update(provider_set)
# If another (kept) package also provides the library, consumers
# are safe and this dependency can be disregarded.
14535 if len(provider_dblinks) > 1:
14536 for provider_dblink in provider_dblinks:
14537 pkg_key = ("installed", myroot,
14538 provider_dblink.mycpv, "nomerge")
14539 if pkg_key not in clean_set:
14540 provider_pkgs.add(vardb.get(pkg_key))
14545 if owner_set is not None:
14546 lib_consumers.update(owner_set)
# Consumers that are themselves being removed don't count.
14548 for consumer_dblink in list(lib_consumers):
14549 if ("installed", myroot, consumer_dblink.mycpv,
14550 "nomerge") in clean_set:
14551 lib_consumers.remove(consumer_dblink)
14555 consumers[lib] = lib_consumers
14559 del consumer_map[pkg]
14562 # TODO: Implement a package set for rebuilding consumer packages.
14564 msg = "In order to avoid breakage of link level " + \
14565 "dependencies, one or more packages will not be removed. " + \
14566 "This can be solved by rebuilding " + \
14567 "the packages that pulled them in."
14569 prefix = bad(" * ")
14570 from textwrap import wrap
14571 writemsg_level("".join(prefix + "%s\n" % line for \
14572 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14575 for pkg, consumers in consumer_map.iteritems():
14576 unique_consumers = set(chain(*consumers.values()))
14577 unique_consumers = sorted(consumer.mycpv \
14578 for consumer in unique_consumers)
14580 msg.append(" %s pulled in by:" % (pkg.cpv,))
14581 for consumer in unique_consumers:
14582 msg.append(" %s" % (consumer,))
14584 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14585 level=logging.WARNING, noiselevel=-1)
14587 # Add lib providers to the graph as children of lib consumers,
14588 # and also add any dependencies pulled in by the provider.
14589 writemsg_level(">>> Adding lib providers to graph...\n")
14591 for pkg, consumers in consumer_map.iteritems():
14592 for consumer_dblink in set(chain(*consumers.values())):
14593 consumer_pkg = vardb.get(("installed", myroot,
14594 consumer_dblink.mycpv, "nomerge"))
14595 if not resolver._add_pkg(pkg,
14596 Dependency(parent=consumer_pkg,
14597 priority=UnmergeDepPriority(runtime=True),
14599 resolver.display_problems()
# Re-run graph completion now that library providers were added,
# then recompute the clean list against the enlarged graph.
14602 writemsg_level("\nCalculating dependencies ")
14603 success = resolver._complete_graph()
14604 writemsg_level("\b\b... done!\n")
14605 resolver.display_problems()
14608 if unresolved_deps():
14611 graph = resolver.digraph.copy()
14612 required_pkgs_total = 0
14614 if isinstance(node, Package):
14615 required_pkgs_total += 1
14616 cleanlist = create_cleanlist()
14619 clean_set = set(cleanlist)
14621 # Use a topological sort to create an unmerge order such that
14622 # each package is unmerged before it's dependencies. This is
14623 # necessary to avoid breaking things that may need to run
14624 # during pkg_prerm or pkg_postrm phases.
14626 # Create a new graph to account for dependencies between the
14627 # packages being unmerged.
14631 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14632 runtime = UnmergeDepPriority(runtime=True)
14633 runtime_post = UnmergeDepPriority(runtime_post=True)
14634 buildtime = UnmergeDepPriority(buildtime=True)
14636 "RDEPEND": runtime,
14637 "PDEPEND": runtime_post,
14638 "DEPEND": buildtime,
14641 for node in clean_set:
14642 graph.add(node, None)
14644 node_use = node.metadata["USE"].split()
14645 for dep_type in dep_keys:
14646 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking while evaluating installed
# packages' (possibly stale) dependency strings.
14650 portage.dep._dep_check_strict = False
14651 success, atoms = portage.dep_check(depstr, None, settings,
14652 myuse=node_use, trees=resolver._graph_trees,
14655 portage.dep._dep_check_strict = True
14657 # Ignore invalid deps of packages that will
14658 # be uninstalled anyway.
14661 priority = priority_map[dep_type]
14663 if not isinstance(atom, portage.dep.Atom):
14664 # Ignore invalid atoms returned from dep_check().
14668 matches = vardb.match_pkgs(atom)
14671 for child_node in matches:
14672 if child_node in clean_set:
14673 graph.add(child_node, node, priority=priority)
14676 if len(graph.order) == len(graph.root_nodes()):
14677 # If there are no dependencies between packages
14678 # let unmerge() group them by cat/pn.
14680 cleanlist = [pkg.cpv for pkg in graph.order]
14682 # Order nodes from lowest to highest overall reference count for
14683 # optimal root node selection.
14684 node_refcounts = {}
14685 for node in graph.order:
14686 node_refcounts[node] = len(graph.parent_nodes(node))
14687 def cmp_reference_count(node1, node2):
14688 return node_refcounts[node1] - node_refcounts[node2]
14689 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Peel root nodes off the graph, relaxing ignored priorities step by
# step so circular dependencies can eventually be broken.
14691 ignore_priority_range = [None]
14692 ignore_priority_range.extend(
14693 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14694 while not graph.empty():
14695 for ignore_priority in ignore_priority_range:
14696 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14700 raise AssertionError("no root nodes")
14701 if ignore_priority is not None:
14702 # Some deps have been dropped due to circular dependencies,
14703 # so only pop one node in order do minimize the number that
14708 cleanlist.append(node.cpv)
14710 unmerge(root_config, myopts, "unmerge", cleanlist,
14711 ldpath_mtimes, ordered=ordered)
14713 if action == "prune":
# Summary statistics printed at the end of the run.
14716 if not cleanlist and "--quiet" in myopts:
14719 print "Packages installed: "+str(len(vardb.cpv_all()))
14720 print "Packages in world: " + \
14721 str(len(root_config.sets["world"].getAtoms()))
14722 print "Packages in system: " + \
14723 str(len(root_config.sets["system"].getAtoms()))
14724 print "Required packages: "+str(required_pkgs_total)
14725 if "--pretend" in myopts:
14726 print "Number to remove: "+str(len(cleanlist))
14728 print "Number removed: "+str(len(cleanlist))
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Construct a depgraph for the given resume list. This will raise
    PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
    @returns: (success, depgraph, dropped_tasks)
    """
    # NOTE(review): this listing elides several structural lines of the
    # original (retry-loop / try headers, continue/raise statements);
    # the indentation below reflects the original nesting.
    skip_unsatisfied = True
    mergelist = mtimedb["resume"]["mergelist"]
    # Packages that had to be dropped from the resume list in order to
    # make the remainder satisfiable; reported back to the caller.
    dropped_tasks = set()
        mydepgraph = depgraph(settings, trees,
            myopts, myparams, spinner)
            # Re-validate the stored resume list against current state.
            success = mydepgraph.loadResumeCommand(mtimedb["resume"],
                skip_masked=skip_masked)
        except depgraph.UnsatisfiedResumeDep, e:
            if not skip_unsatisfied:
            graph = mydepgraph.digraph
            # Parents whose dependencies became unsatisfied; dict used
            # as an identity-preserving set.
            unsatisfied_parents = dict((dep.parent, dep.parent) \
                for dep in e.value)
            traversed_nodes = set()
            unsatisfied_stack = list(unsatisfied_parents)
            # Propagate upward: dropping a package can in turn leave its
            # own parents with unsatisfied dependencies.
            while unsatisfied_stack:
                pkg = unsatisfied_stack.pop()
                if pkg in traversed_nodes:
                traversed_nodes.add(pkg)

                # If this package was pulled in by a parent
                # package scheduled for merge, removing this
                # package may cause the parent package's
                # dependency to become unsatisfied.
                for parent_node in graph.parent_nodes(pkg):
                    if not isinstance(parent_node, Package) \
                        or parent_node.operation not in ("merge", "nomerge"):
                        graph.child_nodes(parent_node,
                        ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
                    if pkg in unsatisfied:
                        unsatisfied_parents[parent_node] = parent_node
                        unsatisfied_stack.append(parent_node)

            # Rebuild the mergelist without the unsatisfied parents.
            pruned_mergelist = []
            for x in mergelist:
                if isinstance(x, list) and \
                    tuple(x) not in unsatisfied_parents:
                    pruned_mergelist.append(x)

            # If the mergelist doesn't shrink then this loop is infinite.
            if len(pruned_mergelist) == len(mergelist):
                # This happens if a package can't be dropped because
                # it's already installed, but it has unsatisfied PDEPEND.
            mergelist[:] = pruned_mergelist

            # Exclude installed packages that have been removed from the graph due
            # to failure to build/install runtime dependencies after the dependent
            # package has already been installed.
            dropped_tasks.update(pkg for pkg in \
                unsatisfied_parents if pkg.operation != "nomerge")
            # Break reference cycles so the discarded graph state can be
            # garbage-collected before the next retry.
            mydepgraph.break_refs(unsatisfied_parents)

            del e, graph, traversed_nodes, \
                unsatisfied_parents, unsatisfied_stack
    return (success, mydepgraph, dropped_tasks)
def action_build(settings, trees, mtimedb,
    myopts, myaction, myfiles, spinner):
    """Main build/merge action: validate any stored resume data, build a
    dependency graph (fresh or resumed), display it and prompt the user
    when requested, then hand the merge list to the Scheduler.

    NOTE(review): this listing elides many structural lines of the
    original (else/try headers, continue/return/print statements); the
    indentation below reflects the original nesting.
    """

    # validate the state of the resume data
    # so that we can make assumptions later.
    for k in ("resume", "resume_backup"):
        if k not in mtimedb:
        # Discard the entry unless it has the expected shape:
        # a dict containing a list of 4-element task lists.
        resume_data = mtimedb[k]
        if not isinstance(resume_data, dict):
        mergelist = resume_data.get("mergelist")
        if not isinstance(mergelist, list):
        for x in mergelist:
            if not (isinstance(x, list) and len(x) == 4):
            pkg_type, pkg_root, pkg_key, pkg_action = x
            if pkg_root not in trees:
                # Current $ROOT setting differs,
                # so the list must be stale.
        resume_opts = resume_data.get("myopts")
        if not isinstance(resume_opts, (dict, list)):
        favorites = resume_data.get("favorites")
        if not isinstance(favorites, list):

    # When resuming, merge the options saved with the resume list back
    # into the current option set.
    if "--resume" in myopts and \
        ("resume" in mtimedb or
        "resume_backup" in mtimedb):
        if "resume" not in mtimedb:
            mtimedb["resume"] = mtimedb["resume_backup"]
            del mtimedb["resume_backup"]
        # "myopts" is a list for backward compatibility.
        resume_opts = mtimedb["resume"].get("myopts", [])
        if isinstance(resume_opts, list):
            resume_opts = dict((k,True) for k in resume_opts)
        # These are safe to change between invocations, so never
        # resurrect them from the saved options.
        for opt in ("--ask", "--color", "--skipfirst", "--tree"):
            resume_opts.pop(opt, None)

        # Current options always override resume_opts.
        resume_opts.update(myopts)
        myopts.update(resume_opts)

        if "--debug" in myopts:
            writemsg_level("myopts %s\n" % (myopts,))

        # Adjust config according to options of the command being resumed.
        for myroot in trees:
            mysettings = trees[myroot]["vartree"].settings
            mysettings.unlock()
            adjust_config(myopts, mysettings)
        del myroot, mysettings

    ldpath_mtimes = mtimedb["ldpath"]
    # Cache the commonly tested option flags.
    buildpkgonly = "--buildpkgonly" in myopts
    pretend = "--pretend" in myopts
    fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
    ask = "--ask" in myopts
    nodeps = "--nodeps" in myopts
    oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
    tree = "--tree" in myopts
    if nodeps and tree:
        del myopts["--tree"]
        portage.writemsg(colorize("WARN", " * ") + \
            "--tree is broken with --nodeps. Disabling...\n")
    debug = "--debug" in myopts
    verbose = "--verbose" in myopts
    quiet = "--quiet" in myopts
    if pretend or fetchonly:
        # make the mtimedb readonly
        mtimedb.filename = None
    if '--digest' in myopts or 'digest' in settings.features:
        # Warn that automatic digest generation can hide corruption.
        if '--digest' in myopts:
            msg = "The --digest option"
            msg = "The FEATURES=digest setting"
        msg += " can prevent corruption from being" + \
            " noticed. The `repoman manifest` command is the preferred" + \
            " way to generate manifests and it is capable of doing an" + \
            " entire repository or category at once."
        prefix = bad(" * ")
        writemsg(prefix + "\n")
        from textwrap import wrap
        for line in wrap(msg, 72):
            writemsg("%s%s\n" % (prefix, line))
        writemsg(prefix + "\n")

    # Print a header describing what the displayed package list means.
    if "--quiet" not in myopts and \
        ("--pretend" in myopts or "--ask" in myopts or \
        "--tree" in myopts or "--verbose" in myopts):
        if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
        elif "--buildpkgonly" in myopts:
        if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
            print darkgreen("These are the packages that would be %s, in reverse order:") % action
            print darkgreen("These are the packages that would be %s, in order:") % action

    show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
    if not show_spinner:
        spinner.update = spinner.update_quiet

        # Resume branch: rebuild the depgraph from the stored mergelist.
        favorites = mtimedb["resume"].get("favorites")
        if not isinstance(favorites, list):
            print "Calculating dependencies ",
        myparams = create_depgraph_params(myopts, myaction)

        resume_data = mtimedb["resume"]
        mergelist = resume_data["mergelist"]
        if mergelist and "--skipfirst" in myopts:
            # --skipfirst: drop the first pending "merge" task.
            for i, task in enumerate(mergelist):
                if isinstance(task, list) and \
                    task and task[-1] == "merge":

            success, mydepgraph, dropped_tasks = resume_depgraph(
                settings, trees, mtimedb, myopts, myparams, spinner)
        except (portage.exception.PackageNotFound,
            depgraph.UnsatisfiedResumeDep), e:
            if isinstance(e, depgraph.UnsatisfiedResumeDep):
                mydepgraph = e.depgraph
            from textwrap import wrap
            from portage.output import EOutput

            resume_data = mtimedb["resume"]
            mergelist = resume_data.get("mergelist")
            if not isinstance(mergelist, list):
            if mergelist and debug or (verbose and not quiet):
                out.eerror("Invalid resume list:")
                for task in mergelist:
                    if isinstance(task, list):
                        out.eerror(indent + str(tuple(task)))

            if isinstance(e, depgraph.UnsatisfiedResumeDep):
                out.eerror("One or more packages are either masked or " + \
                    "have missing dependencies:")
                for dep in e.value:
                    if dep.atom is None:
                        out.eerror(indent + "Masked package:")
                        out.eerror(2 * indent + str(dep.parent))
                        out.eerror(indent + str(dep.atom) + " pulled in by:")
                        out.eerror(2 * indent + str(dep.parent))
                msg = "The resume list contains packages " + \
                    "that are either masked or have " + \
                    "unsatisfied dependencies. " + \
                    "Please restart/continue " + \
                    "the operation manually, or use --skipfirst " + \
                    "to skip the first package in the list and " + \
                    "any other packages that may be " + \
                    "masked or have missing dependencies."
                for line in wrap(msg, 72):
            elif isinstance(e, portage.exception.PackageNotFound):
                out.eerror("An expected package is " + \
                    "not available: %s" % str(e))
                msg = "The resume list contains one or more " + \
                    "packages that are no longer " + \
                    "available. Please restart/continue " + \
                    "the operation manually."
                for line in wrap(msg, 72):
                print "\b\b... done!"

            # Report which packages had to be dropped to make the
            # remaining resume list satisfiable.
                portage.writemsg("!!! One or more packages have been " + \
                    "dropped due to\n" + \
                    "!!! masking or unsatisfied dependencies:\n\n",
                for task in dropped_tasks:
                    portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
                portage.writemsg("\n", noiselevel=-1)
            if mydepgraph is not None:
                mydepgraph.display_problems()
            if not (ask or pretend):
                # delete the current list and also the backup
                # since it's probably stale too.
                for k in ("resume", "resume_backup"):
                    mtimedb.pop(k, None)

        if ("--resume" in myopts):
            print darkgreen("emerge: It seems we have nothing to resume...")

        # Fresh (non-resume) branch: build the depgraph from myfiles.
        myparams = create_depgraph_params(myopts, myaction)
        if "--quiet" not in myopts and "--nodeps" not in myopts:
            print "Calculating dependencies ",
        mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
            retval, favorites = mydepgraph.select_files(myfiles)
        except portage.exception.PackageNotFound, e:
            portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
        except portage.exception.PackageSetNotFound, e:
            root_config = trees[settings["ROOT"]]["root_config"]
            display_missing_pkg_set(root_config, e.value)
            print "\b\b... done!"
            mydepgraph.display_problems()

    # Interactive display/prompt pass (--ask/--tree/--verbose, but not
    # plain --pretend).
    if "--pretend" not in myopts and \
        ("--ask" in myopts or "--tree" in myopts or \
        "--verbose" in myopts) and \
        not ("--quiet" in myopts and "--ask" not in myopts):
        if "--resume" in myopts:
            mymergelist = mydepgraph.altlist()
            if len(mymergelist) == 0:
                print colorize("INFORM", "emerge: It seems we have nothing to resume...")
            favorites = mtimedb["resume"]["favorites"]
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=tree),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
            prompt="Would you like to resume merging these packages?"
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=("--tree" in myopts)),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
            # Count packages that would actually be merged (vs nomerge).
            for x in mydepgraph.altlist():
                if isinstance(x, Package) and x.operation == "merge":

                sets = trees[settings["ROOT"]]["root_config"].sets
                world_candidates = None
                if "--noreplace" in myopts and \
                    not oneshot and favorites:
                    # Sets that are not world candidates are filtered
                    # out here since the favorites list needs to be
                    # complete for depgraph.loadResumeCommand() to
                    # operate correctly.
                    world_candidates = [x for x in favorites \
                        if not (x.startswith(SETPREFIX) and \
                        not sets[x[1:]].world_candidate)]
                if "--noreplace" in myopts and \
                    not oneshot and world_candidates:
                    for x in world_candidates:
                        print " %s %s" % (good("*"), x)
                    prompt="Would you like to add these packages to your world favorites?"
                elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
                    prompt="Nothing to merge; would you like to auto-clean packages?"
                    print "Nothing to merge; quitting."
            elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
                prompt="Would you like to fetch the source files for these packages?"
                prompt="Would you like to merge these packages?"
        if "--ask" in myopts and userquery(prompt) == "No":
        # Don't ask again (e.g. when auto-cleaning packages after merge)
        myopts.pop("--ask", None)

    # Pretend-only display pass (no prompt, no merging).
    if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
        if ("--resume" in myopts):
            mymergelist = mydepgraph.altlist()
            if len(mymergelist) == 0:
                print colorize("INFORM", "emerge: It seems we have nothing to resume...")
            favorites = mtimedb["resume"]["favorites"]
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=tree),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=("--tree" in myopts)),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
            if "--buildpkgonly" in myopts:
                # --buildpkgonly requires that, after removing all
                # nomerge/non-package nodes, no hard dep edges remain.
                graph_copy = mydepgraph.digraph.clone()
                removed_nodes = set()
                for node in graph_copy:
                    if not isinstance(node, Package) or \
                        node.operation == "nomerge":
                        removed_nodes.add(node)
                graph_copy.difference_update(removed_nodes)
                if not graph_copy.hasallzeros(ignore_priority = \
                    DepPrioritySatisfiedRange.ignore_medium):
                    print "\n!!! --buildpkgonly requires all dependencies to be merged."
                    print "!!! You have to merge the dependencies before you can build this package.\n"
        if "--buildpkgonly" in myopts:
            # Same dependency-completeness check as above, on the
            # non-pretend (actual merge) path.
            graph_copy = mydepgraph.digraph.clone()
            removed_nodes = set()
            for node in graph_copy:
                if not isinstance(node, Package) or \
                    node.operation == "nomerge":
                    removed_nodes.add(node)
            graph_copy.difference_update(removed_nodes)
            if not graph_copy.hasallzeros(ignore_priority = \
                DepPrioritySatisfiedRange.ignore_medium):
                print "\n!!! --buildpkgonly requires all dependencies to be merged."
                print "!!! Cannot merge requested packages. Merge deps and try again.\n"

        if ("--resume" in myopts):
            favorites=mtimedb["resume"]["favorites"]
            mymergelist = mydepgraph.altlist()
            # break_refs()/clear_caches() release memory held by the
            # depgraph, which is no longer needed once the Scheduler
            # has its own graph.
            mydepgraph.break_refs(mymergelist)
            mergetask = Scheduler(settings, trees, mtimedb, myopts,
                spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
            del mydepgraph, mymergelist
            clear_caches(trees)

            retval = mergetask.merge()
            merge_count = mergetask.curval
            # Preserve the previous resume list as a backup while it
            # still holds more than one pending task.
            if "resume" in mtimedb and \
                "mergelist" in mtimedb["resume"] and \
                len(mtimedb["resume"]["mergelist"]) > 1:
                mtimedb["resume_backup"] = mtimedb["resume"]
                del mtimedb["resume"]
            mtimedb["resume"]={}
            # Stored as a dict starting with portage-2.1.6_rc1, and supported
            # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
            # a list type for options.
            mtimedb["resume"]["myopts"] = myopts.copy()

            # Convert Atom instances to plain str.
            mtimedb["resume"]["favorites"] = [str(x) for x in favorites]

            pkglist = mydepgraph.altlist()
            mydepgraph.saveNomergeFavorites()
            mydepgraph.break_refs(pkglist)
            mergetask = Scheduler(settings, trees, mtimedb, myopts,
                spinner, pkglist, favorites, mydepgraph.schedulerGraph())
            del mydepgraph, pkglist
            clear_caches(trees)

            retval = mergetask.merge()
            merge_count = mergetask.curval

        if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
            if "yes" == settings.get("AUTOCLEAN"):
                portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
                unmerge(trees[settings["ROOT"]]["root_config"],
                    myopts, "clean", [],
                    ldpath_mtimes, autoclean=1)
                portage.writemsg_stdout(colorize("WARN", "WARNING:")
                    + " AUTOCLEAN is disabled. This can cause serious"
                    + " problems due to overlapping packages.\n")
            # Drop preserved-libs registry entries whose files are gone.
            trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
    # Two mutually-exclusive action options appeared on one command
    # line; tell the user which two collided.
    # NOTE(review): the original's trailing exit statement is elided
    # from this listing.
    sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
    sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
def insert_optional_args(args):
    """
    Parse optional arguments and insert a value if one has
    not been provided. This is done before feeding the args
    to the optparse parser since that parser does not support
    this feature natively.

    NOTE(review): this listing elides several structural lines
    (the while-loop header, new_args initialization, try/except/else
    headers and continue statements); indentation reflects the
    original nesting.
    """
    jobs_opts = ("-j", "--jobs")
    # Options that may appear without a value; each maps to the values a
    # user may legally supply inline.
    default_arg_opts = {
        '--deselect' : ('n',),
        '--root-deps' : ('rdeps',),
    arg_stack = args[:]
    arg_stack.reverse()
        arg = arg_stack.pop()

        default_arg_choices = default_arg_opts.get(arg)
        if default_arg_choices is not None:
            new_args.append(arg)
            # Consume the next token only if it is one of the recognized
            # choices for this option.
            if arg_stack and arg_stack[-1] in default_arg_choices:
                new_args.append(arg_stack.pop())
                # insert default argument
                new_args.append('True')

        # A short option cluster containing "j" (e.g. "-aj") counts as a
        # jobs option too.
        short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
        if not (short_job_opt or arg in jobs_opts):
            new_args.append(arg)

        # Insert an empty placeholder in order to
        # satisfy the requirements of optparse.

        new_args.append("--jobs")
        if short_job_opt and len(arg) > 2:
            if arg[:2] == "-j":
                # "-jN": the count is fused onto the option itself.
                job_count = int(arg[2:])
                saved_opts = arg[2:]
                # Cluster form: keep the other short options for re-emit.
                saved_opts = arg[1:].replace("j", "")

        if job_count is None and arg_stack:
            job_count = int(arg_stack[-1])
            # Discard the job count from the stack
            # since we're consuming it here.
        if job_count is None:
            # unlimited number of jobs
            new_args.append("True")
            new_args.append(str(job_count))

        if saved_opts is not None:
            # Re-emit the non-jobs short options that shared the cluster.
            new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
    """Parse the emerge command line and return (myaction, myopts, myfiles).

    NOTE(review): this listing elides several lines (locals
    initialization, dict keys/braces, try/else headers); indentation
    reflects the original nesting.
    """
    global actions, options, shortmapping

    # Long-option spellings accepted for backward compatibility.
    longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
    # Options that take an argument, with their optparse kwargs.
    argument_options = {
            "help":"specify the location for portage configuration files",
            "help":"enable or disable color output",
            "choices":("y", "n")
            "help" : "remove atoms from the world file",
            "choices" : ("True", "n")
            "help" : "Specifies the number of packages to build " + \
        "--load-average": {
            "help" :"Specifies that no new builds should be started " + \
                "if there are other builds running and the load average " + \
                "is at least LOAD (a floating-point number).",
            "help":"include unnecessary build time dependencies",
            "choices":("y", "n")
            "help":"specify conditions to trigger package reinstallation",
            "choices":["changed-use"]
            "help" : "specify the target root filesystem for merging packages",
            "help" : "modify interpretation of depedencies",
            "choices" :("True", "rdeps")

    from optparse import OptionParser
    parser = OptionParser()
    if parser.has_option("--help"):
        parser.remove_option("--help")

    # Register action flags, boolean options, short options and aliases;
    # dest names replace dashes with underscores.
    for action_opt in actions:
        parser.add_option("--" + action_opt, action="store_true",
            dest=action_opt.replace("-", "_"), default=False)
    for myopt in options:
        parser.add_option(myopt, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for shortopt, longopt in shortmapping.iteritems():
        parser.add_option("-" + shortopt, action="store_true",
            dest=longopt.lstrip("--").replace("-", "_"), default=False)
    for myalias, myopt in longopt_aliases.iteritems():
        parser.add_option(myalias, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)

    for myopt, kwargs in argument_options.iteritems():
        parser.add_option(myopt,
            dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

    # Fill in implicit option values before optparse sees the args.
    tmpcmdline = insert_optional_args(tmpcmdline)

    myoptions, myargs = parser.parse_args(args=tmpcmdline)

    # Normalize the "True" placeholder strings into real booleans.
    if myoptions.deselect == "True":
        myoptions.deselect = True

    if myoptions.root_deps == "True":
        myoptions.root_deps = True

        if myoptions.jobs == "True":
            jobs = int(myoptions.jobs)

        # Reject non-positive or unparsable job counts.
        if jobs is not True and \
                writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
                    (myoptions.jobs,), noiselevel=-1)

        myoptions.jobs = jobs

    if myoptions.load_average:
            load_average = float(myoptions.load_average)

        if load_average <= 0.0:
            load_average = None
                writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
                    (myoptions.load_average,), noiselevel=-1)

        myoptions.load_average = load_average

    # Copy parsed values into the myopts dict keyed by option string.
    for myopt in options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
            myopts[myopt] = True

    for myopt in argument_options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)

    if myoptions.searchdesc:
        myoptions.search = True

    # Exactly one action may be requested per invocation.
    for action_opt in actions:
        v = getattr(myoptions, action_opt.replace("-", "_"))
                multiple_actions(myaction, action_opt)
            myaction = action_opt

    if myaction is None and myoptions.deselect is True:
        myaction = 'deselect'

    return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
    """Run settings.validate() on the vartree config of every root.

    Any validation problems are reported by the config object itself;
    this helper merely visits each configured root once.
    """
    for root_trees in trees.values():
        root_trees["vartree"].settings.validate()
def clear_caches(trees):
    """Flush per-tree dbapi caches plus the global portage dircache.

    Called after a depgraph has been discarded so that stale metadata
    does not leak into subsequent calculations and memory is reclaimed.
    NOTE(review): the two lines following this function are elided from
    this listing; the original may end with additional cleanup (e.g. a
    gc.collect() call) — confirm against the full source.
    """
    for d in trees.itervalues():
        # melt() undoes a prior freeze(); presumably drops the
        # porttree dbapi's cached match results — TODO confirm.
        d["porttree"].dbapi.melt()
        d["porttree"].dbapi._aux_cache.clear()
        d["bintree"].dbapi._aux_cache.clear()
        d["bintree"].dbapi._clear_cache()
        d["vartree"].dbapi.linkmap._clear_cache()
    # Module-global directory listing cache kept by portage.
    portage.dircache.clear()
def load_emerge_config(trees=None):
    """Create the portage trees, attach a RootConfig to each root, and
    open the global mtimedb.

    @returns: (settings, trees, mtimedb)
    NOTE(review): this listing elides a few lines (kwargs
    initialization and two guard lines); indentation reflects the
    original nesting.
    """
    # Forward relevant environment overrides to create_trees().
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        v = os.environ.get(envvar, None)
        if v and v.strip():
    trees = portage.create_trees(trees=trees, **kwargs)

    # Give every root a RootConfig wired to its set configuration.
    for root, root_trees in trees.iteritems():
        settings = root_trees["vartree"].settings
        setconfig = load_default_config(settings, root_trees)
        root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

    settings = trees["/"]["vartree"].settings

    for myroot in trees:
            # NOTE(review): the guard above this line is elided here —
            # presumably it selects a non-"/" root when one exists;
            # confirm against the full source.
            settings = trees[myroot]["vartree"].settings

    mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
    mtimedb = portage.MtimeDB(mtimedbfile)

    return settings, trees, mtimedb
def adjust_config(myopts, settings):
    """Make emerge specific adjustments to the config.

    NOTE(review): this listing elides several lines (try headers,
    default assignments, else branches); indentation reflects the
    original nesting.
    """

    # To enhance usability, make some vars case insensitive by forcing
    # them to lower case.
    for myvar in ("AUTOCLEAN", "NOCOLOR"):
        if myvar in settings:
            settings[myvar] = settings[myvar].lower()
            settings.backup_changes(myvar)

    # Kill noauto as it will break merges otherwise.
    if "noauto" in settings.features:
        settings.features.remove('noauto')
        settings['FEATURES'] = ' '.join(sorted(settings.features))
        settings.backup_changes("FEATURES")

    # Sanitize CLEAN_DELAY; on parse failure the previous default is
    # written back into the config.
        CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
            settings["CLEAN_DELAY"], noiselevel=-1)
    settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
    settings.backup_changes("CLEAN_DELAY")

    # Same sanitization for EMERGE_WARNING_DELAY (default 10 seconds).
    EMERGE_WARNING_DELAY = 10
        EMERGE_WARNING_DELAY = int(settings.get(
            "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
            settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
    settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
    settings.backup_changes("EMERGE_WARNING_DELAY")

    if "--quiet" in myopts:
        settings["PORTAGE_QUIET"]="1"
        settings.backup_changes("PORTAGE_QUIET")

    if "--verbose" in myopts:
        settings["PORTAGE_VERBOSE"] = "1"
        settings.backup_changes("PORTAGE_VERBOSE")

    # Set so that configs will be merged regardless of remembered status
    if ("--noconfmem" in myopts):
        settings["NOCONFMEM"]="1"
        settings.backup_changes("NOCONFMEM")

    # Set various debug markers... They should be merged somehow.
        PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
        if PORTAGE_DEBUG not in (0, 1):
            portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
                PORTAGE_DEBUG, noiselevel=-1)
            portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
            settings["PORTAGE_DEBUG"], noiselevel=-1)
    # --debug on the command line forces debug mode on.
    if "--debug" in myopts:
    settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
    settings.backup_changes("PORTAGE_DEBUG")

    if settings.get("NOCOLOR") not in ("yes","true"):
        portage.output.havecolor = 1

    """The explicit --color < y | n > option overrides the NOCOLOR environment
    variable and stdout auto-detection."""
    if "--color" in myopts:
        if "y" == myopts["--color"]:
            portage.output.havecolor = 1
            settings["NOCOLOR"] = "false"
            portage.output.havecolor = 0
            settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
    elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
        # Not a terminal: disable color unless explicitly forced on.
        portage.output.havecolor = 0
        settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
    # NOTE(review): the body is elided from this listing — presumably it
    # invokes the ionice()/nice() helpers defined below; confirm against
    # the full source.
def nice(settings):
    """Renice the current process according to PORTAGE_NICENESS.

    NOTE(review): the try header is elided from this listing.
    """
        os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
    except (OSError, ValueError), e:
        # A bad value or insufficient permission is reported, not fatal.
        out = portage.output.EOutput()
        out.eerror("Failed to change nice value to '%s'" % \
            settings["PORTAGE_NICENESS"])
        out.eerror("%s\n" % str(e))
def ionice(settings):
    """Run the user-configured PORTAGE_IONICE_COMMAND, substituting the
    current process id for ${PID} in the command string.

    NOTE(review): several guard/return lines are elided from this
    listing; indentation reflects the original nesting.
    """
    ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
        ionice_cmd = shlex.split(ionice_cmd)

    from portage.util import varexpand
    # Expand ${PID} (and any other referenced variables) per argument.
    variables = {"PID" : str(os.getpid())}
    cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        # The OS kernel probably doesn't support ionice,
        # so return silently.

    if rval != os.EX_OK:
        out = portage.output.EOutput()
        out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
        out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
    """Report that set_name does not exist and list the sets that do.

    NOTE(review): the msg-list initialization line is elided from this
    listing.
    """
    msg.append(("emerge: There are no sets to satisfy '%s'. " + \
        "The following sets exist:") % \
        colorize("INFORM", set_name))

    for s in sorted(root_config.sets):
        msg.append(" %s" % s)

    writemsg_level("".join("%s\n" % l for l in msg),
        level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
    """Expand @set arguments on the command line into package atoms.

    Handles per-set option syntax ("@set{k=v,...}"), simple set algebra
    (intersection "/@", difference "-@", union "+@") and validation of
    the required "world"/"system" sets.
    @returns: (newargs, retval) where retval is an exit status.
    NOTE(review): this listing elides several structural lines (loop
    headers, else branches, return statements); indentation reflects
    the original nesting.
    """
    setconfig = root_config.setconfig

    sets = setconfig.getSets()

    # In order to know exactly which atoms/sets should be added to the
    # world file, the depgraph performs set expansion later. It will get
    # confused about where the atoms came from if it's not allowed to
    # expand them itself.
    do_not_expand = (None, )
        # Bare "system"/"world" arguments are sugar for the set syntax.
        if a in ("system", "world"):
            newargs.append(SETPREFIX+a)

    # separators for set arguments

    # WARNING: all operators must be of equal length
    DIFF_OPERATOR = "-@"
    UNION_OPERATOR = "+@"

    # First pass: strip "{...}" option blocks out of set arguments and
    # feed them to setconfig.update().
    for i in range(0, len(myfiles)):
        if myfiles[i].startswith(SETPREFIX):
            x = myfiles[i][len(SETPREFIX):]
                start = x.find(ARG_START)
                end = x.find(ARG_END)
                if start > 0 and start < end:
                    namepart = x[:start]
                    argpart = x[start+1:end]

                    # TODO: implement proper quoting
                    args = argpart.split(",")
                            k, v = a.split("=", 1)
                            # Valueless option: treat as boolean flag.
                            options[a] = "True"
                    setconfig.update(namepart, options)
                    newset += (x[:start-len(namepart)]+namepart)
                    x = x[end+len(ARG_END):]
            myfiles[i] = SETPREFIX+newset

    sets = setconfig.getSets()

    # display errors that occured while loading the SetConfig instance
    for e in setconfig.errors:
        print colorize("BAD", "Error during set creation: %s" % e)

    # emerge relies on the existance of sets with names "world" and "system"
    required_sets = ("world", "system")

    for s in required_sets:
            missing_sets.append(s)

        # Build a human-readable list of the missing set names.
        if len(missing_sets) > 2:
            missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
            missing_sets_str += ', and "%s"' % missing_sets[-1]
        elif len(missing_sets) == 2:
            missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
            missing_sets_str = '"%s"' % missing_sets[-1]
        msg = ["emerge: incomplete set configuration, " + \
            "missing set(s): %s" % missing_sets_str]
            msg.append(" sets defined: %s" % ", ".join(sets))
        msg.append(" This usually means that '%s'" % \
            (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
        msg.append(" is missing or corrupt.")
            writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

    unmerge_actions = ("unmerge", "prune", "clean", "depclean")

        if a.startswith(SETPREFIX):
            # support simple set operations (intersection, difference and union)
            # on the commandline. Expressions are evaluated strictly left-to-right
            if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
                expression = a[len(SETPREFIX):]
                # Peel operators off the right end, collecting operands
                # and operators in left-to-right order.
                while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
                    is_pos = expression.rfind(IS_OPERATOR)
                    diff_pos = expression.rfind(DIFF_OPERATOR)
                    union_pos = expression.rfind(UNION_OPERATOR)
                    op_pos = max(is_pos, diff_pos, union_pos)
                    s1 = expression[:op_pos]
                    s2 = expression[op_pos+len(IS_OPERATOR):]
                    op = expression[op_pos:op_pos+len(IS_OPERATOR)]
                        display_missing_pkg_set(root_config, s2)
                    expr_sets.insert(0, s2)
                    expr_ops.insert(0, op)
                if not expression in sets:
                    display_missing_pkg_set(root_config, expression)
                expr_sets.insert(0, expression)
                # Evaluate the collected operator chain left-to-right.
                result = set(setconfig.getSetAtoms(expression))
                for i in range(0, len(expr_ops)):
                    s2 = setconfig.getSetAtoms(expr_sets[i+1])
                    if expr_ops[i] == IS_OPERATOR:
                        result.intersection_update(s2)
                    elif expr_ops[i] == DIFF_OPERATOR:
                        result.difference_update(s2)
                    elif expr_ops[i] == UNION_OPERATOR:
                        raise NotImplementedError("unknown set operator %s" % expr_ops[i])
                newargs.extend(result)
                # Plain set argument (no operators).
                s = a[len(SETPREFIX):]
                    display_missing_pkg_set(root_config, s)
                setconfig.active.append(s)
                    set_atoms = setconfig.getSetAtoms(s)
                except portage.exception.PackageSetNotFound, e:
                    writemsg_level(("emerge: the given set '%s' " + \
                        "contains a non-existent set named '%s'.\n") % \
                        (s, e), level=logging.ERROR, noiselevel=-1)
                if myaction in unmerge_actions and \
                    not sets[s].supportsOperation("unmerge"):
                    sys.stderr.write("emerge: the given set '%s' does " % s + \
                        "not support unmerge operations\n")
                elif not set_atoms:
                    print "emerge: '%s' is an empty set" % s
                elif myaction not in do_not_expand:
                    newargs.extend(set_atoms)
                    # Leave the set reference intact so the depgraph can
                    # expand it itself (see do_not_expand above).
                    newargs.append(SETPREFIX+s)
                for e in sets[s].errors:

    return (newargs, retval)
def repo_name_check(trees):
    """Warn about repositories that lack a profiles/repo_name entry.

    @returns: True if at least one repository is missing its repo_name.
    NOTE(review): a loop header over the known repositories is elided
    from this listing before the discard() call.
    """
    missing_repo_names = set()
    for root, root_trees in trees.iteritems():
        if "porttree" in root_trees:
            portdb = root_trees["porttree"].dbapi
            # Start with every porttree, then discard the ones that have
            # a registered repository name.
            missing_repo_names.update(portdb.porttrees)
            repos = portdb.getRepositories()
                missing_repo_names.discard(portdb.getRepositoryPath(r))
            if portdb.porttree_root in missing_repo_names and \
                not os.path.exists(os.path.join(
                portdb.porttree_root, "profiles")):
                # This is normal if $PORTDIR happens to be empty,
                # so don't warn about it.
                missing_repo_names.remove(portdb.porttree_root)

    if missing_repo_names:
        msg.append("WARNING: One or more repositories " + \
            "have missing repo_name entries:")
        for p in missing_repo_names:
            msg.append("\t%s/profiles/repo_name" % (p,))
        msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
            "should be a plain text file containing a unique " + \
            "name for the repository on the first line.", 70))
        writemsg_level("".join("%s\n" % l for l in msg),
            level=logging.WARNING, noiselevel=-1)

    return bool(missing_repo_names)
# Warn when multiple repositories declare the same profiles/repo_name,
# in which case portdbapi ignores all but one of them. The warning can
# be silenced by setting PORTAGE_REPO_DUPLICATE_WARN="0" in make.conf.
# Returns True when any ignored duplicate repositories were found.
# NOTE(review): the `ignored_repos = {}` initializer, the
# `if ignored_repos:` guard and the `msg = []` initializer appear in
# lines elided from this excerpt (embedded line numbers skip).
15838 def repo_name_duplicate_check(trees):
15840 for root, root_trees in trees.iteritems():
15841 if 'porttree' in root_trees:
15842 portdb = root_trees['porttree'].dbapi
15843 if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
15844 for repo_name, paths in portdb._ignored_repos:
# Key by (root, name, winning-path) so the report shows which
# repository path overrides which ignored ones.
15845 k = (root, repo_name, portdb.getRepositoryPath(repo_name))
15846 ignored_repos.setdefault(k, []).extend(paths)
15850 msg.append('WARNING: One or more repositories ' + \
15851 'have been ignored due to duplicate')
15852 msg.append(' profiles/repo_name entries:')
15854 for k in sorted(ignored_repos):
15855 msg.append(' %s overrides' % (k,))
15856 for path in ignored_repos[k]:
15857 msg.append(' %s' % (path,))
15859 msg.extend(' ' + x for x in textwrap.wrap(
15860 "All profiles/repo_name entries must be unique in order " + \
15861 "to avoid having duplicates ignored. " + \
15862 "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
15863 "/etc/make.conf if you would like to disable this warning."))
15864 writemsg_level(''.join('%s\n' % l for l in msg),
15865 level=logging.WARNING, noiselevel=-1)
15867 return bool(ignored_repos)
# Warn about any root whose CONFIG_PROTECT setting is empty/unset,
# since that disables protection of configuration files during merges.
15869 def config_protect_check(trees):
15870 for root, root_trees in trees.iteritems():
15871 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15872 msg = "!!! CONFIG_PROTECT is empty"
# NOTE(review): the guard deciding when to append the root suffix
# (presumably `if root != "/":`) is elided from this excerpt.
15874 msg += " for '%s'" % root
15875 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report an ambiguous short ebuild name `arg` that matched several
# atoms. In --quiet mode just print the matching fully-qualified
# category/package names; otherwise run a search on the package-name
# part so each candidate is shown with its description.
# NOTE(review): several lines are elided from this excerpt (embedded
# line numbers skip), e.g. the closing argument of
# insert_category_into_atom() and the search-output call inside the
# final loop.
15877 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15879 if "--quiet" in myopts:
15880 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15881 print "!!! one of the following fully-qualified ebuild names instead:\n"
15882 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15883 print " " + colorize("INFORM", cp)
# Non-quiet path: reuse the `search` action machinery to display
# descriptions for each ambiguous candidate.
15886 s = search(root_config, spinner, "--searchdesc" in myopts,
15887 "--quiet" not in myopts, "--usepkg" in myopts,
15888 "--usepkgonly" in myopts)
15889 null_cp = portage.dep_getkey(insert_category_into_atom(
15891 cat, atom_pn = portage.catsplit(null_cp)
15892 s.searchkey = atom_pn
15893 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15896 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15897 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that a valid profile is configured before allowing actions
# that require one. "info", "sync", --version and --help are always
# permitted; any root that has a configured profile also passes.
# Otherwise an error explaining the limited set of allowed actions is
# printed. Returns an exit status; the success-path `return os.EX_OK`
# lines are elided from this excerpt (embedded line numbers skip), as
# is the failure-path return after the error message.
15899 def profile_check(trees, myaction, myopts):
15900 if myaction in ("info", "sync"):
15902 elif "--version" in myopts or "--help" in myopts:
15904 for root, root_trees in trees.iteritems():
15905 if root_trees["root_config"].settings.profiles:
15907 # generate some profile related warning messages
15908 validate_ebuild_environment(trees)
15909 msg = "If you have just changed your profile configuration, you " + \
15910 "should revert back to the previous configuration. Due to " + \
15911 "your current profile being invalid, allowed actions are " + \
15912 "limited to --help, --info, --sync, and --version."
15913 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15914 level=logging.ERROR, noiselevel=-1)
# NOTE(review): this is the body of emerge's main entry point; its
# `def` line falls in a gap just before this excerpt and the function
# continues past the end of it. Many original lines are elided
# throughout (the embedded line numbers skip), so comments below
# describe only what the visible code demonstrates.
#
# Overall flow: parse options (twice), load and adjust configuration,
# set up the spinner, logging and signal handlers, then dispatch on
# `myaction` to the various action_* handlers.
15919 global portage # NFC why this is necessary now - genone
15920 portage._disable_legacy_globals()
15921 # Disable color until we're sure that it should be enabled (after
15922 # EMERGE_DEFAULT_OPTS has been parsed).
15923 portage.output.havecolor = 0
15924 # This first pass is just for options that need to be known as early as
15925 # possible, such as --config-root. They will be parsed again later,
15926 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15927 # the value of --config-root).
15928 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
# Early options are exported via the environment so that the portage
# config loaded below picks them up.
15929 if "--debug" in myopts:
15930 os.environ["PORTAGE_DEBUG"] = "1"
15931 if "--config-root" in myopts:
15932 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15933 if "--root" in myopts:
15934 os.environ["ROOT"] = myopts["--root"]
15936 # Portage needs to ensure a sane umask for the files it creates.
15938 settings, trees, mtimedb = load_emerge_config()
15939 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15940 rval = profile_check(trees, myaction, myopts)
15941 if rval != os.EX_OK:
# Apply any pending global package-move updates; on change, the whole
# config must be reloaded from scratch.
15944 if portage._global_updates(trees, mtimedb["updates"]):
15946 # Reload the whole config from scratch.
15947 settings, trees, mtimedb = load_emerge_config(trees=trees)
15948 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15950 xterm_titles = "notitles" not in settings.features
# Second parsing pass: prepend EMERGE_DEFAULT_OPTS (unless suppressed
# via --ignore-default-opts) to the real command line and re-parse.
# NOTE(review): the `tmpcmdline = []` initializer is elided here.
15953 if "--ignore-default-opts" not in myopts:
15954 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15955 tmpcmdline.extend(sys.argv[1:])
15956 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15958 if "--digest" in myopts:
15959 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15960 # Reload the whole config from scratch so that the portdbapi internal
15961 # config is updated with new FEATURES.
15962 settings, trees, mtimedb = load_emerge_config(trees=trees)
15963 portdb = trees[settings["ROOT"]]["porttree"].dbapi
# Per-root config adjustments driven by the parsed options; also stash
# a counter hash (for non-pretend merge/unmerge actions) so concurrent
# vardb modification can be detected later.
15965 for myroot in trees:
15966 mysettings = trees[myroot]["vartree"].settings
15967 mysettings.unlock()
15968 adjust_config(myopts, mysettings)
15969 if '--pretend' not in myopts and myaction in \
15970 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15971 mysettings["PORTAGE_COUNTER_HASH"] = \
15972 trees[myroot]["vartree"].dbapi._counter_hash()
15973 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15975 del myroot, mysettings
15977 apply_priorities(settings)
15979 spinner = stdout_spinner()
15980 if "candy" in settings.features:
15981 spinner.update = spinner.update_scroll
# One-time sanity warnings, suppressed by --quiet.
15983 if "--quiet" not in myopts:
15984 portage.deprecated_profile_check(settings=settings)
15985 repo_name_check(trees)
15986 repo_name_duplicate_check(trees)
15987 config_protect_check(trees)
15989 for mytrees in trees.itervalues():
15990 mydb = mytrees["porttree"].dbapi
15991 # Freeze the portdbapi for performance (memoize all xmatch results).
# Easter egg: `emerge moo` prints a cow (body largely elided here).
15995 if "moo" in myfiles:
15998 Larry loves Gentoo (""" + platform.system() + """)
16000 _______________________
16001 < Have you mooed today? >
16002 -----------------------
# Warn when an argument looks like a literal .ebuild/.tbz2 path.
16012 ext = os.path.splitext(x)[1]
16013 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
16014 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
16017 root_config = trees[settings["ROOT"]]["root_config"]
16018 if myaction == "list-sets":
16019 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
16023 # only expand sets for actions taking package arguments
16024 oldargs = myfiles[:]
16025 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
16026 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
16027 if retval != os.EX_OK:
16030 # Need to handle empty sets specially, otherwise emerge will react
16031 # with the help message for empty argument lists
16032 if oldargs and not myfiles:
16033 print "emerge: no targets left after set expansion"
16036 if ("--tree" in myopts) and ("--columns" in myopts):
16037 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
16040 if ("--quiet" in myopts):
16041 spinner.update = spinner.update_quiet
16042 portage.util.noiselimit = -1
16044 # Always create packages if FEATURES=buildpkg
16045 # Imply --buildpkg if --buildpkgonly
16046 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
16047 if "--buildpkg" not in myopts:
16048 myopts["--buildpkg"] = True
16050 # Always try and fetch binary packages if FEATURES=getbinpkg
16051 if ("getbinpkg" in settings.features):
16052 myopts["--getbinpkg"] = True
16054 if "--buildpkgonly" in myopts:
16055 # --buildpkgonly will not merge anything, so
16056 # it cancels all binary package options.
16057 for opt in ("--getbinpkg", "--getbinpkgonly",
16058 "--usepkg", "--usepkgonly"):
16059 myopts.pop(opt, None)
16061 if "--fetch-all-uri" in myopts:
16062 myopts["--fetchonly"] = True
16064 if "--skipfirst" in myopts and "--resume" not in myopts:
16065 myopts["--resume"] = True
# Option implication chain: --getbinpkgonly implies --usepkgonly and
# --getbinpkg; --getbinpkg and --usepkgonly each imply --usepkg.
16067 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
16068 myopts["--usepkgonly"] = True
16070 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
16071 myopts["--getbinpkg"] = True
16073 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
16074 myopts["--usepkg"] = True
16076 # Also allow -K to apply --usepkg/-k
16077 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
16078 myopts["--usepkg"] = True
16080 # Allow -p to remove --ask
16081 if ("--pretend" in myopts) and ("--ask" in myopts):
16082 print ">>> --pretend disables --ask... removing --ask from options."
16083 del myopts["--ask"]
16085 # forbid --ask when not in a terminal
16086 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
16087 if ("--ask" in myopts) and (not sys.stdin.isatty()):
16088 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
16092 if settings.get("PORTAGE_DEBUG", "") == "1":
16093 spinner.update = spinner.update_quiet
16095 if "python-trace" in settings.features:
16096 import portage.debug
16097 portage.debug.set_trace(True)
16099 if not ("--quiet" in myopts):
16100 if not sys.stdout.isatty() or ("--nospinner" in myopts):
16101 spinner.update = spinner.update_basic
# Early-exit actions: version and help.
16103 if myaction == 'version':
16104 print getportageversion(settings["PORTDIR"], settings["ROOT"],
16105 settings.profile_path, settings["CHOST"],
16106 trees[settings["ROOT"]]["vartree"].dbapi)
16108 elif "--help" in myopts:
16109 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16112 if "--debug" in myopts:
16113 print "myaction", myaction
16114 print "myopts", myopts
# No action, no targets and no --resume: show help.
16116 if not myaction and not myfiles and "--resume" not in myopts:
16117 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16120 pretend = "--pretend" in myopts
16121 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
16122 buildpkgonly = "--buildpkgonly" in myopts
16124 # check if root user is the current user for the actions where emerge needs this
16125 if portage.secpass < 2:
16126 # We've already allowed "--version" and "--help" above.
16127 if "--pretend" not in myopts and myaction not in ("search","info"):
16128 need_superuser = myaction in ('deselect',) or not \
16130 (buildpkgonly and secpass >= 1) or \
16131 myaction in ("metadata", "regen") or \
16132 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
16133 if portage.secpass < 1 or \
16136 access_desc = "superuser"
16138 access_desc = "portage group"
16139 # Always show portage_group_warning() when only portage group
16140 # access is required but the user is not in the portage group.
16141 from portage.data import portage_group_warning
# With --ask, degrade gracefully to --pretend instead of exiting.
16142 if "--ask" in myopts:
16143 myopts["--pretend"] = True
16144 del myopts["--ask"]
16145 print ("%s access is required... " + \
16146 "adding --pretend to options.\n") % access_desc
16147 if portage.secpass < 1 and not need_superuser:
16148 portage_group_warning()
16150 sys.stderr.write(("emerge: %s access is " + \
16151 "required.\n\n") % access_desc)
16152 if portage.secpass < 1 and not need_superuser:
16153 portage_group_warning()
# Decide whether emerge.log should be written for this invocation.
16156 disable_emergelog = False
16157 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
16159 disable_emergelog = True
16161 if myaction in ("search", "info"):
16162 disable_emergelog = True
16163 if disable_emergelog:
16164 """ Disable emergelog for everything except build or unmerge
16165 operations. This helps minimize parallel emerge.log entries that can
16166 confuse log parsers. We especially want it disabled during
16167 parallel-fetch, which uses --resume --fetchonly."""
# No-op replacement for the real emergelog (body elided here).
16169 def emergelog(*pargs, **kargs):
16172 if not "--pretend" in myopts:
16173 emergelog(xterm_titles, "Started emerge on: "+\
16174 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
# Reconstruct the command line for the log entry.
16177 myelogstr=" ".join(myopts)
16179 myelogstr+=" "+myaction
16181 myelogstr += " " + " ".join(oldargs)
16182 emergelog(xterm_titles, " *** emerge " + myelogstr)
# Translate SIGINT/SIGTERM into a clean exit with status 100+signum,
# ignoring further signals while shutting down.
16185 def emergeexitsig(signum, frame):
16186 signal.signal(signal.SIGINT, signal.SIG_IGN)
16187 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16188 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
16189 sys.exit(100+signum)
16190 signal.signal(signal.SIGINT, emergeexitsig)
16191 signal.signal(signal.SIGTERM, emergeexitsig)
# atexit hook: write the terminating log entry and reset the xterm
# title (the `def emergeexit():` line is elided here).
16194 """This gets out final log message in before we quit."""
16195 if "--pretend" not in myopts:
16196 emergelog(xterm_titles, " *** terminating.")
16197 if "notitles" not in settings.features:
16199 portage.atexit_register(emergeexit)
# Action dispatch begins here.
16201 if myaction in ("config", "metadata", "regen", "sync"):
16202 if "--pretend" in myopts:
16203 sys.stderr.write(("emerge: The '%s' action does " + \
16204 "not support '--pretend'.\n") % myaction)
16207 if "sync" == myaction:
16208 return action_sync(settings, trees, mtimedb, myopts, myaction)
16209 elif "metadata" == myaction:
16210 action_metadata(settings, portdb, myopts)
16211 elif myaction=="regen":
16212 validate_ebuild_environment(trees)
16213 return action_regen(settings, portdb, myopts.get("--jobs"),
16214 myopts.get("--load-average"))
16216 elif "config"==myaction:
16217 validate_ebuild_environment(trees)
16218 action_config(settings, trees, myopts, myfiles)
16221 elif "search"==myaction:
16222 validate_ebuild_environment(trees)
16223 action_search(trees[settings["ROOT"]]["root_config"],
16224 myopts, myfiles, spinner)
16226 elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
16227 validate_ebuild_environment(trees)
16228 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
16229 myopts, myaction, myfiles, spinner)
16230 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
16231 post_emerge(root_config, myopts, mtimedb, rval)
16234 elif myaction == 'info':
16236 # Ensure atoms are valid before calling unmerge().
16237 vardb = trees[settings["ROOT"]]["vartree"].dbapi
# Expand each short name against the installed-package db; ambiguous
# names produce an error listing the fully-qualified candidates.
16240 if is_valid_package_atom(x):
16242 valid_atoms.append(
16243 portage.dep_expand(x, mydb=vardb, settings=settings))
16244 except portage.exception.AmbiguousPackageName, e:
16245 msg = "The short ebuild name \"" + x + \
16246 "\" is ambiguous. Please specify " + \
16247 "one of the following " + \
16248 "fully-qualified ebuild names instead:"
16249 for line in textwrap.wrap(msg, 70):
16250 writemsg_level("!!! %s\n" % (line,),
16251 level=logging.ERROR, noiselevel=-1)
16253 writemsg_level(" %s\n" % colorize("INFORM", i),
16254 level=logging.ERROR, noiselevel=-1)
16255 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
16259 msg.append("'%s' is not a valid package atom." % (x,))
16260 msg.append("Please check ebuild(5) for full details.")
16261 writemsg_level("".join("!!! %s\n" % line for line in msg),
16262 level=logging.ERROR, noiselevel=-1)
16265 return action_info(settings, trees, myopts, valid_atoms)
16267 # "update", "system", or just process files:
16269 validate_ebuild_environment(trees)
# Validate each target: set references and valid atoms pass; a
# leading os.sep marks a filesystem path (handling elided here).
16272 if x.startswith(SETPREFIX) or \
16273 is_valid_package_atom(x):
16275 if x[:1] == os.sep:
16283 msg.append("'%s' is not a valid package atom." % (x,))
16284 msg.append("Please check ebuild(5) for full details.")
16285 writemsg_level("".join("!!! %s\n" % line for line in msg),
16286 level=logging.ERROR, noiselevel=-1)
16289 if "--pretend" not in myopts:
16290 display_news_notification(root_config, myopts)
16291 retval = action_build(settings, trees, mtimedb,
16292 myopts, myaction, myfiles, spinner)
16293 root_config = trees[settings["ROOT"]]["root_config"]
16294 post_emerge(root_config, myopts, mtimedb, retval)