2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
8 from collections import deque
28 from os import path as osp
29 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 nc_len, red, teal, turquoise, xtermTitle, \
39 xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.cache.cache_errors import CacheError
53 from portage.data import secpass
54 from portage.elog.messages import eerror
55 from portage.util import normalize_path as normpath
56 from portage.util import cmp_sort_key, writemsg, writemsg_level
57 from portage.sets import load_default_config, SETPREFIX
58 from portage.sets.base import InternalPackageSet
60 from itertools import chain, izip
63 import cPickle as pickle
68 from cStringIO import StringIO
70 from StringIO import StringIO
class stdout_spinner(object):
    # Progress indicator written to stdout.  Three display styles are
    # supported: "twirl" (rotating bar), "scroll" (a message scrolled
    # back and forth), "basic" (dots), plus a quiet no-op variant.
        "Gentoo Rocks ("+platform.system()+")",
        "Thank you for using Gentoo. :)",
        "Are you actually trying to read this?",
        "How many times have you stared at this?",
        "We are generating the cache right now",
        "You are paying too much attention.",
        "A theory is better than its explanation.",
        "Phasers locked on target, Captain.",
        "Thrashing is just virtual crashing.",
        "To be is to program.",
        "Real Users hate Real Programmers.",
        "When all else fails, read the instructions.",
        "Functionality breeds Contempt.",
        "The future lies ahead.",
        "3.1415926535897932384626433832795028841971694",
        "Sometimes insanity is the only alternative.",
        "Inaccuracy saves a world of explanation.",

    twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

        self.update = self.update_twirl
        # Pick a scroll message pseudo-randomly from the clock.
        self.scroll_sequence = self.scroll_msgs[
            int(time.time() * 100) % len(self.scroll_msgs)]
        # Minimum seconds between terminal writes (throttle).
        self.min_display_latency = 0.05

    def _return_early(self):
        """
        Flushing output to the tty too frequently wastes cpu time. Therefore,
        each update* method should return without doing any output when this
        """
        cur_time = time.time()
        if cur_time - self.last_update < self.min_display_latency:
        self.last_update = cur_time

    def update_basic(self):
        # Emit a dot every 100 positions; position wraps at 500.
        self.spinpos = (self.spinpos + 1) % 500
        if self._return_early():
        if (self.spinpos % 100) == 0:
            if self.spinpos == 0:
                sys.stdout.write(". ")
                sys.stdout.write(".")

    def update_scroll(self):
        if self._return_early():
        # Second half of the cycle scrolls the message backwards.
        if(self.spinpos >= len(self.scroll_sequence)):
            sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
                len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
            sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
        self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

    def update_twirl(self):
        # Rotate through the twirl character sequence.
        self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
        if self._return_early():
        sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

    def update_quiet(self):
        # No-op spinner used in quiet mode.
def userquery(prompt, responses=None, colours=None):
    """Displays a prompt and a set of responses, then waits for a response
    which is checked against the responses and the first to match is
    returned. An empty response will match the first value in responses. The
    input buffer is *not* cleared prior to the prompt!

    responses: a List of Strings.
    colours: a List of Functions taking and returning a String, used to
    process the responses for display. Typically these will be functions
    like red() but could be e.g. lambda x: "DisplayString".
    If responses is omitted, defaults to ["Yes", "No"], [green, red].
    If only colours is omitted, defaults to [bold, ...].

    Returns a member of the List responses. (If called without optional
    arguments, returns "Yes" or "No".)
    KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
    """
    if responses is None:
        responses = ["Yes", "No"]
        # Default palette: highlight the default choice differently
        # from the alternatives.
            create_color_func("PROMPT_CHOICE_DEFAULT"),
            create_color_func("PROMPT_CHOICE_OTHER")
    elif colours is None:
    # Cycle the supplied colours so every response has one.
    colours=(colours*len(responses))[:len(responses)]
    response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
    for key in responses:
        # An empty response will match the first value in responses.
        if response.upper()==key[:len(response)].upper():
    print "Sorry, response '%s' not understood." % response,
    except (EOFError, KeyboardInterrupt):
# Stand-alone emerge actions accepted on the command line.
actions = frozenset([
    "clean", "config", "depclean",
    "info", "list-sets", "metadata",
    "prune", "regen", "search",
    "sync", "unmerge", "version",
# Long option names recognized by emerge.
    "--ask", "--alphabetical",
    "--buildpkg", "--buildpkgonly",
    "--changelog", "--columns",
    "--fetchonly", "--fetch-all-uri",
    "--getbinpkg", "--getbinpkgonly",
    "--help", "--ignore-default-opts",
    "--nodeps", "--noreplace",
    "--nospinner", "--oneshot",
    "--onlydeps", "--pretend",
    "--quiet", "--resume",
    "--searchdesc", "--selective",
    "--usepkg", "--usepkgonly",
# Single-letter short options mapped to their long forms.
    "b":"--buildpkg", "B":"--buildpkgonly",
    "c":"--clean", "C":"--unmerge",
    "d":"--debug", "D":"--deep",
    "f":"--fetchonly", "F":"--fetch-all-uri",
    "g":"--getbinpkg", "G":"--getbinpkgonly",
    "k":"--usepkg", "K":"--usepkgonly",
    "n":"--noreplace", "N":"--newuse",
    "o":"--onlydeps", "O":"--nodeps",
    "p":"--pretend", "P":"--prune",
    "s":"--search", "S":"--searchdesc",
    "v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
    # Append a timestamped entry to /var/log/emerge.log; optionally
    # mirror a short form of the message to the xterm title bar.
    if xterm_titles and short_msg:
        if "HOSTNAME" in os.environ:
            short_msg = os.environ["HOSTNAME"]+": "+short_msg
        xtermTitle(short_msg)
        file_path = "/var/log/emerge.log"
        mylogfile = open(file_path, "a")
        # Keep the log owned by the portage user/group so that
        # unprivileged portage processes can keep writing to it.
        portage.util.apply_secpass_permissions(file_path,
            uid=portage.portage_uid, gid=portage.portage_gid,
        mylock = portage.locks.lockfile(mylogfile)
        # seek because we may have gotten held up by the lock.
        # if so, we may not be positioned at the end of the file.
        mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
        portage.locks.unlockfile(mylock)
    except (IOError,OSError,portage.exception.PortageException), e:
        print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
    # Print a visible countdown before a potentially destructive
    # action, giving the user a chance to hit Control-C.
    print ">>> Waiting",secs,"seconds before starting..."
    print ">>> (Control-C to abort)...\n"+doing+" in: ",
        sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
    # Render a byte count as a comma-grouped kB string.
    if isinstance(mysize, basestring):
    if 0 != mysize % 1024:
        # Always round up to the next kB so that it doesn't show 0 kB when
        # some small file still needs to be fetched.
        mysize += 1024 - mysize % 1024
    mystr=str(mysize/1024)
        # Insert a thousands separator.
        mystr=mystr[:mycount]+","+mystr[mycount:]
def getgccversion(chost):
    """
    return: the current in-use gcc version
    """
    gcc_ver_command = 'gcc -dumpversion'
    gcc_ver_prefix = 'gcc-'

    gcc_not_found_error = red(
    "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
    "!!! to update the environment of this terminal and possibly\n" +
    "!!! other terminals also.\n"

    # Preferred source: gcc-config reports the active profile.
    mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
    if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
        return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

    # Fall back to asking the CHOST-prefixed compiler directly.
    mystatus, myoutput = commands.getstatusoutput(
        chost + "-" + gcc_ver_command)
    if mystatus == os.EX_OK:
        return gcc_ver_prefix + myoutput

    # Last resort: plain "gcc" from PATH.
    mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
    if mystatus == os.EX_OK:
        return gcc_ver_prefix + myoutput

    portage.writemsg(gcc_not_found_error, noiselevel=-1)
    return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
    # Build the "Portage x.y (profile, gcc, libc, kernel/arch)" banner.
    profilever = "unavailable"
    realpath = os.path.realpath(profile)
    basepath = os.path.realpath(os.path.join(portdir, "profiles"))
    if realpath.startswith(basepath):
        # Profile lives under $PORTDIR/profiles: show the relative path.
        profilever = realpath[1 + len(basepath):]
        # Otherwise show the raw symlink target, flagged with "!".
        profilever = "!" + os.readlink(profile)
    del realpath, basepath

    # Determine the installed libc version from the virtuals.
    libclist = vardb.match("virtual/libc")
    libclist += vardb.match("virtual/glibc")
    libclist = portage.util.unique_array(libclist)
    xs=portage.catpkgsplit(x)
    libcver+=","+"-".join(xs[1:])
    libcver="-".join(xs[1:])
    libcver="unavailable"

    gccver = getgccversion(chost)
    unameout=platform.release()+" "+platform.machine()

    return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
    #configure emerge engine parameters
    #
    # self: include _this_ package regardless of if it is merged.
    # selective: exclude the package if it is merged
    # recurse: go into the dependencies
    # deep: go into the dependencies of already merged packages
    # empty: pretend nothing is merged
    # complete: completely account for all known dependencies
    # remove: build graph for use in removing packages
    myparams = set(["recurse"])

    if myaction == "remove":
        myparams.add("remove")
        myparams.add("complete")

    if "--update" in myopts or \
        "--newuse" in myopts or \
        "--reinstall" in myopts or \
        "--noreplace" in myopts:
        myparams.add("selective")
    if "--emptytree" in myopts:
        myparams.add("empty")
        # --emptytree supersedes selective merging.
        myparams.discard("selective")
    if "--nodeps" in myopts:
        myparams.discard("recurse")
    if "--deep" in myopts:
    if "--complete-graph" in myopts:
        myparams.add("complete")
# search functionality
class search(object):
    # Searches ebuild, binary and installed package databases (and
    # package sets) for a key, then pretty-prints the results.

    def __init__(self, root_config, spinner, searchdesc,
        verbose, usepkg, usepkgonly):
        """Searches the available and installed packages for the supplied search key.
        The list of available and installed packages is created at object instantiation.
        This makes successive searches faster."""
        self.settings = root_config.settings
        self.vartree = root_config.trees["vartree"]
        self.spinner = spinner
        self.verbose = verbose
        self.searchdesc = searchdesc
        self.root_config = root_config
        self.setconfig = root_config.setconfig
        self.matches = {"pkg" : []}

        # Proxy the dbapi interface through this object so that
        # lookups transparently consult every selected database.
        self.portdb = fake_portdb
        for attrib in ("aux_get", "cp_all",
            "xmatch", "findname", "getFetchMap"):
            setattr(fake_portdb, attrib, getattr(self, "_"+attrib))

        portdb = root_config.trees["porttree"].dbapi
        bindb = root_config.trees["bintree"].dbapi
        vardb = root_config.trees["vartree"].dbapi

        # Select which databases participate in the search,
        # honoring --usepkg/--usepkgonly.
        if not usepkgonly and portdb._have_root_eclass_dir:
            self._dbs.append(portdb)

        if (usepkg or usepkgonly) and bindb.cp_all():
            self._dbs.append(bindb)

        self._dbs.append(vardb)
        self._portdb = portdb

        cp_all.update(db.cp_all())
        return list(sorted(cp_all))

    def _aux_get(self, *args, **kwargs):
        # Delegate metadata lookup to the first database that answers.
            return db.aux_get(*args, **kwargs)

    def _findname(self, *args, **kwargs):
        if db is not self._portdb:
            # We don't want findname to return anything
            # unless it's an ebuild in a portage tree.
            # Otherwise, it's already built and we don't
        func = getattr(db, "findname", None)
            value = func(*args, **kwargs)

    def _getFetchMap(self, *args, **kwargs):
        # Delegate SRC_URI fetch-map lookup to a database that has it.
        func = getattr(db, "getFetchMap", None)
            value = func(*args, **kwargs)

    def _visible(self, db, cpv, metadata):
        # Wrap cpv in a Package instance and run the standard
        # visibility checks against it.
        installed = db is self.vartree.dbapi
        built = installed or db is not self._portdb
            pkg_type = "installed"
        return visible(self.settings,
            Package(type_name=pkg_type, root_config=self.root_config,
            cpv=cpv, built=built, installed=installed, metadata=metadata))

    def _xmatch(self, level, atom):
        """
        This method does not expand old-style virtuals because it
        is restricted to returning matches for a single ${CATEGORY}/${PN}
        and old-style virtual matches are unreliable for that when querying
        multiple package databases. If necessary, old-style virtuals
        can be performed on atoms prior to calling this method.
        """
        cp = portage.dep_getkey(atom)
        if level == "match-all":
            # Union of matches from every database, ascending order.
            if hasattr(db, "xmatch"):
                matches.update(db.xmatch(level, atom))
                matches.update(db.match(atom))
            result = list(x for x in matches if portage.cpv_getkey(x) == cp)
            db._cpv_sort_ascending(result)
        elif level == "match-visible":
            if hasattr(db, "xmatch"):
                matches.update(db.xmatch(level, atom))
                # Databases without xmatch need a manual visibility
                # filter on each match.
                db_keys = list(db._aux_cache_keys)
                for cpv in db.match(atom):
                    metadata = izip(db_keys,
                        db.aux_get(cpv, db_keys))
                    if not self._visible(db, cpv, metadata):
            result = list(x for x in matches if portage.cpv_getkey(x) == cp)
            db._cpv_sort_ascending(result)
        elif level == "bestmatch-visible":
            if hasattr(db, "xmatch"):
                cpv = db.xmatch("bestmatch-visible", atom)
                if not cpv or portage.cpv_getkey(cpv) != cp:
                if not result or cpv == portage.best([cpv, result]):
                db_keys = Package.metadata_keys
                # break out of this loop with highest visible
                # match, checked in descending order
                for cpv in reversed(db.match(atom)):
                    if portage.cpv_getkey(cpv) != cp:
                    metadata = izip(db_keys,
                        db.aux_get(cpv, db_keys))
                    if not self._visible(db, cpv, metadata):
                    if not result or cpv == portage.best([cpv, result]):
            raise NotImplementedError(level)

    def execute(self,searchkey):
        """Performs the search for the supplied search key"""
        self.searchkey=searchkey
        self.packagematches = []
        self.matches = {"pkg":[], "desc":[], "set":[]}
        self.matches = {"pkg":[], "set":[]}
        print "Searching... ",

        # A leading '%' treats the key as a regular expression; a
        # leading '@' matches against full category/name strings.
        if self.searchkey.startswith('%'):
            self.searchkey = self.searchkey[1:]
        if self.searchkey.startswith('@'):
            self.searchkey = self.searchkey[1:]
        self.searchre=re.compile(self.searchkey,re.I)
        self.searchre=re.compile(re.escape(self.searchkey), re.I)
        for package in self.portdb.cp_all():
            self.spinner.update()

            match_string = package[:]
            match_string = package.split("/")[-1]

            if self.searchre.search(match_string):
                if not self.portdb.xmatch("match-visible", package):
                self.matches["pkg"].append([package,masked])
            elif self.searchdesc: # DESCRIPTION searching
                full_package = self.portdb.xmatch("bestmatch-visible", package)
                #no match found; we don't want to query description
                full_package = portage.best(
                    self.portdb.xmatch("match-all", package))
                full_desc = self.portdb.aux_get(
                    full_package, ["DESCRIPTION"])[0]
                print "emerge: search: aux_get() failed, skipping"
                if self.searchre.search(full_desc):
                    self.matches["desc"].append([full_package,masked])

        # Package sets are matched by name and (optionally) by their
        # DESCRIPTION metadata.
        self.sdict = self.setconfig.getSets()
        for setname in self.sdict:
            self.spinner.update()
            match_string = setname
            match_string = setname.split("/")[-1]

            if self.searchre.search(match_string):
                self.matches["set"].append([setname, False])
            elif self.searchdesc:
                if self.searchre.search(
                    self.sdict[setname].getMetadata("DESCRIPTION")):
                    self.matches["set"].append([setname, False])

        for mtype in self.matches:
            self.matches[mtype].sort()
            self.mlen += len(self.matches[mtype])

        if not self.portdb.xmatch("match-all", cp):
        if not self.portdb.xmatch("bestmatch-visible", cp):
        self.matches["pkg"].append([cp, masked])

        """Outputs the results of the search."""
        print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
        print "[ Applications found : "+white(str(self.mlen))+" ]"
        vardb = self.vartree.dbapi
        for mtype in self.matches:
            for match,masked in self.matches[mtype]:
                full_package = self.portdb.xmatch(
                    "bestmatch-visible", match)
                #no match found; we don't want to query description
                full_package = portage.best(
                    self.portdb.xmatch("match-all",match))
                elif mtype == "desc":
                    match = portage.cpv_getkey(match)
                print green("*")+" "+white(match)
                print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
                desc, homepage, license = self.portdb.aux_get(
                    full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
                print "emerge: search: aux_get() failed, skipping"
                print green("*")+" "+white(match)+" "+red("[ Masked ]")
                print green("*")+" "+white(match)
                myversion = self.getVersion(full_package, search.VERSION_RELEASE)

                mycat = match.split("/")[0]
                mypkg = match.split("/")[1]
                mycpv = match + "-" + myversion
                myebuild = self.portdb.findname(mycpv)
                pkgdir = os.path.dirname(myebuild)
                from portage import manifest
                mf = manifest.Manifest(
                    pkgdir, self.settings["DISTDIR"])
                uri_map = self.portdb.getFetchMap(mycpv)
                except portage.exception.InvalidDependString, e:
                    file_size_str = "Unknown (%s)" % (e,)
                mysum[0] = mf.getDistfilesSize(uri_map)
                file_size_str = "Unknown (missing " + \
                    "digest for %s)" % (e,)
                # For binary packages, use the package file size
                # instead of the distfiles size.
                if db is not vardb and \
                    db.cpv_exists(mycpv):
                    if not myebuild and hasattr(db, "bintree"):
                        myebuild = db.bintree.getname(mycpv)
                    mysum[0] = os.stat(myebuild).st_size
                if myebuild and file_size_str is None:
                    # Comma-group the kB figure for display.
                    mystr = str(mysum[0] / 1024)
                    mystr = mystr[:mycount] + "," + mystr[mycount:]
                    file_size_str = mystr + " kB"

                print " ", darkgreen("Latest version available:"),myversion
                print " ", self.getInstallationStatus(mycat+'/'+mypkg)
                    (darkgreen("Size of files:"), file_size_str)
                print " ", darkgreen("Homepage:")+" ",homepage
                print " ", darkgreen("Description:")+" ",desc
                print " ", darkgreen("License:")+" ",license

    def getInstallationStatus(self,package):
        # Report the best installed version of the given atom, if any.
        installed_package = self.vartree.dep_bestmatch(package)
        version = self.getVersion(installed_package,search.VERSION_RELEASE)
        result = darkgreen("Latest version installed:")+" "+version
        result = darkgreen("Latest version installed:")+" [ Not Installed ]"

    def getVersion(self,full_package,detail):
        # Extract the version (including the -rN revision suffix when
        # detail requests the release form) from a full cpv string.
        if len(full_package) > 1:
            package_parts = portage.catpkgsplit(full_package)
            if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
                result = package_parts[2]+ "-" + package_parts[3]
            result = package_parts[2]
class RootConfig(object):
    """This is used internally by depgraph to track information about a
    """
    # Mapping of package type names to the tree that stores them.
    "ebuild" : "porttree",
    "binary" : "bintree",
    "installed" : "vartree"
    for k, v in pkg_tree_map.iteritems():

    def __init__(self, settings, trees, setconfig):
        self.settings = settings
        self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
        self.root = self.settings["ROOT"]
        self.setconfig = setconfig
        if setconfig is None:
        self.sets = self.setconfig.getSets()
        self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg, args_set, root_config):
    """Create a new atom for the world file if one does not exist. If the
    argument atom is precise enough to identify a specific slot then a slot
    atom will be returned. Atoms that are in the system set may also be stored
    in world since system atoms can only match one slot while world atoms can
    be greedy with respect to slots. Unslotted system packages will not be
    """

    arg_atom = args_set.findAtomForPackage(pkg)
    cp = portage.dep_getkey(arg_atom)
    sets = root_config.sets
    portdb = root_config.trees["porttree"].dbapi
    vardb = root_config.trees["vartree"].dbapi
    # A package is "slotted" when more than one slot exists, or when a
    # single slot is not the default "0".
    available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
        for cpv in portdb.match(cp))
    slotted = len(available_slots) > 1 or \
        (len(available_slots) == 1 and "0" not in available_slots)
    # check the vdb in case this is multislot
    available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
        for cpv in vardb.match(cp))
    slotted = len(available_slots) > 1 or \
        (len(available_slots) == 1 and "0" not in available_slots)
    if slotted and arg_atom != cp:
        # If the user gave a specific atom, store it as a
        # slot atom in the world file.
        slot_atom = pkg.slot_atom

        # For USE=multislot, there are a couple of cases to
        # 1) SLOT="0", but the real SLOT spontaneously changed to some
        # unknown value, so just record an unslotted atom.
        # 2) SLOT comes from an installed package and there is no
        # matching SLOT in the portage tree.
        # Make sure that the slot atom is available in either the
        # portdb or the vardb, since otherwise the user certainly
        # doesn't want the SLOT atom recorded in the world file
        # (case 1 above). If it's only available in the vardb,
        # the user may be trying to prevent a USE=multislot
        # package from being removed by --depclean (case 2 above).
        if not portdb.match(slot_atom):
            # SLOT seems to come from an installed multislot package
            # If there is no installed package matching the SLOT atom,
            # it probably changed SLOT spontaneously due to USE=multislot,
            # so just record an unslotted atom.
            if vardb.match(slot_atom):
                # Now verify that the argument is precise
                # enough to identify a specific slot.
                matches = mydb.match(arg_atom)
                matched_slots = set()
                    matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
                if len(matched_slots) == 1:
                    new_world_atom = slot_atom

    if new_world_atom == sets["world"].findAtomForPackage(pkg):
        # Both atoms would be identical, so there's nothing to add.

    # Unlike world atoms, system atoms are not greedy for slots, so they
    # can't be safely excluded from world if they are slotted.
    system_atom = sets["system"].findAtomForPackage(pkg)
        if not portage.dep_getkey(system_atom).startswith("virtual/"):
        # System virtuals aren't safe to exclude from world since they can
        # match multiple old-style virtuals but only one of them will be
        # pulled in by update or depclean.
        providers = portdb.mysettings.getvirtuals().get(
            portage.dep_getkey(system_atom))
        if providers and len(providers) == 1 and providers[0] == cp:
    return new_world_atom
def filter_iuse_defaults(iuse):
    # Strip the leading "+"/"-" default markers from IUSE flags.
    if flag.startswith("+") or flag.startswith("-"):
class SlotObject(object):
    # Lightweight base class: subclasses declare their attributes in
    # __slots__, and __init__ accepts them as keyword arguments.
    __slots__ = ("__weakref__",)

    def __init__(self, **kwargs):
        # Walk up the class hierarchy collecting every __slots__ entry
        # and bind the matching keyword argument (default None).
        classes = [self.__class__]
            classes.extend(c.__bases__)
            slots = getattr(c, "__slots__", None)
            myvalue = kwargs.get(myattr, None)
            setattr(self, myattr, myvalue)

        """
        Create a new instance and copy all attributes
        defined from __slots__ (including those from
        """
        obj = self.__class__()

        classes = [self.__class__]
            classes.extend(c.__bases__)
            slots = getattr(c, "__slots__", None)
            setattr(obj, myattr, getattr(self, myattr))
class AbstractDepPriority(SlotObject):
    # Base class for dependency priorities.  Instances compare by
    # their integer value via __int__ (supplied by subclasses).
    __slots__ = ("buildtime", "runtime", "runtime_post")

    def __lt__(self, other):
        return self.__int__() < other

    def __le__(self, other):
        return self.__int__() <= other

    def __eq__(self, other):
        return self.__int__() == other

    def __ne__(self, other):
        return self.__int__() != other

    def __gt__(self, other):
        return self.__int__() > other

    def __ge__(self, other):
        return self.__int__() >= other

        return copy.copy(self)
class DepPriority(AbstractDepPriority):
    # Dependency priority for normal (install) operations.

    __slots__ = ("satisfied", "optional", "rebuild")

        if self.runtime_post:
            return "runtime_post"
class BlockerDepPriority(DepPriority):
# Shared singleton instance used for all blocker dependencies.
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
    __slots__ = ("optional", "satisfied",)
    """
    Combination of properties      Priority  Category
    (none of the above)             -2       SOFT
    """

        if self.runtime_post:

        myvalue = self.__int__()
        if myvalue > self.SOFT:
class DepPriorityNormalRange(object):
    """
    DepPriority properties    Index  Category
    runtime_post                2    MEDIUM_SOFT
    (none of the above)         0    NONE
    """

    # Each predicate returns True when the given priority may be
    # ignored at the corresponding hardness level.
    def _ignore_optional(cls, priority):
        if priority.__class__ is not DepPriority:
        return bool(priority.optional)

    def _ignore_runtime_post(cls, priority):
        if priority.__class__ is not DepPriority:
        return bool(priority.optional or priority.runtime_post)

    def _ignore_runtime(cls, priority):
        if priority.__class__ is not DepPriority:
        return not priority.buildtime

    ignore_medium = _ignore_runtime
    ignore_medium_soft = _ignore_runtime_post
    ignore_soft = _ignore_optional

# Predicates indexed by hardness level for direct lookup.
DepPriorityNormalRange.ignore_priority = (
    DepPriorityNormalRange._ignore_optional,
    DepPriorityNormalRange._ignore_runtime_post,
    DepPriorityNormalRange._ignore_runtime
class DepPrioritySatisfiedRange(object):
    """
    DepPriority                        Index  Category
    not satisfied and buildtime               HARD
    not satisfied and runtime            7    MEDIUM
    not satisfied and runtime_post       6    MEDIUM_SOFT
    satisfied and buildtime and rebuild  5    SOFT
    satisfied and buildtime              4    SOFT
    satisfied and runtime                3    SOFT
    satisfied and runtime_post           2    SOFT
    (none of the above)                  0    NONE
    """

    # Each predicate returns True when the given priority may be
    # ignored at the corresponding hardness level; "satisfied"
    # dependencies are softer than unsatisfied ones.
    def _ignore_optional(cls, priority):
        if priority.__class__ is not DepPriority:
        return bool(priority.optional)

    def _ignore_satisfied_runtime_post(cls, priority):
        if priority.__class__ is not DepPriority:
        if priority.optional:
        if not priority.satisfied:
        return bool(priority.runtime_post)

    def _ignore_satisfied_runtime(cls, priority):
        if priority.__class__ is not DepPriority:
        if priority.optional:
        if not priority.satisfied:
        return not priority.buildtime

    def _ignore_satisfied_buildtime(cls, priority):
        if priority.__class__ is not DepPriority:
        if priority.optional:
        if not priority.satisfied:
        if priority.buildtime:
            return not priority.rebuild

    def _ignore_satisfied_buildtime_rebuild(cls, priority):
        if priority.__class__ is not DepPriority:
        if priority.optional:
        return bool(priority.satisfied)

    def _ignore_runtime_post(cls, priority):
        if priority.__class__ is not DepPriority:
        return bool(priority.optional or \
            priority.satisfied or \
            priority.runtime_post)

    def _ignore_runtime(cls, priority):
        if priority.__class__ is not DepPriority:
        return bool(priority.satisfied or \
            not priority.buildtime)

    ignore_medium = _ignore_runtime
    ignore_medium_soft = _ignore_runtime_post
    ignore_soft = _ignore_satisfied_buildtime_rebuild

# Predicates indexed by hardness level for direct lookup.
DepPrioritySatisfiedRange.ignore_priority = (
    DepPrioritySatisfiedRange._ignore_optional,
    DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
    DepPrioritySatisfiedRange._ignore_satisfied_runtime,
    DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
    DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
    DepPrioritySatisfiedRange._ignore_runtime_post,
    DepPrioritySatisfiedRange._ignore_runtime
def _find_deep_system_runtime_deps(graph):
    # Collect the system-set packages in the graph together with their
    # transitive runtime dependencies (uninstalls excluded).
    deep_system_deps = set()
        if not isinstance(node, Package) or \
            node.operation == 'uninstall':
        if node.root_config.sets['system'].findAtomForPackage(node):
            node_stack.append(node)

    def ignore_priority(priority):
        """
        Ignore non-runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            (priority.runtime or priority.runtime_post):

        # Depth-first traversal over runtime edges only.
        node = node_stack.pop()
        if node in deep_system_deps:
        deep_system_deps.add(node)
        for child in graph.child_nodes(node, ignore_priority=ignore_priority):
            if not isinstance(child, Package) or \
                child.operation == 'uninstall':
            node_stack.append(child)

    return deep_system_deps
1163 class FakeVartree(portage.vartree):
1164 """This is implements an in-memory copy of a vartree instance that provides
1165 all the interfaces required for use by the depgraph. The vardb is locked
1166 during the constructor call just long enough to read a copy of the
1167 installed package information. This allows the depgraph to do it's
1168 dependency calculations without holding a lock on the vardb. It also
1169 allows things like vardb global updates to be done in memory so that the
1170 user doesn't necessarily need write access to the vardb in cases where
1171 global updates are necessary (updates are performed when necessary if there
1172 is not a matching ebuild in the tree)."""
1173 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1174 self._root_config = root_config
1175 if pkg_cache is None:
1177 real_vartree = root_config.trees["vartree"]
1178 portdb = root_config.trees["porttree"].dbapi
1179 self.root = real_vartree.root
1180 self.settings = real_vartree.settings
1181 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1182 if "_mtime_" not in mykeys:
1183 mykeys.append("_mtime_")
1184 self._db_keys = mykeys
1185 self._pkg_cache = pkg_cache
1186 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1187 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1189 # At least the parent needs to exist for the lock file.
1190 portage.util.ensure_dirs(vdb_path)
1191 except portage.exception.PortageException:
1195 if acquire_lock and os.access(vdb_path, os.W_OK):
1196 vdb_lock = portage.locks.lockdir(vdb_path)
1197 real_dbapi = real_vartree.dbapi
1199 for cpv in real_dbapi.cpv_all():
1200 cache_key = ("installed", self.root, cpv, "nomerge")
1201 pkg = self._pkg_cache.get(cache_key)
1203 metadata = pkg.metadata
1205 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1206 myslot = metadata["SLOT"]
1207 mycp = portage.dep_getkey(cpv)
1208 myslot_atom = "%s:%s" % (mycp, myslot)
1210 mycounter = long(metadata["COUNTER"])
1213 metadata["COUNTER"] = str(mycounter)
1214 other_counter = slot_counters.get(myslot_atom, None)
1215 if other_counter is not None:
1216 if other_counter > mycounter:
1218 slot_counters[myslot_atom] = mycounter
1220 pkg = Package(built=True, cpv=cpv,
1221 installed=True, metadata=metadata,
1222 root_config=root_config, type_name="installed")
1223 self._pkg_cache[pkg] = pkg
1224 self.dbapi.cpv_inject(pkg)
1225 real_dbapi.flush_cache()
1228 portage.locks.unlockdir(vdb_lock)
1229 # Populate the old-style virtuals using the cached values.
1230 if not self.settings.treeVirtuals:
1231 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1232 portage.getCPFromCPV, self.get_all_provides())
1234 # Intialize variables needed for lazy cache pulls of the live ebuild
1235 # metadata. This ensures that the vardb lock is released ASAP, without
1236 # being delayed in case cache generation is triggered.
1237 self._aux_get = self.dbapi.aux_get
1238 self.dbapi.aux_get = self._aux_get_wrapper
1239 self._match = self.dbapi.match
1240 self.dbapi.match = self._match_wrapper
1241 self._aux_get_history = set()
1242 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1243 self._portdb = portdb
1244 self._global_updates = None
1246 def _match_wrapper(self, cpv, use_cache=1):
1248 Make sure the metadata in Package instances gets updated for any
1249 cpv that is returned from a match() call, since the metadata can
1250 be accessed directly from the Package instance instead of via
1253 matches = self._match(cpv, use_cache=use_cache)
1255 if cpv in self._aux_get_history:
1257 self._aux_get_wrapper(cpv, [])
# Memoizing aux_get wrapper: the first time a cpv is requested, refresh
# its dependency metadata (EAPI/DEPEND/RDEPEND/PDEPEND) from the live
# ebuild in the portdb; if that fails, fall back to applying global
# package-move updates. Subsequent calls go straight to the cached db.
1260 def _aux_get_wrapper(self, pkg, wants):
1261 if pkg in self._aux_get_history:
1262 return self._aux_get(pkg, wants)
1263 self._aux_get_history.add(pkg)
1265 # Use the live ebuild metadata if possible.
# NOTE(review): the `try:` opening this block is missing from this
# sample -- the except below pairs with it in the original source.
1266 live_metadata = dict(izip(self._portdb_keys,
1267 self._portdb.aux_get(pkg, self._portdb_keys)))
# presumably raises here (line absent) when the live EAPI is
# unsupported, so the except path runs -- confirm against upstream.
1268 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1270 self.dbapi.aux_update(pkg, live_metadata)
1271 except (KeyError, portage.exception.PortageException):
# Global updates (profiles/updates) are loaded lazily, once.
1272 if self._global_updates is None:
1273 self._global_updates = \
1274 grab_global_updates(self._portdb.porttree_root)
1275 perform_global_updates(
1276 pkg, self.dbapi, self._global_updates)
1277 return self._aux_get(pkg, wants)
# Re-synchronize this fake/cached vardb with the real vardb on disk:
# remove entries for uninstalled packages, drop entries whose COUNTER
# or mtime changed, and inject fresh Package objects for new cpvs.
# NOTE(review): try/except/finally scaffolding and several lines
# (e.g. `root = ...`, `slot_counters = {}` initialization, the mtime
# comparison continuation) are missing from this sample.
1279 def sync(self, acquire_lock=1):
1281 Call this method to synchronize state with the real vardb
1282 after one or more packages may have been installed or
1285 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1287 # At least the parent needs to exist for the lock file.
1288 portage.util.ensure_dirs(vdb_path)
1289 except portage.exception.PortageException:
# Only lock when the caller asked for it and the dir is writable;
# read-only users can still sync without a lock.
1293 if acquire_lock and os.access(vdb_path, os.W_OK):
1294 vdb_lock = portage.locks.lockdir(vdb_path)
1298 portage.locks.unlockdir(vdb_lock)
1302 real_vardb = self._root_config.trees["vartree"].dbapi
1303 current_cpv_set = frozenset(real_vardb.cpv_all())
1304 pkg_vardb = self.dbapi
1305 aux_get_history = self._aux_get_history
1307 # Remove any packages that have been uninstalled.
1308 for pkg in list(pkg_vardb):
1309 if pkg.cpv not in current_cpv_set:
1310 pkg_vardb.cpv_remove(pkg)
1311 aux_get_history.discard(pkg.cpv)
1313 # Validate counters and timestamps.
1316 validation_keys = ["COUNTER", "_mtime_"]
1317 for cpv in current_cpv_set:
# `root` is presumably bound earlier in the method (line absent) --
# verify against the upstream source.
1319 pkg_hash_key = ("installed", root, cpv, "nomerge")
1320 pkg = pkg_vardb.get(pkg_hash_key)
1322 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1324 counter = long(counter)
# A mismatched counter (or, presumably, mtime -- the continuation
# line is absent) invalidates the cached entry.
1328 if counter != pkg.counter or \
1330 pkg_vardb.cpv_remove(pkg)
1331 aux_get_history.discard(pkg.cpv)
1335 pkg = self._pkg(cpv)
# Track the highest COUNTER seen per slot_atom; `slot_counters` is
# presumably initialized before this loop (line absent).
1337 other_counter = slot_counters.get(pkg.slot_atom)
1338 if other_counter is not None:
1339 if other_counter > pkg.counter:
1342 slot_counters[pkg.slot_atom] = pkg.counter
1343 pkg_vardb.cpv_inject(pkg)
1345 real_vardb.flush_cache()
# Build an installed-type Package instance for `cpv` from the real
# vardb's metadata, normalizing COUNTER to a canonical integer string.
# NOTE(review): the try/except around the COUNTER conversion and the
# final `return pkg` are missing from this sample.
1347 def _pkg(self, cpv):
1348 root_config = self._root_config
1349 real_vardb = root_config.trees["vartree"].dbapi
1350 pkg = Package(cpv=cpv, installed=True,
1351 metadata=izip(self._db_keys,
1352 real_vardb.aux_get(cpv, self._db_keys)),
1353 root_config=root_config,
1354 type_name="installed")
# Normalize COUNTER: parse as long, then write back as str so that
# downstream comparisons are consistent.
1357 mycounter = long(pkg.metadata["COUNTER"])
1360 pkg.metadata["COUNTER"] = str(mycounter)
# Collect package-move/slot-move commands from $PORTDIR/profiles/updates.
# NOTE(review): initialization of the accumulator (presumably
# `upd_commands = []`), the except body, and the final return are
# missing from this sample -- verify against upstream.
1364 def grab_global_updates(portdir):
# Local import keeps portage.update off the hot import path.
1365 from portage.update import grab_updates, parse_updates
1366 updpath = os.path.join(portdir, "profiles", "updates")
1368 rawupdates = grab_updates(updpath)
# A missing updates directory is not an error -- treated as "no updates".
1369 except portage.exception.DirectoryNotFound:
1372 for mykey, mystat, mycontent in rawupdates:
# parse_updates() returns (commands, errors); errors are ignored here.
1373 commands, errors = parse_updates(mycontent)
1374 upd_commands.extend(commands)
# Apply global update commands to one package's dependency metadata
# (DEPEND/RDEPEND/PDEPEND) in the given dbapi.
# NOTE(review): upstream guards the aux_update() call with
# `if updates:` -- that line is absent from this sample.
1377 def perform_global_updates(mycpv, mydb, mycommands):
1378 from portage.update import update_dbentries
1379 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1380 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1381 updates = update_dbentries(mycommands, aux_dict)
1383 mydb.aux_update(mycpv, updates)
# Visibility predicate: a package is visible when it passes SLOT,
# CHOST, EAPI, keyword, mask, profile-mask and license checks.
# NOTE(review): every `return True`/`return False` line and the `try:`
# around the license check are missing from this sample; only the
# test conditions survive. Each `if` below presumably returns False
# on its own line in the original.
1385 def visible(pkgsettings, pkg):
1387 Check if a package is visible. This can raise an InvalidDependString
1388 exception if LICENSE is invalid.
1389 TODO: optionally generate a list of masking reasons
1391 @returns: True if the package is visible, False otherwise.
# An empty SLOT marks a corrupt/invalid package.
1393 if not pkg.metadata["SLOT"]:
# CHOST acceptance only matters for packages not yet installed.
1395 if not pkg.installed:
1396 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1398 eapi = pkg.metadata["EAPI"]
1399 if not portage.eapi_is_supported(eapi):
# Deprecated EAPIs are tolerated for installed packages but not new ones.
1401 if not pkg.installed:
1402 if portage._eapi_is_deprecated(eapi):
1404 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1406 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1408 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1411 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
# Raised when LICENSE contains an invalid dep string (see docstring).
1413 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for a package: start from
# portage.getmaskingstatus() and append CHOST / invalid-SLOT reasons.
# NOTE(review): the final `return mreasons` is missing from this sample.
1417 def get_masking_status(pkg, pkgsettings, root_config):
1419 mreasons = portage.getmaskingstatus(
1420 pkg, settings=pkgsettings,
1421 portdb=root_config.trees["porttree"].dbapi)
# CHOST mismatch only masks packages not yet installed.
1423 if not pkg.installed:
1424 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1425 mreasons.append("CHOST: %s" % \
1426 pkg.metadata["CHOST"])
1428 if not pkg.metadata["SLOT"]:
1429 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for `cpv` from `db` and compute its masking reasons.
# Returns (metadata, mreasons); metadata is None (reason "corruption")
# when aux_get fails. NOTE(review): the try/except around aux_get and
# an intervening `else:` branch are missing from this sample.
1433 def get_mask_info(root_config, cpv, pkgsettings,
1434 db, pkg_type, built, installed, db_keys):
1437 metadata = dict(izip(db_keys,
1438 db.aux_get(cpv, db_keys)))
# For unbuilt ebuilds, compute USE/CHOST from current settings since
# the metadata cache does not carry them.
1441 if metadata and not built:
1442 pkgsettings.setcpv(cpv, mydb=metadata)
1443 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1444 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1445 if metadata is None:
1446 mreasons = ["corruption"]
1448 eapi = metadata['EAPI']
# presumably an EAPI-prefix normalization step sits here (lines
# 1449-1450 absent) -- verify against upstream.
1451 if not portage.eapi_is_supported(eapi):
1452 mreasons = ['EAPI %s' % eapi]
1454 pkg = Package(type_name=pkg_type, root_config=root_config,
1455 cpv=cpv, built=built, installed=installed, metadata=metadata)
1456 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1457 return metadata, mreasons
# Print a report of masked packages: each cpv with its mask reasons,
# deduplicated mask-file comments, and license file locations.
# Returns True if any package was masked by an unsupported EAPI.
# NOTE(review): `shown_cpvs = set()` initialization, several print
# lines, `continue` statements, and the license-check `try:` are
# missing from this sample.
1459 def show_masked_packages(masked_packages):
1460 shown_licenses = set()
1461 shown_comments = set()
1462 # Maybe there is both an ebuild and a binary. Only
1463 # show one of them to avoid redundant appearance.
1465 have_eapi_mask = False
1466 for (root_config, pkgsettings, cpv,
1467 metadata, mreasons) in masked_packages:
1468 if cpv in shown_cpvs:
1471 comment, filename = None, None
# Only package.mask entries carry an explanatory comment block.
1472 if "package.mask" in mreasons:
1473 comment, filename = \
1474 portage.getmaskingreason(
1475 cpv, metadata=metadata,
1476 settings=pkgsettings,
1477 portdb=root_config.trees["porttree"].dbapi,
1478 return_location=True)
1479 missing_licenses = []
1481 if not portage.eapi_is_supported(metadata["EAPI"]):
1482 have_eapi_mask = True
1484 missing_licenses = \
1485 pkgsettings._getMissingLicenses(
1487 except portage.exception.InvalidDependString:
1488 # This will have already been reported
1489 # above via mreasons.
1492 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
# Show each mask-file comment only once across all packages.
1493 if comment and comment not in shown_comments:
1496 shown_comments.add(comment)
1497 portdb = root_config.trees["porttree"].dbapi
1498 for l in missing_licenses:
1499 l_path = portdb.findLicensePath(l)
# Likewise, mention each license file location only once.
1500 if l in shown_licenses:
1502 msg = ("A copy of the '%s' license" + \
1503 " is located at '%s'.") % (l, l_path)
1506 shown_licenses.add(l)
1507 return have_eapi_mask
# Base class for schedulable tasks. A Task is identified by a tuple
# hash key supplied by subclasses via _get_hash_key(); equality,
# hashing, iteration, indexing and str() all delegate to that tuple,
# so a Task compares equal to its own hash-key tuple.
# NOTE(review): the `def` lines for __hash__, __len__, __iter__ and
# __str__ and the `return hash_key` in _get_hash_key are missing from
# this sample -- the orphaned bodies below belong to them.
1509 class Task(SlotObject):
1510 __slots__ = ("_hash_key", "_hash_value")
1512 def _get_hash_key(self):
1513 hash_key = getattr(self, "_hash_key", None)
1514 if hash_key is None:
# Subclasses must populate _hash_key before this is called.
1515 raise NotImplementedError(self)
1518 def __eq__(self, other):
1519 return self._get_hash_key() == other
1521 def __ne__(self, other):
1522 return self._get_hash_key() != other
# Body of __hash__ (def line absent): the hash of the key tuple is
# computed once and memoized in _hash_value.
1525 hash_value = getattr(self, "_hash_value", None)
1526 if hash_value is None:
1527 self._hash_value = hash(self._get_hash_key())
1528 return self._hash_value
# Body of __len__ (def line absent).
1531 return len(self._get_hash_key())
1533 def __getitem__(self, key):
1534 return self._get_hash_key()[key]
# Body of __iter__ (def line absent).
1537 return iter(self._get_hash_key())
1539 def __contains__(self, key):
1540 return key in self._get_hash_key()
# Body of __str__ (def line absent).
1543 return str(self._get_hash_key())
# A blocker atom ("!cat/pkg") encountered during dependency
# resolution. Hash key: ("blocks", root, atom, eapi).
1545 class Blocker(Task):
# Defining __eq__ in Task would otherwise suppress inherited __hash__.
1547 __hash__ = Task.__hash__
1548 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1550 def __init__(self, **kwargs):
1551 Task.__init__(self, **kwargs)
# Cache the category/package key of the blocked atom.
1552 self.cp = portage.dep_getkey(self.atom)
1554 def _get_hash_key(self):
1555 hash_key = getattr(self, "_hash_key", None)
1556 if hash_key is None:
# NOTE(review): the `self._hash_key = \` assignment line is missing
# from this sample; the tuple below is its right-hand side.
1558 ("blocks", self.root, self.atom, self.eapi)
1559 return self._hash_key
# A package node in the dependency graph: wraps a cpv plus its
# metadata and derived attributes (cp, slot_atom, USE/IUSE sets).
# Hash key: (type_name, root, cpv, operation). Rich comparisons order
# packages of the same cp by version via portage.pkgcmp().
# NOTE(review): this class is heavily gutted in this sample --
# `metadata_keys = [` opening, the _use class header, parts of
# _iuse.__init__, the `self._hash_key = \` assignment, and the
# return True/False lines of the comparison methods are all missing.
1561 class Package(Task):
1563 __hash__ = Task.__hash__
1564 __slots__ = ("built", "cpv", "depth",
1565 "installed", "metadata", "onlydeps", "operation",
1566 "root_config", "type_name",
1567 "category", "counter", "cp", "cpv_split",
1568 "inherited", "iuse", "mtime",
1569 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# These lines are the body of `metadata_keys = [` (opening line absent):
# the metadata keys every Package carries.
1572 "CHOST", "COUNTER", "DEPEND", "EAPI",
1573 "INHERITED", "IUSE", "KEYWORDS",
1574 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1575 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1577 def __init__(self, **kwargs):
1578 Task.__init__(self, **kwargs)
1579 self.root = self.root_config.root
# Wrap metadata so attribute caches (iuse, use, slot...) stay in sync.
1580 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1581 self.cp = portage.cpv_getkey(self.cpv)
1584 # Avoid an InvalidAtom exception when creating slot_atom.
1585 # This package instance will be masked due to empty SLOT.
# `slot` is presumably bound from metadata just above (lines absent).
1587 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1588 self.category, self.pf = portage.catsplit(self.cpv)
1589 self.cpv_split = portage.catpkgsplit(self.cpv)
1590 self.pv_split = self.cpv_split[1:]
# Nested helper class `_use` (header line absent): holds enabled USE.
1594 __slots__ = ("__weakref__", "enabled")
1596 def __init__(self, use):
1597 self.enabled = frozenset(use)
# Nested helper: parsed IUSE with lazily-built validation regex.
1599 class _iuse(object):
1601 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1603 def __init__(self, tokens, iuse_implicit):
1604 self.tokens = tuple(tokens)
1605 self.iuse_implicit = iuse_implicit
# Token-classification loop (header and +/- tests absent): strips the
# leading +/- and buckets flags into enabled/disabled/other.
1612 enabled.append(x[1:])
1614 disabled.append(x[1:])
1617 self.enabled = frozenset(enabled)
1618 self.disabled = frozenset(disabled)
1619 self.all = frozenset(chain(enabled, disabled, other))
# Lazily compile the IUSE-matching regex on first access of .regex.
1621 def __getattribute__(self, name):
1624 return object.__getattribute__(self, "regex")
1625 except AttributeError:
1626 all = object.__getattribute__(self, "all")
1627 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1628 # Escape anything except ".*" which is supposed
1629 # to pass through from _get_implicit_iuse()
1630 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1631 regex = "^(%s)$" % "|".join(regex)
1632 regex = regex.replace("\\.\\*", ".*")
1633 self.regex = re.compile(regex)
1634 return object.__getattribute__(self, name)
1636 def _get_hash_key(self):
1637 hash_key = getattr(self, "_hash_key", None)
1638 if hash_key is None:
# Default operation is "merge"; installed/onlydeps packages never merge.
1639 if self.operation is None:
1640 self.operation = "merge"
1641 if self.onlydeps or self.installed:
1642 self.operation = "nomerge"
# NOTE(review): the `self._hash_key = \` assignment line is absent.
1644 (self.type_name, self.root, self.cpv, self.operation)
1645 return self._hash_key
# Comparisons: packages of different cp are unordered (presumably
# return False -- those lines are absent from this sample).
1647 def __lt__(self, other):
1648 if other.cp != self.cp:
1650 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1654 def __le__(self, other):
1655 if other.cp != self.cp:
1657 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1661 def __gt__(self, other):
1662 if other.cp != self.cp:
1664 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1668 def __ge__(self, other):
1669 if other.cp != self.cp:
1671 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level setup for the metadata wrapper: the full set of metadata
# keys (all auxdb keys except UNUSED_* and the obsolete CDEPEND, plus
# everything Package requires) backs a slot-dict class used as the
# wrapper base. Must run after Package is defined.
1675 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1676 if not x.startswith("UNUSED_"))
1677 _all_metadata_keys.discard("CDEPEND")
1678 _all_metadata_keys.update(Package.metadata_keys)
1680 from portage.cache.mappings import slot_dict_class
1681 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Slot-dict metadata wrapper that mirrors selected keys onto Package
# attributes on assignment: writing e.g. d["IUSE"] also updates
# self._pkg.iuse via the matching _set_* hook.
# NOTE(review): `self._pkg = pkg` in __init__, `self._pkg.slot = v` in
# _set_slot, and the try/except bodies of _set_counter/_set__mtime_
# are missing from this sample.
1683 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1685 Detect metadata updates and synchronize Package attributes.
1688 __slots__ = ("_pkg",)
# Keys whose assignment triggers a _set_<key> hook below.
1689 _wrapped_keys = frozenset(
1690 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1692 def __init__(self, pkg, metadata):
1693 _PackageMetadataWrapperBase.__init__(self)
# update() funnels every key through __setitem__, firing the hooks.
1695 self.update(metadata)
1697 def __setitem__(self, k, v):
1698 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1699 if k in self._wrapped_keys:
# Dispatch to _set_counter / _set_iuse / ... by lowercased key name.
1700 getattr(self, "_set_" + k.lower())(k, v)
1702 def _set_inherited(self, k, v):
1703 if isinstance(v, basestring):
1704 v = frozenset(v.split())
1705 self._pkg.inherited = v
1707 def _set_iuse(self, k, v):
1708 self._pkg.iuse = self._pkg._iuse(
1709 v.split(), self._pkg.root_config.iuse_implicit)
1711 def _set_slot(self, k, v):
1714 def _set_use(self, k, v):
1715 self._pkg.use = self._pkg._use(v.split())
1717 def _set_counter(self, k, v):
# String counters are presumably converted to long with a fallback
# of 0 on ValueError (lines absent) -- verify against upstream.
1718 if isinstance(v, basestring):
1723 self._pkg.counter = v
1725 def _set__mtime_(self, k, v):
1726 if isinstance(v, basestring):
# Synchronous --fetchonly handler: runs the "fetch" phase of an ebuild
# directly (no scheduler), optionally inside a private temporary
# builddir so pkg_nofetch can run with a sane $PWD (bug #239560).
# NOTE(review): the `execute`/`_execute` def lines, try/finally
# scaffolding and several return statements are missing from this
# sample -- the orphaned bodies below belong to them.
1733 class EbuildFetchonly(SlotObject):
1735 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
# Body of the main entry point (def line absent): set up the ebuild
# environment, then fetch, using a builddir only when RESTRICT=fetch.
1738 settings = self.settings
1740 portdb = pkg.root_config.trees["porttree"].dbapi
1741 ebuild_path = portdb.findname(pkg.cpv)
1742 settings.setcpv(pkg)
1743 debug = settings.get("PORTAGE_DEBUG") == "1"
1744 use_cache = 1 # always true
1745 portage.doebuild_environment(ebuild_path, "fetch",
1746 settings["ROOT"], settings, debug, use_cache, portdb)
1747 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
# RESTRICT=fetch needs a builddir so pkg_nofetch can be spawned.
1750 rval = self._execute_with_builddir()
1752 rval = portage.doebuild(ebuild_path, "fetch",
1753 settings["ROOT"], settings, debug=debug,
1754 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1755 mydbapi=portdb, tree="porttree")
1757 if rval != os.EX_OK:
1758 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1759 eerror(msg, phase="unpack", key=pkg.cpv)
1763 def _execute_with_builddir(self):
1764 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1765 # ensuring sane $PWD (bug #239560) and storing elog
1766 # messages. Use a private temp directory, in order
1767 # to avoid locking the main one.
1768 settings = self.settings
1769 global_tmpdir = settings["PORTAGE_TMPDIR"]
1770 from tempfile import mkdtemp
1772 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
# EPERM from mkdtemp is translated into PermissionDenied; the
# enclosing try: line is absent from this sample.
1774 if e.errno != portage.exception.PermissionDenied.errno:
1776 raise portage.exception.PermissionDenied(global_tmpdir)
1777 settings["PORTAGE_TMPDIR"] = private_tmpdir
1778 settings.backup_changes("PORTAGE_TMPDIR")
1780 retval = self._execute()
# Finally-block (header absent): restore the global tmpdir and clean
# up the private one regardless of fetch outcome.
1782 settings["PORTAGE_TMPDIR"] = global_tmpdir
1783 settings.backup_changes("PORTAGE_TMPDIR")
1784 shutil.rmtree(private_tmpdir)
# Body of _execute (def line absent): same fetch invocation, run
# inside the private builddir, with elog processing afterwards.
1788 settings = self.settings
1790 root_config = pkg.root_config
1791 portdb = root_config.trees["porttree"].dbapi
1792 ebuild_path = portdb.findname(pkg.cpv)
1793 debug = settings.get("PORTAGE_DEBUG") == "1"
1794 retval = portage.doebuild(ebuild_path, "fetch",
1795 self.settings["ROOT"], self.settings, debug=debug,
1796 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1797 mydbapi=portdb, tree="porttree")
1799 if retval != os.EX_OK:
1800 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1801 eerror(msg, phase="unpack", key=pkg.cpv)
1803 portage.elog.elog_process(self.pkg.cpv, self.settings)
# POLL* constants mirrored from the select module, with fallbacks for
# platforms whose select module lacks them (used by PollSelectAdapter).
# NOTE(review): the for-loop header binding k and v (presumably
# iterating `names` with power-of-two defaults) is absent from this
# sample.
1806 class PollConstants(object):
1809 Provides POLL* constants that are equivalent to those from the
1810 select module, for use by PollSelectAdapter.
1813 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
# Copy each constant from select when available, else use fallback v.
1816 locals()[k] = getattr(select, k, v)
# Abstract async task with start/poll/wait lifecycle and listener
# hooks. Subclasses override _start/_poll/_wait; the public wrappers
# fire start/exit listeners at the right moments.
# NOTE(review): the public start()/isAlive()/poll()/wait()/cancel()
# def lines and several return/`self.wait()` lines are missing from
# this sample -- orphaned bodies are annotated below.
1820 class AsynchronousTask(SlotObject):
1822 Subclasses override _wait() and _poll() so that calls
1823 to public methods can be wrapped for implementing
1824 hooks such as exit listener notification.
1826 Sublasses should call self.wait() to notify exit listeners after
1827 the task is complete and self.returncode has been set.
1830 __slots__ = ("background", "cancelled", "returncode") + \
1831 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
# start() docstring fragment (def line absent).
1835 Start an asynchronous task and then return as soon as possible.
# _start() body (def line absent): subclasses must implement.
1841 raise NotImplementedError(self)
# isAlive() body (def line absent): alive while no returncode is set.
1844 return self.returncode is None
# poll() body fragments (def line absent).
1851 return self.returncode
1854 if self.returncode is None:
1857 return self.returncode
1860 return self.returncode
# cancel() body fragment (def line absent).
1863 self.cancelled = True
1866 def addStartListener(self, f):
1868 The function will be called with one argument, a reference to self.
# Listener list is allocated lazily to keep idle tasks cheap.
1870 if self._start_listeners is None:
1871 self._start_listeners = []
1872 self._start_listeners.append(f)
1874 def removeStartListener(self, f):
1875 if self._start_listeners is None:
1877 self._start_listeners.remove(f)
1879 def _start_hook(self):
1880 if self._start_listeners is not None:
# Detach the list before notifying so re-registration during a
# callback cannot re-fire this batch.
1881 start_listeners = self._start_listeners
1882 self._start_listeners = None
1884 for f in start_listeners:
1887 def addExitListener(self, f):
1889 The function will be called with one argument, a reference to self.
1891 if self._exit_listeners is None:
1892 self._exit_listeners = []
1893 self._exit_listeners.append(f)
1895 def removeExitListener(self, f):
1896 if self._exit_listeners is None:
# During _wait_hook the listeners live on the stack instead.
1897 if self._exit_listener_stack is not None:
1898 self._exit_listener_stack.remove(f)
1900 self._exit_listeners.remove(f)
1902 def _wait_hook(self):
1904 Call this method after the task completes, just before returning
1905 the returncode from wait() or poll(). This hook is
1906 used to trigger exit listeners when the returncode first
1909 if self.returncode is not None and \
1910 self._exit_listeners is not None:
1912 # This prevents recursion, in case one of the
1913 # exit handlers triggers this method again by
1914 # calling wait(). Use a stack that gives
1915 # removeExitListener() an opportunity to consume
1916 # listeners from the stack, before they can get
1917 # called below. This is necessary because a call
1918 # to one exit listener may result in a call to
1919 # removeExitListener() for another listener on
1920 # the stack. That listener needs to be removed
1921 # from the stack since it would be inconsistent
1922 # to call it after it has been been passed into
1923 # removeExitListener().
1924 self._exit_listener_stack = self._exit_listeners
1925 self._exit_listeners = None
# Reverse so listeners fire in registration order via pop().
1927 self._exit_listener_stack.reverse()
1928 while self._exit_listener_stack:
1929 self._exit_listener_stack.pop()(self)
# Async task driven by a scheduler poll() loop: defines the event
# masks of interest and unregisters itself on error/hangup events.
# NOTE(review): part of __slots__, the _registered attribute and the
# unregister/wait calls inside _unregister_if_appropriate are missing
# from this sample.
1931 class AbstractPollTask(AsynchronousTask):
1933 __slots__ = ("scheduler",) + \
# POLLERR/POLLNVAL indicate a broken fd; POLLIN/POLLHUP are the
# normal read/end-of-stream events.
1937 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1938 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1941 def _unregister(self):
1942 raise NotImplementedError(self)
1944 def _unregister_if_appropriate(self, event):
1945 if self._registered:
# Error events presumably force cancel; POLLHUP means EOF and a
# normal unregister (bodies absent from this sample).
1946 if event & self._exceptional_events:
1949 elif event & PollConstants.POLLHUP:
# Scheduler-driven reader: registers every input file non-blocking
# with the scheduler, accumulates chunks in memory, and exposes them
# via getvalue(). NOTE(review): the _start/isAlive/cancel/_wait/
# getvalue/close def lines and the try/except around buf.fromfile are
# missing from this sample.
1953 class PipeReader(AbstractPollTask):
1956 Reads output from one or more files and saves it in memory,
1957 for retrieval via the getvalue() method. This is driven by
1958 the scheduler's poll() loop, so it runs entirely within the
1962 __slots__ = ("input_files",) + \
1963 ("_read_data", "_reg_ids")
# _start() body (def line absent): put each fd in O_NONBLOCK mode and
# register it with the scheduler for read events.
1966 self._reg_ids = set()
1967 self._read_data = []
1968 for k, f in self.input_files.iteritems():
1969 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1970 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1971 self._reg_ids.add(self.scheduler.register(f.fileno(),
1972 self._registered_events, self._output_handler))
1973 self._registered = True
# isAlive() body (def line absent).
1976 return self._registered
# cancel() body fragment (def line absent).
1979 if self.returncode is None:
1981 self.cancelled = True
# _wait() body (def line absent): block in the scheduler until all
# fds have been unregistered, then report success.
1985 if self.returncode is not None:
1986 return self.returncode
1988 if self._registered:
1989 self.scheduler.schedule(self._reg_ids)
1992 self.returncode = os.EX_OK
1993 return self.returncode
# getvalue() (def line absent): join chunks; bytes on Python 3.
1996 """Retrieve the entire contents"""
1997 if sys.hexversion >= 0x3000000:
1998 return bytes().join(self._read_data)
1999 return "".join(self._read_data)
# close() (def line absent).
2002 """Free the memory buffer."""
2003 self._read_data = None
2005 def _output_handler(self, fd, event):
2007 if event & PollConstants.POLLIN:
# Find which registered file fired, then drain one buffer's worth.
2009 for f in self.input_files.itervalues():
2010 if fd == f.fileno():
2013 buf = array.array('B')
# EOFError from fromfile is presumably tolerated (except line absent):
# a short read still leaves valid data in buf.
2015 buf.fromfile(f, self._bufsize)
2020 self._read_data.append(buf.tostring())
2025 self._unregister_if_appropriate(event)
# Returning False tells the scheduler to drop this handler.
2026 return self._registered
2028 def _unregister(self):
2030 Unregister from the scheduler and close open files.
2033 self._registered = False
2035 if self._reg_ids is not None:
2036 for reg_id in self._reg_ids:
2037 self.scheduler.unregister(reg_id)
2038 self._reg_ids = None
2040 if self.input_files is not None:
2041 for f in self.input_files.itervalues():
2043 self.input_files = None
# A task composed of a chain of sub-tasks; _current_task tracks the
# one in flight, and the _default_exit/_final_exit helpers implement
# standard exit-callback plumbing for subclasses.
# NOTE(review): isAlive/cancel/_poll/_wait def lines, the while-loop
# scaffolding in _poll/_wait, and `task.start()` in _start_task are
# missing from this sample.
2045 class CompositeTask(AsynchronousTask):
2047 __slots__ = ("scheduler",) + ("_current_task",)
# isAlive() body (def line absent).
2050 return self._current_task is not None
# cancel() body (def line absent): propagate to the current sub-task.
2053 self.cancelled = True
2054 if self._current_task is not None:
2055 self._current_task.cancel()
# _poll() docstring/body fragments (def line absent).
2059 This does a loop calling self._current_task.poll()
2060 repeatedly as long as the value of self._current_task
2061 keeps changing. It calls poll() a maximum of one time
2062 for a given self._current_task instance. This is useful
2063 since calling poll() on a task can trigger advance to
2064 the next task could eventually lead to the returncode
2065 being set in cases when polling only a single task would
2066 not have the same effect.
2071 task = self._current_task
2072 if task is None or task is prev:
2073 # don't poll the same task more than once
2078 return self.returncode
# _wait() body fragments (def line absent): wait on each sub-task
# until an exit listener swaps _current_task out.
2084 task = self._current_task
2086 # don't wait for the same task more than once
2089 # Before the task.wait() method returned, an exit
2090 # listener should have set self._current_task to either
2091 # a different task or None. Something is wrong.
2092 raise AssertionError("self._current_task has not " + \
2093 "changed since calling wait", self, task)
2097 return self.returncode
2099 def _assert_current(self, task):
2101 Raises an AssertionError if the given task is not the
2102 same one as self._current_task. This can be useful
2105 if task is not self._current_task:
2106 raise AssertionError("Unrecognized task: %s" % (task,))
2108 def _default_exit(self, task):
2110 Calls _assert_current() on the given task and then sets the
2111 composite returncode attribute if task.returncode != os.EX_OK.
2112 If the task failed then self._current_task will be set to None.
2113 Subclasses can use this as a generic task exit callback.
2116 @returns: The task.returncode attribute.
2118 self._assert_current(task)
2119 if task.returncode != os.EX_OK:
2120 self.returncode = task.returncode
2121 self._current_task = None
2122 return task.returncode
2124 def _final_exit(self, task):
2126 Assumes that task is the final task of this composite task.
2127 Calls _default_exit() and sets self.returncode to the task's
2128 returncode and sets self._current_task to None.
2130 self._default_exit(task)
2131 self._current_task = None
2132 self.returncode = task.returncode
2133 return self.returncode
2135 def _default_final_exit(self, task):
2137 This calls _final_exit() and then wait().
2139 Subclasses can use this as a generic final task exit callback.
2142 self._final_exit(task)
# presumably followed by `return self.wait()` (line absent).
2145 def _start_task(self, task, exit_handler):
2147 Register exit handler for the given task, set it
2148 as self._current_task, and call task.start().
2150 Subclasses can use this as a generic way to start
2154 task.addExitListener(exit_handler)
2155 self._current_task = task
# presumably followed by `task.start()` (line absent).
# Runs queued tasks one after another; stops at the first failure
# (via _default_exit) and finishes with _final_exit on the last task.
# NOTE(review): the _start/cancel def lines and the `self.wait()`/
# `else:` lines of _task_exit_handler are missing from this sample.
2158 class TaskSequence(CompositeTask):
2160 A collection of tasks that executes sequentially. Each task
2161 must have a addExitListener() method that can be used as
2162 a means to trigger movement from one task to the next.
2165 __slots__ = ("_task_queue",)
2167 def __init__(self, **kwargs):
2168 AsynchronousTask.__init__(self, **kwargs)
# deque gives O(1) popleft for FIFO task dispatch.
2169 self._task_queue = deque()
2171 def add(self, task):
2172 self._task_queue.append(task)
# _start() body (def line absent).
2175 self._start_next_task()
# cancel() body fragment (def line absent): drop pending tasks, then
# delegate to CompositeTask.cancel() for the in-flight one.
2178 self._task_queue.clear()
2179 CompositeTask.cancel(self)
2181 def _start_next_task(self):
2182 self._start_task(self._task_queue.popleft(),
2183 self._task_exit_handler)
2185 def _task_exit_handler(self, task):
# Failure short-circuits the sequence; otherwise advance or finish.
2186 if self._default_exit(task) != os.EX_OK:
2188 elif self._task_queue:
2189 self._start_next_task()
2191 self._final_exit(task)
# Poll-task wrapper around a forked child process: polls with
# os.waitpid(WNOHANG), cancels via SIGTERM, and decodes the waitpid
# status into a shell-style returncode.
# NOTE(review): the _poll/cancel/isAlive/_wait def lines, try/except
# headers around waitpid/kill, and parts of _set_returncode's
# signal-vs-exit branch are missing from this sample.
2194 class SubProcess(AbstractPollTask):
2196 __slots__ = ("pid",) + \
2197 ("_files", "_reg_id")
2199 # A file descriptor is required for the scheduler to monitor changes from
2200 # inside a poll() loop. When logging is not enabled, create a pipe just to
2201 # serve this purpose alone.
# _poll() body (def line absent): non-blocking reap of the child.
2205 if self.returncode is not None:
2206 return self.returncode
2207 if self.pid is None:
2208 return self.returncode
2209 if self._registered:
2210 return self.returncode
2213 retval = os.waitpid(self.pid, os.WNOHANG)
# ECHILD means the child was already reaped elsewhere; treat as failed
# (synthetic status 1). Other errnos presumably re-raise.
2215 if e.errno != errno.ECHILD:
2218 retval = (self.pid, 1)
# (0, 0) from WNOHANG means the child is still running.
2220 if retval == (0, 0):
2222 self._set_returncode(retval)
2223 return self.returncode
# cancel() body fragments (def line absent): SIGTERM, ignoring ESRCH
# for an already-gone child.
2228 os.kill(self.pid, signal.SIGTERM)
2230 if e.errno != errno.ESRCH:
2234 self.cancelled = True
2235 if self.pid is not None:
2237 return self.returncode
# isAlive() body (def line absent).
2240 return self.pid is not None and \
2241 self.returncode is None
# _wait() body (def line absent): let the scheduler drain the pipe,
# then block in waitpid for the final status.
2245 if self.returncode is not None:
2246 return self.returncode
2248 if self._registered:
2249 self.scheduler.schedule(self._reg_id)
2251 if self.returncode is not None:
2252 return self.returncode
2255 wait_retval = os.waitpid(self.pid, 0)
2257 if e.errno != errno.ECHILD:
2260 self._set_returncode((self.pid, 1))
2262 self._set_returncode(wait_retval)
2264 return self.returncode
2266 def _unregister(self):
2268 Unregister from the scheduler and close open files.
2271 self._registered = False
2273 if self._reg_id is not None:
2274 self.scheduler.unregister(self._reg_id)
2277 if self._files is not None:
2278 for f in self._files.itervalues():
2282 def _set_returncode(self, wait_retval):
# Decode the 16-bit waitpid status: low byte = signal, high byte =
# exit code (branch headers are absent from this sample).
2284 retval = wait_retval[1]
2286 if retval != os.EX_OK:
2288 retval = (retval & 0xff) << 8
2290 retval = retval >> 8
2292 self.returncode = retval
# SubProcess subclass that spawns via portage.process.spawn(), wiring
# the child's stdout/stderr through a pipe (or pty) so the scheduler
# can tee output to a log file and/or the console.
# NOTE(review): this class is heavily gutted in this sample -- the
# _start def line, several try/except/else headers, the retry loop in
# _output_handler, and various cleanup lines are missing.
2294 class SpawnProcess(SubProcess):
2297 Constructor keyword args are passed into portage.process.spawn().
2298 The required "args" keyword argument will be passed as the first
# Only these attributes are forwarded as spawn() keyword arguments.
2302 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2303 "uid", "gid", "groups", "umask", "logfile",
2304 "path_lookup", "pre_exec")
2306 __slots__ = ("args",) + \
2309 _file_names = ("log", "process", "stdout")
2310 _files_dict = slot_dict_class(_file_names, prefix="")
# _start() body (def line absent): build fd_pipes with std* defaults.
2317 if self.fd_pipes is None:
2319 fd_pipes = self.fd_pipes
2320 fd_pipes.setdefault(0, sys.stdin.fileno())
2321 fd_pipes.setdefault(1, sys.stdout.fileno())
2322 fd_pipes.setdefault(2, sys.stderr.fileno())
2324 # flush any pending output
2325 for fd in fd_pipes.itervalues():
2326 if fd == sys.stdout.fileno():
2328 if fd == sys.stderr.fileno():
2331 logfile = self.logfile
2332 self._files = self._files_dict()
# The master side is read non-blocking by the scheduler's handler.
2335 master_fd, slave_fd = self._pipe(fd_pipes)
2336 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2337 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2340 fd_pipes_orig = fd_pipes.copy()
2342 # TODO: Use job control functions like tcsetpgrp() to control
2343 # access to stdin. Until then, use /dev/null so that any
2344 # attempts to read from stdin will immediately return EOF
2345 # instead of blocking indefinitely.
2346 null_input = open('/dev/null', 'rb')
2347 fd_pipes[0] = null_input.fileno()
2349 fd_pipes[0] = fd_pipes_orig[0]
2351 files.process = os.fdopen(master_fd, 'rb')
2352 if logfile is not None:
# With a logfile, the child's stdout/stderr both go into the pipe and
# the handler tees them to the log (and console when foregrounded).
2354 fd_pipes[1] = slave_fd
2355 fd_pipes[2] = slave_fd
2357 files.log = open(logfile, mode='ab')
2358 portage.util.apply_secpass_permissions(logfile,
2359 uid=portage.portage_uid, gid=portage.portage_gid,
2362 if not self.background:
2363 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2365 output_handler = self._output_handler
2369 # Create a dummy pipe so the scheduler can monitor
2370 # the process from inside a poll() loop.
2371 fd_pipes[self._dummy_pipe_fd] = slave_fd
2373 fd_pipes[1] = slave_fd
2374 fd_pipes[2] = slave_fd
2375 output_handler = self._dummy_handler
2378 for k in self._spawn_kwarg_names:
2379 v = getattr(self, k)
2383 kwargs["fd_pipes"] = fd_pipes
2384 kwargs["returnpid"] = True
# logfile is handled by this class's tee logic, not by spawn().
2385 kwargs.pop("logfile", None)
2387 self._reg_id = self.scheduler.register(files.process.fileno(),
2388 self._registered_events, output_handler)
2389 self._registered = True
2391 retval = self._spawn(self.args, **kwargs)
# Parent no longer needs the slave end / null stdin after fork.
2394 if null_input is not None:
# An int return from spawn means the fork itself failed.
2397 if isinstance(retval, int):
2400 self.returncode = retval
2404 self.pid = retval[0]
# This class reaps the child itself; keep spawned_pids clean.
2405 portage.process.spawned_pids.remove(self.pid)
2407 def _pipe(self, fd_pipes):
2409 @type fd_pipes: dict
2410 @param fd_pipes: pipes from which to copy terminal size if desired.
2414 def _spawn(self, args, **kwargs):
2415 return portage.process.spawn(args, **kwargs)
2417 def _output_handler(self, fd, event):
2419 if event & PollConstants.POLLIN:
2422 buf = array.array('B')
2424 buf.fromfile(files.process, self._bufsize)
2429 if not self.background:
2430 write_successful = False
# Retry loop (header absent): writes to the console fd may hit
# EAGAIN if another process flipped it to O_NONBLOCK.
2434 if not write_successful:
2435 buf.tofile(files.stdout)
2436 write_successful = True
2437 files.stdout.flush()
2440 if e.errno != errno.EAGAIN:
2445 # Avoid a potentially infinite loop. In
2446 # most cases, the failure count is zero
2447 # and it's unlikely to exceed 1.
2450 # This means that a subprocess has put an inherited
2451 # stdio file descriptor (typically stdin) into
2452 # O_NONBLOCK mode. This is not acceptable (see bug
2453 # #264435), so revert it. We need to use a loop
2454 # here since there's a race condition due to
2455 # parallel processes being able to change the
2456 # flags on the inherited file descriptor.
2457 # TODO: When possible, avoid having child processes
2458 # inherit stdio file descriptors from portage
2459 # (maybe it can't be avoided with
2460 # PROPERTIES=interactive).
2461 fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2462 fcntl.fcntl(files.stdout.fileno(),
2463 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2465 buf.tofile(files.log)
2471 self._unregister_if_appropriate(event)
2472 return self._registered
2474 def _dummy_handler(self, fd, event):
2476 This method is mainly interested in detecting EOF, since
2477 the only purpose of the pipe is to allow the scheduler to
2478 monitor the process from inside a poll() loop.
2481 if event & PollConstants.POLLIN:
2483 buf = array.array('B')
2485 buf.fromfile(self._files.process, self._bufsize)
2495 self._unregister_if_appropriate(event)
2496 return self._registered
# Spawns $PORTAGE_BIN_PATH/misc-functions.sh with the phase's commands
# inside an existing ebuild environment, then maps the exit-status
# file back onto self.returncode.
# NOTE(review): the _start def line and the assignment target line of
# the SpawnProcess subclass boilerplate are missing from this sample.
2498 class MiscFunctionsProcess(SpawnProcess):
2500 Spawns misc-functions.sh with an existing ebuild environment.
2503 __slots__ = ("commands", "phase", "pkg", "settings")
# _start() body (def line absent): assemble args/env, clear any stale
# exit-status file, then delegate to SpawnProcess._start.
2506 settings = self.settings
2507 settings.pop("EBUILD_PHASE", None)
2508 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2509 misc_sh_binary = os.path.join(portage_bin_path,
2510 os.path.basename(portage.const.MISC_SH_BINARY))
2512 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2513 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Stale exit-status files would otherwise be misread after the run.
2515 portage._doebuild_exit_status_unlink(
2516 settings.get("EBUILD_EXIT_STATUS_FILE"))
2518 SpawnProcess._start(self)
2520 def _spawn(self, args, **kwargs):
2521 settings = self.settings
2522 debug = settings.get("PORTAGE_DEBUG") == "1"
# Run through portage.spawn so the ebuild environment applies.
2523 return portage.spawn(" ".join(args), settings,
2524 debug=debug, **kwargs)
2526 def _set_returncode(self, wait_retval):
2527 SpawnProcess._set_returncode(self, wait_retval)
# The exit-status file is authoritative over the process status.
2528 self.returncode = portage._doebuild_exit_status_check_and_log(
2529 self.settings, self.phase, self.returncode)
# Runs `ebuild <file> fetch` as a subprocess. In non-prefetch mode it
# locks a build dir so pkg_nofetch elog messages can be collected; in
# prefetch mode it stays out of the build dir entirely.
# NOTE(review): the _start def line, the `phase = ...` selection,
# several conditional headers and cleanup lines are missing from this
# sample.
2531 class EbuildFetcher(SpawnProcess):
2533 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
# _start() body (def line absent).
2538 root_config = self.pkg.root_config
2539 portdb = root_config.trees["porttree"].dbapi
2540 ebuild_path = portdb.findname(self.pkg.cpv)
# Settings come from a shared pool and are returned on completion.
2541 settings = self.config_pool.allocate()
2542 settings.setcpv(self.pkg)
2544 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2545 # should not be touched since otherwise it could interfere with
2546 # another instance of the same cpv concurrently being built for a
2547 # different $ROOT (currently, builds only cooperate with prefetchers
2548 # that are spawned for the same $ROOT).
2549 if not self.prefetch:
2550 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2551 self._build_dir.lock()
2552 self._build_dir.clean_log()
2553 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2554 if self.logfile is None:
2555 self.logfile = settings.get("PORTAGE_LOG_FILE")
2561 # If any incremental variables have been overridden
2562 # via the environment, those values need to be passed
2563 # along here so that they are correctly considered by
2564 # the config instance in the subproccess.
2565 fetch_env = os.environ.copy()
2567 nocolor = settings.get("NOCOLOR")
2568 if nocolor is not None:
2569 fetch_env["NOCOLOR"] = nocolor
# The parent already applied niceness; don't re-apply in the child.
2571 fetch_env["PORTAGE_NICENESS"] = "0"
# presumably set only in prefetch mode (conditional header absent).
2573 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2575 ebuild_binary = os.path.join(
2576 settings["PORTAGE_BIN_PATH"], "ebuild")
# `phase` is presumably chosen just above (fetch vs fetchall) -- the
# line is absent from this sample.
2578 fetch_args = [ebuild_binary, ebuild_path, phase]
2579 debug = settings.get("PORTAGE_DEBUG") == "1"
2581 fetch_args.append("--debug")
2583 self.args = fetch_args
2584 self.env = fetch_env
2585 SpawnProcess._start(self)
2587 def _pipe(self, fd_pipes):
2588 """When appropriate, use a pty so that fetcher progress bars,
2589 like wget has, will work properly."""
2590 if self.background or not sys.stdout.isatty():
2591 # When the output only goes to a log file,
2592 # there's no point in creating a pty.
# presumably returns os.pipe() here (line absent).
2594 stdout_pipe = fd_pipes.get(1)
2595 got_pty, master_fd, slave_fd = \
2596 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2597 return (master_fd, slave_fd)
2599 def _set_returncode(self, wait_retval):
2600 SpawnProcess._set_returncode(self, wait_retval)
2601 # Collect elog messages that might have been
2602 # created by the pkg_nofetch phase.
2603 if self._build_dir is not None:
2604 # Skip elog messages for prefetch, in order to avoid duplicates.
2605 if not self.prefetch and self.returncode != os.EX_OK:
# Failure message goes to the log file when there is one, so it
# lands next to the fetch output.
2607 if self.logfile is not None:
2609 elog_out = open(self.logfile, 'a')
2610 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2611 if self.logfile is not None:
2612 msg += ", Log file:"
2613 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2614 if self.logfile is not None:
2615 eerror(" '%s'" % (self.logfile,),
2616 phase="unpack", key=self.pkg.cpv, out=elog_out)
2617 if elog_out is not None:
2619 if not self.prefetch:
2620 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2621 features = self._build_dir.settings.features
# On success, drop the (empty) fetch log to avoid clutter.
2622 if self.returncode == os.EX_OK:
2623 self._build_dir.clean_log()
2624 self._build_dir.unlock()
2625 self.config_pool.deallocate(self._build_dir.settings)
2626 self._build_dir = None
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# EbuildBuildDir: manages locking of a package's $PORTAGE_BUILDDIR, taking a
# temporary lock on the parent category directory while creating/locking it.
2628 class EbuildBuildDir(SlotObject):
2630 __slots__ = ("dir_path", "pkg", "settings",
2631 "locked", "_catdir", "_lock_obj")
2633 def __init__(self, **kwargs):
2634 SlotObject.__init__(self, **kwargs)
# The docstring fragment below belongs to lock() (its `def` line is elided).
2639 This raises an AlreadyLocked exception if lock() is called
2640 while a lock is already held. In order to avoid this, call
2641 unlock() or check whether the "locked" attribute is True
2642 or False before calling lock().
2644 if self._lock_obj is not None:
2645 raise self.AlreadyLocked((self._lock_obj,))
2647 dir_path = self.dir_path
2648 if dir_path is None:
# No explicit dir_path given: derive PORTAGE_BUILDDIR by running the
# "setup"-phase environment computation on this package's ebuild.
2649 root_config = self.pkg.root_config
2650 portdb = root_config.trees["porttree"].dbapi
2651 ebuild_path = portdb.findname(self.pkg.cpv)
2652 settings = self.settings
2653 settings.setcpv(self.pkg)
2654 debug = settings.get("PORTAGE_DEBUG") == "1"
2655 use_cache = 1 # always true
2656 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2657 self.settings, debug, use_cache, portdb)
2658 dir_path = self.settings["PORTAGE_BUILDDIR"]
2660 catdir = os.path.dirname(dir_path)
2661 self._catdir = catdir
# Ensure the grandparent exists, then briefly lock the category dir while
# creating it and taking the real lock on the build dir itself.
2663 portage.util.ensure_dirs(os.path.dirname(catdir),
2664 gid=portage.portage_gid,
2668 catdir_lock = portage.locks.lockdir(catdir)
2669 portage.util.ensure_dirs(catdir,
2670 gid=portage.portage_gid,
2672 self._lock_obj = portage.locks.lockdir(dir_path)
2674 self.locked = self._lock_obj is not None
2675 if catdir_lock is not None:
2676 portage.locks.unlockdir(catdir_lock)
2678 def clean_log(self):
2679 """Discard existing log."""
2680 settings = self.settings
2682 for x in ('.logid', 'temp/build.log'):
2684 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
# The lines below belong to unlock() (its `def` line is elided): release the
# build-dir lock, then try to remove the now-possibly-empty category dir,
# tolerating the benign rmdir errors listed.
2689 if self._lock_obj is None:
2692 portage.locks.unlockdir(self._lock_obj)
2693 self._lock_obj = None
2696 catdir = self._catdir
2699 catdir_lock = portage.locks.lockdir(catdir)
2705 if e.errno not in (errno.ENOENT,
2706 errno.ENOTEMPTY, errno.EEXIST):
2709 portage.locks.unlockdir(catdir_lock)
# Raised by lock() when a lock is already held (see docstring above).
2711 class AlreadyLocked(portage.exception.PortageException):
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# EbuildBuild: CompositeTask driving the full source-build pipeline for one
# package: (optional prefetch wait) -> fetch -> build phases -> optional
# binary-package creation -> merge. Holds the build-dir lock across stages.
2714 class EbuildBuild(CompositeTask):
2716 __slots__ = ("args_set", "config_pool", "find_blockers",
2717 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2718 "prefetcher", "settings", "world_atom") + \
2719 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# The statements below belong to a start method whose `def` line is elided:
# resolve the ebuild path and decide whether to wait on a running prefetcher.
2723 logger = self.logger
2726 settings = self.settings
2727 world_atom = self.world_atom
2728 root_config = pkg.root_config
2731 portdb = root_config.trees[tree].dbapi
2732 settings.setcpv(pkg)
2733 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2734 ebuild_path = portdb.findname(self.pkg.cpv)
2735 self._ebuild_path = ebuild_path
2737 prefetcher = self.prefetcher
2738 if prefetcher is None:
2740 elif not prefetcher.isAlive():
2742 elif prefetcher.poll() is None:
# Prefetcher still running: tell the user and register an exit listener
# rather than blocking.
2744 waiting_msg = "Fetching files " + \
2745 "in the background. " + \
2746 "To view fetch progress, run `tail -f " + \
2747 "/var/log/emerge-fetch.log` in another " + \
2749 msg_prefix = colorize("GOOD", " * ")
2750 from textwrap import wrap
2751 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2752 for line in wrap(waiting_msg, 65))
2753 if not self.background:
2754 writemsg(waiting_msg, noiselevel=-1)
2756 self._current_task = prefetcher
2757 prefetcher.addExitListener(self._prefetch_exit)
2760 self._prefetch_exit(prefetcher)
# After any prefetch completes: either do a synchronous pretend/fetchonly
# fetch (EbuildFetchonly) or start an asynchronous EbuildFetcher.
2762 def _prefetch_exit(self, prefetcher):
2766 settings = self.settings
2769 fetcher = EbuildFetchonly(
2770 fetch_all=opts.fetch_all_uri,
2771 pkg=pkg, pretend=opts.pretend,
2773 retval = fetcher.execute()
2774 self.returncode = retval
2778 fetcher = EbuildFetcher(config_pool=self.config_pool,
2779 fetchall=opts.fetch_all_uri,
2780 fetchonly=opts.fetchonly,
2781 background=self.background,
2782 pkg=pkg, scheduler=self.scheduler)
2784 self._start_task(fetcher, self._fetch_exit)
# After fetch: record/remove the fetch log, bail on failure or fetchonly,
# then lock the build dir and kick off the EbuildExecuter build phases.
2786 def _fetch_exit(self, fetcher):
2790 fetch_failed = False
2792 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2794 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2796 if fetch_failed and fetcher.logfile is not None and \
2797 os.path.exists(fetcher.logfile):
2798 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2800 if not fetch_failed and fetcher.logfile is not None:
2801 # Fetch was successful, so remove the fetch log.
2803 os.unlink(fetcher.logfile)
2807 if fetch_failed or opts.fetchonly:
2811 logger = self.logger
2813 pkg_count = self.pkg_count
2814 scheduler = self.scheduler
2815 settings = self.settings
2816 features = settings.features
2817 ebuild_path = self._ebuild_path
2818 system_set = pkg.root_config.sets["system"]
2820 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2821 self._build_dir.lock()
2823 # Cleaning is triggered before the setup
2824 # phase, in portage.doebuild().
2825 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2826 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2827 short_msg = "emerge: (%s of %s) %s Clean" % \
2828 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2829 logger.log(msg, short_msg=short_msg)
2831 #buildsyspkg: Check if we need to _force_ binary package creation
2832 self._issyspkg = "buildsyspkg" in features and \
2833 system_set.findAtomForPackage(pkg) and \
2836 if opts.buildpkg or self._issyspkg:
2838 self._buildpkg = True
2840 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2841 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2842 short_msg = "emerge: (%s of %s) %s Compile" % \
2843 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2844 logger.log(msg, short_msg=short_msg)
2847 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2848 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2849 short_msg = "emerge: (%s of %s) %s Compile" % \
2850 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2851 logger.log(msg, short_msg=short_msg)
2853 build = EbuildExecuter(background=self.background, pkg=pkg,
2854 scheduler=scheduler, settings=settings)
2855 self._start_task(build, self._build_exit)
# Flush elog messages and release the build-dir lock.
2857 def _unlock_builddir(self):
2858 portage.elog.elog_process(self.pkg.cpv, self.settings)
2859 self._build_dir.unlock()
# After the build phases: on success, optionally start the binary packager
# (forced for system packages when buildsyspkg is enabled).
2861 def _build_exit(self, build):
2862 if self._default_exit(build) != os.EX_OK:
2863 self._unlock_builddir()
2868 buildpkg = self._buildpkg
2871 self._final_exit(build)
2876 msg = ">>> This is a system package, " + \
2877 "let's pack a rescue tarball.\n"
2879 log_path = self.settings.get("PORTAGE_LOG_FILE")
2880 if log_path is not None:
2881 log_file = open(log_path, 'a')
2887 if not self.background:
2888 portage.writemsg_stdout(msg, noiselevel=-1)
2890 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2891 scheduler=self.scheduler, settings=self.settings)
2893 self._start_task(packager, self._buildpkg_exit)
2895 def _buildpkg_exit(self, packager):
# Docstring fragment (opening quotes elided by extraction):
2897 Released build dir lock when there is a failure or
2898 when in buildpkgonly mode. Otherwise, the lock will
2899 be released when merge() is called.
2902 if self._default_exit(packager) != os.EX_OK:
2903 self._unlock_builddir()
2907 if self.opts.buildpkgonly:
2908 # Need to call "clean" phase for buildpkgonly mode
2909 portage.elog.elog_process(self.pkg.cpv, self.settings)
2911 clean_phase = EbuildPhase(background=self.background,
2912 pkg=self.pkg, phase=phase,
2913 scheduler=self.scheduler, settings=self.settings,
2915 self._start_task(clean_phase, self._clean_exit)
2918 # Continue holding the builddir lock until
2919 # after the package has been installed.
2920 self._current_task = None
2921 self.returncode = packager.returncode
2924 def _clean_exit(self, clean_phase):
2925 if self._final_exit(clean_phase) != os.EX_OK or \
2926 self.opts.buildpkgonly:
2927 self._unlock_builddir()
# The lines below belong to an install/merge method whose `def` line is
# elided; its docstring fragment follows.
2932 Install the package and then clean up and release locks.
2933 Only call this after the build has completed successfully
2934 and neither fetchonly nor buildpkgonly mode are enabled.
2937 find_blockers = self.find_blockers
2938 ldpath_mtimes = self.ldpath_mtimes
2939 logger = self.logger
2941 pkg_count = self.pkg_count
2942 settings = self.settings
2943 world_atom = self.world_atom
2944 ebuild_path = self._ebuild_path
2947 merge = EbuildMerge(find_blockers=self.find_blockers,
2948 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2949 pkg_count=pkg_count, pkg_path=ebuild_path,
2950 scheduler=self.scheduler,
2951 settings=settings, tree=tree, world_atom=world_atom)
2953 msg = " === (%s of %s) Merging (%s::%s)" % \
2954 (pkg_count.curval, pkg_count.maxval,
2955 pkg.cpv, ebuild_path)
2956 short_msg = "emerge: (%s of %s) %s Merge" % \
2957 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2958 logger.log(msg, short_msg=short_msg)
# merge.execute() runs synchronously; the lock is released afterwards.
2961 rval = merge.execute()
2963 self._unlock_builddir()
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# EbuildExecuter: CompositeTask that runs the ebuild phase sequence:
# clean -> setup -> unpack -> (prepare, configure, compile, test, install),
# scheduling setup/unpack through the scheduler's serializing hooks.
2967 class EbuildExecuter(CompositeTask):
2969 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2971 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose inheritors are treated as "live" ebuilds (set contents
# elided here) — used below to serialize $DISTDIR access during unpack.
2973 _live_eclasses = frozenset([
# Start: record the tree and kick off the "clean" phase first.
2983 self._tree = "porttree"
2986 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2987 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2988 self._start_task(clean_phase, self._clean_phase_exit)
2990 def _clean_phase_exit(self, clean_phase):
2992 if self._default_exit(clean_phase) != os.EX_OK:
2997 scheduler = self.scheduler
2998 settings = self.settings
3001 # This initializes PORTAGE_LOG_FILE.
3002 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# The setup phase goes through scheduler.scheduleSetup rather than
# _start_task, so an exit listener is registered manually.
3004 setup_phase = EbuildPhase(background=self.background,
3005 pkg=pkg, phase="setup", scheduler=scheduler,
3006 settings=settings, tree=self._tree)
3008 setup_phase.addExitListener(self._setup_exit)
3009 self._current_task = setup_phase
3010 self.scheduler.scheduleSetup(setup_phase)
3012 def _setup_exit(self, setup_phase):
3014 if self._default_exit(setup_phase) != os.EX_OK:
3018 unpack_phase = EbuildPhase(background=self.background,
3019 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3020 settings=self.settings, tree=self._tree)
3022 if self._live_eclasses.intersection(self.pkg.inherited):
3023 # Serialize $DISTDIR access for live ebuilds since
3024 # otherwise they can interfere with each other.
3026 unpack_phase.addExitListener(self._unpack_exit)
3027 self._current_task = unpack_phase
3028 self.scheduler.scheduleUnpack(unpack_phase)
3031 self._start_task(unpack_phase, self._unpack_exit)
# After unpack: queue the remaining phases as a TaskSequence; EAPI 0/1
# ebuilds skip src_prepare and src_configure.
3033 def _unpack_exit(self, unpack_phase):
3035 if self._default_exit(unpack_phase) != os.EX_OK:
3039 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3042 phases = self._phases
3043 eapi = pkg.metadata["EAPI"]
3044 if eapi in ("0", "1"):
3045 # skip src_prepare and src_configure
3048 for phase in phases:
3049 ebuild_phases.add(EbuildPhase(background=self.background,
3050 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3051 settings=self.settings, tree=self._tree))
3053 self._start_task(ebuild_phases, self._default_final_exit)
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
3055 class EbuildMetadataPhase(SubProcess):
# Docstring fragment (opening quotes elided by extraction):
3058 Asynchronous interface for the ebuild "depend" phase which is
3059 used to extract metadata from the ebuild.
3062 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3063 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3066 _file_names = ("ebuild",)
3067 _files_dict = slot_dict_class(_file_names, prefix="")
# Start: determine EAPI up front (GLEP 55 filename suffix and/or ebuild
# header parsing, when those features are enabled) so unsupported EAPIs can
# be reported without spawning bash at all.
3071 settings = self.settings
3072 settings.setcpv(self.cpv)
3073 ebuild_path = self.ebuild_path
3076 if 'parse-eapi-glep-55' in settings.features:
3077 pf, eapi = portage._split_ebuild_name_glep55(
3078 os.path.basename(ebuild_path))
3079 if eapi is None and \
3080 'parse-eapi-ebuild-head' in settings.features:
3081 eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3082 mode='r', encoding='utf_8', errors='replace'))
3084 if eapi is not None:
3085 if not portage.eapi_is_supported(eapi):
# Unsupported EAPI: report metadata containing only EAPI and finish
# successfully without running the depend phase.
3086 self.metadata_callback(self.cpv, self.ebuild_path,
3087 self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3088 self.returncode = os.EX_OK
3092 settings.configdict['pkg']['EAPI'] = eapi
3094 debug = settings.get("PORTAGE_DEBUG") == "1"
3098 if self.fd_pipes is not None:
3099 fd_pipes = self.fd_pipes.copy()
3103 fd_pipes.setdefault(0, sys.stdin.fileno())
3104 fd_pipes.setdefault(1, sys.stdout.fileno())
3105 fd_pipes.setdefault(2, sys.stderr.fileno())
3107 # flush any pending output
3108 for fd in fd_pipes.itervalues():
3109 if fd == sys.stdout.fileno():
3111 if fd == sys.stderr.fileno():
3114 fd_pipes_orig = fd_pipes.copy()
3115 self._files = self._files_dict()
# Non-blocking pipe: the child writes metadata to slave_fd while the
# scheduler polls master_fd via _output_handler below.
3118 master_fd, slave_fd = os.pipe()
3119 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3120 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3122 fd_pipes[self._metadata_fd] = slave_fd
3124 self._raw_metadata = []
3125 files.ebuild = os.fdopen(master_fd, 'r')
3126 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3127 self._registered_events, self._output_handler)
3128 self._registered = True
3130 retval = portage.doebuild(ebuild_path, "depend",
3131 settings["ROOT"], settings, debug,
3132 mydbapi=self.portdb, tree="porttree",
3133 fd_pipes=fd_pipes, returnpid=True)
3137 if isinstance(retval, int):
3138 # doebuild failed before spawning
3140 self.returncode = retval
# returnpid=True: retval is a pid list; this process manages the child
# itself, so remove it from portage's global spawned_pids bookkeeping.
3144 self.pid = retval[0]
3145 portage.process.spawned_pids.remove(self.pid)
# Poll callback: accumulate raw metadata until EOF (empty read).
3147 def _output_handler(self, fd, event):
3149 if event & PollConstants.POLLIN:
3150 self._raw_metadata.append(self._files.ebuild.read())
3151 if not self._raw_metadata[-1]:
3155 self._unregister_if_appropriate(event)
3156 return self._registered
# On exit: parse one metadata value per line, keyed by portage.auxdbkeys
# order, and deliver it through metadata_callback.
3158 def _set_returncode(self, wait_retval):
3159 SubProcess._set_returncode(self, wait_retval)
3160 if self.returncode == os.EX_OK:
3161 metadata_lines = "".join(self._raw_metadata).splitlines()
3162 if len(portage.auxdbkeys) != len(metadata_lines):
3163 # Don't trust bash's returncode if the
3164 # number of lines is incorrect.
3167 metadata = izip(portage.auxdbkeys, metadata_lines)
3168 self.metadata = self.metadata_callback(self.cpv,
3169 self.ebuild_path, self.repo_path, metadata,
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# EbuildProcess: SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild() in a child process.
3172 class EbuildProcess(SpawnProcess):
3174 __slots__ = ("phase", "pkg", "settings", "tree")
3177 # Don't open the log file during the clean phase since the
3178 # open file can result in an nfs lock on $T/build.log which
3179 # prevents the clean phase from removing $T.
3180 if self.phase not in ("clean", "cleanrm"):
3181 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3182 SpawnProcess._start(self)
# Use a pty (when available) so tools drawing progress output work.
3184 def _pipe(self, fd_pipes):
3185 stdout_pipe = fd_pipes.get(1)
3186 got_pty, master_fd, slave_fd = \
3187 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3188 return (master_fd, slave_fd)
# Delegate the actual phase execution to portage.doebuild().
3190 def _spawn(self, args, **kwargs):
3192 root_config = self.pkg.root_config
3194 mydbapi = root_config.trees[tree].dbapi
3195 settings = self.settings
3196 ebuild_path = settings["EBUILD"]
3197 debug = settings.get("PORTAGE_DEBUG") == "1"
3199 rval = portage.doebuild(ebuild_path, self.phase,
3200 root_config.root, settings, debug,
3201 mydbapi=mydbapi, tree=tree, **kwargs)
# Post-exit: validate the exit-status file, honor test-fail-continue for
# failed test phases, and restore userpriv permissions.
3205 def _set_returncode(self, wait_retval):
3206 SpawnProcess._set_returncode(self, wait_retval)
3208 if self.phase not in ("clean", "cleanrm"):
3209 self.returncode = portage._doebuild_exit_status_check_and_log(
3210 self.settings, self.phase, self.returncode)
3212 if self.phase == "test" and self.returncode != os.EX_OK and \
3213 "test-fail-continue" in self.settings.features:
3214 self.returncode = os.EX_OK
3216 portage._post_phase_userpriv_perms(self.settings)
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# EbuildPhase: CompositeTask wrapping one EbuildProcess plus any post-phase
# commands (misc functions) configured for that phase.
3218 class EbuildPhase(CompositeTask):
3220 __slots__ = ("background", "pkg", "phase",
3221 "scheduler", "settings", "tree")
3223 _post_phase_cmds = portage._post_phase_cmds
3227 ebuild_process = EbuildProcess(background=self.background,
3228 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3229 settings=self.settings, tree=self.tree)
3231 self._start_task(ebuild_process, self._ebuild_exit)
3233 def _ebuild_exit(self, ebuild_process):
# For the install phase, run the build-log QA checks, logging to the
# build log when running in the background.
3235 if self.phase == "install":
3237 log_path = self.settings.get("PORTAGE_LOG_FILE")
3239 if self.background and log_path is not None:
3240 log_file = open(log_path, 'a')
3243 portage._check_build_log(self.settings, out=out)
3245 if log_file is not None:
3248 if self._default_exit(ebuild_process) != os.EX_OK:
3252 settings = self.settings
3254 if self.phase == "install":
3255 portage._post_src_install_chost_fix(settings)
3256 portage._post_src_install_uid_fix(settings)
# Chain the phase-specific post-phase commands, if any are configured.
3258 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3259 if post_phase_cmds is not None:
3260 post_phase = MiscFunctionsProcess(background=self.background,
3261 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3262 scheduler=self.scheduler, settings=settings)
3263 self._start_task(post_phase, self._post_phase_exit)
3266 self.returncode = ebuild_process.returncode
3267 self._current_task = None
3270 def _post_phase_exit(self, post_phase):
3271 if self._final_exit(post_phase) != os.EX_OK:
3272 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3274 self._current_task = None
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# EbuildBinpkg: EbuildProcess subclass that runs the "package" phase to
# produce a .tbz2 binary package, injecting it into the bintree on success.
3278 class EbuildBinpkg(EbuildProcess):
# Docstring fragment (opening quotes elided by extraction):
3280 This assumes that src_install() has successfully completed.
3282 __slots__ = ("_binpkg_tmpfile",)
3285 self.phase = "package"
3286 self.tree = "porttree"
3288 root_config = pkg.root_config
3289 portdb = root_config.trees["porttree"].dbapi
3290 bintree = root_config.trees["bintree"]
3291 ebuild_path = portdb.findname(self.pkg.cpv)
3292 settings = self.settings
3293 debug = settings.get("PORTAGE_DEBUG") == "1"
# Write to a pid-suffixed temp file in the pkgdir, then inject atomically
# in _set_returncode; prevent_collision guards the final path.
3295 bintree.prevent_collision(pkg.cpv)
3296 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3297 pkg.cpv + ".tbz2." + str(os.getpid()))
3298 self._binpkg_tmpfile = binpkg_tmpfile
3299 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3300 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3303 EbuildProcess._start(self)
3305 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3307 def _set_returncode(self, wait_retval):
3308 EbuildProcess._set_returncode(self, wait_retval)
3311 bintree = pkg.root_config.trees["bintree"]
3312 binpkg_tmpfile = self._binpkg_tmpfile
3313 if self.returncode == os.EX_OK:
3314 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# EbuildMerge: synchronous wrapper around portage.merge() that installs the
# built image into the live filesystem and updates the world set on success.
3316 class EbuildMerge(SlotObject):
3318 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3319 "pkg", "pkg_count", "pkg_path", "pretend",
3320 "scheduler", "settings", "tree", "world_atom")
# The statements below belong to execute() (its `def` line is elided).
3323 root_config = self.pkg.root_config
3324 settings = self.settings
3325 retval = portage.merge(settings["CATEGORY"],
3326 settings["PF"], settings["D"],
3327 os.path.join(settings["PORTAGE_BUILDDIR"],
3328 "build-info"), root_config.root, settings,
3329 myebuild=settings["EBUILD"],
3330 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3331 vartree=root_config.trees["vartree"],
3332 prev_mtimes=self.ldpath_mtimes,
3333 scheduler=self.scheduler,
3334 blockers=self.find_blockers)
3336 if retval == os.EX_OK:
3337 self.world_atom(self.pkg)
3342 def _log_success(self):
# NOTE(review): `pkg` is referenced below without a visible assignment —
# presumably `pkg = self.pkg` on an elided line; confirm against full source.
3344 pkg_count = self.pkg_count
3345 pkg_path = self.pkg_path
3346 logger = self.logger
3347 if "noclean" not in self.settings.features:
3348 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3349 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3350 logger.log((" === (%s of %s) " + \
3351 "Post-Build Cleaning (%s::%s)") % \
3352 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3353 short_msg=short_msg)
3354 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3355 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# PackageUninstall: AsynchronousTask that unmerges a single installed
# package via unmerge(), translating UninstallFailure into a returncode.
3357 class PackageUninstall(AsynchronousTask):
3359 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3363 unmerge(self.pkg.root_config, self.opts, "unmerge",
3364 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3365 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3366 writemsg_level=self._writemsg_level)
3367 except UninstallFailure, e:
3368 self.returncode = e.status
3370 self.returncode = os.EX_OK
# Route unmerge output to the console and/or the build log depending on
# background mode and whether PORTAGE_LOG_FILE is set.
3373 def _writemsg_level(self, msg, level=0, noiselevel=0):
3375 log_path = self.settings.get("PORTAGE_LOG_FILE")
3376 background = self.background
3378 if log_path is None:
# In the background with no log file, suppress sub-WARNING messages.
3379 if not (background and level < logging.WARNING):
3380 portage.util.writemsg_level(msg,
3381 level=level, noiselevel=noiselevel)
3384 portage.util.writemsg_level(msg,
3385 level=level, noiselevel=noiselevel)
3387 f = open(log_path, 'a')
# NOTE(review): interior lines are elided in this chunk; comments describe
# only the visible code.
# Binpkg: CompositeTask that installs a binary package: (optional prefetch
# wait) -> fetch -> verify -> clean -> setup -> extract image -> merge.
3393 class Binpkg(CompositeTask):
3395 __slots__ = ("find_blockers",
3396 "ldpath_mtimes", "logger", "opts",
3397 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3398 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3399 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Write to the console (unless in background) and also append to the
# build log when one is configured.
3401 def _writemsg_level(self, msg, level=0, noiselevel=0):
3403 if not self.background:
3404 portage.util.writemsg_level(msg,
3405 level=level, noiselevel=noiselevel)
3407 log_path = self.settings.get("PORTAGE_LOG_FILE")
3408 if log_path is not None:
3409 f = open(log_path, 'a')
# The statements below belong to a start method whose `def` line is elided:
# set up build-dir paths under PORTAGE_TMPDIR and the synthesized EBUILD
# location inside build-info, then handle any running prefetcher.
3418 settings = self.settings
3419 settings.setcpv(pkg)
3420 self._tree = "bintree"
3421 self._bintree = self.pkg.root_config.trees[self._tree]
3422 self._verify = not self.opts.pretend
3424 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3425 "portage", pkg.category, pkg.pf)
3426 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3427 pkg=pkg, settings=settings)
3428 self._image_dir = os.path.join(dir_path, "image")
3429 self._infloc = os.path.join(dir_path, "build-info")
3430 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3431 settings["EBUILD"] = self._ebuild_path
3432 debug = settings.get("PORTAGE_DEBUG") == "1"
3433 portage.doebuild_environment(self._ebuild_path, "setup",
3434 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3435 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3437 # The prefetcher has already completed or it
3438 # could be running now. If it's running now,
3439 # wait for it to complete since it holds
3440 # a lock on the file being fetched. The
3441 # portage.locks functions are only designed
3442 # to work between separate processes. Since
3443 # the lock is held by the current process,
3444 # use the scheduler and fetcher methods to
3445 # synchronize with the fetcher.
3446 prefetcher = self.prefetcher
3447 if prefetcher is None:
3449 elif not prefetcher.isAlive():
3451 elif prefetcher.poll() is None:
3453 waiting_msg = ("Fetching '%s' " + \
3454 "in the background. " + \
3455 "To view fetch progress, run `tail -f " + \
3456 "/var/log/emerge-fetch.log` in another " + \
3457 "terminal.") % prefetcher.pkg_path
3458 msg_prefix = colorize("GOOD", " * ")
3459 from textwrap import wrap
3460 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3461 for line in wrap(waiting_msg, 65))
3462 if not self.background:
3463 writemsg(waiting_msg, noiselevel=-1)
3465 self._current_task = prefetcher
3466 prefetcher.addExitListener(self._prefetch_exit)
3469 self._prefetch_exit(prefetcher)
# After any prefetch: lock the build dir (unless pretend/fetchonly) and
# start a BinpkgFetcher; skip straight to its exit handler when no remote
# fetch is needed.
3471 def _prefetch_exit(self, prefetcher):
3474 pkg_count = self.pkg_count
3475 if not (self.opts.pretend or self.opts.fetchonly):
3476 self._build_dir.lock()
3477 # If necessary, discard old log so that we don't
3479 self._build_dir.clean_log()
3480 # Initialize PORTAGE_LOG_FILE.
3481 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3482 fetcher = BinpkgFetcher(background=self.background,
3483 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3484 pretend=self.opts.pretend, scheduler=self.scheduler)
3485 pkg_path = fetcher.pkg_path
3486 self._pkg_path = pkg_path
3488 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3490 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3491 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3492 short_msg = "emerge: (%s of %s) %s Fetch" % \
3493 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3494 self.logger.log(msg, short_msg=short_msg)
3495 self._start_task(fetcher, self._fetcher_exit)
3498 self._fetcher_exit(fetcher)
3500 def _fetcher_exit(self, fetcher):
3502 # The fetcher only has a returncode when
3503 # --getbinpkg is enabled.
3504 if fetcher.returncode is not None:
3505 self._fetched_pkg = True
3506 if self._default_exit(fetcher) != os.EX_OK:
3507 self._unlock_builddir()
3511 if self.opts.pretend:
3512 self._current_task = None
3513 self.returncode = os.EX_OK
# Verify the downloaded package's digest before using it.
3521 logfile = self.settings.get("PORTAGE_LOG_FILE")
3522 verifier = BinpkgVerifier(background=self.background,
3523 logfile=logfile, pkg=self.pkg)
3524 self._start_task(verifier, self._verifier_exit)
3527 self._verifier_exit(verifier)
3529 def _verifier_exit(self, verifier):
3530 if verifier is not None and \
3531 self._default_exit(verifier) != os.EX_OK:
3532 self._unlock_builddir()
3536 logger = self.logger
3538 pkg_count = self.pkg_count
3539 pkg_path = self._pkg_path
# A freshly fetched package is injected into the local bintree.
3541 if self._fetched_pkg:
3542 self._bintree.inject(pkg.cpv, filename=pkg_path)
3544 if self.opts.fetchonly:
3545 self._current_task = None
3546 self.returncode = os.EX_OK
3550 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3551 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3552 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3553 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3554 logger.log(msg, short_msg=short_msg)
3557 settings = self.settings
3558 ebuild_phase = EbuildPhase(background=self.background,
3559 pkg=pkg, phase=phase, scheduler=self.scheduler,
3560 settings=settings, tree=self._tree)
3562 self._start_task(ebuild_phase, self._clean_exit)
# After clean: recreate the build dirs, unpack the xpak metadata into
# build-info, fill in any missing CATEGORY/PF entries, record the md5sum,
# and schedule the setup phase.
3564 def _clean_exit(self, clean_phase):
3565 if self._default_exit(clean_phase) != os.EX_OK:
3566 self._unlock_builddir()
3570 dir_path = self._build_dir.dir_path
3572 infloc = self._infloc
3574 pkg_path = self._pkg_path
3577 for mydir in (dir_path, self._image_dir, infloc):
3578 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3579 gid=portage.data.portage_gid, mode=dir_mode)
3581 # This initializes PORTAGE_LOG_FILE.
3582 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3583 self._writemsg_level(">>> Extracting info\n")
3585 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3586 check_missing_metadata = ("CATEGORY", "PF")
3587 missing_metadata = set()
3588 for k in check_missing_metadata:
3589 v = pkg_xpak.getfile(k)
3591 missing_metadata.add(k)
3593 pkg_xpak.unpackinfo(infloc)
3594 for k in missing_metadata:
3602 f = open(os.path.join(infloc, k), 'wb')
3608 # Store the md5sum in the vdb.
3609 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3611 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3615 # This gives bashrc users an opportunity to do various things
3616 # such as remove binary packages after they're installed.
3617 settings = self.settings
3618 settings.setcpv(self.pkg)
3619 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3620 settings.backup_changes("PORTAGE_BINPKG_FILE")
3623 setup_phase = EbuildPhase(background=self.background,
3624 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3625 settings=settings, tree=self._tree)
3627 setup_phase.addExitListener(self._setup_exit)
3628 self._current_task = setup_phase
3629 self.scheduler.scheduleSetup(setup_phase)
3631 def _setup_exit(self, setup_phase):
3632 if self._default_exit(setup_phase) != os.EX_OK:
3633 self._unlock_builddir()
3637 extractor = BinpkgExtractorAsync(background=self.background,
3638 image_dir=self._image_dir,
3639 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3640 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3641 self._start_task(extractor, self._extractor_exit)
3643 def _extractor_exit(self, extractor):
3644 if self._final_exit(extractor) != os.EX_OK:
3645 self._unlock_builddir()
3646 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# No build-dir lock is held in pretend/fetchonly mode, so there is
# nothing to release in that case.
3650 def _unlock_builddir(self):
3651 if self.opts.pretend or self.opts.fetchonly:
3653 portage.elog.elog_process(self.pkg.cpv, self.settings)
3654 self._build_dir.unlock()
# The statements below belong to an install/merge method whose `def` line
# is elided: merge the extracted image, then drop the temporary
# PORTAGE_BINPKG_FILE setting and release the lock.
3658 # This gives bashrc users an opportunity to do various things
3659 # such as remove binary packages after they're installed.
3660 settings = self.settings
3661 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3662 settings.backup_changes("PORTAGE_BINPKG_FILE")
3664 merge = EbuildMerge(find_blockers=self.find_blockers,
3665 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3666 pkg=self.pkg, pkg_count=self.pkg_count,
3667 pkg_path=self._pkg_path, scheduler=self.scheduler,
3668 settings=settings, tree=self._tree, world_atom=self.world_atom)
3671 retval = merge.execute()
3673 settings.pop("PORTAGE_BINPKG_FILE", None)
3674 self._unlock_builddir()
3677 class BinpkgFetcher(SpawnProcess):
3679 __slots__ = ("pkg", "pretend",
3680 "locked", "pkg_path", "_lock_obj")
3682 def __init__(self, **kwargs):
3683 SpawnProcess.__init__(self, **kwargs)
3685 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3693 pretend = self.pretend
3694 bintree = pkg.root_config.trees["bintree"]
3695 settings = bintree.settings
3696 use_locks = "distlocks" in settings.features
3697 pkg_path = self.pkg_path
3700 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3703 exists = os.path.exists(pkg_path)
3704 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3705 if not (pretend or resume):
3706 # Remove existing file or broken symlink.
3712 # urljoin doesn't work correctly with
3713 # unrecognized protocols like sftp
3714 if bintree._remote_has_index:
3715 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3717 rel_uri = pkg.cpv + ".tbz2"
3718 uri = bintree._remote_base_uri.rstrip("/") + \
3719 "/" + rel_uri.lstrip("/")
3721 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3722 "/" + pkg.pf + ".tbz2"
3725 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3726 self.returncode = os.EX_OK
3730 protocol = urlparse.urlparse(uri)[0]
3731 fcmd_prefix = "FETCHCOMMAND"
3733 fcmd_prefix = "RESUMECOMMAND"
3734 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3736 fcmd = settings.get(fcmd_prefix)
3739 "DISTDIR" : os.path.dirname(pkg_path),
3741 "FILE" : os.path.basename(pkg_path)
3744 fetch_env = dict(settings.iteritems())
3745 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3746 for x in shlex.split(fcmd)]
3748 if self.fd_pipes is None:
3750 fd_pipes = self.fd_pipes
3752 # Redirect all output to stdout since some fetchers like
3753 # wget pollute stderr (if portage detects a problem then it
3754 # can send it's own message to stderr).
3755 fd_pipes.setdefault(0, sys.stdin.fileno())
3756 fd_pipes.setdefault(1, sys.stdout.fileno())
3757 fd_pipes.setdefault(2, sys.stdout.fileno())
3759 self.args = fetch_args
3760 self.env = fetch_env
3761 SpawnProcess._start(self)
	def _set_returncode(self, wait_retval):
		"""
		Post-wait hook: after SpawnProcess records the fetcher's exit
		status, try to sync the local file's mtime with the remote
		package's advertised MTIME so later invocations can detect an
		already-complete fetch.
		"""
		SpawnProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			# If possible, update the mtime to match the remote package if
			# the fetcher didn't already do it automatically.
			bintree = self.pkg.root_config.trees["bintree"]
			if bintree._remote_has_index:
				remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
				if remote_mtime is not None:
					# NOTE(review): in the full source these conversions and
					# the utime() call appear wrapped in try/except
					# (ValueError/OSError); that handling is elided in this
					# excerpt — confirm against upstream before relying on it.
					remote_mtime = long(remote_mtime)
					local_mtime = long(os.stat(self.pkg_path).st_mtime)
					if remote_mtime != local_mtime:
						os.utime(self.pkg_path,
							(remote_mtime, remote_mtime))
		# NOTE(review): the "def lock(self):" header and the docstring
		# delimiters for the text below are elided in this excerpt.
		#
		# This raises an AlreadyLocked exception if lock() is called
		# while a lock is already held. In order to avoid this, call
		# unlock() or check whether the "locked" attribute is True
		# or False before calling lock().
		if self._lock_obj is not None:
			raise self.AlreadyLocked((self._lock_obj,))

		# Acquire an exclusive lock on the package file path.
		self._lock_obj = portage.locks.lockfile(
			self.pkg_path, wantnewlockfile=1)

	# Raised by lock() when a lock is already held (body elided here).
	class AlreadyLocked(portage.exception.PortageException):

		# NOTE(review): "def unlock(self):" header elided in this excerpt.
		if self._lock_obj is None:
			# (early return elided)
		portage.locks.unlockfile(self._lock_obj)
		self._lock_obj = None
class BinpkgVerifier(AsynchronousTask):
	# Verifies a fetched binary package against its recorded digests,
	# optionally redirecting output to a log file when in the background.
	__slots__ = ("logfile", "pkg",)

		# NOTE(review): "def _start(self):" and the docstring delimiters
		# are elided in this excerpt; the text below is docstring content.
		#
		# Note: Unlike a normal AsynchronousTask.start() method,
		# this one does all work synchronously. The returncode
		# attribute will be set before it returns.

		root_config = pkg.root_config
		bintree = root_config.trees["bintree"]
		# Save the original streams so they can be restored after the
		# digest check, which may be redirected into a log file below.
		stdout_orig = sys.stdout
		stderr_orig = sys.stderr
		if self.background and self.logfile is not None:
			log_file = open(self.logfile, 'a')
		if log_file is not None:
			sys.stdout = log_file
			sys.stderr = log_file
			# NOTE(review): the enclosing try: for the handlers below is
			# elided in this excerpt.
			bintree.digestCheck(pkg)
		except portage.exception.FileNotFound:
			writemsg("!!! Fetching Binary failed " + \
				"for '%s'\n" % pkg.cpv, noiselevel=-1)
		except portage.exception.DigestException, e:
			# e.value carries (filename, reason, got, expected).
			writemsg("\n!!! Digest verification failed:\n",
			writemsg("!!! %s\n" % e.value[0],
			writemsg("!!! Reason: %s\n" % e.value[1],
			writemsg("!!! Got: %s\n" % e.value[2],
			writemsg("!!! Expected: %s\n" % e.value[3],
		if rval != os.EX_OK:
			# Move the corrupt file out of the way so a later fetch
			# attempt can retry cleanly.
			pkg_path = bintree.getname(pkg.cpv)
			head, tail = os.path.split(pkg_path)
			temp_filename = portage._checksum_failure_temp_file(head, tail)
			writemsg("File renamed to '%s'\n" % (temp_filename,),
		# Restore the original output streams.
		sys.stdout = stdout_orig
		sys.stderr = stderr_orig
		if log_file is not None:
		self.returncode = rval
class BinpkgPrefetcher(CompositeTask):
	# Fetch and then digest-verify a binary package, injecting it into
	# the bintree on success so it is visible to later merge phases.

	__slots__ = ("pkg",) + \
		("pkg_path", "_bintree",)

		# NOTE(review): "def _start(self):" header elided in this excerpt.
		self._bintree = self.pkg.root_config.trees["bintree"]
		fetcher = BinpkgFetcher(background=self.background,
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
			scheduler=self.scheduler)
		self.pkg_path = fetcher.pkg_path
		self._start_task(fetcher, self._fetcher_exit)

	def _fetcher_exit(self, fetcher):
		# Chain into digest verification once the fetch succeeds.
		if self._default_exit(fetcher) != os.EX_OK:
			# (early wait()/return elided in this excerpt)
		verifier = BinpkgVerifier(background=self.background,
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
		self._start_task(verifier, self._verifier_exit)

	def _verifier_exit(self, verifier):
		if self._default_exit(verifier) != os.EX_OK:
			# (early wait()/return elided in this excerpt)
		# Make the verified package visible to the binary package tree.
		self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)

		self._current_task = None
		self.returncode = os.EX_OK
class BinpkgExtractorAsync(SpawnProcess):
	"""
	Extract a binary package tarball into image_dir by streaming
	bzip2 decompression into tar through a bash pipeline.
	"""

	__slots__ = ("image_dir", "pkg", "pkg_path")

	_shell_binary = portage.const.BASH_BINARY

	def _start(self):
		# Use pipefail so a bzip2 (decompression) failure is reported as a
		# nonzero exit status even when tar itself exits successfully after
		# consuming a truncated stream; otherwise a corrupt package could be
		# silently extracted as incomplete.
		self.args = [self._shell_binary, "-c",
			"set -o pipefail; bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
			(portage._shell_quote(self.pkg_path),
			portage._shell_quote(self.image_dir))]

		# Run the pipeline with the package's configured build environment.
		self.env = self.pkg.root_config.settings.environ()
		SpawnProcess._start(self)
class MergeListItem(CompositeTask):
	"""
	Run the build/install task for one entry of the merge list
	(EbuildBuild for ebuilds, Binpkg for binary packages).

	TODO: For parallel scheduling, everything here needs asynchronous
	execution support (start, poll, and wait methods).
	"""

	__slots__ = ("args_set",
		"binpkg_opts", "build_opts", "config_pool", "emerge_opts",
		"find_blockers", "logger", "mtimedb", "pkg",
		"pkg_count", "pkg_to_replace", "prefetcher",
		"settings", "statusMessage", "world_atom") + \

		# NOTE(review): the __slots__ continuation and the
		# "def _start(self):" header are elided in this excerpt.
		build_opts = self.build_opts

			# uninstall, executed by self.merge()
			self.returncode = os.EX_OK

		# Bind frequently used attributes to locals for readability.
		args_set = self.args_set
		find_blockers = self.find_blockers
		logger = self.logger
		mtimedb = self.mtimedb
		pkg_count = self.pkg_count
		scheduler = self.scheduler
		settings = self.settings
		world_atom = self.world_atom
		ldpath_mtimes = mtimedb["ldpath"]

		# Compose the "(N of M)" progress banner for this package.
		action_desc = "Emerging"
		if pkg.type_name == "binary":
			action_desc += " binary"
		if build_opts.fetchonly:
			action_desc = "Fetching"

		msg = "%s (%s of %s) %s" % \
			colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
			colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
			colorize("GOOD", pkg.cpv))

		# Mention the originating repository when it differs from PORTDIR.
		portdb = pkg.root_config.trees["porttree"].dbapi
		portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
		if portdir_repo_name:
			pkg_repo_name = pkg.metadata.get("repository")
			if pkg_repo_name != portdir_repo_name:
				if not pkg_repo_name:
					pkg_repo_name = "unknown repo"
				msg += " from %s" % pkg_repo_name

			msg += " %s %s" % (preposition, pkg.root)

		if not build_opts.pretend:
			self.statusMessage(msg)
			logger.log(" >>> emerge (%s of %s) %s to %s" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		if pkg.type_name == "ebuild":

			build = EbuildBuild(args_set=args_set,
				background=self.background,
				config_pool=self.config_pool,
				find_blockers=find_blockers,
				ldpath_mtimes=ldpath_mtimes, logger=logger,
				opts=build_opts, pkg=pkg, pkg_count=pkg_count,
				prefetcher=self.prefetcher, scheduler=scheduler,
				settings=settings, world_atom=world_atom)

			self._install_task = build
			self._start_task(build, self._default_final_exit)

		elif pkg.type_name == "binary":

			binpkg = Binpkg(background=self.background,
				find_blockers=find_blockers,
				ldpath_mtimes=ldpath_mtimes, logger=logger,
				opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
				prefetcher=self.prefetcher, settings=settings,
				scheduler=scheduler, world_atom=world_atom)

			self._install_task = binpkg
			self._start_task(binpkg, self._default_final_exit)

		# NOTE(review): "def poll(self):" header elided in this excerpt.
		self._install_task.poll()
		return self.returncode

		# NOTE(review): "def wait(self):" header elided in this excerpt.
		self._install_task.wait()
		return self.returncode

		# NOTE(review): "def merge(self):" header and its prologue are
		# elided in this excerpt.
		build_opts = self.build_opts
		find_blockers = self.find_blockers
		logger = self.logger
		mtimedb = self.mtimedb
		pkg_count = self.pkg_count
		prefetcher = self.prefetcher
		scheduler = self.scheduler
		settings = self.settings
		world_atom = self.world_atom
		ldpath_mtimes = mtimedb["ldpath"]

		# An actual uninstall only happens in modes that really merge.
		if not (build_opts.buildpkgonly or \
			build_opts.fetchonly or build_opts.pretend):

			uninstall = PackageUninstall(background=self.background,
				ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
				pkg=pkg, scheduler=scheduler, settings=settings)

			retval = uninstall.wait()
			if retval != os.EX_OK:
				# (error return elided in this excerpt)

		if build_opts.fetchonly or \
			build_opts.buildpkgonly:
			return self.returncode

		# Delegate to the underlying install task.
		retval = self._install_task.install()
class PackageMerge(AsynchronousTask):
	"""
	Display a status banner and run the wrapped merge synchronously.

	TODO: Implement asynchronous merge so that the scheduler can
	run while a merge is executing.
	"""

	__slots__ = ("merge",)

		# NOTE(review): "def _start(self):" header elided in this excerpt.
		pkg = self.merge.pkg
		pkg_count = self.merge.pkg_count

			# (uninstall branch; condition elided in this excerpt)
			action_desc = "Uninstalling"
			preposition = "from"

			action_desc = "Installing"

		counter_str = "(%s of %s) " % \
			(colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
			colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))

			colorize("GOOD", pkg.cpv))

			msg += " %s %s" % (preposition, pkg.root)

		# Suppress the banner for modes that never actually merge.
		if not self.merge.build_opts.fetchonly and \
			not self.merge.build_opts.pretend and \
			not self.merge.build_opts.buildpkgonly:
			self.merge.statusMessage(msg)

		# Run the merge and propagate its exit status as our returncode.
		self.returncode = self.merge.merge()
class DependencyArg(object):
	# Base class for command-line dependency arguments (atoms, package
	# instances, sets).  Holds the raw argument and its RootConfig.
	def __init__(self, arg=None, root_config=None):
		# NOTE(review): "self.arg = arg" appears to be elided in this
		# excerpt — __str__ below reads self.arg.
		self.root_config = root_config

		# NOTE(review): "def __str__(self):" header elided in this excerpt.
		return str(self.arg)
class AtomArg(DependencyArg):
	# A dependency argument given as a single dependency atom.
	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# NOTE(review): "self.atom = atom" appears to be elided here.
		# Normalize to a portage.dep.Atom instance.
		if not isinstance(self.atom, portage.dep.Atom):
			self.atom = portage.dep.Atom(self.atom)
		# Expose a single-atom tuple for uniform handling with SetArg.set.
		self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A dependency argument wrapping a specific Package instance,
	pinned via an exact "=<cpv>" atom."""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Build an atom that matches exactly this package version.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		# Single-atom tuple, mirroring the AtomArg/SetArg interface.
		self.set = (exact_atom,)
class SetArg(DependencyArg):
	# A dependency argument naming a package set (SETPREFIX-prefixed).
	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# NOTE(review): "self.set = set" appears to be elided here.
		# Strip the set prefix from the raw argument to get the set name.
		self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
	# A single dependency edge: an atom (possibly a blocker) requested
	# by a parent node in a given root, at a given graph depth.
	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")
	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# Default the priority when the caller did not supply one.
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
			# NOTE(review): "self.depth = 0" appears to be elided here.
class BlockerCache(portage.cache.mappings.MutableMapping):
	"""This caches blockers of installed packages so that dep_check does not
	have to be done for every single installed package on every invocation of
	emerge. The cache is invalidated whenever it is detected that something
	has changed that might alter the results of dep_check() calls:
	1) the set of installed packages (including COUNTER) has changed
	2) the old-style virtuals have changed
	"""

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_cache_threshold = 5

	class BlockerData(object):
		# Per-package cache record: the vdb COUNTER at caching time plus
		# the package's blocker atoms.

		__slots__ = ("__weakref__", "atoms", "counter")

		def __init__(self, counter, atoms):
			self.counter = counter
			# NOTE(review): "self.atoms = atoms" appears to be elided here.

	def __init__(self, myroot, vardb):
		self._virtuals = vardb.settings.getvirtuals()
		self._cache_filename = os.path.join(myroot,
			portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
		self._cache_version = "1"
		self._cache_data = None
		# cpvs whose entries changed since the last flush().
		self._modified = set()

			# NOTE(review): the enclosing try: for loading the pickle is
			# elided in this excerpt.
			f = open(self._cache_filename, mode='rb')
			mypickle = pickle.Unpickler(f)
				# Disable global lookups as a hardening measure against
				# untrusted pickle content.
				mypickle.find_global = None
			except AttributeError:
				# TODO: If py3k, override Unpickler.find_class().
			self._cache_data = mypickle.load()
		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
			if isinstance(e, pickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._cache_filename, str(e)), noiselevel=-1)

		# A loaded cache is only trusted when its structure and version
		# match what this code writes.
		cache_valid = self._cache_data and \
			isinstance(self._cache_data, dict) and \
			self._cache_data.get("version") == self._cache_version and \
			isinstance(self._cache_data.get("blockers"), dict)
			# Validate all the atoms and counters so that
			# corruption is detected as soon as possible.
			invalid_items = set()
			for k, v in self._cache_data["blockers"].iteritems():
				if not isinstance(k, basestring):
					invalid_items.add(k)
					if portage.catpkgsplit(k) is None:
						invalid_items.add(k)
				except portage.exception.InvalidData:
					invalid_items.add(k)
				if not isinstance(v, tuple) or \
					invalid_items.add(k)
					if not isinstance(counter, (int, long)):
						invalid_items.add(k)
				if not isinstance(atoms, (list, tuple)):
					invalid_items.add(k)
					invalid_atom = False
					if not isinstance(atom, basestring):
					if atom[:1] != "!" or \
						not portage.isvalidatom(
						atom, allow_blockers=True):
					invalid_items.add(k)

			# Drop every entry that failed validation above.
			for k in invalid_items:
				del self._cache_data["blockers"][k]
			if not self._cache_data["blockers"]:

			# Start from an empty cache when nothing valid was loaded.
			self._cache_data = {"version":self._cache_version}
			self._cache_data["blockers"] = {}
			self._cache_data["virtuals"] = self._virtuals
			self._modified.clear()

		# NOTE(review): "def flush(self):" header elided in this excerpt.
		"""If the current user has permission and the internal blocker cache
		has been updated, save it to disk and mark it unmodified. This is
		called by emerge after it has processed blockers for all installed
		packages.
		Currently, the cache is only written if the user has superuser
		privileges (since that's required to obtain a lock), but all users
		have read access and benefit from faster blocker lookups (as long as
		the entire cache is still valid). The cache is stored as a pickled
		dict object with the following format:

			"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
			"virtuals" : vardb.settings.getvirtuals()
		"""
		if len(self._modified) >= self._cache_threshold and \
			f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
			pickle.dump(self._cache_data, f, protocol=2)
			portage.util.apply_secpass_permissions(
				self._cache_filename, gid=portage.portage_gid, mode=0644)
			except (IOError, OSError), e:
			self._modified.clear()

	def __setitem__(self, cpv, blocker_data):
		"""
		Update the cache and mark it as modified for a future call to
		flush().

		@param cpv: Package for which to cache blockers.
		@param blocker_data: An object with counter and atoms attributes.
		@type blocker_data: BlockerData
		"""
		self._cache_data["blockers"][cpv] = \
			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
		self._modified.add(cpv)

		# NOTE(review): "def __iter__(self):" header elided in this excerpt.
		if self._cache_data is None:
			# triggered by python-trace
		return iter(self._cache_data["blockers"])

	def __delitem__(self, cpv):
		del self._cache_data["blockers"][cpv]

	def __getitem__(self, cpv):
		"""
		@returns: An object with counter and atoms attributes.
		"""
		return self.BlockerData(*self._cache_data["blockers"][cpv])
class BlockerDB(object):
	# Computes, with the help of BlockerCache, which installed packages
	# block (or are blocked by) a package that is about to be merged.

	def __init__(self, root_config):
		self._root_config = root_config
		self._vartree = root_config.trees["vartree"]
		self._portdb = root_config.trees["porttree"].dbapi

		# Lazily created by _get_fake_vartree().
		self._dep_check_trees = None
		self._fake_vartree = None

	def _get_fake_vartree(self, acquire_lock=0):
		# Create (on first use) and sync a FakeVartree plus the matching
		# trees mapping used by dep_check().
		fake_vartree = self._fake_vartree
		if fake_vartree is None:
			fake_vartree = FakeVartree(self._root_config,
				acquire_lock=acquire_lock)
			self._fake_vartree = fake_vartree
			self._dep_check_trees = { self._vartree.root : {
				"porttree"    :  fake_vartree,
				"vartree"     :  fake_vartree,
			fake_vartree.sync(acquire_lock=acquire_lock)

	def findInstalledBlockers(self, new_pkg, acquire_lock=0):
		# Return the set of installed packages that block new_pkg or that
		# new_pkg itself blocks.
		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
		settings = self._vartree.settings
		stale_cache = set(blocker_cache)
		fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
		dep_check_trees = self._dep_check_trees
		vardb = fake_vartree.dbapi
		installed_pkgs = list(vardb)

		# Refresh the blocker cache for every installed package.  A cache
		# entry is stale when the package's COUNTER has changed.
		for inst_pkg in installed_pkgs:
			stale_cache.discard(inst_pkg.cpv)
			cached_blockers = blocker_cache.get(inst_pkg.cpv)
			if cached_blockers is not None and \
				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
				cached_blockers = None
			if cached_blockers is not None:
				blocker_atoms = cached_blockers.atoms
				# Use aux_get() to trigger FakeVartree global
				# updates on *DEPEND when appropriate.
				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
				# Temporarily relax strict dep checking for installed pkgs.
				portage.dep._dep_check_strict = False
				success, atoms = portage.dep_check(depstr,
					vardb, settings, myuse=inst_pkg.use.enabled,
					trees=dep_check_trees, myroot=inst_pkg.root)
				portage.dep._dep_check_strict = True
					pkg_location = os.path.join(inst_pkg.root,
						portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
					portage.writemsg("!!! %s/*DEPEND: %s\n" % \
						(pkg_location, atoms), noiselevel=-1)

				# Only blocker atoms ("!...") are cached.
				blocker_atoms = [atom for atom in atoms \
					if atom.startswith("!")]
				blocker_atoms.sort()
				counter = long(inst_pkg.metadata["COUNTER"])
				blocker_cache[inst_pkg.cpv] = \
					blocker_cache.BlockerData(counter, blocker_atoms)
		# Drop entries for packages no longer installed.
		for cpv in stale_cache:
			del blocker_cache[cpv]
		blocker_cache.flush()

		# Map each blocker atom to the installed packages declaring it.
		blocker_parents = digraph()
		for pkg in installed_pkgs:
			for blocker_atom in blocker_cache[pkg.cpv].atoms:
				blocker_atom = blocker_atom.lstrip("!")
				blocker_atoms.append(blocker_atom)
				blocker_parents.add(blocker_atom, pkg)

		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		blocking_pkgs = set()
		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
			blocking_pkgs.update(blocker_parents.parent_nodes(atom))

		# Check for blockers in the other direction.
		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
		portage.dep._dep_check_strict = False
		success, atoms = portage.dep_check(depstr,
			vardb, settings, myuse=new_pkg.use.enabled,
			trees=dep_check_trees, myroot=new_pkg.root)
		portage.dep._dep_check_strict = True
			# We should never get this far with invalid deps.
			show_invalid_depstring_notice(new_pkg, depstr, atoms)

		blocker_atoms = [atom.lstrip("!") for atom in atoms \
		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		# Any installed package matched by new_pkg's blockers is blocking.
		for inst_pkg in installed_pkgs:
				blocker_atoms.iterAtomsForPackage(inst_pkg).next()
			except (portage.exception.InvalidDependString, StopIteration):
				blocking_pkgs.add(inst_pkg)

		return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
	# Print a detailed error when a package's *DEPEND string cannot be
	# parsed.  The guidance differs for already-installed ("nomerge")
	# packages versus packages about to be merged.

	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
		"\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
	p_type, p_root, p_key, p_status = parent_node
	# NOTE(review): "msg = []" initialization appears to be elided here.
	if p_status == "nomerge":
		category, pf = portage.catsplit(p_key)
		pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
		msg.append("Portage is unable to process the dependencies of the ")
		msg.append("'%s' package. " % p_key)
		msg.append("In order to correct this problem, the package ")
		msg.append("should be uninstalled, reinstalled, or upgraded. ")
		msg.append("As a temporary workaround, the --nodeps option can ")
		msg.append("be used to ignore all dependencies. For reference, ")
		msg.append("the problematic dependencies can be found in the ")
		msg.append("*DEPEND files located in '%s/'." % pkg_location)
		# NOTE(review): the "else:" branch header appears elided here.
		msg.append("This package can not be installed. ")
		msg.append("Please notify the '%s' package maintainer " % p_key)
		msg.append("about this problem.")

	# Wrap the advisory text at 72 columns and emit at ERROR level.
	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
class PackageVirtualDbapi(portage.dbapi):
	"""
	A dbapi-like interface class that represents the state of the installed
	package database as new packages are installed, replacing any packages
	that previously existed in the same slot. The main difference between
	this class and fakedbapi is that this one uses Package instances
	internally (passed in via cpv_inject() and cpv_remove() calls).
	"""
	def __init__(self, settings):
		portage.dbapi.__init__(self)
		self.settings = settings
		# Memoizes match() results, keyed by the original dep string.
		self._match_cache = {}
		# NOTE(review): _cp_map/_cpv_map initialization elided here.

		# NOTE(review): "def clear(self):" header and docstring delimiters
		# elided in this excerpt.
		# Remove all packages.
		self._cp_map.clear()
		self._cpv_map.clear()

		# NOTE(review): "def copy(self):" header elided in this excerpt.
		obj = PackageVirtualDbapi(self.settings)
		obj._match_cache = self._match_cache.copy()
		obj._cp_map = self._cp_map.copy()
		# Deep-copy the per-cp lists so mutations don't leak across copies.
		for k, v in obj._cp_map.iteritems():
			obj._cp_map[k] = v[:]
		obj._cpv_map = self._cpv_map.copy()

		# NOTE(review): "def __iter__(self):" header elided in this excerpt.
		return self._cpv_map.itervalues()

	def __contains__(self, item):
		existing = self._cpv_map.get(item.cpv)
		if existing is not None and \
			# (comparison and return statements elided in this excerpt)

	def get(self, item, default=None):
		# Accepts a Package, or a (type_name, root, cpv, operation) tuple.
		cpv = getattr(item, "cpv", None)
			type_name, root, cpv, operation = item

		existing = self._cpv_map.get(cpv)
		if existing is not None and \
			# (comparison and return statements elided in this excerpt)

	def match_pkgs(self, atom):
		# Like match(), but returns Package instances instead of cpvs.
		return [self._cpv_map[cpv] for cpv in self.match(atom)]

	def _clear_cache(self):
		# Invalidate memoized category and match data after a mutation.
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		# Memoized atom matching on top of the base dbapi implementation.
		result = self._match_cache.get(origdep)
		if result is not None:
			# (cached-result return elided in this excerpt)
		result = portage.dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = result
		# (return elided in this excerpt)

	def cpv_exists(self, cpv):
		return cpv in self._cpv_map

	def cp_list(self, mycp, use_cache=1):
		cachelist = self._match_cache.get(mycp)
		# cp_list() doesn't expand old-style virtuals
		if cachelist and cachelist[0].startswith(mycp):
			# (cached-result return elided in this excerpt)
		cpv_list = self._cp_map.get(mycp)
		if cpv_list is None:
			# (empty-result handling elided in this excerpt)
		cpv_list = [pkg.cpv for pkg in cpv_list]
		self._cpv_sort_ascending(cpv_list)
		# Empty results for old-style virtuals are not cached.
		if not (not cpv_list and mycp.startswith("virtual/")):
			self._match_cache[mycp] = cpv_list
		# (return elided in this excerpt)

		# NOTE(review): "def cp_all(self):" header elided in this excerpt.
		return list(self._cp_map)

		# NOTE(review): "def cpv_all(self):" header elided in this excerpt.
		return list(self._cpv_map)

	def cpv_inject(self, pkg):
		# Install pkg, replacing any existing package in the same slot.
		cp_list = self._cp_map.get(pkg.cp)
		self._cp_map[pkg.cp] = cp_list
		e_pkg = self._cpv_map.get(pkg.cpv)
		if e_pkg is not None:
			self.cpv_remove(e_pkg)
		for e_pkg in cp_list:
			if e_pkg.slot_atom == pkg.slot_atom:
				self.cpv_remove(e_pkg)
		self._cpv_map[pkg.cpv] = pkg

	def cpv_remove(self, pkg):
		# Remove pkg from both internal maps.
		old_pkg = self._cpv_map.get(pkg.cpv)
		self._cp_map[pkg.cp].remove(pkg)
		del self._cpv_map[pkg.cpv]

	def aux_get(self, cpv, wants):
		# Missing metadata keys yield empty strings, per dbapi convention.
		metadata = self._cpv_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self._cpv_map[cpv].metadata.update(values)
class depgraph(object):
	# Core dependency resolver.  (The class body continues beyond this
	# excerpt.)

	pkg_tree_map = RootConfig.pkg_tree_map

	# Dependency metadata keys consulted when expanding the graph.
	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	def __init__(self, settings, trees, myopts, myparams, spinner):
		"""
		Build the per-root fake trees, dbapi views and bookkeeping
		structures used while resolving dependencies.
		"""
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		if settings.get("PORTAGE_DEBUG", "") == "1":
			# (debug flag assignment elided in this excerpt)
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = Scheduler._opts_no_restart
		self.pkgsettings = {}
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		self._trees_orig = trees
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# All Package instances
		self._pkg_cache = {}
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			self._slot_pkg_map[myroot] = {}
			vardb = self.trees[myroot]["vartree"].dbapi
			preload_installed_pkgs = "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			if preload_installed_pkgs:
				# NOTE(review): the loop header over installed packages is
				# elided in this excerpt.
				self.spinner.update()
				# This triggers metadata updates via FakeVartree.
				vardb.aux_get(pkg.cpv, [])
				fakedb.cpv_inject(pkg)
			# Now that the vardb state is cached in our FakeVartree,
			# we won't be needing the real vartree cache for awhile.
			# To make some room on the heap, clear the vardbapi
			trees[myroot]["vartree"].dbapi._clear_cache()
			self.mydbapi[myroot] = fakedb
			# NOTE(review): the graph_tree creation is elided here.
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			def filtered_tree():
			filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]

			portdb = self.trees[myroot]["porttree"].dbapi
			bindb = self.trees[myroot]["bintree"].dbapi
			vardb = self.trees[myroot]["vartree"].dbapi
			# (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in self.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in self.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			if "--usepkg" in self.myopts:
				self.trees[myroot]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)

		self.digraph=portage.digraph()
		# contains all sets added to the graph
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._required_set_names = set(["system", "world"])
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		self._highest_pkg_cache = {}
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		"""

		if not self._slot_collision_info:
			# (early return elided in this excerpt)

		self._show_merge_list()

		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")

		# Max number of parents shown, to avoid flooding the display.
		explanation_columns = 70
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():
			msg.append(str(slot_atom))

			for node in slot_nodes:
				msg.append(str(node))
				parent_atoms = self._parent_atoms.get(node)
					# Prefer conflict atoms over others.
					for parent_atom in parent_atoms:
						if len(pruned_list) >= max_parents:
						if parent_atom in self._slot_conflict_parent_atoms:
							pruned_list.add(parent_atom)

					# If this package was pulled in by conflict atoms then
					# show those alone since those are the most interesting.
						# When generating the pruned list, prefer instances
						# of DependencyArg over instances of Package.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							parent, atom = parent_atom
							if isinstance(parent, DependencyArg):
								pruned_list.add(parent_atom)
						# Prefer Packages instances that themselves have been
						# pulled into collision slots.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							parent, atom = parent_atom
							if isinstance(parent, Package) and \
								(parent.slot_atom, parent.root) \
								in self._slot_collision_info:
								pruned_list.add(parent_atom)
						# Fall back to any remaining parents, up to the cap.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							pruned_list.add(parent_atom)
					omitted_parents = len(parent_atoms) - len(pruned_list)
					parent_atoms = pruned_list
					msg.append(" pulled in by\n")
					for parent_atom in parent_atoms:
						parent, atom = parent_atom
						msg.append(2*indent)
						if isinstance(parent,
							(PackageArg, AtomArg)):
							# For PackageArg and AtomArg types, it's
							# redundant to display the atom attribute.
							msg.append(str(parent))
							# Display the specific atom from SetArg or
							msg.append("%s required by %s" % (atom, parent))
						msg.append(2*indent)
						msg.append("(and %d more)\n" % omitted_parents)
					msg.append(" (no parents)\n")

			# Optionally append a per-conflict explanation paragraph.
			explanation = self._slot_conflict_explanation(slot_nodes)
				msg.append(indent + "Explanation:\n\n")
				for line in textwrap.wrap(explanation, explanation_columns):
					msg.append(2*indent + line + "\n")

		sys.stderr.write("".join(msg))

		explanations_for_all = explanations == len(self._slot_collision_info)

		# When every conflict was explained (or --quiet), skip the generic
		# masking advice below.
		if explanations_for_all or "--quiet" in self.myopts:
			# (early return elided in this excerpt)

		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")

		# Flow the advice text to stderr at 72 columns.
		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
			f.add_flowing_data(x)

		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
			f.add_flowing_data(x)
def _slot_conflict_explanation(self, slot_nodes):
    """
    When a slot conflict occurs due to USE deps, there are a few
    different cases to consider:

    1) New USE are correctly set but --newuse wasn't requested so an
       installed package with incorrect USE happened to get pulled
       into graph before the new one.

    2) New USE are incorrectly set but an installed package has correct
       USE so it got pulled into the graph, and a new instance also got
       pulled in due to --newuse or an upgrade.

    3) Multiple USE deps exist that can't be satisfied simultaneously,
       and multiple package instances got pulled into the same slot to
       satisfy the conflicting deps.

    Currently, explanations and suggested courses of action are generated
    for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
    """
    # NOTE(review): several original lines (early "return None" statements,
    # the "matched_node = None" initialization and some guard conditions)
    # are missing from this excerpt; dangling if-bodies below mark where
    # they belonged.

    if len(slot_nodes) != 2:
        # Suggestions are only implemented for
        # conflicts between two packages.

    all_conflict_atoms = self._slot_conflict_parent_atoms

    matched_atoms = None
    unmatched_node = None
    for node in slot_nodes:
        parent_atoms = self._parent_atoms.get(node)
        if not parent_atoms:
            # Normally, there are always parent atoms. If there are
            # none then something unexpected is happening and there's
            # currently no suggestion for this case.
        conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
        for parent_atom in conflict_atoms:
            parent, atom = parent_atom
            # Suggestions are currently only implemented for cases
            # in which all conflict atoms have USE deps.
        if matched_node is not None:
            # If conflict atoms match multiple nodes
            # then there's no suggestion.
        matched_atoms = conflict_atoms
        if unmatched_node is not None:
            # Neither node is matched by conflict atoms, and
            # there is no suggestion for this case.
        unmatched_node = node

    if matched_node is None or unmatched_node is None:
        # This shouldn't happen.

    if unmatched_node.installed and not matched_node.installed and \
        unmatched_node.cpv == matched_node.cpv:
        # If the conflicting packages are the same version then
        # --newuse should be all that's needed. If they are different
        # versions then there's some other problem.
        return "New USE are correctly set, but --newuse wasn't" + \
            " requested, so an installed package with incorrect USE " + \
            "happened to get pulled into the dependency graph. " + \
            "In order to solve " + \
            "this, either specify the --newuse option or explicitly " + \
            " reinstall '%s'." % matched_node.slot_atom

    if matched_node.installed and not unmatched_node.installed:
        atoms = sorted(set(atom for parent, atom in matched_atoms))
        explanation = ("New USE for '%s' are incorrectly set. " + \
            "In order to solve this, adjust USE to satisfy '%s'") % \
            (matched_node.slot_atom, atoms[0])
        for atom in atoms[1:-1]:
            explanation += ", '%s'" % (atom,)
        explanation += " and '%s'" % (atoms[-1],)
def _process_slot_conflicts(self):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.
    """
    # NOTE(review): a few original lines (a "continue" after the empty
    # parent_atoms guard and after the membership test, plus an "else:"
    # before the final add) are missing from this excerpt.
    for (slot_atom, root), slot_nodes \
        in self._slot_collision_info.iteritems():

        all_parent_atoms = set()
        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if not parent_atoms:
            all_parent_atoms.update(parent_atoms)

        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if parent_atoms is None:
                parent_atoms = set()
                self._parent_atoms[pkg] = parent_atoms
            for parent_atom in all_parent_atoms:
                if parent_atom in parent_atoms:
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                parent, atom = parent_atom
                atom_set = InternalPackageSet(
                    initial_atoms=(atom,))
                if atom_set.findAtomForPackage(pkg):
                    parent_atoms.add(parent_atom)
                    # (an "else:" line is missing before the next statement)
                    self._slot_conflict_parent_atoms.add(parent_atom)
def _reinstall_for_flags(self, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags."""
    # NOTE(review): the trailing "if flags: return flags" / "return None"
    # lines are missing from this excerpt.
    if "--newuse" in self.myopts:
        # --newuse: flags whose IUSE membership changed (excluding
        # profile-forced flags) plus flags whose effective USE state
        # changed between the two package instances.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
    elif "changed-use" == self.myopts.get("--reinstall"):
        # --reinstall=changed-use: only effective USE changes count.
        flags = orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use))
def _create_graph(self, allow_unsatisfied=False):
    # Drain self._dep_stack: Package entries get their dependency
    # strings expanded via _add_pkg_deps(); plain Dependency entries are
    # resolved via _add_dep().
    # NOTE(review): the surrounding "while dep_stack:" loop, the
    # "continue" after the Package branch and the 0/1 return statements
    # are missing from this excerpt.
    dep_stack = self._dep_stack
    self.spinner.update()
    dep = dep_stack.pop()
    if isinstance(dep, Package):
        if not self._add_pkg_deps(dep,
            allow_unsatisfied=allow_unsatisfied):
    if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
def _add_dep(self, dep, allow_unsatisfied=False):
    # Resolve a single Dependency: register blockers against their
    # parent, select a matching package and hand it to _add_pkg().
    # NOTE(review): this excerpt is missing several original lines
    # (a blocker guard, a "not nodeps" condition, early returns and the
    # try: wrapper around the _iter_atoms_for_pkg() call); dangling
    # statements below mark where they belonged.
    debug = "--debug" in self.myopts
    buildpkgonly = "--buildpkgonly" in self.myopts
    nodeps = "--nodeps" in self.myopts
    empty = "empty" in self.myparams
    deep = "deep" in self.myparams
    update = "--update" in self.myopts and dep.depth <= 1
    if not buildpkgonly and \
        dep.parent not in self._slot_collision_nodes:
        if dep.parent.onlydeps:
            # It's safe to ignore blockers if the
            # parent is an --onlydeps node.
        # The blocker applies to the root where
        # the parent is or will be installed.
        blocker = Blocker(atom=dep.atom,
            eapi=dep.parent.metadata["EAPI"],
            root=dep.parent.root)
        self._blocker_parents.add(blocker, dep.parent)
    dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
        onlydeps=dep.onlydeps)
    # (an "if not dep_pkg:" guard appears to be missing here)
    if dep.priority.optional:
        # This could be an unecessary build-time dep
        # pulled in by --with-bdeps=y.
    if allow_unsatisfied:
        self._unsatisfied_deps.append(dep)
    self._unsatisfied_deps_for_display.append(
        ((dep.root, dep.atom), {"myparent":dep.parent}))
    # In some cases, dep_check will return deps that shouldn't
    # be proccessed any further, so they are identified and
    # discarded here. Try to discard as few as possible since
    # discarded dependencies reduce the amount of information
    # available for optimization of merge order.
    if dep.priority.satisfied and \
        not dep_pkg.installed and \
        not (existing_node or empty or deep or update):
        if dep.root == self.target_root:
            # (a "try:" line is missing here)
            myarg = self._iter_atoms_for_pkg(dep_pkg).next()
        except StopIteration:
        except portage.exception.InvalidDependString:
            if not dep_pkg.installed:
                # This shouldn't happen since the package
                # should have been masked.
        self._ignored_deps.append(dep)

    if not self._add_pkg(dep_pkg, dep):
def _add_pkg(self, pkg, dep):
    # Add pkg (with the parent/priority relationship carried by dep) to
    # the dependency graph, detecting and recording slot collisions.
    # NOTE(review): this excerpt is missing a number of original lines
    # (a "dep is None" branch, try: wrappers, guards such as
    # "if existing_node:" / "if arg_atoms:", else branches and return
    # statements); dangling statements below mark where they belonged.
    myparent = dep.parent
    priority = dep.priority
    if priority is None:
        priority = DepPriority()
    """
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just looking at it's deps?
    Think --onlydeps, we need to ignore packages in that case.
    """
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge
    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self.digraph

    # select the correct /var database that we'll be checking against
    vardbapi = self.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self.pkgsettings[pkg.root]

    # (a "try:" wrapper for the next statement is missing here)
    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString, e:
        if not pkg.installed:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))

    if not pkg.onlydeps:
        if not pkg.installed and \
            "empty" not in self.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True

        existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
        # (an "if existing_node:" guard appears to be missing here)
        existing_node_matches = pkg.cpv == existing_node.cpv
        if existing_node_matches and \
            pkg != existing_node and \
            dep.atom is not None:
            # Use package set for matching since it will match via
            # PROVIDE when necessary, while match_from_list does not.
            atom_set = InternalPackageSet(initial_atoms=[dep.atom])
            if not atom_set.findAtomForPackage(existing_node):
                existing_node_matches = False
        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self.digraph.addnode(existing_node, myparent,
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))
        # A slot collision has occurred. Sometimes this coincides
        # with unresolvable blockers, so the slot collision will be
        # shown later if there are no unresolvable blockers.
        self._add_slot_conflict(pkg)
        slot_collision = True

        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
        elif not previously_added:
            self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
            self.mydbapi[pkg.root].cpv_inject(pkg)
            self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self.roots[pkg.root].settings
            settings.setinst(pkg.cpv, pkg.metadata)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))

    self._set_nodes.add(pkg)

    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))

    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)

    """ This section determines whether we go deeper into dependencies or not.
    We want to go deeper on a few occasions:
    Installing package A, we need to make sure package A's deps are met.
    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    """
    dep_stack = self._dep_stack
    if "recurse" not in self.myparams:
    elif pkg.installed and \
        "deep" not in self.myparams:
        dep_stack = self._ignored_deps

    self.spinner.update()

    if not previously_added:
        dep_stack.append(pkg)
5256 def _add_parent_atom(self, pkg, parent_atom):
5257 parent_atoms = self._parent_atoms.get(pkg)
5258 if parent_atoms is None:
5259 parent_atoms = set()
5260 self._parent_atoms[pkg] = parent_atoms
5261 parent_atoms.add(parent_atom)
5263 def _add_slot_conflict(self, pkg):
5264 self._slot_collision_nodes.add(pkg)
5265 slot_key = (pkg.slot_atom, pkg.root)
5266 slot_nodes = self._slot_collision_info.get(slot_key)
5267 if slot_nodes is None:
5269 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5270 self._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    # Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency
    # objects and feed each one to self._add_dep().
    # NOTE(review): this excerpt is missing several original lines
    # ("edepend = {}" and its loop header, the "deps = (" tuple opener,
    # try:/"if debug:" wrappers, bdeps_root setup, else branches and
    # return statements); dangling statements below mark where they
    # belonged.
    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    # (the "edepend = {}" / "for k in depkeys:" lines are missing here)
    edepend[k] = metadata[k]

    if not pkg.built and \
        "--buildpkgonly" in self.myopts and \
        "deep" not in self.myparams and \
        "empty" not in self.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_optional = False

    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            bdeps_optional = True
        # built packages do not have build time dependencies.
        edepend["DEPEND"] = ""

    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    root_deps = self.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
        elif root_deps == "rdeps":
            edepend["DEPEND"] = ""

    # (build-time, run-time, post-merge) dependency tuple entries:
    (bdeps_root, edepend["DEPEND"],
        self._priority(buildtime=(not bdeps_optional),
        optional=bdeps_optional)),
    (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

    debug = "--debug" in self.myopts
    strict = mytype != "installed"
    for dep_root, dep_string, dep_priority in deps:
        print "Parent: ", jbigkey
        print "Depstring:", dep_string
        print "Priority:", dep_priority
        vardb = self.roots[dep_root].trees["vartree"].dbapi
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=myuse, parent=pkg, strict=strict,
            priority=dep_priority)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(jbigkey, dep_string, str(e))
        print "Candidates:", selected_atoms

        for atom in selected_atoms:
            atom = portage.dep.Atom(atom)
            mypriority = dep_priority.copy()
            if not atom.blocker and vardb.match(atom):
                mypriority.satisfied = True
            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, depth=depth, parent=pkg,
                priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
            except portage.exception.InvalidAtom, e:
                show_invalid_depstring_notice(
                    pkg, dep_string, str(e))
                if not pkg.installed:
        print "Exiting...", jbigkey
    except portage.exception.AmbiguousPackageName, e:
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        portage.writemsg(" %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            "!!! This binary package cannot be installed: '%s'\n" % \
                mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """
    Construct a dependency priority object of the appropriate class:
    UnmergeDepPriority when this resolver run is a removal action
    ("remove" in self.myparams), otherwise DepPriority. All keyword
    arguments are forwarded to the constructor unchanged.
    """
    # NOTE(review): the "else:" branch was missing from this excerpt;
    # restored so DepPriority is used for non-removal actions.
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand an atom that lacks a category into candidate
    category-qualified atoms, one per category that contains a package
    matching the package-name part.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # NOTE(review): the "categories = set()" accumulator, the
    # "categories.add(cat)" body, the "deps = []" initialization and the
    # "return deps" lines are missing from this excerpt.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    dbs = self._filtered_trees[root_config.root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):
    for cat in categories:
        deps.append(insert_category_into_atom(
            atom_without_category, cat))
5430 def _have_new_virt(self, root, atom_cp):
5432 for db, pkg_type, built, installed, db_keys in \
5433 self._filtered_trees[root]["dbs"]:
5434 if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    # Generator: yield (arg, atom) pairs for command-line arguments whose
    # atoms match pkg, skipping atoms that are better satisfied by a
    # new-style virtual or by a visible package in a higher slot.
    # NOTE(review): several original lines ("return"/"continue"/"break"
    # statements, the "higher_slot = None" initialization, the tail of
    # the PackageArg condition and the final "yield arg, atom") are
    # missing from this excerpt.
    # TODO: add multiple $ROOT support
    if pkg.root != self.target_root:
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): this excerpt is missing many original lines (the
    # main "for x in myfiles:" loop header, the per-extension branch
    # headers, guards, "continue"/"return" statements, else branches and
    # initializations such as myfavorites/args/lookup_owners/owners).
    # Indentation below is reconstructed; dangling statements mark where
    # the missing lines belonged.
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self.myopts

    # --- argument classification (originally inside "for x in myfiles:") ---
    ext = os.path.splitext(x)[1]
    # Binary package (.tbz2) argument: locate the file under PKGDIR.
    if not os.path.exists(x):
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
        elif os.path.exists(
            os.path.join(pkgsettings["PKGDIR"], x)):
            x = os.path.join(pkgsettings["PKGDIR"], x)
        print "\n\n!!! Binary package '"+str(x)+"' does not exist."
        print "!!! Please ensure the tbz2 exists as specified.\n"
        return 0, myfavorites
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
        print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
        return 0, myfavorites
    db_keys = list(bindb._aux_cache_keys)
    metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
    pkg = Package(type_name="binary", root_config=root_config,
        cpv=mykey, built=True, metadata=metadata,
    self._pkg_cache[pkg] = pkg
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    elif ext==".ebuild":
        # Raw ebuild path argument: validate its tree location.
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.dep_getkey(mykey)):
            print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
            print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
            print colorize("BAD", "*** page for details.")
            countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        db_keys = list(portdb._aux_cache_keys)
        metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
        pkg = Package(type_name="ebuild", root_config=root_config,
            cpv=mykey, metadata=metadata, onlydeps=onlydeps)
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
        pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
        self._pkg_cache[pkg] = pkg
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    elif x.startswith(os.path.sep):
        # Absolute filesystem path argument: resolve to owning package.
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    # Set or atom argument:
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        s = x[len(SETPREFIX):]
        raise portage.exception.PackageSetNotFound(s)
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        atom_cp = portage.dep_getkey(atom)
        if vardb.cp_list(atom_cp):
            installed_cp_set.add(atom_cp)

    if len(installed_cp_set) > 1:
        non_virtual_cps = set()
        for atom_cp in installed_cp_set:
            if not atom_cp.startswith("virtual/"):
                non_virtual_cps.add(atom_cp)
        if len(non_virtual_cps) == 1:
            installed_cp_set = non_virtual_cps

    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        installed_cp = iter(installed_cp_set).next()
        expanded_atoms = [atom for atom in expanded_atoms \
            if portage.dep_getkey(atom) == installed_cp]

    if len(expanded_atoms) > 1:
        ambiguous_package_name(x, expanded_atoms, root_config,
            self.spinner, self.myopts)
        return False, myfavorites
    atom = expanded_atoms[0]
    null_atom = insert_category_into_atom(x, "null")
    null_cp = portage.dep_getkey(null_atom)
    cat, atom_pn = portage.catsplit(null_cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # Allow the depgraph to choose which virtual.
    atom = insert_category_into_atom(x, "virtual")
    atom = insert_category_into_atom(x, "null")
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))

    # --- owner lookup for queued absolute file paths ---
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True

    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot):])

    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:

    portage.writemsg(("\n\n!!! '%s' is not claimed " + \
        "by any package.\n") % lookup_owners[0], noiselevel=-1)

    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = portage.cpv_getkey(cpv)
    atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))

    if "--update" in self.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms

        self._set_args(args)
        greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom):
            AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))

        self._set_args(greedy_args)

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        revised_greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
        for atom in self._greedy_slots(arg.root_config, arg.atom,
            blocker_lookahead=True):
            revised_greedy_args.append(
                AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    myfavorites = set(myfavorites)
    if isinstance(arg, (AtomArg, PackageArg)):
        myfavorites.add(arg.atom)
    elif isinstance(arg, SetArg):
        myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    pprovideddict = pkgsettings.pprovideddict
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    for atom in arg.set:
        self.spinner.update()
        dep = Dependency(atom=atom, onlydeps=onlydeps,
            root=myroot, parent=arg)
        atom_cp = portage.dep_getkey(atom)
        pprovided = pprovideddict.get(portage.dep_getkey(atom))
        if pprovided and portage.match_from_list(atom, pprovided):
            # A provided package has been specified on the command line.
            self._pprovided_args.append((arg, atom))
        if isinstance(arg, PackageArg):
            if not self._add_pkg(arg.package, dep) or \
                not self._create_graph():
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % arg.arg)
                return 0, myfavorites
        portage.writemsg(" Arg: %s\n Atom: %s\n" % \
            (arg, atom), noiselevel=-1)
        pkg, existing_node = self._select_package(
            myroot, atom, onlydeps=onlydeps)
        if not (isinstance(arg, SetArg) and \
            arg.name in ("system", "world")):
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            return 0, myfavorites
        self._missing_args.append((arg, atom))
        if atom_cp != pkg.cp:
            # For old-style virtuals, we need to repeat the
            # package.provided check against the selected package.
            expanded_atom = atom.replace(atom_cp, pkg.cp)
            pprovided = pprovideddict.get(pkg.cp)
                portage.match_from_list(expanded_atom, pprovided):
                # A provided package has been
                # specified on the command line.
                self._pprovided_args.append((arg, atom))
        if pkg.installed and "selective" not in self.myparams:
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            # Previous behavior was to bail out in this case, but
            # since the dep is satisfied by the installed package,
            # it's more friendly to continue building the graph
            # and just show a warning message. Therefore, only bail
            # out here if the atom is not from either the system or
            if not (isinstance(arg, SetArg) and \
                arg.name in ("system", "world")):
                return 0, myfavorites

        # Add the selected package to the graph as soon as possible
        # so that later dep_check() calls can use it as feedback
        # for making more consistent atom selections.
        if not self._add_pkg(pkg, dep):
            if isinstance(arg, SetArg):
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s from %s\n") % \
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % atom)
                return 0, myfavorites

    except portage.exception.MissingSignature, e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature, e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit, e:
        raise # Needed else can't exit
    except Exception, e:
        print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
        print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites

    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                print "Missing binary for:",xs[2]

    except self._unknown_internal_error:
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
# Rebuild the "args" package set from the given AtomArg/PackageArg
# instances, refresh the combined self._set_atoms union and the
# (atom, root) -> argument reverse map, then invalidate every cache
# whose contents depend on which atoms were given as arguments.
# NOTE(review): the embedded line numbers show gaps (5852, 5857, ...);
# several statements of this method are not visible in this excerpt.
5851 def _set_args(self, args):
5853 Create the "args" package set from atoms and packages given as
5854 arguments. This method can be called multiple times if necessary.
5855 The package selection cache is automatically invalidated, since
5856 arguments influence package selections.
5858 args_set = self._sets["args"]
# Only plain atom/package arguments contribute to the "args" set;
# set-style arguments are handled elsewhere.
5861 if not isinstance(arg, (AtomArg, PackageArg)):
5864 if atom in args_set:
# Recompute the union of atoms across all configured sets from scratch.
5868 self._set_atoms.clear()
5869 self._set_atoms.update(chain(*self._sets.itervalues()))
5870 atom_arg_map = self._atom_arg_map
5871 atom_arg_map.clear()
# Map each (atom, root) key to the argument objects that pulled it in,
# so unsatisfied-dep reporting can name the responsible argument.
5873 for atom in arg.set:
5874 atom_key = (atom, arg.root_config.root)
5875 refs = atom_arg_map.get(atom_key)
5878 atom_arg_map[atom_key] = refs
5882 # Invalidate the package selection cache, since
5883 # arguments influence package selections.
5884 self._highest_pkg_cache.clear()
5885 for trees in self._filtered_trees.itervalues():
5886 trees["porttree"].dbapi._clear_cache()
# Compute the extra slot atoms to pull in when a "greedy" slot policy is
# in effect: installed slots of the same package that differ from the
# slot of the highest visible match. Optionally filters out slots that
# would immediately trigger blocker conflicts.
# NOTE(review): sparse excerpt -- lines such as 5898, 5900, 5905-5912
# (early return, slot-set setup, loop headers) are not visible here.
5888 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5890 Return a list of slot atoms corresponding to installed slots that
5891 differ from the slot of the highest visible match. When
5892 blocker_lookahead is True, slot atoms that would trigger a blocker
5893 conflict are automatically discarded, potentially allowing automatic
5894 uninstallation of older slots when appropriate.
5896 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5897 if highest_pkg is None:
5899 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOT values of installed instances of the same cp.
5901 for cpv in vardb.match(atom):
5902 # don't mix new virtuals with old virtuals
5903 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5904 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5906 slots.add(highest_pkg.metadata["SLOT"])
# The slot of the highest match itself is excluded -- only *other*
# installed slots are candidates for greedy selection.
5910 slots.remove(highest_pkg.metadata["SLOT"])
5913 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5914 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5915 if pkg is not None and \
5916 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5917 greedy_pkgs.append(pkg)
5920 if not blocker_lookahead:
5921 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: parse each candidate's *DEPEND strings and build a
# per-package set of blocker atoms so conflicting slots can be dropped.
5924 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5925 for pkg in greedy_pkgs + [highest_pkg]:
5926 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5928 atoms = self._select_atoms(
5929 pkg.root, dep_str, pkg.use.enabled,
5930 parent=pkg, strict=True)
5931 except portage.exception.InvalidDependString:
5933 blocker_atoms = (x for x in atoms if x.blocker)
5934 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5936 if highest_pkg not in blockers:
5939 # filter packages with invalid deps
5940 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5942 # filter packages that conflict with highest_pkg
5943 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5944 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5945 blockers[pkg].findAtomForPackage(highest_pkg))]
5950 # If two packages conflict, discard the lower version.
5951 discard_pkgs = set()
5952 greedy_pkgs.sort(reverse=True)
# Pairwise conflict scan over the descending-sorted list: the later
# (lower) package of any conflicting pair is discarded.
5953 for i in xrange(len(greedy_pkgs) - 1):
5954 pkg1 = greedy_pkgs[i]
5955 if pkg1 in discard_pkgs:
5957 for j in xrange(i + 1, len(greedy_pkgs)):
5958 pkg2 = greedy_pkgs[j]
5959 if pkg2 in discard_pkgs:
5961 if blockers[pkg1].findAtomForPackage(pkg2) or \
5962 blockers[pkg2].findAtomForPackage(pkg1):
5964 discard_pkgs.add(pkg2)
5966 return [pkg.slot_atom for pkg in greedy_pkgs \
5967 if pkg not in discard_pkgs]
# Thin wrapper around _select_atoms_highest_available that swaps in the
# graph-backed trees, so atom selection prefers packages already in the
# graph (or installed and not scheduled for replacement). Used while
# _complete_graph() is running.
5969 def _select_atoms_from_graph(self, *pargs, **kwargs):
5971 Prefer atoms matching packages that have already been
5972 added to the graph or those that are installed and have
5973 not been scheduled for replacement.
# Overrides any caller-supplied trees with the graph trees before
# delegating; all other arguments pass through unchanged.
5975 kwargs["trees"] = self._graph_trees
5976 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check() on a dependency string and return the atoms it
# selected. Temporarily plants `parent` into the trees mapping (for
# buildtime deps only) and relaxes the module-global strict flag around
# the call.
# NOTE(review): sparse excerpt -- the lines that restore state (e.g. the
# visible pop/strict reset at 6003-6005) are presumably inside a
# try/finally whose `try:`/`finally:` lines (5997, 6002) are not visible;
# confirm against the full file before editing.
5978 def _select_atoms_highest_available(self, root, depstring,
5979 myuse=None, parent=None, strict=True, trees=None, priority=None):
5980 """This will raise InvalidDependString if necessary. If trees is
5981 None then self._filtered_trees is used."""
5982 pkgsettings = self.pkgsettings[root]
5984 trees = self._filtered_trees
5985 if not getattr(priority, "buildtime", False):
5986 # The parent should only be passed to dep_check() for buildtime
5987 # dependencies since that's the only case when it's appropriate
5988 # to trigger the circular dependency avoidance code which uses it.
5989 # It's important not to trigger the same circular dependency
5990 # avoidance code for runtime dependencies since it's not needed
5991 # and it can promote an incorrect package choice.
5995 if parent is not None:
5996 trees[root]["parent"] = parent
# Relax the global strict-parsing flag for the duration of dep_check().
5998 portage.dep._dep_check_strict = False
5999 mycheck = portage.dep_check(depstring, None,
6000 pkgsettings, myuse=myuse,
6001 myroot=root, trees=trees)
# Undo the temporary state changes made above.
6003 if parent is not None:
6004 trees[root].pop("parent")
6005 portage.dep._dep_check_strict = True
# dep_check() returns (success, atoms-or-message); on failure the second
# element is the error text.
6007 raise portage.exception.InvalidDependString(mycheck[1])
6008 selected_atoms = mycheck[1]
6009 return selected_atoms
# Print a human-readable explanation of why `atom` could not be
# satisfied on `root`: masked packages, missing/misconfigured USE flags,
# missing IUSE, or simply no matching ebuilds -- followed by the chain
# of parent packages/arguments that pulled the dependency in.
# NOTE(review): sparse excerpt with many invisible lines (loop headers,
# else branches, `missing_use` initialization, the final output lines);
# comments below describe only what the visible lines establish.
6011 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
6012 atom = portage.dep.Atom(atom)
6013 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Strip USE deps (and re-append the slot, if any) so that matching can
# find candidates that fail only on USE configuration.
6014 atom_without_use = atom
6016 atom_without_use = portage.dep.remove_slot(atom)
6018 atom_without_use += ":" + atom.slot
6019 atom_without_use = portage.dep.Atom(atom_without_use)
6020 xinfo = '"%s"' % atom
6023 # Discard null/ from failed cpv_expand category expansion.
6024 xinfo = xinfo.replace("null/", "")
6025 masked_packages = []
6027 masked_pkg_instances = set()
6028 missing_licenses = []
6029 have_eapi_mask = False
6030 pkgsettings = self.pkgsettings[root]
6031 implicit_iuse = pkgsettings._get_implicit_iuse()
6032 root_config = self.roots[root]
6033 portdb = self.roots[root].trees["porttree"].dbapi
6034 dbs = self._filtered_trees[root]["dbs"]
# Scan every configured db (ebuild/binary/installed) for candidates that
# match the USE-stripped atom, and classify each as masked or missing-USE.
6035 for db, pkg_type, built, installed, db_keys in dbs:
6039 if hasattr(db, "xmatch"):
6040 cpv_list = db.xmatch("match-all", atom_without_use)
6042 cpv_list = db.match(atom_without_use)
6045 for cpv in cpv_list:
6046 metadata, mreasons = get_mask_info(root_config, cpv,
6047 pkgsettings, db, pkg_type, built, installed, db_keys)
6048 if metadata is not None:
6049 pkg = Package(built=built, cpv=cpv,
6050 installed=installed, metadata=metadata,
6051 root_config=root_config)
6052 if pkg.cp != atom.cp:
6053 # A cpv can be returned from dbapi.match() as an
6054 # old-style virtual match even in cases when the
6055 # package does not actually PROVIDE the virtual.
6056 # Filter out any such false matches here.
6057 if not atom_set.findAtomForPackage(pkg):
6060 masked_pkg_instances.add(pkg)
6062 missing_use.append(pkg)
6065 masked_packages.append(
6066 (root_config, pkgsettings, cpv, metadata, mreasons))
# For each candidate that matched except for USE deps, work out whether
# the flag is absent from IUSE entirely or merely set the wrong way.
6068 missing_use_reasons = []
6069 missing_iuse_reasons = []
6070 for pkg in missing_use:
6071 use = pkg.use.enabled
6072 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6073 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6075 for x in atom.use.required:
6076 if iuse_re.match(x) is None:
6077 missing_iuse.append(x)
6080 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6081 missing_iuse_reasons.append((pkg, mreasons))
6083 need_enable = sorted(atom.use.enabled.difference(use))
6084 need_disable = sorted(atom.use.disabled.intersection(use))
6085 if need_enable or need_disable:
6087 changes.extend(colorize("red", "+" + x) \
6088 for x in need_enable)
6089 changes.extend(colorize("blue", "-" + x) \
6090 for x in need_disable)
6091 mreasons.append("Change USE: %s" % " ".join(changes))
6092 missing_use_reasons.append((pkg, mreasons))
# Masked instances are reported via the masking message instead, so only
# keep USE/IUSE reasons for unmasked candidates.
6094 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6095 in missing_use_reasons if pkg not in masked_pkg_instances]
6097 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6098 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6100 show_missing_use = False
6101 if unmasked_use_reasons:
6102 # Only show the latest version.
6103 show_missing_use = unmasked_use_reasons[:1]
6104 elif unmasked_iuse_reasons:
6105 if missing_use_reasons:
6106 # All packages with required IUSE are masked,
6107 # so display a normal masking message.
6110 show_missing_use = unmasked_iuse_reasons
# Emit one of three messages: USE-flag advice, masked-package listing,
# or a plain "no ebuilds" notice.
6112 if show_missing_use:
6113 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6114 print "!!! One of the following packages is required to complete your request:"
6115 for pkg, mreasons in show_missing_use:
6116 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6118 elif masked_packages:
6120 colorize("BAD", "All ebuilds that could satisfy ") + \
6121 colorize("INFORM", xinfo) + \
6122 colorize("BAD", " have been masked.")
6123 print "!!! One of the following masked packages is required to complete your request:"
6124 have_eapi_mask = show_masked_packages(masked_packages)
6127 msg = ("The current version of portage supports " + \
6128 "EAPI '%s'. You must upgrade to a newer version" + \
6129 " of portage before EAPI masked packages can" + \
6130 " be installed.") % portage.const.EAPI
6131 from textwrap import wrap
6132 for line in wrap(msg, 75):
6137 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6139 # Show parent nodes and the argument that pulled them in.
6140 traversed_nodes = set()
# Walk up the digraph from the failing node, preferring argument parents
# and refusing to revisit a package, until a root argument is reached.
6143 while node is not None:
6144 traversed_nodes.add(node)
6145 msg.append('(dependency required by "%s" [%s])' % \
6146 (colorize('INFORM', str(node.cpv)), node.type_name))
6147 # When traversing to parents, prefer arguments over packages
6148 # since arguments are root nodes. Never traverse the same
6149 # package twice, in order to prevent an infinite loop.
6150 selected_parent = None
6151 for parent in self.digraph.parent_nodes(node):
6152 if isinstance(parent, DependencyArg):
6153 msg.append('(dependency required by "%s" [argument])' % \
6154 (colorize('INFORM', str(parent))))
6155 selected_parent = None
6157 if parent not in traversed_nodes:
6158 selected_parent = parent
6159 node = selected_parent
# Memoizing front-end for package selection: consult
# self._highest_pkg_cache first, delegate cache misses to
# _select_pkg_highest_available_imp(), store the result, and record the
# chosen package in visible_pkgs when it passes visibility checks.
# NOTE(review): sparse excerpt -- the cache-hit unpacking of `ret` into
# (pkg, existing) and the `return ret` lines are not visible here.
6165 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6166 cache_key = (root, atom, onlydeps)
6167 ret = self._highest_pkg_cache.get(cache_key)
# Cache hit whose "existing node" slot is stale: re-check whether the
# package has since been added to the graph.
6170 if pkg and not existing:
6171 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6172 if existing and existing == pkg:
6173 # Update the cache to reflect that the
6174 # package has been added to the graph.
6176 self._highest_pkg_cache[cache_key] = ret
# Cache miss: run the full selection algorithm and remember the result.
6178 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6179 self._highest_pkg_cache[cache_key] = ret
# Track visible selections, excluding installed packages whose keywords
# are now missing.
6182 settings = pkg.root_config.settings
6183 if visible(settings, pkg) and not (pkg.installed and \
6184 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6185 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection algorithm: scan the filtered dbs (ebuild,
# binary, installed) for candidates matching `atom`, apply visibility,
# keyword, USE, --newuse/--reinstall and virtual filtering, and return a
# (package, existing_graph_node) pair -- the last element of
# matched_packages wins, ordered by type preference.
# NOTE(review): sparse excerpt; many structural lines (loop/try headers,
# else branches, `continue`/`break` statements, `reinstall` and `myeb`
# initialization) are invisible, so comments below are limited to what
# the visible lines show.
6188 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6189 root_config = self.roots[root]
6190 pkgsettings = self.pkgsettings[root]
6191 dbs = self._filtered_trees[root]["dbs"]
6192 vardb = self.roots[root].trees["vartree"].dbapi
6193 portdb = self.roots[root].trees["porttree"].dbapi
6194 # List of acceptable packages, ordered by type preference.
6195 matched_packages = []
6196 highest_version = None
6197 if not isinstance(atom, portage.dep.Atom):
6198 atom = portage.dep.Atom(atom)
6200 atom_set = InternalPackageSet(initial_atoms=(atom,))
6201 existing_node = None
# Option/parameter snapshot used by the selection policy below.
6203 usepkgonly = "--usepkgonly" in self.myopts
6204 empty = "empty" in self.myparams
6205 selective = "selective" in self.myparams
6207 noreplace = "--noreplace" in self.myopts
6208 # Behavior of the "selective" parameter depends on
6209 # whether or not a package matches an argument atom.
6210 # If an installed package provides an old-style
6211 # virtual that is no longer provided by an available
6212 # package, the installed package may match an argument
6213 # atom even though none of the available packages do.
6214 # Therefore, "selective" logic does not consider
6215 # whether or not an installed package matches an
6216 # argument atom. It only considers whether or not
6217 # available packages match argument atoms, which is
6218 # represented by the found_available_arg flag.
6219 found_available_arg = False
# Two passes: first look for an existing graph node, then fall back to
# a fresh selection.
6220 for find_existing_node in True, False:
6223 for db, pkg_type, built, installed, db_keys in dbs:
6226 if installed and not find_existing_node:
6227 want_reinstall = reinstall or empty or \
6228 (found_available_arg and not selective)
6229 if want_reinstall and matched_packages:
6231 if hasattr(db, "xmatch"):
6232 cpv_list = db.xmatch("match-all", atom)
6234 cpv_list = db.match(atom)
6236 # USE=multislot can make an installed package appear as if
6237 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6238 # won't do any good as long as USE=multislot is enabled since
6239 # the newly built package still won't have the expected slot.
6240 # Therefore, assume that such SLOT dependencies are already
6241 # satisfied rather than forcing a rebuild.
6242 if installed and not cpv_list and atom.slot:
6243 for cpv in db.match(atom.cp):
6244 slot_available = False
6245 for other_db, other_type, other_built, \
6246 other_installed, other_keys in dbs:
6249 other_db.aux_get(cpv, ["SLOT"])[0]:
6250 slot_available = True
6254 if not slot_available:
6256 inst_pkg = self._pkg(cpv, "installed",
6257 root_config, installed=installed)
6258 # Remove the slot from the atom and verify that
6259 # the package matches the resulting atom.
6260 atom_without_slot = portage.dep.remove_slot(atom)
6262 atom_without_slot += str(atom.use)
6263 atom_without_slot = portage.dep.Atom(atom_without_slot)
6264 if portage.match_from_list(
6265 atom_without_slot, [inst_pkg]):
6266 cpv_list = [inst_pkg.cpv]
# Installed or onlydeps candidates never get a "merge" status.
6271 pkg_status = "merge"
6272 if installed or onlydeps:
6273 pkg_status = "nomerge"
6276 for cpv in cpv_list:
6277 # Make --noreplace take precedence over --newuse.
6278 if not installed and noreplace and \
6279 cpv in vardb.match(atom):
6280 # If the installed version is masked, it may
6281 # be necessary to look at lower versions,
6282 # in case there is a visible downgrade.
6284 reinstall_for_flags = None
6285 cache_key = (pkg_type, root, cpv, pkg_status)
6286 calculated_use = True
6287 pkg = self._pkg_cache.get(cache_key)
6289 calculated_use = False
6291 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6294 pkg = Package(built=built, cpv=cpv,
6295 installed=installed, metadata=metadata,
6296 onlydeps=onlydeps, root_config=root_config,
6298 metadata = pkg.metadata
6300 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6301 if not built and ("?" in metadata["LICENSE"] or \
6302 "?" in metadata["PROVIDE"]):
6303 # This is avoided whenever possible because
6304 # it's expensive. It only needs to be done here
6305 # if it has an effect on visibility.
6306 pkgsettings.setcpv(pkg)
6307 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6308 calculated_use = True
6309 self._pkg_cache[pkg] = pkg
6311 if not installed or (built and matched_packages):
6312 # Only enforce visibility on installed packages
6313 # if there is at least one other visible package
6314 # available. By filtering installed masked packages
6315 # here, packages that have been masked since they
6316 # were installed can be automatically downgraded
6317 # to an unmasked version.
6319 if not visible(pkgsettings, pkg):
6321 except portage.exception.InvalidDependString:
6325 # Enable upgrade or downgrade to a version
6326 # with visible KEYWORDS when the installed
6327 # version is masked by KEYWORDS, but never
6328 # reinstall the same exact version only due
6329 # to a KEYWORDS mask.
6330 if built and matched_packages:
6332 different_version = None
6333 for avail_pkg in matched_packages:
6334 if not portage.dep.cpvequal(
6335 pkg.cpv, avail_pkg.cpv):
6336 different_version = avail_pkg
6338 if different_version is not None:
6341 pkgsettings._getMissingKeywords(
6342 pkg.cpv, pkg.metadata):
6345 # If the ebuild no longer exists or it's
6346 # keywords have been dropped, reject built
6347 # instances (installed or binary).
6348 # If --usepkgonly is enabled, assume that
6349 # the ebuild status should be ignored.
6353 pkg.cpv, "ebuild", root_config)
6354 except portage.exception.PackageNotFound:
6357 if not visible(pkgsettings, pkg_eb):
# USE must be computed before USE-dep matching below.
6360 if not pkg.built and not calculated_use:
6361 # This is avoided whenever possible because
6363 pkgsettings.setcpv(pkg)
6364 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6366 if pkg.cp != atom.cp:
6367 # A cpv can be returned from dbapi.match() as an
6368 # old-style virtual match even in cases when the
6369 # package does not actually PROVIDE the virtual.
6370 # Filter out any such false matches here.
6371 if not atom_set.findAtomForPackage(pkg):
6375 if root == self.target_root:
6377 # Ebuild USE must have been calculated prior
6378 # to this point, in case atoms have USE deps.
6379 myarg = self._iter_atoms_for_pkg(pkg).next()
6380 except StopIteration:
6382 except portage.exception.InvalidDependString:
6384 # masked by corruption
6386 if not installed and myarg:
6387 found_available_arg = True
# Reject non-built candidates whose USE configuration cannot satisfy
# the atom's USE dependencies.
6389 if atom.use and not pkg.built:
6390 use = pkg.use.enabled
6391 if atom.use.enabled.difference(use):
6393 if atom.use.disabled.intersection(use):
6395 if pkg.cp == atom_cp:
6396 if highest_version is None:
6397 highest_version = pkg
6398 elif pkg > highest_version:
6399 highest_version = pkg
6400 # At this point, we've found the highest visible
6401 # match from the current repo. Any lower versions
6402 # from this repo are ignored, so this so the loop
6403 # will always end with a break statement below
6405 if find_existing_node:
6406 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6409 if portage.dep.match_from_list(atom, [e_pkg]):
6410 if highest_version and \
6411 e_pkg.cp == atom_cp and \
6412 e_pkg < highest_version and \
6413 e_pkg.slot_atom != highest_version.slot_atom:
6414 # There is a higher version available in a
6415 # different slot, so this existing node is
6419 matched_packages.append(e_pkg)
6420 existing_node = e_pkg
6422 # Compare built package to current config and
6423 # reject the built package if necessary.
6424 if built and not installed and \
6425 ("--newuse" in self.myopts or \
6426 "--reinstall" in self.myopts):
6427 iuses = pkg.iuse.all
6428 old_use = pkg.use.enabled
6430 pkgsettings.setcpv(myeb)
6432 pkgsettings.setcpv(pkg)
6433 now_use = pkgsettings["PORTAGE_USE"].split()
6434 forced_flags = set()
6435 forced_flags.update(pkgsettings.useforce)
6436 forced_flags.update(pkgsettings.usemask)
6438 if myeb and not usepkgonly:
6439 cur_iuse = myeb.iuse.all
6440 if self._reinstall_for_flags(forced_flags,
6444 # Compare current config to installed package
6445 # and do not reinstall if possible.
6446 if not installed and \
6447 ("--newuse" in self.myopts or \
6448 "--reinstall" in self.myopts) and \
6449 cpv in vardb.match(atom):
6450 pkgsettings.setcpv(pkg)
6451 forced_flags = set()
6452 forced_flags.update(pkgsettings.useforce)
6453 forced_flags.update(pkgsettings.usemask)
6454 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6455 old_iuse = set(filter_iuse_defaults(
6456 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6457 cur_use = pkg.use.enabled
6458 cur_iuse = pkg.iuse.all
6459 reinstall_for_flags = \
6460 self._reinstall_for_flags(
6461 forced_flags, old_use, old_iuse,
6463 if reinstall_for_flags:
6467 matched_packages.append(pkg)
6468 if reinstall_for_flags:
6469 self._reinstall_nodes[pkg] = \
6473 if not matched_packages:
6476 if "--debug" in self.myopts:
6477 for pkg in matched_packages:
6478 portage.writemsg("%s %s\n" % \
6479 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6481 # Filter out any old-style virtual matches if they are
6482 # mixed with new-style virtual matches.
6483 cp = portage.dep_getkey(atom)
6484 if len(matched_packages) > 1 and \
6485 "virtual" == portage.catsplit(cp)[0]:
6486 for pkg in matched_packages:
6489 # Got a new-style virtual, so filter
6490 # out any old-style virtuals.
6491 matched_packages = [pkg for pkg in matched_packages \
# When several versions remain, keep only the best one.
6495 if len(matched_packages) > 1:
6496 bestmatch = portage.best(
6497 [pkg.cpv for pkg in matched_packages])
6498 matched_packages = [pkg for pkg in matched_packages \
6499 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6501 # ordered by type preference ("ebuild" type is the last resort)
6502 return matched_packages[-1], existing_node
# Graph-restricted counterpart of _select_pkg_highest_available: only
# considers packages already in the graph (or installed and not being
# replaced), via the graph-backed dbapi. Installed as self._select_package
# during _complete_graph().
# NOTE(review): sparse excerpt -- the empty-match early return between
# 6511 and 6514 is not visible here.
6504 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6506 Select packages that have already been added to the graph or
6507 those that are installed and have not been scheduled for
6510 graph_db = self._graph_trees[root]["porttree"].dbapi
6511 matches = graph_db.match_pkgs(atom)
6514 pkg = matches[-1] # highest match
6515 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
# Returns (selected package, existing graph node for its slot or None).
6516 return pkg, in_graph
# Pull the deep dependencies of the required sets (args/system/world)
# into the graph so that an upgrade cannot silently break an initially
# satisfied dependency. Gated behind the "complete" parameter
# (--complete-graph) because it is expensive.
# NOTE(review): sparse excerpt -- several return statements and loop
# headers (e.g. around 6532, 6577, 6585) are not visible here.
6518 def _complete_graph(self):
6520 Add any deep dependencies of required sets (args, system, world) that
6521 have not been pulled into the graph yet. This ensures that the graph
6522 is consistent such that initially satisfied deep dependencies are not
6523 broken in the new graph. Initially unsatisfied dependencies are
6524 irrelevant since we only want to avoid breaking dependencies that are
6527 Since this method can consume enough time to disturb users, it is
6528 currently only enabled by the --complete-graph option.
6530 if "--buildpkgonly" in self.myopts or \
6531 "recurse" not in self.myparams:
6534 if "complete" not in self.myparams:
6535 # Skip this to avoid consuming enough time to disturb users.
6538 # Put the depgraph into a mode that causes it to only
6539 # select packages that have already been added to the
6540 # graph or those that are installed and have not been
6541 # scheduled for replacement. Also, toggle the "deep"
6542 # parameter so that all dependencies are traversed and
6544 self._select_atoms = self._select_atoms_from_graph
6545 self._select_package = self._select_pkg_from_graph
6546 already_deep = "deep" in self.myparams
6547 if not already_deep:
6548 self.myparams.add("deep")
6550 for root in self.roots:
6551 required_set_names = self._required_set_names.copy()
6552 if root == self.target_root and \
6553 (already_deep or "empty" in self.myparams):
6554 required_set_names.difference_update(self._sets)
6555 if not required_set_names and not self._ignored_deps:
6557 root_config = self.roots[root]
6558 setconfig = root_config.setconfig
6560 # Reuse existing SetArg instances when available.
6561 for arg in self.digraph.root_nodes():
6562 if not isinstance(arg, SetArg):
6564 if arg.root_config != root_config:
6566 if arg.name in required_set_names:
6568 required_set_names.remove(arg.name)
6569 # Create new SetArg instances only when necessary.
6570 for s in required_set_names:
6571 expanded_set = InternalPackageSet(
6572 initial_atoms=setconfig.getSetAtoms(s))
6573 atom = SETPREFIX + s
6574 args.append(SetArg(arg=atom, set=expanded_set,
6575 root_config=root_config))
6576 vardb = root_config.trees["vartree"].dbapi
# Seed the dep stack with every atom of every required-set argument,
# plus any deps that were previously ignored.
6578 for atom in arg.set:
6579 self._dep_stack.append(
6580 Dependency(atom=atom, root=root, parent=arg))
6581 if self._ignored_deps:
6582 self._dep_stack.extend(self._ignored_deps)
6583 self._ignored_deps = []
6584 if not self._create_graph(allow_unsatisfied=True):
6586 # Check the unsatisfied deps to see if any initially satisfied deps
6587 # will become unsatisfied due to an upgrade. Initially unsatisfied
6588 # deps are irrelevant since we only want to avoid breaking deps
6589 # that are initially satisfied.
6590 while self._unsatisfied_deps:
6591 dep = self._unsatisfied_deps.pop()
6592 matches = vardb.match_pkgs(dep.atom)
6594 self._initially_unsatisfied_deps.append(dep)
6596 # An scheduled installation broke a deep dependency.
6597 # Add the installed package to the graph so that it
6598 # will be appropriately reported as a slot collision
6599 # (possibly solvable via backtracking).
6600 pkg = matches[-1] # highest match
6601 if not self._add_pkg(pkg, dep):
6603 if not self._create_graph(allow_unsatisfied=True):
# Cached Package factory: look up (type_name, root, cpv, operation) in
# self._pkg_cache, otherwise fetch metadata from the appropriate dbapi,
# build a Package, compute USE/CHOST for ebuilds, and cache it.
# NOTE(review): sparse excerpt -- the cache-hit return, the try/except
# around aux_get, and the final return are not all visible here.
6607 def _pkg(self, cpv, type_name, root_config, installed=False):
6609 Get a package instance from the cache, or create a new
6610 one if necessary. Raises KeyError from aux_get if it
6611 fails for some reason (package does not exist or is
6616 operation = "nomerge"
6617 pkg = self._pkg_cache.get(
6618 (type_name, root_config.root, cpv, operation))
# Cache miss: pull the aux-cache keys from the original (unfiltered)
# trees for this root, then read the metadata.
6620 tree_type = self.pkg_tree_map[type_name]
6621 db = root_config.trees[tree_type].dbapi
6622 db_keys = list(self._trees_orig[root_config.root][
6623 tree_type].dbapi._aux_cache_keys)
6625 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6627 raise portage.exception.PackageNotFound(cpv)
6628 pkg = Package(cpv=cpv, metadata=metadata,
6629 root_config=root_config, installed=installed)
# Ebuilds need USE computed against the current config before caching.
6630 if type_name == "ebuild":
6631 settings = self.pkgsettings[root_config.root]
6632 settings.setcpv(pkg)
6633 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6634 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6635 self._pkg_cache[pkg] = pkg
6638 def validate_blockers(self):
6639 """Remove any blockers from the digraph that do not match any of the
6640 packages within the graph. If necessary, create hard deps to ensure
6641 correct merge order such that mutually blocking packages are never
6642 installed simultaneously."""
6644 if "--buildpkgonly" in self.myopts or \
6645 "--nodeps" in self.myopts:
6648 #if "deep" in self.myparams:
6650 # Pull in blockers from all installed packages that haven't already
6651 # been pulled into the depgraph. This is not enabled by default
6652 # due to the performance penalty that is incurred by all the
6653 # additional dep_check calls that are required.
6655 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6656 for myroot in self.trees:
6657 vardb = self.trees[myroot]["vartree"].dbapi
6658 portdb = self.trees[myroot]["porttree"].dbapi
6659 pkgsettings = self.pkgsettings[myroot]
6660 final_db = self.mydbapi[myroot]
6662 blocker_cache = BlockerCache(myroot, vardb)
6663 stale_cache = set(blocker_cache)
6666 stale_cache.discard(cpv)
6667 pkg_in_graph = self.digraph.contains(pkg)
6669 # Check for masked installed packages. Only warn about
6670 # packages that are in the graph in order to avoid warning
6671 # about those that will be automatically uninstalled during
6672 # the merge process or by --depclean.
6674 if pkg_in_graph and not visible(pkgsettings, pkg):
6675 self._masked_installed.add(pkg)
6677 blocker_atoms = None
6683 self._blocker_parents.child_nodes(pkg))
6688 self._irrelevant_blockers.child_nodes(pkg))
6691 if blockers is not None:
6692 blockers = set(str(blocker.atom) \
6693 for blocker in blockers)
6695 # If this node has any blockers, create a "nomerge"
6696 # node for it so that they can be enforced.
6697 self.spinner.update()
6698 blocker_data = blocker_cache.get(cpv)
6699 if blocker_data is not None and \
6700 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6703 # If blocker data from the graph is available, use
6704 # it to validate the cache and update the cache if
6706 if blocker_data is not None and \
6707 blockers is not None:
6708 if not blockers.symmetric_difference(
6709 blocker_data.atoms):
6713 if blocker_data is None and \
6714 blockers is not None:
6715 # Re-use the blockers from the graph.
6716 blocker_atoms = sorted(blockers)
6717 counter = long(pkg.metadata["COUNTER"])
6719 blocker_cache.BlockerData(counter, blocker_atoms)
6720 blocker_cache[pkg.cpv] = blocker_data
6724 blocker_atoms = blocker_data.atoms
6726 # Use aux_get() to trigger FakeVartree global
6727 # updates on *DEPEND when appropriate.
6728 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6729 # It is crucial to pass in final_db here in order to
6730 # optimize dep_check calls by eliminating atoms via
6731 # dep_wordreduce and dep_eval calls.
6733 portage.dep._dep_check_strict = False
6735 success, atoms = portage.dep_check(depstr,
6736 final_db, pkgsettings, myuse=pkg.use.enabled,
6737 trees=self._graph_trees, myroot=myroot)
6738 except Exception, e:
6739 if isinstance(e, SystemExit):
6741 # This is helpful, for example, if a ValueError
6742 # is thrown from cpv_expand due to multiple
6743 # matches (this can happen if an atom lacks a
6745 show_invalid_depstring_notice(
6746 pkg, depstr, str(e))
6750 portage.dep._dep_check_strict = True
6752 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6753 if replacement_pkg and \
6754 replacement_pkg[0].operation == "merge":
6755 # This package is being replaced anyway, so
6756 # ignore invalid dependencies so as not to
6757 # annoy the user too much (otherwise they'd be
6758 # forced to manually unmerge it first).
6760 show_invalid_depstring_notice(pkg, depstr, atoms)
6762 blocker_atoms = [myatom for myatom in atoms \
6763 if myatom.startswith("!")]
6764 blocker_atoms.sort()
6765 counter = long(pkg.metadata["COUNTER"])
6766 blocker_cache[cpv] = \
6767 blocker_cache.BlockerData(counter, blocker_atoms)
6770 for atom in blocker_atoms:
6771 blocker = Blocker(atom=portage.dep.Atom(atom),
6772 eapi=pkg.metadata["EAPI"], root=myroot)
6773 self._blocker_parents.add(blocker, pkg)
6774 except portage.exception.InvalidAtom, e:
6775 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6776 show_invalid_depstring_notice(
6777 pkg, depstr, "Invalid Atom: %s" % (e,))
6779 for cpv in stale_cache:
6780 del blocker_cache[cpv]
6781 blocker_cache.flush()
6784 # Discard any "uninstall" tasks scheduled by previous calls
6785 # to this method, since those tasks may not make sense given
6786 # the current graph state.
6787 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6788 if previous_uninstall_tasks:
6789 self._blocker_uninstalls = digraph()
6790 self.digraph.difference_update(previous_uninstall_tasks)
6792 for blocker in self._blocker_parents.leaf_nodes():
6793 self.spinner.update()
6794 root_config = self.roots[blocker.root]
6795 virtuals = root_config.settings.getvirtuals()
6796 myroot = blocker.root
6797 initial_db = self.trees[myroot]["vartree"].dbapi
6798 final_db = self.mydbapi[myroot]
6800 provider_virtual = False
6801 if blocker.cp in virtuals and \
6802 not self._have_new_virt(blocker.root, blocker.cp):
6803 provider_virtual = True
6805 # Use this to check PROVIDE for each matched package
6807 atom_set = InternalPackageSet(
6808 initial_atoms=[blocker.atom])
6810 if provider_virtual:
6812 for provider_entry in virtuals[blocker.cp]:
6814 portage.dep_getkey(provider_entry)
6815 atoms.append(blocker.atom.replace(
6816 blocker.cp, provider_cp))
6818 atoms = [blocker.atom]
6820 blocked_initial = set()
6822 for pkg in initial_db.match_pkgs(atom):
6823 if atom_set.findAtomForPackage(pkg):
6824 blocked_initial.add(pkg)
6826 blocked_final = set()
6828 for pkg in final_db.match_pkgs(atom):
6829 if atom_set.findAtomForPackage(pkg):
6830 blocked_final.add(pkg)
6832 if not blocked_initial and not blocked_final:
6833 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6834 self._blocker_parents.remove(blocker)
6835 # Discard any parents that don't have any more blockers.
6836 for pkg in parent_pkgs:
6837 self._irrelevant_blockers.add(blocker, pkg)
6838 if not self._blocker_parents.child_nodes(pkg):
6839 self._blocker_parents.remove(pkg)
6841 for parent in self._blocker_parents.parent_nodes(blocker):
6842 unresolved_blocks = False
6843 depends_on_order = set()
6844 for pkg in blocked_initial:
6845 if pkg.slot_atom == parent.slot_atom:
6846 # TODO: Support blocks within slots in cases where it
6847 # might make sense. For example, a new version might
6848 # require that the old version be uninstalled at build
6851 if parent.installed:
6852 # Two currently installed packages conflict with
6853 # eachother. Ignore this case since the damage
6854 # is already done and this would be likely to
6855 # confuse users if displayed like a normal blocker.
6858 self._blocked_pkgs.add(pkg, blocker)
6860 if parent.operation == "merge":
6861 # Maybe the blocked package can be replaced or simply
6862 # unmerged to resolve this block.
6863 depends_on_order.add((pkg, parent))
6865 # None of the above blocker resolutions techniques apply,
6866 # so apparently this one is unresolvable.
6867 unresolved_blocks = True
6868 for pkg in blocked_final:
6869 if pkg.slot_atom == parent.slot_atom:
6870 # TODO: Support blocks within slots.
6872 if parent.operation == "nomerge" and \
6873 pkg.operation == "nomerge":
6874 # This blocker will be handled the next time that a
6875 # merge of either package is triggered.
6878 self._blocked_pkgs.add(pkg, blocker)
6880 # Maybe the blocking package can be
6881 # unmerged to resolve this block.
6882 if parent.operation == "merge" and pkg.installed:
6883 depends_on_order.add((pkg, parent))
6885 elif parent.operation == "nomerge":
6886 depends_on_order.add((parent, pkg))
6888 # None of the above blocker resolutions techniques apply,
6889 # so apparently this one is unresolvable.
6890 unresolved_blocks = True
6892 # Make sure we don't unmerge any package that have been pulled
6894 if not unresolved_blocks and depends_on_order:
6895 for inst_pkg, inst_task in depends_on_order:
6896 if self.digraph.contains(inst_pkg) and \
6897 self.digraph.parent_nodes(inst_pkg):
6898 unresolved_blocks = True
6901 if not unresolved_blocks and depends_on_order:
6902 for inst_pkg, inst_task in depends_on_order:
6903 uninst_task = Package(built=inst_pkg.built,
6904 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6905 metadata=inst_pkg.metadata,
6906 operation="uninstall",
6907 root_config=inst_pkg.root_config,
6908 type_name=inst_pkg.type_name)
6909 self._pkg_cache[uninst_task] = uninst_task
6910 # Enforce correct merge order with a hard dep.
6911 self.digraph.addnode(uninst_task, inst_task,
6912 priority=BlockerDepPriority.instance)
6913 # Count references to this blocker so that it can be
6914 # invalidated after nodes referencing it have been
6916 self._blocker_uninstalls.addnode(uninst_task, blocker)
6917 if not unresolved_blocks and not depends_on_order:
6918 self._irrelevant_blockers.add(blocker, parent)
6919 self._blocker_parents.remove_edge(blocker, parent)
6920 if not self._blocker_parents.parent_nodes(blocker):
6921 self._blocker_parents.remove(blocker)
6922 if not self._blocker_parents.child_nodes(parent):
6923 self._blocker_parents.remove(parent)
6924 if unresolved_blocks:
6925 self._unsolvable_blockers.add(blocker, parent)
# Decide whether unresolved blocker conflicts are tolerable for this
# invocation: options that never actually merge anything onto the live
# system (--buildpkgonly, --fetchonly, --fetch-all-uri, --nodeps) make
# conflicts acceptable.
# NOTE(review): the embedded numbering jumps from 6929 to 6931 and ends
# at 6933 — the accumulator initialisation and the return statement are
# not visible in this extraction; presumably a boolean flag set inside
# the loop is returned. TODO confirm against the complete file.
6929 def _accept_blocker_conflicts(self):
6931 for x in ("--buildpkgonly", "--fetchonly",
6932 "--fetch-all-uri", "--nodeps"):
6933 if x in self.myopts:
# Sort mygraph.order in place to bias merge-order leaf selection:
# uninstall tasks are de-prioritized, deep system runtime deps are
# promoted, and remaining nodes are ordered from highest to lowest
# parent (reference) count.
6938 def _merge_order_bias(self, mygraph):
6940 For optimal leaf node selection, promote deep system runtime deps and
6941 order nodes from highest to lowest overall reference count.
# NOTE(review): lines 6942-6944 are missing from this extraction — they
# presumably close the docstring and initialise the node_info dict
# populated below. TODO confirm.
6945 for node in mygraph.order:
6946 node_info[node] = len(mygraph.parent_nodes(node))
6947 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# cmp-style comparator (negative/zero/positive), adapted to a sort key
# via cmp_sort_key at the bottom of this method.
6949 def cmp_merge_preference(node1, node2):
6951 if node1.operation == 'uninstall':
6952 if node2.operation == 'uninstall':
# NOTE(review): the return statements for these uninstall/system
# branches (lines 6953-6955, 6958-6960, 6964-6967) were dropped by the
# extraction; only the tie-break below survives.
6956 if node2.operation == 'uninstall':
6957 if node1.operation == 'uninstall':
6961 node1_sys = node1 in deep_system_deps
6962 node2_sys = node2 in deep_system_deps
6963 if node1_sys != node2_sys:
# Final tie-break: nodes with more parents (higher reference count)
# sort earlier.
6968 return node_info[node2] - node_info[node1]
6970 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return the serialized merge list, computing and caching it on first
# use. The parameter name `reversed` shadows the builtin of the same
# name; kept as-is since it is part of the public signature.
6972 def altlist(self, reversed=False):
6974 while self._serialized_tasks_cache is None:
6975 self._resolve_conflicts()
# Serialization may raise _serialize_tasks_retry (e.g. after enabling
# "complete" mode) and is then re-attempted by the enclosing loop.
6977 self._serialized_tasks_cache, self._scheduler_graph = \
6978 self._serialize_tasks()
6979 except self._serialize_tasks_retry:
# NOTE(review): the `try:` line (6976), the except body (6980-6981)
# and the trailing reverse-and-return logic (6983+) are missing from
# this extraction. TODO confirm against the complete file.
6982 retlist = self._serialized_tasks_cache[:]
# Return the scheduler-oriented dependency graph, breaking depgraph
# back-references in its nodes first (see break_refs); after calling
# this, the depgraph instance must not be used for further work.
6987 def schedulerGraph(self):
6989 The scheduler graph is identical to the normal one except that
6990 uninstall edges are reversed in specific cases that require
6991 conflicting packages to be temporarily installed simultaneously.
6992 This is intended for use by the Scheduler in it's parallelization
6993 logic. It ensures that temporary simultaneous installation of
6994 conflicting packages is avoided when appropriate (especially for
6995 !!atom blockers), but allowed in specific cases that require it.
6997 Note that this method calls break_refs() which alters the state of
6998 internal Package instances such that this depgraph instance should
6999 not be used to perform any more calculations.
7001 if self._scheduler_graph is None:
# NOTE(review): line 7002 is missing from this extraction — presumably
# the call that populates self._scheduler_graph when it is still None
# (likely self.altlist()). TODO confirm.
7003 self.break_refs(self._scheduler_graph.order)
7004 return self._scheduler_graph
# Sever references from Package nodes back to this depgraph so callers
# can hold the mergelist without keeping the whole depgraph alive.
7006 def break_refs(self, nodes):
7008 Take a mergelist like that returned from self.altlist() and
7009 break any references that lead back to the depgraph. This is
7010 useful if you want to hold references to packages without
7011 also holding the depgraph on the heap.
# NOTE(review): line 7013 is missing from this extraction — presumably
# `for node in nodes:`; the hasattr check below clearly executes once
# per node. TODO confirm.
7014 if hasattr(node, "root_config"):
7015 # The FakeVartree references the _package_cache which
7016 # references the depgraph. So that Package instances don't
7017 # hold the depgraph and FakeVartree on the heap, replace
7018 # the RootConfig that references the FakeVartree with the
7019 # original RootConfig instance which references the actual
7021 node.root_config = \
7022 self._trees_orig[node.root_config.root]["root_config"]
7024 def _resolve_conflicts(self):
7025 if not self._complete_graph():
7026 raise self._unknown_internal_error()
7028 if not self.validate_blockers():
7029 raise self._unknown_internal_error()
7031 if self._slot_collision_info:
7032 self._process_slot_conflicts()
# Serialize the dependency digraph into an ordered merge list.
# Returns (retlist, scheduler_graph): the ordered list of Package /
# Blocker entries, and a copy of the digraph with uninstall edges
# adjusted for the Scheduler. Raises _serialize_tasks_retry when
# "complete" mode must be enabled first, and _unknown_internal_error on
# unresolvable blocker conflicts, slot collisions, or circular deps.
# NOTE(review): this extraction drops many original lines (gaps in the
# embedded numbering throughout), so several branches below are missing
# parts of their bodies; do not treat this copy as runnable source.
7034 def _serialize_tasks(self):
7036 if "--debug" in self.myopts:
7037 writemsg("\ndigraph:\n\n", noiselevel=-1)
7038 self.digraph.debug_print()
7039 writemsg("\n", noiselevel=-1)
7041 scheduler_graph = self.digraph.copy()
7043 if '--nodeps' in self.myopts:
7044 # Preserve the package order given on the command line.
7045 return ([node for node in scheduler_graph \
7046 if isinstance(node, Package) \
7047 and node.operation == 'merge'], scheduler_graph)
7049 mygraph=self.digraph.copy()
7050 # Prune "nomerge" root nodes if nothing depends on them, since
7051 # otherwise they slow down merge order calculation. Don't remove
7052 # non-root nodes since they help optimize merge order in some cases
7053 # such as revdep-rebuild.
7054 removed_nodes = set()
7056 for node in mygraph.root_nodes():
7057 if not isinstance(node, Package) or \
7058 node.installed or node.onlydeps:
7059 removed_nodes.add(node)
7061 self.spinner.update()
7062 mygraph.difference_update(removed_nodes)
7063 if not removed_nodes:
7065 removed_nodes.clear()
7066 self._merge_order_bias(mygraph)
# cmp-style comparator for ordering nodes that are part of a circular
# dependency; used with cmp_sort_key further below.
7067 def cmp_circular_bias(n1, n2):
7069 RDEPEND is stronger than PDEPEND and this function
7070 measures such a strength bias within a circular
7071 dependency relationship.
7073 n1_n2_medium = n2 in mygraph.child_nodes(n1,
7074 ignore_priority=priority_range.ignore_medium_soft)
7075 n2_n1_medium = n1 in mygraph.child_nodes(n2,
7076 ignore_priority=priority_range.ignore_medium_soft)
7077 if n1_n2_medium == n2_n1_medium:
7082 myblocker_uninstalls = self._blocker_uninstalls.copy()
7084 # Contains uninstall tasks that have been scheduled to
7085 # occur after overlapping blockers have been installed.
7086 scheduled_uninstalls = set()
7087 # Contains any Uninstall tasks that have been ignored
7088 # in order to avoid the circular deps code path. These
7089 # correspond to blocker conflicts that could not be
7091 ignored_uninstall_tasks = set()
7092 have_uninstall_task = False
7093 complete = "complete" in self.myparams
# Helper: candidate leaf nodes for the next merge step; uninstalls are
# excluded until they have been explicitly scheduled.
7096 def get_nodes(**kwargs):
7098 Returns leaf nodes excluding Uninstall instances
7099 since those should be executed as late as possible.
7101 return [node for node in mygraph.leaf_nodes(**kwargs) \
7102 if isinstance(node, Package) and \
7103 (node.operation != "uninstall" or \
7104 node in scheduled_uninstalls)]
7106 # sys-apps/portage needs special treatment if ROOT="/"
7107 running_root = self._running_root.root
7108 from portage.const import PORTAGE_PACKAGE_ATOM
7109 runtime_deps = InternalPackageSet(
7110 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7111 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7112 PORTAGE_PACKAGE_ATOM)
7113 replacement_portage = self.mydbapi[running_root].match_pkgs(
7114 PORTAGE_PACKAGE_ATOM)
7117 running_portage = running_portage[0]
7119 running_portage = None
7121 if replacement_portage:
7122 replacement_portage = replacement_portage[0]
7124 replacement_portage = None
7126 if replacement_portage == running_portage:
7127 replacement_portage = None
7129 if replacement_portage is not None:
7130 # update from running_portage to replacement_portage asap
7131 asap_nodes.append(replacement_portage)
7133 if running_portage is not None:
7135 portage_rdepend = self._select_atoms_highest_available(
7136 running_root, running_portage.metadata["RDEPEND"],
7137 myuse=running_portage.use.enabled,
7138 parent=running_portage, strict=False)
7139 except portage.exception.InvalidDependString, e:
7140 portage.writemsg("!!! Invalid RDEPEND in " + \
7141 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7142 (running_root, running_portage.cpv, e), noiselevel=-1)
7144 portage_rdepend = []
7145 runtime_deps.update(atom for atom in portage_rdepend \
7146 if not atom.startswith("!"))
# Helper: recursively collect a strongly-connected group of mergeable
# nodes so they can be merged together; returns falsy when the group
# cannot be completed (see missing return lines in the extraction).
7148 def gather_deps(ignore_priority, mergeable_nodes,
7149 selected_nodes, node):
7151 Recursively gather a group of nodes that RDEPEND on
7152 eachother. This ensures that they are merged as a group
7153 and get their RDEPENDs satisfied as soon as possible.
7155 if node in selected_nodes:
7157 if node not in mergeable_nodes:
7159 if node == replacement_portage and \
7160 mygraph.child_nodes(node,
7161 ignore_priority=priority_range.ignore_medium_soft):
7162 # Make sure that portage always has all of it's
7163 # RDEPENDs installed first.
7165 selected_nodes.add(node)
7166 for child in mygraph.child_nodes(node,
7167 ignore_priority=ignore_priority):
7168 if not gather_deps(ignore_priority,
7169 mergeable_nodes, selected_nodes, child):
7173 def ignore_uninst_or_med(priority):
7174 if priority is BlockerDepPriority.instance:
7176 return priority_range.ignore_medium(priority)
7178 def ignore_uninst_or_med_soft(priority):
7179 if priority is BlockerDepPriority.instance:
7181 return priority_range.ignore_medium_soft(priority)
7183 tree_mode = "--tree" in self.myopts
7184 # Tracks whether or not the current iteration should prefer asap_nodes
7185 # if available. This is set to False when the previous iteration
7186 # failed to select any nodes. It is reset whenever nodes are
7187 # successfully selected.
7190 # Controls whether or not the current iteration should drop edges that
7191 # are "satisfied" by installed packages, in order to solve circular
7192 # dependencies. The deep runtime dependencies of installed packages are
7193 # not checked in this case (bug #199856), so it must be avoided
7194 # whenever possible.
7195 drop_satisfied = False
7197 # State of variables for successive iterations that loosen the
7198 # criteria for node selection.
7200 # iteration prefer_asap drop_satisfied
7205 # If no nodes are selected on the last iteration, it is due to
7206 # unresolved blockers or circular dependencies.
# Main selection loop: repeatedly pick leaf nodes, progressively
# loosening the ignore-priority criteria until the graph is empty or an
# unresolvable situation is detected.
7208 while not mygraph.empty():
7209 self.spinner.update()
7210 selected_nodes = None
7211 ignore_priority = None
7212 if drop_satisfied or (prefer_asap and asap_nodes):
7213 priority_range = DepPrioritySatisfiedRange
7215 priority_range = DepPriorityNormalRange
7216 if prefer_asap and asap_nodes:
7217 # ASAP nodes are merged before their soft deps. Go ahead and
7218 # select root nodes here if necessary, since it's typical for
7219 # the parent to have been removed from the graph already.
7220 asap_nodes = [node for node in asap_nodes \
7221 if mygraph.contains(node)]
7222 for node in asap_nodes:
7223 if not mygraph.child_nodes(node,
7224 ignore_priority=priority_range.ignore_soft):
7225 selected_nodes = [node]
7226 asap_nodes.remove(node)
7228 if not selected_nodes and \
7229 not (prefer_asap and asap_nodes):
7230 for i in xrange(priority_range.NONE,
7231 priority_range.MEDIUM_SOFT + 1):
7232 ignore_priority = priority_range.ignore_priority[i]
7233 nodes = get_nodes(ignore_priority=ignore_priority)
7235 # If there is a mix of uninstall nodes with other
7236 # types, save the uninstall nodes for later since
7237 # sometimes a merge node will render an uninstall
7238 # node unnecessary (due to occupying the same slot),
7239 # and we want to avoid executing a separate uninstall
7240 # task in that case.
7242 good_uninstalls = []
7243 with_some_uninstalls_excluded = []
7245 if node.operation == "uninstall":
7246 slot_node = self.mydbapi[node.root
7247 ].match_pkgs(node.slot_atom)
7249 slot_node[0].operation == "merge":
7251 good_uninstalls.append(node)
7252 with_some_uninstalls_excluded.append(node)
7254 nodes = good_uninstalls
7255 elif with_some_uninstalls_excluded:
7256 nodes = with_some_uninstalls_excluded
7260 if ignore_priority is None and not tree_mode:
7261 # Greedily pop all of these nodes since no
7262 # relationship has been ignored. This optimization
7263 # destroys --tree output, so it's disabled in tree
7265 selected_nodes = nodes
7267 # For optimal merge order:
7268 # * Only pop one node.
7269 # * Removing a root node (node without a parent)
7270 # will not produce a leaf node, so avoid it.
7271 # * It's normal for a selected uninstall to be a
7272 # root node, so don't check them for parents.
7274 if node.operation == "uninstall" or \
7275 mygraph.parent_nodes(node):
7276 selected_nodes = [node]
7282 if not selected_nodes:
7283 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7285 mergeable_nodes = set(nodes)
7286 if prefer_asap and asap_nodes:
7288 for i in xrange(priority_range.SOFT,
7289 priority_range.MEDIUM_SOFT + 1):
7290 ignore_priority = priority_range.ignore_priority[i]
7292 if not mygraph.parent_nodes(node):
7294 selected_nodes = set()
7295 if gather_deps(ignore_priority,
7296 mergeable_nodes, selected_nodes, node):
7299 selected_nodes = None
7303 if prefer_asap and asap_nodes and not selected_nodes:
7304 # We failed to find any asap nodes to merge, so ignore
7305 # them for the next iteration.
7309 if selected_nodes and ignore_priority is not None:
7310 # Try to merge ignored medium_soft deps as soon as possible
7311 # if they're not satisfied by installed packages.
7312 for node in selected_nodes:
7313 children = set(mygraph.child_nodes(node))
7314 soft = children.difference(
7315 mygraph.child_nodes(node,
7316 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7317 medium_soft = children.difference(
7318 mygraph.child_nodes(node,
7320 DepPrioritySatisfiedRange.ignore_medium_soft))
7321 medium_soft.difference_update(soft)
7322 for child in medium_soft:
7323 if child in selected_nodes:
7325 if child in asap_nodes:
7327 asap_nodes.append(child)
7329 if selected_nodes and len(selected_nodes) > 1:
7330 if not isinstance(selected_nodes, list):
7331 selected_nodes = list(selected_nodes)
7332 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7334 if not selected_nodes and not myblocker_uninstalls.is_empty():
7335 # An Uninstall task needs to be executed in order to
7336 # avoid conflict if possible.
7339 priority_range = DepPrioritySatisfiedRange
7341 priority_range = DepPriorityNormalRange
7343 mergeable_nodes = get_nodes(
7344 ignore_priority=ignore_uninst_or_med)
7346 min_parent_deps = None
7348 for task in myblocker_uninstalls.leaf_nodes():
7349 # Do some sanity checks so that system or world packages
7350 # don't get uninstalled inappropriately here (only really
7351 # necessary when --complete-graph has not been enabled).
7353 if task in ignored_uninstall_tasks:
7356 if task in scheduled_uninstalls:
7357 # It's been scheduled but it hasn't
7358 # been executed yet due to dependence
7359 # on installation of blocking packages.
7362 root_config = self.roots[task.root]
7363 inst_pkg = self._pkg_cache[
7364 ("installed", task.root, task.cpv, "nomerge")]
7366 if self.digraph.contains(inst_pkg):
7369 forbid_overlap = False
7370 heuristic_overlap = False
7371 for blocker in myblocker_uninstalls.parent_nodes(task):
7372 if blocker.eapi in ("0", "1"):
7373 heuristic_overlap = True
7374 elif blocker.atom.blocker.overlap.forbid:
7375 forbid_overlap = True
7377 if forbid_overlap and running_root == task.root:
7380 if heuristic_overlap and running_root == task.root:
7381 # Never uninstall sys-apps/portage or it's essential
7382 # dependencies, except through replacement.
7384 runtime_dep_atoms = \
7385 list(runtime_deps.iterAtomsForPackage(task))
7386 except portage.exception.InvalidDependString, e:
7387 portage.writemsg("!!! Invalid PROVIDE in " + \
7388 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7389 (task.root, task.cpv, e), noiselevel=-1)
7393 # Don't uninstall a runtime dep if it appears
7394 # to be the only suitable one installed.
7396 vardb = root_config.trees["vartree"].dbapi
7397 for atom in runtime_dep_atoms:
7398 other_version = None
7399 for pkg in vardb.match_pkgs(atom):
7400 if pkg.cpv == task.cpv and \
7401 pkg.metadata["COUNTER"] == \
7402 task.metadata["COUNTER"]:
7406 if other_version is None:
7412 # For packages in the system set, don't take
7413 # any chances. If the conflict can't be resolved
7414 # by a normal replacement operation then abort.
7417 for atom in root_config.sets[
7418 "system"].iterAtomsForPackage(task):
7421 except portage.exception.InvalidDependString, e:
7422 portage.writemsg("!!! Invalid PROVIDE in " + \
7423 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7424 (task.root, task.cpv, e), noiselevel=-1)
7430 # Note that the world check isn't always
7431 # necessary since self._complete_graph() will
7432 # add all packages from the system and world sets to the
7433 # graph. This just allows unresolved conflicts to be
7434 # detected as early as possible, which makes it possible
7435 # to avoid calling self._complete_graph() when it is
7436 # unnecessary due to blockers triggering an abortion.
7438 # For packages in the world set, go ahead an uninstall
7439 # when necessary, as long as the atom will be satisfied
7440 # in the final state.
7441 graph_db = self.mydbapi[task.root]
7444 for atom in root_config.sets[
7445 "world"].iterAtomsForPackage(task):
7447 for pkg in graph_db.match_pkgs(atom):
7454 self._blocked_world_pkgs[inst_pkg] = atom
7456 except portage.exception.InvalidDependString, e:
7457 portage.writemsg("!!! Invalid PROVIDE in " + \
7458 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7459 (task.root, task.cpv, e), noiselevel=-1)
7465 # Check the deps of parent nodes to ensure that
7466 # the chosen task produces a leaf node. Maybe
7467 # this can be optimized some more to make the
7468 # best possible choice, but the current algorithm
7469 # is simple and should be near optimal for most
7471 mergeable_parent = False
7473 for parent in mygraph.parent_nodes(task):
7474 parent_deps.update(mygraph.child_nodes(parent,
7475 ignore_priority=priority_range.ignore_medium_soft))
7476 if parent in mergeable_nodes and \
7477 gather_deps(ignore_uninst_or_med_soft,
7478 mergeable_nodes, set(), parent):
7479 mergeable_parent = True
7481 if not mergeable_parent:
7484 parent_deps.remove(task)
7485 if min_parent_deps is None or \
7486 len(parent_deps) < min_parent_deps:
7487 min_parent_deps = len(parent_deps)
7490 if uninst_task is not None:
7491 # The uninstall is performed only after blocking
7492 # packages have been merged on top of it. File
7493 # collisions between blocking packages are detected
7494 # and removed from the list of files to be uninstalled.
7495 scheduled_uninstalls.add(uninst_task)
7496 parent_nodes = mygraph.parent_nodes(uninst_task)
7498 # Reverse the parent -> uninstall edges since we want
7499 # to do the uninstall after blocking packages have
7500 # been merged on top of it.
7501 mygraph.remove(uninst_task)
7502 for blocked_pkg in parent_nodes:
7503 mygraph.add(blocked_pkg, uninst_task,
7504 priority=BlockerDepPriority.instance)
7505 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7506 scheduler_graph.add(blocked_pkg, uninst_task,
7507 priority=BlockerDepPriority.instance)
7509 # Reset the state variables for leaf node selection and
7510 # continue trying to select leaf nodes.
7512 drop_satisfied = False
7515 if not selected_nodes:
7516 # Only select root nodes as a last resort. This case should
7517 # only trigger when the graph is nearly empty and the only
7518 # remaining nodes are isolated (no parents or children). Since
7519 # the nodes must be isolated, ignore_priority is not needed.
7520 selected_nodes = get_nodes()
7522 if not selected_nodes and not drop_satisfied:
7523 drop_satisfied = True
7526 if not selected_nodes and not myblocker_uninstalls.is_empty():
7527 # If possible, drop an uninstall task here in order to avoid
7528 # the circular deps code path. The corresponding blocker will
7529 # still be counted as an unresolved conflict.
7531 for node in myblocker_uninstalls.leaf_nodes():
7533 mygraph.remove(node)
7538 ignored_uninstall_tasks.add(node)
7541 if uninst_task is not None:
7542 # Reset the state variables for leaf node selection and
7543 # continue trying to select leaf nodes.
7545 drop_satisfied = False
7548 if not selected_nodes:
7549 self._circular_deps_for_display = mygraph
7550 raise self._unknown_internal_error()
7552 # At this point, we've succeeded in selecting one or more nodes, so
7553 # reset state variables for leaf node selection.
7555 drop_satisfied = False
7557 mygraph.difference_update(selected_nodes)
7559 for node in selected_nodes:
7560 if isinstance(node, Package) and \
7561 node.operation == "nomerge":
7564 # Handle interactions between blockers
7565 # and uninstallation tasks.
7566 solved_blockers = set()
7568 if isinstance(node, Package) and \
7569 "uninstall" == node.operation:
7570 have_uninstall_task = True
7573 vardb = self.trees[node.root]["vartree"].dbapi
7574 previous_cpv = vardb.match(node.slot_atom)
7576 # The package will be replaced by this one, so remove
7577 # the corresponding Uninstall task if necessary.
7578 previous_cpv = previous_cpv[0]
7580 ("installed", node.root, previous_cpv, "uninstall")
7582 mygraph.remove(uninst_task)
7586 if uninst_task is not None and \
7587 uninst_task not in ignored_uninstall_tasks and \
7588 myblocker_uninstalls.contains(uninst_task):
7589 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7590 myblocker_uninstalls.remove(uninst_task)
7591 # Discard any blockers that this Uninstall solves.
7592 for blocker in blocker_nodes:
7593 if not myblocker_uninstalls.child_nodes(blocker):
7594 myblocker_uninstalls.remove(blocker)
7595 solved_blockers.add(blocker)
7597 retlist.append(node)
7599 if (isinstance(node, Package) and \
7600 "uninstall" == node.operation) or \
7601 (uninst_task is not None and \
7602 uninst_task in scheduled_uninstalls):
7603 # Include satisfied blockers in the merge list
7604 # since the user might be interested and also
7605 # it serves as an indicator that blocking packages
7606 # will be temporarily installed simultaneously.
7607 for blocker in solved_blockers:
7608 retlist.append(Blocker(atom=blocker.atom,
7609 root=blocker.root, eapi=blocker.eapi,
7612 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7613 for node in myblocker_uninstalls.root_nodes():
7614 unsolvable_blockers.add(node)
7616 for blocker in unsolvable_blockers:
7617 retlist.append(blocker)
7619 # If any Uninstall tasks need to be executed in order
7620 # to avoid a conflict, complete the graph with any
7621 # dependencies that may have been initially
7622 # neglected (to ensure that unsafe Uninstall tasks
7623 # are properly identified and blocked from execution).
7624 if have_uninstall_task and \
7626 not unsolvable_blockers:
7627 self.myparams.add("complete")
7628 raise self._serialize_tasks_retry("")
7630 if unsolvable_blockers and \
7631 not self._accept_blocker_conflicts():
7632 self._unsatisfied_blockers_for_display = unsolvable_blockers
7633 self._serialized_tasks_cache = retlist[:]
7634 self._scheduler_graph = scheduler_graph
7635 raise self._unknown_internal_error()
7637 if self._slot_collision_info and \
7638 not self._accept_blocker_conflicts():
7639 self._serialized_tasks_cache = retlist[:]
7640 self._scheduler_graph = scheduler_graph
7641 raise self._unknown_internal_error()
# Success: the fully ordered task list plus the scheduler's graph.
7643 return retlist, scheduler_graph
# Report a circular-dependency failure: shrink the graph to the cycle
# members, display them as a tree (with USE flags visible), then print
# the raw graph and advice on breaking the cycle via USE flags.
7645 def _show_circular_deps(self, mygraph):
7646 # No leaf nodes are available, so we have a circular
7647 # dependency panic situation. Reduce the noise level to a
7648 # minimum via repeated elimination of root nodes since they
7649 # have no parents and thus can not be part of a cycle.
# NOTE(review): the surrounding loop header and exit condition (lines
# 7650, 7653-7654) are missing from this extraction; root-node
# elimination is presumably repeated until none remain. TODO confirm.
7651 root_nodes = mygraph.root_nodes(
7652 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7655 mygraph.difference_update(root_nodes)
7656 # Display the USE flags that are enabled on nodes that are part
7657 # of dependency cycles in case that helps the user decide to
7658 # disable some of them.
7660 tempgraph = mygraph.copy()
7661 while not tempgraph.empty():
7662 nodes = tempgraph.leaf_nodes()
# NOTE(review): lines 7663 and 7665-7666 (no-leaf fallback and node
# selection from `nodes`) are missing from this extraction.
7664 node = tempgraph.order[0]
7667 display_order.append(node)
7668 tempgraph.remove(node)
7669 display_order.reverse()
# Force tree output regardless of the user's verbosity settings so the
# cycle structure is actually visible.
7670 self.myopts.pop("--quiet", None)
7671 self.myopts.pop("--verbose", None)
7672 self.myopts["--tree"] = True
7673 portage.writemsg("\n\n", noiselevel=-1)
7674 self.display(display_order)
7675 prefix = colorize("BAD", " * ")
7676 portage.writemsg("\n", noiselevel=-1)
7677 portage.writemsg(prefix + "Error: circular dependencies:\n",
7679 portage.writemsg("\n", noiselevel=-1)
7680 mygraph.debug_print()
7681 portage.writemsg("\n", noiselevel=-1)
7682 portage.writemsg(prefix + "Note that circular dependencies " + \
7683 "can often be avoided by temporarily\n", noiselevel=-1)
7684 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7685 "optional dependencies.\n", noiselevel=-1)
7687 def _show_merge_list(self):
7688 if self._serialized_tasks_cache is not None and \
7689 not (self._displayed_list and \
7690 (self._displayed_list == self._serialized_tasks_cache or \
7691 self._displayed_list == \
7692 list(reversed(self._serialized_tasks_cache)))):
7693 display_list = self._serialized_tasks_cache[:]
7694 if "--tree" in self.myopts:
7695 display_list.reverse()
7696 self.display(display_list)
# Report unsatisfied blockers: re-show the merge list, print an error
# banner, then for each blocker list the conflicting packages together
# with the parents/atoms that pulled them in (pruned to reduce noise).
7698 def _show_unsatisfied_blockers(self, blockers):
7699 self._show_merge_list()
7700 msg = "Error: The above package list contains " + \
7701 "packages which cannot be installed " + \
7702 "at the same time on the same system."
7703 prefix = colorize("BAD", " * ")
7704 from textwrap import wrap
7705 portage.writemsg("\n", noiselevel=-1)
7706 for line in wrap(msg, 70):
7707 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7709 # Display the conflicting packages along with the packages
7710 # that pulled them in. This is helpful for troubleshooting
7711 # cases in which blockers don't solve automatically and
7712 # the reasons are not apparent from the normal merge list
# NOTE(review): initialisation of conflict_pkgs (around lines
# 7713-7715) is missing from this extraction; it is clearly a dict
# keyed by package, given the usage below.
7716 for blocker in blockers:
7717 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7718 self._blocker_parents.parent_nodes(blocker)):
7719 parent_atoms = self._parent_atoms.get(pkg)
7720 if not parent_atoms:
7721 atom = self._blocked_world_pkgs.get(pkg)
7722 if atom is not None:
7723 parent_atoms = set([("@world", atom)])
7725 conflict_pkgs[pkg] = parent_atoms
7728 # Reduce noise by pruning packages that are only
7729 # pulled in by other conflict packages.
7731 for pkg, parent_atoms in conflict_pkgs.iteritems():
7732 relevant_parent = False
7733 for parent, atom in parent_atoms:
7734 if parent not in conflict_pkgs:
7735 relevant_parent = True
7737 if not relevant_parent:
7738 pruned_pkgs.add(pkg)
7739 for pkg in pruned_pkgs:
7740 del conflict_pkgs[pkg]
# NOTE(review): initialisation of msg (as a list), indent and
# max_parents (lines 7741-7747) is missing from this extraction.
7746 # Max number of parents shown, to avoid flooding the display.
7748 for pkg, parent_atoms in conflict_pkgs.iteritems():
7752 # Prefer packages that are not directly involved in a conflict.
7753 for parent_atom in parent_atoms:
7754 if len(pruned_list) >= max_parents:
7756 parent, atom = parent_atom
7757 if parent not in conflict_pkgs:
7758 pruned_list.add(parent_atom)
# Second pass fills remaining display slots with any parents at all.
7760 for parent_atom in parent_atoms:
7761 if len(pruned_list) >= max_parents:
7763 pruned_list.add(parent_atom)
7765 omitted_parents = len(parent_atoms) - len(pruned_list)
7766 msg.append(indent + "%s pulled in by\n" % pkg)
7768 for parent_atom in pruned_list:
7769 parent, atom = parent_atom
7770 msg.append(2*indent)
7771 if isinstance(parent,
7772 (PackageArg, AtomArg)):
7773 # For PackageArg and AtomArg types, it's
7774 # redundant to display the atom attribute.
7775 msg.append(str(parent))
7777 # Display the specific atom from SetArg or
7779 msg.append("%s required by %s" % (atom, parent))
7783 msg.append(2*indent)
7784 msg.append("(and %d more)\n" % omitted_parents)
7788 sys.stderr.write("".join(msg))
7791 if "--quiet" not in self.myopts:
7792 show_blocker_docs_link()
7794 def display(self, mylist, favorites=[], verbosity=None):
7796 # This is used to prevent display_problems() from
7797 # redundantly displaying this exact same merge list
7798 # again via _show_merge_list().
7799 self._displayed_list = mylist
7801 if verbosity is None:
7802 verbosity = ("--quiet" in self.myopts and 1 or \
7803 "--verbose" in self.myopts and 3 or 2)
7804 favorites_set = InternalPackageSet(favorites)
7805 oneshot = "--oneshot" in self.myopts or \
7806 "--onlydeps" in self.myopts
7807 columns = "--columns" in self.myopts
7812 counters = PackageCounters()
7814 if verbosity == 1 and "--verbose" not in self.myopts:
7815 def create_use_string(*args):
7818 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7820 is_new, reinst_flags,
7821 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7822 alphabetical=("--alphabetical" in self.myopts)):
7830 cur_iuse = set(cur_iuse)
7831 enabled_flags = cur_iuse.intersection(cur_use)
7832 removed_iuse = set(old_iuse).difference(cur_iuse)
7833 any_iuse = cur_iuse.union(old_iuse)
7834 any_iuse = list(any_iuse)
7836 for flag in any_iuse:
7839 reinst_flag = reinst_flags and flag in reinst_flags
7840 if flag in enabled_flags:
7842 if is_new or flag in old_use and \
7843 (all_flags or reinst_flag):
7844 flag_str = red(flag)
7845 elif flag not in old_iuse:
7846 flag_str = yellow(flag) + "%*"
7847 elif flag not in old_use:
7848 flag_str = green(flag) + "*"
7849 elif flag in removed_iuse:
7850 if all_flags or reinst_flag:
7851 flag_str = yellow("-" + flag) + "%"
7854 flag_str = "(" + flag_str + ")"
7855 removed.append(flag_str)
7858 if is_new or flag in old_iuse and \
7859 flag not in old_use and \
7860 (all_flags or reinst_flag):
7861 flag_str = blue("-" + flag)
7862 elif flag not in old_iuse:
7863 flag_str = yellow("-" + flag)
7864 if flag not in iuse_forced:
7866 elif flag in old_use:
7867 flag_str = green("-" + flag) + "*"
7869 if flag in iuse_forced:
7870 flag_str = "(" + flag_str + ")"
7872 enabled.append(flag_str)
7874 disabled.append(flag_str)
7877 ret = " ".join(enabled)
7879 ret = " ".join(enabled + disabled + removed)
7881 ret = '%s="%s" ' % (name, ret)
7884 repo_display = RepoDisplay(self.roots)
7888 mygraph = self.digraph.copy()
7890 # If there are any Uninstall instances, add the corresponding
7891 # blockers to the digraph (useful for --tree display).
7893 executed_uninstalls = set(node for node in mylist \
7894 if isinstance(node, Package) and node.operation == "unmerge")
7896 for uninstall in self._blocker_uninstalls.leaf_nodes():
7897 uninstall_parents = \
7898 self._blocker_uninstalls.parent_nodes(uninstall)
7899 if not uninstall_parents:
7902 # Remove the corresponding "nomerge" node and substitute
7903 # the Uninstall node.
7904 inst_pkg = self._pkg_cache[
7905 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7907 mygraph.remove(inst_pkg)
7912 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7914 inst_pkg_blockers = []
7916 # Break the Package -> Uninstall edges.
7917 mygraph.remove(uninstall)
7919 # Resolution of a package's blockers
7920 # depends on its own uninstallation.
7921 for blocker in inst_pkg_blockers:
7922 mygraph.add(uninstall, blocker)
7924 # Expand Package -> Uninstall edges into
7925 # Package -> Blocker -> Uninstall edges.
7926 for blocker in uninstall_parents:
7927 mygraph.add(uninstall, blocker)
7928 for parent in self._blocker_parents.parent_nodes(blocker):
7929 if parent != inst_pkg:
7930 mygraph.add(blocker, parent)
7932 # If the uninstall task did not need to be executed because
7933 # of an upgrade, display Blocker -> Upgrade edges since the
7934 # corresponding Blocker -> Uninstall edges will not be shown.
7936 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7937 if upgrade_node is not None and \
7938 uninstall not in executed_uninstalls:
7939 for blocker in uninstall_parents:
7940 mygraph.add(upgrade_node, blocker)
7942 unsatisfied_blockers = []
7947 if isinstance(x, Blocker) and not x.satisfied:
7948 unsatisfied_blockers.append(x)
7951 if "--tree" in self.myopts:
7952 depth = len(tree_nodes)
7953 while depth and graph_key not in \
7954 mygraph.child_nodes(tree_nodes[depth-1]):
7957 tree_nodes = tree_nodes[:depth]
7958 tree_nodes.append(graph_key)
7959 display_list.append((x, depth, True))
7960 shown_edges.add((graph_key, tree_nodes[depth-1]))
7962 traversed_nodes = set() # prevent endless circles
7963 traversed_nodes.add(graph_key)
7964 def add_parents(current_node, ordered):
7966 # Do not traverse to parents if this node is an
7967 # an argument or a direct member of a set that has
7968 # been specified as an argument (system or world).
7969 if current_node not in self._set_nodes:
7970 parent_nodes = mygraph.parent_nodes(current_node)
7972 child_nodes = set(mygraph.child_nodes(current_node))
7973 selected_parent = None
7974 # First, try to avoid a direct cycle.
7975 for node in parent_nodes:
7976 if not isinstance(node, (Blocker, Package)):
7978 if node not in traversed_nodes and \
7979 node not in child_nodes:
7980 edge = (current_node, node)
7981 if edge in shown_edges:
7983 selected_parent = node
7985 if not selected_parent:
7986 # A direct cycle is unavoidable.
7987 for node in parent_nodes:
7988 if not isinstance(node, (Blocker, Package)):
7990 if node not in traversed_nodes:
7991 edge = (current_node, node)
7992 if edge in shown_edges:
7994 selected_parent = node
7997 shown_edges.add((current_node, selected_parent))
7998 traversed_nodes.add(selected_parent)
7999 add_parents(selected_parent, False)
8000 display_list.append((current_node,
8001 len(tree_nodes), ordered))
8002 tree_nodes.append(current_node)
8004 add_parents(graph_key, True)
8006 display_list.append((x, depth, True))
8007 mylist = display_list
8008 for x in unsatisfied_blockers:
8009 mylist.append((x, 0, True))
8011 last_merge_depth = 0
8012 for i in xrange(len(mylist)-1,-1,-1):
8013 graph_key, depth, ordered = mylist[i]
8014 if not ordered and depth == 0 and i > 0 \
8015 and graph_key == mylist[i-1][0] and \
8016 mylist[i-1][1] == 0:
8017 # An ordered node got a consecutive duplicate when the tree was
8021 if ordered and graph_key[-1] != "nomerge":
8022 last_merge_depth = depth
8024 if depth >= last_merge_depth or \
8025 i < len(mylist) - 1 and \
8026 depth >= mylist[i+1][1]:
8029 from portage import flatten
8030 from portage.dep import use_reduce, paren_reduce
8031 # files to fetch list - avoids counting a same file twice
8032 # in size display (verbose mode)
8035 # Use this set to detect when all the "repoadd" strings are "[0]"
8036 # and disable the entire repo display in this case.
8039 for mylist_index in xrange(len(mylist)):
8040 x, depth, ordered = mylist[mylist_index]
8044 portdb = self.trees[myroot]["porttree"].dbapi
8045 bindb = self.trees[myroot]["bintree"].dbapi
8046 vardb = self.trees[myroot]["vartree"].dbapi
8047 vartree = self.trees[myroot]["vartree"]
8048 pkgsettings = self.pkgsettings[myroot]
8051 indent = " " * depth
8053 if isinstance(x, Blocker):
8055 blocker_style = "PKG_BLOCKER_SATISFIED"
8056 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
8058 blocker_style = "PKG_BLOCKER"
8059 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
8061 counters.blocks += 1
8063 counters.blocks_satisfied += 1
8064 resolved = portage.key_expand(
8065 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8066 if "--columns" in self.myopts and "--quiet" in self.myopts:
8067 addl += " " + colorize(blocker_style, resolved)
8069 addl = "[%s %s] %s%s" % \
8070 (colorize(blocker_style, "blocks"),
8071 addl, indent, colorize(blocker_style, resolved))
8072 block_parents = self._blocker_parents.parent_nodes(x)
8073 block_parents = set([pnode[2] for pnode in block_parents])
8074 block_parents = ", ".join(block_parents)
8076 addl += colorize(blocker_style,
8077 " (\"%s\" is blocking %s)") % \
8078 (str(x.atom).lstrip("!"), block_parents)
8080 addl += colorize(blocker_style,
8081 " (is blocking %s)") % block_parents
8082 if isinstance(x, Blocker) and x.satisfied:
8087 blockers.append(addl)
8090 pkg_merge = ordered and pkg_status == "merge"
8091 if not pkg_merge and pkg_status == "merge":
8092 pkg_status = "nomerge"
8093 built = pkg_type != "ebuild"
8094 installed = pkg_type == "installed"
8096 metadata = pkg.metadata
8098 repo_name = metadata["repository"]
8099 if pkg_type == "ebuild":
8100 ebuild_path = portdb.findname(pkg_key)
8101 if not ebuild_path: # shouldn't happen
8102 raise portage.exception.PackageNotFound(pkg_key)
8103 repo_path_real = os.path.dirname(os.path.dirname(
8104 os.path.dirname(ebuild_path)))
8106 repo_path_real = portdb.getRepositoryPath(repo_name)
8107 pkg_use = list(pkg.use.enabled)
8109 restrict = flatten(use_reduce(paren_reduce(
8110 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8111 except portage.exception.InvalidDependString, e:
8112 if not pkg.installed:
8113 show_invalid_depstring_notice(x,
8114 pkg.metadata["RESTRICT"], str(e))
8118 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8119 "fetch" in restrict:
8122 counters.restrict_fetch += 1
8123 if portdb.fetch_check(pkg_key, pkg_use):
8126 counters.restrict_fetch_satisfied += 1
8128 #we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
8129 #param is used for -u, where you still *do* want to see when something is being upgraded.
8132 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8133 if vardb.cpv_exists(pkg_key):
8134 addl=" "+yellow("R")+fetch+" "
8137 counters.reinst += 1
8138 elif pkg_status == "uninstall":
8139 counters.uninst += 1
8140 # filter out old-style virtual matches
8141 elif installed_versions and \
8142 portage.cpv_getkey(installed_versions[0]) == \
8143 portage.cpv_getkey(pkg_key):
8144 myinslotlist = vardb.match(pkg.slot_atom)
8145 # If this is the first install of a new-style virtual, we
8146 # need to filter out old-style virtual matches.
8147 if myinslotlist and \
8148 portage.cpv_getkey(myinslotlist[0]) != \
8149 portage.cpv_getkey(pkg_key):
8152 myoldbest = myinslotlist[:]
8154 if not portage.dep.cpvequal(pkg_key,
8155 portage.best([pkg_key] + myoldbest)):
8157 addl += turquoise("U")+blue("D")
8159 counters.downgrades += 1
8162 addl += turquoise("U") + " "
8164 counters.upgrades += 1
8166 # New slot, mark it new.
8167 addl = " " + green("NS") + fetch + " "
8168 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8170 counters.newslot += 1
8172 if "--changelog" in self.myopts:
8173 inst_matches = vardb.match(pkg.slot_atom)
8175 changelogs.extend(self.calc_changelog(
8176 portdb.findname(pkg_key),
8177 inst_matches[0], pkg_key))
8179 addl = " " + green("N") + " " + fetch + " "
8188 forced_flags = set()
8189 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8190 forced_flags.update(pkgsettings.useforce)
8191 forced_flags.update(pkgsettings.usemask)
8193 cur_use = [flag for flag in pkg.use.enabled \
8194 if flag in pkg.iuse.all]
8195 cur_iuse = sorted(pkg.iuse.all)
8197 if myoldbest and myinslotlist:
8198 previous_cpv = myoldbest[0]
8200 previous_cpv = pkg.cpv
8201 if vardb.cpv_exists(previous_cpv):
8202 old_iuse, old_use = vardb.aux_get(
8203 previous_cpv, ["IUSE", "USE"])
8204 old_iuse = list(set(
8205 filter_iuse_defaults(old_iuse.split())))
8207 old_use = old_use.split()
8214 old_use = [flag for flag in old_use if flag in old_iuse]
8216 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8218 use_expand.reverse()
8219 use_expand_hidden = \
8220 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8222 def map_to_use_expand(myvals, forcedFlags=False,
8226 for exp in use_expand:
8229 for val in myvals[:]:
8230 if val.startswith(exp.lower()+"_"):
8231 if val in forced_flags:
8232 forced[exp].add(val[len(exp)+1:])
8233 ret[exp].append(val[len(exp)+1:])
8236 forced["USE"] = [val for val in myvals \
8237 if val in forced_flags]
8239 for exp in use_expand_hidden:
8245 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8246 # are the only thing that triggered reinstallation.
8247 reinst_flags_map = {}
8248 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8249 reinst_expand_map = None
8250 if reinstall_for_flags:
8251 reinst_flags_map = map_to_use_expand(
8252 list(reinstall_for_flags), removeHidden=False)
8253 for k in list(reinst_flags_map):
8254 if not reinst_flags_map[k]:
8255 del reinst_flags_map[k]
8256 if not reinst_flags_map.get("USE"):
8257 reinst_expand_map = reinst_flags_map.copy()
8258 reinst_expand_map.pop("USE", None)
8259 if reinst_expand_map and \
8260 not set(reinst_expand_map).difference(
8262 use_expand_hidden = \
8263 set(use_expand_hidden).difference(
8266 cur_iuse_map, iuse_forced = \
8267 map_to_use_expand(cur_iuse, forcedFlags=True)
8268 cur_use_map = map_to_use_expand(cur_use)
8269 old_iuse_map = map_to_use_expand(old_iuse)
8270 old_use_map = map_to_use_expand(old_use)
8273 use_expand.insert(0, "USE")
8275 for key in use_expand:
8276 if key in use_expand_hidden:
8278 verboseadd += create_use_string(key.upper(),
8279 cur_iuse_map[key], iuse_forced[key],
8280 cur_use_map[key], old_iuse_map[key],
8281 old_use_map[key], is_new,
8282 reinst_flags_map.get(key))
8287 if pkg_type == "ebuild" and pkg_merge:
8289 myfilesdict = portdb.getfetchsizes(pkg_key,
8290 useflags=pkg_use, debug=self.edebug)
8291 except portage.exception.InvalidDependString, e:
8292 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8293 show_invalid_depstring_notice(x, src_uri, str(e))
8296 if myfilesdict is None:
8297 myfilesdict="[empty/missing/bad digest]"
8299 for myfetchfile in myfilesdict:
8300 if myfetchfile not in myfetchlist:
8301 mysize+=myfilesdict[myfetchfile]
8302 myfetchlist.append(myfetchfile)
8304 counters.totalsize += mysize
8305 verboseadd += format_size(mysize)
8308 # assign index for a previous version in the same slot
8309 has_previous = False
8310 repo_name_prev = None
8311 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8313 slot_matches = vardb.match(slot_atom)
8316 repo_name_prev = vardb.aux_get(slot_matches[0],
8319 # now use the data to generate output
8320 if pkg.installed or not has_previous:
8321 repoadd = repo_display.repoStr(repo_path_real)
8323 repo_path_prev = None
8325 repo_path_prev = portdb.getRepositoryPath(
8327 if repo_path_prev == repo_path_real:
8328 repoadd = repo_display.repoStr(repo_path_real)
8330 repoadd = "%s=>%s" % (
8331 repo_display.repoStr(repo_path_prev),
8332 repo_display.repoStr(repo_path_real))
8334 repoadd_set.add(repoadd)
8336 xs = [portage.cpv_getkey(pkg_key)] + \
8337 list(portage.catpkgsplit(pkg_key)[2:])
8344 if "COLUMNWIDTH" in self.settings:
8346 mywidth = int(self.settings["COLUMNWIDTH"])
8347 except ValueError, e:
8348 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8350 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8351 self.settings["COLUMNWIDTH"], noiselevel=-1)
8353 oldlp = mywidth - 30
8356 # Convert myoldbest from a list to a string.
8360 for pos, key in enumerate(myoldbest):
8361 key = portage.catpkgsplit(key)[2] + \
8362 "-" + portage.catpkgsplit(key)[3]
8363 if key[-3:] == "-r0":
8365 myoldbest[pos] = key
8366 myoldbest = blue("["+", ".join(myoldbest)+"]")
8369 root_config = self.roots[myroot]
8370 system_set = root_config.sets["system"]
8371 world_set = root_config.sets["world"]
8376 pkg_system = system_set.findAtomForPackage(pkg)
8377 pkg_world = world_set.findAtomForPackage(pkg)
8378 if not (oneshot or pkg_world) and \
8379 myroot == self.target_root and \
8380 favorites_set.findAtomForPackage(pkg):
8381 # Maybe it will be added to world now.
8382 if create_world_atom(pkg, favorites_set, root_config):
8384 except portage.exception.InvalidDependString:
8385 # This is reported elsewhere if relevant.
8388 def pkgprint(pkg_str):
8391 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8393 return colorize("PKG_MERGE_WORLD", pkg_str)
8395 return colorize("PKG_MERGE", pkg_str)
8396 elif pkg_status == "uninstall":
8397 return colorize("PKG_UNINSTALL", pkg_str)
8400 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8402 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8404 return colorize("PKG_NOMERGE", pkg_str)
8407 properties = flatten(use_reduce(paren_reduce(
8408 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8409 except portage.exception.InvalidDependString, e:
8410 if not pkg.installed:
8411 show_invalid_depstring_notice(pkg,
8412 pkg.metadata["PROPERTIES"], str(e))
8416 interactive = "interactive" in properties
8417 if interactive and pkg.operation == "merge":
8418 addl = colorize("WARN", "I") + addl[1:]
8420 counters.interactive += 1
8425 if "--columns" in self.myopts:
8426 if "--quiet" in self.myopts:
8427 myprint=addl+" "+indent+pkgprint(pkg_cp)
8428 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8429 myprint=myprint+myoldbest
8430 myprint=myprint+darkgreen("to "+x[1])
8434 myprint = "[%s] %s%s" % \
8435 (pkgprint(pkg_status.ljust(13)),
8436 indent, pkgprint(pkg.cp))
8438 myprint = "[%s %s] %s%s" % \
8439 (pkgprint(pkg.type_name), addl,
8440 indent, pkgprint(pkg.cp))
8441 if (newlp-nc_len(myprint)) > 0:
8442 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8443 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8444 if (oldlp-nc_len(myprint)) > 0:
8445 myprint=myprint+" "*(oldlp-nc_len(myprint))
8446 myprint=myprint+myoldbest
8447 myprint += darkgreen("to " + pkg.root)
8450 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8452 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8453 myprint += indent + pkgprint(pkg_key) + " " + \
8454 myoldbest + darkgreen("to " + myroot)
8456 if "--columns" in self.myopts:
8457 if "--quiet" in self.myopts:
8458 myprint=addl+" "+indent+pkgprint(pkg_cp)
8459 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8460 myprint=myprint+myoldbest
8464 myprint = "[%s] %s%s" % \
8465 (pkgprint(pkg_status.ljust(13)),
8466 indent, pkgprint(pkg.cp))
8468 myprint = "[%s %s] %s%s" % \
8469 (pkgprint(pkg.type_name), addl,
8470 indent, pkgprint(pkg.cp))
8471 if (newlp-nc_len(myprint)) > 0:
8472 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8473 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8474 if (oldlp-nc_len(myprint)) > 0:
8475 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8476 myprint += myoldbest
8479 myprint = "[%s] %s%s %s" % \
8480 (pkgprint(pkg_status.ljust(13)),
8481 indent, pkgprint(pkg.cpv),
8484 myprint = "[%s %s] %s%s %s" % \
8485 (pkgprint(pkg_type), addl, indent,
8486 pkgprint(pkg.cpv), myoldbest)
8488 if columns and pkg.operation == "uninstall":
8490 p.append((myprint, verboseadd, repoadd))
8492 if "--tree" not in self.myopts and \
8493 "--quiet" not in self.myopts and \
8494 not self._opts_no_restart.intersection(self.myopts) and \
8495 pkg.root == self._running_root.root and \
8496 portage.match_from_list(
8497 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8498 not vardb.cpv_exists(pkg.cpv) and \
8499 "--quiet" not in self.myopts:
8500 if mylist_index < len(mylist) - 1:
8501 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8502 p.append(colorize("WARN", " then resume the merge."))
8505 show_repos = repoadd_set and repoadd_set != set(["0"])
8508 if isinstance(x, basestring):
8509 out.write("%s\n" % (x,))
8512 myprint, verboseadd, repoadd = x
8515 myprint += " " + verboseadd
8517 if show_repos and repoadd:
8518 myprint += " " + teal("[%s]" % repoadd)
8520 out.write("%s\n" % (myprint,))
8529 sys.stdout.write(str(repo_display))
8531 if "--changelog" in self.myopts:
8533 for revision,text in changelogs:
8534 print bold('*'+revision)
8535 sys.stdout.write(text)
8540 def display_problems(self):
8542 Display problems with the dependency graph such as slot collisions.
8543 This is called internally by display() to show the problems _after_
8544 the merge list where it is most likely to be seen, but if display()
8545 is not going to be called then this method should be called explicitly
8546 to ensure that the user is notified of problems with the graph.
8548 All output goes to stderr, except for unsatisfied dependencies which
8549 go to stdout for parsing by programs such as autounmask.
8552 # Note that show_masked_packages() sends its output to
8553 # stdout, and some programs such as autounmask parse the
8554 # output in cases when emerge bails out. However, when
8555 # show_masked_packages() is called for installed packages
8556 # here, the message is a warning that is more appropriate
8557 # to send to stderr, so temporarily redirect stdout to
8558 # stderr. TODO: Fix output code so there's a cleaner way
8559 # to redirect everything to stderr.
8564 sys.stdout = sys.stderr
8565 self._display_problems()
8571 # This goes to stdout for parsing by programs like autounmask.
8572 for pargs, kwargs in self._unsatisfied_deps_for_display:
8573 self._show_unsatisfied_dep(*pargs, **kwargs)
8575 def _display_problems(self):
8576 if self._circular_deps_for_display is not None:
8577 self._show_circular_deps(
8578 self._circular_deps_for_display)
8580 # The user is only notified of a slot conflict if
8581 # there are no unresolvable blocker conflicts.
8582 if self._unsatisfied_blockers_for_display is not None:
8583 self._show_unsatisfied_blockers(
8584 self._unsatisfied_blockers_for_display)
8586 self._show_slot_collision_notice()
8588 # TODO: Add generic support for "set problem" handlers so that
8589 # the below warnings aren't special cases for world only.
8591 if self._missing_args:
8592 world_problems = False
8593 if "world" in self._sets:
8594 # Filter out indirect members of world (from nested sets)
8595 # since only direct members of world are desired here.
8596 world_set = self.roots[self.target_root].sets["world"]
8597 for arg, atom in self._missing_args:
8598 if arg.name == "world" and atom in world_set:
8599 world_problems = True
8603 sys.stderr.write("\n!!! Problems have been " + \
8604 "detected with your world file\n")
8605 sys.stderr.write("!!! Please run " + \
8606 green("emaint --check world")+"\n\n")
8608 if self._missing_args:
8609 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8610 " Ebuilds for the following packages are either all\n")
8611 sys.stderr.write(colorize("BAD", "!!!") + \
8612 " masked or don't exist:\n")
8613 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8614 self._missing_args) + "\n")
8616 if self._pprovided_args:
8618 for arg, atom in self._pprovided_args:
8619 if isinstance(arg, SetArg):
8621 arg_atom = (atom, atom)
8624 arg_atom = (arg.arg, atom)
8625 refs = arg_refs.setdefault(arg_atom, [])
8626 if parent not in refs:
8629 msg.append(bad("\nWARNING: "))
8630 if len(self._pprovided_args) > 1:
8631 msg.append("Requested packages will not be " + \
8632 "merged because they are listed in\n")
8634 msg.append("A requested package will not be " + \
8635 "merged because it is listed in\n")
8636 msg.append("package.provided:\n\n")
8637 problems_sets = set()
8638 for (arg, atom), refs in arg_refs.iteritems():
8641 problems_sets.update(refs)
8643 ref_string = ", ".join(["'%s'" % name for name in refs])
8644 ref_string = " pulled in by " + ref_string
8645 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8647 if "world" in problems_sets:
8648 msg.append("This problem can be solved in one of the following ways:\n\n")
8649 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8650 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8651 msg.append(" C) Remove offending entries from package.provided.\n\n")
8652 msg.append("The best course of action depends on the reason that an offending\n")
8653 msg.append("package.provided entry exists.\n\n")
8654 sys.stderr.write("".join(msg))
8656 masked_packages = []
8657 for pkg in self._masked_installed:
8658 root_config = pkg.root_config
8659 pkgsettings = self.pkgsettings[pkg.root]
8660 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8661 masked_packages.append((root_config, pkgsettings,
8662 pkg.cpv, pkg.metadata, mreasons))
8664 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8665 " The following installed packages are masked:\n")
8666 show_masked_packages(masked_packages)
8670 def calc_changelog(self,ebuildpath,current,next):
8671 if ebuildpath == None or not os.path.exists(ebuildpath):
8673 current = '-'.join(portage.catpkgsplit(current)[1:])
8674 if current.endswith('-r0'):
8675 current = current[:-3]
8676 next = '-'.join(portage.catpkgsplit(next)[1:])
8677 if next.endswith('-r0'):
8679 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8681 changelog = open(changelogpath).read()
8682 except SystemExit, e:
8683 raise # Needed else can't exit
8686 divisions = self.find_changelog_tags(changelog)
8687 #print 'XX from',current,'to',next
8688 #for div,text in divisions: print 'XX',div
8689 # skip entries for all revisions above the one we are about to emerge
8690 for i in range(len(divisions)):
8691 if divisions[i][0]==next:
8692 divisions = divisions[i:]
8694 # find out how many entries we are going to display
8695 for i in range(len(divisions)):
8696 if divisions[i][0]==current:
8697 divisions = divisions[:i]
8700 # couldn't find the current revision in the list. display nothing
8704 def find_changelog_tags(self,changelog):
8708 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8710 if release is not None:
8711 divs.append((release,changelog))
8713 if release is not None:
8714 divs.append((release,changelog[:match.start()]))
8715 changelog = changelog[match.end():]
8716 release = match.group(1)
8717 if release.endswith('.ebuild'):
8718 release = release[:-7]
8719 if release.endswith('-r0'):
8720 release = release[:-3]
8722 def saveNomergeFavorites(self):
8723 """Find atoms in favorites that are not in the mergelist and add them
8724 to the world file if necessary."""
8725 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8726 "--oneshot", "--onlydeps", "--pretend"):
8727 if x in self.myopts:
8729 root_config = self.roots[self.target_root]
8730 world_set = root_config.sets["world"]
8732 world_locked = False
8733 if hasattr(world_set, "lock"):
8737 if hasattr(world_set, "load"):
8738 world_set.load() # maybe it's changed on disk
8740 args_set = self._sets["args"]
8741 portdb = self.trees[self.target_root]["porttree"].dbapi
8742 added_favorites = set()
8743 for x in self._set_nodes:
8744 pkg_type, root, pkg_key, pkg_status = x
8745 if pkg_status != "nomerge":
8749 myfavkey = create_world_atom(x, args_set, root_config)
8751 if myfavkey in added_favorites:
8753 added_favorites.add(myfavkey)
8754 except portage.exception.InvalidDependString, e:
8755 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8756 (pkg_key, str(e)), noiselevel=-1)
8757 writemsg("!!! see '%s'\n\n" % os.path.join(
8758 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8761 for k in self._sets:
8762 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8767 all_added.append(SETPREFIX + k)
8768 all_added.extend(added_favorites)
8771 print ">>> Recording %s in \"world\" favorites file..." % \
8772 colorize("INFORM", str(a))
8774 world_set.update(all_added)
8779 def loadResumeCommand(self, resume_data, skip_masked=True,
8782 Add a resume command to the graph and validate it in the process. This
8783 will raise a PackageNotFound exception if a package is not available.
8786 if not isinstance(resume_data, dict):
8789 mergelist = resume_data.get("mergelist")
8790 if not isinstance(mergelist, list):
8793 fakedb = self.mydbapi
8795 serialized_tasks = []
8798 if not (isinstance(x, list) and len(x) == 4):
8800 pkg_type, myroot, pkg_key, action = x
8801 if pkg_type not in self.pkg_tree_map:
8803 if action != "merge":
8805 tree_type = self.pkg_tree_map[pkg_type]
8806 mydb = trees[myroot][tree_type].dbapi
8807 db_keys = list(self._trees_orig[myroot][
8808 tree_type].dbapi._aux_cache_keys)
8810 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8812 # It does not exist or it is corrupt.
8813 if action == "uninstall":
8816 # TODO: log these somewhere
8818 raise portage.exception.PackageNotFound(pkg_key)
8819 installed = action == "uninstall"
8820 built = pkg_type != "ebuild"
8821 root_config = self.roots[myroot]
8822 pkg = Package(built=built, cpv=pkg_key,
8823 installed=installed, metadata=metadata,
8824 operation=action, root_config=root_config,
8826 if pkg_type == "ebuild":
8827 pkgsettings = self.pkgsettings[myroot]
8828 pkgsettings.setcpv(pkg)
8829 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8830 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8831 self._pkg_cache[pkg] = pkg
8833 root_config = self.roots[pkg.root]
8834 if "merge" == pkg.operation and \
8835 not visible(root_config.settings, pkg):
8837 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8839 self._unsatisfied_deps_for_display.append(
8840 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8842 fakedb[myroot].cpv_inject(pkg)
8843 serialized_tasks.append(pkg)
8844 self.spinner.update()
8846 if self._unsatisfied_deps_for_display:
8849 if not serialized_tasks or "--nodeps" in self.myopts:
8850 self._serialized_tasks_cache = serialized_tasks
8851 self._scheduler_graph = self.digraph
8853 self._select_package = self._select_pkg_from_graph
8854 self.myparams.add("selective")
8855 # Always traverse deep dependencies in order to account for
8856 # potentially unsatisfied dependencies of installed packages.
8857 # This is necessary for correct --keep-going or --resume operation
8858 # in case a package from a group of circularly dependent packages
8859 # fails. In this case, a package which has recently been installed
8860 # may have an unsatisfied circular dependency (pulled in by
8861 # PDEPEND, for example). So, even though a package is already
8862 # installed, it may not have all of its dependencies satisfied, so
8863 # it may not be usable. If such a package is in the subgraph of
8864 # deep dependencies of a scheduled build, that build needs to
8865 # be cancelled. In order for this type of situation to be
8866 # recognized, deep traversal of dependencies is required.
8867 self.myparams.add("deep")
8869 favorites = resume_data.get("favorites")
8870 args_set = self._sets["args"]
8871 if isinstance(favorites, list):
8872 args = self._load_favorites(favorites)
8876 for task in serialized_tasks:
8877 if isinstance(task, Package) and \
8878 task.operation == "merge":
8879 if not self._add_pkg(task, None):
8882 # Packages for argument atoms need to be explicitly
8883 # added via _add_pkg() so that they are included in the
8884 # digraph (needed at least for --tree display).
8886 for atom in arg.set:
8887 pkg, existing_node = self._select_package(
8888 arg.root_config.root, atom)
8889 if existing_node is None and \
8891 if not self._add_pkg(pkg, Dependency(atom=atom,
8892 root=pkg.root, parent=arg)):
8895 # Allow unsatisfied deps here to avoid showing a masking
8896 # message for an unsatisfied dep that isn't necessarily
8898 if not self._create_graph(allow_unsatisfied=True):
8901 unsatisfied_deps = []
8902 for dep in self._unsatisfied_deps:
8903 if not isinstance(dep.parent, Package):
8905 if dep.parent.operation == "merge":
8906 unsatisfied_deps.append(dep)
8909 # For unsatisfied deps of installed packages, only account for
8910 # them if they are in the subgraph of dependencies of a package
8911 # which is scheduled to be installed.
8912 unsatisfied_install = False
8914 dep_stack = self.digraph.parent_nodes(dep.parent)
8916 node = dep_stack.pop()
8917 if not isinstance(node, Package):
8919 if node.operation == "merge":
8920 unsatisfied_install = True
8922 if node in traversed:
8925 dep_stack.extend(self.digraph.parent_nodes(node))
8927 if unsatisfied_install:
8928 unsatisfied_deps.append(dep)
8930 if masked_tasks or unsatisfied_deps:
8931 # This probably means that a required package
8932 # was dropped via --skipfirst. It makes the
8933 # resume list invalid, so convert it to a
8934 # UnsatisfiedResumeDep exception.
8935 raise self.UnsatisfiedResumeDep(self,
8936 masked_tasks + unsatisfied_deps)
8937 self._serialized_tasks_cache = None
8940 except self._unknown_internal_error:
8945 def _load_favorites(self, favorites):
8947 Use a list of favorites to resume state from a
8948 previous select_files() call. This creates similar
8949 DependencyArg instances to those that would have
8950 been created by the original select_files() call.
8951 This allows Package instances to be matched with
8952 DependencyArg instances during graph creation.
8954 root_config = self.roots[self.target_root]
8955 getSetAtoms = root_config.setconfig.getSetAtoms
8956 sets = root_config.sets
8959 if not isinstance(x, basestring):
8961 if x in ("system", "world"):
8963 if x.startswith(SETPREFIX):
8964 s = x[len(SETPREFIX):]
8969 # Recursively expand sets so that containment tests in
8970 # self._get_parent_sets() properly match atoms in nested
8971 # sets (like if world contains system).
8972 expanded_set = InternalPackageSet(
8973 initial_atoms=getSetAtoms(s))
8974 self._sets[s] = expanded_set
8975 args.append(SetArg(arg=x, set=expanded_set,
8976 root_config=root_config))
8978 if not portage.isvalidatom(x):
8980 args.append(AtomArg(arg=x, atom=x,
8981 root_config=root_config))
8983 self._set_args(args)
8986 class UnsatisfiedResumeDep(portage.exception.PortageException):
8988 A dependency of a resume list is not installed. This
8989 can occur when a required package is dropped from the
8990 merge list via --skipfirst.
8992 def __init__(self, depgraph, value):
8993 portage.exception.PortageException.__init__(self, value)
8994 self.depgraph = depgraph
	class _internal_exception(portage.exception.PortageException):
		"""
		Base class for exceptions the depgraph raises internally for
		control flow (see _unknown_internal_error and
		_serialize_tasks_retry, which derive from it).
		"""
		def __init__(self, value=""):
			# Message defaults to empty: these exceptions exist for
			# control flow, not for user-facing error reporting.
			portage.exception.PortageException.__init__(self, value)
9000 class _unknown_internal_error(_internal_exception):
9002 Used by the depgraph internally to terminate graph creation.
9003 The specific reason for the failure should have been dumped
9004 to stderr, unfortunately, the exact reason for the failure
9008 class _serialize_tasks_retry(_internal_exception):
9010 This is raised by the _serialize_tasks() method when it needs to
9011 be called again for some reason. The only case that it's currently
9012 used for is when neglected dependencies need to be added to the
9013 graph in order to avoid making a potentially unsafe decision.
class _dep_check_composite_db(portage.dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    in dep_check().
    """
    def __init__(self, depgraph, root):
        portage.dbapi.__init__(self)
        # The depgraph whose package selection logic backs this view.
        self._depgraph = depgraph
        # Caches: atom -> match() result, and cpv -> Package instance
        # (the latter serves aux_get() lookups).
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def _clear_cache(self):
        # Invalidate both caches together so they stay consistent.
        self._match_cache.clear()
        self._cpv_pkg_map.clear()

    def match(self, atom):
        # Consult the per-atom cache before doing any real work.
        ret = self._match_cache.get(atom)
        atom = self._dep_expand(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # Return the highest available from select_package() as well as
        # any matching slots in the graph db.
        slots.add(pkg.metadata["SLOT"])
        atom_cp = portage.dep_getkey(atom)
        if pkg.cp.startswith("virtual/"):
            # For new-style virtual lookahead that occurs inside
            # dep_check(), examine all slots. This is needed
            # so that newer slots will not unnecessarily be pulled in
            # when a satisfying lower slot is already installed. For
            # example, if virtual/jdk-1.4 is satisfied via kaffe then
            # there's no need to pull in a newer slot to satisfy a
            # virtual/jdk dependency.
            for db, pkg_type, built, installed, db_keys in \
                self._depgraph._filtered_trees[self._root]["dbs"]:
                for cpv in db.match(atom):
                    if portage.cpv_getkey(cpv) != pkg.cp:
                    slots.add(db.aux_get(cpv, ["SLOT"])[0])
        if self._visible(pkg):
            # Record the package so aux_get() can serve its metadata.
            self._cpv_pkg_map[pkg.cpv] = pkg
        slots.remove(pkg.metadata["SLOT"])
        # Also select a package for each remaining slot of this cp.
        slot_atom = "%s:%s" % (atom_cp, slots.pop())
        pkg, existing = self._depgraph._select_package(
            self._root, slot_atom)
        if not self._visible(pkg):
        self._cpv_pkg_map[pkg.cpv] = pkg
        # Sort ascending so callers see matches in standard dbapi order.
        self._cpv_sort_ascending(ret)
        self._match_cache[orig_atom] = ret

    def _visible(self, pkg):
        # Installed packages are masked from this view unless
        # "selective" mode permits reusing them.
        if pkg.installed and "selective" not in self._depgraph.myparams:
            arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
        except (StopIteration, portage.exception.InvalidDependString):
            self._depgraph.pkgsettings[pkg.root], pkg):
        except portage.exception.InvalidDependString:
        in_graph = self._depgraph._slot_pkg_map[
            self._root].get(pkg.slot_atom)
        if in_graph is None:
            # Mask choices for packages which are not the highest visible
            # version within their slot (since they usually trigger slot
            # conflicts).
            highest_visible, in_graph = self._depgraph._select_package(
                self._root, pkg.slot_atom)
            if pkg != highest_visible:
        elif in_graph != pkg:
            # Mask choices for packages that would trigger a slot
            # conflict with a previously selected package.

    def _dep_expand(self, atom):
        """
        This is only needed for old installed packages that may
        contain atoms that are not fully qualified with a specific
        category. Emulate the cpv_expand() function that's used by
        dbapi.match() in cases like this. If there are multiple
        matches, it's often due to a new-style virtual that has
        been added, so try to filter those out to avoid raising
        AmbiguousPackageName (see below).
        """
        root_config = self._depgraph.roots[self._root]
        expanded_atoms = self._depgraph._dep_expand(root_config, atom)
        if len(expanded_atoms) > 1:
            non_virtual_atoms = []
            for x in expanded_atoms:
                if not portage.dep_getkey(x).startswith("virtual/"):
                    non_virtual_atoms.append(x)
            # Prefer the single non-virtual candidate when there is one.
            if len(non_virtual_atoms) == 1:
                expanded_atoms = non_virtual_atoms
        if len(expanded_atoms) > 1:
            # compatible with portage.cpv_expand()
            raise portage.exception.AmbiguousPackageName(
                [portage.dep_getkey(x) for x in expanded_atoms])
        atom = expanded_atoms[0]
        # No match at all: decide between the "virtual" and "null"
        # categories based on PROVIDE-style virtuals (virts_p).
        null_atom = insert_category_into_atom(atom, "null")
        null_cp = portage.dep_getkey(null_atom)
        cat, atom_pn = portage.catsplit(null_cp)
        virts_p = root_config.settings.get_virts_p().get(atom_pn)
        # Allow the resolver to choose which virtual.
        atom = insert_category_into_atom(atom, "virtual")
        atom = insert_category_into_atom(atom, "null")

    def aux_get(self, cpv, wants):
        # Serve metadata for packages previously recorded by match();
        # missing keys default to the empty string, like dbapi does.
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
    """
    Collects the repository paths (PORTDIR plus overlays) seen across
    all roots and renders a legend that maps a display index to each
    repository path.
    """
    def __init__(self, roots):
        # repo_path -> display index, assigned in first-shown order.
        self._shown_repos = {}
        self._unknown_repo = False
        for root_config in roots.itervalues():
            portdir = root_config.settings.get("PORTDIR")
            repo_paths.add(portdir)
            overlays = root_config.settings.get("PORTDIR_OVERLAY")
            repo_paths.update(overlays.split())
        repo_paths = list(repo_paths)
        self._repo_paths = repo_paths
        # Resolved paths, for comparison against realpath() inputs.
        self._repo_paths_real = [ os.path.realpath(repo_path) \
            for repo_path in repo_paths ]

        # pre-allocate index for PORTDIR so that it always has index 0.
        for root_config in roots.itervalues():
            portdb = root_config.trees["porttree"].dbapi
            portdir = portdb.porttree_root
            self.repoStr(portdir)

    def repoStr(self, repo_path_real):
        # Map a resolved repository path to its display index.
        real_index = self._repo_paths_real.index(repo_path_real)
        if real_index == -1:
            # NOTE(review): list.index() raises ValueError rather than
            # returning -1 — presumably surrounding context (not visible
            # here) converts that; confirm against upstream.
            self._unknown_repo = True
        shown_repos = self._shown_repos
        repo_paths = self._repo_paths
        repo_path = repo_paths[real_index]
        index = shown_repos.get(repo_path)
        # First sighting of this repo: assign the next free index.
        index = len(shown_repos)
        shown_repos[repo_path] = index

        shown_repos = self._shown_repos
        unknown_repo = self._unknown_repo
        if shown_repos or self._unknown_repo:
            output.append("Portage tree and overlays:\n")
        # Invert the repo_path -> index mapping so repos print in
        # index order.
        show_repo_paths = list(shown_repos)
        for repo_path, repo_index in shown_repos.iteritems():
            show_repo_paths[repo_index] = repo_path
        for index, repo_path in enumerate(show_repo_paths):
            output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
        # Explain the "[?]" marker used for unidentified repositories.
        output.append(" "+teal("[?]") + \
            " indicates that the source repository could not be determined\n")
        return "".join(output)
class PackageCounters(object):
    """
    Tallies of merge-list categories (upgrades, downgrades, new slots,
    reinstalls, blocks, fetch restrictions, ...) used to build the
    summary line printed after the merge list.
    """
        # Counter initialization (zero everything).
        self.blocks_satisfied = 0
        self.restrict_fetch = 0
        self.restrict_fetch_satisfied = 0
        self.interactive = 0

        # Build the human-readable summary string.
        total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
        myoutput.append("Total: %s package" % total_installs)
        # Pluralize "package" and open the parenthesized breakdown.
        if total_installs != 1:
            myoutput.append("s")
        if total_installs != 0:
            myoutput.append(" (")
        if self.upgrades > 0:
            details.append("%s upgrade" % self.upgrades)
            if self.upgrades > 1:
        if self.downgrades > 0:
            details.append("%s downgrade" % self.downgrades)
            if self.downgrades > 1:
            details.append("%s new" % self.new)
        if self.newslot > 0:
            details.append("%s in new slot" % self.newslot)
            if self.newslot > 1:
            details.append("%s reinstall" % self.reinst)
            details.append("%s uninstall" % self.uninst)
        if self.interactive > 0:
            # Highlight interactive packages since they need user input.
            details.append("%s %s" % (self.interactive,
                colorize("WARN", "interactive")))
        myoutput.append(", ".join(details))
        if total_installs != 0:
            myoutput.append(")")
        myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
        # Fetch-restricted packages, with an "unsatisfied" warning when
        # some of them still need manual fetching.
        if self.restrict_fetch:
            myoutput.append("\nFetch Restriction: %s package" % \
                self.restrict_fetch)
            if self.restrict_fetch > 1:
                myoutput.append("s")
        if self.restrict_fetch_satisfied < self.restrict_fetch:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.restrict_fetch - self.restrict_fetch_satisfied))
        # Blocker conflicts, likewise flagged when unresolved.
        myoutput.append("\nConflict: %s block" % \
            myoutput.append("s")
        if self.blocks_satisfied < self.blocks:
            myoutput.append(bad(" (%s unsatisfied)") % \
                (self.blocks - self.blocks_satisfied))
        return "".join(myoutput)
class UseFlagDisplay(object):
    """
    Lightweight record describing one USE flag for display purposes,
    plus sort-key helpers for the two display orderings.
    """

    __slots__ = ('name', 'enabled', 'forced')

    def __init__(self, name, enabled, forced):
        # enabled: flag is currently on; forced: flag is not
        # user-toggleable (use.force/use.mask).
        self.enabled = enabled
        self.forced = forced

    def _cmp_combined(a, b):
        """
        Sort by name, combining enabled and disabled flags.
        """
        return (a.name > b.name) - (a.name < b.name)

    # Key function for sorted(); cmp_sort_key wraps the cmp-style
    # function above.
    sort_combined = cmp_sort_key(_cmp_combined)

    def _cmp_separated(a, b):
        """
        Sort by name, separating enabled flags from disabled flags.
        """
        # NOTE(review): enabled_diff appears unused here — an
        # "if enabled_diff: return enabled_diff" guard is expected
        # before the name comparison; confirm against upstream.
        enabled_diff = b.enabled - a.enabled
        return (a.name > b.name) - (a.name < b.name)

    sort_separated = cmp_sort_key(_cmp_separated)
class PollSelectAdapter(PollConstants):

    """
    Use select to emulate a poll object, for
    systems that don't support poll().
    """

        # fd -> eventmask for registered descriptors.
        self._registered = {}
        # Cached select() argument lists; set to None to force rebuild.
        self._select_args = [[], [], []]

    def register(self, fd, *args):
        """
        Only POLLIN is currently supported!
        """
            "register expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        eventmask = PollConstants.POLLIN | \
            PollConstants.POLLPRI | PollConstants.POLLOUT
        self._registered[fd] = eventmask
        # Invalidate the cached select() arguments.
        self._select_args = None

    def unregister(self, fd):
        # Drop the fd and invalidate the cached select() arguments.
        self._select_args = None
        del self._registered[fd]

    def poll(self, *args):
            "poll expected at most 2 arguments, got " + \
            repr(1 + len(args)))

        select_args = self._select_args
        if select_args is None:
            # Rebuild from the registered fd set (read events only).
            select_args = [self._registered.keys(), [], []]

        if timeout is not None:
            # Copy before appending so the cached list stays intact.
            select_args = select_args[:]
            # Translate poll() timeout args to select() timeout args:
            #
            #          | units        | value(s) for indefinite block
            # ---------|--------------|------------------------------
            # poll     | milliseconds | omitted, negative, or None
            # ---------|--------------|------------------------------
            # select   | seconds      | omitted
            # ---------|--------------|------------------------------
            if timeout is not None and timeout < 0:
            if timeout is not None:
                # poll() uses milliseconds; select() wants seconds.
                select_args.append(timeout / 1000)

        select_events = select.select(*select_args)
        # Translate readable fds back into poll()-style event tuples.
        for fd in select_events[0]:
            poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
    """
    A queue of tasks that are started in FIFO order, with at most
    max_jobs running concurrently (max_jobs is True means unlimited).
    """

    __slots__ = ("max_jobs", "running_tasks") + \
        ("_dirty", "_scheduling", "_task_queue")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        # deque gives O(1) popleft() for FIFO scheduling.
        self._task_queue = deque()
        self.running_tasks = set()
        if self.max_jobs is None:

    def add(self, task):
        self._task_queue.append(task)

    def addFront(self, task):
        # High-priority insert: task starts before anything already queued.
        self._task_queue.appendleft(task)

        if self._scheduling:
            # Ignore any recursive schedule() calls triggered via
            # self._task_exit().

        self._scheduling = True

        task_queue = self._task_queue
        running_tasks = self.running_tasks
        max_jobs = self.max_jobs
        state_changed = False

        # Start queued tasks until the concurrency limit is reached.
        while task_queue and \
            (max_jobs is True or len(running_tasks) < max_jobs):
            task = task_queue.popleft()
            cancelled = getattr(task, "cancelled", None)
            running_tasks.add(task)
            # _task_exit() prunes the task when it finishes.
            task.addExitListener(self._task_exit)
            state_changed = True

        self._scheduling = False

        return state_changed

    def _task_exit(self, task):
        """
        Since we can always rely on exit listeners being called, the set of
        running tasks is always pruned automatically and there is never any need
        to actively prune it.
        """
        self.running_tasks.remove(task)
        if self._task_queue:

        # Cancel/clear: drop queued tasks and detach from running ones.
        self._task_queue.clear()
        running_tasks = self.running_tasks
        while running_tasks:
            task = running_tasks.pop()
            task.removeExitListener(self._task_exit)

    def __nonzero__(self):
        # Truthy while any work remains (queued or running).
        return bool(self._task_queue or self.running_tasks)

        # Total outstanding work: queued plus running tasks.
        return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device() (None until first call).
_can_poll_device = None

def can_poll_device():
    """
    Test if it's possible to use poll() on a device such as a pty. This
    is known to fail on Darwin.
    @returns: True if poll() on a device succeeds, False otherwise.
    """

    global _can_poll_device
    if _can_poll_device is not None:
        # Return the memoized result from a previous call.
        return _can_poll_device

    if not hasattr(select, "poll"):
        _can_poll_device = False
        return _can_poll_device

    dev_null = open('/dev/null', 'rb')
        _can_poll_device = False
        return _can_poll_device

    # Probe /dev/null: a broken poll() implementation reports
    # POLLNVAL for the device fd.
    p.register(dev_null.fileno(), PollConstants.POLLIN)

    invalid_request = False
    for f, event in p.poll():
        if event & PollConstants.POLLNVAL:
            invalid_request = True

    _can_poll_device = not invalid_request
    return _can_poll_device
def create_poll_instance():
    """
    Return a poll-compatible object: a native select.poll() when the
    platform's poll() works on devices, otherwise a PollSelectAdapter
    that emulates poll() on top of select().
    """
    if not can_poll_device():
        # poll() is missing or broken (e.g. on Darwin) — fall back.
        return PollSelectAdapter()
    return select.poll()
# Fall back to a /proc based implementation on platforms whose os
# module does not provide getloadavg().
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
        # Fallback body:
        # Uses /proc/loadavg to emulate os.getloadavg().
        # Raises OSError if the load average was unobtainable.
        loadavg_str = open('/proc/loadavg').readline()
        # getloadavg() is only supposed to raise OSError, so convert
        raise OSError('unknown')
        loadavg_split = loadavg_str.split()
        # /proc/loadavg starts with the 1, 5 and 15 minute averages.
        if len(loadavg_split) < 3:
            raise OSError('unknown')
            loadavg_floats.append(float(loadavg_split[i]))
        raise OSError('unknown')
        return tuple(loadavg_floats)
class PollScheduler(object):
    """
    Base class implementing a poll()-driven event loop. Subclasses
    provide _schedule_tasks(); handlers are registered per fd and
    dispatched from a shared event queue.
    """

    class _sched_iface_class(SlotObject):
        # Narrow interface handed out to tasks: register/schedule/unregister.
        __slots__ = ("register", "schedule", "unregister")

        self._max_load = None
        # Events pulled from poll() but not yet dispatched.
        self._poll_event_queue = []
        # fd -> (handler, reg_id)
        self._poll_event_handlers = {}
        # reg_id -> fd
        self._poll_event_handler_ids = {}
        # Increment id for each new handler.
        self._event_handler_id = 0
        self._poll_obj = create_poll_instance()
        self._scheduling = False

    def _schedule(self):
        """
        Calls _schedule_tasks() and automatically returns early from
        any recursive calls to this method that the _schedule_tasks()
        call might trigger. This makes _schedule() safe to call from
        inside exit listeners.
        """
        if self._scheduling:
        self._scheduling = True
            return self._schedule_tasks()
        self._scheduling = False

    def _running_job_count(self):

    def _can_add_job(self):
        # Enforce both the job-count limit and the load-average limit.
        max_jobs = self._max_jobs
        max_load = self._max_load

        if self._max_jobs is not True and \
            self._running_job_count() >= self._max_jobs:

        # Only throttle on load when parallelism is actually in use.
        if max_load is not None and \
            (max_jobs is True or max_jobs > 1) and \
            self._running_job_count() >= 1:
                avg1, avg5, avg15 = getloadavg()

            # The 1-minute average is the throttling signal.
            if avg1 >= max_load:

    def _poll(self, timeout=None):
        """
        All poll() calls pass through here. The poll events
        are added directly to self._poll_event_queue.
        In order to avoid endless blocking, this raises
        StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_handlers:
        if timeout is None and \
            not self._poll_event_handlers:
            raise StopIteration(
                "timeout is None and there are no poll() event handlers")

        # A select.error such as (4, 'Interrupted system call') is known
        # to occur with some Linux kernel versions.
        #
        # This error has been observed after a SIGSTOP, followed by SIGCONT.
        # Treat it similar to EAGAIN if timeout is None, otherwise just return
        # without any events.
            self._poll_event_queue.extend(self._poll_obj.poll(timeout))
        except select.error, e:
            writemsg_level("\n!!! select error: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            if timeout is not None:

    def _next_poll_event(self, timeout=None):
        """
        Since the _schedule_wait() loop is called by event
        handlers from _poll_loop(), maintain a central event
        queue for both of them to share events from a single
        poll() call. In order to avoid endless blocking, this
        raises StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_queue:
        return self._poll_event_queue.pop()

    def _poll_loop(self):
        # Dispatch events until every handler has unregistered itself.
        event_handlers = self._poll_event_handlers
        event_handled = False

        while event_handlers:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
                event_handled = True
            except StopIteration:
                event_handled = True

        # A pass that handled nothing indicates a stuck loop.
        if not event_handled:
            raise AssertionError("tight loop")

    def _schedule_yield(self):
        """
        Schedule for a short period of time chosen by the scheduler based
        on internal state. Synchronous tasks should call this periodically
        in order to allow the scheduler to service pending poll events. The
        scheduler will call poll() exactly once, without blocking, and any
        resulting poll events will be serviced.
        """
        event_handlers = self._poll_event_handlers

        if not event_handlers:
            return bool(events_handled)

        if not self._poll_event_queue:

        # Service whatever events the single non-blocking poll produced.
        while event_handlers and self._poll_event_queue:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            except StopIteration:

        return bool(events_handled)

    def _register(self, f, eventmask, handler):
        """
        @return: A unique registration id, for use in schedule() or
            unregister() calls (TODO confirm exact wording upstream).
        """
        if f in self._poll_event_handlers:
            raise AssertionError("fd %d is already registered" % f)
        self._event_handler_id += 1
        reg_id = self._event_handler_id
        self._poll_event_handler_ids[reg_id] = f
        self._poll_event_handlers[f] = (handler, reg_id)
        self._poll_obj.register(f, eventmask)

    def _unregister(self, reg_id):
        # Tear down everything _register() set up for this id.
        f = self._poll_event_handler_ids[reg_id]
        self._poll_obj.unregister(f)
        del self._poll_event_handlers[f]
        del self._poll_event_handler_ids[reg_id]

    def _schedule_wait(self, wait_ids):
        """
        Schedule until wait_id is no longer registered.

        @param wait_id: a task id to wait for
        """
        event_handlers = self._poll_event_handlers
        handler_ids = self._poll_event_handler_ids
        event_handled = False

        # Accept either a single id or a collection of ids.
        if isinstance(wait_ids, int):
            wait_ids = frozenset([wait_ids])

        # Keep dispatching events until all waited-on ids unregister.
        while wait_ids.intersection(handler_ids):
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            event_handled = True
            except StopIteration:
                event_handled = True

        return event_handled
class QueueScheduler(PollScheduler):

    """
    Add instances of SequentialTaskQueue and then call run(). The
    run() method returns when no tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        PollScheduler.__init__(self)

        if max_jobs is None:

        self._max_jobs = max_jobs
        self._max_load = max_load
        # Narrow scheduler interface handed to tasks.
        self.sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)

        self._schedule_listeners = []

        self._queues.append(q)

    def remove(self, q):
        self._queues.remove(q)

        # run(): keep scheduling until nothing more can start, then
        # wait for the remaining running jobs to finish.
        while self._schedule():

        while self._running_job_count():

    def _schedule_tasks(self):
        """
        @returns: True if there may be remaining tasks to schedule,
            False otherwise.
        """
        while self._can_add_job():
            # Start at most enough jobs to fill the concurrency budget.
            n = self._max_jobs - self._running_job_count()

            if not self._start_next_job(n):

        for q in self._queues:

    def _running_job_count(self):
        # Sum of running tasks across all managed queues.
        for q in self._queues:
            job_count += len(q.running_tasks)
        self._jobs = job_count

    def _start_next_job(self, n=1):
        # Ask each queue to start work until n new jobs have started;
        # measured via before/after running_tasks counts.
        for q in self._queues:
            initial_job_count = len(q.running_tasks)
            final_job_count = len(q.running_tasks)
            if final_job_count > initial_job_count:
                started_count += (final_job_count - initial_job_count)
            if started_count >= n:
        return started_count
class TaskScheduler(object):

    """
    A simple way to handle scheduling of AsynchronousTask instances. Simply
    add tasks and call run(). The run() method returns when no tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        # One internal queue, driven by a private QueueScheduler.
        task_queue = SequentialTaskQueue(max_jobs=max_jobs)
        queue_scheduler = QueueScheduler(
            max_jobs=max_jobs, max_load=max_load)
        queue_scheduler.add(task_queue)
        self._queue = task_queue
        self._scheduler = queue_scheduler
        # Expose the underlying scheduler's interface and run() entry
        # point directly on this object.
        self.sched_iface = queue_scheduler.sched_iface
        self.run = queue_scheduler.run

    def add(self, task):
        # Queue a task; it starts when the scheduler's run() loop
        # has capacity for it.
        self._queue.add(task)
class JobStatusDisplay(object):
    """
    Renders the one-line "Jobs: N of M complete ... Load avg: ..."
    status display, using terminal control codes when stdout is a tty.
    """

    # Attributes whose changes mark the display as dirty (see
    # __setattr__/_property_change).
    _bound_properties = ("curval", "failed", "running")
    _jobs_column_width = 48

    # Don't update the display unless at least this much
    # time has passed, in units of seconds.
    _min_display_latency = 2

    # Fallback control codes used when termcap lookup fails.
    _default_term_codes = {

    # Friendly name -> termcap capability name.
    _termcap_name_map = {
        'carriage_return' : 'cr',

    def __init__(self, out=sys.stdout, quiet=False):
        # object.__setattr__ is used throughout to bypass the custom
        # __setattr__ defined below.
        object.__setattr__(self, "out", out)
        object.__setattr__(self, "quiet", quiet)
        object.__setattr__(self, "maxval", 0)
        object.__setattr__(self, "merges", 0)
        object.__setattr__(self, "_changed", False)
        object.__setattr__(self, "_displayed", False)
        object.__setattr__(self, "_last_display_time", 0)
        object.__setattr__(self, "width", 80)

        isatty = hasattr(out, "isatty") and out.isatty()
        object.__setattr__(self, "_isatty", isatty)
        if not isatty or not self._init_term():
            # No tty / no termcap: use the hard-coded defaults.
            for k, capname in self._termcap_name_map.iteritems():
                term_codes[k] = self._default_term_codes[capname]
            object.__setattr__(self, "_term_codes", term_codes)
        # Normalize any byte-string codes to unicode.
        encoding = sys.getdefaultencoding()
        for k, v in self._term_codes.items():
            if not isinstance(v, basestring):
                self._term_codes[k] = v.decode(encoding, 'replace')

    def _init_term(self):
        """
        Initialize term control codes.

        @returns: True if term codes were successfully initialized,
            False otherwise.
        """
        term_type = os.environ.get("TERM", "vt100")
            curses.setupterm(term_type, self.out.fileno())
            tigetstr = curses.tigetstr
        except curses.error:
        if tigetstr is None:
        for k, capname in self._termcap_name_map.iteritems():
            code = tigetstr(capname)
                # Capability missing: fall back to the default code.
                code = self._default_term_codes[capname]
            term_codes[k] = code
        object.__setattr__(self, "_term_codes", term_codes)

    def _format_msg(self, msg):
        return ">>> %s" % msg

        # Erase the current status line (carriage return + clear-eol).
            self._term_codes['carriage_return'] + \
            self._term_codes['clr_eol'])
        self._displayed = False

    def _display(self, line):
        self.out.write(line)
        self._displayed = True

    def _update(self, msg):
        # Non-tty output gets plain newline-terminated lines.
        if not self._isatty:
            out.write(self._format_msg(msg) + self._term_codes['newline'])
            self._displayed = True

        self._display(self._format_msg(msg))

    def displayMessage(self, msg):
        # Temporarily erase the status line, print the message, then
        # let the status line be redrawn.
        was_displayed = self._displayed

        if self._isatty and self._displayed:

        self.out.write(self._format_msg(msg) + self._term_codes['newline'])
        self._displayed = False

            self._changed = True

        # reset(): zero the bound counters and terminate the display.
        for name in self._bound_properties:
            object.__setattr__(self, name, 0)

            self.out.write(self._term_codes['newline'])
            self._displayed = False

    def __setattr__(self, name, value):
        old_value = getattr(self, name)
        if value == old_value:
        object.__setattr__(self, name, value)

        if name in self._bound_properties:
            self._property_change(name, old_value, value)

    def _property_change(self, name, old_value, new_value):
        # Mark the display dirty so the next display() call redraws.
        self._changed = True

    def _load_avg_str(self):
        return ", ".join(("%%.%df" % digits ) % x for x in avg)

        # display():
        # Display status on stdout, but only if something has
        # changed since the last call.

        # Rate-limit redraws on ttys (see _min_display_latency).
        current_time = time.time()
        time_delta = current_time - self._last_display_time
        if self._displayed and \
        if not self._isatty:
        if time_delta < self._min_display_latency:

        self._last_display_time = current_time
        self._changed = False
        self._display_status()

    def _display_status(self):
        # Don't use len(self._completed_tasks) here since that also
        # can include uninstall tasks.
        curval_str = str(self.curval)
        maxval_str = str(self.maxval)
        running_str = str(self.running)
        failed_str = str(self.failed)
        load_avg_str = self._load_avg_str()

        # Build colored and plain renderings in parallel: the plain one
        # is used for width/padding computations and xterm titles.
        color_output = StringIO()
        plain_output = StringIO()
        style_file = portage.output.ConsoleStyleFile(color_output)
        style_file.write_listener = plain_output
        style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
        style_writer.style_listener = style_file.new_styles
        f = formatter.AbstractFormatter(style_writer)

        number_style = "INFORM"
        f.add_literal_data("Jobs: ")
        f.push_style(number_style)
        f.add_literal_data(curval_str)
        f.add_literal_data(" of ")
        f.push_style(number_style)
        f.add_literal_data(maxval_str)
        f.add_literal_data(" complete")

            f.add_literal_data(", ")
            f.push_style(number_style)
            f.add_literal_data(running_str)
            f.add_literal_data(" running")

            f.add_literal_data(", ")
            f.push_style(number_style)
            f.add_literal_data(failed_str)
            f.add_literal_data(" failed")

        # Pad the jobs section to a fixed column before the load average.
        padding = self._jobs_column_width - len(plain_output.getvalue())
            f.add_literal_data(padding * " ")

        f.add_literal_data("Load avg: ")
        f.add_literal_data(load_avg_str)

        # Truncate to fit width, to avoid making the terminal scroll if the
        # line overflows (happens when the load average is large).
        plain_output = plain_output.getvalue()
        if self._isatty and len(plain_output) > self.width:
            # Use plain_output here since it's easier to truncate
            # properly than the color output which contains console
            # control codes.
            self._update(plain_output[:self.width])
            self._update(color_output.getvalue())

        # Mirror the (whitespace-collapsed) status into the xterm title.
        xtermTitle(" ".join(plain_output.split()))
class ProgressHandler(object):
    """
    Rate-limited progress callback: onProgress() records the latest
    values and triggers a redraw at most once per min_latency seconds.
    Subclasses must implement the display method.
    """
    def __init__(self):
        self._last_update = 0
        # Minimum number of seconds between display updates.
        self.min_latency = 0.2

    def onProgress(self, maxval, curval):
        self.maxval = maxval
        self.curval = curval
        cur_time = time.time()
        if cur_time - self._last_update >= self.min_latency:
            self._last_update = cur_time

        # Abstract display hook: subclasses must override.
        raise NotImplementedError(self)
10099 class Scheduler(PollScheduler):
10101 _opts_ignore_blockers = \
10102 frozenset(["--buildpkgonly",
10103 "--fetchonly", "--fetch-all-uri",
10104 "--nodeps", "--pretend"])
10106 _opts_no_background = \
10107 frozenset(["--pretend",
10108 "--fetchonly", "--fetch-all-uri"])
10110 _opts_no_restart = frozenset(["--buildpkgonly",
10111 "--fetchonly", "--fetch-all-uri", "--pretend"])
10113 _bad_resume_opts = set(["--ask", "--changelog",
10114 "--resume", "--skipfirst"])
10116 _fetch_log = "/var/log/emerge-fetch.log"
10118 class _iface_class(SlotObject):
10119 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10120 "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10121 "scheduleSetup", "scheduleUnpack", "scheduleYield",
10124 class _fetch_iface_class(SlotObject):
10125 __slots__ = ("log_file", "schedule")
10127 _task_queues_class = slot_dict_class(
10128 ("merge", "jobs", "fetch", "unpack"), prefix="")
10130 class _build_opts_class(SlotObject):
10131 __slots__ = ("buildpkg", "buildpkgonly",
10132 "fetch_all_uri", "fetchonly", "pretend")
10134 class _binpkg_opts_class(SlotObject):
10135 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10137 class _pkg_count_class(SlotObject):
10138 __slots__ = ("curval", "maxval")
10140 class _emerge_log_class(SlotObject):
10141 __slots__ = ("xterm_titles",)
10143 def log(self, *pargs, **kwargs):
10144 if not self.xterm_titles:
10145 # Avoid interference with the scheduler's status display.
10146 kwargs.pop("short_msg", None)
10147 emergelog(self.xterm_titles, *pargs, **kwargs)
10149 class _failed_pkg(SlotObject):
10150 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10152 class _ConfigPool(object):
10153 """Interface for a task to temporarily allocate a config
10154 instance from a pool. This allows a task to be constructed
10155 long before the config instance actually becomes needed, like
10156 when prefetchers are constructed for the whole merge list."""
10157 __slots__ = ("_root", "_allocate", "_deallocate")
10158 def __init__(self, root, allocate, deallocate):
10160 self._allocate = allocate
10161 self._deallocate = deallocate
10162 def allocate(self):
10163 return self._allocate(self._root)
10164 def deallocate(self, settings):
10165 self._deallocate(settings)
10167 class _unknown_internal_error(portage.exception.PortageException):
10169 Used internally to terminate scheduling. The specific reason for
10170 the failure should have been dumped to stderr.
10172 def __init__(self, value=""):
10173 portage.exception.PortageException.__init__(self, value)
10175 def __init__(self, settings, trees, mtimedb, myopts,
10176 spinner, mergelist, favorites, digraph):
10177 PollScheduler.__init__(self)
10178 self.settings = settings
10179 self.target_root = settings["ROOT"]
10181 self.myopts = myopts
10182 self._spinner = spinner
10183 self._mtimedb = mtimedb
10184 self._mergelist = mergelist
10185 self._favorites = favorites
10186 self._args_set = InternalPackageSet(favorites)
10187 self._build_opts = self._build_opts_class()
10188 for k in self._build_opts.__slots__:
10189 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10190 self._binpkg_opts = self._binpkg_opts_class()
10191 for k in self._binpkg_opts.__slots__:
10192 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10195 self._logger = self._emerge_log_class()
10196 self._task_queues = self._task_queues_class()
10197 for k in self._task_queues.allowed_keys:
10198 setattr(self._task_queues, k,
10199 SequentialTaskQueue())
10201 # Holds merges that will wait to be executed when no builds are
10202 # executing. This is useful for system packages since dependencies
10203 # on system packages are frequently unspecified.
10204 self._merge_wait_queue = []
10205 # Holds merges that have been transfered from the merge_wait_queue to
10206 # the actual merge queue. They are removed from this list upon
10207 # completion. Other packages can start building only when this list is
10209 self._merge_wait_scheduled = []
10211 # Holds system packages and their deep runtime dependencies. Before
10212 # being merged, these packages go to merge_wait_queue, to be merged
10213 # when no other packages are building.
10214 self._deep_system_deps = set()
10216 # Holds packages to merge which will satisfy currently unsatisfied
10217 # deep runtime dependencies of system packages. If this is not empty
10218 # then no parallel builds will be spawned until it is empty. This
10219 # minimizes the possibility that a build will fail due to the system
10220 # being in a fragile state. For example, see bug #259954.
10221 self._unsatisfied_system_deps = set()
10223 self._status_display = JobStatusDisplay()
10224 self._max_load = myopts.get("--load-average")
10225 max_jobs = myopts.get("--jobs")
10226 if max_jobs is None:
10228 self._set_max_jobs(max_jobs)
10230 # The root where the currently running
10231 # portage instance is installed.
10232 self._running_root = trees["/"]["root_config"]
10234 if settings.get("PORTAGE_DEBUG", "") == "1":
10236 self.pkgsettings = {}
10237 self._config_pool = {}
10238 self._blocker_db = {}
10240 self._config_pool[root] = []
10241 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10243 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10244 schedule=self._schedule_fetch)
10245 self._sched_iface = self._iface_class(
10246 dblinkEbuildPhase=self._dblink_ebuild_phase,
10247 dblinkDisplayMerge=self._dblink_display_merge,
10248 dblinkElog=self._dblink_elog,
10249 dblinkEmergeLog=self._dblink_emerge_log,
10250 fetch=fetch_iface, register=self._register,
10251 schedule=self._schedule_wait,
10252 scheduleSetup=self._schedule_setup,
10253 scheduleUnpack=self._schedule_unpack,
10254 scheduleYield=self._schedule_yield,
10255 unregister=self._unregister)
10257 self._prefetchers = weakref.WeakValueDictionary()
10258 self._pkg_queue = []
10259 self._completed_tasks = set()
10261 self._failed_pkgs = []
10262 self._failed_pkgs_all = []
10263 self._failed_pkgs_die_msgs = []
10264 self._post_mod_echo_msgs = []
10265 self._parallel_fetch = False
10266 merge_count = len([x for x in mergelist \
10267 if isinstance(x, Package) and x.operation == "merge"])
10268 self._pkg_count = self._pkg_count_class(
10269 curval=0, maxval=merge_count)
10270 self._status_display.maxval = self._pkg_count.maxval
10272 # The load average takes some time to respond when new
10273 # jobs are added, so we need to limit the rate of adding
10275 self._job_delay_max = 10
10276 self._job_delay_factor = 1.0
10277 self._job_delay_exp = 1.5
10278 self._previous_job_start_time = None
10280 self._set_digraph(digraph)
10282 # This is used to memoize the _choose_pkg() result when
10283 # no packages can be chosen until one of the existing
10285 self._choose_pkg_return_early = False
10287 features = self.settings.features
10288 if "parallel-fetch" in features and \
10289 not ("--pretend" in self.myopts or \
10290 "--fetch-all-uri" in self.myopts or \
10291 "--fetchonly" in self.myopts):
10292 if "distlocks" not in features:
10293 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10294 portage.writemsg(red("!!!")+" parallel-fetching " + \
10295 "requires the distlocks feature enabled"+"\n",
10297 portage.writemsg(red("!!!")+" you have it disabled, " + \
10298 "thus parallel-fetching is being disabled"+"\n",
10300 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10301 elif len(mergelist) > 1:
10302 self._parallel_fetch = True
10304 if self._parallel_fetch:
10305 # clear out existing fetch log if it exists
10307 open(self._fetch_log, 'w')
10308 except EnvironmentError:
10311 self._running_portage = None
10312 portage_match = self._running_root.trees["vartree"].dbapi.match(
10313 portage.const.PORTAGE_PACKAGE_ATOM)
10315 cpv = portage_match.pop()
10316 self._running_portage = self._pkg(cpv, "installed",
10317 self._running_root, installed=True)
def _poll(self, timeout=None):
	"""Poll for events, delegating to the PollScheduler base implementation.

	@param timeout: poll timeout in milliseconds, or None to block
	"""
	# NOTE(review): one interior line appears elided from this excerpt.
	PollScheduler._poll(self, timeout=timeout)
10323 def _set_max_jobs(self, max_jobs):
10324 self._max_jobs = max_jobs
10325 self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
	"""
	Check if background mode is enabled and adjust states as necessary.

	@returns: True if background mode is enabled, False otherwise.
	"""
	# Background is implied by parallelism (--jobs > 1) or --quiet,
	# unless an explicitly-foreground option (e.g. --pretend) is present.
	background = (self._max_jobs is True or \
		self._max_jobs > 1 or "--quiet" in self.myopts) and \
		not bool(self._opts_no_background.intersection(self.myopts))
	interactive_tasks = self._get_interactive_tasks()
	if interactive_tasks:
		# Interactive packages need the terminal, so their output
		# cannot be diverted to a log in the background.
		writemsg_level(">>> Sending package output to stdio due " + \
			"to interactive package(s):\n",
			level=logging.INFO, noiselevel=-1)
		# NOTE(review): initialization of 'msg' (presumably msg = [])
		# appears elided from this excerpt.
		for pkg in interactive_tasks:
			pkg_str = " " + colorize("INFORM", str(pkg.cpv))
			if pkg.root != "/":
				pkg_str += " for " + pkg.root
			msg.append(pkg_str)
		writemsg_level("".join("%s\n" % (l,) for l in msg),
			level=logging.INFO, noiselevel=-1)
		if self._max_jobs is True or self._max_jobs > 1:
			# Force serial execution so interactive phases can own the tty.
			self._set_max_jobs(1)
			writemsg_level(">>> Setting --jobs=1 due " + \
				"to the above interactive package(s)\n",
				level=logging.INFO, noiselevel=-1)
	# Keep the status display quiet unless truly running in background.
	self._status_display.quiet = \
		not background or \
		("--quiet" in self.myopts and \
		"--verbose" not in self.myopts)
	self._logger.xterm_titles = \
		"notitles" not in self.settings.features and \
		self._status_display.quiet
	# NOTE(review): the final 'return background' appears elided here.
def _get_interactive_tasks(self):
	"""Collect scheduled merge tasks whose PROPERTIES contain "interactive"."""
	from portage import flatten
	from portage.dep import use_reduce, paren_reduce
	interactive_tasks = []
	for task in self._mergelist:
		# Only actual package merges can be interactive.
		if not (isinstance(task, Package) and \
			task.operation == "merge"):
			# NOTE(review): 'continue' and the opening 'try:' appear
			# elided from this excerpt.
		properties = flatten(use_reduce(paren_reduce(
			task.metadata["PROPERTIES"]), uselist=task.use.enabled))
		except portage.exception.InvalidDependString, e:
			# A malformed PROPERTIES string is an ebuild/metadata bug;
			# surface it and abort rather than guessing.
			show_invalid_depstring_notice(task,
				task.metadata["PROPERTIES"], str(e))
			raise self._unknown_internal_error()
		if "interactive" in properties:
			interactive_tasks.append(task)
	return interactive_tasks
def _set_digraph(self, digraph):
	"""Install the dependency graph and derive scheduling metadata from it.

	The graph is only useful when dependencies are respected and more
	than one job may run; otherwise it is discarded.
	"""
	if "--nodeps" in self.myopts or \
		(self._max_jobs is not True and self._max_jobs < 2):
		# Dependency ordering is irrelevant in serial/--nodeps mode.
		self._digraph = None
		# NOTE(review): an early 'return' appears elided here.
	self._digraph = digraph
	self._find_system_deps()
	self._prune_digraph()
	self._prevent_builddir_collisions()
def _find_system_deps(self):
	"""
	Find system packages and their deep runtime dependencies. Before being
	merged, these packages go to merge_wait_queue, to be merged when no
	other packages are building.
	"""
	deep_system_deps = self._deep_system_deps
	deep_system_deps.clear()
	deep_system_deps.update(
		_find_deep_system_runtime_deps(self._digraph))
	# Only packages that will actually be merged matter for wait-queue
	# handling; drop everything else.
	deep_system_deps.difference_update([pkg for pkg in \
		deep_system_deps if pkg.operation != "merge"])
def _prune_digraph(self):
	"""
	Prune any root nodes that are irrelevant.
	"""
	graph = self._digraph
	completed_tasks = self._completed_tasks
	removed_nodes = set()
	# NOTE(review): this loop is iterative-to-fixpoint in intent —
	# an enclosing 'while True:' appears elided from this excerpt.
	for node in graph.root_nodes():
		# Roots that are not packages, are installed no-ops, or have
		# already completed contribute nothing to scheduling.
		if not isinstance(node, Package) or \
			(node.installed and node.operation == "nomerge") or \
			node in completed_tasks:
			removed_nodes.add(node)
	graph.difference_update(removed_nodes)
	if not removed_nodes:
		# NOTE(review): the loop-exit ('break') appears elided here.
	removed_nodes.clear()
def _prevent_builddir_collisions(self):
	"""
	When building stages, sometimes the same exact cpv needs to be merged
	to both $ROOTs. Add edges to the digraph in order to avoid collisions
	in the builddir. Currently, normal file locks would be inappropriate
	for this purpose since emerge holds all of it's build dir locks from
	the main process.
	"""
	# NOTE(review): initialization of 'cpv_map' (presumably {}) appears
	# elided from this excerpt.
	for pkg in self._mergelist:
		if not isinstance(pkg, Package):
			# a satisfied blocker
		if pkg.cpv not in cpv_map:
			# First occurrence of this cpv — nothing to serialize yet.
			cpv_map[pkg.cpv] = [pkg]
		# Same cpv seen again: force it to build after every earlier
		# occurrence so the builddirs never overlap in time.
		for earlier_pkg in cpv_map[pkg.cpv]:
			self._digraph.add(earlier_pkg, pkg,
				priority=DepPriority(buildtime=True))
		cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	def __init__(self, *pargs):
		"""@param pargs: optional positional args; the first, when given,
		is stored as the exit status of the failed uninstall."""
		portage.exception.PortageException.__init__(self, pargs)
		# NOTE(review): a guard (presumably 'if pargs:') appears elided
		# from this excerpt before the status assignment.
		self.status = pargs[0]
def _schedule_fetch(self, fetcher):
	"""
	Schedule a fetcher on the fetch queue, in order to
	serialize access to the fetch log.
	"""
	# addFront gives priority over background prefetchers already queued.
	self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
	"""
	Schedule a setup phase on the merge queue, in order to
	serialize unsandboxed access to the live filesystem.
	"""
	self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
	"""
	Schedule an unpack phase on the unpack queue, in order
	to serialize $DISTDIR access for live ebuilds.
	"""
	self._task_queues.unpack.add(unpack_phase)
10492 def _find_blockers(self, new_pkg):
10494 Returns a callable which should be called only when
10495 the vdb lock has been acquired.
10497 def get_blockers():
10498 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10499 return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
	"""Return dblink objects for installed packages that block new_pkg.

	@param new_pkg: the package about to be merged
	@param acquire_lock: forwarded to the blocker db; non-zero requests
		that it take the vdb lock itself
	"""
	if self._opts_ignore_blockers.intersection(self.myopts):
		# NOTE(review): an early return appears elided here.
	# Call gc.collect() here to avoid heap overflow that
	# triggers 'Cannot allocate memory' errors (reported
	# with python-2.5).
	blocker_db = self._blocker_db[new_pkg.root]
	blocker_dblinks = []
	for blocking_pkg in blocker_db.findInstalledBlockers(
		new_pkg, acquire_lock=acquire_lock):
		# A package never blocks its own slot replacement or itself;
		# NOTE(review): the 'continue' statements for these two guards
		# appear elided from this excerpt.
		if new_pkg.slot_atom == blocking_pkg.slot_atom:
		if new_pkg.cpv == blocking_pkg.cpv:
		blocker_dblinks.append(portage.dblink(
			blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
			self.pkgsettings[blocking_pkg.root], treetype="vartree",
			vartree=self.trees[blocking_pkg.root]["vartree"]))
	return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""Construct the Package instance corresponding to a dblink object.

	@param pkg_dblink: a portage.dblink instance
	@returns: the matching Package from this scheduler's package cache
	"""
	cpv = pkg_dblink.mycpv
	# Map the dblink tree type (e.g. "vartree") to a package type name.
	type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	root_config = self.trees[pkg_dblink.myroot]["root_config"]
	installed = type_name == "installed"
	return self._pkg(cpv, type_name, root_config, installed=installed)
def _append_to_log_path(self, log_path, msg):
	"""Append msg to the file at log_path."""
	# NOTE(review): the write/close (likely try/finally) portion of this
	# method is elided from this excerpt.
	f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
	"""Route elog messages from a dblink to the terminal or the build log.

	@param func: the elog function to invoke for each message
	@param msgs: iterable of message strings
	"""
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background
	# In background mode, divert output to the build log instead of
	# the terminal.
	if background and log_path is not None:
		log_file = open(log_path, 'a')
	# NOTE(review): the 'for msg in msgs:' loop, the 'out' assignment
	# and the try/finally wrapper appear elided from this excerpt.
	func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
	if log_file is not None:
def _dblink_emerge_log(self, msg):
	"""Forward a merge-log message from a dblink to the emerge logger."""
	self._logger.log(msg)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
	"""Display (or log) a merge-time message on behalf of a dblink.

	@param level: logging level of the message
	@param noiselevel: portage noise level for writemsg
	"""
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background
	if log_path is None:
		# No log file: print to the terminal unless we are in the
		# background and the message is below warning severity.
		if not (background and level < logging.WARN):
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
	# NOTE(review): an 'else:'/foreground-check branch appears elided
	# from this excerpt before the following lines.
		portage.util.writemsg_level(msg,
			level=level, noiselevel=noiselevel)
	self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
	pkg_dblink, pkg_dbapi, ebuild_path, phase):
	"""
	Using this callback for merge phases allows the scheduler
	to run while these phases execute asynchronously, and allows
	the scheduler control output handling.

	@returns: the exit status of the executed ebuild phase
	"""
	scheduler = self._sched_iface
	settings = pkg_dblink.settings
	pkg = self._dblink_pkg(pkg_dblink)
	background = self._background
	log_path = settings.get("PORTAGE_LOG_FILE")
	ebuild_phase = EbuildPhase(background=background,
		pkg=pkg, phase=phase, scheduler=scheduler,
		settings=settings, tree=pkg_dblink.treetype)
	# Run synchronously from the caller's perspective: start, then wait.
	ebuild_phase.start()
	ebuild_phase.wait()
	return ebuild_phase.returncode
def _generate_digests(self):
	"""
	Generate digests if necessary for --digests or FEATURES=digest.
	In order to avoid interference, this must done before parallel
	tasks are started.

	@returns: os.EX_OK on success, non-zero on failure (per visible code)
	"""
	if '--fetchonly' in self.myopts:
		# NOTE(review): an early return appears elided here.
	digest = '--digest' in self.myopts
	for pkgsettings in self.pkgsettings.itervalues():
		if 'digest' in pkgsettings.features:
			# NOTE(review): the body (presumably setting digest=True)
			# and a following early-out appear elided.
	for x in self._mergelist:
		# Digests are only regenerated for ebuilds being merged.
		if not isinstance(x, Package) or \
			x.type_name != 'ebuild' or \
			x.operation != 'merge':
		pkgsettings = self.pkgsettings[x.root]
		if '--digest' not in self.myopts and \
			'digest' not in pkgsettings.features:
		portdb = x.root_config.trees['porttree'].dbapi
		ebuild_path = portdb.findname(x.cpv)
		if not ebuild_path:
			"!!! Could not locate ebuild for '%s'.\n" \
			% x.cpv, level=logging.ERROR, noiselevel=-1)
		# 'O' points digestgen at the ebuild's directory.
		pkgsettings['O'] = os.path.dirname(ebuild_path)
		if not portage.digestgen([], pkgsettings, myportdb=portdb):
			"!!! Unable to generate manifest for '%s'.\n" \
			% x.cpv, level=logging.ERROR, noiselevel=-1)
	# NOTE(review): several 'continue'/'return' statements and the
	# writemsg_level call openers are elided from this excerpt.
def _check_manifests(self):
	# Verify all the manifests now so that the user is notified of failure
	# as soon as possible.
	if "strict" not in self.settings.features or \
		"--fetchonly" in self.myopts or \
		"--fetch-all-uri" in self.myopts:
		# NOTE(review): an early 'return os.EX_OK' appears elided here.
	shown_verifying_msg = False
	quiet_settings = {}
	for myroot, pkgsettings in self.pkgsettings.iteritems():
		# Clone per-root configs with PORTAGE_QUIET forced on so that
		# digestcheck output stays terse.
		quiet_config = portage.config(clone=pkgsettings)
		quiet_config["PORTAGE_QUIET"] = "1"
		quiet_config.backup_changes("PORTAGE_QUIET")
		quiet_settings[myroot] = quiet_config
	for x in self._mergelist:
		# Only ebuilds have Manifests to verify.
		if not isinstance(x, Package) or \
			x.type_name != "ebuild":
		if not shown_verifying_msg:
			shown_verifying_msg = True
			self._status_msg("Verifying ebuild manifests")
		root_config = x.root_config
		portdb = root_config.trees["porttree"].dbapi
		quiet_config = quiet_settings[root_config.root]
		quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
		if not portage.digestcheck([], quiet_config, strict=True):
	# NOTE(review): the 'continue'/failure-return/final-return lines are
	# elided from this excerpt.
def _add_prefetchers(self):
	"""Queue background fetch tasks for every package after the first."""
	if not self._parallel_fetch:
		# NOTE(review): an early return appears elided here.
	if self._parallel_fetch:
		self._status_msg("Starting parallel fetch")
		prefetchers = self._prefetchers
		getbinpkg = "--getbinpkg" in self.myopts
		# In order to avoid "waiting for lock" messages
		# at the beginning, which annoy users, never
		# spawn a prefetcher for the first package.
		for pkg in self._mergelist[1:]:
			prefetcher = self._create_prefetcher(pkg)
			if prefetcher is not None:
				self._task_queues.fetch.add(prefetcher)
				prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
	"""
	@return: a prefetcher, or None if not applicable
	"""
	# NOTE(review): the 'prefetcher = None' initializer, the first
	# branch body, and the final return appear elided from this excerpt.
	if not isinstance(pkg, Package):
	elif pkg.type_name == "ebuild":
		# Background SRC_URI fetch, logged to the shared fetch log.
		prefetcher = EbuildFetcher(background=True,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			fetchonly=1, logfile=self._fetch_log,
			pkg=pkg, prefetch=True, scheduler=self._sched_iface)
	elif pkg.type_name == "binary" and \
		"--getbinpkg" in self.myopts and \
		pkg.root_config.trees["bintree"].isremote(pkg.cpv):
		# Remote binary package: download it ahead of time.
		prefetcher = BinpkgPrefetcher(background=True,
			pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
	"""
	Check if the merge list contains a replacement
	for the current running instance, that will result
	in restart after merge.

	@returns: True if a restart is scheduled, False otherwise.
	"""
	if self._opts_no_restart.intersection(self.myopts):
		# NOTE(review): 'return False' appears elided here.
	mergelist = self._mergelist
	for i, pkg in enumerate(mergelist):
		# A portage upgrade anywhere except the final position
		# forces a restart to pick up the new version.
		if self._is_restart_necessary(pkg) and \
			i != len(mergelist) - 1:
	# NOTE(review): the 'return True'/'return False' lines appear elided
	# from this excerpt.
def _is_restart_necessary(self, pkg):
	"""
	@return: True if merging the given package
	requires restart, False otherwise.
	"""
	# Figure out if we need a restart.
	if pkg.root == self._running_root.root and \
		portage.match_from_list(
		portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
		if self._running_portage:
			# Only restart when the version actually changes.
			return pkg.cpv != self._running_portage.cpv
	# NOTE(review): the fallback 'return True'/'return False' lines
	# appear elided from this excerpt.
def _restart_if_necessary(self, pkg):
	"""
	Use execv() to restart emerge. This happens
	if portage upgrades itself and there are
	remaining packages in the list.
	"""
	# NOTE(review): the early 'return' statements for the three guards
	# below appear elided from this excerpt.
	if self._opts_no_restart.intersection(self.myopts):
	if not self._is_restart_necessary(pkg):
	if pkg == self._mergelist[-1]:
	self._main_loop_cleanup()
	logger = self._logger
	pkg_count = self._pkg_count
	mtimedb = self._mtimedb
	bad_resume_opts = self._bad_resume_opts
	logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
		(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
	logger.log(" *** RESTARTING " + \
		"emerge via exec() after change of " + \
		"portage version.")
	# Drop the just-merged package from the resume list before exec().
	mtimedb["resume"]["mergelist"].remove(list(pkg))
	portage.run_exitfuncs()
	mynewargv = [sys.argv[0], "--resume"]
	resume_opts = self.myopts.copy()
	# For automatic resume, we need to prevent
	# any of bad_resume_opts from leaking in
	# via EMERGE_DEFAULT_OPTS.
	resume_opts["--ignore-default-opts"] = True
	for myopt, myarg in resume_opts.iteritems():
		if myopt not in bad_resume_opts:
			# NOTE(review): the boolean-vs-valued branch keywords
			# appear elided around the next two lines.
			mynewargv.append(myopt)
			mynewargv.append(myopt +"="+ str(myarg))
	# priority only needs to be adjusted on the first run
	os.environ["PORTAGE_NICENESS"] = "0"
	# Replace the current process image; does not return on success.
	os.execv(mynewargv[0], mynewargv)
10812 if "--resume" in self.myopts:
10814 portage.writemsg_stdout(
10815 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10816 self._logger.log(" *** Resuming merge...")
10818 self._save_resume_list()
10821 self._background = self._background_mode()
10822 except self._unknown_internal_error:
10825 for root in self.trees:
10826 root_config = self.trees[root]["root_config"]
10828 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10829 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10830 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10831 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10832 if not tmpdir or not os.path.isdir(tmpdir):
10833 msg = "The directory specified in your " + \
10834 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10835 "does not exist. Please create this " + \
10836 "directory or correct your PORTAGE_TMPDIR setting."
10837 msg = textwrap.wrap(msg, 70)
10838 out = portage.output.EOutput()
10843 if self._background:
10844 root_config.settings.unlock()
10845 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10846 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10847 root_config.settings.lock()
10849 self.pkgsettings[root] = portage.config(
10850 clone=root_config.settings)
10852 rval = self._generate_digests()
10853 if rval != os.EX_OK:
10856 rval = self._check_manifests()
10857 if rval != os.EX_OK:
10860 keep_going = "--keep-going" in self.myopts
10861 fetchonly = self._build_opts.fetchonly
10862 mtimedb = self._mtimedb
10863 failed_pkgs = self._failed_pkgs
10866 rval = self._merge()
10867 if rval == os.EX_OK or fetchonly or not keep_going:
10869 if "resume" not in mtimedb:
10871 mergelist = self._mtimedb["resume"].get("mergelist")
10875 if not failed_pkgs:
10878 for failed_pkg in failed_pkgs:
10879 mergelist.remove(list(failed_pkg.pkg))
10881 self._failed_pkgs_all.extend(failed_pkgs)
10887 if not self._calc_resume_list():
10890 clear_caches(self.trees)
10891 if not self._mergelist:
10894 self._save_resume_list()
10895 self._pkg_count.curval = 0
10896 self._pkg_count.maxval = len([x for x in self._mergelist \
10897 if isinstance(x, Package) and x.operation == "merge"])
10898 self._status_display.maxval = self._pkg_count.maxval
10900 self._logger.log(" *** Finished. Cleaning up...")
10903 self._failed_pkgs_all.extend(failed_pkgs)
10906 background = self._background
10907 failure_log_shown = False
10908 if background and len(self._failed_pkgs_all) == 1:
10909 # If only one package failed then just show it's
10910 # whole log for easy viewing.
10911 failed_pkg = self._failed_pkgs_all[-1]
10912 build_dir = failed_pkg.build_dir
10915 log_paths = [failed_pkg.build_log]
10917 log_path = self._locate_failure_log(failed_pkg)
10918 if log_path is not None:
10920 log_file = open(log_path)
10924 if log_file is not None:
10926 for line in log_file:
10927 writemsg_level(line, noiselevel=-1)
10930 failure_log_shown = True
10932 # Dump mod_echo output now since it tends to flood the terminal.
10933 # This allows us to avoid having more important output, generated
10934 # later, from being swept away by the mod_echo output.
10935 mod_echo_output = _flush_elog_mod_echo()
10937 if background and not failure_log_shown and \
10938 self._failed_pkgs_all and \
10939 self._failed_pkgs_die_msgs and \
10940 not mod_echo_output:
10942 printer = portage.output.EOutput()
10943 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10945 if mysettings["ROOT"] != "/":
10946 root_msg = " merged to %s" % mysettings["ROOT"]
10948 printer.einfo("Error messages for package %s%s:" % \
10949 (colorize("INFORM", key), root_msg))
10951 for phase in portage.const.EBUILD_PHASES:
10952 if phase not in logentries:
10954 for msgtype, msgcontent in logentries[phase]:
10955 if isinstance(msgcontent, basestring):
10956 msgcontent = [msgcontent]
10957 for line in msgcontent:
10958 printer.eerror(line.strip("\n"))
10960 if self._post_mod_echo_msgs:
10961 for msg in self._post_mod_echo_msgs:
10964 if len(self._failed_pkgs_all) > 1 or \
10965 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10966 if len(self._failed_pkgs_all) > 1:
10967 msg = "The following %d packages have " % \
10968 len(self._failed_pkgs_all) + \
10969 "failed to build or install:"
10971 msg = "The following package has " + \
10972 "failed to build or install:"
10973 prefix = bad(" * ")
10974 writemsg(prefix + "\n", noiselevel=-1)
10975 from textwrap import wrap
10976 for line in wrap(msg, 72):
10977 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10978 writemsg(prefix + "\n", noiselevel=-1)
10979 for failed_pkg in self._failed_pkgs_all:
10980 writemsg("%s\t%s\n" % (prefix,
10981 colorize("INFORM", str(failed_pkg.pkg))),
10983 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
	"""Collect ERROR-level elog entries for the end-of-run failure summary."""
	errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
	# NOTE(review): a guard (presumably 'if errors:') appears elided here.
	self._failed_pkgs_die_msgs.append(
		(mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
	"""Find a non-empty build log for a failed package, if one exists."""
	build_dir = failed_pkg.build_dir
	log_paths = [failed_pkg.build_log]
	for log_path in log_paths:
		# NOTE(review): the existence guard, error handling around
		# os.stat, and the return statements are elided from this excerpt.
		log_size = os.stat(log_path).st_size
def _add_packages(self):
	"""Seed the package queue from the merge list (packages only)."""
	pkg_queue = self._pkg_queue
	for pkg in self._mergelist:
		if isinstance(pkg, Package):
			pkg_queue.append(pkg)
		elif isinstance(pkg, Blocker):
			# NOTE(review): the blocker branch body is elided from this
			# excerpt; blockers are not queued for merging.
def _system_merge_started(self, merge):
	"""
	Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
	"""
	graph = self._digraph
	# NOTE(review): a 'graph is None' guard appears elided here.
	pkg = merge.merge.pkg
	# Skip this if $ROOT != / since it shouldn't matter if there
	# are unsatisfied system runtime deps in this case.
	if pkg.root != '/':
		# NOTE(review): an early 'return' appears elided here.
	completed_tasks = self._completed_tasks
	unsatisfied = self._unsatisfied_system_deps
	def ignore_non_runtime_or_satisfied(priority):
		"""
		Ignore non-runtime and satisfied runtime priorities.
		"""
		# NOTE(review): the True/False return lines of this predicate
		# are elided from this excerpt.
		if isinstance(priority, DepPriority) and \
			not priority.satisfied and \
			(priority.runtime or priority.runtime_post):
	# When checking for unsatisfied runtime deps, only check
	# direct deps since indirect deps are checked when the
	# corresponding parent is merged.
	for child in graph.child_nodes(pkg,
		ignore_priority=ignore_non_runtime_or_satisfied):
		if not isinstance(child, Package) or \
			child.operation == 'uninstall':
		# A direct runtime dep that is scheduled to merge but has not
		# completed yet is, by definition, unsatisfied right now.
		if child.operation == 'merge' and \
			child not in completed_tasks:
			unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
	"""Exit listener for merges scheduled via the merge-wait queue."""
	# Remove from the scheduled set first, then run the common exit path.
	self._merge_wait_scheduled.remove(task)
	self._merge_exit(task)
def _merge_exit(self, merge):
	"""Common exit path for merge tasks: bookkeeping and status update."""
	self._do_merge_exit(merge)
	# Return the per-task config to the pool for reuse.
	self._deallocate_config(merge.merge.settings)
	# Only count successful merges of not-yet-installed packages.
	if merge.returncode == os.EX_OK and \
		not merge.merge.pkg.installed:
		self._status_display.curval += 1
	self._status_display.merges = len(self._task_queues.merge)
	# NOTE(review): a trailing call (possibly self._schedule()) may be
	# elided from this excerpt.
def _do_merge_exit(self, merge):
	"""Handle a finished merge: record failure or mark tasks complete."""
	pkg = merge.merge.pkg
	if merge.returncode != os.EX_OK:
		settings = merge.merge.settings
		build_dir = settings.get("PORTAGE_BUILDDIR")
		build_log = settings.get("PORTAGE_LOG_FILE")
		# NOTE(review): additional kwargs (e.g. pkg=pkg) and an early
		# return appear elided from this failure branch.
		self._failed_pkgs.append(self._failed_pkg(
			build_dir=build_dir, build_log=build_log,
			returncode=merge.returncode))
		self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
		self._status_display.failed = len(self._failed_pkgs)
	self._task_complete(pkg)
	pkg_to_replace = merge.merge.pkg_to_replace
	if pkg_to_replace is not None:
		# When a package is replaced, mark it's uninstall
		# task complete (if any).
		uninst_hash_key = \
			("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
		self._task_complete(uninst_hash_key)
	self._restart_if_necessary(pkg)
	# Call mtimedb.commit() after each merge so that
	# --resume still works after being interrupted
	# by reboot, sigkill or similar.
	mtimedb = self._mtimedb
	mtimedb["resume"]["mergelist"].remove(list(pkg))
	if not mtimedb["resume"]["mergelist"]:
		del mtimedb["resume"]
	# NOTE(review): the mtimedb.commit() call appears elided here.
def _build_exit(self, build):
	"""Handle a finished build: queue its merge, or record the failure."""
	if build.returncode == os.EX_OK:
		merge = PackageMerge(merge=build)
		if not build.build_opts.buildpkgonly and \
			build.pkg in self._deep_system_deps:
			# Since dependencies on system packages are frequently
			# unspecified, merge them only when no builds are executing.
			self._merge_wait_queue.append(merge)
			merge.addStartListener(self._system_merge_started)
		# NOTE(review): an 'else:' appears elided before the next three
		# lines (the normal immediate-merge path).
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)
		self._status_display.merges = len(self._task_queues.merge)
	# NOTE(review): an 'else:' (build failed) appears elided before the
	# following failure-recording lines.
	settings = build.settings
	build_dir = settings.get("PORTAGE_BUILDDIR")
	build_log = settings.get("PORTAGE_LOG_FILE")
	self._failed_pkgs.append(self._failed_pkg(
		build_dir=build_dir, build_log=build_log,
		returncode=build.returncode))
	self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
	self._status_display.failed = len(self._failed_pkgs)
	self._deallocate_config(build.settings)
	self._status_display.running = self._jobs
def _extract_exit(self, build):
	"""Binary-package extraction uses the same exit handling as builds."""
	self._build_exit(build)
11151 def _task_complete(self, pkg):
11152 self._completed_tasks.add(pkg)
11153 self._unsatisfied_system_deps.discard(pkg)
11154 self._choose_pkg_return_early = False
11158 self._add_prefetchers()
11159 self._add_packages()
11160 pkg_queue = self._pkg_queue
11161 failed_pkgs = self._failed_pkgs
11162 portage.locks._quiet = self._background
11163 portage.elog._emerge_elog_listener = self._elog_listener
11169 self._main_loop_cleanup()
11170 portage.locks._quiet = False
11171 portage.elog._emerge_elog_listener = None
11173 rval = failed_pkgs[-1].returncode
11177 def _main_loop_cleanup(self):
11178 del self._pkg_queue[:]
11179 self._completed_tasks.clear()
11180 self._deep_system_deps.clear()
11181 self._unsatisfied_system_deps.clear()
11182 self._choose_pkg_return_early = False
11183 self._status_display.reset()
11184 self._digraph = None
11185 self._task_queues.fetch.clear()
def _choose_pkg(self):
	"""
	Choose a task that has all it's dependencies satisfied.
	"""
	# Memoized: nothing can be chosen until a running job finishes.
	if self._choose_pkg_return_early:
		# NOTE(review): 'return None' appears elided here.
	if self._digraph is None:
		# Without a graph we cannot reason about ordering, so only
		# dispatch when it is safe (no concurrent jobs, or --nodeps
		# with parallelism explicitly requested).
		if (self._jobs or self._task_queues.merge) and \
			not ("--nodeps" in self.myopts and \
			(self._max_jobs is True or self._max_jobs > 1)):
			self._choose_pkg_return_early = True
			# NOTE(review): 'return None' appears elided here.
		return self._pkg_queue.pop(0)
	# Nothing is running: the head of the queue is always safe.
	if not (self._jobs or self._task_queues.merge):
		return self._pkg_queue.pop(0)
	self._prune_digraph()
	# 'later' holds packages that would merge after the candidate anyway,
	# so dependence on them can be ignored.
	later = set(self._pkg_queue)
	for pkg in self._pkg_queue:
		# NOTE(review): the later.remove(pkg) / chosen_pkg assignment /
		# break lines appear elided from this loop.
		if not self._dependent_on_scheduled_merges(pkg, later):
	if chosen_pkg is not None:
		self._pkg_queue.remove(chosen_pkg)
	if chosen_pkg is None:
		# There's no point in searching for a package to
		# choose until at least one of the existing jobs
		# completes.
		self._choose_pkg_return_early = True
	# NOTE(review): the final 'return chosen_pkg' appears elided here.
def _dependent_on_scheduled_merges(self, pkg, later):
	"""
	Traverse the subgraph of the given packages deep dependencies
	to see if it contains any scheduled merges.

	@param pkg: a package to check dependencies for
	@param later: packages for which dependence should be ignored
		since they will be merged later than pkg anyway and therefore
		delaying the merge of pkg will not result in a more optimal
		merge order
	@returns: True if the package is dependent, False otherwise.
	"""
	graph = self._digraph
	completed_tasks = self._completed_tasks
	# Depth-first traversal over child (dependency) nodes.
	traversed_nodes = set([pkg])
	direct_deps = graph.child_nodes(pkg)
	node_stack = direct_deps
	direct_deps = frozenset(direct_deps)
	# NOTE(review): the enclosing 'while node_stack:' loop line and the
	# 'dependent' flag handling appear elided from this excerpt.
	node = node_stack.pop()
	if node in traversed_nodes:
	traversed_nodes.add(node)
	# A node counts as a scheduled merge unless it is an installed
	# no-op, an indirect uninstall, already completed, or in 'later'.
	if not ((node.installed and node.operation == "nomerge") or \
		(node.operation == "uninstall" and \
		node not in direct_deps) or \
		node in completed_tasks or \
		node in later):
	node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
	"""
	Allocate a unique config instance for a task in order
	to prevent interference between parallel tasks.

	@param root: the $ROOT whose settings to clone or reuse
	@returns: a per-task portage.config instance
	"""
	# Reuse a pooled instance when available; cloning is expensive.
	if self._config_pool[root]:
		temp_settings = self._config_pool[root].pop()
	# NOTE(review): an 'else:' appears elided before the clone below.
	temp_settings = portage.config(clone=self.pkgsettings[root])
	# Since config.setcpv() isn't guaranteed to call config.reset() due to
	# performance reasons, call it here to make sure all settings from the
	# previous package get flushed out (such as PORTAGE_LOG_FILE).
	temp_settings.reload()
	temp_settings.reset()
	return temp_settings
11282 def _deallocate_config(self, settings):
11283 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
	"""Drive scheduling and event polling until all work is done."""
	# Only allow 1 job max if a restart is scheduled
	# due to portage update.
	if self._is_restart_scheduled() or \
		self._opts_no_background.intersection(self.myopts):
		self._set_max_jobs(1)
	merge_queue = self._task_queues.merge
	while self._schedule():
		if self._poll_event_handlers:
			# NOTE(review): the poll call in this branch appears elided.
	# NOTE(review): the drain loop structure around the following lines
	# (waiting for remaining jobs/merges) is partially elided.
	if not (self._jobs or merge_queue):
	if self._poll_event_handlers:
11306 def _keep_scheduling(self):
11307 return bool(self._pkg_queue and \
11308 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
	"""Dispatch waiting merges and new jobs; report whether to continue."""
	# When the number of jobs drops to zero, process all waiting merges.
	if not self._jobs and self._merge_wait_queue:
		for task in self._merge_wait_queue:
			task.addExitListener(self._merge_wait_exit_handler)
			self._task_queues.merge.add(task)
		self._status_display.merges = len(self._task_queues.merge)
		self._merge_wait_scheduled.extend(self._merge_wait_queue)
		del self._merge_wait_queue[:]
	self._schedule_tasks_imp()
	self._status_display.display()
	# NOTE(review): a state-change accumulation loop over the task
	# queues appears elided around the following line.
	for q in self._task_queues.values():
	# Cancel prefetchers if they're the only reason
	# the main poll loop is still running.
	if self._failed_pkgs and not self._build_opts.fetchonly and \
		not (self._jobs or self._task_queues.merge) and \
		self._task_queues.fetch:
		self._task_queues.fetch.clear()
	self._schedule_tasks_imp()
	self._status_display.display()
	return self._keep_scheduling()
def _job_delay(self):
	"""
	@returns: True if job scheduling should be delayed, False otherwise.
	"""
	# The load average responds slowly to new jobs, so throttle the
	# rate of job starts with an exponential back-off in job count.
	if self._jobs and self._max_load is not None:
		current_time = time.time()
		delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
		if delay > self._job_delay_max:
			delay = self._job_delay_max
		if (current_time - self._previous_job_start_time) < delay:
	# NOTE(review): the 'return True'/'return False' lines appear elided
	# from this excerpt.
def _schedule_tasks_imp(self):
	"""
	@returns: True if state changed, False otherwise.
	"""
	# NOTE(review): the 'state_change' accumulator initialization and the
	# enclosing scheduling loop line appear elided from this excerpt.
	if not self._keep_scheduling():
		return bool(state_change)
	# Hold off when a choice is known to be futile, waiting merges are
	# scheduled, system deps are unsatisfied, or limits/delays apply.
	if self._choose_pkg_return_early or \
		self._merge_wait_scheduled or \
		(self._jobs and self._unsatisfied_system_deps) or \
		not self._can_add_job() or \
		return bool(state_change)
	pkg = self._choose_pkg()
	return bool(state_change)
	if not pkg.installed:
		self._pkg_count.curval += 1
	task = self._task(pkg)
	# NOTE(review): the branch keywords selecting merge vs. extract vs.
	# build dispatch (pkg.installed / pkg.built / else) are elided
	# around the following three dispatch sections.
	merge = PackageMerge(merge=task)
	merge.addExitListener(self._merge_exit)
	self._task_queues.merge.add(merge)
	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._extract_exit)
	self._task_queues.jobs.add(task)
	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._build_exit)
	self._task_queues.jobs.add(task)
	return bool(state_change)
def _task(self, pkg):
	"""Build the MergeListItem task for pkg, resolving any replaced package."""
	pkg_to_replace = None
	if pkg.operation != "uninstall":
		# Find the currently installed package in the same slot, if any,
		# so the merge can treat it as a replacement.
		vardb = pkg.root_config.trees["vartree"].dbapi
		previous_cpv = vardb.match(pkg.slot_atom)
		# NOTE(review): a truthiness guard on previous_cpv appears
		# elided before the following pop().
		previous_cpv = previous_cpv.pop()
		pkg_to_replace = self._pkg(previous_cpv,
			"installed", pkg.root_config, installed=True)
	task = MergeListItem(args_set=self._args_set,
		background=self._background, binpkg_opts=self._binpkg_opts,
		build_opts=self._build_opts,
		config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
		emerge_opts=self.myopts,
		find_blockers=self._find_blockers(pkg), logger=self._logger,
		mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
		pkg_to_replace=pkg_to_replace,
		prefetcher=self._prefetchers.get(pkg),
		scheduler=self._sched_iface,
		settings=self._allocate_config(pkg.root),
		statusMessage=self._status_msg,
		world_atom=self._world_atom)
	# NOTE(review): the final 'return task' appears elided here.
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""Emit a status message describing a failed package action.

	@param failed_pkg: the _failed_pkg record for the failure
	@param action: verb for the message, e.g. "install" or "emerge"
	@param preposition: joins the package to its root, e.g. "to"/"for"
	"""
	pkg = failed_pkg.pkg
	msg = "%s to %s %s" % \
		(bad("Failed"), action, colorize("INFORM", pkg.cpv))
	# Mention the target root only when it isn't the default.
	if pkg.root != "/":
		msg += " %s %s" % (preposition, pkg.root)
	log_path = self._locate_failure_log(failed_pkg)
	if log_path is not None:
		msg += ", Log file:"
	self._status_msg(msg)
	# Print the log path on its own line for easy copy/paste.
	if log_path is not None:
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11456 def _status_msg(self, msg):
11458 Display a brief status message (no newlines) in the status display.
11459 This is called by tasks to provide feedback to the user. This
11460 delegates the responsibility of generating \r and \n control characters,
11461 to guarantee that lines are created or erased when necessary and
11465 @param msg: a brief status message (no newlines allowed)
# In foreground mode, emit a blank line first so the message is not
# appended onto an existing status line.
11467 if not self._background:
11468 writemsg_level("\n")
11469 self._status_display.displayMessage(msg)
# Persist the pending merge list into the mtimedb so an interrupted
# run can be resumed later with --resume.
11471 def _save_resume_list(self):
11473 Do this before verifying the ebuild Manifests since it might
11474 be possible for the user to use --resume --skipfirst to get past
11475 a non-essential package with a broken digest.
11477 mtimedb = self._mtimedb
# Only real "merge" operations are resumable; uninstalls and nomerge
# entries are filtered out. Each Package is flattened to a plain list
# so it can be serialized by the mtimedb.
11478 mtimedb["resume"]["mergelist"] = [list(x) \
11479 for x in self._mergelist \
11480 if isinstance(x, Package) and x.operation == "merge"]
# NOTE(review): fragment — several try:/else:/return lines between the
# numbered statements are missing from this extraction.
11484 def _calc_resume_list(self):
11486 Use the current resume list to calculate a new one,
11487 dropping any packages with unsatisfied deps.
11489 @returns: True if successful, False otherwise.
11491 print colorize("GOOD", "*** Resuming merge...")
# Optionally announce the upcoming package listing, honoring --tree order.
11493 if self._show_list():
11494 if "--tree" in self.myopts:
11495 portage.writemsg_stdout("\n" + \
11496 darkgreen("These are the packages that " + \
11497 "would be merged, in reverse order:\n\n"))
11500 portage.writemsg_stdout("\n" + \
11501 darkgreen("These are the packages that " + \
11502 "would be merged, in order:\n\n"))
11504 show_spinner = "--quiet" not in self.myopts and \
11505 "--nodeps" not in self.myopts
11508 print "Calculating dependencies ",
11510 myparams = create_depgraph_params(self.myopts, None)
# Rebuild the dependency graph from the saved resume list; packages whose
# dependencies can no longer be satisfied come back in dropped_tasks.
11514 success, mydepgraph, dropped_tasks = resume_depgraph(
11515 self.settings, self.trees, self._mtimedb, self.myopts,
11516 myparams, self._spinner)
11517 except depgraph.UnsatisfiedResumeDep, exc:
11518 # rename variable to avoid python-3.0 error:
11519 # SyntaxError: can not delete variable 'e' referenced in nested
11522 mydepgraph = e.depgraph
11523 dropped_tasks = set()
11526 print "\b\b... done!"
# Deferred error report, shown after mod_echo output is flushed.
11529 def unsatisfied_resume_dep_msg():
11530 mydepgraph.display_problems()
11531 out = portage.output.EOutput()
11532 out.eerror("One or more packages are either masked or " + \
11533 "have missing dependencies:")
11536 show_parents = set()
# Report each unsatisfied dep once per parent package.
11537 for dep in e.value:
11538 if dep.parent in show_parents:
11540 show_parents.add(dep.parent)
11541 if dep.atom is None:
11542 out.eerror(indent + "Masked package:")
11543 out.eerror(2 * indent + str(dep.parent))
11546 out.eerror(indent + str(dep.atom) + " pulled in by:")
11547 out.eerror(2 * indent + str(dep.parent))
11549 msg = "The resume list contains packages " + \
11550 "that are either masked or have " + \
11551 "unsatisfied dependencies. " + \
11552 "Please restart/continue " + \
11553 "the operation manually, or use --skipfirst " + \
11554 "to skip the first package in the list and " + \
11555 "any other packages that may be " + \
11556 "masked or have missing dependencies."
11557 for line in textwrap.wrap(msg, 72):
11559 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
# On success, optionally display the recalculated merge list.
11562 if success and self._show_list():
11563 mylist = mydepgraph.altlist()
11565 if "--tree" in self.myopts:
11567 mydepgraph.display(mylist, favorites=self._favorites)
11570 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11572 mydepgraph.display_problems()
# break_refs() severs cycles so the depgraph itself can be garbage
# collected while the packages live on in the scheduler.
11574 mylist = mydepgraph.altlist()
11575 mydepgraph.break_refs(mylist)
11576 mydepgraph.break_refs(dropped_tasks)
11577 self._mergelist = mylist
11578 self._set_digraph(mydepgraph.schedulerGraph())
# Log each dropped merge task as a --keep-going casualty.
11581 for task in dropped_tasks:
11582 if not (isinstance(task, Package) and task.operation == "merge"):
11585 msg = "emerge --keep-going:" + \
11587 if pkg.root != "/":
11588 msg += " for %s" % (pkg.root,)
11589 msg += " dropped due to unsatisfied dependency."
11590 for line in textwrap.wrap(msg, msg_width):
11591 eerror(line, phase="other", key=pkg.cpv)
11592 settings = self.pkgsettings[pkg.root]
11593 # Ensure that log collection from $T is disabled inside
11594 # elog_process(), since any logs that might exist are
11596 settings.pop("T", None)
11597 portage.elog.elog_process(pkg.cpv, settings)
11598 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# Decide whether the merge list should be printed: only when not quiet
# and one of --ask/--tree/--verbose is active.
# NOTE(review): the return statements are missing from this fragment.
11602 def _show_list(self):
11603 myopts = self.myopts
11604 if "--quiet" not in myopts and \
11605 ("--ask" in myopts or "--tree" in myopts or \
11606 "--verbose" in myopts):
11610 def _world_atom(self, pkg):
11612 Add the package to the world file, but only if
11613 it's supposed to be added. Otherwise, do nothing.
# Skip world updates for operations that don't really install anything
# or were explicitly marked one-shot.
11616 if set(("--buildpkgonly", "--fetchonly",
11618 "--oneshot", "--onlydeps",
11619 "--pretend")).intersection(self.myopts):
# Only packages merged into the target ROOT belong in its world file.
11622 if pkg.root != self.target_root:
# Only record packages the user actually asked for on the command line.
11625 args_set = self._args_set
11626 if not args_set.findAtomForPackage(pkg):
11629 logger = self._logger
11630 pkg_count = self._pkg_count
11631 root_config = pkg.root_config
11632 world_set = root_config.sets["world"]
# Lock the world file if the set supports locking, so concurrent emerge
# processes don't clobber each other's updates.
11633 world_locked = False
11634 if hasattr(world_set, "lock"):
11636 world_locked = True
11639 if hasattr(world_set, "load"):
11640 world_set.load() # maybe it's changed on disk
11642 atom = create_world_atom(pkg, args_set, root_config)
# create_world_atom() may return None when the package shouldn't be
# recorded; the surrounding (elided) guard handles that case.
11644 if hasattr(world_set, "add"):
11645 self._status_msg(('Recording %s in "world" ' + \
11646 'favorites file...') % atom)
11647 logger.log(" === (%s of %s) Updating world file (%s)" % \
11648 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11649 world_set.add(atom)
11651 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11652 (atom,), level=logging.WARN, noiselevel=-1)
11657 def _pkg(self, cpv, type_name, root_config, installed=False):
11659 Get a package instance from the cache, or create a new
11660 one if necessary. Raises KeyError from aux_get if it
11661 fails for some reason (package does not exist or is
11664 operation = "merge"
11666 operation = "nomerge"
# Prefer a Package instance already present in the dependency graph,
# keyed by (type_name, root, cpv, operation).
11668 if self._digraph is not None:
11669 # Reuse existing instance when available.
11670 pkg = self._digraph.get(
11671 (type_name, root_config.root, cpv, operation))
11672 if pkg is not None:
# Cache miss: build a fresh Package from the appropriate db's metadata.
11675 tree_type = depgraph.pkg_tree_map[type_name]
11676 db = root_config.trees[tree_type].dbapi
11677 db_keys = list(self.trees[root_config.root][
11678 tree_type].dbapi._aux_cache_keys)
11679 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11680 pkg = Package(cpv=cpv, metadata=metadata,
11681 root_config=root_config, installed=installed)
# For ebuilds, USE/CHOST must come from the current config rather than
# the repository metadata.
11682 if type_name == "ebuild":
11683 settings = self.pkgsettings[root_config.root]
11684 settings.setcpv(pkg)
11685 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11686 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# Regenerates the ebuild metadata cache by scheduling parallel
# EbuildMetadataPhase jobs (used by `emerge --regen` / egencache-style runs).
# NOTE(review): fragment — try:/else:/return lines between numbered
# statements are missing from this extraction.
11690 class MetadataRegen(PollScheduler):
11692 def __init__(self, portdb, cp_iter=None, consumer=None,
11693 max_jobs=None, max_load=None):
11694 PollScheduler.__init__(self)
11695 self._portdb = portdb
11696 self._global_cleanse = False
11697 if cp_iter is None:
11698 cp_iter = self._iter_every_cp()
11699 # We can globally cleanse stale cache only if we
11700 # iterate over every single cp.
11701 self._global_cleanse = True
11702 self._cp_iter = cp_iter
# Optional callback invoked with (cpv, ebuild_path, repo_path, metadata)
# for every processed package.
11703 self._consumer = consumer
11705 if max_jobs is None:
11708 self._max_jobs = max_jobs
11709 self._max_load = max_load
11710 self._sched_iface = self._sched_iface_class(
11711 register=self._register,
11712 schedule=self._schedule_wait,
11713 unregister=self._unregister)
11715 self._valid_pkgs = set()
11716 self._cp_set = set()
11717 self._process_iter = self._iter_metadata_processes()
11718 self.returncode = os.EX_OK
11719 self._error_count = 0
# Lazily yield every category/package name, sorted ascending (the list
# is reverse-sorted and then consumed from the end via pop()).
11721 def _iter_every_cp(self):
11722 every_cp = self._portdb.cp_all()
11723 every_cp.sort(reverse=True)
11726 yield every_cp.pop()
# Yield an EbuildMetadataPhase for each cpv whose cache entry is stale;
# cpvs with valid cache are reported to the consumer immediately.
11730 def _iter_metadata_processes(self):
11731 portdb = self._portdb
11732 valid_pkgs = self._valid_pkgs
11733 cp_set = self._cp_set
11734 consumer = self._consumer
11736 for cp in self._cp_iter:
11738 portage.writemsg_stdout("Processing %s\n" % cp)
11739 cpv_list = portdb.cp_list(cp)
11740 for cpv in cpv_list:
11741 valid_pkgs.add(cpv)
11742 ebuild_path, repo_path = portdb.findname2(cpv)
11743 metadata, st, emtime = portdb._pull_valid_cache(
11744 cpv, ebuild_path, repo_path)
11745 if metadata is not None:
11746 if consumer is not None:
11747 consumer(cpv, ebuild_path,
11748 repo_path, metadata)
11751 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11752 ebuild_mtime=emtime,
11753 metadata_callback=portdb._metadata_callback,
11754 portdb=portdb, repo_path=repo_path,
11755 settings=portdb.doebuild_settings)
# Main loop (def line elided in this fragment): run scheduled jobs, then
# prune stale cache entries ("dead nodes") from each tree's auxdb.
11759 portdb = self._portdb
11760 from portage.cache.cache_errors import CacheError
11763 while self._schedule():
# Global cleanse: every cached cpv starts out presumed dead.
11769 if self._global_cleanse:
11770 for mytree in portdb.porttrees:
11772 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11773 except CacheError, e:
11774 portage.writemsg("Error listing cache entries for " + \
11775 "'%s': %s, continuing...\n" % (mytree, e),
# Partial cleanse: only consider cache entries for the cps we visited.
11781 cp_set = self._cp_set
11782 cpv_getkey = portage.cpv_getkey
11783 for mytree in portdb.porttrees:
11785 dead_nodes[mytree] = set(cpv for cpv in \
11786 portdb.auxdb[mytree].iterkeys() \
11787 if cpv_getkey(cpv) in cp_set)
11788 except CacheError, e:
11789 portage.writemsg("Error listing cache entries for " + \
11790 "'%s': %s, continuing...\n" % (mytree, e),
# A cpv that still has an ebuild in a tree is not dead for that tree.
11797 for y in self._valid_pkgs:
11798 for mytree in portdb.porttrees:
11799 if portdb.findname2(y, mytree=mytree)[0]:
11800 dead_nodes[mytree].discard(y)
11802 for mytree, nodes in dead_nodes.iteritems():
11803 auxdb = portdb.auxdb[mytree]
11807 except (KeyError, CacheError):
11810 def _schedule_tasks(self):
11813 @returns: True if there may be remaining tasks to schedule,
# Keep launching metadata processes until the job limit is hit or the
# iterator is exhausted.
11816 while self._can_add_job():
11818 metadata_process = self._process_iter.next()
11819 except StopIteration:
11823 metadata_process.scheduler = self._sched_iface
11824 metadata_process.addExitListener(self._metadata_exit)
11825 metadata_process.start()
# Exit hook: record failures but keep going, and always notify the
# consumer (with metadata=None on failure).
11828 def _metadata_exit(self, metadata_process):
11830 if metadata_process.returncode != os.EX_OK:
11831 self.returncode = 1
11832 self._error_count += 1
11833 self._valid_pkgs.discard(metadata_process.cpv)
11834 portage.writemsg("Error processing %s, continuing...\n" % \
11835 (metadata_process.cpv,), noiselevel=-1)
11837 if self._consumer is not None:
11838 # On failure, still notify the consumer (in this case the metadata
11839 # argument is None).
11840 self._consumer(metadata_process.cpv,
11841 metadata_process.ebuild_path,
11842 metadata_process.repo_path,
11843 metadata_process.metadata)
11847 class UninstallFailure(portage.exception.PortageException):
11849 An instance of this class is raised by unmerge() when
11850 an uninstallation fails.
11853 def __init__(self, *pargs):
11854 portage.exception.PortageException.__init__(self, pargs)
# First positional arg, when given, is the failing command's exit status;
# a default assignment is elided in this fragment.
11856 self.status = pargs[0]
# Top-level driver for `emerge --unmerge/--prune/--clean/--depclean`:
# resolves arguments to installed packages, partitions them into
# selected/protected/omitted, shows the preview, and performs the unmerge.
# NOTE(review): fragment — try:/else:/return/continue lines between the
# numbered statements are missing from this extraction; read alongside the
# full file before drawing control-flow conclusions.
11858 def unmerge(root_config, myopts, unmerge_action,
11859 unmerge_files, ldpath_mtimes, autoclean=0,
11860 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11861 scheduler=None, writemsg_level=portage.util.writemsg_level):
11864 clean_world = myopts.get('--deselect') != 'n'
11865 quiet = "--quiet" in myopts
11866 settings = root_config.settings
11867 sets = root_config.sets
11868 vartree = root_config.trees["vartree"]
11869 candidate_catpkgs=[]
11871 xterm_titles = "notitles" not in settings.features
11872 out = portage.output.EOutput()
11874 db_keys = list(vartree.dbapi._aux_cache_keys)
# Local helper (def line elided): memoized construction of installed
# Package instances from the vartree.
11877 pkg = pkg_cache.get(cpv)
11879 pkg = Package(cpv=cpv, installed=True,
11880 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11881 root_config=root_config,
11882 type_name="installed")
11883 pkg_cache[cpv] = pkg
11886 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11888 # At least the parent needs to exist for the lock file.
11889 portage.util.ensure_dirs(vdb_path)
11890 except portage.exception.PortageException:
# Lock the installed-package database only when we can write to it.
11894 if os.access(vdb_path, os.W_OK):
11895 vdb_lock = portage.locks.lockdir(vdb_path)
# Expand the system set: a virtual counts only when exactly one installed
# provider satisfies it (otherwise the virtual itself stays in syslist).
11896 realsyslist = sets["system"].getAtoms()
11898 for x in realsyslist:
11899 mycp = portage.dep_getkey(x)
11900 if mycp in settings.getvirtuals():
11902 for provider in settings.getvirtuals()[mycp]:
11903 if vartree.dbapi.match(provider):
11904 providers.append(provider)
11905 if len(providers) == 1:
11906 syslist.extend(providers)
11908 syslist.append(mycp)
11910 mysettings = portage.config(clone=settings)
11912 if not unmerge_files:
11913 if unmerge_action == "unmerge":
11915 print bold("emerge unmerge") + " can only be used with specific package names"
11921 localtree = vartree
11922 # process all arguments and add all
11923 # valid db entries to candidate_catpkgs
11925 if not unmerge_files:
11926 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11928 #we've got command-line arguments
11929 if not unmerge_files:
11930 print "\nNo packages to unmerge have been provided.\n"
11932 for x in unmerge_files:
11933 arg_parts = x.split('/')
11934 if x[0] not in [".","/"] and \
11935 arg_parts[-1][-7:] != ".ebuild":
11936 #possible cat/pkg or dep; treat as such
11937 candidate_catpkgs.append(x)
11938 elif unmerge_action in ["prune","clean"]:
11939 print "\n!!! Prune and clean do not accept individual" + \
11940 " ebuilds as arguments;\n skipping.\n"
11943 # it appears that the user is specifying an installed
11944 # ebuild and we're in "unmerge" mode, so it's ok.
11945 if not os.path.exists(x):
11946 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Translate a filesystem path to an ebuild inside the vdb into a "=cpv"
# atom by stripping the vdb prefix.
11949 absx = os.path.abspath(x)
11950 sp_absx = absx.split("/")
11951 if sp_absx[-1][-7:] == ".ebuild":
11953 absx = "/".join(sp_absx)
11955 sp_absx_len = len(sp_absx)
11957 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11958 vdb_len = len(vdb_path)
11960 sp_vdb = vdb_path.split("/")
11961 sp_vdb_len = len(sp_vdb)
11963 if not os.path.exists(absx+"/CONTENTS"):
11964 print "!!! Not a valid db dir: "+str(absx)
11967 if sp_absx_len <= sp_vdb_len:
11968 # The Path is shorter... so it can't be inside the vdb.
11971 print "\n!!!",x,"cannot be inside "+ \
11972 vdb_path+"; aborting.\n"
11975 for idx in range(0,sp_vdb_len):
11976 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11979 print "\n!!!", x, "is not inside "+\
11980 vdb_path+"; aborting.\n"
11983 print "="+"/".join(sp_absx[sp_vdb_len:])
11984 candidate_catpkgs.append(
11985 "="+"/".join(sp_absx[sp_vdb_len:]))
11988 if (not "--quiet" in myopts):
11990 if settings["ROOT"] != "/":
11991 writemsg_level(darkgreen(newline+ \
11992 ">>> Using system located in ROOT tree %s\n" % \
11995 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11996 not ("--quiet" in myopts):
11997 writemsg_level(darkgreen(newline+\
11998 ">>> These are the packages that would be unmerged:\n"))
12000 # Preservation of order is required for --depclean and --prune so
12001 # that dependencies are respected. Use all_selected to eliminate
12002 # duplicate packages since the same package may be selected by
12005 all_selected = set()
12006 for x in candidate_catpkgs:
12007 # cycle through all our candidate deps and determine
12008 # what will and will not get unmerged
12010 mymatch = vartree.dbapi.match(x)
12011 except portage.exception.AmbiguousPackageName, errpkgs:
12012 print "\n\n!!! The short ebuild name \"" + \
12013 x + "\" is ambiguous. Please specify"
12014 print "!!! one of the following fully-qualified " + \
12015 "ebuild names instead:\n"
12016 for i in errpkgs[0]:
12017 print " " + green(i)
12021 if not mymatch and x[0] not in "<>=~":
12022 mymatch = localtree.dep_match(x)
12024 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
12025 (x, unmerge_action), noiselevel=-1)
# One entry per argument: sets of protected / selected / omitted cpvs.
12029 {"protected": set(), "selected": set(), "omitted": set()})
12030 mykey = len(pkgmap) - 1
12031 if unmerge_action=="unmerge":
12033 if y not in all_selected:
12034 pkgmap[mykey]["selected"].add(y)
12035 all_selected.add(y)
12036 elif unmerge_action == "prune":
12037 if len(mymatch) == 1:
# Prune: keep the "best" match per slot (highest counter wins ties on
# the same slot, since it is the most recently installed).
12039 best_version = mymatch[0]
12040 best_slot = vartree.getslot(best_version)
12041 best_counter = vartree.dbapi.cpv_counter(best_version)
12042 for mypkg in mymatch[1:]:
12043 myslot = vartree.getslot(mypkg)
12044 mycounter = vartree.dbapi.cpv_counter(mypkg)
12045 if (myslot == best_slot and mycounter > best_counter) or \
12046 mypkg == portage.best([mypkg, best_version]):
12047 if myslot == best_slot:
12048 if mycounter < best_counter:
12049 # On slot collision, keep the one with the
12050 # highest counter since it is the most
12051 # recently installed.
12053 best_version = mypkg
12055 best_counter = mycounter
12056 pkgmap[mykey]["protected"].add(best_version)
12057 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12058 if mypkg != best_version and mypkg not in all_selected)
12059 all_selected.update(pkgmap[mykey]["selected"])
12061 # unmerge_action == "clean"
# Clean: bucket installed versions per slot by install counter, protect
# the newest in each slot, select the rest.
12063 for mypkg in mymatch:
12064 if unmerge_action == "clean":
12065 myslot = localtree.getslot(mypkg)
12067 # since we're pruning, we don't care about slots
12068 # and put all the pkgs in together
12070 if myslot not in slotmap:
12071 slotmap[myslot] = {}
12072 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12074 for mypkg in vartree.dbapi.cp_list(
12075 portage.dep_getkey(mymatch[0])):
12076 myslot = vartree.getslot(mypkg)
12077 if myslot not in slotmap:
12078 slotmap[myslot] = {}
12079 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12081 for myslot in slotmap:
12082 counterkeys = slotmap[myslot].keys()
12083 if not counterkeys:
12086 pkgmap[mykey]["protected"].add(
12087 slotmap[myslot][counterkeys[-1]])
12088 del counterkeys[-1]
12090 for counter in counterkeys[:]:
12091 mypkg = slotmap[myslot][counter]
12092 if mypkg not in mymatch:
12093 counterkeys.remove(counter)
12094 pkgmap[mykey]["protected"].add(
12095 slotmap[myslot][counter])
12097 #be pretty and get them in order of merge:
12098 for ckey in counterkeys:
12099 mypkg = slotmap[myslot][ckey]
12100 if mypkg not in all_selected:
12101 pkgmap[mykey]["selected"].add(mypkg)
12102 all_selected.add(mypkg)
12103 # ok, now the last-merged package
12104 # is protected, and the rest are selected
12105 numselected = len(all_selected)
12106 if global_unmerge and not numselected:
12107 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12110 if not numselected:
12111 portage.writemsg_stdout(
12112 "\n>>> No packages selected for removal by " + \
12113 unmerge_action + "\n")
12117 vartree.dbapi.flush_cache()
12118 portage.locks.unlockdir(vdb_lock)
12120 from portage.sets.base import EditablePackageSet
12122 # generate a list of package sets that are directly or indirectly listed in "world",
12123 # as there is no persistent list of "installed" sets
12124 installed_sets = ["world"]
12129 pos = len(installed_sets)
12130 for s in installed_sets[pos - 1:]:
12133 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12136 installed_sets += candidates
12137 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12140 # we don't want to unmerge packages that are still listed in user-editable package sets
12141 # listed in "world" as they would be remerged on the next update of "world" or the
12142 # relevant package sets.
12143 unknown_sets = set()
12144 for cp in xrange(len(pkgmap)):
12145 for cpv in pkgmap[cp]["selected"].copy():
12149 # It could have been uninstalled
12150 # by a concurrent process.
# Never let portage unmerge itself on the live root.
12153 if unmerge_action != "clean" and \
12154 root_config.root == "/" and \
12155 portage.match_from_list(
12156 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12157 msg = ("Not unmerging package %s since there is no valid " + \
12158 "reason for portage to unmerge itself.") % (pkg.cpv,)
12159 for line in textwrap.wrap(msg, 75):
12161 # adjust pkgmap so the display output is correct
12162 pkgmap[cp]["selected"].remove(cpv)
12163 all_selected.remove(cpv)
12164 pkgmap[cp]["protected"].add(cpv)
12168 for s in installed_sets:
12169 # skip sets that the user requested to unmerge, and skip world
12170 # unless we're unmerging a package set (as the package would be
12171 # removed from "world" later on)
12172 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12176 if s in unknown_sets:
12178 unknown_sets.add(s)
12179 out = portage.output.EOutput()
12180 out.eerror(("Unknown set '@%s' in " + \
12181 "%svar/lib/portage/world_sets") % \
12182 (s, root_config.root))
12185 # only check instances of EditablePackageSet as other classes are generally used for
12186 # special purposes and can be ignored here (and are usually generated dynamically, so the
12187 # user can't do much about them anyway)
12188 if isinstance(sets[s], EditablePackageSet):
12190 # This is derived from a snippet of code in the
12191 # depgraph._iter_atoms_for_pkg() method.
12192 for atom in sets[s].iterAtomsForPackage(pkg):
12193 inst_matches = vartree.dbapi.match(atom)
12194 inst_matches.reverse() # descending order
12196 for inst_cpv in inst_matches:
12198 inst_pkg = _pkg(inst_cpv)
12200 # It could have been uninstalled
12201 # by a concurrent process.
12204 if inst_pkg.cp != atom.cp:
12206 if pkg >= inst_pkg:
12207 # This is descending order, and we're not
12208 # interested in any versions <= pkg given.
12210 if pkg.slot_atom != inst_pkg.slot_atom:
12211 higher_slot = inst_pkg
12213 if higher_slot is None:
12217 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12218 #print colorize("WARN", "but still listed in the following package sets:")
12219 #print " %s\n" % ", ".join(parents)
12220 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12221 print colorize("WARN", "still referenced by the following package sets:")
12222 print " %s\n" % ", ".join(parents)
12223 # adjust pkgmap so the display output is correct
12224 pkgmap[cp]["selected"].remove(cpv)
12225 all_selected.remove(cpv)
12226 pkgmap[cp]["protected"].add(cpv)
12230 numselected = len(all_selected)
12231 if not numselected:
12233 "\n>>> No packages selected for removal by " + \
12234 unmerge_action + "\n")
12237 # Unmerge order only matters in some cases
# Re-bucket by category/package name so the preview prints one row per cp.
12241 selected = d["selected"]
12244 cp = portage.cpv_getkey(iter(selected).next())
12245 cp_dict = unordered.get(cp)
12246 if cp_dict is None:
12248 unordered[cp] = cp_dict
12251 for k, v in d.iteritems():
12252 cp_dict[k].update(v)
12253 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview output: classify remaining installed versions as "omitted"
# and warn loudly when a selected cp is part of the system profile.
12255 for x in xrange(len(pkgmap)):
12256 selected = pkgmap[x]["selected"]
12259 for mytype, mylist in pkgmap[x].iteritems():
12260 if mytype == "selected":
12262 mylist.difference_update(all_selected)
12263 cp = portage.cpv_getkey(iter(selected).next())
12264 for y in localtree.dep_match(cp):
12265 if y not in pkgmap[x]["omitted"] and \
12266 y not in pkgmap[x]["selected"] and \
12267 y not in pkgmap[x]["protected"] and \
12268 y not in all_selected:
12269 pkgmap[x]["omitted"].add(y)
12270 if global_unmerge and not pkgmap[x]["selected"]:
12271 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12273 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12274 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12275 "'%s' is part of your system profile.\n" % cp),
12276 level=logging.WARNING, noiselevel=-1)
12277 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12278 "be damaging to your system.\n\n"),
12279 level=logging.WARNING, noiselevel=-1)
12280 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12281 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12282 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12284 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12286 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12287 for mytype in ["selected","protected","omitted"]:
12289 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12290 if pkgmap[x][mytype]:
12291 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12292 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12293 for pn, ver, rev in sorted_pkgs:
12297 myversion = ver + "-" + rev
12298 if mytype == "selected":
12300 colorize("UNMERGE_WARN", myversion + " "),
12304 colorize("GOOD", myversion + " "), noiselevel=-1)
12306 writemsg_level("none ", noiselevel=-1)
12308 writemsg_level("\n", noiselevel=-1)
12310 writemsg_level("\n", noiselevel=-1)
12312 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12313 " packages are slated for removal.\n")
12314 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12315 " and " + colorize("GOOD", "'omitted'") + \
12316 " packages will not be removed.\n\n")
12318 if "--pretend" in myopts:
12319 #we're done... return
12321 if "--ask" in myopts:
12322 if userquery("Would you like to unmerge these packages?")=="No":
12323 # enter pretend mode for correct formatting of results
12324 myopts["--pretend"] = True
12329 #the real unmerging begins, after a short delay....
12330 if clean_delay and not autoclean:
12331 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12333 for x in xrange(len(pkgmap)):
12334 for y in pkgmap[x]["selected"]:
12335 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12336 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12337 mysplit = y.split("/")
12339 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12340 mysettings, unmerge_action not in ["clean","prune"],
12341 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12342 scheduler=scheduler)
12344 if retval != os.EX_OK:
12345 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12347 raise UninstallFailure(retval)
# On success, drop the package (and any unmerged sets) from "world".
12350 if clean_world and hasattr(sets["world"], "cleanPackage"):
12351 sets["world"].cleanPackage(vartree.dbapi, y)
12352 emergelog(xterm_titles, " >>> unmerge success: "+y)
12353 if clean_world and hasattr(sets["world"], "remove"):
12354 for s in root_config.setconfig.active:
12355 sets["world"].remove(SETPREFIX+s)
# Regenerate the GNU info directory index ("dir" files) for any INFOPATH
# directories whose mtimes changed since the previous run, using
# /usr/bin/install-info. prev_mtimes is both read and updated in place.
# NOTE(review): fragment — try:/else: lines between numbered statements
# are missing from this extraction.
12358 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12360 if os.path.exists("/usr/bin/install-info"):
12361 out = portage.output.EOutput()
# Collect the info dirs whose recorded mtime is stale or absent.
12366 inforoot=normpath(root+z)
12367 if os.path.isdir(inforoot):
12368 infomtime = long(os.stat(inforoot).st_mtime)
12369 if inforoot not in prev_mtimes or \
12370 prev_mtimes[inforoot] != infomtime:
12371 regen_infodirs.append(inforoot)
12373 if not regen_infodirs:
12374 portage.writemsg_stdout("\n")
12375 out.einfo("GNU info directory index is up-to-date.")
12377 portage.writemsg_stdout("\n")
12378 out.einfo("Regenerating GNU info directory index...")
12380 dir_extensions = ("", ".gz", ".bz2")
12384 for inforoot in regen_infodirs:
12388 if not os.path.isdir(inforoot) or \
12389 not os.access(inforoot, os.W_OK):
12392 file_list = os.listdir(inforoot)
12394 dir_file = os.path.join(inforoot, "dir")
12395 moved_old_dir = False
12396 processed_count = 0
12397 for x in file_list:
# Skip hidden entries, subdirectories, and the dir index files themselves.
12398 if x.startswith(".") or \
12399 os.path.isdir(os.path.join(inforoot, x)):
12401 if x.startswith("dir"):
12403 for ext in dir_extensions:
12404 if x == "dir" + ext or \
12405 x == "dir" + ext + ".old":
# Before processing the first info file, move any existing dir index
# aside so install-info rebuilds it from scratch.
12410 if processed_count == 0:
12411 for ext in dir_extensions:
12413 os.rename(dir_file + ext, dir_file + ext + ".old")
12414 moved_old_dir = True
12415 except EnvironmentError, e:
12416 if e.errno != errno.ENOENT:
12419 processed_count += 1
12420 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12421 existsstr="already exists, for file `"
# Classify install-info's stderr: duplicates and missing-DIR-header
# warnings are harmless; anything else counts as an error.
12423 if re.search(existsstr,myso):
12424 # Already exists... Don't increment the count for this.
12426 elif myso[:44]=="install-info: warning: no info dir entry in ":
12427 # This info file doesn't contain a DIR-header: install-info produces this
12428 # (harmless) warning (the --quiet switch doesn't seem to work).
12429 # Don't increment the count for this.
12432 badcount=badcount+1
12433 errmsg += myso + "\n"
12436 if moved_old_dir and not os.path.exists(dir_file):
12437 # We didn't generate a new dir file, so put the old file
12438 # back where it was originally found.
12439 for ext in dir_extensions:
12441 os.rename(dir_file + ext + ".old", dir_file + ext)
12442 except EnvironmentError, e:
12443 if e.errno != errno.ENOENT:
12447 # Clean dir.old cruft so that they don't prevent
12448 # unmerge of otherwise empty directories.
12449 for ext in dir_extensions:
12451 os.unlink(dir_file + ext + ".old")
12452 except EnvironmentError, e:
12453 if e.errno != errno.ENOENT:
12457 #update mtime so we can potentially avoid regenerating.
12458 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12461 out.eerror("Processed %d info files; %d errors." % \
12462 (icount, badcount))
12463 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12466 out.einfo("Processed %d info files." % (icount,))
# Count unread GLEP 42 news items per repository and print a reminder to
# run `eselect news` when any exist. Unread counts are updated on disk
# unless running under --pretend.
12469 def display_news_notification(root_config, myopts):
12470 target_root = root_config.root
12471 trees = root_config.trees
12472 settings = trees["vartree"].settings
12473 portdb = trees["porttree"].dbapi
12474 vardb = trees["vartree"].dbapi
12475 NEWS_PATH = os.path.join("metadata", "news")
12476 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12477 newsReaderDisplay = False
12478 update = "--pretend" not in myopts
12480 for repo in portdb.getRepositories():
12481 unreadItems = checkUpdatedNewsItems(
12482 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
# Print the header only once, before the first repo with unread items.
12484 if not newsReaderDisplay:
12485 newsReaderDisplay = True
12487 print colorize("WARN", " * IMPORTANT:"),
12488 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12491 if newsReaderDisplay:
12492 print colorize("WARN", " *"),
12493 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# Print a report of preserved libraries (old .so versions kept alive after
# an upgrade) together with the packages that still link against them.
# NOTE(review): fragment — try:/else: and some assignment lines (e.g. the
# MAX_DISPLAY constant, consumer_map/samefile_map initialization) are
# missing from this extraction.
12496 def display_preserved_libs(vardbapi):
12499 # Ensure the registry is consistent with existing files.
12500 vardbapi.plib_registry.pruneNonExisting()
12502 if vardbapi.plib_registry.hasEntries():
12504 print colorize("WARN", "!!!") + " existing preserved libs:"
12505 plibdata = vardbapi.plib_registry.getPreservedLibs()
12506 linkmap = vardbapi.linkmap
12509 linkmap_broken = False
# The linkmap rebuild (elided) can fail when scanelf is unavailable.
12513 except portage.exception.CommandNotFound, e:
12514 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12515 level=logging.ERROR, noiselevel=-1)
12517 linkmap_broken = True
# First pass: for each preserved lib, find external consumers (excluding
# preserved libs of the same package) and remember them for owner lookup.
12519 search_for_owners = set()
12520 for cpv in plibdata:
12521 internal_plib_keys = set(linkmap._obj_key(f) \
12522 for f in plibdata[cpv])
12523 for f in plibdata[cpv]:
12524 if f in consumer_map:
12527 for c in linkmap.findConsumers(f):
12528 # Filter out any consumers that are also preserved libs
12529 # belonging to the same package as the provider.
12530 if linkmap._obj_key(c) not in internal_plib_keys:
12531 consumers.append(c)
12533 consumer_map[f] = consumers
12534 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12536 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
# Second pass: print each package's preserved files (grouping hardlinked
# duplicates via samefile_map) and up to MAX_DISPLAY consumers.
12538 for cpv in plibdata:
12539 print colorize("WARN", ">>>") + " package: %s" % cpv
12541 for f in plibdata[cpv]:
12542 obj_key = linkmap._obj_key(f)
12543 alt_paths = samefile_map.get(obj_key)
12544 if alt_paths is None:
12546 samefile_map[obj_key] = alt_paths
12549 for alt_paths in samefile_map.itervalues():
12550 alt_paths = sorted(alt_paths)
12551 for p in alt_paths:
12552 print colorize("WARN", " * ") + " - %s" % (p,)
12554 consumers = consumer_map.get(f, [])
12555 for c in consumers[:MAX_DISPLAY]:
12556 print colorize("WARN", " * ") + " used by %s (%s)" % \
12557 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12558 if len(consumers) == MAX_DISPLAY + 1:
12559 print colorize("WARN", " * ") + " used by %s (%s)" % \
12560 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12561 for x in owners.get(consumers[MAX_DISPLAY], [])))
12562 elif len(consumers) > MAX_DISPLAY:
12563 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12564 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.
	@returns: True if messages were shown, False otherwise.
	"""
	messages_shown = False
	try:
		from portage.elog import mod_echo
	except ImportError:
		pass # happens during downgrade to a version without the module
	else:
		# Only reached when the import succeeded: report whether any
		# messages were pending, then flush them.
		messages_shown = bool(mod_echo._items)
		mod_echo.finalize()
	return messages_shown
# post_emerge(): miscellaneous tasks run at the end of a merge session:
# log the exit status, flush pending elog output, regenerate GNU info
# directory indexes, warn about unmerged config files and preserved
# libraries, and show news notifications.
# NOTE(review): interior lines are missing from this listing (see the
# gaps in the embedded line numbers); code is kept byte-identical.
12584 def post_emerge(root_config, myopts, mtimedb, retval):
12586 Misc. things to run at the end of a merge session.
12589 Update Config Files
12592 Display preserved libs warnings
12595 @param trees: A dictionary mapping each ROOT to it's package databases
12597 @param mtimedb: The mtimeDB to store data needed across merge invocations
12598 @type mtimedb: MtimeDB class instance
12599 @param retval: Emerge's return value
12603 1. Calls sys.exit(retval)
# Resolve the target ROOT's package databases and settings.
12606 target_root = root_config.root
12607 trees = { target_root : root_config.trees }
12608 vardbapi = trees[target_root]["vartree"].dbapi
12609 settings = vardbapi.settings
12610 info_mtimes = mtimedb["info"]
12612 # Load the most current variables from ${ROOT}/etc/profile.env
12615 settings.regenerate()
12618 config_protect = settings.get("CONFIG_PROTECT","").split()
12619 infodirs = settings.get("INFOPATH","").split(":") + \
12620 settings.get("INFODIR","").split(":")
# Log whether the session succeeded before doing any cleanup work.
12624 if retval == os.EX_OK:
12625 exit_msg = " *** exiting successfully."
12627 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12628 emergelog("notitles" not in settings.features, exit_msg)
12630 _flush_elog_mod_echo()
# PORTAGE_COUNTER_HASH is compared against the current vdb counter hash
# to detect whether the vdb changed; when unchanged (or under --pretend)
# only the news notification is shown.
12632 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12633 if "--pretend" in myopts or (counter_hash is not None and \
12634 counter_hash == vardbapi._counter_hash()):
12635 display_news_notification(root_config, myopts)
12636 # If vdb state has not changed then there's nothing else to do.
12639 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12640 portage.util.ensure_dirs(vdb_path)
# Hold the vdb lock while regenerating the info directory index, unless
# the vdb is read-only or this is a --pretend run.
12642 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12643 vdb_lock = portage.locks.lockdir(vdb_path)
12647 if "noinfo" not in settings.features:
12648 chk_updated_info_files(target_root,
12649 infodirs, info_mtimes, retval)
12653 portage.locks.unlockdir(vdb_lock)
12655 chk_updated_cfg_files(target_root, config_protect)
12657 display_news_notification(root_config, myopts)
12658 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12659 display_preserved_libs(vardbapi)
# chk_updated_cfg_files(): scan each CONFIG_PROTECT path under
# target_root for pending ._cfg????_* update files and print a summary
# telling the user how to merge them.
# NOTE(review): interior lines are missing from this listing (see the
# gaps in the embedded line numbers); code is kept byte-identical.
12664 def chk_updated_cfg_files(target_root, config_protect):
12666 #number of directories with some protect files in them
12668 for x in config_protect:
12669 x = os.path.join(target_root, x.lstrip(os.path.sep))
# Skip paths that are not writable; updates could not be merged anyway.
12670 if not os.access(x, os.W_OK):
12671 # Avoid Permission denied errors generated
12675 mymode = os.lstat(x).st_mode
# A symlink pointing at an existing directory is treated as a directory.
12678 if stat.S_ISLNK(mymode):
12679 # We want to treat it like a directory if it
12680 # is a symlink to an existing directory.
12682 real_mode = os.stat(x).st_mode
12683 if stat.S_ISDIR(real_mode):
# Build a find(1) command listing pending ._cfg????_* files, pruning
# hidden directories and excluding editor backup files.
12687 if stat.S_ISDIR(mymode):
12688 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12690 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12691 os.path.split(x.rstrip(os.path.sep))
12692 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12693 a = commands.getstatusoutput(mycommand)
# On error, re-run with stdout discarded so only the error message shows.
12695 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12697 # Show the error message alone, sending stdout to /dev/null.
12698 os.system(mycommand + " 1>/dev/null")
# find's output is NUL-delimited (-print0); drop the trailing empty item.
12700 files = a[1].split('\0')
12701 # split always produces an empty string as the last element
12702 if files and not files[-1]:
12706 print "\n"+colorize("WARN", " * IMPORTANT:"),
12707 if stat.S_ISDIR(mymode):
12708 print "%d config files in '%s' need updating." % \
12711 print "config file '%s' needs updating." % x
12714 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12715 " section of the " + bold("emerge")
12716 print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	update=False):
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find
	unread items.  Returns the number of unread (yet relevant) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: path to the news items, relative to the repository
	@param UNREAD_PATH: path in which the unread-item list is stored
	@param repo_id: identifier of the repository to scan
	@param update: passed through to NewsManager.getUnreadItems()
		NOTE(review): default assumed to be False -- confirm against callers.
	@rtype: Integer
	@return:
	1. The number of unread but relevant news items.
	"""
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""
	Insert a category into a category-less package atom.

	The category is spliced in immediately before the first alphanumeric
	character, so operator/version prefixes are preserved
	(e.g. ">=foo-1.0" + "dev-util" -> ">=dev-util/foo-1.0").

	@param atom: a package atom without a category part
	@param category: the category name to insert
	@rtype: str or None
	@return: the atom with "category/" inserted, or None when the atom
		contains no alphanumeric character to anchor the insertion
	"""
	alphanum = re.search(r'\w', atom)
	if alphanum is None:
		# No place to anchor the category; signal failure to the caller
		# instead of raising AttributeError on alphanum.start().
		return None
	return atom[:alphanum.start()] + "%s/" % category + \
		atom[alphanum.start():]
def is_valid_package_atom(x):
	"""
	Return True if x is a valid package atom.

	Atoms that lack a category part (e.g. ">=foo-1.2") are validated by
	temporarily inserting a dummy "cat/" category, since
	portage.isvalidatom() requires a category to be present.
	"""
	if "/" not in x:
		# Only inject the dummy category when none is present; inserting
		# it unconditionally would corrupt atoms that already have a
		# category, and a failed search would crash on alphanum.start().
		alphanum = re.search(r'\w', x)
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
# show_blocker_docs_link(): print a pointer to the "Blocked Packages"
# section of the Gentoo Linux x86 Handbook.
# NOTE(review): lines 12760/12763/etc. are missing from this listing
# (likely blank-line prints); code is kept byte-identical.
12759 def show_blocker_docs_link():
12761 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12762 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12764 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
def show_mask_docs():
	"""Point the user at the documentation covering masked packages."""
	doc_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge",
		"man page or refer to the Gentoo Handbook.",
	)
	for doc_line in doc_lines:
		print(doc_line)
# action_sync(): implement "emerge --sync" / "emerge --metadata".
# Dispatches on the SYNC URI scheme (existing git working tree,
# rsync://, cvs://), then refreshes the metadata cache, applies global
# updates, runs the user's post_sync hook, and suggests a portage
# upgrade when one is available.
# NOTE(review): interior lines are missing from this listing (see the
# gaps in the embedded line numbers); code is kept byte-identical.
12771 def action_sync(settings, trees, mtimedb, myopts, myaction):
12772 xterm_titles = "notitles" not in settings.features
12773 emergelog(xterm_titles, " === sync")
12774 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12775 myportdir = portdb.porttree_root
12776 out = portage.output.EOutput()
12778 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
12780 if myportdir[-1]=="/":
12781 myportdir=myportdir[:-1]
12783 st = os.stat(myportdir)
12787 print ">>>",myportdir,"not found, creating it."
12788 os.makedirs(myportdir,0755)
12789 st = os.stat(myportdir)
# With FEATURES=usersync and sufficient privileges, run the sync as the
# owner of PORTDIR so existing uid/gid/permissions are preserved.
12792 spawn_kwargs["env"] = settings.environ()
12793 if 'usersync' in settings.features and \
12794 portage.data.secpass >= 2 and \
12795 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12796 st.st_gid != os.getgid() and st.st_mode & 0070):
12798 homedir = pwd.getpwuid(st.st_uid).pw_dir
12802 # Drop privileges when syncing, in order to match
12803 # existing uid/gid settings.
12804 spawn_kwargs["uid"] = st.st_uid
12805 spawn_kwargs["gid"] = st.st_gid
12806 spawn_kwargs["groups"] = [st.st_gid]
12807 spawn_kwargs["env"]["HOME"] = homedir
12809 if not st.st_mode & 0020:
12810 umask = umask | 0020
12811 spawn_kwargs["umask"] = umask
12813 syncuri = settings.get("SYNC", "").strip()
12815 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12816 noiselevel=-1, level=logging.ERROR)
# Detect version-control checkouts inside PORTDIR.
12819 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12820 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12823 dosyncuri = syncuri
12824 updatecache_flg = False
12825 if myaction == "metadata":
12826 print "skipping sync"
12827 updatecache_flg = True
12828 elif ".git" in vcs_dirs:
12829 # Update existing git repository, and ignore the syncuri. We are
12830 # going to trust the user and assume that the user is in the branch
12831 # that he/she wants updated. We'll let the user manage branches with
12833 if portage.process.find_binary("git") is None:
12834 msg = ["Command not found: git",
12835 "Type \"emerge dev-util/git\" to enable git support."]
12837 writemsg_level("!!! %s\n" % l,
12838 level=logging.ERROR, noiselevel=-1)
12840 msg = ">>> Starting git pull in %s..." % myportdir
12841 emergelog(xterm_titles, msg )
12842 writemsg_level(msg + "\n")
12843 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12844 (portage._shell_quote(myportdir),), **spawn_kwargs)
12845 if exitcode != os.EX_OK:
12846 msg = "!!! git pull error in %s." % myportdir
12847 emergelog(xterm_titles, msg)
12848 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12850 msg = ">>> Git pull in %s successful" % myportdir
12851 emergelog(xterm_titles, msg)
12852 writemsg_level(msg + "\n")
12853 exitcode = git_sync_timestamps(settings, myportdir)
12854 if exitcode == os.EX_OK:
12855 updatecache_flg = True
12856 elif syncuri[:8]=="rsync://":
# Refuse rsync over a checkout managed by a VCS.
12857 for vcs_dir in vcs_dirs:
12858 writemsg_level(("!!! %s appears to be under revision " + \
12859 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12860 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12862 if not os.path.exists("/usr/bin/rsync"):
12863 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12864 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Assemble the rsync option list: hardcoded defaults when
# PORTAGE_RSYNC_OPTS is unset, otherwise the user's options with
# required flags enforced.
12869 if settings["PORTAGE_RSYNC_OPTS"] == "":
12870 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12871 rsync_opts.extend([
12872 "--recursive", # Recurse directories
12873 "--links", # Consider symlinks
12874 "--safe-links", # Ignore links outside of tree
12875 "--perms", # Preserve permissions
12876 "--times", # Preserve mod times
12877 "--compress", # Compress the data transmitted
12878 "--force", # Force deletion on non-empty dirs
12879 "--whole-file", # Don't do block transfers, only entire files
12880 "--delete", # Delete files that aren't in the master tree
12881 "--stats", # Show final statistics about what was transferred
12882 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12883 "--exclude=/distfiles", # Exclude distfiles from consideration
12884 "--exclude=/local", # Exclude local from consideration
12885 "--exclude=/packages", # Exclude packages from consideration
12889 # The below validation is not needed when using the above hardcoded
12892 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12894 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12895 for opt in ("--recursive", "--times"):
12896 if opt not in rsync_opts:
12897 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12898 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12899 rsync_opts.append(opt)
12901 for exclude in ("distfiles", "local", "packages"):
12902 opt = "--exclude=/%s" % exclude
12903 if opt not in rsync_opts:
12904 portage.writemsg(yellow("WARNING:") + \
12905 " adding required option %s not included in " % opt + \
12906 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12907 rsync_opts.append(opt)
# Official gentoo.org mirrors get extra mandatory options.
12909 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12910 def rsync_opt_startswith(opt_prefix):
12911 for x in rsync_opts:
12912 if x.startswith(opt_prefix):
12916 if not rsync_opt_startswith("--timeout="):
12917 rsync_opts.append("--timeout=%d" % mytimeout)
12919 for opt in ("--compress", "--whole-file"):
12920 if opt not in rsync_opts:
12921 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12922 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12923 rsync_opts.append(opt)
12925 if "--quiet" in myopts:
12926 rsync_opts.append("--quiet") # Shut up a lot
12928 rsync_opts.append("--verbose") # Print filelist
12930 if "--verbose" in myopts:
12931 rsync_opts.append("--progress") # Progress meter for each file
12933 if "--debug" in myopts:
12934 rsync_opts.append("--checksum") # Force checksum on all files
12936 # Real local timestamp file.
12937 servertimestampfile = os.path.join(
12938 myportdir, "metadata", "timestamp.chk")
12940 content = portage.util.grabfile(servertimestampfile)
12944 mytimestamp = time.mktime(time.strptime(content[0],
12945 "%a, %d %b %Y %H:%M:%S +0000"))
12946 except (OverflowError, ValueError):
12951 rsync_initial_timeout = \
12952 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12954 rsync_initial_timeout = 15
12957 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12958 except SystemExit, e:
12959 raise # Needed else can't exit
12961 maxretries=3 #default number of retries
# Split the rsync URI into user, host and port components.
12964 user_name, hostname, port = re.split(
12965 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12968 if user_name is None:
12970 updatecache_flg=True
12971 all_rsync_opts = set(rsync_opts)
12972 extra_rsync_opts = shlex.split(
12973 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12974 all_rsync_opts.update(extra_rsync_opts)
# Choose the address family; -4/--ipv4 and -6/--ipv6 rsync options win.
12975 family = socket.AF_INET
12976 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12977 family = socket.AF_INET
12978 elif socket.has_ipv6 and \
12979 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12980 family = socket.AF_INET6
12982 SERVER_OUT_OF_DATE = -1
12983 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname to concrete addresses so retries can
# rotate through them.
12989 for addrinfo in socket.getaddrinfo(
12990 hostname, None, family, socket.SOCK_STREAM):
12991 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12992 # IPv6 addresses need to be enclosed in square brackets
12993 ips.append("[%s]" % addrinfo[4][0])
12995 ips.append(addrinfo[4][0])
12996 from random import shuffle
12998 except SystemExit, e:
12999 raise # Needed else can't exit
13000 except Exception, e:
13001 print "Notice:",str(e)
13006 dosyncuri = syncuri.replace(
13007 "//" + user_name + hostname + port + "/",
13008 "//" + user_name + ips[0] + port + "/", 1)
13009 except SystemExit, e:
13010 raise # Needed else can't exit
13011 except Exception, e:
13012 print "Notice:",str(e)
13016 if "--ask" in myopts:
13017 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
13022 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
13023 if "--quiet" not in myopts:
13024 print ">>> Starting rsync with "+dosyncuri+"..."
13026 emergelog(xterm_titles,
13027 ">>> Starting retry %d of %d with %s" % \
13028 (retries,maxretries,dosyncuri))
13029 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
13031 if mytimestamp != 0 and "--quiet" not in myopts:
13032 print ">>> Checking server timestamp ..."
13034 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
13036 if "--debug" in myopts:
13039 exitcode = os.EX_OK
13040 servertimestamp = 0
13041 # Even if there's no timestamp available locally, fetch the
13042 # timestamp anyway as an initial probe to verify that the server is
13043 # responsive. This protects us from hanging indefinitely on a
13044 # connection attempt to an unresponsive server which rsync's
13045 # --timeout option does not prevent.
13047 # Temporary file for remote server timestamp comparison.
13048 from tempfile import mkstemp
13049 fd, tmpservertimestampfile = mkstemp()
13051 mycommand = rsynccommand[:]
13052 mycommand.append(dosyncuri.rstrip("/") + \
13053 "/metadata/timestamp.chk")
13054 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the initial connection, which --timeout doesn't cover.
13058 def timeout_handler(signum, frame):
13059 raise portage.exception.PortageException("timed out")
13060 signal.signal(signal.SIGALRM, timeout_handler)
13061 # Timeout here in case the server is unresponsive. The
13062 # --timeout rsync option doesn't apply to the initial
13063 # connection attempt.
13064 if rsync_initial_timeout:
13065 signal.alarm(rsync_initial_timeout)
13067 mypids.extend(portage.process.spawn(
13068 mycommand, env=settings.environ(), returnpid=True))
13069 exitcode = os.waitpid(mypids[0], 0)[1]
13070 content = portage.grabfile(tmpservertimestampfile)
13072 if rsync_initial_timeout:
13075 os.unlink(tmpservertimestampfile)
13078 except portage.exception.PortageException, e:
# The probe timed out: kill the spawned rsync if still running.
13082 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13083 os.kill(mypids[0], signal.SIGTERM)
13084 os.waitpid(mypids[0], 0)
13085 # This is the same code rsync uses for timeout.
# Normalize the waitpid status into an exit code.
13088 if exitcode != os.EX_OK:
13089 if exitcode & 0xff:
13090 exitcode = (exitcode & 0xff) << 8
13092 exitcode = exitcode >> 8
13094 portage.process.spawned_pids.remove(mypids[0])
13097 servertimestamp = time.mktime(time.strptime(
13098 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13099 except (OverflowError, ValueError):
13101 del mycommand, mypids, content
# Compare server and local timestamps to decide whether to sync at all.
13102 if exitcode == os.EX_OK:
13103 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13104 emergelog(xterm_titles,
13105 ">>> Cancelling sync -- Already current.")
13108 print ">>> Timestamps on the server and in the local repository are the same."
13109 print ">>> Cancelling all further sync action. You are already up to date."
13111 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13115 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13116 emergelog(xterm_titles,
13117 ">>> Server out of date: %s" % dosyncuri)
13120 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13122 print ">>> In order to force sync, remove '%s'." % servertimestampfile
13125 exitcode = SERVER_OUT_OF_DATE
13126 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# Server is newer (or has no timestamp): run the real sync.
13128 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13129 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13130 if exitcode in [0,1,3,4,11,14,20,21]:
13132 elif exitcode in [1,3,4,11,14,20,21]:
13135 # Code 2 indicates protocol incompatibility, which is expected
13136 # for servers with protocol < 29 that don't support
13137 # --prune-empty-directories. Retry for a server that supports
13138 # at least rsync protocol version 29 (>=rsync-2.6.4).
13143 if retries<=maxretries:
13144 print ">>> Retrying..."
13149 updatecache_flg=False
13150 exitcode = EXCEEDED_MAX_RETRIES
# Report the final outcome, mapping known rsync exit codes to advice.
13154 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13155 elif exitcode == SERVER_OUT_OF_DATE:
13157 elif exitcode == EXCEEDED_MAX_RETRIES:
13159 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13164 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13165 msg.append("that your SYNC statement is proper.")
13166 msg.append("SYNC=" + settings["SYNC"])
13168 msg.append("Rsync has reported that there is a File IO error. Normally")
13169 msg.append("this means your disk is full, but can be caused by corruption")
13170 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13171 msg.append("and try again after the problem has been fixed.")
13172 msg.append("PORTDIR=" + settings["PORTDIR"])
13174 msg.append("Rsync was killed before it finished.")
13176 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13177 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13178 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13179 msg.append("temporary problem unless complications exist with your network")
13180 msg.append("(and possibly your system's filesystem) configuration.")
# cvs:// handling: initial checkout or incremental update.
13184 elif syncuri[:6]=="cvs://":
13185 if not os.path.exists("/usr/bin/cvs"):
13186 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13187 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13189 cvsroot=syncuri[6:]
13190 cvsdir=os.path.dirname(myportdir)
13191 if not os.path.exists(myportdir+"/CVS"):
13193 print ">>> Starting initial cvs checkout with "+syncuri+"..."
13194 if os.path.exists(cvsdir+"/gentoo-x86"):
13195 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13198 os.rmdir(myportdir)
13200 if e.errno != errno.ENOENT:
13202 "!!! existing '%s' directory; exiting.\n" % myportdir)
13205 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13206 print "!!! cvs checkout error; exiting."
13208 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13211 print ">>> Starting cvs update with "+syncuri+"..."
13212 retval = portage.process.spawn_bash(
13213 "cd %s; cvs -z0 -q update -dP" % \
13214 (portage._shell_quote(myportdir),), **spawn_kwargs)
13215 if retval != os.EX_OK:
13217 dosyncuri = syncuri
13219 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13220 noiselevel=-1, level=logging.ERROR)
# Post-sync: refresh the metadata cache and apply global updates,
# reloading the emerge configuration as needed.
13223 if updatecache_flg and \
13224 myaction != "metadata" and \
13225 "metadata-transfer" not in settings.features:
13226 updatecache_flg = False
13228 # Reload the whole config from scratch.
13229 settings, trees, mtimedb = load_emerge_config(trees=trees)
13230 root_config = trees[settings["ROOT"]]["root_config"]
13231 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13233 if updatecache_flg and \
13234 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
13236 # Only update cache for myportdir since that's
13237 # the only one that's been synced here.
13238 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
13240 if portage._global_updates(trees, mtimedb["updates"]):
13242 # Reload the whole config from scratch.
13243 settings, trees, mtimedb = load_emerge_config(trees=trees)
13244 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13245 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one.
13247 mybestpv = portdb.xmatch("bestmatch-visible",
13248 portage.const.PORTAGE_PACKAGE_ATOM)
13249 mypvs = portage.best(
13250 trees[settings["ROOT"]]["vartree"].dbapi.match(
13251 portage.const.PORTAGE_PACKAGE_ATOM))
13253 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's post_sync hook when present and executable.
13255 if myaction != "metadata":
13256 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13257 retval = portage.process.spawn(
13258 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13259 dosyncuri], env=settings.environ())
13260 if retval != os.EX_OK:
13261 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13263 if(mybestpv != mypvs) and not "--quiet" in myopts:
13265 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13266 print red(" * ")+"that you update portage now, before any other packages are updated."
13268 print red(" * ")+"To update portage, run 'emerge portage' now."
13271 display_news_notification(root_config, myopts)
# git_sync_timestamps(): after a git sync, restore mtimes on ebuilds and
# eclasses from the metadata cache so timestamp-based cache validation
# keeps working (git itself does not preserve timestamps).
# NOTE(review): interior lines are missing from this listing (see the
# gaps in the embedded line numbers); code is kept byte-identical.
13274 def git_sync_timestamps(settings, portdir):
13276 Since git doesn't preserve timestamps, synchronize timestamps between
13277 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13278 for a given file as long as the file in the working tree is not modified
13279 (relative to HEAD).
13281 cache_dir = os.path.join(portdir, "metadata", "cache")
13282 if not os.path.isdir(cache_dir):
13284 writemsg_level(">>> Synchronizing timestamps...\n")
# Open the pregenerated metadata cache read-only.
13286 from portage.cache.cache_errors import CacheError
13288 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13289 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13290 except CacheError, e:
13291 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13292 level=logging.ERROR, noiselevel=-1)
13295 ec_dir = os.path.join(portdir, "eclass")
13297 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13298 if f.endswith(".eclass"))
13300 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13301 level=logging.ERROR, noiselevel=-1)
# Ask git which files are modified relative to HEAD; those must not
# have their timestamps touched.
13304 args = [portage.const.BASH_BINARY, "-c",
13305 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13306 portage._shell_quote(portdir)]
13308 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13309 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13311 if rval != os.EX_OK:
13314 modified_eclasses = set(ec for ec in ec_names \
13315 if os.path.join("eclass", ec + ".eclass") in modified_files)
13317 updated_ec_mtimes = {}
# Walk every cache entry, validating it before trusting its mtimes.
13319 for cpv in cache_db:
13320 cpv_split = portage.catpkgsplit(cpv)
13321 if cpv_split is None:
13322 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13323 level=logging.ERROR, noiselevel=-1)
13326 cat, pn, ver, rev = cpv_split
13327 cat, pf = portage.catsplit(cpv)
13328 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13329 if relative_eb_path in modified_files:
13333 cache_entry = cache_db[cpv]
13334 eb_mtime = cache_entry.get("_mtime_")
13335 ec_mtimes = cache_entry.get("_eclasses_")
13337 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13338 level=logging.ERROR, noiselevel=-1)
13340 except CacheError, e:
13341 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13342 (cpv, e), level=logging.ERROR, noiselevel=-1)
13345 if eb_mtime is None:
13346 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13347 level=logging.ERROR, noiselevel=-1)
13351 eb_mtime = long(eb_mtime)
13353 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13354 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13357 if ec_mtimes is None:
13358 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13359 level=logging.ERROR, noiselevel=-1)
13362 if modified_eclasses.intersection(ec_mtimes):
13365 missing_eclasses = set(ec_mtimes).difference(ec_names)
13366 if missing_eclasses:
13367 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13368 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13372 eb_path = os.path.join(portdir, relative_eb_path)
13374 current_eb_mtime = os.stat(eb_path)
13376 writemsg_level("!!! Missing ebuild: %s\n" % \
13377 (cpv,), level=logging.ERROR, noiselevel=-1)
# Skip entries whose eclass mtimes disagree with updates already applied
# for another entry.
13380 inconsistent = False
13381 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13382 updated_mtime = updated_ec_mtimes.get(ec)
13383 if updated_mtime is not None and updated_mtime != ec_mtime:
13384 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13385 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13386 inconsistent = True
# Apply the cached mtimes to the ebuild and to each eclass, touching
# every eclass only once (tracked via updated_ec_mtimes).
13392 if current_eb_mtime != eb_mtime:
13393 os.utime(eb_path, (eb_mtime, eb_mtime))
13395 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13396 if ec in updated_ec_mtimes:
13398 ec_path = os.path.join(ec_dir, ec + ".eclass")
13399 current_mtime = long(os.stat(ec_path).st_mtime)
13400 if current_mtime != ec_mtime:
13401 os.utime(ec_path, (ec_mtime, ec_mtime))
13402 updated_ec_mtimes[ec] = ec_mtime
# action_metadata(): transfer the pregenerated metadata cache
# (metadata/cache) of each port tree into the local depcache, validating
# entries and pruning dead nodes, with a terminal progress bar.
# NOTE(review): interior lines are missing from this listing (see the
# gaps in the embedded line numbers); code is kept byte-identical.
13406 def action_metadata(settings, portdb, myopts, porttrees=None):
13407 if porttrees is None:
13408 porttrees = portdb.porttrees
13409 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
13410 old_umask = os.umask(0002)
13411 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on a primary root directory as the cache dir.
13412 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13413 "/lib", "/opt", "/proc", "/root", "/sbin",
13414 "/sys", "/tmp", "/usr", "/var"]:
13415 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13416 "ROOT DIRECTORY ON YOUR SYSTEM."
13417 print >> sys.stderr, \
13418 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13420 if not os.path.exists(cachedir):
13421 os.makedirs(cachedir)
13423 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
13424 auxdbkeys = tuple(auxdbkeys)
# Per-tree bundle: source cache, destination cache, eclass db and the
# set of cpv nodes seen during the transfer.
13426 class TreeData(object):
13427 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
13428 def __init__(self, dest_db, eclass_db, path, src_db):
13429 self.dest_db = dest_db
13430 self.eclass_db = eclass_db
13432 self.src_db = src_db
13433 self.valid_nodes = set()
13435 porttrees_data = []
13436 for path in porttrees:
13437 src_db = portdb._pregen_auxdb.get(path)
13438 if src_db is None and \
13439 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
13440 src_db = portdb.metadbmodule(
13441 path, 'metadata/cache', auxdbkeys, readonly=True)
13443 src_db.ec = portdb._repo_info[path].eclass_db
13444 except AttributeError:
13447 if src_db is not None:
13448 porttrees_data.append(TreeData(portdb.auxdb[path],
13449 portdb._repo_info[path].eclass_db, path, src_db))
13451 porttrees = [tree_data.path for tree_data in porttrees_data]
# Only draw a progress bar on an interactive, non-quiet terminal.
13453 isatty = sys.stdout.isatty()
13454 quiet = not isatty or '--quiet' in myopts
13457 progressBar = portage.output.TermProgressBar()
13458 progressHandler = ProgressHandler()
13459 onProgress = progressHandler.onProgress
13461 progressBar.set(progressHandler.curval, progressHandler.maxval)
13462 progressHandler.display = display
13463 def sigwinch_handler(signum, frame):
13464 lines, progressBar.term_columns = \
13465 portage.output.get_term_size()
13466 signal.signal(signal.SIGWINCH, sigwinch_handler)
13468 # Temporarily override portdb.porttrees so portdb.cp_all()
13469 # will only return the relevant subset.
13470 portdb_porttrees = portdb.porttrees
13471 portdb.porttrees = porttrees
13473 cp_all = portdb.cp_all()
13475 portdb.porttrees = portdb_porttrees
13478 maxval = len(cp_all)
13479 if onProgress is not None:
13480 onProgress(maxval, curval)
13482 from portage.cache.util import quiet_mirroring
13483 from portage import eapi_is_supported, \
13484 _validate_cache_for_unsupported_eapis
13486 # TODO: Display error messages, but do not interfere with the progress bar.
13488 # 1) erase the progress bar
13489 # 2) show the error message
13490 # 3) redraw the progress bar on a new line
13491 noise = quiet_mirroring()
# Main transfer loop: copy each valid source entry into the destination
# cache, skipping identical/up-to-date entries.
13494 for tree_data in porttrees_data:
13495 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
13496 tree_data.valid_nodes.add(cpv)
13498 src = tree_data.src_db[cpv]
13499 except KeyError, e:
13500 noise.missing_entry(cpv)
13503 except CacheError, ce:
13504 noise.exception(cpv, ce)
13508 eapi = src.get('EAPI')
13511 eapi = eapi.lstrip('-')
13512 eapi_supported = eapi_is_supported(eapi)
13513 if not eapi_supported:
13514 if not _validate_cache_for_unsupported_eapis:
13515 noise.misc(cpv, "unable to validate " + \
13516 "cache for EAPI='%s'" % eapi)
13521 dest = tree_data.dest_db[cpv]
13522 except (KeyError, CacheError):
13525 for d in (src, dest):
13526 if d is not None and d.get('EAPI') in ('', '0'):
# An existing destination entry is only kept when its mtime, eclass
# data, and every other field match the source exactly.
13529 if dest is not None:
13530 if not (dest['_mtime_'] == src['_mtime_'] and \
13531 tree_data.eclass_db.is_eclass_data_valid(
13532 dest['_eclasses_']) and \
13533 set(dest['_eclasses_']) == set(src['_eclasses_'])):
13536 # We don't want to skip the write unless we're really
13537 # sure that the existing cache is identical, so don't
13538 # trust _mtime_ and _eclasses_ alone.
13539 for k in set(chain(src, dest)).difference(
13540 ('_mtime_', '_eclasses_')):
13541 if dest.get(k, '') != src.get(k, ''):
13545 if dest is not None:
13546 # The existing data is valid and identical,
13547 # so there's no need to overwrite it.
13551 inherited = src.get('INHERITED', '')
13552 eclasses = src.get('_eclasses_')
13553 except CacheError, ce:
13554 noise.exception(cpv, ce)
13558 if eclasses is not None:
13559 if not tree_data.eclass_db.is_eclass_data_valid(
13560 src['_eclasses_']):
13561 noise.eclass_stale(cpv)
13563 inherited = eclasses
13565 inherited = inherited.split()
13567 if tree_data.src_db.complete_eclass_entries and \
13569 noise.corruption(cpv, "missing _eclasses_ field")
13573 # Even if _eclasses_ already exists, replace it with data from
13574 # eclass_cache, in order to insert local eclass paths.
13576 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
13578 # INHERITED contains a non-existent eclass.
13579 noise.eclass_stale(cpv)
13582 if eclasses is None:
13583 noise.eclass_stale(cpv)
13585 src['_eclasses_'] = eclasses
13587 src['_eclasses_'] = {}
# Unsupported EAPIs get a minimal entry with the EAPI marked by a
# leading "-" so they are recognized as invalid later.
13589 if not eapi_supported:
13591 'EAPI' : '-' + eapi,
13592 '_mtime_' : src['_mtime_'],
13593 '_eclasses_' : src['_eclasses_'],
13597 tree_data.dest_db[cpv] = src
13598 except CacheError, ce:
13599 noise.exception(cpv, ce)
13603 if onProgress is not None:
13604 onProgress(maxval, curval)
13606 if onProgress is not None:
13607 onProgress(maxval, curval)
# Prune destination-cache entries whose cpv no longer exists in the tree.
13609 for tree_data in porttrees_data:
13611 dead_nodes = set(tree_data.dest_db.iterkeys())
13612 except CacheError, e:
13613 writemsg_level("Error listing cache entries for " + \
13614 "'%s': %s, continuing...\n" % (tree_data.path, e),
13615 level=logging.ERROR, noiselevel=-1)
13618 dead_nodes.difference_update(tree_data.valid_nodes)
13619 for cpv in dead_nodes:
13621 del tree_data.dest_db[cpv]
13622 except (KeyError, CacheError):
13626 # make sure the final progress is displayed
13627 progressHandler.display()
# Restore the SIGWINCH handler and the previous umask before returning.
13629 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
13632 os.umask(old_umask)
# action_regen(): implement "emerge --regen" -- regenerate metadata
# cache entries via MetadataRegen, honoring job and load limits, and
# return its exit status.
# NOTE(review): interior lines are missing from this listing (see the
# gaps in the embedded line numbers); code is kept byte-identical.
13634 def action_regen(settings, portdb, max_jobs, max_load):
13635 xterm_titles = "notitles" not in settings.features
13636 emergelog(xterm_titles, " === regen")
13637 #regenerate cache entries
13638 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin; the SystemExit re-raise keeps explicit exits working.
13640 os.close(sys.stdin.fileno())
13641 except SystemExit, e:
13642 raise # Needed else can't exit
# Delegate the actual regeneration to MetadataRegen and propagate its
# return code to the caller.
13647 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13650 portage.writemsg_stdout("done!\n")
13651 return regen.returncode
13653 def action_config(settings, trees, myopts, myfiles):
13654 if len(myfiles) != 1:
13655 print red("!!! config can only take a single package atom at this time\n")
13657 if not is_valid_package_atom(myfiles[0]):
13658 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13660 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13661 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13665 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13666 except portage.exception.AmbiguousPackageName, e:
13667 # Multiple matches thrown from cpv_expand
13670 print "No packages found.\n"
13672 elif len(pkgs) > 1:
13673 if "--ask" in myopts:
13675 print "Please select a package to configure:"
13679 options.append(str(idx))
13680 print options[-1]+") "+pkg
13682 options.append("X")
13683 idx = userquery("Selection?", options)
13686 pkg = pkgs[int(idx)-1]
13688 print "The following packages available:"
13691 print "\nPlease use a specific atom or the --ask option."
13697 if "--ask" in myopts:
13698 if userquery("Ready to configure "+pkg+"?") == "No":
13701 print "Configuring pkg..."
13703 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13704 mysettings = portage.config(clone=settings)
13705 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13706 debug = mysettings.get("PORTAGE_DEBUG") == "1"
13707 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13709 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13710 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13711 if retval == os.EX_OK:
13712 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13713 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13716 def action_info(settings, trees, myopts, myfiles):
13717 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13718 settings.profile_path, settings["CHOST"],
13719 trees[settings["ROOT"]]["vartree"].dbapi)
13721 header_title = "System Settings"
13723 print header_width * "="
13724 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13725 print header_width * "="
13726 print "System uname: "+platform.platform(aliased=1)
13728 lastSync = portage.grabfile(os.path.join(
13729 settings["PORTDIR"], "metadata", "timestamp.chk"))
13730 print "Timestamp of tree:",
13736 output=commands.getstatusoutput("distcc --version")
13738 print str(output[1].split("\n",1)[0]),
13739 if "distcc" in settings.features:
13744 output=commands.getstatusoutput("ccache -V")
13746 print str(output[1].split("\n",1)[0]),
13747 if "ccache" in settings.features:
13752 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13753 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13754 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13755 myvars = portage.util.unique_array(myvars)
13759 if portage.isvalidatom(x):
13760 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13761 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13762 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13764 for pn, ver, rev in pkg_matches:
13766 pkgs.append(ver + "-" + rev)
13770 pkgs = ", ".join(pkgs)
13771 print "%-20s %s" % (x+":", pkgs)
13773 print "%-20s %s" % (x+":", "[NOT VALID]")
13775 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13777 if "--verbose" in myopts:
13778 myvars=settings.keys()
13780 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13781 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13782 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13783 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13785 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13787 myvars = portage.util.unique_array(myvars)
13788 use_expand = settings.get('USE_EXPAND', '').split()
13790 use_expand_hidden = set(
13791 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13792 alphabetical_use = '--alphabetical' in myopts
13793 root_config = trees[settings["ROOT"]]['root_config']
13799 print '%s="%s"' % (x, settings[x])
13801 use = set(settings["USE"].split())
13802 for varname in use_expand:
13803 flag_prefix = varname.lower() + "_"
13804 for f in list(use):
13805 if f.startswith(flag_prefix):
13809 print 'USE="%s"' % " ".join(use),
13810 for varname in use_expand:
13811 myval = settings.get(varname)
13813 print '%s="%s"' % (varname, myval),
13816 unset_vars.append(x)
13818 print "Unset: "+", ".join(unset_vars)
13821 if "--debug" in myopts:
13822 for x in dir(portage):
13823 module = getattr(portage, x)
13824 if "cvs_id_string" in dir(module):
13825 print "%s: %s" % (str(x), str(module.cvs_id_string))
13827 # See if we can find any packages installed matching the strings
13828 # passed on the command line
13830 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13831 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13833 mypkgs.extend(vardb.match(x))
13835 # If some packages were found...
13837 # Get our global settings (we only print stuff if it varies from
13838 # the current config)
13839 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13840 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13841 auxkeys.append('DEFINED_PHASES')
13843 pkgsettings = portage.config(clone=settings)
13845 for myvar in mydesiredvars:
13846 global_vals[myvar] = set(settings.get(myvar, "").split())
13848 # Loop through each package
13849 # Only print settings if they differ from global settings
13850 header_title = "Package Settings"
13851 print header_width * "="
13852 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13853 print header_width * "="
13854 from portage.output import EOutput
13857 # Get all package specific variables
13858 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13859 pkg = Package(built=True, cpv=cpv,
13860 installed=True, metadata=izip(Package.metadata_keys,
13861 (metadata.get(x, '') for x in Package.metadata_keys)),
13862 root_config=root_config, type_name='installed')
13865 valuesmap[k] = set(metadata[k].split())
13868 for myvar in mydesiredvars:
13869 # If the package variable doesn't match the
13870 # current global variable, something has changed
13871 # so set diff_found so we know to print
13872 if valuesmap[myvar] != global_vals[myvar]:
13873 diff_values[myvar] = valuesmap[myvar]
13875 print "\n%s was built with the following:" % \
13876 colorize("INFORM", str(pkg.cpv))
13878 pkgsettings.setcpv(pkg)
13879 forced_flags = set(chain(pkgsettings.useforce,
13880 pkgsettings.usemask))
13881 use = set(pkg.use.enabled)
13882 use.discard(pkgsettings.get('ARCH'))
13883 use_expand_flags = set()
13886 for varname in use_expand:
13887 flag_prefix = varname.lower() + "_"
13889 if f.startswith(flag_prefix):
13890 use_expand_flags.add(f)
13891 use_enabled.setdefault(
13892 varname.upper(), []).append(f[len(flag_prefix):])
13894 for f in pkg.iuse.all:
13895 if f.startswith(flag_prefix):
13896 use_expand_flags.add(f)
13898 use_disabled.setdefault(
13899 varname.upper(), []).append(f[len(flag_prefix):])
13901 var_order = set(use_enabled)
13902 var_order.update(use_disabled)
13903 var_order = sorted(var_order)
13904 var_order.insert(0, 'USE')
13905 use.difference_update(use_expand_flags)
13906 use_enabled['USE'] = list(use)
13907 use_disabled['USE'] = []
13909 for f in pkg.iuse.all:
13910 if f not in use and \
13911 f not in use_expand_flags:
13912 use_disabled['USE'].append(f)
13914 for varname in var_order:
13915 if varname in use_expand_hidden:
13918 for f in use_enabled.get(varname, []):
13919 flags.append(UseFlagDisplay(f, True, f in forced_flags))
13920 for f in use_disabled.get(varname, []):
13921 flags.append(UseFlagDisplay(f, False, f in forced_flags))
13922 if alphabetical_use:
13923 flags.sort(key=UseFlagDisplay.sort_combined)
13925 flags.sort(key=UseFlagDisplay.sort_separated)
13926 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13929 # If a difference was found, print the info for
13932 # Print package info
13933 for myvar in mydesiredvars:
13934 if myvar in diff_values:
13935 mylist = list(diff_values[myvar])
13937 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13940 if metadata['DEFINED_PHASES']:
13941 if 'info' not in metadata['DEFINED_PHASES'].split():
13944 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13945 ebuildpath = vardb.findname(pkg.cpv)
13946 if not ebuildpath or not os.path.exists(ebuildpath):
13947 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13949 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13950 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13951 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13954 def action_search(root_config, myopts, myfiles, spinner):
13956 print "emerge: no search terms provided."
13958 searchinstance = search(root_config,
13959 spinner, "--searchdesc" in myopts,
13960 "--quiet" not in myopts, "--usepkg" in myopts,
13961 "--usepkgonly" in myopts)
13962 for mysearch in myfiles:
13964 searchinstance.execute(mysearch)
13965 except re.error, comment:
13966 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13968 searchinstance.output()
13970 def action_uninstall(settings, trees, ldpath_mtimes,
13971 opts, action, files, spinner):
13973 # For backward compat, some actions do not require leading '='.
13974 ignore_missing_eq = action in ('clean', 'unmerge')
13975 root = settings['ROOT']
13976 vardb = trees[root]['vartree'].dbapi
13980 # Ensure atoms are valid before calling unmerge().
13981 # For backward compat, leading '=' is not required.
13983 if is_valid_package_atom(x) or \
13984 (ignore_missing_eq and is_valid_package_atom('=' + x)):
13987 valid_atoms.append(
13988 portage.dep_expand(x, mydb=vardb, settings=settings))
13989 except portage.exception.AmbiguousPackageName, e:
13990 msg = "The short ebuild name \"" + x + \
13991 "\" is ambiguous. Please specify " + \
13992 "one of the following " + \
13993 "fully-qualified ebuild names instead:"
13994 for line in textwrap.wrap(msg, 70):
13995 writemsg_level("!!! %s\n" % (line,),
13996 level=logging.ERROR, noiselevel=-1)
13998 writemsg_level(" %s\n" % colorize("INFORM", i),
13999 level=logging.ERROR, noiselevel=-1)
14000 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14003 elif x.startswith(os.sep):
14004 if not x.startswith(root):
14005 writemsg_level(("!!! '%s' does not start with" + \
14006 " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
14008 # Queue these up since it's most efficient to handle
14009 # multiple files in a single iter_owners() call.
14010 lookup_owners.append(x)
14014 msg.append("'%s' is not a valid package atom." % (x,))
14015 msg.append("Please check ebuild(5) for full details.")
14016 writemsg_level("".join("!!! %s\n" % line for line in msg),
14017 level=logging.ERROR, noiselevel=-1)
14021 relative_paths = []
14022 search_for_multiple = False
14023 if len(lookup_owners) > 1:
14024 search_for_multiple = True
14026 for x in lookup_owners:
14027 if not search_for_multiple and os.path.isdir(x):
14028 search_for_multiple = True
14029 relative_paths.append(x[len(root):])
14032 for pkg, relative_path in \
14033 vardb._owners.iter_owners(relative_paths):
14034 owners.add(pkg.mycpv)
14035 if not search_for_multiple:
14040 slot = vardb.aux_get(cpv, ['SLOT'])[0]
14042 # portage now masks packages with missing slot, but it's
14043 # possible that one was installed by an older version
14044 atom = portage.cpv_getkey(cpv)
14046 atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
14047 valid_atoms.append(portage.dep.Atom(atom))
14049 writemsg_level(("!!! '%s' is not claimed " + \
14050 "by any package.\n") % lookup_owners[0],
14051 level=logging.WARNING, noiselevel=-1)
14053 if files and not valid_atoms:
14056 if action in ('clean', 'unmerge') or \
14057 (action == 'prune' and "--nodeps" in opts):
14058 # When given a list of atoms, unmerge them in the order given.
14059 ordered = action == 'unmerge'
14060 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
14061 valid_atoms, ldpath_mtimes, ordered=ordered)
14063 elif action == 'deselect':
14064 rval = action_deselect(settings, trees, opts, valid_atoms)
14066 rval = action_depclean(settings, trees, ldpath_mtimes,
14067 opts, action, valid_atoms, spinner)
14071 def action_deselect(settings, trees, opts, atoms):
14072 root_config = trees[settings['ROOT']]['root_config']
14073 world_set = root_config.sets['world']
14074 if not hasattr(world_set, 'update'):
14075 writemsg_level("World set does not appear to be mutable.\n",
14076 level=logging.ERROR, noiselevel=-1)
14079 vardb = root_config.trees['vartree'].dbapi
14080 expanded_atoms = set(atoms)
14081 from portage.dep import Atom
14083 for cpv in vardb.match(atom):
14084 slot, = vardb.aux_get(cpv, ['SLOT'])
14087 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
14089 pretend = '--pretend' in opts
14091 if not pretend and hasattr(world_set, 'lock'):
14095 discard_atoms = set()
14097 for atom in world_set:
14098 if not isinstance(atom, Atom):
14101 for arg_atom in expanded_atoms:
14102 if arg_atom.intersects(atom) and \
14103 not (arg_atom.slot and not atom.slot):
14104 discard_atoms.add(atom)
14107 for atom in sorted(discard_atoms):
14108 print ">>> Removing %s from \"world\" favorites file..." % \
14109 colorize("INFORM", str(atom))
14111 if '--ask' in opts:
14112 prompt = "Would you like to remove these " + \
14113 "packages from your world favorites?"
14114 if userquery(prompt) == 'No':
14117 remaining = set(world_set)
14118 remaining.difference_update(discard_atoms)
14120 world_set.replace(remaining)
14122 print ">>> No matching atoms found in \"world\" favorites file..."
14128 def action_depclean(settings, trees, ldpath_mtimes,
14129 myopts, action, myfiles, spinner):
14130 # Kill packages that aren't explicitly merged or are required as a
14131 # dependency of another package. World file is explicit.
14133 # Global depclean or prune operations are not very safe when there are
14134 # missing dependencies since it's unknown how badly incomplete
14135 # the dependency graph is, and we might accidentally remove packages
14136 # that should have been pulled into the graph. On the other hand, it's
14137 # relatively safe to ignore missing deps when only asked to remove
14138 # specific packages.
14139 allow_missing_deps = len(myfiles) > 0
14142 msg.append("Always study the list of packages to be cleaned for any obvious\n")
14143 msg.append("mistakes. Packages that are part of the world set will always\n")
14144 msg.append("be kept. They can be manually added to this set with\n")
14145 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
14146 msg.append("package.provided (see portage(5)) will be removed by\n")
14147 msg.append("depclean, even if they are part of the world set.\n")
14149 msg.append("As a safety measure, depclean will not remove any packages\n")
14150 msg.append("unless *all* required dependencies have been resolved. As a\n")
14151 msg.append("consequence, it is often necessary to run %s\n" % \
14152 good("`emerge --update"))
14153 msg.append(good("--newuse --deep @system @world`") + \
14154 " prior to depclean.\n")
14156 if action == "depclean" and "--quiet" not in myopts and not myfiles:
14157 portage.writemsg_stdout("\n")
14159 portage.writemsg_stdout(colorize("WARN", " * ") + x)
14161 xterm_titles = "notitles" not in settings.features
14162 myroot = settings["ROOT"]
14163 root_config = trees[myroot]["root_config"]
14164 getSetAtoms = root_config.setconfig.getSetAtoms
14165 vardb = trees[myroot]["vartree"].dbapi
14166 deselect = myopts.get('--deselect') != 'n'
14168 required_set_names = ("system", "world")
14172 for s in required_set_names:
14173 required_sets[s] = InternalPackageSet(
14174 initial_atoms=getSetAtoms(s))
14177 # When removing packages, use a temporary version of world
14178 # which excludes packages that are intended to be eligible for
14180 world_temp_set = required_sets["world"]
14181 system_set = required_sets["system"]
14183 if not system_set or not world_temp_set:
14186 writemsg_level("!!! You have no system list.\n",
14187 level=logging.ERROR, noiselevel=-1)
14189 if not world_temp_set:
14190 writemsg_level("!!! You have no world file.\n",
14191 level=logging.WARNING, noiselevel=-1)
14193 writemsg_level("!!! Proceeding is likely to " + \
14194 "break your installation.\n",
14195 level=logging.WARNING, noiselevel=-1)
14196 if "--pretend" not in myopts:
14197 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
14199 if action == "depclean":
14200 emergelog(xterm_titles, " >>> depclean")
14203 args_set = InternalPackageSet()
14205 args_set.update(myfiles)
14206 matched_packages = False
14209 matched_packages = True
14211 if not matched_packages:
14212 writemsg_level(">>> No packages selected for removal by %s\n" % \
14216 writemsg_level("\nCalculating dependencies ")
14217 resolver_params = create_depgraph_params(myopts, "remove")
14218 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
14219 vardb = resolver.trees[myroot]["vartree"].dbapi
14221 if action == "depclean":
14226 world_temp_set.clear()
14228 # Pull in everything that's installed but not matched
14229 # by an argument atom since we don't want to clean any
14230 # package if something depends on it.
14235 if args_set.findAtomForPackage(pkg) is None:
14236 world_temp_set.add("=" + pkg.cpv)
14238 except portage.exception.InvalidDependString, e:
14239 show_invalid_depstring_notice(pkg,
14240 pkg.metadata["PROVIDE"], str(e))
14242 world_temp_set.add("=" + pkg.cpv)
14245 elif action == "prune":
14248 world_temp_set.clear()
14250 # Pull in everything that's installed since we don't
14251 # to prune a package if something depends on it.
14252 world_temp_set.update(vardb.cp_all())
14256 # Try to prune everything that's slotted.
14257 for cp in vardb.cp_all():
14258 if len(vardb.cp_list(cp)) > 1:
14261 # Remove atoms from world that match installed packages
14262 # that are also matched by argument atoms, but do not remove
14263 # them if they match the highest installed version.
14266 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
14267 if not pkgs_for_cp or pkg not in pkgs_for_cp:
14268 raise AssertionError("package expected in matches: " + \
14269 "cp = %s, cpv = %s matches = %s" % \
14270 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14272 highest_version = pkgs_for_cp[-1]
14273 if pkg == highest_version:
14274 # pkg is the highest version
14275 world_temp_set.add("=" + pkg.cpv)
14278 if len(pkgs_for_cp) <= 1:
14279 raise AssertionError("more packages expected: " + \
14280 "cp = %s, cpv = %s matches = %s" % \
14281 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14284 if args_set.findAtomForPackage(pkg) is None:
14285 world_temp_set.add("=" + pkg.cpv)
14287 except portage.exception.InvalidDependString, e:
14288 show_invalid_depstring_notice(pkg,
14289 pkg.metadata["PROVIDE"], str(e))
14291 world_temp_set.add("=" + pkg.cpv)
14295 for s, package_set in required_sets.iteritems():
14296 set_atom = SETPREFIX + s
14297 set_arg = SetArg(arg=set_atom, set=package_set,
14298 root_config=resolver.roots[myroot])
14299 set_args[s] = set_arg
14300 for atom in set_arg.set:
14301 resolver._dep_stack.append(
14302 Dependency(atom=atom, root=myroot, parent=set_arg))
14303 resolver.digraph.add(set_arg, None)
14305 success = resolver._complete_graph()
14306 writemsg_level("\b\b... done!\n")
14308 resolver.display_problems()
14313 def unresolved_deps():
14315 unresolvable = set()
14316 for dep in resolver._initially_unsatisfied_deps:
14317 if isinstance(dep.parent, Package) and \
14318 (dep.priority > UnmergeDepPriority.SOFT):
14319 unresolvable.add((dep.atom, dep.parent.cpv))
14321 if not unresolvable:
14324 if unresolvable and not allow_missing_deps:
14325 prefix = bad(" * ")
14327 msg.append("Dependencies could not be completely resolved due to")
14328 msg.append("the following required packages not being installed:")
14330 for atom, parent in unresolvable:
14331 msg.append(" %s pulled in by:" % (atom,))
14332 msg.append(" %s" % (parent,))
14334 msg.append("Have you forgotten to run " + \
14335 good("`emerge --update --newuse --deep @system @world`") + " prior")
14336 msg.append(("to %s? It may be necessary to manually " + \
14337 "uninstall packages that no longer") % action)
14338 msg.append("exist in the portage tree since " + \
14339 "it may not be possible to satisfy their")
14340 msg.append("dependencies. Also, be aware of " + \
14341 "the --with-bdeps option that is documented")
14342 msg.append("in " + good("`man emerge`") + ".")
14343 if action == "prune":
14345 msg.append("If you would like to ignore " + \
14346 "dependencies then use %s." % good("--nodeps"))
14347 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14348 level=logging.ERROR, noiselevel=-1)
14352 if unresolved_deps():
14355 graph = resolver.digraph.copy()
14356 required_pkgs_total = 0
14358 if isinstance(node, Package):
14359 required_pkgs_total += 1
14361 def show_parents(child_node):
14362 parent_nodes = graph.parent_nodes(child_node)
14363 if not parent_nodes:
14364 # With --prune, the highest version can be pulled in without any
14365 # real parent since all installed packages are pulled in. In that
14366 # case there's nothing to show here.
14369 for node in parent_nodes:
14370 parent_strs.append(str(getattr(node, "cpv", node)))
14373 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
14374 for parent_str in parent_strs:
14375 msg.append(" %s\n" % (parent_str,))
14377 portage.writemsg_stdout("".join(msg), noiselevel=-1)
14379 def cmp_pkg_cpv(pkg1, pkg2):
14380 """Sort Package instances by cpv."""
14381 if pkg1.cpv > pkg2.cpv:
14383 elif pkg1.cpv == pkg2.cpv:
14388 def create_cleanlist():
14389 pkgs_to_remove = []
14391 if action == "depclean":
14394 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14397 arg_atom = args_set.findAtomForPackage(pkg)
14398 except portage.exception.InvalidDependString:
14399 # this error has already been displayed by now
14403 if pkg not in graph:
14404 pkgs_to_remove.append(pkg)
14405 elif "--verbose" in myopts:
14409 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14410 if pkg not in graph:
14411 pkgs_to_remove.append(pkg)
14412 elif "--verbose" in myopts:
14415 elif action == "prune":
14416 # Prune really uses all installed instead of world. It's not
14417 # a real reverse dependency so don't display it as such.
14418 graph.remove(set_args["world"])
14420 for atom in args_set:
14421 for pkg in vardb.match_pkgs(atom):
14422 if pkg not in graph:
14423 pkgs_to_remove.append(pkg)
14424 elif "--verbose" in myopts:
14427 if not pkgs_to_remove:
14429 ">>> No packages selected for removal by %s\n" % action)
14430 if "--verbose" not in myopts:
14432 ">>> To see reverse dependencies, use %s\n" % \
14434 if action == "prune":
14436 ">>> To ignore dependencies, use %s\n" % \
14439 return pkgs_to_remove
14441 cleanlist = create_cleanlist()
14444 clean_set = set(cleanlist)
14446 # Check if any of these package are the sole providers of libraries
14447 # with consumers that have not been selected for removal. If so, these
14448 # packages and any dependencies need to be added to the graph.
14449 real_vardb = trees[myroot]["vartree"].dbapi
14450 linkmap = real_vardb.linkmap
14451 liblist = linkmap.listLibraryObjects()
14452 consumer_cache = {}
14453 provider_cache = {}
14457 writemsg_level(">>> Checking for lib consumers...\n")
14459 for pkg in cleanlist:
14460 pkg_dblink = real_vardb._dblink(pkg.cpv)
14461 provided_libs = set()
14463 for lib in liblist:
14464 if pkg_dblink.isowner(lib, myroot):
14465 provided_libs.add(lib)
14467 if not provided_libs:
14471 for lib in provided_libs:
14472 lib_consumers = consumer_cache.get(lib)
14473 if lib_consumers is None:
14474 lib_consumers = linkmap.findConsumers(lib)
14475 consumer_cache[lib] = lib_consumers
14477 consumers[lib] = lib_consumers
14482 for lib, lib_consumers in consumers.items():
14483 for consumer_file in list(lib_consumers):
14484 if pkg_dblink.isowner(consumer_file, myroot):
14485 lib_consumers.remove(consumer_file)
14486 if not lib_consumers:
14492 for lib, lib_consumers in consumers.iteritems():
14494 soname = soname_cache.get(lib)
14496 soname = linkmap.getSoname(lib)
14497 soname_cache[lib] = soname
14499 consumer_providers = []
14500 for lib_consumer in lib_consumers:
14501 providers = provider_cache.get(lib)
14502 if providers is None:
14503 providers = linkmap.findProviders(lib_consumer)
14504 provider_cache[lib_consumer] = providers
14505 if soname not in providers:
14506 # Why does this happen?
14508 consumer_providers.append(
14509 (lib_consumer, providers[soname]))
14511 consumers[lib] = consumer_providers
14513 consumer_map[pkg] = consumers
14517 search_files = set()
14518 for consumers in consumer_map.itervalues():
14519 for lib, consumer_providers in consumers.iteritems():
14520 for lib_consumer, providers in consumer_providers:
14521 search_files.add(lib_consumer)
14522 search_files.update(providers)
14524 writemsg_level(">>> Assigning files to packages...\n")
14525 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14527 for pkg, consumers in consumer_map.items():
14528 for lib, consumer_providers in consumers.items():
14529 lib_consumers = set()
14531 for lib_consumer, providers in consumer_providers:
14532 owner_set = file_owners.get(lib_consumer)
14533 provider_dblinks = set()
14534 provider_pkgs = set()
14536 if len(providers) > 1:
14537 for provider in providers:
14538 provider_set = file_owners.get(provider)
14539 if provider_set is not None:
14540 provider_dblinks.update(provider_set)
14542 if len(provider_dblinks) > 1:
14543 for provider_dblink in provider_dblinks:
14544 pkg_key = ("installed", myroot,
14545 provider_dblink.mycpv, "nomerge")
14546 if pkg_key not in clean_set:
14547 provider_pkgs.add(vardb.get(pkg_key))
14552 if owner_set is not None:
14553 lib_consumers.update(owner_set)
14555 for consumer_dblink in list(lib_consumers):
14556 if ("installed", myroot, consumer_dblink.mycpv,
14557 "nomerge") in clean_set:
14558 lib_consumers.remove(consumer_dblink)
14562 consumers[lib] = lib_consumers
14566 del consumer_map[pkg]
14569 # TODO: Implement a package set for rebuilding consumer packages.
14571 msg = "In order to avoid breakage of link level " + \
14572 "dependencies, one or more packages will not be removed. " + \
14573 "This can be solved by rebuilding " + \
14574 "the packages that pulled them in."
14576 prefix = bad(" * ")
14577 from textwrap import wrap
14578 writemsg_level("".join(prefix + "%s\n" % line for \
14579 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14582 for pkg, consumers in consumer_map.iteritems():
14583 unique_consumers = set(chain(*consumers.values()))
14584 unique_consumers = sorted(consumer.mycpv \
14585 for consumer in unique_consumers)
14587 msg.append(" %s pulled in by:" % (pkg.cpv,))
14588 for consumer in unique_consumers:
14589 msg.append(" %s" % (consumer,))
14591 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14592 level=logging.WARNING, noiselevel=-1)
14594 # Add lib providers to the graph as children of lib consumers,
14595 # and also add any dependencies pulled in by the provider.
14596 writemsg_level(">>> Adding lib providers to graph...\n")
14598 for pkg, consumers in consumer_map.iteritems():
14599 for consumer_dblink in set(chain(*consumers.values())):
14600 consumer_pkg = vardb.get(("installed", myroot,
14601 consumer_dblink.mycpv, "nomerge"))
14602 if not resolver._add_pkg(pkg,
14603 Dependency(parent=consumer_pkg,
14604 priority=UnmergeDepPriority(runtime=True),
14606 resolver.display_problems()
14609 writemsg_level("\nCalculating dependencies ")
14610 success = resolver._complete_graph()
14611 writemsg_level("\b\b... done!\n")
14612 resolver.display_problems()
14615 if unresolved_deps():
14618 graph = resolver.digraph.copy()
14619 required_pkgs_total = 0
14621 if isinstance(node, Package):
14622 required_pkgs_total += 1
14623 cleanlist = create_cleanlist()
14626 clean_set = set(cleanlist)
14628 # Use a topological sort to create an unmerge order such that
14629 # each package is unmerged before it's dependencies. This is
14630 # necessary to avoid breaking things that may need to run
14631 # during pkg_prerm or pkg_postrm phases.
14633 # Create a new graph to account for dependencies between the
14634 # packages being unmerged.
14638 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14639 runtime = UnmergeDepPriority(runtime=True)
14640 runtime_post = UnmergeDepPriority(runtime_post=True)
14641 buildtime = UnmergeDepPriority(buildtime=True)
14643 "RDEPEND": runtime,
14644 "PDEPEND": runtime_post,
14645 "DEPEND": buildtime,
14648 for node in clean_set:
14649 graph.add(node, None)
14651 node_use = node.metadata["USE"].split()
14652 for dep_type in dep_keys:
14653 depstr = node.metadata[dep_type]
14657 portage.dep._dep_check_strict = False
14658 success, atoms = portage.dep_check(depstr, None, settings,
14659 myuse=node_use, trees=resolver._graph_trees,
14662 portage.dep._dep_check_strict = True
14664 # Ignore invalid deps of packages that will
14665 # be uninstalled anyway.
14668 priority = priority_map[dep_type]
14670 if not isinstance(atom, portage.dep.Atom):
14671 # Ignore invalid atoms returned from dep_check().
14675 matches = vardb.match_pkgs(atom)
14678 for child_node in matches:
14679 if child_node in clean_set:
14680 graph.add(child_node, node, priority=priority)
14683 if len(graph.order) == len(graph.root_nodes()):
14684 # If there are no dependencies between packages
14685 # let unmerge() group them by cat/pn.
14687 cleanlist = [pkg.cpv for pkg in graph.order]
14689 # Order nodes from lowest to highest overall reference count for
14690 # optimal root node selection.
14691 node_refcounts = {}
14692 for node in graph.order:
14693 node_refcounts[node] = len(graph.parent_nodes(node))
def cmp_reference_count(lhs, rhs):
	"""Old-style comparator: order graph nodes by ascending parent
	reference count (uses the enclosing node_refcounts map)."""
	lhs_count = node_refcounts[lhs]
	rhs_count = node_refcounts[rhs]
	return lhs_count - rhs_count
14696 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14698 ignore_priority_range = [None]
14699 ignore_priority_range.extend(
14700 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14701 while not graph.empty():
14702 for ignore_priority in ignore_priority_range:
14703 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14707 raise AssertionError("no root nodes")
14708 if ignore_priority is not None:
14709 # Some deps have been dropped due to circular dependencies,
14710 # so only pop one node in order do minimize the number that
14715 cleanlist.append(node.cpv)
14717 unmerge(root_config, myopts, "unmerge", cleanlist,
14718 ldpath_mtimes, ordered=ordered)
14720 if action == "prune":
14723 if not cleanlist and "--quiet" in myopts:
14726 print "Packages installed: "+str(len(vardb.cpv_all()))
14727 print "Packages in world: " + \
14728 str(len(root_config.sets["world"].getAtoms()))
14729 print "Packages in system: " + \
14730 str(len(root_config.sets["system"].getAtoms()))
14731 print "Required packages: "+str(required_pkgs_total)
14732 if "--pretend" in myopts:
14733 print "Number to remove: "+str(len(cleanlist))
14735 print "Number removed: "+str(len(cleanlist))
# Rebuild a dependency graph from the saved resume list in mtimedb.
# When some resume entries can no longer be satisfied, drop them (and
# every merge-list parent that depended on them) and retry, collecting
# the dropped packages for later reporting.
# NOTE(review): this extraction has elided lines -- e.g. the assignment
# of skip_masked (it is read but never assigned here), the `try:` before
# loadResumeCommand, and the enclosing retry loop -- confirm upstream.
14737 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14739 Construct a depgraph for the given resume list. This will raise
14740 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14742 @returns: (success, depgraph, dropped_tasks)
14745 skip_unsatisfied = True
14746 mergelist = mtimedb["resume"]["mergelist"]
14747 dropped_tasks = set()
14749 mydepgraph = depgraph(settings, trees,
14750 myopts, myparams, spinner)
14752 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14753 skip_masked=skip_masked)
14754 except depgraph.UnsatisfiedResumeDep, e:
14755 if not skip_unsatisfied:
14758 graph = mydepgraph.digraph
# Map each unsatisfied dep's parent package to itself; used below both as
# a membership set and as the set of merge-list entries to prune.
14759 unsatisfied_parents = dict((dep.parent, dep.parent) \
14760 for dep in e.value)
14761 traversed_nodes = set()
14762 unsatisfied_stack = list(unsatisfied_parents)
# Walk up the graph: any parent whose soft-ignored child set contains a
# dropped package becomes unsatisfied itself and must be dropped too.
14763 while unsatisfied_stack:
14764 pkg = unsatisfied_stack.pop()
14765 if pkg in traversed_nodes:
14767 traversed_nodes.add(pkg)
14769 # If this package was pulled in by a parent
14770 # package scheduled for merge, removing this
14771 # package may cause the parent package's
14772 # dependency to become unsatisfied.
14773 for parent_node in graph.parent_nodes(pkg):
14774 if not isinstance(parent_node, Package) \
14775 or parent_node.operation not in ("merge", "nomerge"):
14778 graph.child_nodes(parent_node,
14779 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14780 if pkg in unsatisfied:
14781 unsatisfied_parents[parent_node] = parent_node
14782 unsatisfied_stack.append(parent_node)
# Keep only merge-list entries that are not among the unsatisfied parents.
14784 pruned_mergelist = []
14785 for x in mergelist:
14786 if isinstance(x, list) and \
14787 tuple(x) not in unsatisfied_parents:
14788 pruned_mergelist.append(x)
14790 # If the mergelist doesn't shrink then this loop is infinite.
14791 if len(pruned_mergelist) == len(mergelist):
14792 # This happens if a package can't be dropped because
14793 # it's already installed, but it has unsatisfied PDEPEND.
14795 mergelist[:] = pruned_mergelist
14797 # Exclude installed packages that have been removed from the graph due
14798 # to failure to build/install runtime dependencies after the dependent
14799 # package has already been installed.
14800 dropped_tasks.update(pkg for pkg in \
14801 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs releases circular references so the dropped objects can be
# garbage collected before the retry.
14802 mydepgraph.break_refs(unsatisfied_parents)
14804 del e, graph, traversed_nodes, \
14805 unsatisfied_parents, unsatisfied_stack
14809 return (success, mydepgraph, dropped_tasks)
# Top-level driver for `emerge <atoms>` / `emerge --resume`: validates any
# saved resume state, builds (or resumes) the dependency graph, shows the
# pretend/ask output, then hands the merge list to the Scheduler and
# optionally auto-cleans afterwards.
# NOTE(review): many interior lines are elided in this extraction
# (`try:`/`else:`/`break`/`return` lines, several assignments such as
# `action = ...` and `out = EOutput()`) -- confirm against upstream before
# relying on the exact control flow documented here.
14811 def action_build(settings, trees, mtimedb,
14812 myopts, myaction, myfiles, spinner):
14814 # validate the state of the resume data
14815 # so that we can make assumptions later.
14816 for k in ("resume", "resume_backup"):
14817 if k not in mtimedb:
14819 resume_data = mtimedb[k]
14820 if not isinstance(resume_data, dict):
14823 mergelist = resume_data.get("mergelist")
14824 if not isinstance(mergelist, list):
# Each resume entry must be a 4-element list: (type, root, cpv, action).
14827 for x in mergelist:
14828 if not (isinstance(x, list) and len(x) == 4):
14830 pkg_type, pkg_root, pkg_key, pkg_action = x
14831 if pkg_root not in trees:
14832 # Current $ROOT setting differs,
14833 # so the list must be stale.
14839 resume_opts = resume_data.get("myopts")
14840 if not isinstance(resume_opts, (dict, list)):
14843 favorites = resume_data.get("favorites")
14844 if not isinstance(favorites, list):
# --resume: promote the backup list if needed and merge the saved options
# into the current ones (current command-line options win).
14849 if "--resume" in myopts and \
14850 ("resume" in mtimedb or
14851 "resume_backup" in mtimedb):
14853 if "resume" not in mtimedb:
14854 mtimedb["resume"] = mtimedb["resume_backup"]
14855 del mtimedb["resume_backup"]
14857 # "myopts" is a list for backward compatibility.
14858 resume_opts = mtimedb["resume"].get("myopts", [])
14859 if isinstance(resume_opts, list):
14860 resume_opts = dict((k,True) for k in resume_opts)
# These options are one-shot in nature and never inherited from a resume.
14861 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14862 resume_opts.pop(opt, None)
14864 # Current options always override resume_opts.
14865 resume_opts.update(myopts)
14867 myopts.update(resume_opts)
14869 if "--debug" in myopts:
14870 writemsg_level("myopts %s\n" % (myopts,))
14872 # Adjust config according to options of the command being resumed.
14873 for myroot in trees:
14874 mysettings = trees[myroot]["vartree"].settings
14875 mysettings.unlock()
14876 adjust_config(myopts, mysettings)
14878 del myroot, mysettings
14880 ldpath_mtimes = mtimedb["ldpath"]
# Cache the commonly-tested option flags as locals.
14883 buildpkgonly = "--buildpkgonly" in myopts
14884 pretend = "--pretend" in myopts
14885 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14886 ask = "--ask" in myopts
14887 nodeps = "--nodeps" in myopts
14888 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14889 tree = "--tree" in myopts
14890 if nodeps and tree:
14892 del myopts["--tree"]
14893 portage.writemsg(colorize("WARN", " * ") + \
14894 "--tree is broken with --nodeps. Disabling...\n")
14895 debug = "--debug" in myopts
14896 verbose = "--verbose" in myopts
14897 quiet = "--quiet" in myopts
14898 if pretend or fetchonly:
14899 # make the mtimedb readonly
14900 mtimedb.filename = None
# Warn that FEATURES=digest / --digest regenerates manifests and can mask
# distfile corruption.
14901 if '--digest' in myopts or 'digest' in settings.features:
14902 if '--digest' in myopts:
14903 msg = "The --digest option"
14905 msg = "The FEATURES=digest setting"
14907 msg += " can prevent corruption from being" + \
14908 " noticed. The `repoman manifest` command is the preferred" + \
14909 " way to generate manifests and it is capable of doing an" + \
14910 " entire repository or category at once."
14911 prefix = bad(" * ")
14912 writemsg(prefix + "\n")
14913 from textwrap import wrap
14914 for line in wrap(msg, 72):
14915 writemsg("%s%s\n" % (prefix, line))
14916 writemsg(prefix + "\n")
# Informational header for pretend/ask/tree/verbose modes.
14918 if "--quiet" not in myopts and \
14919 ("--pretend" in myopts or "--ask" in myopts or \
14920 "--tree" in myopts or "--verbose" in myopts):
14922 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14924 elif "--buildpkgonly" in myopts:
14928 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14930 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14934 print darkgreen("These are the packages that would be %s, in order:") % action
14937 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14938 if not show_spinner:
14939 spinner.update = spinner.update_quiet
14942 favorites = mtimedb["resume"].get("favorites")
14943 if not isinstance(favorites, list):
14947 print "Calculating dependencies ",
14948 myparams = create_depgraph_params(myopts, myaction)
# --skipfirst: drop the first pending "merge" entry from the resume list.
14950 resume_data = mtimedb["resume"]
14951 mergelist = resume_data["mergelist"]
14952 if mergelist and "--skipfirst" in myopts:
14953 for i, task in enumerate(mergelist):
14954 if isinstance(task, list) and \
14955 task and task[-1] == "merge":
14962 success, mydepgraph, dropped_tasks = resume_depgraph(
14963 settings, trees, mtimedb, myopts, myparams, spinner)
# Resume failed: report exactly why, entry by entry.
14964 except (portage.exception.PackageNotFound,
14965 depgraph.UnsatisfiedResumeDep), e:
14966 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14967 mydepgraph = e.depgraph
14970 from textwrap import wrap
14971 from portage.output import EOutput
14974 resume_data = mtimedb["resume"]
14975 mergelist = resume_data.get("mergelist")
14976 if not isinstance(mergelist, list):
14978 if mergelist and debug or (verbose and not quiet):
14979 out.eerror("Invalid resume list:")
14982 for task in mergelist:
14983 if isinstance(task, list):
14984 out.eerror(indent + str(tuple(task)))
14987 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14988 out.eerror("One or more packages are either masked or " + \
14989 "have missing dependencies:")
14992 for dep in e.value:
14993 if dep.atom is None:
14994 out.eerror(indent + "Masked package:")
14995 out.eerror(2 * indent + str(dep.parent))
14998 out.eerror(indent + str(dep.atom) + " pulled in by:")
14999 out.eerror(2 * indent + str(dep.parent))
15001 msg = "The resume list contains packages " + \
15002 "that are either masked or have " + \
15003 "unsatisfied dependencies. " + \
15004 "Please restart/continue " + \
15005 "the operation manually, or use --skipfirst " + \
15006 "to skip the first package in the list and " + \
15007 "any other packages that may be " + \
15008 "masked or have missing dependencies."
15009 for line in wrap(msg, 72):
15011 elif isinstance(e, portage.exception.PackageNotFound):
15012 out.eerror("An expected package is " + \
15013 "not available: %s" % str(e))
15015 msg = "The resume list contains one or more " + \
15016 "packages that are no longer " + \
15017 "available. Please restart/continue " + \
15018 "the operation manually."
15019 for line in wrap(msg, 72):
15023 print "\b\b... done!"
# Report packages silently dropped by resume_depgraph().
15027 portage.writemsg("!!! One or more packages have been " + \
15028 "dropped due to\n" + \
15029 "!!! masking or unsatisfied dependencies:\n\n",
15031 for task in dropped_tasks:
15032 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
15033 portage.writemsg("\n", noiselevel=-1)
15036 if mydepgraph is not None:
15037 mydepgraph.display_problems()
15038 if not (ask or pretend):
15039 # delete the current list and also the backup
15040 # since it's probably stale too.
15041 for k in ("resume", "resume_backup"):
15042 mtimedb.pop(k, None)
15047 if ("--resume" in myopts):
15048 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the command-line files/atoms.
15051 myparams = create_depgraph_params(myopts, myaction)
15052 if "--quiet" not in myopts and "--nodeps" not in myopts:
15053 print "Calculating dependencies ",
15055 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
15057 retval, favorites = mydepgraph.select_files(myfiles)
15058 except portage.exception.PackageNotFound, e:
15059 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
15061 except portage.exception.PackageSetNotFound, e:
15062 root_config = trees[settings["ROOT"]]["root_config"]
15063 display_missing_pkg_set(root_config, e.value)
15066 print "\b\b... done!"
15068 mydepgraph.display_problems()
# --ask flow: display the merge list and prompt before proceeding.
15071 if "--pretend" not in myopts and \
15072 ("--ask" in myopts or "--tree" in myopts or \
15073 "--verbose" in myopts) and \
15074 not ("--quiet" in myopts and "--ask" not in myopts):
15075 if "--resume" in myopts:
15076 mymergelist = mydepgraph.altlist()
15077 if len(mymergelist) == 0:
15078 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15080 favorites = mtimedb["resume"]["favorites"]
15081 retval = mydepgraph.display(
15082 mydepgraph.altlist(reversed=tree),
15083 favorites=favorites)
15084 mydepgraph.display_problems()
15085 if retval != os.EX_OK:
15087 prompt="Would you like to resume merging these packages?"
15089 retval = mydepgraph.display(
15090 mydepgraph.altlist(reversed=("--tree" in myopts)),
15091 favorites=favorites)
15092 mydepgraph.display_problems()
15093 if retval != os.EX_OK:
15096 for x in mydepgraph.altlist():
15097 if isinstance(x, Package) and x.operation == "merge":
15101 sets = trees[settings["ROOT"]]["root_config"].sets
15102 world_candidates = None
15103 if "--noreplace" in myopts and \
15104 not oneshot and favorites:
15105 # Sets that are not world candidates are filtered
15106 # out here since the favorites list needs to be
15107 # complete for depgraph.loadResumeCommand() to
15108 # operate correctly.
15109 world_candidates = [x for x in favorites \
15110 if not (x.startswith(SETPREFIX) and \
15111 not sets[x[1:]].world_candidate)]
15112 if "--noreplace" in myopts and \
15113 not oneshot and world_candidates:
15115 for x in world_candidates:
15116 print " %s %s" % (good("*"), x)
15117 prompt="Would you like to add these packages to your world favorites?"
15118 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
15119 prompt="Nothing to merge; would you like to auto-clean packages?"
15122 print "Nothing to merge; quitting."
15125 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
15126 prompt="Would you like to fetch the source files for these packages?"
15128 prompt="Would you like to merge these packages?"
15130 if "--ask" in myopts and userquery(prompt) == "No":
15135 # Don't ask again (e.g. when auto-cleaning packages after merge)
15136 myopts.pop("--ask", None)
# --pretend flow: display only, no merging.
15138 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
15139 if ("--resume" in myopts):
15140 mymergelist = mydepgraph.altlist()
15141 if len(mymergelist) == 0:
15142 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15144 favorites = mtimedb["resume"]["favorites"]
15145 retval = mydepgraph.display(
15146 mydepgraph.altlist(reversed=tree),
15147 favorites=favorites)
15148 mydepgraph.display_problems()
15149 if retval != os.EX_OK:
15152 retval = mydepgraph.display(
15153 mydepgraph.altlist(reversed=("--tree" in myopts)),
15154 favorites=favorites)
15155 mydepgraph.display_problems()
15156 if retval != os.EX_OK:
# --buildpkgonly refuses to run unless every remaining dependency edge in
# the (merge-only) graph is already satisfied. This check appears twice:
# once for the pretend path, once for the real merge path.
15158 if "--buildpkgonly" in myopts:
15159 graph_copy = mydepgraph.digraph.clone()
15160 removed_nodes = set()
15161 for node in graph_copy:
15162 if not isinstance(node, Package) or \
15163 node.operation == "nomerge":
15164 removed_nodes.add(node)
15165 graph_copy.difference_update(removed_nodes)
15166 if not graph_copy.hasallzeros(ignore_priority = \
15167 DepPrioritySatisfiedRange.ignore_medium):
15168 print "\n!!! --buildpkgonly requires all dependencies to be merged."
15169 print "!!! You have to merge the dependencies before you can build this package.\n"
15172 if "--buildpkgonly" in myopts:
15173 graph_copy = mydepgraph.digraph.clone()
15174 removed_nodes = set()
15175 for node in graph_copy:
15176 if not isinstance(node, Package) or \
15177 node.operation == "nomerge":
15178 removed_nodes.add(node)
15179 graph_copy.difference_update(removed_nodes)
15180 if not graph_copy.hasallzeros(ignore_priority = \
15181 DepPrioritySatisfiedRange.ignore_medium):
15182 print "\n!!! --buildpkgonly requires all dependencies to be merged."
15183 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Real merge: hand the ordered list to the Scheduler. The depgraph and
# list are deleted and caches cleared first to minimize memory held
# during the (long) merge.
15186 if ("--resume" in myopts):
15187 favorites=mtimedb["resume"]["favorites"]
15188 mymergelist = mydepgraph.altlist()
15189 mydepgraph.break_refs(mymergelist)
15190 mergetask = Scheduler(settings, trees, mtimedb, myopts,
15191 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
15192 del mydepgraph, mymergelist
15193 clear_caches(trees)
15195 retval = mergetask.merge()
15196 merge_count = mergetask.curval
15198 if "resume" in mtimedb and \
15199 "mergelist" in mtimedb["resume"] and \
15200 len(mtimedb["resume"]["mergelist"]) > 1:
15201 mtimedb["resume_backup"] = mtimedb["resume"]
15202 del mtimedb["resume"]
15204 mtimedb["resume"]={}
15205 # Stored as a dict starting with portage-2.1.6_rc1, and supported
15206 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
15207 # a list type for options.
15208 mtimedb["resume"]["myopts"] = myopts.copy()
15210 # Convert Atom instances to plain str.
15211 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
15213 pkglist = mydepgraph.altlist()
15214 mydepgraph.saveNomergeFavorites()
15215 mydepgraph.break_refs(pkglist)
15216 mergetask = Scheduler(settings, trees, mtimedb, myopts,
15217 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
15218 del mydepgraph, pkglist
15219 clear_caches(trees)
15221 retval = mergetask.merge()
15222 merge_count = mergetask.curval
# Post-merge: auto-clean if AUTOCLEAN=yes, otherwise warn, then prune
# stale preserved-libs registry entries.
15224 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
15225 if "yes" == settings.get("AUTOCLEAN"):
15226 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
15227 unmerge(trees[settings["ROOT"]]["root_config"],
15228 myopts, "clean", [],
15229 ldpath_mtimes, autoclean=1)
15231 portage.writemsg_stdout(colorize("WARN", "WARNING:")
15232 + " AUTOCLEAN is disabled. This can cause serious"
15233 + " problems due to overlapping packages.\n")
15234 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report on stderr that two mutually-exclusive actions were requested."""
	complaint = (
		"\n!!! Multiple actions requested... Please choose one only.\n"
		"!!! '%s' or '%s'\n\n" % (action1, action2))
	sys.stderr.write(complaint)
# Pre-processor for the raw argv: optparse cannot express "option with an
# optional value", so this rewrites the arg list to always supply one
# (inserting the literal string 'True' as the default).
# NOTE(review): elided lines here include the `while arg_stack:` loop
# header, `new_args = []`, several `continue`/`else:` lines and the
# `try:` around the int() conversions -- confirm against upstream.
15243 def insert_optional_args(args):
15245 Parse optional arguments and insert a value if one has
15246 not been provided. This is done before feeding the args
15247 to the optparse parser since that parser does not support
15248 this feature natively.
15252 jobs_opts = ("-j", "--jobs")
# Options with optional values and the values they accept inline.
15253 default_arg_opts = {
15254 '--deselect' : ('n',),
15255 '--root-deps' : ('rdeps',),
# Process args as a stack (reversed copy) so lookahead is arg_stack[-1].
15257 arg_stack = args[:]
15258 arg_stack.reverse()
15260 arg = arg_stack.pop()
15262 default_arg_choices = default_arg_opts.get(arg)
15263 if default_arg_choices is not None:
15264 new_args.append(arg)
15265 if arg_stack and arg_stack[-1] in default_arg_choices:
15266 new_args.append(arg_stack.pop())
15268 # insert default argument
15269 new_args.append('True')
# Short -j may be bundled with other short options (e.g. "-vj5").
15272 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
15273 if not (short_job_opt or arg in jobs_opts):
15274 new_args.append(arg)
15277 # Insert an empty placeholder in order to
15278 # satisfy the requirements of optparse.
15280 new_args.append("--jobs")
# Extract an inline job count ("-j5") or the non-j short options to
# re-emit after the normalized --jobs.
15283 if short_job_opt and len(arg) > 2:
15284 if arg[:2] == "-j":
15286 job_count = int(arg[2:])
15288 saved_opts = arg[2:]
15291 saved_opts = arg[1:].replace("j", "")
# A separated job count may follow as the next argument ("-j 5").
15293 if job_count is None and arg_stack:
15295 job_count = int(arg_stack[-1])
15299 # Discard the job count from the stack
15300 # since we're consuming it here.
15303 if job_count is None:
15304 # unlimited number of jobs
15305 new_args.append("True")
15307 new_args.append(str(job_count))
15309 if saved_opts is not None:
15310 new_args.append("-" + saved_opts)
# Parse the (pre-processed) emerge command line with optparse, translating
# the parsed options back into the classic (myaction, myopts, myfiles)
# triple the rest of emerge expects.
# NOTE(review): this extraction elides many lines -- the option-name keys
# of argument_options, `myopts = {}` / `myfiles = []` / `myaction = None`
# initializations, the `try:` lines around int()/float() and several
# `if v:`/`else:` lines -- confirm against upstream.
15314 def parse_opts(tmpcmdline, silent=False):
15319 global actions, options, shortmapping
15321 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, with their optparse keyword arguments.
15322 argument_options = {
15324 "help":"specify the location for portage configuration files",
15328 "help":"enable or disable color output",
15330 "choices":("y", "n")
15334 "help" : "remove atoms from the world file",
15336 "choices" : ("True", "n")
15341 "help" : "Specifies the number of packages to build " + \
15347 "--load-average": {
15349 "help" :"Specifies that no new builds should be started " + \
15350 "if there are other builds running and the load average " + \
15351 "is at least LOAD (a floating-point number).",
15357 "help":"include unnecessary build time dependencies",
15359 "choices":("y", "n")
15362 "help":"specify conditions to trigger package reinstallation",
15364 "choices":["changed-use"]
15367 "help" : "specify the target root filesystem for merging packages",
# NOTE(review): "depedencies" is a typo in this user-visible help string;
# fixing it changes runtime output, so it is only flagged here.
15372 "help" : "modify interpretation of depedencies",
15374 "choices" :("True", "rdeps")
15378 from optparse import OptionParser
15379 parser = OptionParser()
15380 if parser.has_option("--help"):
15381 parser.remove_option("--help")
# Register boolean actions, plain options, short aliases and long aliases.
15383 for action_opt in actions:
15384 parser.add_option("--" + action_opt, action="store_true",
15385 dest=action_opt.replace("-", "_"), default=False)
15386 for myopt in options:
15387 parser.add_option(myopt, action="store_true",
15388 dest=myopt.lstrip("--").replace("-", "_"), default=False)
15389 for shortopt, longopt in shortmapping.iteritems():
15390 parser.add_option("-" + shortopt, action="store_true",
15391 dest=longopt.lstrip("--").replace("-", "_"), default=False)
15392 for myalias, myopt in longopt_aliases.iteritems():
15393 parser.add_option(myalias, action="store_true",
15394 dest=myopt.lstrip("--").replace("-", "_"), default=False)
15396 for myopt, kwargs in argument_options.iteritems():
15397 parser.add_option(myopt,
15398 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Normalize optional-value options before optparse sees them.
15400 tmpcmdline = insert_optional_args(tmpcmdline)
15402 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# The literal string 'True' (inserted by insert_optional_args) means the
# option was given with no explicit value.
15404 if myoptions.deselect == "True":
15405 myoptions.deselect = True
15407 if myoptions.root_deps == "True":
15408 myoptions.root_deps = True
15412 if myoptions.jobs == "True":
15416 jobs = int(myoptions.jobs)
15420 if jobs is not True and \
15424 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15425 (myoptions.jobs,), noiselevel=-1)
15427 myoptions.jobs = jobs
15429 if myoptions.load_average:
15431 load_average = float(myoptions.load_average)
15435 if load_average <= 0.0:
15436 load_average = None
15438 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15439 (myoptions.load_average,), noiselevel=-1)
15441 myoptions.load_average = load_average
# Translate set optparse attributes back into the myopts dict.
15443 for myopt in options:
15444 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15446 myopts[myopt] = True
15448 for myopt in argument_options:
15449 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15453 if myoptions.searchdesc:
15454 myoptions.search = True
# Exactly one action may be requested; complain otherwise.
15456 for action_opt in actions:
15457 v = getattr(myoptions, action_opt.replace("-", "_"))
15460 multiple_actions(myaction, action_opt)
15462 myaction = action_opt
15464 if myaction is None and myoptions.deselect is True:
15465 myaction = 'deselect'
15469 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run configuration validation on the vartree settings of every root."""
	for root_key in trees:
		trees[root_key]["vartree"].settings.validate()
def clear_caches(trees):
	"""Drop the in-memory caches of the port/bin/var db objects for every
	root, then flush portage's global directory-listing cache. Called
	before handing control to the Scheduler to reduce memory held during
	a long merge."""
	for root_trees in trees.itervalues():
		porttree_db = root_trees["porttree"].dbapi
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db = root_trees["bintree"].dbapi
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Build the (settings, trees, mtimedb) triple that nearly every emerge
# action consumes: create the portage trees (honoring PORTAGE_CONFIGROOT
# and ROOT from the environment), attach a RootConfig to each root, and
# open the global mtimedb.
# NOTE(review): elided lines include `kwargs = {}` and the assignment
# `kwargs[k] = v` inside the env loop -- confirm against upstream.
15486 def load_emerge_config(trees=None):
15488 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15489 v = os.environ.get(envvar, None)
15490 if v and v.strip():
15492 trees = portage.create_trees(trees=trees, **kwargs)
15494 for root, root_trees in trees.iteritems():
15495 settings = root_trees["vartree"].settings
15496 setconfig = load_default_config(settings, root_trees)
15497 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
# Prefer the target root's settings over "/" when they differ.
15499 settings = trees["/"]["vartree"].settings
15501 for myroot in trees:
15503 settings = trees[myroot]["vartree"].settings
15506 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15507 mtimedb = portage.MtimeDB(mtimedbfile)
15509 return settings, trees, mtimedb
15511 def adjust_config(myopts, settings):
15512 """Make emerge specific adjustments to the config."""
15514 # To enhance usability, make some vars case insensitive by forcing them to
15516 for myvar in ("AUTOCLEAN", "NOCOLOR"):
15517 if myvar in settings:
15518 settings[myvar] = settings[myvar].lower()
15519 settings.backup_changes(myvar)
15522 # Kill noauto as it will break merges otherwise.
15523 if "noauto" in settings.features:
15524 settings.features.remove('noauto')
15525 settings['FEATURES'] = ' '.join(sorted(settings.features))
15526 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY, falling back to (and re-writing) the default on a
# malformed value. NOTE(review): the preceding default assignment and
# the `try:` line are elided in this extraction.
15530 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15531 except ValueError, e:
15532 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15533 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15534 settings["CLEAN_DELAY"], noiselevel=-1)
15535 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15536 settings.backup_changes("CLEAN_DELAY")
# Same treatment for EMERGE_WARNING_DELAY (default 10 seconds).
15538 EMERGE_WARNING_DELAY = 10
15540 EMERGE_WARNING_DELAY = int(settings.get(
15541 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15542 except ValueError, e:
15543 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15544 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15545 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15546 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15547 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line verbosity switches into the build environment.
15549 if "--quiet" in myopts:
15550 settings["PORTAGE_QUIET"]="1"
15551 settings.backup_changes("PORTAGE_QUIET")
15553 if "--verbose" in myopts:
15554 settings["PORTAGE_VERBOSE"] = "1"
15555 settings.backup_changes("PORTAGE_VERBOSE")
15557 # Set so that configs will be merged regardless of remembered status
15558 if ("--noconfmem" in myopts):
15559 settings["NOCONFMEM"]="1"
15560 settings.backup_changes("NOCONFMEM")
15562 # Set various debug markers... They should be merged somehow.
# Validate PORTAGE_DEBUG: must parse as an int and be exactly 0 or 1.
15565 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15566 if PORTAGE_DEBUG not in (0, 1):
15567 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15568 PORTAGE_DEBUG, noiselevel=-1)
15569 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15572 except ValueError, e:
15573 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15574 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15575 settings["PORTAGE_DEBUG"], noiselevel=-1)
15577 if "--debug" in myopts:
15579 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15580 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR from config, then an explicit --color override,
# then auto-disable when stdout is not a tty.
15582 if settings.get("NOCOLOR") not in ("yes","true"):
15583 portage.output.havecolor = 1
15585 """The explicit --color < y | n > option overrides the NOCOLOR environment
15586 variable and stdout auto-detection."""
15587 if "--color" in myopts:
15588 if "y" == myopts["--color"]:
15589 portage.output.havecolor = 1
15590 settings["NOCOLOR"] = "false"
15592 portage.output.havecolor = 0
15593 settings["NOCOLOR"] = "true"
15594 settings.backup_changes("NOCOLOR")
15595 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15596 portage.output.havecolor = 0
15597 settings["NOCOLOR"] = "true"
15598 settings.backup_changes("NOCOLOR")
# Apply process-priority adjustments configured in settings.
# NOTE(review): the body of this function is entirely elided in this
# extraction -- presumably it calls the nice() and ionice() helpers
# defined below; confirm against upstream.
15600 def apply_priorities(settings):
# Renice the current process to PORTAGE_NICENESS (default "0"), reporting
# failure via EOutput instead of raising.
# NOTE(review): the `try:` line before os.nice() is elided here.
15604 def nice(settings):
15606 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# OSError: insufficient privilege; ValueError: non-integer setting.
15607 except (OSError, ValueError), e:
15608 out = portage.output.EOutput()
15609 out.eerror("Failed to change nice value to '%s'" % \
15610 settings["PORTAGE_NICENESS"])
15611 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND (with \${PID} expanded to
# the current pid) to set the I/O scheduling class of this process.
# NOTE(review): the early-return when the command is unset and the return
# inside the except clause are elided in this extraction.
15613 def ionice(settings):
15615 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15617 ionice_cmd = shlex.split(ionice_cmd)
15621 from portage.util import varexpand
15622 variables = {"PID" : str(os.getpid())}
15623 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15626 rval = portage.process.spawn(cmd, env=os.environ)
15627 except portage.exception.CommandNotFound:
15628 # The OS kernel probably doesn't support ionice,
15629 # so return silently.
15632 if rval != os.EX_OK:
15633 out = portage.output.EOutput()
15634 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15635 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Print an error listing all known package sets when the requested
# set_name does not exist.
# NOTE(review): the `msg = []` initialization is elided in this extraction.
15637 def display_missing_pkg_set(root_config, set_name):
15640 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15641 "The following sets exist:") % \
15642 colorize("INFORM", set_name))
15645 for s in sorted(root_config.sets):
15646 msg.append(" %s" % s)
15649 writemsg_level("".join("%s\n" % l for l in msg),
15650 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments on the emerge command line: rewrite bare
# "system"/"world" to @-prefixed form, apply inline set options
# ("@set[opt=val]"), evaluate set-algebra expressions (@a-@b, @a+@b, ...),
# and expand sets to their member atoms where the current action allows.
# Returns (newargs, retval).
# NOTE(review): several lines are elided in this extraction -- e.g.
# `newargs = []`, `retval = os.EX_OK`, the ARG_START/ARG_END and
# IS_OPERATOR definitions, `options = {}` and various `else:`/`continue`
# lines -- confirm against upstream.
15652 def expand_set_arguments(myfiles, myaction, root_config):
15654 setconfig = root_config.setconfig
15656 sets = setconfig.getSets()
15658 # In order to know exactly which atoms/sets should be added to the
15659 # world file, the depgraph performs set expansion later. It will get
15660 # confused about where the atoms came from if it's not allowed to
15661 # expand them itself.
15662 do_not_expand = (None, )
15665 if a in ("system", "world"):
15666 newargs.append(SETPREFIX+a)
15673 # separators for set arguments
15677 # WARNING: all operators must be of equal length
15679 DIFF_OPERATOR = "-@"
15680 UNION_OPERATOR = "+@"
# First pass: apply inline "@set[key=value,...]" option syntax by pushing
# the options into setconfig and stripping them from the argument.
15682 for i in range(0, len(myfiles)):
15683 if myfiles[i].startswith(SETPREFIX):
15686 x = myfiles[i][len(SETPREFIX):]
15689 start = x.find(ARG_START)
15690 end = x.find(ARG_END)
15691 if start > 0 and start < end:
15692 namepart = x[:start]
15693 argpart = x[start+1:end]
15695 # TODO: implement proper quoting
15696 args = argpart.split(",")
15700 k, v = a.split("=", 1)
15703 options[a] = "True"
15704 setconfig.update(namepart, options)
15705 newset += (x[:start-len(namepart)]+namepart)
15706 x = x[end+len(ARG_END):]
15710 myfiles[i] = SETPREFIX+newset
# Re-read the sets, since setconfig.update() above may have changed them.
15712 sets = setconfig.getSets()
15714 # display errors that occurred while loading the SetConfig instance
15715 for e in setconfig.errors:
15716 print colorize("BAD", "Error during set creation: %s" % e)
15718 # emerge relies on the existence of sets with names "world" and "system"
15719 required_sets = ("world", "system")
15722 for s in required_sets:
15724 missing_sets.append(s)
# Compose an English list ("a", "a and b", "a, b, and c") of missing sets.
15726 if len(missing_sets) > 2:
15727 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15728 missing_sets_str += ', and "%s"' % missing_sets[-1]
15729 elif len(missing_sets) == 2:
15730 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15732 missing_sets_str = '"%s"' % missing_sets[-1]
15733 msg = ["emerge: incomplete set configuration, " + \
15734 "missing set(s): %s" % missing_sets_str]
15736 msg.append(" sets defined: %s" % ", ".join(sets))
15737 msg.append(" This usually means that '%s'" % \
15738 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15739 msg.append(" is missing or corrupt.")
15741 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15743 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: evaluate each @-argument.
15746 if a.startswith(SETPREFIX):
15747 # support simple set operations (intersection, difference and union)
15748 # on the commandline. Expressions are evaluated strictly left-to-right
15749 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15750 expression = a[len(SETPREFIX):]
# Peel operators off the right end; expr_sets/expr_ops end up in
# left-to-right order for the evaluation loop below.
15753 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15754 is_pos = expression.rfind(IS_OPERATOR)
15755 diff_pos = expression.rfind(DIFF_OPERATOR)
15756 union_pos = expression.rfind(UNION_OPERATOR)
15757 op_pos = max(is_pos, diff_pos, union_pos)
15758 s1 = expression[:op_pos]
15759 s2 = expression[op_pos+len(IS_OPERATOR):]
15760 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15762 display_missing_pkg_set(root_config, s2)
15764 expr_sets.insert(0, s2)
15765 expr_ops.insert(0, op)
15767 if not expression in sets:
15768 display_missing_pkg_set(root_config, expression)
15770 expr_sets.insert(0, expression)
15771 result = set(setconfig.getSetAtoms(expression))
15772 for i in range(0, len(expr_ops)):
15773 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15774 if expr_ops[i] == IS_OPERATOR:
15775 result.intersection_update(s2)
15776 elif expr_ops[i] == DIFF_OPERATOR:
15777 result.difference_update(s2)
15778 elif expr_ops[i] == UNION_OPERATOR:
15781 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15782 newargs.extend(result)
# Plain "@setname" argument: validate it, then expand (or pass through
# for actions in do_not_expand, which expand sets themselves).
15784 s = a[len(SETPREFIX):]
15786 display_missing_pkg_set(root_config, s)
15788 setconfig.active.append(s)
15790 set_atoms = setconfig.getSetAtoms(s)
15791 except portage.exception.PackageSetNotFound, e:
15792 writemsg_level(("emerge: the given set '%s' " + \
15793 "contains a non-existent set named '%s'.\n") % \
15794 (s, e), level=logging.ERROR, noiselevel=-1)
15796 if myaction in unmerge_actions and \
15797 not sets[s].supportsOperation("unmerge"):
15798 sys.stderr.write("emerge: the given set '%s' does " % s + \
15799 "not support unmerge operations\n")
15801 elif not set_atoms:
15802 print "emerge: '%s' is an empty set" % s
15803 elif myaction not in do_not_expand:
15804 newargs.extend(set_atoms)
15806 newargs.append(SETPREFIX+s)
15807 for e in sets[s].errors:
15811 return (newargs, retval)
# Warn about overlays that lack a profiles/repo_name file; returns True
# when at least one repository is missing one.
# NOTE(review): the `for r in repos:` loop header and the `msg = []`
# initialization are elided in this extraction.
15813 def repo_name_check(trees):
15814 missing_repo_names = set()
15815 for root, root_trees in trees.iteritems():
15816 if "porttree" in root_trees:
15817 portdb = root_trees["porttree"].dbapi
# Start by assuming every configured tree is missing a name, then discard
# the paths of all repositories that registered one.
15818 missing_repo_names.update(portdb.porttrees)
15819 repos = portdb.getRepositories()
15821 missing_repo_names.discard(portdb.getRepositoryPath(r))
15822 if portdb.porttree_root in missing_repo_names and \
15823 not os.path.exists(os.path.join(
15824 portdb.porttree_root, "profiles")):
15825 # This is normal if $PORTDIR happens to be empty,
15826 # so don't warn about it.
15827 missing_repo_names.remove(portdb.porttree_root)
15829 if missing_repo_names:
15831 msg.append("WARNING: One or more repositories " + \
15832 "have missing repo_name entries:")
15834 for p in missing_repo_names:
15835 msg.append("\t%s/profiles/repo_name" % (p,))
15837 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15838 "should be a plain text file containing a unique " + \
15839 "name for the repository on the first line.", 70))
15840 writemsg_level("".join("%s\n" % l for l in msg),
15841 level=logging.WARNING, noiselevel=-1)
15843 return bool(missing_repo_names)
# Warn about repositories that portdbapi ignored because their
# profiles/repo_name duplicates that of another repository.
# Returns True when any ignored duplicates were found.
# NOTE(review): fragmentary listing -- original lines 15846 (presumably
# `ignored_repos = {}`), 15854-15856 (the `if ignored_repos:` guard and
# `msg = []`), 15860 and 15865 are not visible here; confirm against the
# full source before editing.
15845 def repo_name_duplicate_check(trees):
15847 for root, root_trees in trees.iteritems():
15848 if 'porttree' in root_trees:
15849 portdb = root_trees['porttree'].dbapi
# The warning can be suppressed via PORTAGE_REPO_DUPLICATE_WARN=0.
15850 if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
# Group ignored paths by (root, name, winning repository path).
15851 for repo_name, paths in portdb._ignored_repos:
15852 k = (root, repo_name, portdb.getRepositoryPath(repo_name))
15853 ignored_repos.setdefault(k, []).extend(paths)
15857 msg.append('WARNING: One or more repositories ' + \
15858 'have been ignored due to duplicate')
15859 msg.append(' profiles/repo_name entries:')
15861 for k in sorted(ignored_repos):
15862 msg.append(' %s overrides' % (k,))
15863 for path in ignored_repos[k]:
15864 msg.append(' %s' % (path,))
# Indent the wrapped explanation to match the list above.
15866 msg.extend(' ' + x for x in textwrap.wrap(
15867 "All profiles/repo_name entries must be unique in order " + \
15868 "to avoid having duplicates ignored. " + \
15869 "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
15870 "/etc/make.conf if you would like to disable this warning."))
15871 writemsg_level(''.join('%s\n' % l for l in msg),
15872 level=logging.WARNING, noiselevel=-1)
# Truthy result signals "at least one duplicate was ignored".
15874 return bool(ignored_repos)
# Warn when CONFIG_PROTECT is empty for any root, since that disables
# configuration-file protection during merges.
# NOTE(review): original line 15880 is missing from this listing --
# presumably a guard (e.g. `if root != "/":`) before the message is
# extended with the root path; confirm against the full source.
15876 def config_protect_check(trees):
15877 for root, root_trees in trees.iteritems():
15878 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15879 msg = "!!! CONFIG_PROTECT is empty"
15881 msg += " for '%s'" % root
15882 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report an ambiguous short ebuild name to the user. In --quiet mode a
# plain sorted list of matching category/package names is printed;
# otherwise a search object is used to show richer output per match.
# NOTE(review): fragmentary listing -- original lines 15885, 15891-15892,
# 15897 and 15901-15902 are not visible here (likely the quiet-mode
# return and the per-match search/output calls); confirm against the
# full source before editing.
15884 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15886 if "--quiet" in myopts:
15887 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15888 print "!!! one of the following fully-qualified ebuild names instead:\n"
# dep_getkey reduces each atom to its category/package key; the set
# removes duplicates before sorting for display.
15889 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15890 print " " + colorize("INFORM", cp)
# Non-quiet path: reuse the interactive search machinery so each match
# is displayed with its description.
15893 s = search(root_config, spinner, "--searchdesc" in myopts,
15894 "--quiet" not in myopts, "--usepkg" in myopts,
15895 "--usepkgonly" in myopts)
15896 null_cp = portage.dep_getkey(insert_category_into_atom(
15898 cat, atom_pn = portage.catsplit(null_cp)
15899 s.searchkey = atom_pn
15900 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15903 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15904 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that a valid profile is configured before allowing most
# actions; "info", "sync", --version and --help are always permitted so
# the user can diagnose or repair a broken profile.
# NOTE(review): fragmentary listing -- original lines 15908, 15910,
# 15913 and 15922+ are missing (presumably `return os.EX_OK` early-outs
# and the final failure return code); confirm against the full source.
15906 def profile_check(trees, myaction, myopts):
15907 if myaction in ("info", "sync"):
15909 elif "--version" in myopts or "--help" in myopts:
# A non-empty settings.profiles list means a profile resolved for
# this root.
15911 for root, root_trees in trees.iteritems():
15912 if root_trees["root_config"].settings.profiles:
15914 # generate some profile related warning messages
15915 validate_ebuild_environment(trees)
15916 msg = "If you have just changed your profile configuration, you " + \
15917 "should revert back to the previous configuration. Due to " + \
15918 "your current profile being invalid, allowed actions are " + \
15919 "limited to --help, --info, --sync, and --version."
# Wrap to 70 columns and prefix each line with "!!! " for emphasis.
15920 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15921 level=logging.ERROR, noiselevel=-1)
15926 global portage # NFC why this is necessary now - genone
15927 portage._disable_legacy_globals()
15928 # Disable color until we're sure that it should be enabled (after
15929 # EMERGE_DEFAULT_OPTS has been parsed).
15930 portage.output.havecolor = 0
15931 # This first pass is just for options that need to be known as early as
15932 # possible, such as --config-root. They will be parsed again later,
15933 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15934 # the value of --config-root).
15935 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15936 if "--debug" in myopts:
15937 os.environ["PORTAGE_DEBUG"] = "1"
15938 if "--config-root" in myopts:
15939 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15940 if "--root" in myopts:
15941 os.environ["ROOT"] = myopts["--root"]
15943 # Portage needs to ensure a sane umask for the files it creates.
15945 settings, trees, mtimedb = load_emerge_config()
15946 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15947 rval = profile_check(trees, myaction, myopts)
15948 if rval != os.EX_OK:
15951 if portage._global_updates(trees, mtimedb["updates"]):
15953 # Reload the whole config from scratch.
15954 settings, trees, mtimedb = load_emerge_config(trees=trees)
15955 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15957 xterm_titles = "notitles" not in settings.features
15960 if "--ignore-default-opts" not in myopts:
15961 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15962 tmpcmdline.extend(sys.argv[1:])
15963 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15965 if "--digest" in myopts:
15966 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15967 # Reload the whole config from scratch so that the portdbapi internal
15968 # config is updated with new FEATURES.
15969 settings, trees, mtimedb = load_emerge_config(trees=trees)
15970 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15972 for myroot in trees:
15973 mysettings = trees[myroot]["vartree"].settings
15974 mysettings.unlock()
15975 adjust_config(myopts, mysettings)
15976 if '--pretend' not in myopts and myaction in \
15977 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15978 mysettings["PORTAGE_COUNTER_HASH"] = \
15979 trees[myroot]["vartree"].dbapi._counter_hash()
15980 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15982 del myroot, mysettings
15984 apply_priorities(settings)
15986 spinner = stdout_spinner()
15987 if "candy" in settings.features:
15988 spinner.update = spinner.update_scroll
15990 if "--quiet" not in myopts:
15991 portage.deprecated_profile_check(settings=settings)
15992 repo_name_check(trees)
15993 repo_name_duplicate_check(trees)
15994 config_protect_check(trees)
15996 for mytrees in trees.itervalues():
15997 mydb = mytrees["porttree"].dbapi
15998 # Freeze the portdbapi for performance (memoize all xmatch results).
16002 if "moo" in myfiles:
16005 Larry loves Gentoo (""" + platform.system() + """)
16007 _______________________
16008 < Have you mooed today? >
16009 -----------------------
16019 ext = os.path.splitext(x)[1]
16020 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
16021 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
16024 root_config = trees[settings["ROOT"]]["root_config"]
16025 if myaction == "list-sets":
16026 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
16030 # only expand sets for actions taking package arguments
16031 oldargs = myfiles[:]
16032 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
16033 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
16034 if retval != os.EX_OK:
16037 # Need to handle empty sets specially, otherwise emerge will react
16038 # with the help message for empty argument lists
16039 if oldargs and not myfiles:
16040 print "emerge: no targets left after set expansion"
16043 if ("--tree" in myopts) and ("--columns" in myopts):
16044 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
16047 if ("--quiet" in myopts):
16048 spinner.update = spinner.update_quiet
16049 portage.util.noiselimit = -1
16051 # Always create packages if FEATURES=buildpkg
16052 # Imply --buildpkg if --buildpkgonly
16053 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
16054 if "--buildpkg" not in myopts:
16055 myopts["--buildpkg"] = True
16057 # Always try and fetch binary packages if FEATURES=getbinpkg
16058 if ("getbinpkg" in settings.features):
16059 myopts["--getbinpkg"] = True
16061 if "--buildpkgonly" in myopts:
16062 # --buildpkgonly will not merge anything, so
16063 # it cancels all binary package options.
16064 for opt in ("--getbinpkg", "--getbinpkgonly",
16065 "--usepkg", "--usepkgonly"):
16066 myopts.pop(opt, None)
16068 if "--fetch-all-uri" in myopts:
16069 myopts["--fetchonly"] = True
16071 if "--skipfirst" in myopts and "--resume" not in myopts:
16072 myopts["--resume"] = True
16074 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
16075 myopts["--usepkgonly"] = True
16077 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
16078 myopts["--getbinpkg"] = True
16080 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
16081 myopts["--usepkg"] = True
16083 # Also allow -K to apply --usepkg/-k
16084 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
16085 myopts["--usepkg"] = True
16087 # Allow -p to remove --ask
16088 if ("--pretend" in myopts) and ("--ask" in myopts):
16089 print ">>> --pretend disables --ask... removing --ask from options."
16090 del myopts["--ask"]
16092 # forbid --ask when not in a terminal
16093 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
16094 if ("--ask" in myopts) and (not sys.stdin.isatty()):
16095 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
16099 if settings.get("PORTAGE_DEBUG", "") == "1":
16100 spinner.update = spinner.update_quiet
16102 if "python-trace" in settings.features:
16103 import portage.debug
16104 portage.debug.set_trace(True)
16106 if not ("--quiet" in myopts):
16107 if not sys.stdout.isatty() or ("--nospinner" in myopts):
16108 spinner.update = spinner.update_basic
16110 if myaction == 'version':
16111 print getportageversion(settings["PORTDIR"], settings["ROOT"],
16112 settings.profile_path, settings["CHOST"],
16113 trees[settings["ROOT"]]["vartree"].dbapi)
16115 elif "--help" in myopts:
16116 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16119 if "--debug" in myopts:
16120 print "myaction", myaction
16121 print "myopts", myopts
16123 if not myaction and not myfiles and "--resume" not in myopts:
16124 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16127 pretend = "--pretend" in myopts
16128 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
16129 buildpkgonly = "--buildpkgonly" in myopts
16131 # check if root user is the current user for the actions where emerge needs this
16132 if portage.secpass < 2:
16133 # We've already allowed "--version" and "--help" above.
16134 if "--pretend" not in myopts and myaction not in ("search","info"):
16135 need_superuser = myaction in ('clean', 'depclean', 'deselect',
16136 'prune', 'unmerge') or not \
16138 (buildpkgonly and secpass >= 1) or \
16139 myaction in ("metadata", "regen") or \
16140 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
16141 if portage.secpass < 1 or \
16144 access_desc = "superuser"
16146 access_desc = "portage group"
16147 # Always show portage_group_warning() when only portage group
16148 # access is required but the user is not in the portage group.
16149 from portage.data import portage_group_warning
16150 if "--ask" in myopts:
16151 myopts["--pretend"] = True
16152 del myopts["--ask"]
16153 print ("%s access is required... " + \
16154 "adding --pretend to options.\n") % access_desc
16155 if portage.secpass < 1 and not need_superuser:
16156 portage_group_warning()
16158 sys.stderr.write(("emerge: %s access is " + \
16159 "required.\n\n") % access_desc)
16160 if portage.secpass < 1 and not need_superuser:
16161 portage_group_warning()
16164 disable_emergelog = False
16165 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
16167 disable_emergelog = True
16169 if myaction in ("search", "info"):
16170 disable_emergelog = True
16171 if disable_emergelog:
16172 """ Disable emergelog for everything except build or unmerge
16173 operations. This helps minimize parallel emerge.log entries that can
16174 confuse log parsers. We especially want it disabled during
16175 parallel-fetch, which uses --resume --fetchonly."""
16177 def emergelog(*pargs, **kargs):
16180 if not "--pretend" in myopts:
16181 emergelog(xterm_titles, "Started emerge on: "+\
16182 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
16185 myelogstr=" ".join(myopts)
16187 myelogstr+=" "+myaction
16189 myelogstr += " " + " ".join(oldargs)
16190 emergelog(xterm_titles, " *** emerge " + myelogstr)
16193 def emergeexitsig(signum, frame):
16194 signal.signal(signal.SIGINT, signal.SIG_IGN)
16195 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16196 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
16197 sys.exit(100+signum)
16198 signal.signal(signal.SIGINT, emergeexitsig)
16199 signal.signal(signal.SIGTERM, emergeexitsig)
16202 """This gets out final log message in before we quit."""
16203 if "--pretend" not in myopts:
16204 emergelog(xterm_titles, " *** terminating.")
16205 if "notitles" not in settings.features:
16207 portage.atexit_register(emergeexit)
16209 if myaction in ("config", "metadata", "regen", "sync"):
16210 if "--pretend" in myopts:
16211 sys.stderr.write(("emerge: The '%s' action does " + \
16212 "not support '--pretend'.\n") % myaction)
16215 if "sync" == myaction:
16216 return action_sync(settings, trees, mtimedb, myopts, myaction)
16217 elif "metadata" == myaction:
16218 action_metadata(settings, portdb, myopts)
16219 elif myaction=="regen":
16220 validate_ebuild_environment(trees)
16221 return action_regen(settings, portdb, myopts.get("--jobs"),
16222 myopts.get("--load-average"))
16224 elif "config"==myaction:
16225 validate_ebuild_environment(trees)
16226 action_config(settings, trees, myopts, myfiles)
16229 elif "search"==myaction:
16230 validate_ebuild_environment(trees)
16231 action_search(trees[settings["ROOT"]]["root_config"],
16232 myopts, myfiles, spinner)
16234 elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
16235 validate_ebuild_environment(trees)
16236 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
16237 myopts, myaction, myfiles, spinner)
16238 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
16239 post_emerge(root_config, myopts, mtimedb, rval)
16242 elif myaction == 'info':
16244 # Ensure atoms are valid before calling unmerge().
16245 vardb = trees[settings["ROOT"]]["vartree"].dbapi
16248 if is_valid_package_atom(x):
16250 valid_atoms.append(
16251 portage.dep_expand(x, mydb=vardb, settings=settings))
16252 except portage.exception.AmbiguousPackageName, e:
16253 msg = "The short ebuild name \"" + x + \
16254 "\" is ambiguous. Please specify " + \
16255 "one of the following " + \
16256 "fully-qualified ebuild names instead:"
16257 for line in textwrap.wrap(msg, 70):
16258 writemsg_level("!!! %s\n" % (line,),
16259 level=logging.ERROR, noiselevel=-1)
16261 writemsg_level(" %s\n" % colorize("INFORM", i),
16262 level=logging.ERROR, noiselevel=-1)
16263 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
16267 msg.append("'%s' is not a valid package atom." % (x,))
16268 msg.append("Please check ebuild(5) for full details.")
16269 writemsg_level("".join("!!! %s\n" % line for line in msg),
16270 level=logging.ERROR, noiselevel=-1)
16273 return action_info(settings, trees, myopts, valid_atoms)
16275 # "update", "system", or just process files:
16277 validate_ebuild_environment(trees)
16280 if x.startswith(SETPREFIX) or \
16281 is_valid_package_atom(x):
16283 if x[:1] == os.sep:
16291 msg.append("'%s' is not a valid package atom." % (x,))
16292 msg.append("Please check ebuild(5) for full details.")
16293 writemsg_level("".join("!!! %s\n" % line for line in msg),
16294 level=logging.ERROR, noiselevel=-1)
16297 if "--pretend" not in myopts:
16298 display_news_notification(root_config, myopts)
16299 retval = action_build(settings, trees, mtimedb,
16300 myopts, myaction, myfiles, spinner)
16301 root_config = trees[settings["ROOT"]]["root_config"]
16302 post_emerge(root_config, myopts, mtimedb, retval)