2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
70 class stdout_spinner(object):
# Progress indicator written to stdout while long operations (dependency
# calculation, cache regeneration) run. Three display modes are visible
# below: basic dots, a scrolling message, and a twirling cursor.
# NOTE(review): this listing appears elided; some statements of the class
# (e.g. the scroll_msgs list header and several return statements) are not
# visible here.
72 "Gentoo Rocks ("+platform.system()+")",
73 "Thank you for using Gentoo. :)",
74 "Are you actually trying to read this?",
75 "How many times have you stared at this?",
76 "We are generating the cache right now",
77 "You are paying too much attention.",
78 "A theory is better than its explanation.",
79 "Phasers locked on target, Captain.",
80 "Thrashing is just virtual crashing.",
81 "To be is to program.",
82 "Real Users hate Real Programmers.",
83 "When all else fails, read the instructions.",
84 "Functionality breeds Contempt.",
85 "The future lies ahead.",
86 "3.1415926535897932384626433832795028841971694",
87 "Sometimes insanity is the only alternative.",
88 "Inaccuracy saves a world of explanation.",
# Characters cycled through by update_twirl(); backslashes are escaped.
91 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default update style; a scroll message is picked pseudo-randomly by time.
95 self.update = self.update_twirl
96 self.scroll_sequence = self.scroll_msgs[
97 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (rate limit for tty writes).
99 self.min_display_latency = 0.05
101 def _return_early(self):
103 Flushing output to the tty too frequently wastes cpu time. Therefore,
104 each update* method should return without doing any output when this
# Rate limiter: compares elapsed time against min_display_latency and
# records the time of the last accepted update.
107 cur_time = time.time()
108 if cur_time - self.last_update < self.min_display_latency:
110 self.last_update = cur_time
113 def update_basic(self):
# Prints a dot roughly every 100 ticks; position wraps at 500.
114 self.spinpos = (self.spinpos + 1) % 500
115 if self._return_early():
117 if (self.spinpos % 100) == 0:
118 if self.spinpos == 0:
119 sys.stdout.write(". ")
121 sys.stdout.write(".")
124 def update_scroll(self):
# Scrolls the chosen message left then right using backspace characters.
125 if self._return_early():
127 if(self.spinpos >= len(self.scroll_sequence)):
128 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
# Full cycle covers the message forward and backward (2x its length).
133 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135 def update_twirl(self):
# Rotates through twirl_sequence, overwriting the previous character.
136 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137 if self._return_early():
139 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used when --quiet/--nospinner is in effect.
142 def update_quiet(self):
145 def userquery(prompt, responses=None, colours=None):
146 """Displays a prompt and a set of responses, then waits for a response
147 which is checked against the responses and the first to match is
148 returned. An empty response will match the first value in responses. The
149 input buffer is *not* cleared prior to the prompt!
152 responses: a List of Strings.
153 colours: a List of Functions taking and returning a String, used to
154 process the responses for display. Typically these will be functions
155 like red() but could be e.g. lambda x: "DisplayString".
156 If responses is omitted, defaults to ["Yes", "No"], [green, red].
157 If only colours is omitted, defaults to [bold, ...].
159 Returns a member of the List responses. (If called without optional
160 arguments, returns "Yes" or "No".)
161 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
# Default to a Yes/No prompt with configurable prompt colours.
163 if responses is None:
164 responses = ["Yes", "No"]
166 create_color_func("PROMPT_CHOICE_DEFAULT"),
167 create_color_func("PROMPT_CHOICE_OTHER")
169 elif colours is None:
# Repeat the colour list so there is one colour per response.
171 colours=(colours*len(responses))[:len(responses)]
# Loop until a response prefix-matches one of the choices (case-insensitive).
175 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176 for key in responses:
177 # An empty response will match the first value in responses.
178 if response.upper()==key[:len(response)].upper():
180 print "Sorry, response '%s' not understood." % response,
# EOF/Ctrl-C: abort rather than dump a traceback.
181 except (EOFError, KeyboardInterrupt):
# The set of non-merge actions emerge understands (e.g. `emerge --search`).
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen", "search",
# Long-form command line options accepted by emerge.
192 "--ask", "--alphabetical",
193 "--buildpkg", "--buildpkgonly",
194 "--changelog", "--columns",
199 "--fetchonly", "--fetch-all-uri",
200 "--getbinpkg", "--getbinpkgonly",
201 "--help", "--ignore-default-opts",
204 "--newuse", "--nocolor",
205 "--nodeps", "--noreplace",
206 "--nospinner", "--oneshot",
207 "--onlydeps", "--pretend",
208 "--quiet", "--resume",
209 "--searchdesc", "--selective",
213 "--usepkg", "--usepkgonly",
214 "--verbose", "--version"
# Mapping of single-letter short options to their long-form equivalents.
220 "b":"--buildpkg", "B":"--buildpkgonly",
221 "c":"--clean", "C":"--unmerge",
222 "d":"--debug", "D":"--deep",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "k":"--usepkg", "K":"--usepkgonly",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps", "O":"--nodeps",
231 "p":"--pretend", "P":"--prune",
233 "s":"--search", "S":"--searchdesc",
236 "v":"--verbose", "V":"--version"
239 def emergelog(xterm_titles, mystr, short_msg=None):
# Append a timestamped entry to /var/log/emerge.log, optionally updating
# the xterm title with short_msg (prefixed with $HOSTNAME when set).
# The log file is locked while writing and permissions are normalized to
# the portage user/group; I/O failures are reported, not raised.
240 if xterm_titles and short_msg:
241 if "HOSTNAME" in os.environ:
242 short_msg = os.environ["HOSTNAME"]+": "+short_msg
243 xtermTitle(short_msg)
245 file_path = "/var/log/emerge.log"
246 mylogfile = open(file_path, "a")
247 portage.util.apply_secpass_permissions(file_path,
248 uid=portage.portage_uid, gid=portage.portage_gid,
252 mylock = portage.locks.lockfile(mylogfile)
253 # seek because we may have gotten held up by the lock.
254 # if so, we may not be positioned at the end of the file.
# Timestamp is truncated to whole seconds (first 10 chars of time.time()).
256 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
260 portage.locks.unlockfile(mylock)
# Best-effort logging: report the failure on stderr and continue.
262 except (IOError,OSError,portage.exception.PortageException), e:
264 print >> sys.stderr, "emergelog():",e
266 def countdown(secs=5, doing="Starting"):
# Print a visible countdown before a destructive/irreversible action,
# giving the user a chance to hit Control-C.
268 print ">>> Waiting",secs,"seconds before starting..."
269 print ">>> (Control-C to abort)...\n"+doing+" in: ",
273 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
278 # formats a size given in bytes nicely
279 def format_size(mysize):
# Accepts an int byte count (a string is handled separately, elided here);
# rounds up to whole kB and inserts thousands separators.
280 if isinstance(mysize, basestring):
282 if 0 != mysize % 1024:
283 # Always round up to the next kB so that it doesn't show 0 kB when
284 # some small file still needs to be fetched.
285 mysize += 1024 - mysize % 1024
286 mystr=str(mysize/1024)
# Insert a comma as a thousands separator at position mycount.
290 mystr=mystr[:mycount]+","+mystr[mycount:]
294 def getgccversion(chost):
297 return: the current in-use gcc version
300 gcc_ver_command = 'gcc -dumpversion'
301 gcc_ver_prefix = 'gcc-'
303 gcc_not_found_error = red(
304 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305 "!!! to update the environment of this terminal and possibly\n" +
306 "!!! other terminals also.\n"
# Strategy: try gcc-config first, then ${CHOST}-gcc, then plain gcc;
# fall back to "[unavailable]" with a warning if none succeed.
309 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313 mystatus, myoutput = commands.getstatusoutput(
314 chost + "-" + gcc_ver_command)
315 if mystatus == os.EX_OK:
316 return gcc_ver_prefix + myoutput
318 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319 if mystatus == os.EX_OK:
320 return gcc_ver_prefix + myoutput
322 portage.writemsg(gcc_not_found_error, noiselevel=-1)
323 return "[unavailable]"
325 def getportageversion(portdir, target_root, profile, chost, vardb):
# Build the one-line version banner shown by `emerge --version`/--info:
# portage version, profile (relative to ${PORTDIR}/profiles, or the raw
# symlink target prefixed with "!"), gcc version, libc version, and uname.
326 profilever = "unavailable"
328 realpath = os.path.realpath(profile)
329 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
330 if realpath.startswith(basepath):
331 profilever = realpath[1 + len(basepath):]
334 profilever = "!" + os.readlink(profile)
337 del realpath, basepath
# Determine installed libc version from the virtual/libc (and legacy
# virtual/glibc) providers in the vardb.
340 libclist = vardb.match("virtual/libc")
341 libclist += vardb.match("virtual/glibc")
342 libclist = portage.util.unique_array(libclist)
344 xs=portage.catpkgsplit(x)
346 libcver+=","+"-".join(xs[1:])
348 libcver="-".join(xs[1:])
350 libcver="unavailable"
352 gccver = getgccversion(chost)
353 unameout=platform.release()+" "+platform.machine()
355 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357 def create_depgraph_params(myopts, myaction):
358 #configure emerge engine parameters
360 # self: include _this_ package regardless of if it is merged.
361 # selective: exclude the package if it is merged
362 # recurse: go into the dependencies
363 # deep: go into the dependencies of already merged packages
364 # empty: pretend nothing is merged
365 # complete: completely account for all known dependencies
366 # remove: build graph for use in removing packages
# Translate command-line options (myopts) and the action into the set of
# depgraph parameter flags documented above. "recurse" is the baseline.
367 myparams = set(["recurse"])
369 if myaction == "remove":
370 myparams.add("remove")
371 myparams.add("complete")
374 if "--update" in myopts or \
375 "--newuse" in myopts or \
376 "--reinstall" in myopts or \
377 "--noreplace" in myopts:
378 myparams.add("selective")
379 if "--emptytree" in myopts:
# --emptytree implies rebuilding everything, so "selective" is dropped.
380 myparams.add("empty")
381 myparams.discard("selective")
382 if "--nodeps" in myopts:
383 myparams.discard("recurse")
384 if "--deep" in myopts:
386 if "--complete-graph" in myopts:
387 myparams.add("complete")
390 # search functionality
391 class search(object):
# Implements `emerge --search` / `--searchdesc`. A fake portdb facade
# (self.portdb) fans queries out over the configured databases (porttree,
# bintree, vartree) so installed and binary packages are searchable too.
402 def __init__(self, root_config, spinner, searchdesc,
403 verbose, usepkg, usepkgonly):
404 """Searches the available and installed packages for the supplied search key.
405 The list of available and installed packages is created at object instantiation.
406 This makes successive searches faster."""
407 self.settings = root_config.settings
408 self.vartree = root_config.trees["vartree"]
409 self.spinner = spinner
410 self.verbose = verbose
411 self.searchdesc = searchdesc
412 self.root_config = root_config
413 self.setconfig = root_config.setconfig
414 self.matches = {"pkg" : []}
# Install our _-prefixed wrappers onto the fake portdb so dbapi-style
# calls are dispatched across all of self._dbs.
419 self.portdb = fake_portdb
420 for attrib in ("aux_get", "cp_all",
421 "xmatch", "findname", "getFetchMap"):
422 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
426 portdb = root_config.trees["porttree"].dbapi
427 bindb = root_config.trees["bintree"].dbapi
428 vardb = root_config.trees["vartree"].dbapi
# Choose which databases participate, based on --usepkg/--usepkgonly.
430 if not usepkgonly and portdb._have_root_eclass_dir:
431 self._dbs.append(portdb)
433 if (usepkg or usepkgonly) and bindb.cp_all():
434 self._dbs.append(bindb)
436 self._dbs.append(vardb)
437 self._portdb = portdb
# cp_all: union of category/package names across all databases.
442 cp_all.update(db.cp_all())
443 return list(sorted(cp_all))
445 def _aux_get(self, *args, **kwargs):
# Delegates aux_get to the first database that can answer it.
448 return db.aux_get(*args, **kwargs)
453 def _findname(self, *args, **kwargs):
455 if db is not self._portdb:
456 # We don't want findname to return anything
457 # unless it's an ebuild in a portage tree.
458 # Otherwise, it's already built and we don't
461 func = getattr(db, "findname", None)
463 value = func(*args, **kwargs)
468 def _getFetchMap(self, *args, **kwargs):
# Delegates getFetchMap to the first database providing it.
470 func = getattr(db, "getFetchMap", None)
472 value = func(*args, **kwargs)
477 def _visible(self, db, cpv, metadata):
# Visibility check via the shared visible() helper; package type depends
# on which database the cpv came from.
478 installed = db is self.vartree.dbapi
479 built = installed or db is not self._portdb
482 pkg_type = "installed"
485 return visible(self.settings,
486 Package(type_name=pkg_type, root_config=self.root_config,
487 cpv=cpv, built=built, installed=installed, metadata=metadata))
489 def _xmatch(self, level, atom):
491 This method does not expand old-style virtuals because it
492 is restricted to returning matches for a single ${CATEGORY}/${PN}
493 and old-style virtual matches are unreliable for that when querying
494 multiple package databases. If necessary, old-style virtuals
495 can be performed on atoms prior to calling this method.
497 cp = portage.dep_getkey(atom)
498 if level == "match-all":
# Union of matches from every db; prefer native xmatch when present.
501 if hasattr(db, "xmatch"):
502 matches.update(db.xmatch(level, atom))
504 matches.update(db.match(atom))
505 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506 db._cpv_sort_ascending(result)
507 elif level == "match-visible":
# Like match-all but filtered through self._visible() for dbs lacking
# a native xmatch implementation.
510 if hasattr(db, "xmatch"):
511 matches.update(db.xmatch(level, atom))
513 db_keys = list(db._aux_cache_keys)
514 for cpv in db.match(atom):
515 metadata = izip(db_keys,
516 db.aux_get(cpv, db_keys))
517 if not self._visible(db, cpv, metadata):
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
522 elif level == "bestmatch-visible":
# Keep the overall best (highest) visible match across databases.
525 if hasattr(db, "xmatch"):
526 cpv = db.xmatch("bestmatch-visible", atom)
527 if not cpv or portage.cpv_getkey(cpv) != cp:
529 if not result or cpv == portage.best([cpv, result]):
532 db_keys = Package.metadata_keys
533 # break out of this loop with highest visible
534 # match, checked in descending order
535 for cpv in reversed(db.match(atom)):
536 if portage.cpv_getkey(cpv) != cp:
538 metadata = izip(db_keys,
539 db.aux_get(cpv, db_keys))
540 if not self._visible(db, cpv, metadata):
542 if not result or cpv == portage.best([cpv, result]):
# Any other xmatch level is unsupported by this facade.
546 raise NotImplementedError(level)
549 def execute(self,searchkey):
550 """Performs the search for the supplied search key"""
552 self.searchkey=searchkey
553 self.packagematches = []
556 self.matches = {"pkg":[], "desc":[], "set":[]}
559 self.matches = {"pkg":[], "set":[]}
560 print "Searching... ",
# Leading '%' selects regex mode; leading '@' restricts to category/name.
563 if self.searchkey.startswith('%'):
565 self.searchkey = self.searchkey[1:]
566 if self.searchkey.startswith('@'):
568 self.searchkey = self.searchkey[1:]
570 self.searchre=re.compile(self.searchkey,re.I)
572 self.searchre=re.compile(re.escape(self.searchkey), re.I)
# Pass 1: match package names (and optionally DESCRIPTION metadata).
573 for package in self.portdb.cp_all():
574 self.spinner.update()
577 match_string = package[:]
579 match_string = package.split("/")[-1]
582 if self.searchre.search(match_string):
583 if not self.portdb.xmatch("match-visible", package):
585 self.matches["pkg"].append([package,masked])
586 elif self.searchdesc: # DESCRIPTION searching
587 full_package = self.portdb.xmatch("bestmatch-visible", package)
589 #no match found; we don't want to query description
590 full_package = portage.best(
591 self.portdb.xmatch("match-all", package))
597 full_desc = self.portdb.aux_get(
598 full_package, ["DESCRIPTION"])[0]
600 print "emerge: search: aux_get() failed, skipping"
602 if self.searchre.search(full_desc):
603 self.matches["desc"].append([full_package,masked])
# Pass 2: match package-set names (and their DESCRIPTION metadata).
605 self.sdict = self.setconfig.getSets()
606 for setname in self.sdict:
607 self.spinner.update()
609 match_string = setname
611 match_string = setname.split("/")[-1]
613 if self.searchre.search(match_string):
614 self.matches["set"].append([setname, False])
615 elif self.searchdesc:
616 if self.searchre.search(
617 self.sdict[setname].getMetadata("DESCRIPTION")):
618 self.matches["set"].append([setname, False])
# Sort each match category and total up the result count.
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
# Package sets are printed with just name and description.
656 print green("*")+" "+white(match)
657 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
661 desc, homepage, license = self.portdb.aux_get(
662 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664 print "emerge: search: aux_get() failed, skipping"
667 print green("*")+" "+white(match)+" "+red("[ Masked ]")
669 print green("*")+" "+white(match)
670 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
# Verbose mode: compute download size from the Manifest digests.
674 mycat = match.split("/")[0]
675 mypkg = match.split("/")[1]
676 mycpv = match + "-" + myversion
677 myebuild = self.portdb.findname(mycpv)
679 pkgdir = os.path.dirname(myebuild)
680 from portage import manifest
681 mf = manifest.Manifest(
682 pkgdir, self.settings["DISTDIR"])
684 uri_map = self.portdb.getFetchMap(mycpv)
685 except portage.exception.InvalidDependString, e:
686 file_size_str = "Unknown (%s)" % (e,)
690 mysum[0] = mf.getDistfilesSize(uri_map)
692 file_size_str = "Unknown (missing " + \
693 "digest for %s)" % (e,)
# Fall back to the binary package's file size when no ebuild exists.
698 if db is not vardb and \
699 db.cpv_exists(mycpv):
701 if not myebuild and hasattr(db, "bintree"):
702 myebuild = db.bintree.getname(mycpv)
704 mysum[0] = os.stat(myebuild).st_size
709 if myebuild and file_size_str is None:
# Format byte count as kB with a thousands separator.
710 mystr = str(mysum[0] / 1024)
714 mystr = mystr[:mycount] + "," + mystr[mycount:]
715 file_size_str = mystr + " kB"
719 print " ", darkgreen("Latest version available:"),myversion
720 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
723 (darkgreen("Size of files:"), file_size_str)
724 print " ", darkgreen("Homepage:")+" ",homepage
725 print " ", darkgreen("Description:")+" ",desc
726 print " ", darkgreen("License:")+" ",license
731 def getInstallationStatus(self,package):
# Report the best installed version of `package` or "[ Not Installed ]".
732 installed_package = self.vartree.dep_bestmatch(package)
734 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736 result = darkgreen("Latest version installed:")+" "+version
738 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741 def getVersion(self,full_package,detail):
# Extract the version (and -rN revision when detail requests it and the
# revision is not r0) from a full cpv string.
742 if len(full_package) > 1:
743 package_parts = portage.catpkgsplit(full_package)
744 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745 result = package_parts[2]+ "-" + package_parts[3]
747 result = package_parts[2]
752 class RootConfig(object):
753 """This is used internally by depgraph to track information about a
# Mapping from Package type_name to the tree name holding that type.
757 "ebuild" : "porttree",
758 "binary" : "bintree",
759 "installed" : "vartree"
# Build the inverse mapping (tree name -> package type) as well.
763 for k, v in pkg_tree_map.iteritems():
766 def __init__(self, settings, trees, setconfig):
768 self.settings = settings
# Implicit IUSE flags are precomputed and frozen for this root.
769 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770 self.root = self.settings["ROOT"]
771 self.setconfig = setconfig
772 self.sets = self.setconfig.getSets()
# In-memory dbapi tracking packages known to be visible for this root.
773 self.visible_pkgs = PackageVirtualDbapi(self.settings)
775 def create_world_atom(pkg, args_set, root_config):
776 """Create a new atom for the world file if one does not exist. If the
777 argument atom is precise enough to identify a specific slot then a slot
778 atom will be returned. Atoms that are in the system set may also be stored
779 in world since system atoms can only match one slot while world atoms can
780 be greedy with respect to slots. Unslotted system packages will not be
783 arg_atom = args_set.findAtomForPackage(pkg)
786 cp = portage.dep_getkey(arg_atom)
788 sets = root_config.sets
789 portdb = root_config.trees["porttree"].dbapi
790 vardb = root_config.trees["vartree"].dbapi
# Determine whether this cp is slotted: more than one SLOT, or a single
# non-"0" SLOT, among the matching ebuilds.
791 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792 for cpv in portdb.match(cp))
793 slotted = len(available_slots) > 1 or \
794 (len(available_slots) == 1 and "0" not in available_slots)
796 # check the vdb in case this is multislot
797 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798 for cpv in vardb.match(cp))
799 slotted = len(available_slots) > 1 or \
800 (len(available_slots) == 1 and "0" not in available_slots)
801 if slotted and arg_atom != cp:
802 # If the user gave a specific atom, store it as a
803 # slot atom in the world file.
804 slot_atom = pkg.slot_atom
806 # For USE=multislot, there are a couple of cases to
809 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810 # unknown value, so just record an unslotted atom.
812 # 2) SLOT comes from an installed package and there is no
813 # matching SLOT in the portage tree.
815 # Make sure that the slot atom is available in either the
816 # portdb or the vardb, since otherwise the user certainly
817 # doesn't want the SLOT atom recorded in the world file
818 # (case 1 above). If it's only available in the vardb,
819 # the user may be trying to prevent a USE=multislot
820 # package from being removed by --depclean (case 2 above).
823 if not portdb.match(slot_atom):
824 # SLOT seems to come from an installed multislot package
826 # If there is no installed package matching the SLOT atom,
827 # it probably changed SLOT spontaneously due to USE=multislot,
828 # so just record an unslotted atom.
829 if vardb.match(slot_atom):
830 # Now verify that the argument is precise
831 # enough to identify a specific slot.
832 matches = mydb.match(arg_atom)
833 matched_slots = set()
835 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836 if len(matched_slots) == 1:
837 new_world_atom = slot_atom
839 if new_world_atom == sets["world"].findAtomForPackage(pkg):
840 # Both atoms would be identical, so there's nothing to add.
843 # Unlike world atoms, system atoms are not greedy for slots, so they
844 # can't be safely excluded from world if they are slotted.
845 system_atom = sets["system"].findAtomForPackage(pkg)
847 if not portage.dep_getkey(system_atom).startswith("virtual/"):
849 # System virtuals aren't safe to exclude from world since they can
850 # match multiple old-style virtuals but only one of them will be
851 # pulled in by update or depclean.
852 providers = portdb.mysettings.getvirtuals().get(
853 portage.dep_getkey(system_atom))
854 if providers and len(providers) == 1 and providers[0] == cp:
856 return new_world_atom
858 def filter_iuse_defaults(iuse):
# Strip the leading "+"/"-" default markers from IUSE flags.
860 if flag.startswith("+") or flag.startswith("-"):
865 class SlotObject(object):
# Lightweight base class: attributes are declared via __slots__ in
# subclasses and initialized from keyword arguments (defaulting to None).
866 __slots__ = ("__weakref__",)
868 def __init__(self, **kwargs):
# Walk the class and all its bases, assigning each slotted attribute
# from kwargs (missing keys default to None).
869 classes = [self.__class__]
874 classes.extend(c.__bases__)
875 slots = getattr(c, "__slots__", None)
879 myvalue = kwargs.get(myattr, None)
880 setattr(self, myattr, myvalue)
884 Create a new instance and copy all attributes
885 defined from __slots__ (including those from
888 obj = self.__class__()
# Same base-class walk as __init__, copying values onto the new object.
890 classes = [self.__class__]
895 classes.extend(c.__bases__)
896 slots = getattr(c, "__slots__", None)
900 setattr(obj, myattr, getattr(self, myattr))
904 class AbstractDepPriority(SlotObject):
# Base for dependency-priority values. Rich comparisons delegate to
# __int__(), so priorities compare directly against ints and each other.
905 __slots__ = ("buildtime", "runtime", "runtime_post")
907 def __lt__(self, other):
908 return self.__int__() < other
910 def __le__(self, other):
911 return self.__int__() <= other
913 def __eq__(self, other):
914 return self.__int__() == other
916 def __ne__(self, other):
917 return self.__int__() != other
919 def __gt__(self, other):
920 return self.__int__() > other
922 def __ge__(self, other):
923 return self.__int__() >= other
# Shallow copy is sufficient since all slot values are simple.
927 return copy.copy(self)
929 class DepPriority(AbstractDepPriority):
# Priority for normal dependencies; adds satisfied/optional/rebuild state
# on top of the buildtime/runtime flags from the base class.
931 __slots__ = ("satisfied", "optional", "rebuild")
943 if self.runtime_post:
944 return "runtime_post"
# Priority used for blocker dependencies; a shared singleton instance is
# exposed as BlockerDepPriority.instance.
947 class BlockerDepPriority(DepPriority):
955 BlockerDepPriority.instance = BlockerDepPriority()
957 class UnmergeDepPriority(AbstractDepPriority):
958 __slots__ = ("optional", "satisfied",)
960 Combination of properties Priority Category
965 (none of the above) -2 SOFT
# String form mirrors the numeric priority categories above.
975 if self.runtime_post:
982 myvalue = self.__int__()
983 if myvalue > self.SOFT:
987 class DepPriorityNormalRange(object):
989 DepPriority properties Index Category
993 runtime_post 2 MEDIUM_SOFT
995 (none of the above) 0 NONE
# Each _ignore_* predicate returns True when a priority may be ignored at
# that softness level; non-DepPriority instances are never ignored.
1003 def _ignore_optional(cls, priority):
1004 if priority.__class__ is not DepPriority:
1006 return bool(priority.optional)
1009 def _ignore_runtime_post(cls, priority):
1010 if priority.__class__ is not DepPriority:
1012 return bool(priority.optional or priority.runtime_post)
1015 def _ignore_runtime(cls, priority):
1016 if priority.__class__ is not DepPriority:
1018 return not priority.buildtime
# Aliases naming the same predicates by softness category.
1020 ignore_medium = _ignore_runtime
1021 ignore_medium_soft = _ignore_runtime_post
1022 ignore_soft = _ignore_optional
# Predicates ordered from least to most aggressive, indexed by category.
1024 DepPriorityNormalRange.ignore_priority = (
1026 DepPriorityNormalRange._ignore_optional,
1027 DepPriorityNormalRange._ignore_runtime_post,
1028 DepPriorityNormalRange._ignore_runtime
1031 class DepPrioritySatisfiedRange(object):
1033 DepPriority Index Category
1035 not satisfied and buildtime HARD
1036 not satisfied and runtime 7 MEDIUM
1037 not satisfied and runtime_post 6 MEDIUM_SOFT
1038 satisfied and buildtime and rebuild 5 SOFT
1039 satisfied and buildtime 4 SOFT
1040 satisfied and runtime 3 SOFT
1041 satisfied and runtime_post 2 SOFT
1043 (none of the above) 0 NONE
# Like DepPriorityNormalRange, but distinguishes satisfied dependencies
# (which can be ignored more aggressively) from unsatisfied ones.
1051 def _ignore_optional(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional)
1057 def _ignore_satisfied_runtime_post(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 if priority.optional:
1062 if not priority.satisfied:
1064 return bool(priority.runtime_post)
1067 def _ignore_satisfied_runtime(cls, priority):
1068 if priority.__class__ is not DepPriority:
1070 if priority.optional:
1072 if not priority.satisfied:
1074 return not priority.buildtime
1077 def _ignore_satisfied_buildtime(cls, priority):
1078 if priority.__class__ is not DepPriority:
1080 if priority.optional:
1082 if not priority.satisfied:
# Satisfied buildtime deps are ignorable unless flagged for rebuild.
1084 if priority.buildtime:
1085 return not priority.rebuild
1089 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090 if priority.__class__ is not DepPriority:
1092 if priority.optional:
1094 return bool(priority.satisfied)
1097 def _ignore_runtime_post(cls, priority):
1098 if priority.__class__ is not DepPriority:
1100 return bool(priority.optional or \
1101 priority.satisfied or \
1102 priority.runtime_post)
1105 def _ignore_runtime(cls, priority):
1106 if priority.__class__ is not DepPriority:
1108 return bool(priority.satisfied or \
1109 not priority.buildtime)
# Aliases naming the same predicates by softness category.
1111 ignore_medium = _ignore_runtime
1112 ignore_medium_soft = _ignore_runtime_post
1113 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Predicates ordered from least to most aggressive, indexed by category.
1115 DepPrioritySatisfiedRange.ignore_priority = (
1117 DepPrioritySatisfiedRange._ignore_optional,
1118 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122 DepPrioritySatisfiedRange._ignore_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_runtime
1126 def _find_deep_system_runtime_deps(graph):
# Collect the set of Package nodes that are system-set members or their
# transitive runtime dependencies, by DFS over the dependency graph.
# Uninstall operations and non-Package nodes are excluded.
1127 deep_system_deps = set()
1130 if not isinstance(node, Package) or \
1131 node.operation == 'uninstall':
# Seed the traversal with packages matched by the 'system' set.
1133 if node.root_config.sets['system'].findAtomForPackage(node):
1134 node_stack.append(node)
1136 def ignore_priority(priority):
1138 Ignore non-runtime priorities.
1140 if isinstance(priority, DepPriority) and \
1141 (priority.runtime or priority.runtime_post):
# Iterative DFS; previously-seen nodes are skipped.
1146 node = node_stack.pop()
1147 if node in deep_system_deps:
1149 deep_system_deps.add(node)
1150 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151 if not isinstance(child, Package) or \
1152 child.operation == 'uninstall':
1154 node_stack.append(child)
1156 return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do its
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
# Always fetch _mtime_ along with the cached aux keys, for validation.
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
# Lock the real vardb only while copying its contents into memory.
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
# Track the highest COUNTER seen per slot atom.
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Initialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
# First aux_get for a pkg refreshes its metadata from the live ebuild
# (or applies global updates as a fallback); later calls are passthrough.
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
# No usable live ebuild: apply cached global updates in memory instead.
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
1274 def sync(self, acquire_lock=1):
1276 Call this method to synchronize state with the real vardb
1277 after one or more packages may have been installed or
1280 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1282 # At least the parent needs to exist for the lock file.
1283 portage.util.ensure_dirs(vdb_path)
1284 except portage.exception.PortageException:
1288 if acquire_lock and os.access(vdb_path, os.W_OK):
1289 vdb_lock = portage.locks.lockdir(vdb_path)
1293 portage.locks.unlockdir(vdb_lock)
1297 real_vardb = self._root_config.trees["vartree"].dbapi
1298 current_cpv_set = frozenset(real_vardb.cpv_all())
1299 pkg_vardb = self.dbapi
1300 aux_get_history = self._aux_get_history
1302 # Remove any packages that have been uninstalled.
1303 for pkg in list(pkg_vardb):
1304 if pkg.cpv not in current_cpv_set:
1305 pkg_vardb.cpv_remove(pkg)
1306 aux_get_history.discard(pkg.cpv)
1308 # Validate counters and timestamps.
1311 validation_keys = ["COUNTER", "_mtime_"]
1312 for cpv in current_cpv_set:
1314 pkg_hash_key = ("installed", root, cpv, "nomerge")
1315 pkg = pkg_vardb.get(pkg_hash_key)
1317 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1319 counter = long(counter)
# Stale cache entry (counter or mtime changed): drop and re-read it.
1323 if counter != pkg.counter or \
1325 pkg_vardb.cpv_remove(pkg)
1326 aux_get_history.discard(pkg.cpv)
1330 pkg = self._pkg(cpv)
1332 other_counter = slot_counters.get(pkg.slot_atom)
1333 if other_counter is not None:
1334 if other_counter > pkg.counter:
1337 slot_counters[pkg.slot_atom] = pkg.counter
1338 pkg_vardb.cpv_inject(pkg)
1340 real_vardb.flush_cache()
1342 def _pkg(self, cpv):
# Build an installed Package instance from the real vardb's metadata.
1343 root_config = self._root_config
1344 real_vardb = root_config.trees["vartree"].dbapi
1345 pkg = Package(cpv=cpv, installed=True,
1346 metadata=izip(self._db_keys,
1347 real_vardb.aux_get(cpv, self._db_keys)),
1348 root_config=root_config,
1349 type_name="installed")
1352 mycounter = long(pkg.metadata["COUNTER"])
1355 pkg.metadata["COUNTER"] = str(mycounter)
1359 def grab_global_updates(portdir):
1360 from portage.update import grab_updates, parse_updates
1361 updpath = os.path.join(portdir, "profiles", "updates")
1363 rawupdates = grab_updates(updpath)
1364 except portage.exception.DirectoryNotFound:
1367 for mykey, mystat, mycontent in rawupdates:
1368 commands, errors = parse_updates(mycontent)
1369 upd_commands.extend(commands)
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373 from portage.update import update_dbentries
1374 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376 updates = update_dbentries(mycommands, aux_dict)
1378 mydb.aux_update(mycpv, updates)
1380 def visible(pkgsettings, pkg):
1382 Check if a package is visible. This can raise an InvalidDependString
1383 exception if LICENSE is invalid.
1384 TODO: optionally generate a list of masking reasons
1386 @returns: True if the package is visible, False otherwise.
1388 if not pkg.metadata["SLOT"]:
1390 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1391 if not pkgsettings._accept_chost(pkg):
1393 eapi = pkg.metadata["EAPI"]
1394 if not portage.eapi_is_supported(eapi):
1396 if not pkg.installed:
1397 if portage._eapi_is_deprecated(eapi):
1399 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1401 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1403 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1406 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1408 except portage.exception.InvalidDependString:
1412 def get_masking_status(pkg, pkgsettings, root_config):
1414 mreasons = portage.getmaskingstatus(
1415 pkg, settings=pkgsettings,
1416 portdb=root_config.trees["porttree"].dbapi)
1418 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1419 if not pkgsettings._accept_chost(pkg):
1420 mreasons.append("CHOST: %s" % \
1421 pkg.metadata["CHOST"])
1423 if not pkg.metadata["SLOT"]:
1424 mreasons.append("invalid: SLOT is undefined")
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429 db, pkg_type, built, installed, db_keys):
1432 metadata = dict(izip(db_keys,
1433 db.aux_get(cpv, db_keys)))
1436 if metadata and not built:
1437 pkgsettings.setcpv(cpv, mydb=metadata)
1438 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439 if metadata is None:
1440 mreasons = ["corruption"]
1442 pkg = Package(type_name=pkg_type, root_config=root_config,
1443 cpv=cpv, built=built, installed=installed, metadata=metadata)
1444 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1445 return metadata, mreasons
1447 def show_masked_packages(masked_packages):
1448 shown_licenses = set()
1449 shown_comments = set()
1450 # Maybe there is both an ebuild and a binary. Only
1451 # show one of them to avoid redundant appearance.
1453 have_eapi_mask = False
1454 for (root_config, pkgsettings, cpv,
1455 metadata, mreasons) in masked_packages:
1456 if cpv in shown_cpvs:
1459 comment, filename = None, None
1460 if "package.mask" in mreasons:
1461 comment, filename = \
1462 portage.getmaskingreason(
1463 cpv, metadata=metadata,
1464 settings=pkgsettings,
1465 portdb=root_config.trees["porttree"].dbapi,
1466 return_location=True)
1467 missing_licenses = []
1469 if not portage.eapi_is_supported(metadata["EAPI"]):
1470 have_eapi_mask = True
1472 missing_licenses = \
1473 pkgsettings._getMissingLicenses(
1475 except portage.exception.InvalidDependString:
1476 # This will have already been reported
1477 # above via mreasons.
1480 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1481 if comment and comment not in shown_comments:
1484 shown_comments.add(comment)
1485 portdb = root_config.trees["porttree"].dbapi
1486 for l in missing_licenses:
1487 l_path = portdb.findLicensePath(l)
1488 if l in shown_licenses:
1490 msg = ("A copy of the '%s' license" + \
1491 " is located at '%s'.") % (l, l_path)
1494 shown_licenses.add(l)
1495 return have_eapi_mask
1497 class Task(SlotObject):
1498 __slots__ = ("_hash_key", "_hash_value")
1500 def _get_hash_key(self):
1501 hash_key = getattr(self, "_hash_key", None)
1502 if hash_key is None:
1503 raise NotImplementedError(self)
def __eq__(self, other):
	"""A Task compares equal to anything that equals its hash key."""
	key = self._get_hash_key()
	return key == other
def __ne__(self, other):
	"""Inverse of __eq__: unequal to anything that differs from the hash key."""
	key = self._get_hash_key()
	return key != other
1513 hash_value = getattr(self, "_hash_value", None)
1514 if hash_value is None:
1515 self._hash_value = hash(self._get_hash_key())
1516 return self._hash_value
1519 return len(self._get_hash_key())
def __getitem__(self, key):
	"""Index into the hash key, letting a Task emulate its key tuple."""
	hash_key = self._get_hash_key()
	return hash_key[key]
1525 return iter(self._get_hash_key())
def __contains__(self, key):
	"""Membership test delegated to the hash key tuple."""
	hash_key = self._get_hash_key()
	return key in hash_key
1531 return str(self._get_hash_key())
1533 class Blocker(Task):
1535 __hash__ = Task.__hash__
1536 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
def __init__(self, **kwargs):
	Task.__init__(self, **kwargs)
	# Cache the category/package ("cp") portion of the blocker atom so
	# blockers can be grouped and looked up by cp without re-parsing.
	self.cp = portage.dep_getkey(self.atom)
1542 def _get_hash_key(self):
1543 hash_key = getattr(self, "_hash_key", None)
1544 if hash_key is None:
1546 ("blocks", self.root, self.atom, self.eapi)
1547 return self._hash_key
1549 class Package(Task):
1551 __hash__ = Task.__hash__
1552 __slots__ = ("built", "cpv", "depth",
1553 "installed", "metadata", "onlydeps", "operation",
1554 "root_config", "type_name",
1555 "category", "counter", "cp", "cpv_split",
1556 "inherited", "iuse", "mtime",
1557 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1560 "CHOST", "COUNTER", "DEPEND", "EAPI",
1561 "INHERITED", "IUSE", "KEYWORDS",
1562 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1563 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
def __init__(self, **kwargs):
	Task.__init__(self, **kwargs)
	self.root = self.root_config.root
	# Wrapping the raw metadata dict triggers the wrapper's _set_* hooks
	# for keys like SLOT/USE/IUSE, which presumably populate the matching
	# Package attributes (e.g. self.slot) as values are stored -- confirm
	# against _PackageMetadataWrapper.__setitem__.
	self.metadata = _PackageMetadataWrapper(self, self.metadata)
	self.cp = portage.cpv_getkey(self.cpv)
	# NOTE(review): self.slot appears to be set by the metadata wrapper
	# above, so this line must stay after the wrapper assignment.
	self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
	self.category, self.pf = portage.catsplit(self.cpv)
	self.cpv_split = portage.catpkgsplit(self.cpv)
	# Drop the leading category element, keeping the version components.
	self.pv_split = self.cpv_split[1:]
1577 __slots__ = ("__weakref__", "enabled")
def __init__(self, use):
	# Enabled USE flags, stored immutably for safe sharing and hashing.
	self.enabled = frozenset(use)
1582 class _iuse(object):
1584 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1586 def __init__(self, tokens, iuse_implicit):
1587 self.tokens = tuple(tokens)
1588 self.iuse_implicit = iuse_implicit
1595 enabled.append(x[1:])
1597 disabled.append(x[1:])
1600 self.enabled = frozenset(enabled)
1601 self.disabled = frozenset(disabled)
1602 self.all = frozenset(chain(enabled, disabled, other))
1604 def __getattribute__(self, name):
1607 return object.__getattribute__(self, "regex")
1608 except AttributeError:
1609 all = object.__getattribute__(self, "all")
1610 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1611 # Escape anything except ".*" which is supposed
1612 # to pass through from _get_implicit_iuse()
1613 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1614 regex = "^(%s)$" % "|".join(regex)
1615 regex = regex.replace("\\.\\*", ".*")
1616 self.regex = re.compile(regex)
1617 return object.__getattribute__(self, name)
1619 def _get_hash_key(self):
1620 hash_key = getattr(self, "_hash_key", None)
1621 if hash_key is None:
1622 if self.operation is None:
1623 self.operation = "merge"
1624 if self.onlydeps or self.installed:
1625 self.operation = "nomerge"
1627 (self.type_name, self.root, self.cpv, self.operation)
1628 return self._hash_key
1630 def __lt__(self, other):
1631 if other.cp != self.cp:
1633 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1637 def __le__(self, other):
1638 if other.cp != self.cp:
1640 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1644 def __gt__(self, other):
1645 if other.cp != self.cp:
1647 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1651 def __ge__(self, other):
1652 if other.cp != self.cp:
1654 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Every auxdb metadata key portage knows about, minus historical
# placeholders, plus whatever Package itself requires.
_all_metadata_keys = set(
	x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1666 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1668 Detect metadata updates and synchronize Package attributes.
1671 __slots__ = ("_pkg",)
1672 _wrapped_keys = frozenset(
1673 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1675 def __init__(self, pkg, metadata):
1676 _PackageMetadataWrapperBase.__init__(self)
1678 self.update(metadata)
def __setitem__(self, k, v):
	"""Store the value, then fire the matching _set_<key> hook for
	wrapped keys so the owning Package's attributes stay in sync."""
	_PackageMetadataWrapperBase.__setitem__(self, k, v)
	if k in self._wrapped_keys:
		handler = getattr(self, "_set_" + k.lower())
		handler(k, v)
def _set_inherited(self, k, v):
	"""Sync pkg.inherited; a string value is split into a frozenset."""
	value = v
	if isinstance(value, basestring):
		# INHERITED arrives as a whitespace-separated eclass list.
		value = frozenset(value.split())
	self._pkg.inherited = value
def _set_iuse(self, k, v):
	"""Rebuild pkg.iuse from the IUSE string plus the implicit IUSE set."""
	pkg = self._pkg
	pkg.iuse = pkg._iuse(v.split(), pkg.root_config.iuse_implicit)
1694 def _set_slot(self, k, v):
def _set_use(self, k, v):
	"""Refresh pkg.use from a whitespace-separated USE string."""
	pkg = self._pkg
	pkg.use = pkg._use(v.split())
1700 def _set_counter(self, k, v):
1701 if isinstance(v, basestring):
1706 self._pkg.counter = v
1708 def _set__mtime_(self, k, v):
1709 if isinstance(v, basestring):
1716 class EbuildFetchonly(SlotObject):
1718 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1721 settings = self.settings
1723 portdb = pkg.root_config.trees["porttree"].dbapi
1724 ebuild_path = portdb.findname(pkg.cpv)
1725 settings.setcpv(pkg)
1726 debug = settings.get("PORTAGE_DEBUG") == "1"
1727 use_cache = 1 # always true
1728 portage.doebuild_environment(ebuild_path, "fetch",
1729 settings["ROOT"], settings, debug, use_cache, portdb)
1730 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1733 rval = self._execute_with_builddir()
1735 rval = portage.doebuild(ebuild_path, "fetch",
1736 settings["ROOT"], settings, debug=debug,
1737 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1738 mydbapi=portdb, tree="porttree")
1740 if rval != os.EX_OK:
1741 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1742 eerror(msg, phase="unpack", key=pkg.cpv)
1746 def _execute_with_builddir(self):
1747 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1748 # ensuring sane $PWD (bug #239560) and storing elog
1749 # messages. Use a private temp directory, in order
1750 # to avoid locking the main one.
1751 settings = self.settings
1752 global_tmpdir = settings["PORTAGE_TMPDIR"]
1753 from tempfile import mkdtemp
1755 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1757 if e.errno != portage.exception.PermissionDenied.errno:
1759 raise portage.exception.PermissionDenied(global_tmpdir)
1760 settings["PORTAGE_TMPDIR"] = private_tmpdir
1761 settings.backup_changes("PORTAGE_TMPDIR")
1763 retval = self._execute()
1765 settings["PORTAGE_TMPDIR"] = global_tmpdir
1766 settings.backup_changes("PORTAGE_TMPDIR")
1767 shutil.rmtree(private_tmpdir)
1771 settings = self.settings
1773 root_config = pkg.root_config
1774 portdb = root_config.trees["porttree"].dbapi
1775 ebuild_path = portdb.findname(pkg.cpv)
1776 debug = settings.get("PORTAGE_DEBUG") == "1"
1777 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1779 retval = portage.doebuild(ebuild_path, "fetch",
1780 self.settings["ROOT"], self.settings, debug=debug,
1781 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1782 mydbapi=portdb, tree="porttree")
1784 if retval != os.EX_OK:
1785 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1786 eerror(msg, phase="unpack", key=pkg.cpv)
1788 portage.elog.elog_process(self.pkg.cpv, self.settings)
1791 class PollConstants(object):
1794 Provides POLL* constants that are equivalent to those from the
1795 select module, for use by PollSelectAdapter.
1798 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1801 locals()[k] = getattr(select, k, v)
1805 class AsynchronousTask(SlotObject):
1807 Subclasses override _wait() and _poll() so that calls
1808 to public methods can be wrapped for implementing
1809 hooks such as exit listener notification.
1811 Sublasses should call self.wait() to notify exit listeners after
1812 the task is complete and self.returncode has been set.
1815 __slots__ = ("background", "cancelled", "returncode") + \
1816 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1820 Start an asynchronous task and then return as soon as possible.
1826 raise NotImplementedError(self)
1829 return self.returncode is None
1836 return self.returncode
1839 if self.returncode is None:
1842 return self.returncode
1845 return self.returncode
1848 self.cancelled = True
1851 def addStartListener(self, f):
1853 The function will be called with one argument, a reference to self.
1855 if self._start_listeners is None:
1856 self._start_listeners = []
1857 self._start_listeners.append(f)
1859 def removeStartListener(self, f):
1860 if self._start_listeners is None:
1862 self._start_listeners.remove(f)
1864 def _start_hook(self):
1865 if self._start_listeners is not None:
1866 start_listeners = self._start_listeners
1867 self._start_listeners = None
1869 for f in start_listeners:
1872 def addExitListener(self, f):
1874 The function will be called with one argument, a reference to self.
1876 if self._exit_listeners is None:
1877 self._exit_listeners = []
1878 self._exit_listeners.append(f)
1880 def removeExitListener(self, f):
1881 if self._exit_listeners is None:
1882 if self._exit_listener_stack is not None:
1883 self._exit_listener_stack.remove(f)
1885 self._exit_listeners.remove(f)
1887 def _wait_hook(self):
1889 Call this method after the task completes, just before returning
1890 the returncode from wait() or poll(). This hook is
1891 used to trigger exit listeners when the returncode first
1894 if self.returncode is not None and \
1895 self._exit_listeners is not None:
1897 # This prevents recursion, in case one of the
1898 # exit handlers triggers this method again by
1899 # calling wait(). Use a stack that gives
1900 # removeExitListener() an opportunity to consume
1901 # listeners from the stack, before they can get
1902 # called below. This is necessary because a call
1903 # to one exit listener may result in a call to
1904 # removeExitListener() for another listener on
1905 # the stack. That listener needs to be removed
1906 # from the stack since it would be inconsistent
1907 # to call it after it has been been passed into
1908 # removeExitListener().
1909 self._exit_listener_stack = self._exit_listeners
1910 self._exit_listeners = None
1912 self._exit_listener_stack.reverse()
1913 while self._exit_listener_stack:
1914 self._exit_listener_stack.pop()(self)
1916 class AbstractPollTask(AsynchronousTask):
1918 __slots__ = ("scheduler",) + \
1922 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1923 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
def _unregister(self):
	# Abstract hook: subclasses must release their scheduler
	# registrations (and close any files) here.
	raise NotImplementedError(self)
1929 def _unregister_if_appropriate(self, event):
1930 if self._registered:
1931 if event & self._exceptional_events:
1934 elif event & PollConstants.POLLHUP:
1938 class PipeReader(AbstractPollTask):
1941 Reads output from one or more files and saves it in memory,
1942 for retrieval via the getvalue() method. This is driven by
1943 the scheduler's poll() loop, so it runs entirely within the
1947 __slots__ = ("input_files",) + \
1948 ("_read_data", "_reg_ids")
1951 self._reg_ids = set()
1952 self._read_data = []
1953 for k, f in self.input_files.iteritems():
1954 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1955 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1956 self._reg_ids.add(self.scheduler.register(f.fileno(),
1957 self._registered_events, self._output_handler))
1958 self._registered = True
1961 return self._registered
1964 if self.returncode is None:
1966 self.cancelled = True
1970 if self.returncode is not None:
1971 return self.returncode
1973 if self._registered:
1974 self.scheduler.schedule(self._reg_ids)
1977 self.returncode = os.EX_OK
1978 return self.returncode
1981 """Retrieve the entire contents"""
1982 if sys.hexversion >= 0x3000000:
1983 return bytes().join(self._read_data)
1984 return "".join(self._read_data)
1987 """Free the memory buffer."""
1988 self._read_data = None
1990 def _output_handler(self, fd, event):
1992 if event & PollConstants.POLLIN:
1994 for f in self.input_files.itervalues():
1995 if fd == f.fileno():
1998 buf = array.array('B')
2000 buf.fromfile(f, self._bufsize)
2005 self._read_data.append(buf.tostring())
2010 self._unregister_if_appropriate(event)
2011 return self._registered
2013 def _unregister(self):
2015 Unregister from the scheduler and close open files.
2018 self._registered = False
2020 if self._reg_ids is not None:
2021 for reg_id in self._reg_ids:
2022 self.scheduler.unregister(reg_id)
2023 self._reg_ids = None
2025 if self.input_files is not None:
2026 for f in self.input_files.itervalues():
2028 self.input_files = None
2030 class CompositeTask(AsynchronousTask):
2032 __slots__ = ("scheduler",) + ("_current_task",)
2035 return self._current_task is not None
2038 self.cancelled = True
2039 if self._current_task is not None:
2040 self._current_task.cancel()
2044 This does a loop calling self._current_task.poll()
2045 repeatedly as long as the value of self._current_task
2046 keeps changing. It calls poll() a maximum of one time
2047 for a given self._current_task instance. This is useful
2048 since calling poll() on a task can trigger advance to
2049 the next task could eventually lead to the returncode
2050 being set in cases when polling only a single task would
2051 not have the same effect.
2056 task = self._current_task
2057 if task is None or task is prev:
2058 # don't poll the same task more than once
2063 return self.returncode
2069 task = self._current_task
2071 # don't wait for the same task more than once
2074 # Before the task.wait() method returned, an exit
2075 # listener should have set self._current_task to either
2076 # a different task or None. Something is wrong.
2077 raise AssertionError("self._current_task has not " + \
2078 "changed since calling wait", self, task)
2082 return self.returncode
2084 def _assert_current(self, task):
2086 Raises an AssertionError if the given task is not the
2087 same one as self._current_task. This can be useful
2090 if task is not self._current_task:
2091 raise AssertionError("Unrecognized task: %s" % (task,))
2093 def _default_exit(self, task):
2095 Calls _assert_current() on the given task and then sets the
2096 composite returncode attribute if task.returncode != os.EX_OK.
2097 If the task failed then self._current_task will be set to None.
2098 Subclasses can use this as a generic task exit callback.
2101 @returns: The task.returncode attribute.
2103 self._assert_current(task)
2104 if task.returncode != os.EX_OK:
2105 self.returncode = task.returncode
2106 self._current_task = None
2107 return task.returncode
2109 def _final_exit(self, task):
2111 Assumes that task is the final task of this composite task.
2112 Calls _default_exit() and sets self.returncode to the task's
2113 returncode and sets self._current_task to None.
2115 self._default_exit(task)
2116 self._current_task = None
2117 self.returncode = task.returncode
2118 return self.returncode
2120 def _default_final_exit(self, task):
2122 This calls _final_exit() and then wait().
2124 Subclasses can use this as a generic final task exit callback.
2127 self._final_exit(task)
2130 def _start_task(self, task, exit_handler):
2132 Register exit handler for the given task, set it
2133 as self._current_task, and call task.start().
2135 Subclasses can use this as a generic way to start
2139 task.addExitListener(exit_handler)
2140 self._current_task = task
2143 class TaskSequence(CompositeTask):
2145 A collection of tasks that executes sequentially. Each task
2146 must have a addExitListener() method that can be used as
2147 a means to trigger movement from one task to the next.
2150 __slots__ = ("_task_queue",)
def __init__(self, **kwargs):
	AsynchronousTask.__init__(self, **kwargs)
	# deque gives O(1) popleft() since tasks are consumed FIFO.
	self._task_queue = deque()
def add(self, task):
	# Queue the task; it runs when all previously added tasks finish.
	self._task_queue.append(task)
2160 self._start_next_task()
2163 self._task_queue.clear()
2164 CompositeTask.cancel(self)
def _start_next_task(self):
	"""Dequeue the next task and start it with our exit handler attached."""
	next_task = self._task_queue.popleft()
	self._start_task(next_task, self._task_exit_handler)
2170 def _task_exit_handler(self, task):
2171 if self._default_exit(task) != os.EX_OK:
2173 elif self._task_queue:
2174 self._start_next_task()
2176 self._final_exit(task)
2179 class SubProcess(AbstractPollTask):
2181 __slots__ = ("pid",) + \
2182 ("_files", "_reg_id")
2184 # A file descriptor is required for the scheduler to monitor changes from
2185 # inside a poll() loop. When logging is not enabled, create a pipe just to
2186 # serve this purpose alone.
2190 if self.returncode is not None:
2191 return self.returncode
2192 if self.pid is None:
2193 return self.returncode
2194 if self._registered:
2195 return self.returncode
2198 retval = os.waitpid(self.pid, os.WNOHANG)
2200 if e.errno != errno.ECHILD:
2203 retval = (self.pid, 1)
2205 if retval == (0, 0):
2207 self._set_returncode(retval)
2208 return self.returncode
2213 os.kill(self.pid, signal.SIGTERM)
2215 if e.errno != errno.ESRCH:
2219 self.cancelled = True
2220 if self.pid is not None:
2222 return self.returncode
2225 return self.pid is not None and \
2226 self.returncode is None
2230 if self.returncode is not None:
2231 return self.returncode
2233 if self._registered:
2234 self.scheduler.schedule(self._reg_id)
2236 if self.returncode is not None:
2237 return self.returncode
2240 wait_retval = os.waitpid(self.pid, 0)
2242 if e.errno != errno.ECHILD:
2245 self._set_returncode((self.pid, 1))
2247 self._set_returncode(wait_retval)
2249 return self.returncode
2251 def _unregister(self):
2253 Unregister from the scheduler and close open files.
2256 self._registered = False
2258 if self._reg_id is not None:
2259 self.scheduler.unregister(self._reg_id)
2262 if self._files is not None:
2263 for f in self._files.itervalues():
2267 def _set_returncode(self, wait_retval):
2269 retval = wait_retval[1]
2271 if retval != os.EX_OK:
2273 retval = (retval & 0xff) << 8
2275 retval = retval >> 8
2277 self.returncode = retval
2279 class SpawnProcess(SubProcess):
2282 Constructor keyword args are passed into portage.process.spawn().
2283 The required "args" keyword argument will be passed as the first
2287 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2288 "uid", "gid", "groups", "umask", "logfile",
2289 "path_lookup", "pre_exec")
2291 __slots__ = ("args",) + \
2294 _file_names = ("log", "process", "stdout")
2295 _files_dict = slot_dict_class(_file_names, prefix="")
2302 if self.fd_pipes is None:
2304 fd_pipes = self.fd_pipes
2305 fd_pipes.setdefault(0, sys.stdin.fileno())
2306 fd_pipes.setdefault(1, sys.stdout.fileno())
2307 fd_pipes.setdefault(2, sys.stderr.fileno())
2309 # flush any pending output
2310 for fd in fd_pipes.itervalues():
2311 if fd == sys.stdout.fileno():
2313 if fd == sys.stderr.fileno():
2316 logfile = self.logfile
2317 self._files = self._files_dict()
2320 master_fd, slave_fd = self._pipe(fd_pipes)
2321 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2322 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2325 fd_pipes_orig = fd_pipes.copy()
2327 # TODO: Use job control functions like tcsetpgrp() to control
2328 # access to stdin. Until then, use /dev/null so that any
2329 # attempts to read from stdin will immediately return EOF
2330 # instead of blocking indefinitely.
2331 null_input = open('/dev/null', 'rb')
2332 fd_pipes[0] = null_input.fileno()
2334 fd_pipes[0] = fd_pipes_orig[0]
2336 files.process = os.fdopen(master_fd, 'rb')
2337 if logfile is not None:
2339 fd_pipes[1] = slave_fd
2340 fd_pipes[2] = slave_fd
2342 files.log = open(logfile, mode='ab')
2343 portage.util.apply_secpass_permissions(logfile,
2344 uid=portage.portage_uid, gid=portage.portage_gid,
2347 if not self.background:
2348 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2350 output_handler = self._output_handler
2354 # Create a dummy pipe so the scheduler can monitor
2355 # the process from inside a poll() loop.
2356 fd_pipes[self._dummy_pipe_fd] = slave_fd
2358 fd_pipes[1] = slave_fd
2359 fd_pipes[2] = slave_fd
2360 output_handler = self._dummy_handler
2363 for k in self._spawn_kwarg_names:
2364 v = getattr(self, k)
2368 kwargs["fd_pipes"] = fd_pipes
2369 kwargs["returnpid"] = True
2370 kwargs.pop("logfile", None)
2372 self._reg_id = self.scheduler.register(files.process.fileno(),
2373 self._registered_events, output_handler)
2374 self._registered = True
2376 retval = self._spawn(self.args, **kwargs)
2379 if null_input is not None:
2382 if isinstance(retval, int):
2385 self.returncode = retval
2389 self.pid = retval[0]
2390 portage.process.spawned_pids.remove(self.pid)
2392 def _pipe(self, fd_pipes):
2394 @type fd_pipes: dict
2395 @param fd_pipes: pipes from which to copy terminal size if desired.
def _spawn(self, args, **kwargs):
	# Thin hook so subclasses can customize how the process is spawned
	# (e.g. MiscFunctionsProcess routes through portage.spawn instead).
	return portage.process.spawn(args, **kwargs)
2402 def _output_handler(self, fd, event):
2404 if event & PollConstants.POLLIN:
2407 buf = array.array('B')
2409 buf.fromfile(files.process, self._bufsize)
2414 if not self.background:
2415 buf.tofile(files.stdout)
2416 files.stdout.flush()
2417 buf.tofile(files.log)
2423 self._unregister_if_appropriate(event)
2424 return self._registered
2426 def _dummy_handler(self, fd, event):
2428 This method is mainly interested in detecting EOF, since
2429 the only purpose of the pipe is to allow the scheduler to
2430 monitor the process from inside a poll() loop.
2433 if event & PollConstants.POLLIN:
2435 buf = array.array('B')
2437 buf.fromfile(self._files.process, self._bufsize)
2447 self._unregister_if_appropriate(event)
2448 return self._registered
2450 class MiscFunctionsProcess(SpawnProcess):
2452 Spawns misc-functions.sh with an existing ebuild environment.
2455 __slots__ = ("commands", "phase", "pkg", "settings")
2458 settings = self.settings
2459 settings.pop("EBUILD_PHASE", None)
2460 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2461 misc_sh_binary = os.path.join(portage_bin_path,
2462 os.path.basename(portage.const.MISC_SH_BINARY))
2464 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2465 self.logfile = settings.get("PORTAGE_LOG_FILE")
2467 portage._doebuild_exit_status_unlink(
2468 settings.get("EBUILD_EXIT_STATUS_FILE"))
2470 SpawnProcess._start(self)
def _spawn(self, args, **kwargs):
	"""Run the joined command line via portage.spawn(), honoring
	PORTAGE_DEBUG from the build settings."""
	debug = self.settings.get("PORTAGE_DEBUG") == "1"
	cmd = " ".join(args)
	return portage.spawn(cmd, self.settings, debug=debug, **kwargs)
def _set_returncode(self, wait_retval):
	"""Decode the wait() status, then adjust the returncode according to
	the ebuild exit-status file check (which also logs failures)."""
	SpawnProcess._set_returncode(self, wait_retval)
	checked = portage._doebuild_exit_status_check_and_log(
		self.settings, self.phase, self.returncode)
	self.returncode = checked
2483 class EbuildFetcher(SpawnProcess):
2485 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2490 root_config = self.pkg.root_config
2491 portdb = root_config.trees["porttree"].dbapi
2492 ebuild_path = portdb.findname(self.pkg.cpv)
2493 settings = self.config_pool.allocate()
2494 settings.setcpv(self.pkg)
2496 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2497 # should not be touched since otherwise it could interfere with
2498 # another instance of the same cpv concurrently being built for a
2499 # different $ROOT (currently, builds only cooperate with prefetchers
2500 # that are spawned for the same $ROOT).
2501 if not self.prefetch:
2502 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2503 self._build_dir.lock()
2504 self._build_dir.clean()
2505 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2506 if self.logfile is None:
2507 self.logfile = settings.get("PORTAGE_LOG_FILE")
2513 # If any incremental variables have been overridden
2514 # via the environment, those values need to be passed
2515 # along here so that they are correctly considered by
2516 # the config instance in the subproccess.
2517 fetch_env = os.environ.copy()
2519 nocolor = settings.get("NOCOLOR")
2520 if nocolor is not None:
2521 fetch_env["NOCOLOR"] = nocolor
2523 fetch_env["PORTAGE_NICENESS"] = "0"
2525 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2527 ebuild_binary = os.path.join(
2528 settings["PORTAGE_BIN_PATH"], "ebuild")
2530 fetch_args = [ebuild_binary, ebuild_path, phase]
2531 debug = settings.get("PORTAGE_DEBUG") == "1"
2533 fetch_args.append("--debug")
2535 self.args = fetch_args
2536 self.env = fetch_env
2537 SpawnProcess._start(self)
2539 def _pipe(self, fd_pipes):
2540 """When appropriate, use a pty so that fetcher progress bars,
2541 like wget has, will work properly."""
2542 if self.background or not sys.stdout.isatty():
2543 # When the output only goes to a log file,
2544 # there's no point in creating a pty.
2546 stdout_pipe = fd_pipes.get(1)
2547 got_pty, master_fd, slave_fd = \
2548 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2549 return (master_fd, slave_fd)
# NOTE(review): excerpt is line-sampled; several interior lines (e.g. 2558,
# 2560, 2570 — likely the `elog_out = None` initialization and the close of
# the log file) are elided. Code lines kept verbatim.
2551 def _set_returncode(self, wait_retval):
2552 SpawnProcess._set_returncode(self, wait_retval)
2553 # Collect elog messages that might have been
2554 # created by the pkg_nofetch phase.
2555 if self._build_dir is not None:
2556 # Skip elog messages for prefetch, in order to avoid duplicates.
2557 if not self.prefetch and self.returncode != os.EX_OK:
2559 if self.logfile is not None:
# Append the failure message to the existing fetch log so the
# eerror output lands next to the fetcher's own output.
2561 elog_out = open(self.logfile, 'a')
2562 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2563 if self.logfile is not None:
2564 msg += ", Log file:"
2565 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2566 if self.logfile is not None:
2567 eerror(" '%s'" % (self.logfile,),
2568 phase="unpack", key=self.pkg.cpv, out=elog_out)
2569 if elog_out is not None:
# For non-prefetch runs, flush collected elog messages now.
2571 if not self.prefetch:
2572 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2573 features = self._build_dir.settings.features
# On success the build dir is cleaned, then always unlocked and its
# config instance returned to the pool.
2574 if self.returncode == os.EX_OK:
2575 self._build_dir.clean()
2576 self._build_dir.unlock()
2577 self.config_pool.deallocate(self._build_dir.settings)
2578 self._build_dir = None
# EbuildBuildDir: manages locking and cleanup of a package's
# $PORTAGE_BUILDDIR, plus a short-lived lock on the parent category dir
# while directories are created.
# NOTE(review): excerpt is line-sampled; the `def lock`, `def clean` and
# `def unlock` lines themselves (and several try/except frames) are elided.
# Code lines kept verbatim.
2580 class EbuildBuildDir(SlotObject):
2582 __slots__ = ("dir_path", "pkg", "settings",
2583 "locked", "_catdir", "_lock_obj")
2585 def __init__(self, **kwargs):
2586 SlotObject.__init__(self, **kwargs)
2591 This raises an AlreadyLocked exception if lock() is called
2592 while a lock is already held. In order to avoid this, call
2593 unlock() or check whether the "locked" attribute is True
2594 or False before calling lock().
2596 if self._lock_obj is not None:
2597 raise self.AlreadyLocked((self._lock_obj,))
2599 dir_path = self.dir_path
2600 if dir_path is None:
# Derive PORTAGE_BUILDDIR by running the "setup" phase environment
# preparation against the ebuild found in the porttree.
2601 root_config = self.pkg.root_config
2602 portdb = root_config.trees["porttree"].dbapi
2603 ebuild_path = portdb.findname(self.pkg.cpv)
2604 settings = self.settings
2605 settings.setcpv(self.pkg)
2606 debug = settings.get("PORTAGE_DEBUG") == "1"
2607 use_cache = 1 # always true
2608 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2609 self.settings, debug, use_cache, portdb)
2610 dir_path = self.settings["PORTAGE_BUILDDIR"]
2612 catdir = os.path.dirname(dir_path)
2613 self._catdir = catdir
2615 portage.util.ensure_dirs(os.path.dirname(catdir),
2616 gid=portage.portage_gid,
# The category dir is locked only while it is being created and the
# builddir lock acquired, then released below.
2620 catdir_lock = portage.locks.lockdir(catdir)
2621 portage.util.ensure_dirs(catdir,
2622 gid=portage.portage_gid,
2624 self._lock_obj = portage.locks.lockdir(dir_path)
2626 self.locked = self._lock_obj is not None
2627 if catdir_lock is not None:
2628 portage.locks.unlockdir(catdir_lock)
2631 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2632 by keepwork or keeptemp in FEATURES."""
2633 settings = self.settings
2634 features = settings.features
2635 if not ("keepwork" in features or "keeptemp" in features):
2637 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2638 except EnvironmentError, e:
# A missing builddir is not an error; anything else propagates.
2639 if e.errno != errno.ENOENT:
2644 if self._lock_obj is None:
2647 portage.locks.unlockdir(self._lock_obj)
2648 self._lock_obj = None
# After releasing the builddir lock, attempt a best-effort removal of
# the (possibly now empty) category dir under its own lock.
2651 catdir = self._catdir
2654 catdir_lock = portage.locks.lockdir(catdir)
2660 if e.errno not in (errno.ENOENT,
2661 errno.ENOTEMPTY, errno.EEXIST):
2664 portage.locks.unlockdir(catdir_lock)
2666 class AlreadyLocked(portage.exception.PortageException):
# EbuildBuild: composite task driving a full from-source build — wait for
# any background prefetcher, fetch, build (EbuildExecuter), optionally
# package (EbuildBinpkg), then merge.
# NOTE(review): excerpt is line-sampled; many `def` lines and branch frames
# are elided (e.g. the `_start` def, try/finally around merge.execute()).
# Code lines kept verbatim.
2669 class EbuildBuild(CompositeTask):
2671 __slots__ = ("args_set", "config_pool", "find_blockers",
2672 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2673 "prefetcher", "settings", "world_atom") + \
2674 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2678 logger = self.logger
2681 settings = self.settings
2682 world_atom = self.world_atom
2683 root_config = pkg.root_config
2686 portdb = root_config.trees[tree].dbapi
2687 settings.setcpv(pkg)
2688 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2689 ebuild_path = portdb.findname(self.pkg.cpv)
2690 self._ebuild_path = ebuild_path
# If a prefetcher is still running it holds locks on the files being
# fetched, so register an exit listener instead of starting our own
# fetch immediately.
2692 prefetcher = self.prefetcher
2693 if prefetcher is None:
2695 elif not prefetcher.isAlive():
2697 elif prefetcher.poll() is None:
2699 waiting_msg = "Fetching files " + \
2700 "in the background. " + \
2701 "To view fetch progress, run `tail -f " + \
2702 "/var/log/emerge-fetch.log` in another " + \
2704 msg_prefix = colorize("GOOD", " * ")
2705 from textwrap import wrap
2706 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2707 for line in wrap(waiting_msg, 65))
2708 if not self.background:
2709 writemsg(waiting_msg, noiselevel=-1)
2711 self._current_task = prefetcher
2712 prefetcher.addExitListener(self._prefetch_exit)
2715 self._prefetch_exit(prefetcher)
2717 def _prefetch_exit(self, prefetcher):
2721 settings = self.settings
# Pretend/fetchonly uses the synchronous EbuildFetchonly path;
# otherwise an asynchronous EbuildFetcher task is started.
2724 fetcher = EbuildFetchonly(
2725 fetch_all=opts.fetch_all_uri,
2726 pkg=pkg, pretend=opts.pretend,
2728 retval = fetcher.execute()
2729 self.returncode = retval
2733 fetcher = EbuildFetcher(config_pool=self.config_pool,
2734 fetchall=opts.fetch_all_uri,
2735 fetchonly=opts.fetchonly,
2736 background=self.background,
2737 pkg=pkg, scheduler=self.scheduler)
2739 self._start_task(fetcher, self._fetch_exit)
2741 def _fetch_exit(self, fetcher):
2745 fetch_failed = False
2747 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2749 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2751 if fetch_failed and fetcher.logfile is not None and \
2752 os.path.exists(fetcher.logfile):
2753 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2755 if not fetch_failed and fetcher.logfile is not None:
2756 # Fetch was successful, so remove the fetch log.
2758 os.unlink(fetcher.logfile)
2762 if fetch_failed or opts.fetchonly:
2766 logger = self.logger
2768 pkg_count = self.pkg_count
2769 scheduler = self.scheduler
2770 settings = self.settings
2771 features = settings.features
2772 ebuild_path = self._ebuild_path
2773 system_set = pkg.root_config.sets["system"]
# Hold the build dir lock for the rest of the build; released via
# _unlock_builddir() on failure or after merge.
2775 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2776 self._build_dir.lock()
2778 # Cleaning is triggered before the setup
2779 # phase, in portage.doebuild().
2780 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2781 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2782 short_msg = "emerge: (%s of %s) %s Clean" % \
2783 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2784 logger.log(msg, short_msg=short_msg)
2786 #buildsyspkg: Check if we need to _force_ binary package creation
2787 self._issyspkg = "buildsyspkg" in features and \
2788 system_set.findAtomForPackage(pkg) and \
2791 if opts.buildpkg or self._issyspkg:
2793 self._buildpkg = True
2795 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2796 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2797 short_msg = "emerge: (%s of %s) %s Compile" % \
2798 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2799 logger.log(msg, short_msg=short_msg)
2802 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2803 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2804 short_msg = "emerge: (%s of %s) %s Compile" % \
2805 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2806 logger.log(msg, short_msg=short_msg)
2808 build = EbuildExecuter(background=self.background, pkg=pkg,
2809 scheduler=scheduler, settings=settings)
2810 self._start_task(build, self._build_exit)
2812 def _unlock_builddir(self):
# Flush pending elog messages before releasing the build dir lock.
2813 portage.elog.elog_process(self.pkg.cpv, self.settings)
2814 self._build_dir.unlock()
2816 def _build_exit(self, build):
2817 if self._default_exit(build) != os.EX_OK:
2818 self._unlock_builddir()
2823 buildpkg = self._buildpkg
2826 self._final_exit(build)
2831 msg = ">>> This is a system package, " + \
2832 "let's pack a rescue tarball.\n"
2834 log_path = self.settings.get("PORTAGE_LOG_FILE")
2835 if log_path is not None:
2836 log_file = open(log_path, 'a')
2842 if not self.background:
2843 portage.writemsg_stdout(msg, noiselevel=-1)
2845 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2846 scheduler=self.scheduler, settings=self.settings)
2848 self._start_task(packager, self._buildpkg_exit)
2850 def _buildpkg_exit(self, packager):
2852 Released build dir lock when there is a failure or
2853 when in buildpkgonly mode. Otherwise, the lock will
2854 be released when merge() is called.
2857 if self._default_exit(packager) != os.EX_OK:
2858 self._unlock_builddir()
2862 if self.opts.buildpkgonly:
2863 # Need to call "clean" phase for buildpkgonly mode
2864 portage.elog.elog_process(self.pkg.cpv, self.settings)
2866 clean_phase = EbuildPhase(background=self.background,
2867 pkg=self.pkg, phase=phase,
2868 scheduler=self.scheduler, settings=self.settings,
2870 self._start_task(clean_phase, self._clean_exit)
2873 # Continue holding the builddir lock until
2874 # after the package has been installed.
2875 self._current_task = None
2876 self.returncode = packager.returncode
2879 def _clean_exit(self, clean_phase):
2880 if self._final_exit(clean_phase) != os.EX_OK or \
2881 self.opts.buildpkgonly:
2882 self._unlock_builddir()
2887 Install the package and then clean up and release locks.
2888 Only call this after the build has completed successfully
2889 and neither fetchonly nor buildpkgonly mode are enabled.
2892 find_blockers = self.find_blockers
2893 ldpath_mtimes = self.ldpath_mtimes
2894 logger = self.logger
2896 pkg_count = self.pkg_count
2897 settings = self.settings
2898 world_atom = self.world_atom
2899 ebuild_path = self._ebuild_path
2902 merge = EbuildMerge(find_blockers=self.find_blockers,
2903 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2904 pkg_count=pkg_count, pkg_path=ebuild_path,
2905 scheduler=self.scheduler,
2906 settings=settings, tree=tree, world_atom=world_atom)
2908 msg = " === (%s of %s) Merging (%s::%s)" % \
2909 (pkg_count.curval, pkg_count.maxval,
2910 pkg.cpv, ebuild_path)
2911 short_msg = "emerge: (%s of %s) %s Merge" % \
2912 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2913 logger.log(msg, short_msg=short_msg)
# NOTE(review): elided lines 2914-2921 presumably wrap execute() in
# try/finally so the builddir is always unlocked — TODO confirm.
2916 rval = merge.execute()
2918 self._unlock_builddir()
# EbuildExecuter: runs the build-phase sequence for one ebuild —
# clean, setup, unpack, then the remaining src_* phases as a TaskSequence.
# NOTE(review): excerpt is line-sampled; some `def` lines and branches are
# elided. Code lines kept verbatim.
2922 class EbuildExecuter(CompositeTask):
2924 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2926 _phases = ("prepare", "configure", "compile", "test", "install")
2928 _live_eclasses = frozenset([
2938 self._tree = "porttree"
2941 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2942 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2943 self._start_task(clean_phase, self._clean_phase_exit)
2945 def _clean_phase_exit(self, clean_phase):
2947 if self._default_exit(clean_phase) != os.EX_OK:
2952 scheduler = self.scheduler
2953 settings = self.settings
2956 # This initializes PORTAGE_LOG_FILE.
2957 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# The setup phase is scheduled through scheduleSetup() rather than
# _start_task(), so the scheduler can serialize setup phases.
2959 setup_phase = EbuildPhase(background=self.background,
2960 pkg=pkg, phase="setup", scheduler=scheduler,
2961 settings=settings, tree=self._tree)
2963 setup_phase.addExitListener(self._setup_exit)
2964 self._current_task = setup_phase
2965 self.scheduler.scheduleSetup(setup_phase)
2967 def _setup_exit(self, setup_phase):
2969 if self._default_exit(setup_phase) != os.EX_OK:
2973 unpack_phase = EbuildPhase(background=self.background,
2974 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2975 settings=self.settings, tree=self._tree)
2977 if self._live_eclasses.intersection(self.pkg.inherited):
2978 # Serialize $DISTDIR access for live ebuilds since
2979 # otherwise they can interfere with eachother.
2981 unpack_phase.addExitListener(self._unpack_exit)
2982 self._current_task = unpack_phase
2983 self.scheduler.scheduleUnpack(unpack_phase)
2986 self._start_task(unpack_phase, self._unpack_exit)
2988 def _unpack_exit(self, unpack_phase):
2990 if self._default_exit(unpack_phase) != os.EX_OK:
2994 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2997 phases = self._phases
2998 eapi = pkg.metadata["EAPI"]
2999 if eapi in ("0", "1"):
3000 # skip src_prepare and src_configure
# NOTE(review): the slice dropping the first two phases for EAPI 0/1
# is on an elided line — TODO confirm.
3003 for phase in phases:
3004 ebuild_phases.add(EbuildPhase(background=self.background,
3005 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3006 settings=self.settings, tree=self._tree))
3008 self._start_task(ebuild_phases, self._default_final_exit)
# NOTE(review): excerpt is line-sampled; `def _start` and several frames
# are elided. Code lines kept verbatim.
3010 class EbuildMetadataPhase(SubProcess):
3013 Asynchronous interface for the ebuild "depend" phase which is
3014 used to extract metadata from the ebuild.
3017 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3018 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3021 _file_names = ("ebuild",)
3022 _files_dict = slot_dict_class(_file_names, prefix="")
3026 settings = self.settings
3028 ebuild_path = self.ebuild_path
3029 debug = settings.get("PORTAGE_DEBUG") == "1"
3033 if self.fd_pipes is not None:
3034 fd_pipes = self.fd_pipes.copy()
3038 fd_pipes.setdefault(0, sys.stdin.fileno())
3039 fd_pipes.setdefault(1, sys.stdout.fileno())
3040 fd_pipes.setdefault(2, sys.stderr.fileno())
3042 # flush any pending output
3043 for fd in fd_pipes.itervalues():
3044 if fd == sys.stdout.fileno():
3046 if fd == sys.stderr.fileno():
3049 fd_pipes_orig = fd_pipes.copy()
3050 self._files = self._files_dict()
# Non-blocking read end: metadata arrives on a pipe registered with
# the scheduler's poll loop (see _output_handler).
3053 master_fd, slave_fd = os.pipe()
3054 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3055 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3057 fd_pipes[self._metadata_fd] = slave_fd
3059 self._raw_metadata = []
3060 files.ebuild = os.fdopen(master_fd, 'r')
3061 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3062 self._registered_events, self._output_handler)
3063 self._registered = True
3065 retval = portage.doebuild(ebuild_path, "depend",
3066 settings["ROOT"], settings, debug,
3067 mydbapi=self.portdb, tree="porttree",
3068 fd_pipes=fd_pipes, returnpid=True)
3072 if isinstance(retval, int):
3073 # doebuild failed before spawning
3075 self.returncode = retval
3079 self.pid = retval[0]
# Ownership of the child pid transfers to this task; remove it from
# portage's global spawned_pids tracking.
3080 portage.process.spawned_pids.remove(self.pid)
3082 def _output_handler(self, fd, event):
3084 if event & PollConstants.POLLIN:
3085 self._raw_metadata.append(self._files.ebuild.read())
3086 if not self._raw_metadata[-1]:
3090 self._unregister_if_appropriate(event)
3091 return self._registered
3093 def _set_returncode(self, wait_retval):
3094 SubProcess._set_returncode(self, wait_retval)
3095 if self.returncode == os.EX_OK:
3096 metadata_lines = "".join(self._raw_metadata).splitlines()
3097 if len(portage.auxdbkeys) != len(metadata_lines):
3098 # Don't trust bash's returncode if the
3099 # number of lines is incorrect.
3102 metadata = izip(portage.auxdbkeys, metadata_lines)
3103 self.metadata_callback(self.cpv, self.ebuild_path,
3104 self.repo_path, metadata, self.ebuild_mtime)
# EbuildProcess: SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild().
# NOTE(review): excerpt is line-sampled; `def _start` and the returnpid
# plumbing in _spawn are partially elided. Code lines kept verbatim.
3106 class EbuildProcess(SpawnProcess):
3108 __slots__ = ("phase", "pkg", "settings", "tree")
3111 # Don't open the log file during the clean phase since the
3112 # open file can result in an nfs lock on $T/build.log which
3113 # prevents the clean phase from removing $T.
3114 if self.phase not in ("clean", "cleanrm"):
3115 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3116 SpawnProcess._start(self)
3118 def _pipe(self, fd_pipes):
3119 stdout_pipe = fd_pipes.get(1)
3120 got_pty, master_fd, slave_fd = \
3121 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3122 return (master_fd, slave_fd)
3124 def _spawn(self, args, **kwargs):
3126 root_config = self.pkg.root_config
3128 mydbapi = root_config.trees[tree].dbapi
3129 settings = self.settings
3130 ebuild_path = settings["EBUILD"]
3131 debug = settings.get("PORTAGE_DEBUG") == "1"
3133 rval = portage.doebuild(ebuild_path, self.phase,
3134 root_config.root, settings, debug,
3135 mydbapi=mydbapi, tree=tree, **kwargs)
3139 def _set_returncode(self, wait_retval):
3140 SpawnProcess._set_returncode(self, wait_retval)
# The phase writes its real exit status to a file; trust that over the
# process exit code (except for clean phases, which remove $T).
3142 if self.phase not in ("clean", "cleanrm"):
3143 self.returncode = portage._doebuild_exit_status_check_and_log(
3144 self.settings, self.phase, self.returncode)
3146 if self.phase == "test" and self.returncode != os.EX_OK and \
3147 "test-fail-continue" in self.settings.features:
3148 self.returncode = os.EX_OK
3150 portage._post_phase_userpriv_perms(self.settings)
# EbuildPhase: wraps one EbuildProcess and then runs the phase's
# post-phase commands (misc functions) if any are configured.
# NOTE(review): excerpt is line-sampled; `def _start` and some cleanup
# frames are elided. Code lines kept verbatim.
3152 class EbuildPhase(CompositeTask):
3154 __slots__ = ("background", "pkg", "phase",
3155 "scheduler", "settings", "tree")
3157 _post_phase_cmds = portage._post_phase_cmds
3161 ebuild_process = EbuildProcess(background=self.background,
3162 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3163 settings=self.settings, tree=self.tree)
3165 self._start_task(ebuild_process, self._ebuild_exit)
3167 def _ebuild_exit(self, ebuild_process):
3169 if self.phase == "install":
3171 log_path = self.settings.get("PORTAGE_LOG_FILE")
3173 if self.background and log_path is not None:
3174 log_file = open(log_path, 'a')
# After src_install, scan the build log for QA problems.
3177 portage._check_build_log(self.settings, out=out)
3179 if log_file is not None:
3182 if self._default_exit(ebuild_process) != os.EX_OK:
3186 settings = self.settings
3188 if self.phase == "install":
3189 portage._post_src_install_uid_fix(settings)
3191 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3192 if post_phase_cmds is not None:
3193 post_phase = MiscFunctionsProcess(background=self.background,
3194 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3195 scheduler=self.scheduler, settings=settings)
3196 self._start_task(post_phase, self._post_phase_exit)
3199 self.returncode = ebuild_process.returncode
3200 self._current_task = None
3203 def _post_phase_exit(self, post_phase):
3204 if self._final_exit(post_phase) != os.EX_OK:
3205 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3207 self._current_task = None
# EbuildBinpkg: runs the "package" phase to create a .tbz2 and injects it
# into the bintree on success.
# NOTE(review): excerpt is line-sampled; the `__init__`/`_start` def lines
# and the try/finally around _start are elided. Code lines kept verbatim.
3211 class EbuildBinpkg(EbuildProcess):
3213 This assumes that src_install() has successfully completed.
3215 __slots__ = ("_binpkg_tmpfile",)
3218 self.phase = "package"
3219 self.tree = "porttree"
3221 root_config = pkg.root_config
3222 portdb = root_config.trees["porttree"].dbapi
3223 bintree = root_config.trees["bintree"]
3224 ebuild_path = portdb.findname(self.pkg.cpv)
3225 settings = self.settings
3226 debug = settings.get("PORTAGE_DEBUG") == "1"
3228 bintree.prevent_collision(pkg.cpv)
# The package is written to a pid-suffixed temp file first, then
# injected into the tree after a successful phase.
3229 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3230 pkg.cpv + ".tbz2." + str(os.getpid()))
3231 self._binpkg_tmpfile = binpkg_tmpfile
3232 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3233 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3236 EbuildProcess._start(self)
3238 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3240 def _set_returncode(self, wait_retval):
3241 EbuildProcess._set_returncode(self, wait_retval)
3244 bintree = pkg.root_config.trees["bintree"]
3245 binpkg_tmpfile = self._binpkg_tmpfile
3246 if self.returncode == os.EX_OK:
3247 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# EbuildMerge: synchronous wrapper around portage.merge() that records a
# world-file atom and logs success.
# NOTE(review): excerpt is line-sampled; `def execute` and the local
# `pkg = self.pkg` binding in _log_success appear on elided lines.
# Code lines kept verbatim.
3249 class EbuildMerge(SlotObject):
3251 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3252 "pkg", "pkg_count", "pkg_path", "pretend",
3253 "scheduler", "settings", "tree", "world_atom")
3256 root_config = self.pkg.root_config
3257 settings = self.settings
3258 retval = portage.merge(settings["CATEGORY"],
3259 settings["PF"], settings["D"],
3260 os.path.join(settings["PORTAGE_BUILDDIR"],
3261 "build-info"), root_config.root, settings,
3262 myebuild=settings["EBUILD"],
3263 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3264 vartree=root_config.trees["vartree"],
3265 prev_mtimes=self.ldpath_mtimes,
3266 scheduler=self.scheduler,
3267 blockers=self.find_blockers)
# world_atom is only invoked on a successful merge.
3269 if retval == os.EX_OK:
3270 self.world_atom(self.pkg)
3275 def _log_success(self):
3277 pkg_count = self.pkg_count
3278 pkg_path = self.pkg_path
3279 logger = self.logger
3280 if "noclean" not in self.settings.features:
3281 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3282 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3283 logger.log((" === (%s of %s) " + \
3284 "Post-Build Cleaning (%s::%s)") % \
3285 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3286 short_msg=short_msg)
3287 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3288 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# PackageUninstall: synchronous unmerge wrapped in the AsynchronousTask
# interface; output is routed to PORTAGE_LOG_FILE when one is set.
# NOTE(review): excerpt is line-sampled; the `_start` def line and the
# try/finally closing the log file are elided. Code lines kept verbatim.
3290 class PackageUninstall(AsynchronousTask):
3292 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3296 unmerge(self.pkg.root_config, self.opts, "unmerge",
3297 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3298 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3299 writemsg_level=self._writemsg_level)
3300 except UninstallFailure, e:
3301 self.returncode = e.status
3303 self.returncode = os.EX_OK
3306 def _writemsg_level(self, msg, level=0, noiselevel=0):
3308 log_path = self.settings.get("PORTAGE_LOG_FILE")
3309 background = self.background
3311 if log_path is None:
# Without a log file, suppress sub-WARNING messages in background
# mode; otherwise write to the console.
3312 if not (background and level < logging.WARNING):
3313 portage.util.writemsg_level(msg,
3314 level=level, noiselevel=noiselevel)
3317 portage.util.writemsg_level(msg,
3318 level=level, noiselevel=noiselevel)
3320 f = open(log_path, 'a')
# Binpkg: composite task installing a binary package — wait for any
# prefetcher, fetch (BinpkgFetcher), verify (BinpkgVerifier), clean the
# build dir, extract metadata + image, run setup, extract, then merge.
# NOTE(review): excerpt is line-sampled; many `def` lines, try/except
# frames and file-close lines are elided. Code lines kept verbatim.
3326 class Binpkg(CompositeTask):
3328 __slots__ = ("find_blockers",
3329 "ldpath_mtimes", "logger", "opts",
3330 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3331 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3332 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3334 def _writemsg_level(self, msg, level=0, noiselevel=0):
3336 if not self.background:
3337 portage.util.writemsg_level(msg,
3338 level=level, noiselevel=noiselevel)
3340 log_path = self.settings.get("PORTAGE_LOG_FILE")
3341 if log_path is not None:
3342 f = open(log_path, 'a')
3351 settings = self.settings
3352 settings.setcpv(pkg)
3353 self._tree = "bintree"
3354 self._bintree = self.pkg.root_config.trees[self._tree]
3355 self._verify = not self.opts.pretend
3357 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3358 "portage", pkg.category, pkg.pf)
3359 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3360 pkg=pkg, settings=settings)
3361 self._image_dir = os.path.join(dir_path, "image")
3362 self._infloc = os.path.join(dir_path, "build-info")
3363 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3364 settings["EBUILD"] = self._ebuild_path
3365 debug = settings.get("PORTAGE_DEBUG") == "1"
3366 portage.doebuild_environment(self._ebuild_path, "setup",
3367 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3368 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3370 # The prefetcher has already completed or it
3371 # could be running now. If it's running now,
3372 # wait for it to complete since it holds
3373 # a lock on the file being fetched. The
3374 # portage.locks functions are only designed
3375 # to work between separate processes. Since
3376 # the lock is held by the current process,
3377 # use the scheduler and fetcher methods to
3378 # synchronize with the fetcher.
3379 prefetcher = self.prefetcher
3380 if prefetcher is None:
3382 elif not prefetcher.isAlive():
3384 elif prefetcher.poll() is None:
3386 waiting_msg = ("Fetching '%s' " + \
3387 "in the background. " + \
3388 "To view fetch progress, run `tail -f " + \
3389 "/var/log/emerge-fetch.log` in another " + \
3390 "terminal.") % prefetcher.pkg_path
3391 msg_prefix = colorize("GOOD", " * ")
3392 from textwrap import wrap
3393 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3394 for line in wrap(waiting_msg, 65))
3395 if not self.background:
3396 writemsg(waiting_msg, noiselevel=-1)
3398 self._current_task = prefetcher
3399 prefetcher.addExitListener(self._prefetch_exit)
3402 self._prefetch_exit(prefetcher)
3404 def _prefetch_exit(self, prefetcher):
3407 pkg_count = self.pkg_count
# The build dir is locked (and any stale contents removed) only when
# we will actually merge — not for pretend/fetchonly.
3408 if not (self.opts.pretend or self.opts.fetchonly):
3409 self._build_dir.lock()
3411 shutil.rmtree(self._build_dir.dir_path)
3412 except EnvironmentError, e:
3413 if e.errno != errno.ENOENT:
3416 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3417 fetcher = BinpkgFetcher(background=self.background,
3418 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3419 pretend=self.opts.pretend, scheduler=self.scheduler)
3420 pkg_path = fetcher.pkg_path
3421 self._pkg_path = pkg_path
3423 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3425 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3426 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3427 short_msg = "emerge: (%s of %s) %s Fetch" % \
3428 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3429 self.logger.log(msg, short_msg=short_msg)
3430 self._start_task(fetcher, self._fetcher_exit)
3433 self._fetcher_exit(fetcher)
3435 def _fetcher_exit(self, fetcher):
3437 # The fetcher only has a returncode when
3438 # --getbinpkg is enabled.
3439 if fetcher.returncode is not None:
3440 self._fetched_pkg = True
3441 if self._default_exit(fetcher) != os.EX_OK:
3442 self._unlock_builddir()
3446 if self.opts.pretend:
3447 self._current_task = None
3448 self.returncode = os.EX_OK
# NOTE(review): elided lines 3449-3455 presumably start the verifier
# only when self._verify is set — TODO confirm.
3456 logfile = self.settings.get("PORTAGE_LOG_FILE")
3457 verifier = BinpkgVerifier(background=self.background,
3458 logfile=logfile, pkg=self.pkg)
3459 self._start_task(verifier, self._verifier_exit)
3462 self._verifier_exit(verifier)
3464 def _verifier_exit(self, verifier):
3465 if verifier is not None and \
3466 self._default_exit(verifier) != os.EX_OK:
3467 self._unlock_builddir()
3471 logger = self.logger
3473 pkg_count = self.pkg_count
3474 pkg_path = self._pkg_path
# A freshly fetched package is injected into the bintree so its
# metadata/index entries are updated.
3476 if self._fetched_pkg:
3477 self._bintree.inject(pkg.cpv, filename=pkg_path)
3479 if self.opts.fetchonly:
3480 self._current_task = None
3481 self.returncode = os.EX_OK
3485 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3486 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3487 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3488 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3489 logger.log(msg, short_msg=short_msg)
3492 settings = self.settings
3493 ebuild_phase = EbuildPhase(background=self.background,
3494 pkg=pkg, phase=phase, scheduler=self.scheduler,
3495 settings=settings, tree=self._tree)
3497 self._start_task(ebuild_phase, self._clean_exit)
3499 def _clean_exit(self, clean_phase):
3500 if self._default_exit(clean_phase) != os.EX_OK:
3501 self._unlock_builddir()
3505 dir_path = self._build_dir.dir_path
3508 shutil.rmtree(dir_path)
3509 except (IOError, OSError), e:
3510 if e.errno != errno.ENOENT:
3514 infloc = self._infloc
3516 pkg_path = self._pkg_path
3519 for mydir in (dir_path, self._image_dir, infloc):
3520 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3521 gid=portage.data.portage_gid, mode=dir_mode)
3523 # This initializes PORTAGE_LOG_FILE.
3524 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3525 self._writemsg_level(">>> Extracting info\n")
# The xpak segment of the .tbz2 carries the build-info metadata.
3527 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3528 check_missing_metadata = ("CATEGORY", "PF")
3529 missing_metadata = set()
3530 for k in check_missing_metadata:
3531 v = pkg_xpak.getfile(k)
3533 missing_metadata.add(k)
3535 pkg_xpak.unpackinfo(infloc)
3536 for k in missing_metadata:
3544 f = open(os.path.join(infloc, k), 'wb')
3550 # Store the md5sum in the vdb.
3551 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3553 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3557 # This gives bashrc users an opportunity to do various things
3558 # such as remove binary packages after they're installed.
3559 settings = self.settings
3560 settings.setcpv(self.pkg)
3561 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3562 settings.backup_changes("PORTAGE_BINPKG_FILE")
3565 setup_phase = EbuildPhase(background=self.background,
3566 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3567 settings=settings, tree=self._tree)
3569 setup_phase.addExitListener(self._setup_exit)
3570 self._current_task = setup_phase
3571 self.scheduler.scheduleSetup(setup_phase)
3573 def _setup_exit(self, setup_phase):
3574 if self._default_exit(setup_phase) != os.EX_OK:
3575 self._unlock_builddir()
3579 extractor = BinpkgExtractorAsync(background=self.background,
3580 image_dir=self._image_dir,
3581 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3582 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3583 self._start_task(extractor, self._extractor_exit)
3585 def _extractor_exit(self, extractor):
3586 if self._final_exit(extractor) != os.EX_OK:
3587 self._unlock_builddir()
3588 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3592 def _unlock_builddir(self):
# Nothing was locked in pretend/fetchonly mode (see _prefetch_exit).
3593 if self.opts.pretend or self.opts.fetchonly:
3595 portage.elog.elog_process(self.pkg.cpv, self.settings)
3596 self._build_dir.unlock()
3600 # This gives bashrc users an opportunity to do various things
3601 # such as remove binary packages after they're installed.
3602 settings = self.settings
3603 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3604 settings.backup_changes("PORTAGE_BINPKG_FILE")
3606 merge = EbuildMerge(find_blockers=self.find_blockers,
3607 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3608 pkg=self.pkg, pkg_count=self.pkg_count,
3609 pkg_path=self._pkg_path, scheduler=self.scheduler,
3610 settings=settings, tree=self._tree, world_atom=self.world_atom)
# NOTE(review): elided lines 3611-3614 presumably wrap execute() in
# try/finally so PORTAGE_BINPKG_FILE is popped and the builddir
# unlocked on any outcome — TODO confirm.
3613 retval = merge.execute()
3615 settings.pop("PORTAGE_BINPKG_FILE", None)
3616 self._unlock_builddir()
# BinpkgFetcher: downloads one binary package with the configured
# FETCHCOMMAND/RESUMECOMMAND, with optional distlocks locking of the
# destination file and remote-mtime preservation.
# NOTE(review): excerpt is line-sampled; `def _start`, the lock/unlock def
# lines, and several try/except frames are elided. Code lines kept
# verbatim.
3619 class BinpkgFetcher(SpawnProcess):
3621 __slots__ = ("pkg", "pretend",
3622 "locked", "pkg_path", "_lock_obj")
3624 def __init__(self, **kwargs):
3625 SpawnProcess.__init__(self, **kwargs)
3627 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3635 pretend = self.pretend
3636 bintree = pkg.root_config.trees["bintree"]
3637 settings = bintree.settings
3638 use_locks = "distlocks" in settings.features
3639 pkg_path = self.pkg_path
3642 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# A pre-existing file listed in bintree.invalids is resumed rather
# than re-downloaded from scratch.
3645 exists = os.path.exists(pkg_path)
3646 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3647 if not (pretend or resume):
3648 # Remove existing file or broken symlink.
3654 # urljoin doesn't work correctly with
3655 # unrecognized protocols like sftp
3656 if bintree._remote_has_index:
3657 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3659 rel_uri = pkg.cpv + ".tbz2"
3660 uri = bintree._remote_base_uri.rstrip("/") + \
3661 "/" + rel_uri.lstrip("/")
3663 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3664 "/" + pkg.pf + ".tbz2"
3667 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3668 self.returncode = os.EX_OK
# Protocol-specific command (e.g. FETCHCOMMAND_HTTP) wins over the
# generic FETCHCOMMAND/RESUMECOMMAND.
3672 protocol = urlparse.urlparse(uri)[0]
3673 fcmd_prefix = "FETCHCOMMAND"
3675 fcmd_prefix = "RESUMECOMMAND"
3676 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3678 fcmd = settings.get(fcmd_prefix)
3681 "DISTDIR" : os.path.dirname(pkg_path),
3683 "FILE" : os.path.basename(pkg_path)
3686 fetch_env = dict(settings.iteritems())
3687 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3688 for x in shlex.split(fcmd)]
3690 if self.fd_pipes is None:
3692 fd_pipes = self.fd_pipes
3694 # Redirect all output to stdout since some fetchers like
3695 # wget pollute stderr (if portage detects a problem then it
3696 # can send it's own message to stderr).
3697 fd_pipes.setdefault(0, sys.stdin.fileno())
3698 fd_pipes.setdefault(1, sys.stdout.fileno())
3699 fd_pipes.setdefault(2, sys.stdout.fileno())
3701 self.args = fetch_args
3702 self.env = fetch_env
3703 SpawnProcess._start(self)
3705 def _set_returncode(self, wait_retval):
3706 SpawnProcess._set_returncode(self, wait_retval)
3707 if self.returncode == os.EX_OK:
3708 # If possible, update the mtime to match the remote package if
3709 # the fetcher didn't already do it automatically.
3710 bintree = self.pkg.root_config.trees["bintree"]
3711 if bintree._remote_has_index:
3712 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3713 if remote_mtime is not None:
3715 remote_mtime = long(remote_mtime)
3720 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3724 if remote_mtime != local_mtime:
3726 os.utime(self.pkg_path,
3727 (remote_mtime, remote_mtime))
3736 This raises an AlreadyLocked exception if lock() is called
3737 while a lock is already held. In order to avoid this, call
3738 unlock() or check whether the "locked" attribute is True
3739 or False before calling lock().
3741 if self._lock_obj is not None:
3742 raise self.AlreadyLocked((self._lock_obj,))
3744 self._lock_obj = portage.locks.lockfile(
3745 self.pkg_path, wantnewlockfile=1)
3748 class AlreadyLocked(portage.exception.PortageException):
3752 if self._lock_obj is None:
3754 portage.locks.unlockfile(self._lock_obj)
3755 self._lock_obj = None
# BinpkgVerifier: digest-checks a downloaded binary package; on failure
# the file is renamed out of the way rather than deleted.
# NOTE(review): excerpt is line-sampled; the `_start` def line, the
# `rval` initialization, and the try/finally restoring stdio are elided.
# Code lines kept verbatim.
3758 class BinpkgVerifier(AsynchronousTask):
3759 __slots__ = ("logfile", "pkg",)
3763 Note: Unlike a normal AsynchronousTask.start() method,
3764 this one does all work is synchronously. The returncode
3765 attribute will be set before it returns.
3769 root_config = pkg.root_config
3770 bintree = root_config.trees["bintree"]
# digestCheck writes to stdout/stderr, so both are temporarily
# redirected to the log file in background mode and restored below.
3772 stdout_orig = sys.stdout
3773 stderr_orig = sys.stderr
3775 if self.background and self.logfile is not None:
3776 log_file = open(self.logfile, 'a')
3778 if log_file is not None:
3779 sys.stdout = log_file
3780 sys.stderr = log_file
3782 bintree.digestCheck(pkg)
3783 except portage.exception.FileNotFound:
3784 writemsg("!!! Fetching Binary failed " + \
3785 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3787 except portage.exception.DigestException, e:
3788 writemsg("\n!!! Digest verification failed:\n",
3790 writemsg("!!! %s\n" % e.value[0],
3792 writemsg("!!! Reason: %s\n" % e.value[1],
3794 writemsg("!!! Got: %s\n" % e.value[2],
3796 writemsg("!!! Expected: %s\n" % e.value[3],
3799 if rval != os.EX_OK:
3800 pkg_path = bintree.getname(pkg.cpv)
3801 head, tail = os.path.split(pkg_path)
3802 temp_filename = portage._checksum_failure_temp_file(head, tail)
3803 writemsg("File renamed to '%s'\n" % (temp_filename,),
3806 sys.stdout = stdout_orig
3807 sys.stderr = stderr_orig
3808 if log_file is not None:
3811 self.returncode = rval
3814 class BinpkgPrefetcher(CompositeTask):
3816 __slots__ = ("pkg",) + \
3817 ("pkg_path", "_bintree",)
3820 self._bintree = self.pkg.root_config.trees["bintree"]
3821 fetcher = BinpkgFetcher(background=self.background,
3822 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3823 scheduler=self.scheduler)
3824 self.pkg_path = fetcher.pkg_path
3825 self._start_task(fetcher, self._fetcher_exit)
3827 def _fetcher_exit(self, fetcher):
3829 if self._default_exit(fetcher) != os.EX_OK:
3833 verifier = BinpkgVerifier(background=self.background,
3834 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3835 self._start_task(verifier, self._verifier_exit)
3837 def _verifier_exit(self, verifier):
3838 if self._default_exit(verifier) != os.EX_OK:
3842 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3844 self._current_task = None
3845 self.returncode = os.EX_OK
3848 class BinpkgExtractorAsync(SpawnProcess):
3850 __slots__ = ("image_dir", "pkg", "pkg_path")
3852 _shell_binary = portage.const.BASH_BINARY
3855 self.args = [self._shell_binary, "-c",
3856 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3857 (portage._shell_quote(self.pkg_path),
3858 portage._shell_quote(self.image_dir))]
3860 self.env = self.pkg.root_config.settings.environ()
3861 SpawnProcess._start(self)
3863 class MergeListItem(CompositeTask):
3866 TODO: For parallel scheduling, everything here needs asynchronous
3867 execution support (start, poll, and wait methods).
3870 __slots__ = ("args_set",
3871 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3872 "find_blockers", "logger", "mtimedb", "pkg",
3873 "pkg_count", "pkg_to_replace", "prefetcher",
3874 "settings", "statusMessage", "world_atom") + \
3880 build_opts = self.build_opts
3883 # uninstall, executed by self.merge()
3884 self.returncode = os.EX_OK
3888 args_set = self.args_set
3889 find_blockers = self.find_blockers
3890 logger = self.logger
3891 mtimedb = self.mtimedb
3892 pkg_count = self.pkg_count
3893 scheduler = self.scheduler
3894 settings = self.settings
3895 world_atom = self.world_atom
3896 ldpath_mtimes = mtimedb["ldpath"]
3898 action_desc = "Emerging"
3900 if pkg.type_name == "binary":
3901 action_desc += " binary"
3903 if build_opts.fetchonly:
3904 action_desc = "Fetching"
3906 msg = "%s (%s of %s) %s" % \
3908 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3909 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3910 colorize("GOOD", pkg.cpv))
3912 portdb = pkg.root_config.trees["porttree"].dbapi
3913 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3914 if portdir_repo_name:
3915 pkg_repo_name = pkg.metadata.get("repository")
3916 if pkg_repo_name != portdir_repo_name:
3917 if not pkg_repo_name:
3918 pkg_repo_name = "unknown repo"
3919 msg += " from %s" % pkg_repo_name
3922 msg += " %s %s" % (preposition, pkg.root)
3924 if not build_opts.pretend:
3925 self.statusMessage(msg)
3926 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3927 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3929 if pkg.type_name == "ebuild":
3931 build = EbuildBuild(args_set=args_set,
3932 background=self.background,
3933 config_pool=self.config_pool,
3934 find_blockers=find_blockers,
3935 ldpath_mtimes=ldpath_mtimes, logger=logger,
3936 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3937 prefetcher=self.prefetcher, scheduler=scheduler,
3938 settings=settings, world_atom=world_atom)
3940 self._install_task = build
3941 self._start_task(build, self._default_final_exit)
3944 elif pkg.type_name == "binary":
3946 binpkg = Binpkg(background=self.background,
3947 find_blockers=find_blockers,
3948 ldpath_mtimes=ldpath_mtimes, logger=logger,
3949 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3950 prefetcher=self.prefetcher, settings=settings,
3951 scheduler=scheduler, world_atom=world_atom)
3953 self._install_task = binpkg
3954 self._start_task(binpkg, self._default_final_exit)
3958 self._install_task.poll()
3959 return self.returncode
3962 self._install_task.wait()
3963 return self.returncode
3968 build_opts = self.build_opts
3969 find_blockers = self.find_blockers
3970 logger = self.logger
3971 mtimedb = self.mtimedb
3972 pkg_count = self.pkg_count
3973 prefetcher = self.prefetcher
3974 scheduler = self.scheduler
3975 settings = self.settings
3976 world_atom = self.world_atom
3977 ldpath_mtimes = mtimedb["ldpath"]
3980 if not (build_opts.buildpkgonly or \
3981 build_opts.fetchonly or build_opts.pretend):
3983 uninstall = PackageUninstall(background=self.background,
3984 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3985 pkg=pkg, scheduler=scheduler, settings=settings)
3988 retval = uninstall.wait()
3989 if retval != os.EX_OK:
3993 if build_opts.fetchonly or \
3994 build_opts.buildpkgonly:
3995 return self.returncode
3997 retval = self._install_task.install()
4000 class PackageMerge(AsynchronousTask):
4002 TODO: Implement asynchronous merge so that the scheduler can
4003 run while a merge is executing.
4006 __slots__ = ("merge",)
4010 pkg = self.merge.pkg
4011 pkg_count = self.merge.pkg_count
4014 action_desc = "Uninstalling"
4015 preposition = "from"
4017 action_desc = "Installing"
4020 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4023 msg += " %s %s" % (preposition, pkg.root)
4025 if not self.merge.build_opts.fetchonly and \
4026 not self.merge.build_opts.pretend and \
4027 not self.merge.build_opts.buildpkgonly:
4028 self.merge.statusMessage(msg)
4030 self.returncode = self.merge.merge()
4033 class DependencyArg(object):
4034 def __init__(self, arg=None, root_config=None):
4036 self.root_config = root_config
4039 return str(self.arg)
4041 class AtomArg(DependencyArg):
4042 def __init__(self, atom=None, **kwargs):
4043 DependencyArg.__init__(self, **kwargs)
4045 if not isinstance(self.atom, portage.dep.Atom):
4046 self.atom = portage.dep.Atom(self.atom)
4047 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument wrapping one specific Package instance.

	The generated atom is an exact-version match ("=<cpv>") for the
	wrapped package, and the set contains just that one atom.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		exact_atom = portage.dep.Atom("=%s" % package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom,)
4056 class SetArg(DependencyArg):
4057 def __init__(self, set=None, **kwargs):
4058 DependencyArg.__init__(self, **kwargs)
4060 self.name = self.arg[len(SETPREFIX):]
4062 class Dependency(SlotObject):
4063 __slots__ = ("atom", "blocker", "depth",
4064 "parent", "onlydeps", "priority", "root")
4065 def __init__(self, **kwargs):
4066 SlotObject.__init__(self, **kwargs)
4067 if self.priority is None:
4068 self.priority = DepPriority()
4069 if self.depth is None:
4072 class BlockerCache(portage.cache.mappings.MutableMapping):
4073 """This caches blockers of installed packages so that dep_check does not
4074 have to be done for every single installed package on every invocation of
4075 emerge. The cache is invalidated whenever it is detected that something
4076 has changed that might alter the results of dep_check() calls:
4077 1) the set of installed packages (including COUNTER) has changed
4078 2) the old-style virtuals have changed
4081 # Number of uncached packages to trigger cache update, since
4082 # it's wasteful to update it for every vdb change.
4083 _cache_threshold = 5
4085 class BlockerData(object):
4087 __slots__ = ("__weakref__", "atoms", "counter")
4089 def __init__(self, counter, atoms):
4090 self.counter = counter
4093 def __init__(self, myroot, vardb):
4095 self._virtuals = vardb.settings.getvirtuals()
4096 self._cache_filename = os.path.join(myroot,
4097 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4098 self._cache_version = "1"
4099 self._cache_data = None
4100 self._modified = set()
4105 f = open(self._cache_filename, mode='rb')
4106 mypickle = pickle.Unpickler(f)
4107 self._cache_data = mypickle.load()
4110 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4111 if isinstance(e, pickle.UnpicklingError):
4112 writemsg("!!! Error loading '%s': %s\n" % \
4113 (self._cache_filename, str(e)), noiselevel=-1)
4116 cache_valid = self._cache_data and \
4117 isinstance(self._cache_data, dict) and \
4118 self._cache_data.get("version") == self._cache_version and \
4119 isinstance(self._cache_data.get("blockers"), dict)
4121 # Validate all the atoms and counters so that
4122 # corruption is detected as soon as possible.
4123 invalid_items = set()
4124 for k, v in self._cache_data["blockers"].iteritems():
4125 if not isinstance(k, basestring):
4126 invalid_items.add(k)
4129 if portage.catpkgsplit(k) is None:
4130 invalid_items.add(k)
4132 except portage.exception.InvalidData:
4133 invalid_items.add(k)
4135 if not isinstance(v, tuple) or \
4137 invalid_items.add(k)
4140 if not isinstance(counter, (int, long)):
4141 invalid_items.add(k)
4143 if not isinstance(atoms, (list, tuple)):
4144 invalid_items.add(k)
4146 invalid_atom = False
4148 if not isinstance(atom, basestring):
4151 if atom[:1] != "!" or \
4152 not portage.isvalidatom(
4153 atom, allow_blockers=True):
4157 invalid_items.add(k)
4160 for k in invalid_items:
4161 del self._cache_data["blockers"][k]
4162 if not self._cache_data["blockers"]:
4166 self._cache_data = {"version":self._cache_version}
4167 self._cache_data["blockers"] = {}
4168 self._cache_data["virtuals"] = self._virtuals
4169 self._modified.clear()
4172 """If the current user has permission and the internal blocker cache
4173 been updated, save it to disk and mark it unmodified. This is called
4174 by emerge after it has proccessed blockers for all installed packages.
4175 Currently, the cache is only written if the user has superuser
4176 privileges (since that's required to obtain a lock), but all users
4177 have read access and benefit from faster blocker lookups (as long as
4178 the entire cache is still valid). The cache is stored as a pickled
4179 dict object with the following format:
4183 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4184 "virtuals" : vardb.settings.getvirtuals()
4187 if len(self._modified) >= self._cache_threshold and \
4190 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4191 pickle.dump(self._cache_data, f, -1)
4193 portage.util.apply_secpass_permissions(
4194 self._cache_filename, gid=portage.portage_gid, mode=0644)
4195 except (IOError, OSError), e:
4197 self._modified.clear()
4199 def __setitem__(self, cpv, blocker_data):
4201 Update the cache and mark it as modified for a future call to
4204 @param cpv: Package for which to cache blockers.
4206 @param blocker_data: An object with counter and atoms attributes.
4207 @type blocker_data: BlockerData
4209 self._cache_data["blockers"][cpv] = \
4210 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4211 self._modified.add(cpv)
4214 if self._cache_data is None:
4215 # triggered by python-trace
4217 return iter(self._cache_data["blockers"])
4219 def __delitem__(self, cpv):
4220 del self._cache_data["blockers"][cpv]
4222 def __getitem__(self, cpv):
4225 @returns: An object with counter and atoms attributes.
4227 return self.BlockerData(*self._cache_data["blockers"][cpv])
4229 class BlockerDB(object):
4231 def __init__(self, root_config):
4232 self._root_config = root_config
4233 self._vartree = root_config.trees["vartree"]
4234 self._portdb = root_config.trees["porttree"].dbapi
4236 self._dep_check_trees = None
4237 self._fake_vartree = None
4239 def _get_fake_vartree(self, acquire_lock=0):
4240 fake_vartree = self._fake_vartree
4241 if fake_vartree is None:
4242 fake_vartree = FakeVartree(self._root_config,
4243 acquire_lock=acquire_lock)
4244 self._fake_vartree = fake_vartree
4245 self._dep_check_trees = { self._vartree.root : {
4246 "porttree" : fake_vartree,
4247 "vartree" : fake_vartree,
4250 fake_vartree.sync(acquire_lock=acquire_lock)
4253 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4254 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4255 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4256 settings = self._vartree.settings
4257 stale_cache = set(blocker_cache)
4258 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4259 dep_check_trees = self._dep_check_trees
4260 vardb = fake_vartree.dbapi
4261 installed_pkgs = list(vardb)
4263 for inst_pkg in installed_pkgs:
4264 stale_cache.discard(inst_pkg.cpv)
4265 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4266 if cached_blockers is not None and \
4267 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4268 cached_blockers = None
4269 if cached_blockers is not None:
4270 blocker_atoms = cached_blockers.atoms
4272 # Use aux_get() to trigger FakeVartree global
4273 # updates on *DEPEND when appropriate.
4274 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4276 portage.dep._dep_check_strict = False
4277 success, atoms = portage.dep_check(depstr,
4278 vardb, settings, myuse=inst_pkg.use.enabled,
4279 trees=dep_check_trees, myroot=inst_pkg.root)
4281 portage.dep._dep_check_strict = True
4283 pkg_location = os.path.join(inst_pkg.root,
4284 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4285 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4286 (pkg_location, atoms), noiselevel=-1)
4289 blocker_atoms = [atom for atom in atoms \
4290 if atom.startswith("!")]
4291 blocker_atoms.sort()
4292 counter = long(inst_pkg.metadata["COUNTER"])
4293 blocker_cache[inst_pkg.cpv] = \
4294 blocker_cache.BlockerData(counter, blocker_atoms)
4295 for cpv in stale_cache:
4296 del blocker_cache[cpv]
4297 blocker_cache.flush()
4299 blocker_parents = digraph()
4301 for pkg in installed_pkgs:
4302 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4303 blocker_atom = blocker_atom.lstrip("!")
4304 blocker_atoms.append(blocker_atom)
4305 blocker_parents.add(blocker_atom, pkg)
4307 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4308 blocking_pkgs = set()
4309 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4310 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4312 # Check for blockers in the other direction.
4313 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4315 portage.dep._dep_check_strict = False
4316 success, atoms = portage.dep_check(depstr,
4317 vardb, settings, myuse=new_pkg.use.enabled,
4318 trees=dep_check_trees, myroot=new_pkg.root)
4320 portage.dep._dep_check_strict = True
4322 # We should never get this far with invalid deps.
4323 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4326 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4329 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4330 for inst_pkg in installed_pkgs:
4332 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4333 except (portage.exception.InvalidDependString, StopIteration):
4335 blocking_pkgs.add(inst_pkg)
4337 return blocking_pkgs
4339 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4341 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4342 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4343 p_type, p_root, p_key, p_status = parent_node
4345 if p_status == "nomerge":
4346 category, pf = portage.catsplit(p_key)
4347 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4348 msg.append("Portage is unable to process the dependencies of the ")
4349 msg.append("'%s' package. " % p_key)
4350 msg.append("In order to correct this problem, the package ")
4351 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4352 msg.append("As a temporary workaround, the --nodeps option can ")
4353 msg.append("be used to ignore all dependencies. For reference, ")
4354 msg.append("the problematic dependencies can be found in the ")
4355 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4357 msg.append("This package can not be installed. ")
4358 msg.append("Please notify the '%s' package maintainer " % p_key)
4359 msg.append("about this problem.")
4361 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4362 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4364 class PackageVirtualDbapi(portage.dbapi):
4366 A dbapi-like interface class that represents the state of the installed
4367 package database as new packages are installed, replacing any packages
4368 that previously existed in the same slot. The main difference between
4369 this class and fakedbapi is that this one uses Package instances
4370 internally (passed in via cpv_inject() and cpv_remove() calls).
4372 def __init__(self, settings):
4373 portage.dbapi.__init__(self)
4374 self.settings = settings
4375 self._match_cache = {}
4381 Remove all packages.
4385 self._cp_map.clear()
4386 self._cpv_map.clear()
4389 obj = PackageVirtualDbapi(self.settings)
4390 obj._match_cache = self._match_cache.copy()
4391 obj._cp_map = self._cp_map.copy()
4392 for k, v in obj._cp_map.iteritems():
4393 obj._cp_map[k] = v[:]
4394 obj._cpv_map = self._cpv_map.copy()
4398 return self._cpv_map.itervalues()
4400 def __contains__(self, item):
4401 existing = self._cpv_map.get(item.cpv)
4402 if existing is not None and \
4407 def get(self, item, default=None):
4408 cpv = getattr(item, "cpv", None)
4412 type_name, root, cpv, operation = item
4414 existing = self._cpv_map.get(cpv)
4415 if existing is not None and \
4420 def match_pkgs(self, atom):
4421 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4423 def _clear_cache(self):
4424 if self._categories is not None:
4425 self._categories = None
4426 if self._match_cache:
4427 self._match_cache = {}
4429 def match(self, origdep, use_cache=1):
4430 result = self._match_cache.get(origdep)
4431 if result is not None:
4433 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4434 self._match_cache[origdep] = result
4437 def cpv_exists(self, cpv):
4438 return cpv in self._cpv_map
4440 def cp_list(self, mycp, use_cache=1):
4441 cachelist = self._match_cache.get(mycp)
4442 # cp_list() doesn't expand old-style virtuals
4443 if cachelist and cachelist[0].startswith(mycp):
4445 cpv_list = self._cp_map.get(mycp)
4446 if cpv_list is None:
4449 cpv_list = [pkg.cpv for pkg in cpv_list]
4450 self._cpv_sort_ascending(cpv_list)
4451 if not (not cpv_list and mycp.startswith("virtual/")):
4452 self._match_cache[mycp] = cpv_list
4456 return list(self._cp_map)
4459 return list(self._cpv_map)
4461 def cpv_inject(self, pkg):
4462 cp_list = self._cp_map.get(pkg.cp)
4465 self._cp_map[pkg.cp] = cp_list
4466 e_pkg = self._cpv_map.get(pkg.cpv)
4467 if e_pkg is not None:
4470 self.cpv_remove(e_pkg)
4471 for e_pkg in cp_list:
4472 if e_pkg.slot_atom == pkg.slot_atom:
4475 self.cpv_remove(e_pkg)
4478 self._cpv_map[pkg.cpv] = pkg
4481 def cpv_remove(self, pkg):
4482 old_pkg = self._cpv_map.get(pkg.cpv)
4485 self._cp_map[pkg.cp].remove(pkg)
4486 del self._cpv_map[pkg.cpv]
4489 def aux_get(self, cpv, wants):
4490 metadata = self._cpv_map[cpv].metadata
4491 return [metadata.get(x, "") for x in wants]
4493 def aux_update(self, cpv, values):
4494 self._cpv_map[cpv].metadata.update(values)
4497 class depgraph(object):
4499 pkg_tree_map = RootConfig.pkg_tree_map
4501 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4503 def __init__(self, settings, trees, myopts, myparams, spinner):
4504 self.settings = settings
4505 self.target_root = settings["ROOT"]
4506 self.myopts = myopts
4507 self.myparams = myparams
4509 if settings.get("PORTAGE_DEBUG", "") == "1":
4511 self.spinner = spinner
4512 self._running_root = trees["/"]["root_config"]
4513 self._opts_no_restart = Scheduler._opts_no_restart
4514 self.pkgsettings = {}
4515 # Maps slot atom to package for each Package added to the graph.
4516 self._slot_pkg_map = {}
4517 # Maps nodes to the reasons they were selected for reinstallation.
4518 self._reinstall_nodes = {}
4521 self._trees_orig = trees
4523 # Contains a filtered view of preferred packages that are selected
4524 # from available repositories.
4525 self._filtered_trees = {}
4526 # Contains installed packages and new packages that have been added
4528 self._graph_trees = {}
4529 # All Package instances
4530 self._pkg_cache = {}
4531 for myroot in trees:
4532 self.trees[myroot] = {}
4533 # Create a RootConfig instance that references
4534 # the FakeVartree instead of the real one.
4535 self.roots[myroot] = RootConfig(
4536 trees[myroot]["vartree"].settings,
4538 trees[myroot]["root_config"].setconfig)
4539 for tree in ("porttree", "bintree"):
4540 self.trees[myroot][tree] = trees[myroot][tree]
4541 self.trees[myroot]["vartree"] = \
4542 FakeVartree(trees[myroot]["root_config"],
4543 pkg_cache=self._pkg_cache)
4544 self.pkgsettings[myroot] = portage.config(
4545 clone=self.trees[myroot]["vartree"].settings)
4546 self._slot_pkg_map[myroot] = {}
4547 vardb = self.trees[myroot]["vartree"].dbapi
4548 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4549 "--buildpkgonly" not in self.myopts
4550 # This fakedbapi instance will model the state that the vdb will
4551 # have after new packages have been installed.
4552 fakedb = PackageVirtualDbapi(vardb.settings)
4553 if preload_installed_pkgs:
4555 self.spinner.update()
4556 # This triggers metadata updates via FakeVartree.
4557 vardb.aux_get(pkg.cpv, [])
4558 fakedb.cpv_inject(pkg)
4560 # Now that the vardb state is cached in our FakeVartree,
4561 # we won't be needing the real vartree cache for awhile.
4562 # To make some room on the heap, clear the vardbapi
4564 trees[myroot]["vartree"].dbapi._clear_cache()
4567 self.mydbapi[myroot] = fakedb
4570 graph_tree.dbapi = fakedb
4571 self._graph_trees[myroot] = {}
4572 self._filtered_trees[myroot] = {}
4573 # Substitute the graph tree for the vartree in dep_check() since we
4574 # want atom selections to be consistent with package selections
4575 # have already been made.
4576 self._graph_trees[myroot]["porttree"] = graph_tree
4577 self._graph_trees[myroot]["vartree"] = graph_tree
4578 def filtered_tree():
4580 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4581 self._filtered_trees[myroot]["porttree"] = filtered_tree
4583 # Passing in graph_tree as the vartree here could lead to better
4584 # atom selections in some cases by causing atoms for packages that
4585 # have been added to the graph to be preferred over other choices.
4586 # However, it can trigger atom selections that result in
4587 # unresolvable direct circular dependencies. For example, this
4588 # happens with gwydion-dylan which depends on either itself or
4589 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4590 # gwydion-dylan-bin needs to be selected in order to avoid a
4591 # an unresolvable direct circular dependency.
4593 # To solve the problem described above, pass in "graph_db" so that
4594 # packages that have been added to the graph are distinguishable
4595 # from other available packages and installed packages. Also, pass
4596 # the parent package into self._select_atoms() calls so that
4597 # unresolvable direct circular dependencies can be detected and
4598 # avoided when possible.
4599 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4600 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4603 portdb = self.trees[myroot]["porttree"].dbapi
4604 bindb = self.trees[myroot]["bintree"].dbapi
4605 vardb = self.trees[myroot]["vartree"].dbapi
4606 # (db, pkg_type, built, installed, db_keys)
4607 if "--usepkgonly" not in self.myopts:
4608 db_keys = list(portdb._aux_cache_keys)
4609 dbs.append((portdb, "ebuild", False, False, db_keys))
4610 if "--usepkg" in self.myopts:
4611 db_keys = list(bindb._aux_cache_keys)
4612 dbs.append((bindb, "binary", True, False, db_keys))
4613 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4614 dbs.append((vardb, "installed", True, True, db_keys))
4615 self._filtered_trees[myroot]["dbs"] = dbs
4616 if "--usepkg" in self.myopts:
4617 self.trees[myroot]["bintree"].populate(
4618 "--getbinpkg" in self.myopts,
4619 "--getbinpkgonly" in self.myopts)
4622 self.digraph=portage.digraph()
4623 # contains all sets added to the graph
4625 # contains atoms given as arguments
4626 self._sets["args"] = InternalPackageSet()
4627 # contains all atoms from all sets added to the graph, including
4628 # atoms given as arguments
4629 self._set_atoms = InternalPackageSet()
4630 self._atom_arg_map = {}
4631 # contains all nodes pulled in by self._set_atoms
4632 self._set_nodes = set()
4633 # Contains only Blocker -> Uninstall edges
4634 self._blocker_uninstalls = digraph()
4635 # Contains only Package -> Blocker edges
4636 self._blocker_parents = digraph()
4637 # Contains only irrelevant Package -> Blocker edges
4638 self._irrelevant_blockers = digraph()
4639 # Contains only unsolvable Package -> Blocker edges
4640 self._unsolvable_blockers = digraph()
4641 # Contains all Blocker -> Blocked Package edges
4642 self._blocked_pkgs = digraph()
4643 # Contains world packages that have been protected from
4644 # uninstallation but may not have been added to the graph
4645 # if the graph is not complete yet.
4646 self._blocked_world_pkgs = {}
4647 self._slot_collision_info = {}
4648 # Slot collision nodes are not allowed to block other packages since
4649 # blocker validation is only able to account for one package per slot.
4650 self._slot_collision_nodes = set()
4651 self._parent_atoms = {}
4652 self._slot_conflict_parent_atoms = set()
4653 self._serialized_tasks_cache = None
4654 self._scheduler_graph = None
4655 self._displayed_list = None
4656 self._pprovided_args = []
4657 self._missing_args = []
4658 self._masked_installed = set()
4659 self._unsatisfied_deps_for_display = []
4660 self._unsatisfied_blockers_for_display = None
4661 self._circular_deps_for_display = None
4662 self._dep_stack = []
4663 self._unsatisfied_deps = []
4664 self._initially_unsatisfied_deps = []
4665 self._ignored_deps = []
4666 self._required_set_names = set(["system", "world"])
4667 self._select_atoms = self._select_atoms_highest_available
4668 self._select_package = self._select_pkg_highest_available
4669 self._highest_pkg_cache = {}
4671 def _show_slot_collision_notice(self):
4672 """Show an informational message advising the user to mask one of the
4673 the packages. In some cases it may be possible to resolve this
4674 automatically, but support for backtracking (removal nodes that have
4675 already been selected) will be required in order to handle all possible
4679 if not self._slot_collision_info:
4682 self._show_merge_list()
4685 msg.append("\n!!! Multiple package instances within a single " + \
4686 "package slot have been pulled\n")
4687 msg.append("!!! into the dependency graph, resulting" + \
4688 " in a slot conflict:\n\n")
4690 # Max number of parents shown, to avoid flooding the display.
4692 explanation_columns = 70
4694 for (slot_atom, root), slot_nodes \
4695 in self._slot_collision_info.iteritems():
4696 msg.append(str(slot_atom))
4699 for node in slot_nodes:
4701 msg.append(str(node))
4702 parent_atoms = self._parent_atoms.get(node)
4705 # Prefer conflict atoms over others.
4706 for parent_atom in parent_atoms:
4707 if len(pruned_list) >= max_parents:
4709 if parent_atom in self._slot_conflict_parent_atoms:
4710 pruned_list.add(parent_atom)
4712 # If this package was pulled in by conflict atoms then
4713 # show those alone since those are the most interesting.
4715 # When generating the pruned list, prefer instances
4716 # of DependencyArg over instances of Package.
4717 for parent_atom in parent_atoms:
4718 if len(pruned_list) >= max_parents:
4720 parent, atom = parent_atom
4721 if isinstance(parent, DependencyArg):
4722 pruned_list.add(parent_atom)
4723 # Prefer Packages instances that themselves have been
4724 # pulled into collision slots.
4725 for parent_atom in parent_atoms:
4726 if len(pruned_list) >= max_parents:
4728 parent, atom = parent_atom
4729 if isinstance(parent, Package) and \
4730 (parent.slot_atom, parent.root) \
4731 in self._slot_collision_info:
4732 pruned_list.add(parent_atom)
4733 for parent_atom in parent_atoms:
4734 if len(pruned_list) >= max_parents:
4736 pruned_list.add(parent_atom)
4737 omitted_parents = len(parent_atoms) - len(pruned_list)
4738 parent_atoms = pruned_list
4739 msg.append(" pulled in by\n")
4740 for parent_atom in parent_atoms:
4741 parent, atom = parent_atom
4742 msg.append(2*indent)
4743 if isinstance(parent,
4744 (PackageArg, AtomArg)):
4745 # For PackageArg and AtomArg types, it's
4746 # redundant to display the atom attribute.
4747 msg.append(str(parent))
4749 # Display the specific atom from SetArg or
4751 msg.append("%s required by %s" % (atom, parent))
4754 msg.append(2*indent)
4755 msg.append("(and %d more)\n" % omitted_parents)
4757 msg.append(" (no parents)\n")
4759 explanation = self._slot_conflict_explanation(slot_nodes)
4762 msg.append(indent + "Explanation:\n\n")
4763 for line in textwrap.wrap(explanation, explanation_columns):
4764 msg.append(2*indent + line + "\n")
4767 sys.stderr.write("".join(msg))
4770 explanations_for_all = explanations == len(self._slot_collision_info)
4772 if explanations_for_all or "--quiet" in self.myopts:
4776 msg.append("It may be possible to solve this problem ")
4777 msg.append("by using package.mask to prevent one of ")
4778 msg.append("those packages from being selected. ")
4779 msg.append("However, it is also possible that conflicting ")
4780 msg.append("dependencies exist such that they are impossible to ")
4781 msg.append("satisfy simultaneously. If such a conflict exists in ")
4782 msg.append("the dependencies of two different packages, then those ")
4783 msg.append("packages can not be installed simultaneously.")
4785 from formatter import AbstractFormatter, DumbWriter
4786 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4788 f.add_flowing_data(x)
4792 msg.append("For more information, see MASKED PACKAGES ")
4793 msg.append("section in the emerge man page or refer ")
4794 msg.append("to the Gentoo Handbook.")
4796 f.add_flowing_data(x)
def _slot_conflict_explanation(self, slot_nodes):
	"""
	When a slot conflict occurs due to USE deps, there are a few
	different cases to consider:

	1) New USE are correctly set but --newuse wasn't requested so an
	   installed package with incorrect USE happened to get pulled
	   into graph before the new one.

	2) New USE are incorrectly set but an installed package has correct
	   USE so it got pulled into the graph, and a new instance also got
	   pulled in due to --newuse or an upgrade.

	3) Multiple USE deps exist that can't be satisfied simultaneously,
	   and multiple package instances got pulled into the same slot to
	   satisfy the conflicting deps.

	Currently, explanations and suggested courses of action are generated
	for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
	"""
	# NOTE(review): this excerpt appears to have lines elided (early
	# returns and the initialization of matched_node are not visible);
	# the indentation below is a best-effort reconstruction -- verify
	# against the full file.
	if len(slot_nodes) != 2:
		# Suggestions are only implemented for
		# conflicts between two packages.
	all_conflict_atoms = self._slot_conflict_parent_atoms
	matched_atoms = None
	unmatched_node = None
	for node in slot_nodes:
		parent_atoms = self._parent_atoms.get(node)
		if not parent_atoms:
			# Normally, there are always parent atoms. If there are
			# none then something unexpected is happening and there's
			# currently no suggestion for this case.
		conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
		for parent_atom in conflict_atoms:
			parent, atom = parent_atom
			# Suggestions are currently only implemented for cases
			# in which all conflict atoms have USE deps.
		if matched_node is not None:
			# If conflict atoms match multiple nodes
			# then there's no suggestion.
		matched_atoms = conflict_atoms
		if unmatched_node is not None:
			# Neither node is matched by conflict atoms, and
			# there is no suggestion for this case.
		unmatched_node = node

	if matched_node is None or unmatched_node is None:
		# This shouldn't happen.

	# Case 1: stale installed instance was pulled in before the new one.
	if unmatched_node.installed and not matched_node.installed:
		return "New USE are correctly set, but --newuse wasn't" + \
			" requested, so an installed package with incorrect USE " + \
			"happened to get pulled into the dependency graph. " + \
			"In order to solve " + \
			"this, either specify the --newuse option or explicitly " + \
			" reinstall '%s'." % matched_node.slot_atom

	# Case 2: the new instance has incorrect USE; enumerate the atoms
	# the user must satisfy via USE adjustments.
	if matched_node.installed and not unmatched_node.installed:
		atoms = sorted(set(atom for parent, atom in matched_atoms))
		explanation = ("New USE for '%s' are incorrectly set. " + \
			"In order to solve this, adjust USE to satisfy '%s'") % \
			(matched_node.slot_atom, atoms[0])
		for atom in atoms[1:-1]:
			explanation += ", '%s'" % (atom,)
		explanation += " and '%s'" % (atoms[-1],)
def _process_slot_conflicts(self):
	"""
	Process slot conflict data to identify specific atoms which
	lead to conflict. These atoms only match a subset of the
	packages that have been pulled into a given slot.
	"""
	# NOTE(review): 'continue' statements and an 'else:' branch appear
	# to be elided from this excerpt; structure reconstructed.
	for (slot_atom, root), slot_nodes \
		in self._slot_collision_info.iteritems():

		# Union of all parent atoms across every package in this slot.
		all_parent_atoms = set()
		for pkg in slot_nodes:
			parent_atoms = self._parent_atoms.get(pkg)
			if not parent_atoms:
			all_parent_atoms.update(parent_atoms)

		for pkg in slot_nodes:
			parent_atoms = self._parent_atoms.get(pkg)
			if parent_atoms is None:
				parent_atoms = set()
				self._parent_atoms[pkg] = parent_atoms
			for parent_atom in all_parent_atoms:
				if parent_atom in parent_atoms:
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				parent, atom = parent_atom
				atom_set = InternalPackageSet(
					initial_atoms=(atom,))
				if atom_set.findAtomForPackage(pkg):
					parent_atoms.add(parent_atom)
				# Atom matched some but not all packages in the slot:
				# record it as a conflict-inducing parent atom.
					self._slot_conflict_parent_atoms.add(parent_atom)
def _reinstall_for_flags(self, forced_flags,
	orig_use, orig_iuse, cur_use, cur_iuse):
	"""Return a set of flags that trigger reinstallation, or None if there
	are no such flags."""
	# NOTE(review): the 'return flags' / 'return None' lines appear to
	# be elided from this excerpt.
	if "--newuse" in self.myopts:
		# --newuse: both IUSE membership changes (minus forced flags)
		# and effective enabled-flag changes count.
		flags = set(orig_iuse.symmetric_difference(
			cur_iuse).difference(forced_flags))
		flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
			cur_iuse.intersection(cur_use)))
	elif "changed-use" == self.myopts.get("--reinstall"):
		# --reinstall=changed-use: only effective enabled-flag
		# changes count (IUSE-only changes are ignored).
		flags = orig_iuse.intersection(orig_use).symmetric_difference(
			cur_iuse.intersection(cur_use))
def _create_graph(self, allow_unsatisfied=False):
	"""Drain the dependency stack, routing Package entries through
	_add_pkg_deps and plain Dependency entries through _add_dep."""
	# NOTE(review): the enclosing loop header (presumably
	# 'while dep_stack:') and the failure/success returns appear to be
	# elided from this excerpt.
	dep_stack = self._dep_stack
	self.spinner.update()
	dep = dep_stack.pop()
	if isinstance(dep, Package):
		if not self._add_pkg_deps(dep,
			allow_unsatisfied=allow_unsatisfied):
	if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
def _add_dep(self, dep, allow_unsatisfied=False):
	"""Resolve a single Dependency: register blockers, select a matching
	package, and add it to the graph via _add_pkg."""
	# NOTE(review): several control-flow lines (the blocker check,
	# 'try:', early returns) appear to be elided from this excerpt;
	# structure below is a best-effort reconstruction.
	debug = "--debug" in self.myopts
	buildpkgonly = "--buildpkgonly" in self.myopts
	nodeps = "--nodeps" in self.myopts
	empty = "empty" in self.myparams
	deep = "deep" in self.myparams
	update = "--update" in self.myopts and dep.depth <= 1
	if not buildpkgonly and \
		dep.parent not in self._slot_collision_nodes:
		if dep.parent.onlydeps:
			# It's safe to ignore blockers if the
			# parent is an --onlydeps node.
		# The blocker applies to the root where
		# the parent is or will be installed.
		blocker = Blocker(atom=dep.atom,
			eapi=dep.parent.metadata["EAPI"],
			root=dep.parent.root)
		self._blocker_parents.add(blocker, dep.parent)
	dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
		onlydeps=dep.onlydeps)
	if dep.priority.optional:
		# This could be an unnecessary build-time dep
		# pulled in by --with-bdeps=y.
	if allow_unsatisfied:
		self._unsatisfied_deps.append(dep)
	self._unsatisfied_deps_for_display.append(
		((dep.root, dep.atom), {"myparent":dep.parent}))
	# In some cases, dep_check will return deps that shouldn't
	# be processed any further, so they are identified and
	# discarded here. Try to discard as few as possible since
	# discarded dependencies reduce the amount of information
	# available for optimization of merge order.
	if dep.priority.satisfied and \
		not dep_pkg.installed and \
		not (existing_node or empty or deep or update):
		if dep.root == self.target_root:
			myarg = self._iter_atoms_for_pkg(dep_pkg).next()
		except StopIteration:
		except portage.exception.InvalidDependString:
			if not dep_pkg.installed:
				# This shouldn't happen since the package
				# should have been masked.
		self._ignored_deps.append(dep)
	if not self._add_pkg(dep_pkg, dep):
def _add_pkg(self, pkg, dep):
	"""Add a selected package (and its relationship to dep.parent) to
	the dependency graph, handling slot collisions and recursion."""
	# NOTE(review): this excerpt appears to have many lines elided
	# ('try:', 'else:', early returns, 'if existing_node:'); the
	# indentation below is a best-effort reconstruction -- verify
	# against the full file before relying on the structure.
	myparent = dep.parent
	priority = dep.priority
	if priority is None:
		priority = DepPriority()
	"""
	Fills the digraph with nodes comprised of packages to merge.
	mybigkey is the package spec of the package to merge.
	myparent is the package depending on mybigkey ( or None )
	addme = Should we add this package to the digraph or are we just looking at it's deps?
		Think --onlydeps, we need to ignore packages in that case.
	"""
	#IUSE-aware emerge -> USE DEP aware depgraph
	#"no downgrade" emerge
	# Ensure that the dependencies of the same package
	# are never processed more than once.
	previously_added = pkg in self.digraph
	# select the correct /var database that we'll be checking against
	vardbapi = self.trees[pkg.root]["vartree"].dbapi
	pkgsettings = self.pkgsettings[pkg.root]
	arg_atoms = list(self._iter_atoms_for_pkg(pkg))
	except portage.exception.InvalidDependString, e:
		if not pkg.installed:
			show_invalid_depstring_notice(
				pkg, pkg.metadata["PROVIDE"], str(e))
	if not pkg.onlydeps:
		if not pkg.installed and \
			"empty" not in self.myparams and \
			vardbapi.match(pkg.slot_atom):
			# Increase the priority of dependencies on packages that
			# are being rebuilt. This optimizes merge order so that
			# dependencies are rebuilt/updated as soon as possible,
			# which is needed especially when emerge is called by
			# revdep-rebuild since dependencies may be affected by ABI
			# breakage that has rendered them useless. Don't adjust
			# priority here when in "empty" mode since all packages
			# are being merged in that case.
			priority.rebuild = True

		existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
		slot_collision = False
		existing_node_matches = pkg.cpv == existing_node.cpv
		if existing_node_matches and \
			pkg != existing_node and \
			dep.atom is not None:
			# Use package set for matching since it will match via
			# PROVIDE when necessary, while match_from_list does not.
			atom_set = InternalPackageSet(initial_atoms=[dep.atom])
			if not atom_set.findAtomForPackage(existing_node):
				existing_node_matches = False
		if existing_node_matches:
			# The existing node can be reused.
			for parent_atom in arg_atoms:
				parent, atom = parent_atom
				self.digraph.add(existing_node, parent,
				self._add_parent_atom(existing_node, parent_atom)
			# If a direct circular dependency is not an unsatisfied
			# buildtime dependency then drop it here since otherwise
			# it can skew the merge order calculation in an unwanted
			if existing_node != myparent or \
				(priority.buildtime and not priority.satisfied):
				self.digraph.addnode(existing_node, myparent,
				if dep.atom is not None and dep.parent is not None:
					self._add_parent_atom(existing_node,
						(dep.parent, dep.atom))
			# A slot collision has occurred.  Sometimes this coincides
			# with unresolvable blockers, so the slot collision will be
			# shown later if there are no unresolvable blockers.
			self._add_slot_conflict(pkg)
			slot_collision = True
		# Now add this node to the graph so that self.display()
		# can show use flags and --tree portage.output.  This node is
		# only being partially added to the graph.  It must not be
		# allowed to interfere with the other nodes that have been
		# added.  Do not overwrite data for existing nodes in
		# self.mydbapi since that data will be used for blocker
		# Even though the graph is now invalid, continue to process
		# dependencies so that things like --fetchonly can still
		# function despite collisions.
	elif not previously_added:
		self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
		self.mydbapi[pkg.root].cpv_inject(pkg)
		self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
	if not pkg.installed:
		# Allow this package to satisfy old-style virtuals in case it
		# doesn't already. Any pre-existing providers will be preferred
		pkgsettings.setinst(pkg.cpv, pkg.metadata)
		# For consistency, also update the global virtuals.
		settings = self.roots[pkg.root].settings
		settings.setinst(pkg.cpv, pkg.metadata)
	except portage.exception.InvalidDependString, e:
		show_invalid_depstring_notice(
			pkg, pkg.metadata["PROVIDE"], str(e))
	self._set_nodes.add(pkg)
	# Do this even when addme is False (--onlydeps) so that the
	# parent/child relationship is always known in case
	# self._show_slot_collision_notice() needs to be called later.
	self.digraph.add(pkg, myparent, priority=priority)
	if dep.atom is not None and dep.parent is not None:
		self._add_parent_atom(pkg, (dep.parent, dep.atom))
	for parent_atom in arg_atoms:
		parent, atom = parent_atom
		self.digraph.add(pkg, parent, priority=priority)
		self._add_parent_atom(pkg, parent_atom)
	""" This section determines whether we go deeper into dependencies or not.
	    We want to go deeper on a few occasions:
	    Installing package A, we need to make sure package A's deps are met.
	    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
	    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
	"""
	dep_stack = self._dep_stack
	if "recurse" not in self.myparams:
	elif pkg.installed and \
		"deep" not in self.myparams:
		# Installed pkg without --deep: defer its deps for later.
		dep_stack = self._ignored_deps
	self.spinner.update()
	if not previously_added:
		dep_stack.append(pkg)
5182 def _add_parent_atom(self, pkg, parent_atom):
5183 parent_atoms = self._parent_atoms.get(pkg)
5184 if parent_atoms is None:
5185 parent_atoms = set()
5186 self._parent_atoms[pkg] = parent_atoms
5187 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
	"""Record pkg as a member of the colliding-package set for its
	(slot_atom, root) key."""
	self._slot_collision_nodes.add(pkg)
	slot_key = (pkg.slot_atom, pkg.root)
	slot_nodes = self._slot_collision_info.get(slot_key)
	if slot_nodes is None:
		# NOTE(review): the initialization of slot_nodes (presumably
		# 'slot_nodes = set()') is elided from this excerpt.
	# Include the package already occupying the slot in the conflict set.
	slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
	self._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
	"""Extract DEPEND/RDEPEND/PDEPEND from pkg's metadata, select
	matching atoms, and feed each one to _add_dep."""
	# NOTE(review): this excerpt appears to have lines elided
	# (initialization of edepend/myroot/mykey/jbigkey, 'try:'/'else:'
	# blocks, early returns); indentation reconstructed best-effort.
	mytype = pkg.type_name
	metadata = pkg.metadata
	myuse = pkg.use.enabled
	depth = pkg.depth + 1
	removal_action = "remove" in self.myparams
	depkeys = ["DEPEND","RDEPEND","PDEPEND"]
	edepend[k] = metadata[k]
	if not pkg.built and \
		"--buildpkgonly" in self.myopts and \
		"deep" not in self.myparams and \
		"empty" not in self.myparams:
		edepend["RDEPEND"] = ""
		edepend["PDEPEND"] = ""
	bdeps_optional = False
	if pkg.built and not removal_action:
		if self.myopts.get("--with-bdeps", "n") == "y":
			# Pull in build time deps as requested, but marked them as
			# "optional" since they are not strictly required. This allows
			# more freedom in the merge order calculation for solving
			# circular dependencies. Don't convert to PDEPEND since that
			# could make --with-bdeps=y less effective if it is used to
			# adjust merge order to prevent built_with_use() calls from
			bdeps_optional = True
		# built packages do not have build time dependencies.
		edepend["DEPEND"] = ""
	if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
		edepend["DEPEND"] = ""
	# (dep_root, dep_string, priority) triples: DEPEND applies to "/",
	# RDEPEND/PDEPEND apply to the target root.
		("/", edepend["DEPEND"],
			self._priority(buildtime=(not bdeps_optional),
			optional=bdeps_optional)),
		(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
		(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
	debug = "--debug" in self.myopts
	strict = mytype != "installed"
	for dep_root, dep_string, dep_priority in deps:
			print "Parent:    ", jbigkey
			print "Depstring:", dep_string
			print "Priority:", dep_priority
		vardb = self.roots[dep_root].trees["vartree"].dbapi
		selected_atoms = self._select_atoms(dep_root,
			dep_string, myuse=myuse, parent=pkg, strict=strict,
			priority=dep_priority)
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(jbigkey, dep_string, str(e))
			print "Candidates:", selected_atoms
		for atom in selected_atoms:
			atom = portage.dep.Atom(atom)
			mypriority = dep_priority.copy()
			# Mark satisfied if a non-blocker atom is already installed.
			if not atom.blocker and vardb.match(atom):
				mypriority.satisfied = True
			if not self._add_dep(Dependency(atom=atom,
				blocker=atom.blocker, depth=depth, parent=pkg,
				priority=mypriority, root=dep_root),
				allow_unsatisfied=allow_unsatisfied):
			except portage.exception.InvalidAtom, e:
				show_invalid_depstring_notice(
					pkg, dep_string, str(e))
				if not pkg.installed:
		print "Exiting...", jbigkey
	except portage.exception.AmbiguousPackageName, e:
		portage.writemsg("\n\n!!! An atom in the dependencies " + \
			"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
		portage.writemsg("    %s\n" % cpv, noiselevel=-1)
		portage.writemsg("\n", noiselevel=-1)
		if mytype == "binary":
			"!!! This binary package cannot be installed: '%s'\n" % \
			mykey, noiselevel=-1)
		elif mytype == "ebuild":
			portdb = self.roots[myroot].trees["porttree"].dbapi
			myebuild, mylocation = portdb.findname2(mykey)
			portage.writemsg("!!! This ebuild cannot be installed: " + \
				"'%s'\n" % myebuild, noiselevel=-1)
		portage.writemsg("!!! Please notify the package maintainer " + \
			"that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
	"""Construct a dep priority object suited to the current mode:
	UnmergeDepPriority during removal actions, DepPriority otherwise."""
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	# NOTE(review): an 'else:' line appears to be elided here.
	priority_constructor = DepPriority
	return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
	"""
	@param root_config: a root config instance
	@type root_config: RootConfig
	@param atom_without_category: an atom without a category component
	@type atom_without_category: String
	@returns: a list of atoms containing categories (possibly empty)
	"""
	# NOTE(review): the category-set accumulator, 'deps' list init,
	# and return appear to be elided from this excerpt.
	null_cp = portage.dep_getkey(insert_category_into_atom(
		atom_without_category, "null"))
	cat, atom_pn = portage.catsplit(null_cp)

	dbs = self._filtered_trees[root_config.root]["dbs"]
	# Probe every configured database for categories containing atom_pn.
	for db, pkg_type, built, installed, db_keys in dbs:
		for cat in db.categories:
			if db.cp_list("%s/%s" % (cat, atom_pn)):
	for cat in categories:
		deps.append(insert_category_into_atom(
			atom_without_category, cat))
def _have_new_virt(self, root, atom_cp):
	"""Return whether atom_cp exists in any of root's filtered package
	databases (i.e. a new-style virtual provides it)."""
	# NOTE(review): the result flag initialization and return appear
	# to be elided from this excerpt.
	for db, pkg_type, built, installed, db_keys in \
		self._filtered_trees[root]["dbs"]:
		if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
	"""Yield (arg, atom) pairs from the command-line arguments whose
	atoms match pkg, skipping atoms better satisfied elsewhere."""
	# TODO: add multiple $ROOT support
	# NOTE(review): 'continue'/'break'/'yield' lines and the
	# higher_slot initialization appear to be elided from this excerpt.
	if pkg.root != self.target_root:
	atom_arg_map = self._atom_arg_map
	root_config = self.roots[pkg.root]
	for atom in self._set_atoms.iterAtomsForPackage(pkg):
		atom_cp = portage.dep_getkey(atom)
		# Skip old-style virtual matches shadowed by a new-style virtual.
		if atom_cp != pkg.cp and \
			self._have_new_virt(pkg.root, atom_cp):
		visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
		visible_pkgs.reverse() # descending order
		for visible_pkg in visible_pkgs:
			if visible_pkg.cp != atom_cp:
			if pkg >= visible_pkg:
				# This is descending order, and we're not
				# interested in any versions <= pkg given.
			if pkg.slot_atom != visible_pkg.slot_atom:
				higher_slot = visible_pkg
		if higher_slot is not None:
		for arg in atom_arg_map[(atom, pkg.root)]:
			if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
	"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
	appropriate depgraph and return a favorite list."""
	# NOTE(review): this excerpt appears to have many lines elided
	# (loop headers such as 'for x in myfiles:', 'try:'/'else:'
	# branches, accumulator initializations and several returns); the
	# indentation below is a best-effort reconstruction -- verify
	# against the full file before relying on the structure.
	debug = "--debug" in self.myopts
	root_config = self.roots[self.target_root]
	sets = root_config.sets
	getSetAtoms = root_config.setconfig.getSetAtoms
	myroot = self.target_root
	dbs = self._filtered_trees[myroot]["dbs"]
	vardb = self.trees[myroot]["vartree"].dbapi
	real_vardb = self._trees_orig[myroot]["vartree"].dbapi
	portdb = self.trees[myroot]["porttree"].dbapi
	bindb = self.trees[myroot]["bintree"].dbapi
	pkgsettings = self.pkgsettings[myroot]
	onlydeps = "--onlydeps" in self.myopts
	# --- classify each command-line argument by extension/shape ---
	ext = os.path.splitext(x)[1]
	# Binary package (.tbz2): locate it under PKGDIR if needed.
	if not os.path.exists(x):
		os.path.join(pkgsettings["PKGDIR"], "All", x)):
		x = os.path.join(pkgsettings["PKGDIR"], "All", x)
	elif os.path.exists(
		os.path.join(pkgsettings["PKGDIR"], x)):
		x = os.path.join(pkgsettings["PKGDIR"], x)
	print "\n\n!!! Binary package '"+str(x)+"' does not exist."
	print "!!! Please ensure the tbz2 exists as specified.\n"
	return 0, myfavorites
	mytbz2=portage.xpak.tbz2(x)
	mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
	# The tbz2 must live at the canonical PKGDIR location.
	if os.path.realpath(x) != \
		os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
		print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
		return 0, myfavorites
	db_keys = list(bindb._aux_cache_keys)
	metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
	pkg = Package(type_name="binary", root_config=root_config,
		cpv=mykey, built=True, metadata=metadata,
	self._pkg_cache[pkg] = pkg
	args.append(PackageArg(arg=x, package=pkg,
		root_config=root_config))
	elif ext==".ebuild":
		# Raw ebuild path: derive category/PN from the tree layout.
		ebuild_path = portage.util.normalize_path(os.path.abspath(x))
		pkgdir = os.path.dirname(ebuild_path)
		tree_root = os.path.dirname(os.path.dirname(pkgdir))
		cp = pkgdir[len(tree_root)+1:]
		e = portage.exception.PackageNotFound(
			("%s is not in a valid portage tree " + \
			"hierarchy or does not exist") % x)
		if not portage.isvalidatom(cp):
		cat = portage.catsplit(cp)[0]
		mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
		if not portage.isvalidatom("="+mykey):
		ebuild_path = portdb.findname(mykey)
		if ebuild_path != os.path.join(os.path.realpath(tree_root),
			cp, os.path.basename(ebuild_path)):
			print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
			return 0, myfavorites
		if mykey not in portdb.xmatch(
			"match-visible", portage.dep_getkey(mykey)):
			print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
			print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
			print colorize("BAD", "*** page for details.")
			countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
		raise portage.exception.PackageNotFound(
			"%s is not in a valid portage tree hierarchy or does not exist" % x)
		db_keys = list(portdb._aux_cache_keys)
		metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
		pkg = Package(type_name="ebuild", root_config=root_config,
			cpv=mykey, metadata=metadata, onlydeps=onlydeps)
		pkgsettings.setcpv(pkg)
		pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
		self._pkg_cache[pkg] = pkg
		args.append(PackageArg(arg=x, package=pkg,
			root_config=root_config))
	elif x.startswith(os.path.sep):
		# Absolute path: resolve to the owning package later.
		if not x.startswith(myroot):
			portage.writemsg(("\n\n!!! '%s' does not start with" + \
				" $ROOT.\n") % x, noiselevel=-1)
		# Queue these up since it's most efficient to handle
		# multiple files in a single iter_owners() call.
		lookup_owners.append(x)
	if x in ("system", "world"):
	if x.startswith(SETPREFIX):
		s = x[len(SETPREFIX):]
		raise portage.exception.PackageSetNotFound(s)
		# Recursively expand sets so that containment tests in
		# self._get_parent_sets() properly match atoms in nested
		# sets (like if world contains system).
		expanded_set = InternalPackageSet(
			initial_atoms=getSetAtoms(s))
		self._sets[s] = expanded_set
		args.append(SetArg(arg=x, set=expanded_set,
			root_config=root_config))
	if not is_valid_package_atom(x):
		portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
	# Don't expand categories or old-style virtuals here unless
	# necessary. Expansion of old-style virtuals here causes at
	# least the following problems:
	#   1) It's more difficult to determine which set(s) an atom
	#      came from, if any.
	#   2) It takes away freedom from the resolver to choose other
	#      possible expansions when necessary.
	args.append(AtomArg(arg=x, atom=x,
		root_config=root_config))
	expanded_atoms = self._dep_expand(root_config, x)
	installed_cp_set = set()
	for atom in expanded_atoms:
		atom_cp = portage.dep_getkey(atom)
		if vardb.cp_list(atom_cp):
			installed_cp_set.add(atom_cp)
	# Prefer the single installed category when expansion is ambiguous.
	if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
		installed_cp = iter(installed_cp_set).next()
		expanded_atoms = [atom for atom in expanded_atoms \
			if portage.dep_getkey(atom) == installed_cp]
	if len(expanded_atoms) > 1:
		ambiguous_package_name(x, expanded_atoms, root_config,
			self.spinner, self.myopts)
		return False, myfavorites
	atom = expanded_atoms[0]
	null_atom = insert_category_into_atom(x, "null")
	null_cp = portage.dep_getkey(null_atom)
	cat, atom_pn = portage.catsplit(null_cp)
	virts_p = root_config.settings.get_virts_p().get(atom_pn)
	# Allow the depgraph to choose which virtual.
	atom = insert_category_into_atom(x, "virtual")
	atom = insert_category_into_atom(x, "null")
	args.append(AtomArg(arg=x, atom=atom,
		root_config=root_config))
	# --- resolve queued file paths to owning packages ---
	search_for_multiple = False
	if len(lookup_owners) > 1:
		search_for_multiple = True
	for x in lookup_owners:
		if not search_for_multiple and os.path.isdir(x):
			search_for_multiple = True
		relative_paths.append(x[len(myroot):])
	for pkg, relative_path in \
		real_vardb._owners.iter_owners(relative_paths):
		owners.add(pkg.mycpv)
		if not search_for_multiple:
	portage.writemsg(("\n\n!!! '%s' is not claimed " + \
		"by any package.\n") % lookup_owners[0], noiselevel=-1)
	slot = vardb.aux_get(cpv, ["SLOT"])[0]
	# portage now masks packages with missing slot, but it's
	# possible that one was installed by an older version
	atom = portage.cpv_getkey(cpv)
	atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
	args.append(AtomArg(arg=atom, atom=atom,
		root_config=root_config))
	if "--update" in self.myopts:
		# In some cases, the greedy slots behavior can pull in a slot that
		# the user would want to uninstall due to it being blocked by a
		# newer version in a different slot. Therefore, it's necessary to
		# detect and discard any that should be uninstalled. Each time
		# that arguments are updated, package selections are repeated in
		# order to ensure consistency with the current arguments:
		#
		#  1) Initialize args
		#  2) Select packages and generate initial greedy atoms
		#  3) Update args with greedy atoms
		#  4) Select packages and generate greedy atoms again, while
		#     accounting for any blockers between selected packages
		#  5) Update args with revised greedy atoms
		self._set_args(args)
		greedy_args.append(arg)
		if not isinstance(arg, AtomArg):
		for atom in self._greedy_slots(arg.root_config, arg.atom):
			AtomArg(arg=arg.arg, atom=atom,
				root_config=arg.root_config))
		self._set_args(greedy_args)
		# Revise greedy atoms, accounting for any blockers
		# between selected packages.
		revised_greedy_args = []
		revised_greedy_args.append(arg)
		if not isinstance(arg, AtomArg):
		for atom in self._greedy_slots(arg.root_config, arg.atom,
			blocker_lookahead=True):
			revised_greedy_args.append(
				AtomArg(arg=arg.arg, atom=atom,
					root_config=arg.root_config))
		args = revised_greedy_args
		del revised_greedy_args
	self._set_args(args)
	# Deduplicate favorites while keeping atoms and set names.
	myfavorites = set(myfavorites)
	if isinstance(arg, (AtomArg, PackageArg)):
		myfavorites.add(arg.atom)
	elif isinstance(arg, SetArg):
		myfavorites.add(arg.arg)
	myfavorites = list(myfavorites)
	pprovideddict = pkgsettings.pprovideddict
	portage.writemsg("\n", noiselevel=-1)
	# Order needs to be preserved since a feature of --nodeps
	# is to allow the user to force a specific merge order.
	for atom in arg.set:
		self.spinner.update()
		dep = Dependency(atom=atom, onlydeps=onlydeps,
			root=myroot, parent=arg)
		atom_cp = portage.dep_getkey(atom)
		pprovided = pprovideddict.get(portage.dep_getkey(atom))
		if pprovided and portage.match_from_list(atom, pprovided):
			# A provided package has been specified on the command line.
			self._pprovided_args.append((arg, atom))
		if isinstance(arg, PackageArg):
			if not self._add_pkg(arg.package, dep) or \
				not self._create_graph():
				sys.stderr.write(("\n\n!!! Problem resolving " + \
					"dependencies for %s\n") % arg.arg)
				return 0, myfavorites
		portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
			(arg, atom), noiselevel=-1)
		pkg, existing_node = self._select_package(
			myroot, atom, onlydeps=onlydeps)
		if not (isinstance(arg, SetArg) and \
			arg.name in ("system", "world")):
			self._unsatisfied_deps_for_display.append(
				((myroot, atom), {}))
			return 0, myfavorites
		self._missing_args.append((arg, atom))
		if atom_cp != pkg.cp:
			# For old-style virtuals, we need to repeat the
			# package.provided check against the selected package.
			expanded_atom = atom.replace(atom_cp, pkg.cp)
			pprovided = pprovideddict.get(pkg.cp)
			portage.match_from_list(expanded_atom, pprovided):
			# A provided package has been
			# specified on the command line.
			self._pprovided_args.append((arg, atom))
		if pkg.installed and "selective" not in self.myparams:
			self._unsatisfied_deps_for_display.append(
				((myroot, atom), {}))
			# Previous behavior was to bail out in this case, but
			# since the dep is satisfied by the installed package,
			# it's more friendly to continue building the graph
			# and just show a warning message. Therefore, only bail
			# out here if the atom is not from either the system or
			if not (isinstance(arg, SetArg) and \
				arg.name in ("system", "world")):
				return 0, myfavorites
		# Add the selected package to the graph as soon as possible
		# so that later dep_check() calls can use it as feedback
		# for making more consistent atom selections.
		if not self._add_pkg(pkg, dep):
			if isinstance(arg, SetArg):
				sys.stderr.write(("\n\n!!! Problem resolving " + \
					"dependencies for %s from %s\n") % \
			sys.stderr.write(("\n\n!!! Problem resolving " + \
				"dependencies for %s\n") % atom)
			return 0, myfavorites
	except portage.exception.MissingSignature, e:
		portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
		portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
		portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
		portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
		portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
		return 0, myfavorites
	except portage.exception.InvalidSignature, e:
		portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
		portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
		portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
		portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
		portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
		return 0, myfavorites
	except SystemExit, e:
		raise # Needed else can't exit
	except Exception, e:
		print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
		print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
	# Now that the root packages have been added to the graph,
	# process the dependencies.
	if not self._create_graph():
		return 0, myfavorites
	if "--usepkgonly" in self.myopts:
		for xs in self.digraph.all_nodes():
			if not isinstance(xs, Package):
			if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
				print "Missing binary for:",xs[2]
	except self._unknown_internal_error:
		return False, myfavorites
	# We're true here unless we are missing binaries.
	return (not missing,myfavorites)
def _set_args(self, args):
	"""
	Create the "args" package set from atoms and packages given as
	arguments. This method can be called multiple times if necessary.
	The package selection cache is automatically invalidated, since
	arguments influence package selections.
	"""
	# NOTE(review): loop headers and list initializations appear to be
	# elided from this excerpt; structure reconstructed best-effort.
	args_set = self._sets["args"]
	if not isinstance(arg, (AtomArg, PackageArg)):
	if atom in args_set:
	# Rebuild the combined atom set from every registered set.
	self._set_atoms.clear()
	self._set_atoms.update(chain(*self._sets.itervalues()))
	atom_arg_map = self._atom_arg_map
	atom_arg_map.clear()
	for atom in arg.set:
		atom_key = (atom, arg.root_config.root)
		refs = atom_arg_map.get(atom_key)
		atom_arg_map[atom_key] = refs
	# Invalidate the package selection cache, since
	# arguments influence package selections.
	self._highest_pkg_cache.clear()
	for trees in self._filtered_trees.itervalues():
		trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
	"""
	Return a list of slot atoms corresponding to installed slots that
	differ from the slot of the highest visible match. When
	blocker_lookahead is True, slot atoms that would trigger a blocker
	conflict are automatically discarded, potentially allowing automatic
	uninstallation of older slots when appropriate.
	"""
	# NOTE(review): guard clauses, early returns and loop headers
	# appear to be elided from this excerpt; reconstruction below.
	highest_pkg, in_graph = self._select_package(root_config.root, atom)
	if highest_pkg is None:
	vardb = root_config.trees["vartree"].dbapi
	for cpv in vardb.match(atom):
		# don't mix new virtuals with old virtuals
		if portage.cpv_getkey(cpv) == highest_pkg.cp:
			slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
	slots.add(highest_pkg.metadata["SLOT"])
	slots.remove(highest_pkg.metadata["SLOT"])
	# Select, for each remaining slot, the best visible package that is
	# still lower than highest_pkg.
	slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
	pkg, in_graph = self._select_package(root_config.root, slot_atom)
	if pkg is not None and \
		pkg.cp == highest_pkg.cp and pkg < highest_pkg:
		greedy_pkgs.append(pkg)
	if not blocker_lookahead:
		return [pkg.slot_atom for pkg in greedy_pkgs]
	# Collect each candidate's blocker atoms from its dep strings.
	blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
	for pkg in greedy_pkgs + [highest_pkg]:
		dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
		atoms = self._select_atoms(
			pkg.root, dep_str, pkg.use.enabled,
			parent=pkg, strict=True)
		except portage.exception.InvalidDependString:
		blocker_atoms = (x for x in atoms if x.blocker)
		blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
	if highest_pkg not in blockers:
	# filter packages with invalid deps
	greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
	# filter packages that conflict with highest_pkg
	greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
		(blockers[highest_pkg].findAtomForPackage(pkg) or \
		blockers[pkg].findAtomForPackage(highest_pkg))]
	# If two packages conflict, discard the lower version.
	discard_pkgs = set()
	greedy_pkgs.sort(reverse=True)
	for i in xrange(len(greedy_pkgs) - 1):
		pkg1 = greedy_pkgs[i]
		if pkg1 in discard_pkgs:
		for j in xrange(i + 1, len(greedy_pkgs)):
			pkg2 = greedy_pkgs[j]
			if pkg2 in discard_pkgs:
			if blockers[pkg1].findAtomForPackage(pkg2) or \
				blockers[pkg2].findAtomForPackage(pkg1):
				discard_pkgs.add(pkg2)
	return [pkg.slot_atom for pkg in greedy_pkgs \
		if pkg not in discard_pkgs]
# Thin wrapper: run atom selection against the graph trees so that
# already-graphed / installed-and-not-replaced packages are preferred.
5877 def _select_atoms_from_graph(self, *pargs, **kwargs):
5879 Prefer atoms matching packages that have already been
5880 added to the graph or those that are installed and have
5881 not been scheduled for replacement.
# Overriding "trees" is the only difference from the default path.
5883 kwargs["trees"] = self._graph_trees
5884 return self._select_atoms_highest_available(*pargs, **kwargs)
# Evaluate a dependency string via portage.dep_check() and return the
# selected atoms. Raises InvalidDependString on a failed check.
5886 def _select_atoms_highest_available(self, root, depstring,
5887 myuse=None, parent=None, strict=True, trees=None, priority=None):
5888 """This will raise InvalidDependString if necessary. If trees is
5889 None then self._filtered_trees is used."""
5890 pkgsettings = self.pkgsettings[root]
5892 trees = self._filtered_trees
5893 if not getattr(priority, "buildtime", False):
5894 # The parent should only be passed to dep_check() for buildtime
5895 # dependencies since that's the only case when it's appropriate
5896 # to trigger the circular dependency avoidance code which uses it.
5897 # It's important not to trigger the same circular dependency
5898 # avoidance code for runtime dependencies since it's not needed
5899 # and it can promote an incorrect package choice.
# The parent is smuggled to dep_check() through the trees dict and
# removed again afterwards (see the pop() below).
5903 if parent is not None:
5904 trees[root]["parent"] = parent
# Temporarily relax module-global strictness for the duration of the
# dep_check() call; restored unconditionally below.
5906 portage.dep._dep_check_strict = False
5907 mycheck = portage.dep_check(depstring, None,
5908 pkgsettings, myuse=myuse,
5909 myroot=root, trees=trees)
5911 if parent is not None:
5912 trees[root].pop("parent")
5913 portage.dep._dep_check_strict = True
5915 raise portage.exception.InvalidDependString(mycheck[1])
# On success mycheck[1] holds the chosen atoms.
5916 selected_atoms = mycheck[1]
5917 return selected_atoms
# Explain to the user why an atom could not be satisfied: missing USE
# flags / IUSE, masked packages (including EAPI masks), or simply no
# matching ebuilds, then walk the digraph to show which parents pulled
# the dependency in.
5919 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5920 atom = portage.dep.Atom(atom)
5921 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-less variant of the atom so matching can distinguish
# "no such package" from "wrong USE configuration".
5922 atom_without_use = atom
5924 atom_without_use = portage.dep.remove_slot(atom)
5926 atom_without_use += ":" + atom.slot
5927 atom_without_use = portage.dep.Atom(atom_without_use)
5928 xinfo = '"%s"' % atom
5931 # Discard null/ from failed cpv_expand category expansion.
5932 xinfo = xinfo.replace("null/", "")
5933 masked_packages = []
5935 missing_licenses = []
5936 have_eapi_mask = False
5937 pkgsettings = self.pkgsettings[root]
5938 implicit_iuse = pkgsettings._get_implicit_iuse()
5939 root_config = self.roots[root]
5940 portdb = self.roots[root].trees["porttree"].dbapi
5941 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidates that match
# when USE deps are ignored, and record the mask reason for each.
5942 for db, pkg_type, built, installed, db_keys in dbs:
5946 if hasattr(db, "xmatch"):
5947 cpv_list = db.xmatch("match-all", atom_without_use)
5949 cpv_list = db.match(atom_without_use)
5952 for cpv in cpv_list:
5953 metadata, mreasons = get_mask_info(root_config, cpv,
5954 pkgsettings, db, pkg_type, built, installed, db_keys)
5955 if metadata is not None:
5956 pkg = Package(built=built, cpv=cpv,
5957 installed=installed, metadata=metadata,
5958 root_config=root_config)
5959 if pkg.cp != atom.cp:
5960 # A cpv can be returned from dbapi.match() as an
5961 # old-style virtual match even in cases when the
5962 # package does not actually PROVIDE the virtual.
5963 # Filter out any such false matches here.
5964 if not atom_set.findAtomForPackage(pkg):
# An unmasked candidate that still failed must differ only in USE.
5966 if atom.use and not mreasons:
5967 missing_use.append(pkg)
5969 masked_packages.append(
5970 (root_config, pkgsettings, cpv, metadata, mreasons))
# For the USE-only failures, work out whether the flag is absent from
# IUSE entirely or merely set the wrong way.
5972 missing_use_reasons = []
5973 missing_iuse_reasons = []
5974 for pkg in missing_use:
5975 use = pkg.use.enabled
5976 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5977 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5979 for x in atom.use.required:
5980 if iuse_re.match(x) is None:
5981 missing_iuse.append(x)
5984 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5985 missing_iuse_reasons.append((pkg, mreasons))
5987 need_enable = sorted(atom.use.enabled.difference(use))
5988 need_disable = sorted(atom.use.disabled.intersection(use))
5989 if need_enable or need_disable:
5991 changes.extend(colorize("red", "+" + x) \
5992 for x in need_enable)
5993 changes.extend(colorize("blue", "-" + x) \
5994 for x in need_disable)
5995 mreasons.append("Change USE: %s" % " ".join(changes))
5996 missing_use_reasons.append((pkg, mreasons))
5998 if missing_iuse_reasons and not missing_use_reasons:
5999 missing_use_reasons = missing_iuse_reasons
6000 elif missing_use_reasons:
6001 # Only show the latest version.
6002 del missing_use_reasons[1:]
6004 if missing_use_reasons:
6005 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6006 print "!!! One of the following packages is required to complete your request:"
6007 for pkg, mreasons in missing_use_reasons:
6008 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6010 elif masked_packages:
6012 colorize("BAD", "All ebuilds that could satisfy ") + \
6013 colorize("INFORM", xinfo) + \
6014 colorize("BAD", " have been masked.")
6015 print "!!! One of the following masked packages is required to complete your request:"
6016 have_eapi_mask = show_masked_packages(masked_packages)
# Extra hint when the mask is due to an unsupported EAPI: only a
# portage upgrade can help there.
6019 msg = ("The current version of portage supports " + \
6020 "EAPI '%s'. You must upgrade to a newer version" + \
6021 " of portage before EAPI masked packages can" + \
6022 " be installed.") % portage.const.EAPI
6023 from textwrap import wrap
6024 for line in wrap(msg, 75):
6029 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6031 # Show parent nodes and the argument that pulled them in.
6032 traversed_nodes = set()
6035 while node is not None:
6036 traversed_nodes.add(node)
6037 msg.append('(dependency required by "%s" [%s])' % \
6038 (colorize('INFORM', str(node.cpv)), node.type_name))
6039 # When traversing to parents, prefer arguments over packages
6040 # since arguments are root nodes. Never traverse the same
6041 # package twice, in order to prevent an infinite loop.
6042 selected_parent = None
6043 for parent in self.digraph.parent_nodes(node):
6044 if isinstance(parent, DependencyArg):
6045 msg.append('(dependency required by "%s" [argument])' % \
6046 (colorize('INFORM', str(parent))))
6047 selected_parent = None
6049 if parent not in traversed_nodes:
6050 selected_parent = parent
6051 node = selected_parent
# Cached front-end for package selection: consult and maintain
# _highest_pkg_cache, delegating the real work to
# _select_pkg_highest_available_imp().
6057 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6058 cache_key = (root, atom, onlydeps)
6059 ret = self._highest_pkg_cache.get(cache_key)
# A cached hit may be stale with respect to graph membership: refresh
# "existing" from the slot map when the same package is now in-graph.
6062 if pkg and not existing:
6063 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6064 if existing and existing == pkg:
6065 # Update the cache to reflect that the
6066 # package has been added to the graph.
6068 self._highest_pkg_cache[cache_key] = ret
6070 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6071 self._highest_pkg_cache[cache_key] = ret
# Track visibility for later display/selection logic; installed
# packages with missing keywords are excluded from visible_pkgs.
6074 settings = pkg.root_config.settings
6075 if visible(settings, pkg) and not (pkg.installed and \
6076 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6077 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: scan all dbs (ebuild/binary/installed)
# for matches of `atom`, apply visibility, keyword, --noreplace /
# --newuse / --reinstall and virtual-preference rules, and return a
# (package, existing_graph_node) pair. Returns the best match last in
# matched_packages ("ebuild" type is the last resort).
6080 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6081 root_config = self.roots[root]
6082 pkgsettings = self.pkgsettings[root]
6083 dbs = self._filtered_trees[root]["dbs"]
6084 vardb = self.roots[root].trees["vartree"].dbapi
6085 portdb = self.roots[root].trees["porttree"].dbapi
6086 # List of acceptable packages, ordered by type preference.
6087 matched_packages = []
6088 highest_version = None
6089 if not isinstance(atom, portage.dep.Atom):
6090 atom = portage.dep.Atom(atom)
6092 atom_set = InternalPackageSet(initial_atoms=(atom,))
6093 existing_node = None
6095 usepkgonly = "--usepkgonly" in self.myopts
6096 empty = "empty" in self.myparams
6097 selective = "selective" in self.myparams
6099 noreplace = "--noreplace" in self.myopts
6100 # Behavior of the "selective" parameter depends on
6101 # whether or not a package matches an argument atom.
6102 # If an installed package provides an old-style
6103 # virtual that is no longer provided by an available
6104 # package, the installed package may match an argument
6105 # atom even though none of the available packages do.
6106 # Therefore, "selective" logic does not consider
6107 # whether or not an installed package matches an
6108 # argument atom. It only considers whether or not
6109 # available packages match argument atoms, which is
6110 # represented by the found_available_arg flag.
6111 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fall back
# to a fresh selection.
6112 for find_existing_node in True, False:
6115 for db, pkg_type, built, installed, db_keys in dbs:
6118 if installed and not find_existing_node:
6119 want_reinstall = reinstall or empty or \
6120 (found_available_arg and not selective)
6121 if want_reinstall and matched_packages:
6123 if hasattr(db, "xmatch"):
6124 cpv_list = db.xmatch("match-all", atom)
6126 cpv_list = db.match(atom)
6128 # USE=multislot can make an installed package appear as if
6129 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6130 # won't do any good as long as USE=multislot is enabled since
6131 # the newly built package still won't have the expected slot.
6132 # Therefore, assume that such SLOT dependencies are already
6133 # satisfied rather than forcing a rebuild.
6134 if installed and not cpv_list and atom.slot:
6135 for cpv in db.match(atom.cp):
6136 slot_available = False
6137 for other_db, other_type, other_built, \
6138 other_installed, other_keys in dbs:
6141 other_db.aux_get(cpv, ["SLOT"])[0]:
6142 slot_available = True
6146 if not slot_available:
6148 inst_pkg = self._pkg(cpv, "installed",
6149 root_config, installed=installed)
6150 # Remove the slot from the atom and verify that
6151 # the package matches the resulting atom.
6152 atom_without_slot = portage.dep.remove_slot(atom)
6154 atom_without_slot += str(atom.use)
6155 atom_without_slot = portage.dep.Atom(atom_without_slot)
6156 if portage.match_from_list(
6157 atom_without_slot, [inst_pkg]):
6158 cpv_list = [inst_pkg.cpv]
6163 pkg_status = "merge"
6164 if installed or onlydeps:
6165 pkg_status = "nomerge"
6168 for cpv in cpv_list:
6169 # Make --noreplace take precedence over --newuse.
6170 if not installed and noreplace and \
6171 cpv in vardb.match(atom):
6172 # If the installed version is masked, it may
6173 # be necessary to look at lower versions,
6174 # in case there is a visible downgrade.
6176 reinstall_for_flags = None
6177 cache_key = (pkg_type, root, cpv, pkg_status)
6178 calculated_use = True
6179 pkg = self._pkg_cache.get(cache_key)
6181 calculated_use = False
6183 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6186 pkg = Package(built=built, cpv=cpv,
6187 installed=installed, metadata=metadata,
6188 onlydeps=onlydeps, root_config=root_config,
6190 metadata = pkg.metadata
6191 if not built and ("?" in metadata["LICENSE"] or \
6192 "?" in metadata["PROVIDE"]):
6193 # This is avoided whenever possible because
6194 # it's expensive. It only needs to be done here
6195 # if it has an effect on visibility.
6196 pkgsettings.setcpv(pkg)
6197 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6198 calculated_use = True
6199 self._pkg_cache[pkg] = pkg
6201 if not installed or (built and matched_packages):
6202 # Only enforce visibility on installed packages
6203 # if there is at least one other visible package
6204 # available. By filtering installed masked packages
6205 # here, packages that have been masked since they
6206 # were installed can be automatically downgraded
6207 # to an unmasked version.
6209 if not visible(pkgsettings, pkg):
6211 except portage.exception.InvalidDependString:
6215 # Enable upgrade or downgrade to a version
6216 # with visible KEYWORDS when the installed
6217 # version is masked by KEYWORDS, but never
6218 # reinstall the same exact version only due
6219 # to a KEYWORDS mask.
6220 if built and matched_packages:
6222 different_version = None
6223 for avail_pkg in matched_packages:
6224 if not portage.dep.cpvequal(
6225 pkg.cpv, avail_pkg.cpv):
6226 different_version = avail_pkg
6228 if different_version is not None:
6231 pkgsettings._getMissingKeywords(
6232 pkg.cpv, pkg.metadata):
6235 # If the ebuild no longer exists or it's
6236 # keywords have been dropped, reject built
6237 # instances (installed or binary).
6238 # If --usepkgonly is enabled, assume that
6239 # the ebuild status should be ignored.
6243 pkg.cpv, "ebuild", root_config)
6244 except portage.exception.PackageNotFound:
6247 if not visible(pkgsettings, pkg_eb):
6250 if not pkg.built and not calculated_use:
6251 # This is avoided whenever possible because
6253 pkgsettings.setcpv(pkg)
6254 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6256 if pkg.cp != atom.cp:
6257 # A cpv can be returned from dbapi.match() as an
6258 # old-style virtual match even in cases when the
6259 # package does not actually PROVIDE the virtual.
6260 # Filter out any such false matches here.
6261 if not atom_set.findAtomForPackage(pkg):
6265 if root == self.target_root:
6267 # Ebuild USE must have been calculated prior
6268 # to this point, in case atoms have USE deps.
6269 myarg = self._iter_atoms_for_pkg(pkg).next()
6270 except StopIteration:
6272 except portage.exception.InvalidDependString:
6274 # masked by corruption
6276 if not installed and myarg:
6277 found_available_arg = True
# USE-conditional atoms can only be satisfied by non-built packages
# whose USE is adjustable; reject mismatched flags here.
6279 if atom.use and not pkg.built:
6280 use = pkg.use.enabled
6281 if atom.use.enabled.difference(use):
6283 if atom.use.disabled.intersection(use):
6285 if pkg.cp == atom_cp:
6286 if highest_version is None:
6287 highest_version = pkg
6288 elif pkg > highest_version:
6289 highest_version = pkg
6290 # At this point, we've found the highest visible
6291 # match from the current repo. Any lower versions
6292 # from this repo are ignored, so this so the loop
6293 # will always end with a break statement below
6295 if find_existing_node:
6296 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6299 if portage.dep.match_from_list(atom, [e_pkg]):
6300 if highest_version and \
6301 e_pkg.cp == atom_cp and \
6302 e_pkg < highest_version and \
6303 e_pkg.slot_atom != highest_version.slot_atom:
6304 # There is a higher version available in a
6305 # different slot, so this existing node is
6309 matched_packages.append(e_pkg)
6310 existing_node = e_pkg
6312 # Compare built package to current config and
6313 # reject the built package if necessary.
6314 if built and not installed and \
6315 ("--newuse" in self.myopts or \
6316 "--reinstall" in self.myopts):
6317 iuses = pkg.iuse.all
6318 old_use = pkg.use.enabled
6320 pkgsettings.setcpv(myeb)
6322 pkgsettings.setcpv(pkg)
6323 now_use = pkgsettings["PORTAGE_USE"].split()
6324 forced_flags = set()
6325 forced_flags.update(pkgsettings.useforce)
6326 forced_flags.update(pkgsettings.usemask)
6328 if myeb and not usepkgonly:
6329 cur_iuse = myeb.iuse.all
6330 if self._reinstall_for_flags(forced_flags,
6334 # Compare current config to installed package
6335 # and do not reinstall if possible.
6336 if not installed and \
6337 ("--newuse" in self.myopts or \
6338 "--reinstall" in self.myopts) and \
6339 cpv in vardb.match(atom):
6340 pkgsettings.setcpv(pkg)
6341 forced_flags = set()
6342 forced_flags.update(pkgsettings.useforce)
6343 forced_flags.update(pkgsettings.usemask)
6344 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6345 old_iuse = set(filter_iuse_defaults(
6346 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6347 cur_use = pkgsettings["PORTAGE_USE"].split()
6348 cur_iuse = pkg.iuse.all
6349 reinstall_for_flags = \
6350 self._reinstall_for_flags(
6351 forced_flags, old_use, old_iuse,
6353 if reinstall_for_flags:
6357 matched_packages.append(pkg)
6358 if reinstall_for_flags:
6359 self._reinstall_nodes[pkg] = \
6363 if not matched_packages:
6366 if "--debug" in self.myopts:
6367 for pkg in matched_packages:
6368 portage.writemsg("%s %s\n" % \
6369 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6371 # Filter out any old-style virtual matches if they are
6372 # mixed with new-style virtual matches.
6373 cp = portage.dep_getkey(atom)
6374 if len(matched_packages) > 1 and \
6375 "virtual" == portage.catsplit(cp)[0]:
6376 for pkg in matched_packages:
6379 # Got a new-style virtual, so filter
6380 # out any old-style virtuals.
6381 matched_packages = [pkg for pkg in matched_packages \
# With several surviving candidates, keep only those equal to the
# best version as chosen by portage.best().
6385 if len(matched_packages) > 1:
6386 bestmatch = portage.best(
6387 [pkg.cpv for pkg in matched_packages])
6388 matched_packages = [pkg for pkg in matched_packages \
6389 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6391 # ordered by type preference ("ebuild" type is the last resort)
6392 return matched_packages[-1], existing_node
# Select a package using only the graph db, so that already-graphed or
# installed-and-unreplaced packages win.
6394 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6396 Select packages that have already been added to the graph or
6397 those that are installed and have not been scheduled for
6400 graph_db = self._graph_trees[root]["porttree"].dbapi
6401 matches = graph_db.match_pkgs(atom)
6404 pkg = matches[-1] # highest match
# Report the corresponding in-graph node (if any) for the slot.
6405 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6406 return pkg, in_graph
# Pull deep dependencies of required sets (args/system/world) into the
# graph so that upgrades do not silently break initially-satisfied
# dependencies. Enabled by --complete-graph only.
6408 def _complete_graph(self):
6410 Add any deep dependencies of required sets (args, system, world) that
6411 have not been pulled into the graph yet. This ensures that the graph
6412 is consistent such that initially satisfied deep dependencies are not
6413 broken in the new graph. Initially unsatisfied dependencies are
6414 irrelevant since we only want to avoid breaking dependencies that are
6417 Since this method can consume enough time to disturb users, it is
6418 currently only enabled by the --complete-graph option.
6420 if "--buildpkgonly" in self.myopts or \
6421 "recurse" not in self.myparams:
6424 if "complete" not in self.myparams:
6425 # Skip this to avoid consuming enough time to disturb users.
6428 # Put the depgraph into a mode that causes it to only
6429 # select packages that have already been added to the
6430 # graph or those that are installed and have not been
6431 # scheduled for replacement. Also, toggle the "deep"
6432 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selection strategies defined above.
6434 self._select_atoms = self._select_atoms_from_graph
6435 self._select_package = self._select_pkg_from_graph
6436 already_deep = "deep" in self.myparams
6437 if not already_deep:
6438 self.myparams.add("deep")
6440 for root in self.roots:
6441 required_set_names = self._required_set_names.copy()
6442 if root == self.target_root and \
6443 (already_deep or "empty" in self.myparams):
6444 required_set_names.difference_update(self._sets)
6445 if not required_set_names and not self._ignored_deps:
6447 root_config = self.roots[root]
6448 setconfig = root_config.setconfig
6450 # Reuse existing SetArg instances when available.
6451 for arg in self.digraph.root_nodes():
6452 if not isinstance(arg, SetArg):
6454 if arg.root_config != root_config:
6456 if arg.name in required_set_names:
6458 required_set_names.remove(arg.name)
6459 # Create new SetArg instances only when necessary.
6460 for s in required_set_names:
6461 expanded_set = InternalPackageSet(
6462 initial_atoms=setconfig.getSetAtoms(s))
6463 atom = SETPREFIX + s
6464 args.append(SetArg(arg=atom, set=expanded_set,
6465 root_config=root_config))
6466 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom as a dependency to (re)resolve.
6468 for atom in arg.set:
6469 self._dep_stack.append(
6470 Dependency(atom=atom, root=root, parent=arg))
6471 if self._ignored_deps:
6472 self._dep_stack.extend(self._ignored_deps)
6473 self._ignored_deps = []
6474 if not self._create_graph(allow_unsatisfied=True):
6476 # Check the unsatisfied deps to see if any initially satisfied deps
6477 # will become unsatisfied due to an upgrade. Initially unsatisfied
6478 # deps are irrelevant since we only want to avoid breaking deps
6479 # that are initially satisfied.
6480 while self._unsatisfied_deps:
6481 dep = self._unsatisfied_deps.pop()
6482 matches = vardb.match_pkgs(dep.atom)
6484 self._initially_unsatisfied_deps.append(dep)
6486 # An scheduled installation broke a deep dependency.
6487 # Add the installed package to the graph so that it
6488 # will be appropriately reported as a slot collision
6489 # (possibly solvable via backtracking).
6490 pkg = matches[-1] # highest match
6491 if not self._add_pkg(pkg, dep):
6493 if not self._create_graph(allow_unsatisfied=True):
# Fetch (or construct and cache) a Package instance for the given cpv
# and type, raising PackageNotFound when metadata can't be loaded.
6497 def _pkg(self, cpv, type_name, root_config, installed=False):
6499 Get a package instance from the cache, or create a new
6500 one if necessary. Raises KeyError from aux_get if it
6501 failures for some reason (package does not exist or is
6506 operation = "nomerge"
# Cache key mirrors the one used by the main selection routine.
6507 pkg = self._pkg_cache.get(
6508 (type_name, root_config.root, cpv, operation))
6510 tree_type = self.pkg_tree_map[type_name]
6511 db = root_config.trees[tree_type].dbapi
6512 db_keys = list(self._trees_orig[root_config.root][
6513 tree_type].dbapi._aux_cache_keys)
6515 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6517 raise portage.exception.PackageNotFound(cpv)
6518 pkg = Package(cpv=cpv, metadata=metadata,
6519 root_config=root_config, installed=installed)
# Ebuilds need USE computed from the current config before use.
6520 if type_name == "ebuild":
6521 settings = self.pkgsettings[root_config.root]
6522 settings.setcpv(pkg)
6523 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6524 self._pkg_cache[pkg] = pkg
# Validate every blocker against the initial (installed) and final
# (post-merge) package sets: drop irrelevant blockers, cache computed
# blocker atoms per installed package, schedule uninstall tasks where a
# block can be resolved by unmerging, and record unsolvable blockers.
6527 def validate_blockers(self):
6528 """Remove any blockers from the digraph that do not match any of the
6529 packages within the graph. If necessary, create hard deps to ensure
6530 correct merge order such that mutually blocking packages are never
6531 installed simultaneously."""
6533 if "--buildpkgonly" in self.myopts or \
6534 "--nodeps" in self.myopts:
6537 #if "deep" in self.myparams:
6539 # Pull in blockers from all installed packages that haven't already
6540 # been pulled into the depgraph. This is not enabled by default
6541 # due to the performance penalty that is incurred by all the
6542 # additional dep_check calls that are required.
6544 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6545 for myroot in self.trees:
6546 vardb = self.trees[myroot]["vartree"].dbapi
6547 portdb = self.trees[myroot]["porttree"].dbapi
6548 pkgsettings = self.pkgsettings[myroot]
6549 final_db = self.mydbapi[myroot]
# The blocker cache avoids re-running dep_check for installed
# packages whose COUNTER has not changed; stale entries are pruned.
6551 blocker_cache = BlockerCache(myroot, vardb)
6552 stale_cache = set(blocker_cache)
6555 stale_cache.discard(cpv)
6556 pkg_in_graph = self.digraph.contains(pkg)
6558 # Check for masked installed packages. Only warn about
6559 # packages that are in the graph in order to avoid warning
6560 # about those that will be automatically uninstalled during
6561 # the merge process or by --depclean.
6563 if pkg_in_graph and not visible(pkgsettings, pkg):
6564 self._masked_installed.add(pkg)
6566 blocker_atoms = None
6572 self._blocker_parents.child_nodes(pkg))
6577 self._irrelevant_blockers.child_nodes(pkg))
6580 if blockers is not None:
6581 blockers = set(str(blocker.atom) \
6582 for blocker in blockers)
6584 # If this node has any blockers, create a "nomerge"
6585 # node for it so that they can be enforced.
6586 self.spinner.update()
6587 blocker_data = blocker_cache.get(cpv)
# Invalidate a cached entry whose COUNTER no longer matches the
# installed instance.
6588 if blocker_data is not None and \
6589 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6592 # If blocker data from the graph is available, use
6593 # it to validate the cache and update the cache if
6595 if blocker_data is not None and \
6596 blockers is not None:
6597 if not blockers.symmetric_difference(
6598 blocker_data.atoms):
6602 if blocker_data is None and \
6603 blockers is not None:
6604 # Re-use the blockers from the graph.
6605 blocker_atoms = sorted(blockers)
6606 counter = long(pkg.metadata["COUNTER"])
6608 blocker_cache.BlockerData(counter, blocker_atoms)
6609 blocker_cache[pkg.cpv] = blocker_data
6613 blocker_atoms = blocker_data.atoms
6615 # Use aux_get() to trigger FakeVartree global
6616 # updates on *DEPEND when appropriate.
6617 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6618 # It is crucial to pass in final_db here in order to
6619 # optimize dep_check calls by eliminating atoms via
6620 # dep_wordreduce and dep_eval calls.
# Strict checking is relaxed for the dep_check() call and restored
# below, mirroring _select_atoms_highest_available.
6622 portage.dep._dep_check_strict = False
6624 success, atoms = portage.dep_check(depstr,
6625 final_db, pkgsettings, myuse=pkg.use.enabled,
6626 trees=self._graph_trees, myroot=myroot)
6627 except Exception, e:
6628 if isinstance(e, SystemExit):
6630 # This is helpful, for example, if a ValueError
6631 # is thrown from cpv_expand due to multiple
6632 # matches (this can happen if an atom lacks a
6634 show_invalid_depstring_notice(
6635 pkg, depstr, str(e))
6639 portage.dep._dep_check_strict = True
6641 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6642 if replacement_pkg and \
6643 replacement_pkg[0].operation == "merge":
6644 # This package is being replaced anyway, so
6645 # ignore invalid dependencies so as not to
6646 # annoy the user too much (otherwise they'd be
6647 # forced to manually unmerge it first).
6649 show_invalid_depstring_notice(pkg, depstr, atoms)
# Blocker atoms are the "!"-prefixed entries from the dep result.
6651 blocker_atoms = [myatom for myatom in atoms \
6652 if myatom.startswith("!")]
6653 blocker_atoms.sort()
6654 counter = long(pkg.metadata["COUNTER"])
6655 blocker_cache[cpv] = \
6656 blocker_cache.BlockerData(counter, blocker_atoms)
6659 for atom in blocker_atoms:
6660 blocker = Blocker(atom=portage.dep.Atom(atom),
6661 eapi=pkg.metadata["EAPI"], root=myroot)
6662 self._blocker_parents.add(blocker, pkg)
6663 except portage.exception.InvalidAtom, e:
6664 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6665 show_invalid_depstring_notice(
6666 pkg, depstr, "Invalid Atom: %s" % (e,))
6668 for cpv in stale_cache:
6669 del blocker_cache[cpv]
6670 blocker_cache.flush()
6673 # Discard any "uninstall" tasks scheduled by previous calls
6674 # to this method, since those tasks may not make sense given
6675 # the current graph state.
6676 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6677 if previous_uninstall_tasks:
6678 self._blocker_uninstalls = digraph()
6679 self.digraph.difference_update(previous_uninstall_tasks)
6681 for blocker in self._blocker_parents.leaf_nodes():
6682 self.spinner.update()
6683 root_config = self.roots[blocker.root]
6684 virtuals = root_config.settings.getvirtuals()
6685 myroot = blocker.root
6686 initial_db = self.trees[myroot]["vartree"].dbapi
6687 final_db = self.mydbapi[myroot]
6689 provider_virtual = False
# Old-style virtual blockers must be expanded to atoms against each
# provider, unless a new-style virtual supersedes them.
6690 if blocker.cp in virtuals and \
6691 not self._have_new_virt(blocker.root, blocker.cp):
6692 provider_virtual = True
6694 if provider_virtual:
6696 for provider_entry in virtuals[blocker.cp]:
6698 portage.dep_getkey(provider_entry)
6699 atoms.append(blocker.atom.replace(
6700 blocker.cp, provider_cp))
6702 atoms = [blocker.atom]
6704 blocked_initial = []
6706 blocked_initial.extend(initial_db.match_pkgs(atom))
6710 blocked_final.extend(final_db.match_pkgs(atom))
6712 if not blocked_initial and not blocked_final:
6713 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6714 self._blocker_parents.remove(blocker)
6715 # Discard any parents that don't have any more blockers.
6716 for pkg in parent_pkgs:
6717 self._irrelevant_blockers.add(blocker, pkg)
6718 if not self._blocker_parents.child_nodes(pkg):
6719 self._blocker_parents.remove(pkg)
6721 for parent in self._blocker_parents.parent_nodes(blocker):
6722 unresolved_blocks = False
6723 depends_on_order = set()
6724 for pkg in blocked_initial:
6725 if pkg.slot_atom == parent.slot_atom:
6726 # TODO: Support blocks within slots in cases where it
6727 # might make sense. For example, a new version might
6728 # require that the old version be uninstalled at build
6731 if parent.installed:
6732 # Two currently installed packages conflict with
6733 # eachother. Ignore this case since the damage
6734 # is already done and this would be likely to
6735 # confuse users if displayed like a normal blocker.
6738 self._blocked_pkgs.add(pkg, blocker)
6740 if parent.operation == "merge":
6741 # Maybe the blocked package can be replaced or simply
6742 # unmerged to resolve this block.
6743 depends_on_order.add((pkg, parent))
6745 # None of the above blocker resolutions techniques apply,
6746 # so apparently this one is unresolvable.
6747 unresolved_blocks = True
6748 for pkg in blocked_final:
6749 if pkg.slot_atom == parent.slot_atom:
6750 # TODO: Support blocks within slots.
6752 if parent.operation == "nomerge" and \
6753 pkg.operation == "nomerge":
6754 # This blocker will be handled the next time that a
6755 # merge of either package is triggered.
6758 self._blocked_pkgs.add(pkg, blocker)
6760 # Maybe the blocking package can be
6761 # unmerged to resolve this block.
6762 if parent.operation == "merge" and pkg.installed:
6763 depends_on_order.add((pkg, parent))
6765 elif parent.operation == "nomerge":
6766 depends_on_order.add((parent, pkg))
6768 # None of the above blocker resolutions techniques apply,
6769 # so apparently this one is unresolvable.
6770 unresolved_blocks = True
6772 # Make sure we don't unmerge any package that have been pulled
6774 if not unresolved_blocks and depends_on_order:
6775 for inst_pkg, inst_task in depends_on_order:
6776 if self.digraph.contains(inst_pkg) and \
6777 self.digraph.parent_nodes(inst_pkg):
6778 unresolved_blocks = True
6781 if not unresolved_blocks and depends_on_order:
6782 for inst_pkg, inst_task in depends_on_order:
# Materialize an explicit "uninstall" task for the blocked installed
# package and order it against the merge with a hard dependency.
6783 uninst_task = Package(built=inst_pkg.built,
6784 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6785 metadata=inst_pkg.metadata,
6786 operation="uninstall",
6787 root_config=inst_pkg.root_config,
6788 type_name=inst_pkg.type_name)
6789 self._pkg_cache[uninst_task] = uninst_task
6790 # Enforce correct merge order with a hard dep.
6791 self.digraph.addnode(uninst_task, inst_task,
6792 priority=BlockerDepPriority.instance)
6793 # Count references to this blocker so that it can be
6794 # invalidated after nodes referencing it have been
6796 self._blocker_uninstalls.addnode(uninst_task, blocker)
6797 if not unresolved_blocks and not depends_on_order:
6798 self._irrelevant_blockers.add(blocker, parent)
6799 self._blocker_parents.remove_edge(blocker, parent)
6800 if not self._blocker_parents.parent_nodes(blocker):
6801 self._blocker_parents.remove(blocker)
6802 if not self._blocker_parents.child_nodes(parent):
6803 self._blocker_parents.remove(parent)
6804 if unresolved_blocks:
6805 self._unsolvable_blockers.add(blocker, parent)
# Report whether blocker conflicts are tolerable under the current
# options (fetch/build-only and --nodeps modes never actually merge,
# so conflicts can be accepted).
# NOTE(review): the return statements appear elided from this chunk.
6809 def _accept_blocker_conflicts(self):
6811 for x in ("--buildpkgonly", "--fetchonly",
6812 "--fetch-all-uri", "--nodeps"):
6813 if x in self.myopts:
# Sort the graph's node order to bias leaf-node selection: uninstalls
# keep their relative policy, deep system runtime deps are promoted,
# and remaining nodes are ordered by descending parent-reference count.
6818 def _merge_order_bias(self, mygraph):
6820 For optimal leaf node selection, promote deep system runtime deps and
6821 order nodes from highest to lowest overall reference count.
# Precompute each node's parent count once; used as the final tiebreak.
6825 for node in mygraph.order:
6826 node_info[node] = len(mygraph.parent_nodes(node))
6827 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6829 def cmp_merge_preference(node1, node2):
6831 if node1.operation == 'uninstall':
6832 if node2.operation == 'uninstall':
6836 if node2.operation == 'uninstall':
6837 if node1.operation == 'uninstall':
6841 node1_sys = node1 in deep_system_deps
6842 node2_sys = node2 in deep_system_deps
6843 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending order).
6848 return node_info[node2] - node_info[node1]
6850 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge task list, (re)computing it via
# _resolve_conflicts() / _serialize_tasks() until the cache is filled.
# NOTE(review): this listing is elided -- the ``try:`` line preceding
# orig. 6857, the retry handling after orig. 6859, and the
# reversed/return logic (orig. 6860-6866) are not shown here.  Also
# note the ``reversed`` parameter shadows the builtin of the same name;
# kept as-is since the keyword name is part of the public interface.
6852 def altlist(self, reversed=False):
6854 while self._serialized_tasks_cache is None:
6855 self._resolve_conflicts()
6857 self._serialized_tasks_cache, self._scheduler_graph = \
6858 self._serialize_tasks()
6859 except self._serialize_tasks_retry:
# Hand callers a copy so the cached list cannot be mutated externally.
6862 retlist = self._serialized_tasks_cache[:]
# NOTE(review): this listing is elided -- the docstring delimiters
# (orig. 6868, 6880) and the statement that populates
# self._scheduler_graph when it is None (orig. 6882, presumably a call
# to self.altlist()) are not shown here; confirm against full source.
6867 def schedulerGraph(self):
6869 The scheduler graph is identical to the normal one except that
6870 uninstall edges are reversed in specific cases that require
6871 conflicting packages to be temporarily installed simultaneously.
6872 This is intended for use by the Scheduler in it's parallelization
6873 logic. It ensures that temporary simultaneous installation of
6874 conflicting packages is avoided when appropriate (especially for
6875 !!atom blockers), but allowed in specific cases that require it.
6877 Note that this method calls break_refs() which alters the state of
6878 internal Package instances such that this depgraph instance should
6879 not be used to perform any more calculations.
6881 if self._scheduler_graph is None:
# break_refs() invalidates this depgraph for further calculations.
6883 self.break_refs(self._scheduler_graph.order)
6884 return self._scheduler_graph
# NOTE(review): this listing is elided -- the docstring delimiters and
# the ``for node in nodes:`` loop header (orig. 6892-6893) that binds
# ``node`` below are not shown here, as is the tail of the inline
# comment (orig. 6900).
6886 def break_refs(self, nodes):
6888 Take a mergelist like that returned from self.altlist() and
6889 break any references that lead back to the depgraph. This is
6890 useful if you want to hold references to packages without
6891 also holding the depgraph on the heap.
6894 if hasattr(node, "root_config"):
6895 # The FakeVartree references the _package_cache which
6896 # references the depgraph. So that Package instances don't
6897 # hold the depgraph and FakeVartree on the heap, replace
6898 # the RootConfig that references the FakeVartree with the
6899 # original RootConfig instance which references the actual
6901 node.root_config = \
6902 self._trees_orig[node.root_config.root]["root_config"]
6904 def _resolve_conflicts(self):
6905 if not self._complete_graph():
6906 raise self._unknown_internal_error()
6908 if not self.validate_blockers():
6909 raise self._unknown_internal_error()
6911 if self._slot_collision_info:
6912 self._process_slot_conflicts()
6914 def _serialize_tasks(self):
6916 if "--debug" in self.myopts:
6917 writemsg("\ndigraph:\n\n", noiselevel=-1)
6918 self.digraph.debug_print()
6919 writemsg("\n", noiselevel=-1)
6921 scheduler_graph = self.digraph.copy()
6922 mygraph=self.digraph.copy()
6923 # Prune "nomerge" root nodes if nothing depends on them, since
6924 # otherwise they slow down merge order calculation. Don't remove
6925 # non-root nodes since they help optimize merge order in some cases
6926 # such as revdep-rebuild.
6927 removed_nodes = set()
6929 for node in mygraph.root_nodes():
6930 if not isinstance(node, Package) or \
6931 node.installed or node.onlydeps:
6932 removed_nodes.add(node)
6934 self.spinner.update()
6935 mygraph.difference_update(removed_nodes)
6936 if not removed_nodes:
6938 removed_nodes.clear()
6939 self._merge_order_bias(mygraph)
6940 def cmp_circular_bias(n1, n2):
6942 RDEPEND is stronger than PDEPEND and this function
6943 measures such a strength bias within a circular
6944 dependency relationship.
6946 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6947 ignore_priority=priority_range.ignore_medium_soft)
6948 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6949 ignore_priority=priority_range.ignore_medium_soft)
6950 if n1_n2_medium == n2_n1_medium:
6955 myblocker_uninstalls = self._blocker_uninstalls.copy()
6957 # Contains uninstall tasks that have been scheduled to
6958 # occur after overlapping blockers have been installed.
6959 scheduled_uninstalls = set()
6960 # Contains any Uninstall tasks that have been ignored
6961 # in order to avoid the circular deps code path. These
6962 # correspond to blocker conflicts that could not be
6964 ignored_uninstall_tasks = set()
6965 have_uninstall_task = False
6966 complete = "complete" in self.myparams
6969 def get_nodes(**kwargs):
6971 Returns leaf nodes excluding Uninstall instances
6972 since those should be executed as late as possible.
6974 return [node for node in mygraph.leaf_nodes(**kwargs) \
6975 if isinstance(node, Package) and \
6976 (node.operation != "uninstall" or \
6977 node in scheduled_uninstalls)]
6979 # sys-apps/portage needs special treatment if ROOT="/"
6980 running_root = self._running_root.root
6981 from portage.const import PORTAGE_PACKAGE_ATOM
6982 runtime_deps = InternalPackageSet(
6983 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6984 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6985 PORTAGE_PACKAGE_ATOM)
6986 replacement_portage = self.mydbapi[running_root].match_pkgs(
6987 PORTAGE_PACKAGE_ATOM)
6990 running_portage = running_portage[0]
6992 running_portage = None
6994 if replacement_portage:
6995 replacement_portage = replacement_portage[0]
6997 replacement_portage = None
6999 if replacement_portage == running_portage:
7000 replacement_portage = None
7002 if replacement_portage is not None:
7003 # update from running_portage to replacement_portage asap
7004 asap_nodes.append(replacement_portage)
7006 if running_portage is not None:
7008 portage_rdepend = self._select_atoms_highest_available(
7009 running_root, running_portage.metadata["RDEPEND"],
7010 myuse=running_portage.use.enabled,
7011 parent=running_portage, strict=False)
7012 except portage.exception.InvalidDependString, e:
7013 portage.writemsg("!!! Invalid RDEPEND in " + \
7014 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7015 (running_root, running_portage.cpv, e), noiselevel=-1)
7017 portage_rdepend = []
7018 runtime_deps.update(atom for atom in portage_rdepend \
7019 if not atom.startswith("!"))
7021 def gather_deps(ignore_priority, mergeable_nodes,
7022 selected_nodes, node):
7024 Recursively gather a group of nodes that RDEPEND on
7025 eachother. This ensures that they are merged as a group
7026 and get their RDEPENDs satisfied as soon as possible.
7028 if node in selected_nodes:
7030 if node not in mergeable_nodes:
7032 if node == replacement_portage and \
7033 mygraph.child_nodes(node,
7034 ignore_priority=priority_range.ignore_medium_soft):
7035 # Make sure that portage always has all of it's
7036 # RDEPENDs installed first.
7038 selected_nodes.add(node)
7039 for child in mygraph.child_nodes(node,
7040 ignore_priority=ignore_priority):
7041 if not gather_deps(ignore_priority,
7042 mergeable_nodes, selected_nodes, child):
7046 def ignore_uninst_or_med(priority):
7047 if priority is BlockerDepPriority.instance:
7049 return priority_range.ignore_medium(priority)
7051 def ignore_uninst_or_med_soft(priority):
7052 if priority is BlockerDepPriority.instance:
7054 return priority_range.ignore_medium_soft(priority)
7056 tree_mode = "--tree" in self.myopts
7057 # Tracks whether or not the current iteration should prefer asap_nodes
7058 # if available. This is set to False when the previous iteration
7059 # failed to select any nodes. It is reset whenever nodes are
7060 # successfully selected.
7063 # Controls whether or not the current iteration should drop edges that
7064 # are "satisfied" by installed packages, in order to solve circular
7065 # dependencies. The deep runtime dependencies of installed packages are
7066 # not checked in this case (bug #199856), so it must be avoided
7067 # whenever possible.
7068 drop_satisfied = False
7070 # State of variables for successive iterations that loosen the
7071 # criteria for node selection.
7073 # iteration prefer_asap drop_satisfied
7078 # If no nodes are selected on the last iteration, it is due to
7079 # unresolved blockers or circular dependencies.
7081 while not mygraph.empty():
7082 self.spinner.update()
7083 selected_nodes = None
7084 ignore_priority = None
7085 if drop_satisfied or (prefer_asap and asap_nodes):
7086 priority_range = DepPrioritySatisfiedRange
7088 priority_range = DepPriorityNormalRange
7089 if prefer_asap and asap_nodes:
7090 # ASAP nodes are merged before their soft deps. Go ahead and
7091 # select root nodes here if necessary, since it's typical for
7092 # the parent to have been removed from the graph already.
7093 asap_nodes = [node for node in asap_nodes \
7094 if mygraph.contains(node)]
7095 for node in asap_nodes:
7096 if not mygraph.child_nodes(node,
7097 ignore_priority=priority_range.ignore_soft):
7098 selected_nodes = [node]
7099 asap_nodes.remove(node)
7101 if not selected_nodes and \
7102 not (prefer_asap and asap_nodes):
7103 for i in xrange(priority_range.NONE,
7104 priority_range.MEDIUM_SOFT + 1):
7105 ignore_priority = priority_range.ignore_priority[i]
7106 nodes = get_nodes(ignore_priority=ignore_priority)
7108 # If there is a mix of uninstall nodes with other
7109 # types, save the uninstall nodes for later since
7110 # sometimes a merge node will render an uninstall
7111 # node unnecessary (due to occupying the same slot),
7112 # and we want to avoid executing a separate uninstall
7113 # task in that case.
7115 good_uninstalls = []
7116 with_some_uninstalls_excluded = []
7118 if node.operation == "uninstall":
7119 slot_node = self.mydbapi[node.root
7120 ].match_pkgs(node.slot_atom)
7122 slot_node[0].operation == "merge":
7124 good_uninstalls.append(node)
7125 with_some_uninstalls_excluded.append(node)
7127 nodes = good_uninstalls
7128 elif with_some_uninstalls_excluded:
7129 nodes = with_some_uninstalls_excluded
7133 if ignore_priority is None and not tree_mode:
7134 # Greedily pop all of these nodes since no
7135 # relationship has been ignored. This optimization
7136 # destroys --tree output, so it's disabled in tree
7138 selected_nodes = nodes
7140 # For optimal merge order:
7141 # * Only pop one node.
7142 # * Removing a root node (node without a parent)
7143 # will not produce a leaf node, so avoid it.
7144 # * It's normal for a selected uninstall to be a
7145 # root node, so don't check them for parents.
7147 if node.operation == "uninstall" or \
7148 mygraph.parent_nodes(node):
7149 selected_nodes = [node]
7155 if not selected_nodes:
7156 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7158 mergeable_nodes = set(nodes)
7159 if prefer_asap and asap_nodes:
7161 for i in xrange(priority_range.SOFT,
7162 priority_range.MEDIUM_SOFT + 1):
7163 ignore_priority = priority_range.ignore_priority[i]
7165 if not mygraph.parent_nodes(node):
7167 selected_nodes = set()
7168 if gather_deps(ignore_priority,
7169 mergeable_nodes, selected_nodes, node):
7172 selected_nodes = None
7176 if prefer_asap and asap_nodes and not selected_nodes:
7177 # We failed to find any asap nodes to merge, so ignore
7178 # them for the next iteration.
7182 if selected_nodes and ignore_priority is not None:
7183 # Try to merge ignored medium_soft deps as soon as possible
7184 # if they're not satisfied by installed packages.
7185 for node in selected_nodes:
7186 children = set(mygraph.child_nodes(node))
7187 soft = children.difference(
7188 mygraph.child_nodes(node,
7189 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7190 medium_soft = children.difference(
7191 mygraph.child_nodes(node,
7193 DepPrioritySatisfiedRange.ignore_medium_soft))
7194 medium_soft.difference_update(soft)
7195 for child in medium_soft:
7196 if child in selected_nodes:
7198 if child in asap_nodes:
7200 asap_nodes.append(child)
7202 if selected_nodes and len(selected_nodes) > 1:
7203 if not isinstance(selected_nodes, list):
7204 selected_nodes = list(selected_nodes)
7205 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7207 if not selected_nodes and not myblocker_uninstalls.is_empty():
7208 # An Uninstall task needs to be executed in order to
7209 # avoid conflict if possible.
7212 priority_range = DepPrioritySatisfiedRange
7214 priority_range = DepPriorityNormalRange
7216 mergeable_nodes = get_nodes(
7217 ignore_priority=ignore_uninst_or_med)
7219 min_parent_deps = None
7221 for task in myblocker_uninstalls.leaf_nodes():
7222 # Do some sanity checks so that system or world packages
7223 # don't get uninstalled inappropriately here (only really
7224 # necessary when --complete-graph has not been enabled).
7226 if task in ignored_uninstall_tasks:
7229 if task in scheduled_uninstalls:
7230 # It's been scheduled but it hasn't
7231 # been executed yet due to dependence
7232 # on installation of blocking packages.
7235 root_config = self.roots[task.root]
7236 inst_pkg = self._pkg_cache[
7237 ("installed", task.root, task.cpv, "nomerge")]
7239 if self.digraph.contains(inst_pkg):
7242 forbid_overlap = False
7243 heuristic_overlap = False
7244 for blocker in myblocker_uninstalls.parent_nodes(task):
7245 if blocker.eapi in ("0", "1"):
7246 heuristic_overlap = True
7247 elif blocker.atom.blocker.overlap.forbid:
7248 forbid_overlap = True
7250 if forbid_overlap and running_root == task.root:
7253 if heuristic_overlap and running_root == task.root:
7254 # Never uninstall sys-apps/portage or it's essential
7255 # dependencies, except through replacement.
7257 runtime_dep_atoms = \
7258 list(runtime_deps.iterAtomsForPackage(task))
7259 except portage.exception.InvalidDependString, e:
7260 portage.writemsg("!!! Invalid PROVIDE in " + \
7261 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7262 (task.root, task.cpv, e), noiselevel=-1)
7266 # Don't uninstall a runtime dep if it appears
7267 # to be the only suitable one installed.
7269 vardb = root_config.trees["vartree"].dbapi
7270 for atom in runtime_dep_atoms:
7271 other_version = None
7272 for pkg in vardb.match_pkgs(atom):
7273 if pkg.cpv == task.cpv and \
7274 pkg.metadata["COUNTER"] == \
7275 task.metadata["COUNTER"]:
7279 if other_version is None:
7285 # For packages in the system set, don't take
7286 # any chances. If the conflict can't be resolved
7287 # by a normal replacement operation then abort.
7290 for atom in root_config.sets[
7291 "system"].iterAtomsForPackage(task):
7294 except portage.exception.InvalidDependString, e:
7295 portage.writemsg("!!! Invalid PROVIDE in " + \
7296 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7297 (task.root, task.cpv, e), noiselevel=-1)
7303 # Note that the world check isn't always
7304 # necessary since self._complete_graph() will
7305 # add all packages from the system and world sets to the
7306 # graph. This just allows unresolved conflicts to be
7307 # detected as early as possible, which makes it possible
7308 # to avoid calling self._complete_graph() when it is
7309 # unnecessary due to blockers triggering an abortion.
7311 # For packages in the world set, go ahead an uninstall
7312 # when necessary, as long as the atom will be satisfied
7313 # in the final state.
7314 graph_db = self.mydbapi[task.root]
7317 for atom in root_config.sets[
7318 "world"].iterAtomsForPackage(task):
7320 for pkg in graph_db.match_pkgs(atom):
7327 self._blocked_world_pkgs[inst_pkg] = atom
7329 except portage.exception.InvalidDependString, e:
7330 portage.writemsg("!!! Invalid PROVIDE in " + \
7331 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7332 (task.root, task.cpv, e), noiselevel=-1)
7338 # Check the deps of parent nodes to ensure that
7339 # the chosen task produces a leaf node. Maybe
7340 # this can be optimized some more to make the
7341 # best possible choice, but the current algorithm
7342 # is simple and should be near optimal for most
7344 mergeable_parent = False
7346 for parent in mygraph.parent_nodes(task):
7347 parent_deps.update(mygraph.child_nodes(parent,
7348 ignore_priority=priority_range.ignore_medium_soft))
7349 if parent in mergeable_nodes and \
7350 gather_deps(ignore_uninst_or_med_soft,
7351 mergeable_nodes, set(), parent):
7352 mergeable_parent = True
7354 if not mergeable_parent:
7357 parent_deps.remove(task)
7358 if min_parent_deps is None or \
7359 len(parent_deps) < min_parent_deps:
7360 min_parent_deps = len(parent_deps)
7363 if uninst_task is not None:
7364 # The uninstall is performed only after blocking
7365 # packages have been merged on top of it. File
7366 # collisions between blocking packages are detected
7367 # and removed from the list of files to be uninstalled.
7368 scheduled_uninstalls.add(uninst_task)
7369 parent_nodes = mygraph.parent_nodes(uninst_task)
7371 # Reverse the parent -> uninstall edges since we want
7372 # to do the uninstall after blocking packages have
7373 # been merged on top of it.
7374 mygraph.remove(uninst_task)
7375 for blocked_pkg in parent_nodes:
7376 mygraph.add(blocked_pkg, uninst_task,
7377 priority=BlockerDepPriority.instance)
7378 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7379 scheduler_graph.add(blocked_pkg, uninst_task,
7380 priority=BlockerDepPriority.instance)
7382 # Reset the state variables for leaf node selection and
7383 # continue trying to select leaf nodes.
7385 drop_satisfied = False
7388 if not selected_nodes:
7389 # Only select root nodes as a last resort. This case should
7390 # only trigger when the graph is nearly empty and the only
7391 # remaining nodes are isolated (no parents or children). Since
7392 # the nodes must be isolated, ignore_priority is not needed.
7393 selected_nodes = get_nodes()
7395 if not selected_nodes and not drop_satisfied:
7396 drop_satisfied = True
7399 if not selected_nodes and not myblocker_uninstalls.is_empty():
7400 # If possible, drop an uninstall task here in order to avoid
7401 # the circular deps code path. The corresponding blocker will
7402 # still be counted as an unresolved conflict.
7404 for node in myblocker_uninstalls.leaf_nodes():
7406 mygraph.remove(node)
7411 ignored_uninstall_tasks.add(node)
7414 if uninst_task is not None:
7415 # Reset the state variables for leaf node selection and
7416 # continue trying to select leaf nodes.
7418 drop_satisfied = False
7421 if not selected_nodes:
7422 self._circular_deps_for_display = mygraph
7423 raise self._unknown_internal_error()
7425 # At this point, we've succeeded in selecting one or more nodes, so
7426 # reset state variables for leaf node selection.
7428 drop_satisfied = False
7430 mygraph.difference_update(selected_nodes)
7432 for node in selected_nodes:
7433 if isinstance(node, Package) and \
7434 node.operation == "nomerge":
7437 # Handle interactions between blockers
7438 # and uninstallation tasks.
7439 solved_blockers = set()
7441 if isinstance(node, Package) and \
7442 "uninstall" == node.operation:
7443 have_uninstall_task = True
7446 vardb = self.trees[node.root]["vartree"].dbapi
7447 previous_cpv = vardb.match(node.slot_atom)
7449 # The package will be replaced by this one, so remove
7450 # the corresponding Uninstall task if necessary.
7451 previous_cpv = previous_cpv[0]
7453 ("installed", node.root, previous_cpv, "uninstall")
7455 mygraph.remove(uninst_task)
7459 if uninst_task is not None and \
7460 uninst_task not in ignored_uninstall_tasks and \
7461 myblocker_uninstalls.contains(uninst_task):
7462 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7463 myblocker_uninstalls.remove(uninst_task)
7464 # Discard any blockers that this Uninstall solves.
7465 for blocker in blocker_nodes:
7466 if not myblocker_uninstalls.child_nodes(blocker):
7467 myblocker_uninstalls.remove(blocker)
7468 solved_blockers.add(blocker)
7470 retlist.append(node)
7472 if (isinstance(node, Package) and \
7473 "uninstall" == node.operation) or \
7474 (uninst_task is not None and \
7475 uninst_task in scheduled_uninstalls):
7476 # Include satisfied blockers in the merge list
7477 # since the user might be interested and also
7478 # it serves as an indicator that blocking packages
7479 # will be temporarily installed simultaneously.
7480 for blocker in solved_blockers:
7481 retlist.append(Blocker(atom=blocker.atom,
7482 root=blocker.root, eapi=blocker.eapi,
7485 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7486 for node in myblocker_uninstalls.root_nodes():
7487 unsolvable_blockers.add(node)
7489 for blocker in unsolvable_blockers:
7490 retlist.append(blocker)
7492 # If any Uninstall tasks need to be executed in order
7493 # to avoid a conflict, complete the graph with any
7494 # dependencies that may have been initially
7495 # neglected (to ensure that unsafe Uninstall tasks
7496 # are properly identified and blocked from execution).
7497 if have_uninstall_task and \
7499 not unsolvable_blockers:
7500 self.myparams.add("complete")
7501 raise self._serialize_tasks_retry("")
7503 if unsolvable_blockers and \
7504 not self._accept_blocker_conflicts():
7505 self._unsatisfied_blockers_for_display = unsolvable_blockers
7506 self._serialized_tasks_cache = retlist[:]
7507 self._scheduler_graph = scheduler_graph
7508 raise self._unknown_internal_error()
7510 if self._slot_collision_info and \
7511 not self._accept_blocker_conflicts():
7512 self._serialized_tasks_cache = retlist[:]
7513 self._scheduler_graph = scheduler_graph
7514 raise self._unknown_internal_error()
7516 return retlist, scheduler_graph
# Report an unresolvable circular-dependency situation: repeatedly prune
# acyclic root nodes to cut noise, then display the remaining cycle
# members with --tree formatting forced on, followed by the debug graph
# and a hint about disabling optional USE flags.
# NOTE(review): this listing is elided -- the pruning loop header
# (orig. 7523), its termination check (orig. 7526-7527), the
# display_order initializer (orig. 7532), and a few branch lines
# (orig. 7536, 7538-7539, 7551) are not shown here.
7518 def _show_circular_deps(self, mygraph):
7519 # No leaf nodes are available, so we have a circular
7520 # dependency panic situation. Reduce the noise level to a
7521 # minimum via repeated elimination of root nodes since they
7522 # have no parents and thus can not be part of a cycle.
7524 root_nodes = mygraph.root_nodes(
7525 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7528 mygraph.difference_update(root_nodes)
7529 # Display the USE flags that are enabled on nodes that are part
7530 # of dependency cycles in case that helps the user decide to
7531 # disable some of them.
7533 tempgraph = mygraph.copy()
7534 while not tempgraph.empty():
7535 nodes = tempgraph.leaf_nodes()
# When no leaf exists (pure cycle), fall back to an arbitrary node.
7537 node = tempgraph.order[0]
7540 display_order.append(node)
7541 tempgraph.remove(node)
7542 display_order.reverse()
# Force verbose tree output for the cycle display.
7543 self.myopts.pop("--quiet", None)
7544 self.myopts.pop("--verbose", None)
7545 self.myopts["--tree"] = True
7546 portage.writemsg("\n\n", noiselevel=-1)
7547 self.display(display_order)
7548 prefix = colorize("BAD", " * ")
7549 portage.writemsg("\n", noiselevel=-1)
7550 portage.writemsg(prefix + "Error: circular dependencies:\n",
7552 portage.writemsg("\n", noiselevel=-1)
7553 mygraph.debug_print()
7554 portage.writemsg("\n", noiselevel=-1)
7555 portage.writemsg(prefix + "Note that circular dependencies " + \
7556 "can often be avoided by temporarily\n", noiselevel=-1)
7557 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7558 "optional dependencies.\n", noiselevel=-1)
7560 def _show_merge_list(self):
7561 if self._serialized_tasks_cache is not None and \
7562 not (self._displayed_list and \
7563 (self._displayed_list == self._serialized_tasks_cache or \
7564 self._displayed_list == \
7565 list(reversed(self._serialized_tasks_cache)))):
7566 display_list = self._serialized_tasks_cache[:]
7567 if "--tree" in self.myopts:
7568 display_list.reverse()
7569 self.display(display_list)
# Print an error explaining that the merge list contains mutually
# blocking packages, list each conflicting package together with the
# parents/atoms that pulled it in (pruned for noise), and finally point
# at the blocker documentation unless --quiet is set.
# NOTE(review): this listing is elided -- the initializers for
# conflict_pkgs, pruned_pkgs, msg, indent and max_parents, plus several
# break/continue and blank lines, are not shown here.
7571 def _show_unsatisfied_blockers(self, blockers):
7572 self._show_merge_list()
7573 msg = "Error: The above package list contains " + \
7574 "packages which cannot be installed " + \
7575 "at the same time on the same system."
7576 prefix = colorize("BAD", " * ")
7577 from textwrap import wrap
7578 portage.writemsg("\n", noiselevel=-1)
7579 for line in wrap(msg, 70):
7580 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7582 # Display the conflicting packages along with the packages
7583 # that pulled them in. This is helpful for troubleshooting
7584 # cases in which blockers don't solve automatically and
7585 # the reasons are not apparent from the normal merge list
7589 for blocker in blockers:
7590 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7591 self._blocker_parents.parent_nodes(blocker)):
7592 parent_atoms = self._parent_atoms.get(pkg)
7593 if not parent_atoms:
# Fall back to the world-set atom that keeps this package blocked.
7594 atom = self._blocked_world_pkgs.get(pkg)
7595 if atom is not None:
7596 parent_atoms = set([("@world", atom)])
7598 conflict_pkgs[pkg] = parent_atoms
7601 # Reduce noise by pruning packages that are only
7602 # pulled in by other conflict packages.
7604 for pkg, parent_atoms in conflict_pkgs.iteritems():
7605 relevant_parent = False
7606 for parent, atom in parent_atoms:
7607 if parent not in conflict_pkgs:
7608 relevant_parent = True
7610 if not relevant_parent:
7611 pruned_pkgs.add(pkg)
7612 for pkg in pruned_pkgs:
7613 del conflict_pkgs[pkg]
7619 # Max number of parents shown, to avoid flooding the display.
7621 for pkg, parent_atoms in conflict_pkgs.iteritems():
7625 # Prefer packages that are not directly involved in a conflict.
7626 for parent_atom in parent_atoms:
7627 if len(pruned_list) >= max_parents:
7629 parent, atom = parent_atom
7630 if parent not in conflict_pkgs:
7631 pruned_list.add(parent_atom)
# Second pass fills any remaining display slots with conflict parents.
7633 for parent_atom in parent_atoms:
7634 if len(pruned_list) >= max_parents:
7636 pruned_list.add(parent_atom)
7638 omitted_parents = len(parent_atoms) - len(pruned_list)
7639 msg.append(indent + "%s pulled in by\n" % pkg)
7641 for parent_atom in pruned_list:
7642 parent, atom = parent_atom
7643 msg.append(2*indent)
7644 if isinstance(parent,
7645 (PackageArg, AtomArg)):
7646 # For PackageArg and AtomArg types, it's
7647 # redundant to display the atom attribute.
7648 msg.append(str(parent))
7650 # Display the specific atom from SetArg or
7652 msg.append("%s required by %s" % (atom, parent))
7656 msg.append(2*indent)
7657 msg.append("(and %d more)\n" % omitted_parents)
7661 sys.stderr.write("".join(msg))
7664 if "--quiet" not in self.myopts:
7665 show_blocker_docs_link()
7667 def display(self, mylist, favorites=[], verbosity=None):
7669 # This is used to prevent display_problems() from
7670 # redundantly displaying this exact same merge list
7671 # again via _show_merge_list().
7672 self._displayed_list = mylist
7674 if verbosity is None:
7675 verbosity = ("--quiet" in self.myopts and 1 or \
7676 "--verbose" in self.myopts and 3 or 2)
7677 favorites_set = InternalPackageSet(favorites)
7678 oneshot = "--oneshot" in self.myopts or \
7679 "--onlydeps" in self.myopts
7680 columns = "--columns" in self.myopts
7685 counters = PackageCounters()
7687 if verbosity == 1 and "--verbose" not in self.myopts:
7688 def create_use_string(*args):
7691 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7693 is_new, reinst_flags,
7694 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7695 alphabetical=("--alphabetical" in self.myopts)):
7703 cur_iuse = set(cur_iuse)
7704 enabled_flags = cur_iuse.intersection(cur_use)
7705 removed_iuse = set(old_iuse).difference(cur_iuse)
7706 any_iuse = cur_iuse.union(old_iuse)
7707 any_iuse = list(any_iuse)
7709 for flag in any_iuse:
7712 reinst_flag = reinst_flags and flag in reinst_flags
7713 if flag in enabled_flags:
7715 if is_new or flag in old_use and \
7716 (all_flags or reinst_flag):
7717 flag_str = red(flag)
7718 elif flag not in old_iuse:
7719 flag_str = yellow(flag) + "%*"
7720 elif flag not in old_use:
7721 flag_str = green(flag) + "*"
7722 elif flag in removed_iuse:
7723 if all_flags or reinst_flag:
7724 flag_str = yellow("-" + flag) + "%"
7727 flag_str = "(" + flag_str + ")"
7728 removed.append(flag_str)
7731 if is_new or flag in old_iuse and \
7732 flag not in old_use and \
7733 (all_flags or reinst_flag):
7734 flag_str = blue("-" + flag)
7735 elif flag not in old_iuse:
7736 flag_str = yellow("-" + flag)
7737 if flag not in iuse_forced:
7739 elif flag in old_use:
7740 flag_str = green("-" + flag) + "*"
7742 if flag in iuse_forced:
7743 flag_str = "(" + flag_str + ")"
7745 enabled.append(flag_str)
7747 disabled.append(flag_str)
7750 ret = " ".join(enabled)
7752 ret = " ".join(enabled + disabled + removed)
7754 ret = '%s="%s" ' % (name, ret)
7757 repo_display = RepoDisplay(self.roots)
7761 mygraph = self.digraph.copy()
7763 # If there are any Uninstall instances, add the corresponding
7764 # blockers to the digraph (useful for --tree display).
7766 executed_uninstalls = set(node for node in mylist \
7767 if isinstance(node, Package) and node.operation == "unmerge")
7769 for uninstall in self._blocker_uninstalls.leaf_nodes():
7770 uninstall_parents = \
7771 self._blocker_uninstalls.parent_nodes(uninstall)
7772 if not uninstall_parents:
7775 # Remove the corresponding "nomerge" node and substitute
7776 # the Uninstall node.
7777 inst_pkg = self._pkg_cache[
7778 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7780 mygraph.remove(inst_pkg)
7785 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7787 inst_pkg_blockers = []
7789 # Break the Package -> Uninstall edges.
7790 mygraph.remove(uninstall)
7792 # Resolution of a package's blockers
7793 # depend on it's own uninstallation.
7794 for blocker in inst_pkg_blockers:
7795 mygraph.add(uninstall, blocker)
7797 # Expand Package -> Uninstall edges into
7798 # Package -> Blocker -> Uninstall edges.
7799 for blocker in uninstall_parents:
7800 mygraph.add(uninstall, blocker)
7801 for parent in self._blocker_parents.parent_nodes(blocker):
7802 if parent != inst_pkg:
7803 mygraph.add(blocker, parent)
7805 # If the uninstall task did not need to be executed because
7806 # of an upgrade, display Blocker -> Upgrade edges since the
7807 # corresponding Blocker -> Uninstall edges will not be shown.
7809 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7810 if upgrade_node is not None and \
7811 uninstall not in executed_uninstalls:
7812 for blocker in uninstall_parents:
7813 mygraph.add(upgrade_node, blocker)
7815 unsatisfied_blockers = []
7820 if isinstance(x, Blocker) and not x.satisfied:
7821 unsatisfied_blockers.append(x)
7824 if "--tree" in self.myopts:
7825 depth = len(tree_nodes)
7826 while depth and graph_key not in \
7827 mygraph.child_nodes(tree_nodes[depth-1]):
7830 tree_nodes = tree_nodes[:depth]
7831 tree_nodes.append(graph_key)
7832 display_list.append((x, depth, True))
7833 shown_edges.add((graph_key, tree_nodes[depth-1]))
7835 traversed_nodes = set() # prevent endless circles
7836 traversed_nodes.add(graph_key)
7837 def add_parents(current_node, ordered):
7839 # Do not traverse to parents if this node is an
7840 # an argument or a direct member of a set that has
7841 # been specified as an argument (system or world).
7842 if current_node not in self._set_nodes:
7843 parent_nodes = mygraph.parent_nodes(current_node)
7845 child_nodes = set(mygraph.child_nodes(current_node))
7846 selected_parent = None
7847 # First, try to avoid a direct cycle.
7848 for node in parent_nodes:
7849 if not isinstance(node, (Blocker, Package)):
7851 if node not in traversed_nodes and \
7852 node not in child_nodes:
7853 edge = (current_node, node)
7854 if edge in shown_edges:
7856 selected_parent = node
7858 if not selected_parent:
7859 # A direct cycle is unavoidable.
7860 for node in parent_nodes:
7861 if not isinstance(node, (Blocker, Package)):
7863 if node not in traversed_nodes:
7864 edge = (current_node, node)
7865 if edge in shown_edges:
7867 selected_parent = node
7870 shown_edges.add((current_node, selected_parent))
7871 traversed_nodes.add(selected_parent)
7872 add_parents(selected_parent, False)
7873 display_list.append((current_node,
7874 len(tree_nodes), ordered))
7875 tree_nodes.append(current_node)
7877 add_parents(graph_key, True)
7879 display_list.append((x, depth, True))
7880 mylist = display_list
7881 for x in unsatisfied_blockers:
7882 mylist.append((x, 0, True))
7884 last_merge_depth = 0
7885 for i in xrange(len(mylist)-1,-1,-1):
7886 graph_key, depth, ordered = mylist[i]
7887 if not ordered and depth == 0 and i > 0 \
7888 and graph_key == mylist[i-1][0] and \
7889 mylist[i-1][1] == 0:
7890 # An ordered node got a consecutive duplicate when the tree was
7894 if ordered and graph_key[-1] != "nomerge":
7895 last_merge_depth = depth
7897 if depth >= last_merge_depth or \
7898 i < len(mylist) - 1 and \
7899 depth >= mylist[i+1][1]:
7902 from portage import flatten
7903 from portage.dep import use_reduce, paren_reduce
7904 # files to fetch list - avoids counting a same file twice
7905 # in size display (verbose mode)
7908 # Use this set to detect when all the "repoadd" strings are "[0]"
7909 # and disable the entire repo display in this case.
7912 for mylist_index in xrange(len(mylist)):
7913 x, depth, ordered = mylist[mylist_index]
7917 portdb = self.trees[myroot]["porttree"].dbapi
7918 bindb = self.trees[myroot]["bintree"].dbapi
7919 vardb = self.trees[myroot]["vartree"].dbapi
7920 vartree = self.trees[myroot]["vartree"]
7921 pkgsettings = self.pkgsettings[myroot]
7924 indent = " " * depth
7926 if isinstance(x, Blocker):
7928 blocker_style = "PKG_BLOCKER_SATISFIED"
7929 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7931 blocker_style = "PKG_BLOCKER"
7932 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7934 counters.blocks += 1
7936 counters.blocks_satisfied += 1
7937 resolved = portage.key_expand(
7938 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7939 if "--columns" in self.myopts and "--quiet" in self.myopts:
7940 addl += " " + colorize(blocker_style, resolved)
7942 addl = "[%s %s] %s%s" % \
7943 (colorize(blocker_style, "blocks"),
7944 addl, indent, colorize(blocker_style, resolved))
7945 block_parents = self._blocker_parents.parent_nodes(x)
7946 block_parents = set([pnode[2] for pnode in block_parents])
7947 block_parents = ", ".join(block_parents)
7949 addl += colorize(blocker_style,
7950 " (\"%s\" is blocking %s)") % \
7951 (str(x.atom).lstrip("!"), block_parents)
7953 addl += colorize(blocker_style,
7954 " (is blocking %s)") % block_parents
7955 if isinstance(x, Blocker) and x.satisfied:
7960 blockers.append(addl)
7963 pkg_merge = ordered and pkg_status == "merge"
7964 if not pkg_merge and pkg_status == "merge":
7965 pkg_status = "nomerge"
7966 built = pkg_type != "ebuild"
7967 installed = pkg_type == "installed"
7969 metadata = pkg.metadata
7971 repo_name = metadata["repository"]
7972 if pkg_type == "ebuild":
7973 ebuild_path = portdb.findname(pkg_key)
7974 if not ebuild_path: # shouldn't happen
7975 raise portage.exception.PackageNotFound(pkg_key)
7976 repo_path_real = os.path.dirname(os.path.dirname(
7977 os.path.dirname(ebuild_path)))
7979 repo_path_real = portdb.getRepositoryPath(repo_name)
7980 pkg_use = list(pkg.use.enabled)
7982 restrict = flatten(use_reduce(paren_reduce(
7983 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7984 except portage.exception.InvalidDependString, e:
7985 if not pkg.installed:
7986 show_invalid_depstring_notice(x,
7987 pkg.metadata["RESTRICT"], str(e))
7991 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7992 "fetch" in restrict:
7995 counters.restrict_fetch += 1
7996 if portdb.fetch_check(pkg_key, pkg_use):
7999 counters.restrict_fetch_satisfied += 1
8001 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8002 #param is used for -u, where you still *do* want to see when something is being upgraded.
8005 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8006 if vardb.cpv_exists(pkg_key):
8007 addl=" "+yellow("R")+fetch+" "
8010 counters.reinst += 1
8011 elif pkg_status == "uninstall":
8012 counters.uninst += 1
8013 # filter out old-style virtual matches
8014 elif installed_versions and \
8015 portage.cpv_getkey(installed_versions[0]) == \
8016 portage.cpv_getkey(pkg_key):
8017 myinslotlist = vardb.match(pkg.slot_atom)
8018 # If this is the first install of a new-style virtual, we
8019 # need to filter out old-style virtual matches.
8020 if myinslotlist and \
8021 portage.cpv_getkey(myinslotlist[0]) != \
8022 portage.cpv_getkey(pkg_key):
8025 myoldbest = myinslotlist[:]
8027 if not portage.dep.cpvequal(pkg_key,
8028 portage.best([pkg_key] + myoldbest)):
8030 addl += turquoise("U")+blue("D")
8032 counters.downgrades += 1
8035 addl += turquoise("U") + " "
8037 counters.upgrades += 1
8039 # New slot, mark it new.
8040 addl = " " + green("NS") + fetch + " "
8041 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8043 counters.newslot += 1
8045 if "--changelog" in self.myopts:
8046 inst_matches = vardb.match(pkg.slot_atom)
8048 changelogs.extend(self.calc_changelog(
8049 portdb.findname(pkg_key),
8050 inst_matches[0], pkg_key))
8052 addl = " " + green("N") + " " + fetch + " "
8061 forced_flags = set()
8062 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8063 forced_flags.update(pkgsettings.useforce)
8064 forced_flags.update(pkgsettings.usemask)
8066 cur_use = [flag for flag in pkg.use.enabled \
8067 if flag in pkg.iuse.all]
8068 cur_iuse = sorted(pkg.iuse.all)
8070 if myoldbest and myinslotlist:
8071 previous_cpv = myoldbest[0]
8073 previous_cpv = pkg.cpv
8074 if vardb.cpv_exists(previous_cpv):
8075 old_iuse, old_use = vardb.aux_get(
8076 previous_cpv, ["IUSE", "USE"])
8077 old_iuse = list(set(
8078 filter_iuse_defaults(old_iuse.split())))
8080 old_use = old_use.split()
8087 old_use = [flag for flag in old_use if flag in old_iuse]
8089 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8091 use_expand.reverse()
8092 use_expand_hidden = \
8093 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8095 def map_to_use_expand(myvals, forcedFlags=False,
8099 for exp in use_expand:
8102 for val in myvals[:]:
8103 if val.startswith(exp.lower()+"_"):
8104 if val in forced_flags:
8105 forced[exp].add(val[len(exp)+1:])
8106 ret[exp].append(val[len(exp)+1:])
8109 forced["USE"] = [val for val in myvals \
8110 if val in forced_flags]
8112 for exp in use_expand_hidden:
8118 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8119 # are the only thing that triggered reinstallation.
8120 reinst_flags_map = {}
8121 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8122 reinst_expand_map = None
8123 if reinstall_for_flags:
8124 reinst_flags_map = map_to_use_expand(
8125 list(reinstall_for_flags), removeHidden=False)
8126 for k in list(reinst_flags_map):
8127 if not reinst_flags_map[k]:
8128 del reinst_flags_map[k]
8129 if not reinst_flags_map.get("USE"):
8130 reinst_expand_map = reinst_flags_map.copy()
8131 reinst_expand_map.pop("USE", None)
8132 if reinst_expand_map and \
8133 not set(reinst_expand_map).difference(
8135 use_expand_hidden = \
8136 set(use_expand_hidden).difference(
8139 cur_iuse_map, iuse_forced = \
8140 map_to_use_expand(cur_iuse, forcedFlags=True)
8141 cur_use_map = map_to_use_expand(cur_use)
8142 old_iuse_map = map_to_use_expand(old_iuse)
8143 old_use_map = map_to_use_expand(old_use)
8146 use_expand.insert(0, "USE")
8148 for key in use_expand:
8149 if key in use_expand_hidden:
8151 verboseadd += create_use_string(key.upper(),
8152 cur_iuse_map[key], iuse_forced[key],
8153 cur_use_map[key], old_iuse_map[key],
8154 old_use_map[key], is_new,
8155 reinst_flags_map.get(key))
8160 if pkg_type == "ebuild" and pkg_merge:
8162 myfilesdict = portdb.getfetchsizes(pkg_key,
8163 useflags=pkg_use, debug=self.edebug)
8164 except portage.exception.InvalidDependString, e:
8165 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8166 show_invalid_depstring_notice(x, src_uri, str(e))
8169 if myfilesdict is None:
8170 myfilesdict="[empty/missing/bad digest]"
8172 for myfetchfile in myfilesdict:
8173 if myfetchfile not in myfetchlist:
8174 mysize+=myfilesdict[myfetchfile]
8175 myfetchlist.append(myfetchfile)
8177 counters.totalsize += mysize
8178 verboseadd += format_size(mysize)
8181 # assign index for a previous version in the same slot
8182 has_previous = False
8183 repo_name_prev = None
8184 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8186 slot_matches = vardb.match(slot_atom)
8189 repo_name_prev = vardb.aux_get(slot_matches[0],
8192 # now use the data to generate output
8193 if pkg.installed or not has_previous:
8194 repoadd = repo_display.repoStr(repo_path_real)
8196 repo_path_prev = None
8198 repo_path_prev = portdb.getRepositoryPath(
8200 if repo_path_prev == repo_path_real:
8201 repoadd = repo_display.repoStr(repo_path_real)
8203 repoadd = "%s=>%s" % (
8204 repo_display.repoStr(repo_path_prev),
8205 repo_display.repoStr(repo_path_real))
8207 repoadd_set.add(repoadd)
8209 xs = [portage.cpv_getkey(pkg_key)] + \
8210 list(portage.catpkgsplit(pkg_key)[2:])
8217 if "COLUMNWIDTH" in self.settings:
8219 mywidth = int(self.settings["COLUMNWIDTH"])
8220 except ValueError, e:
8221 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8223 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8224 self.settings["COLUMNWIDTH"], noiselevel=-1)
8226 oldlp = mywidth - 30
8229 # Convert myoldbest from a list to a string.
8233 for pos, key in enumerate(myoldbest):
8234 key = portage.catpkgsplit(key)[2] + \
8235 "-" + portage.catpkgsplit(key)[3]
8236 if key[-3:] == "-r0":
8238 myoldbest[pos] = key
8239 myoldbest = blue("["+", ".join(myoldbest)+"]")
8242 root_config = self.roots[myroot]
8243 system_set = root_config.sets["system"]
8244 world_set = root_config.sets["world"]
8249 pkg_system = system_set.findAtomForPackage(pkg)
8250 pkg_world = world_set.findAtomForPackage(pkg)
8251 if not (oneshot or pkg_world) and \
8252 myroot == self.target_root and \
8253 favorites_set.findAtomForPackage(pkg):
8254 # Maybe it will be added to world now.
8255 if create_world_atom(pkg, favorites_set, root_config):
8257 except portage.exception.InvalidDependString:
8258 # This is reported elsewhere if relevant.
8261 def pkgprint(pkg_str):
8264 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8266 return colorize("PKG_MERGE_WORLD", pkg_str)
8268 return colorize("PKG_MERGE", pkg_str)
8269 elif pkg_status == "uninstall":
8270 return colorize("PKG_UNINSTALL", pkg_str)
8273 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8275 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8277 return colorize("PKG_NOMERGE", pkg_str)
8280 properties = flatten(use_reduce(paren_reduce(
8281 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8282 except portage.exception.InvalidDependString, e:
8283 if not pkg.installed:
8284 show_invalid_depstring_notice(pkg,
8285 pkg.metadata["PROPERTIES"], str(e))
8289 interactive = "interactive" in properties
8290 if interactive and pkg.operation == "merge":
8291 addl = colorize("WARN", "I") + addl[1:]
8293 counters.interactive += 1
8298 if "--columns" in self.myopts:
8299 if "--quiet" in self.myopts:
8300 myprint=addl+" "+indent+pkgprint(pkg_cp)
8301 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8302 myprint=myprint+myoldbest
8303 myprint=myprint+darkgreen("to "+x[1])
8307 myprint = "[%s] %s%s" % \
8308 (pkgprint(pkg_status.ljust(13)),
8309 indent, pkgprint(pkg.cp))
8311 myprint = "[%s %s] %s%s" % \
8312 (pkgprint(pkg.type_name), addl,
8313 indent, pkgprint(pkg.cp))
8314 if (newlp-nc_len(myprint)) > 0:
8315 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8316 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8317 if (oldlp-nc_len(myprint)) > 0:
8318 myprint=myprint+" "*(oldlp-nc_len(myprint))
8319 myprint=myprint+myoldbest
8320 myprint += darkgreen("to " + pkg.root)
8323 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8325 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8326 myprint += indent + pkgprint(pkg_key) + " " + \
8327 myoldbest + darkgreen("to " + myroot)
8329 if "--columns" in self.myopts:
8330 if "--quiet" in self.myopts:
8331 myprint=addl+" "+indent+pkgprint(pkg_cp)
8332 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8333 myprint=myprint+myoldbest
8337 myprint = "[%s] %s%s" % \
8338 (pkgprint(pkg_status.ljust(13)),
8339 indent, pkgprint(pkg.cp))
8341 myprint = "[%s %s] %s%s" % \
8342 (pkgprint(pkg.type_name), addl,
8343 indent, pkgprint(pkg.cp))
8344 if (newlp-nc_len(myprint)) > 0:
8345 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8346 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8347 if (oldlp-nc_len(myprint)) > 0:
8348 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8349 myprint += myoldbest
8352 myprint = "[%s] %s%s %s" % \
8353 (pkgprint(pkg_status.ljust(13)),
8354 indent, pkgprint(pkg.cpv),
8357 myprint = "[%s %s] %s%s %s" % \
8358 (pkgprint(pkg_type), addl, indent,
8359 pkgprint(pkg.cpv), myoldbest)
8361 if columns and pkg.operation == "uninstall":
8363 p.append((myprint, verboseadd, repoadd))
8365 if "--tree" not in self.myopts and \
8366 "--quiet" not in self.myopts and \
8367 not self._opts_no_restart.intersection(self.myopts) and \
8368 pkg.root == self._running_root.root and \
8369 portage.match_from_list(
8370 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8371 not vardb.cpv_exists(pkg.cpv) and \
8372 "--quiet" not in self.myopts:
8373 if mylist_index < len(mylist) - 1:
8374 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8375 p.append(colorize("WARN", " then resume the merge."))
8378 show_repos = repoadd_set and repoadd_set != set(["0"])
8381 if isinstance(x, basestring):
8382 out.write("%s\n" % (x,))
8385 myprint, verboseadd, repoadd = x
8388 myprint += " " + verboseadd
8390 if show_repos and repoadd:
8391 myprint += " " + teal("[%s]" % repoadd)
8393 out.write("%s\n" % (myprint,))
8402 sys.stdout.write(str(repo_display))
8404 if "--changelog" in self.myopts:
8406 for revision,text in changelogs:
8407 print bold('*'+revision)
8408 sys.stdout.write(text)
8413 def display_problems(self):
8415 Display problems with the dependency graph such as slot collisions.
8416 This is called internally by display() to show the problems _after_
8417 the merge list where it is most likely to be seen, but if display()
8418 is not going to be called then this method should be called explicitly
8419 to ensure that the user is notified of problems with the graph.
8421 All output goes to stderr, except for unsatisfied dependencies which
8422 go to stdout for parsing by programs such as autounmask.
8425 # Note that show_masked_packages() sends it's output to
8426 # stdout, and some programs such as autounmask parse the
8427 # output in cases when emerge bails out. However, when
8428 # show_masked_packages() is called for installed packages
8429 # here, the message is a warning that is more appropriate
8430 # to send to stderr, so temporarily redirect stdout to
8431 # stderr. TODO: Fix output code so there's a cleaner way
8432 # to redirect everything to stderr.
8437 sys.stdout = sys.stderr
8438 self._display_problems()
8444 # This goes to stdout for parsing by programs like autounmask.
8445 for pargs, kwargs in self._unsatisfied_deps_for_display:
8446 self._show_unsatisfied_dep(*pargs, **kwargs)
8448 def _display_problems(self):
8449 if self._circular_deps_for_display is not None:
8450 self._show_circular_deps(
8451 self._circular_deps_for_display)
8453 # The user is only notified of a slot conflict if
8454 # there are no unresolvable blocker conflicts.
8455 if self._unsatisfied_blockers_for_display is not None:
8456 self._show_unsatisfied_blockers(
8457 self._unsatisfied_blockers_for_display)
8459 self._show_slot_collision_notice()
8461 # TODO: Add generic support for "set problem" handlers so that
8462 # the below warnings aren't special cases for world only.
8464 if self._missing_args:
8465 world_problems = False
8466 if "world" in self._sets:
8467 # Filter out indirect members of world (from nested sets)
8468 # since only direct members of world are desired here.
8469 world_set = self.roots[self.target_root].sets["world"]
8470 for arg, atom in self._missing_args:
8471 if arg.name == "world" and atom in world_set:
8472 world_problems = True
8476 sys.stderr.write("\n!!! Problems have been " + \
8477 "detected with your world file\n")
8478 sys.stderr.write("!!! Please run " + \
8479 green("emaint --check world")+"\n\n")
8481 if self._missing_args:
8482 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8483 " Ebuilds for the following packages are either all\n")
8484 sys.stderr.write(colorize("BAD", "!!!") + \
8485 " masked or don't exist:\n")
8486 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8487 self._missing_args) + "\n")
8489 if self._pprovided_args:
8491 for arg, atom in self._pprovided_args:
8492 if isinstance(arg, SetArg):
8494 arg_atom = (atom, atom)
8497 arg_atom = (arg.arg, atom)
8498 refs = arg_refs.setdefault(arg_atom, [])
8499 if parent not in refs:
8502 msg.append(bad("\nWARNING: "))
8503 if len(self._pprovided_args) > 1:
8504 msg.append("Requested packages will not be " + \
8505 "merged because they are listed in\n")
8507 msg.append("A requested package will not be " + \
8508 "merged because it is listed in\n")
8509 msg.append("package.provided:\n\n")
8510 problems_sets = set()
8511 for (arg, atom), refs in arg_refs.iteritems():
8514 problems_sets.update(refs)
8516 ref_string = ", ".join(["'%s'" % name for name in refs])
8517 ref_string = " pulled in by " + ref_string
8518 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8520 if "world" in problems_sets:
8521 msg.append("This problem can be solved in one of the following ways:\n\n")
8522 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8523 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8524 msg.append(" C) Remove offending entries from package.provided.\n\n")
8525 msg.append("The best course of action depends on the reason that an offending\n")
8526 msg.append("package.provided entry exists.\n\n")
8527 sys.stderr.write("".join(msg))
8529 masked_packages = []
8530 for pkg in self._masked_installed:
8531 root_config = pkg.root_config
8532 pkgsettings = self.pkgsettings[pkg.root]
8533 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8534 masked_packages.append((root_config, pkgsettings,
8535 pkg.cpv, pkg.metadata, mreasons))
8537 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8538 " The following installed packages are masked:\n")
8539 show_masked_packages(masked_packages)
8543 def calc_changelog(self,ebuildpath,current,next):
8544 if ebuildpath == None or not os.path.exists(ebuildpath):
8546 current = '-'.join(portage.catpkgsplit(current)[1:])
8547 if current.endswith('-r0'):
8548 current = current[:-3]
8549 next = '-'.join(portage.catpkgsplit(next)[1:])
8550 if next.endswith('-r0'):
8552 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8554 changelog = open(changelogpath).read()
8555 except SystemExit, e:
8556 raise # Needed else can't exit
8559 divisions = self.find_changelog_tags(changelog)
8560 #print 'XX from',current,'to',next
8561 #for div,text in divisions: print 'XX',div
8562 # skip entries for all revisions above the one we are about to emerge
8563 for i in range(len(divisions)):
8564 if divisions[i][0]==next:
8565 divisions = divisions[i:]
8567 # find out how many entries we are going to display
8568 for i in range(len(divisions)):
8569 if divisions[i][0]==current:
8570 divisions = divisions[:i]
8573 # couldnt find the current revision in the list. display nothing
8577 def find_changelog_tags(self,changelog):
8581 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8583 if release is not None:
8584 divs.append((release,changelog))
8586 if release is not None:
8587 divs.append((release,changelog[:match.start()]))
8588 changelog = changelog[match.end():]
8589 release = match.group(1)
8590 if release.endswith('.ebuild'):
8591 release = release[:-7]
8592 if release.endswith('-r0'):
8593 release = release[:-3]
8595 def saveNomergeFavorites(self):
8596 """Find atoms in favorites that are not in the mergelist and add them
8597 to the world file if necessary."""
8598 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8599 "--oneshot", "--onlydeps", "--pretend"):
8600 if x in self.myopts:
8602 root_config = self.roots[self.target_root]
8603 world_set = root_config.sets["world"]
8605 world_locked = False
8606 if hasattr(world_set, "lock"):
8610 if hasattr(world_set, "load"):
8611 world_set.load() # maybe it's changed on disk
8613 args_set = self._sets["args"]
8614 portdb = self.trees[self.target_root]["porttree"].dbapi
8615 added_favorites = set()
8616 for x in self._set_nodes:
8617 pkg_type, root, pkg_key, pkg_status = x
8618 if pkg_status != "nomerge":
8622 myfavkey = create_world_atom(x, args_set, root_config)
8624 if myfavkey in added_favorites:
8626 added_favorites.add(myfavkey)
8627 except portage.exception.InvalidDependString, e:
8628 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8629 (pkg_key, str(e)), noiselevel=-1)
8630 writemsg("!!! see '%s'\n\n" % os.path.join(
8631 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8634 for k in self._sets:
8635 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8640 all_added.append(SETPREFIX + k)
8641 all_added.extend(added_favorites)
8644 print ">>> Recording %s in \"world\" favorites file..." % \
8645 colorize("INFORM", str(a))
8647 world_set.update(all_added)
8652 def loadResumeCommand(self, resume_data, skip_masked=False):
8654 Add a resume command to the graph and validate it in the process. This
8655 will raise a PackageNotFound exception if a package is not available.
8658 if not isinstance(resume_data, dict):
8661 mergelist = resume_data.get("mergelist")
8662 if not isinstance(mergelist, list):
8665 fakedb = self.mydbapi
8667 serialized_tasks = []
8670 if not (isinstance(x, list) and len(x) == 4):
8672 pkg_type, myroot, pkg_key, action = x
8673 if pkg_type not in self.pkg_tree_map:
8675 if action != "merge":
8677 tree_type = self.pkg_tree_map[pkg_type]
8678 mydb = trees[myroot][tree_type].dbapi
8679 db_keys = list(self._trees_orig[myroot][
8680 tree_type].dbapi._aux_cache_keys)
8682 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8684 # It does no exist or it is corrupt.
8685 if action == "uninstall":
8687 raise portage.exception.PackageNotFound(pkg_key)
8688 installed = action == "uninstall"
8689 built = pkg_type != "ebuild"
8690 root_config = self.roots[myroot]
8691 pkg = Package(built=built, cpv=pkg_key,
8692 installed=installed, metadata=metadata,
8693 operation=action, root_config=root_config,
8695 if pkg_type == "ebuild":
8696 pkgsettings = self.pkgsettings[myroot]
8697 pkgsettings.setcpv(pkg)
8698 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8699 self._pkg_cache[pkg] = pkg
8701 root_config = self.roots[pkg.root]
8702 if "merge" == pkg.operation and \
8703 not visible(root_config.settings, pkg):
8705 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8707 self._unsatisfied_deps_for_display.append(
8708 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8710 fakedb[myroot].cpv_inject(pkg)
8711 serialized_tasks.append(pkg)
8712 self.spinner.update()
8714 if self._unsatisfied_deps_for_display:
8717 if not serialized_tasks or "--nodeps" in self.myopts:
8718 self._serialized_tasks_cache = serialized_tasks
8719 self._scheduler_graph = self.digraph
8721 self._select_package = self._select_pkg_from_graph
8722 self.myparams.add("selective")
8723 # Always traverse deep dependencies in order to account for
8724 # potentially unsatisfied dependencies of installed packages.
8725 # This is necessary for correct --keep-going or --resume operation
8726 # in case a package from a group of circularly dependent packages
8727 # fails. In this case, a package which has recently been installed
8728 # may have an unsatisfied circular dependency (pulled in by
8729 # PDEPEND, for example). So, even though a package is already
8730 # installed, it may not have all of it's dependencies satisfied, so
8731 # it may not be usable. If such a package is in the subgraph of
8732 # deep depenedencies of a scheduled build, that build needs to
8733 # be cancelled. In order for this type of situation to be
8734 # recognized, deep traversal of dependencies is required.
8735 self.myparams.add("deep")
8737 favorites = resume_data.get("favorites")
8738 args_set = self._sets["args"]
8739 if isinstance(favorites, list):
8740 args = self._load_favorites(favorites)
8744 for task in serialized_tasks:
8745 if isinstance(task, Package) and \
8746 task.operation == "merge":
8747 if not self._add_pkg(task, None):
8750 # Packages for argument atoms need to be explicitly
8751 # added via _add_pkg() so that they are included in the
8752 # digraph (needed at least for --tree display).
8754 for atom in arg.set:
8755 pkg, existing_node = self._select_package(
8756 arg.root_config.root, atom)
8757 if existing_node is None and \
8759 if not self._add_pkg(pkg, Dependency(atom=atom,
8760 root=pkg.root, parent=arg)):
8763 # Allow unsatisfied deps here to avoid showing a masking
8764 # message for an unsatisfied dep that isn't necessarily
8766 if not self._create_graph(allow_unsatisfied=True):
8769 unsatisfied_deps = []
8770 for dep in self._unsatisfied_deps:
8771 if not isinstance(dep.parent, Package):
8773 if dep.parent.operation == "merge":
8774 unsatisfied_deps.append(dep)
8777 # For unsatisfied deps of installed packages, only account for
8778 # them if they are in the subgraph of dependencies of a package
8779 # which is scheduled to be installed.
8780 unsatisfied_install = False
8782 dep_stack = self.digraph.parent_nodes(dep.parent)
8784 node = dep_stack.pop()
8785 if not isinstance(node, Package):
8787 if node.operation == "merge":
8788 unsatisfied_install = True
8790 if node in traversed:
8793 dep_stack.extend(self.digraph.parent_nodes(node))
8795 if unsatisfied_install:
8796 unsatisfied_deps.append(dep)
8798 if masked_tasks or unsatisfied_deps:
8799 # This probably means that a required package
8800 # was dropped via --skipfirst. It makes the
8801 # resume list invalid, so convert it to a
8802 # UnsatisfiedResumeDep exception.
8803 raise self.UnsatisfiedResumeDep(self,
8804 masked_tasks + unsatisfied_deps)
8805 self._serialized_tasks_cache = None
8808 except self._unknown_internal_error:
8813 def _load_favorites(self, favorites):
8815 Use a list of favorites to resume state from a
8816 previous select_files() call. This creates similar
8817 DependencyArg instances to those that would have
8818 been created by the original select_files() call.
8819 This allows Package instances to be matched with
8820 DependencyArg instances during graph creation.
8822 root_config = self.roots[self.target_root]
8823 getSetAtoms = root_config.setconfig.getSetAtoms
8824 sets = root_config.sets
8827 if not isinstance(x, basestring):
8829 if x in ("system", "world"):
8831 if x.startswith(SETPREFIX):
8832 s = x[len(SETPREFIX):]
8837 # Recursively expand sets so that containment tests in
8838 # self._get_parent_sets() properly match atoms in nested
8839 # sets (like if world contains system).
8840 expanded_set = InternalPackageSet(
8841 initial_atoms=getSetAtoms(s))
8842 self._sets[s] = expanded_set
8843 args.append(SetArg(arg=x, set=expanded_set,
8844 root_config=root_config))
8846 if not portage.isvalidatom(x):
8848 args.append(AtomArg(arg=x, atom=x,
8849 root_config=root_config))
8851 self._set_args(args)
8854 class UnsatisfiedResumeDep(portage.exception.PortageException):
8856 A dependency of a resume list is not installed. This
8857 can occur when a required package is dropped from the
8858 merge list via --skipfirst.
def __init__(self, depgraph, value):
	"""Wrap the offending dependency list in a PortageException and
	keep a reference to the depgraph that produced it, so that the
	caller handling the resume failure can inspect the graph state.

	@param depgraph: the depgraph instance that raised this exception
	@param value: the list of masked/unsatisfied dependencies
	"""
	# Stash the graph first; the base-class call only records 'value'
	# and the two statements are order-independent.
	self.depgraph = depgraph
	portage.exception.PortageException.__init__(self, value)
class _internal_exception(portage.exception.PortageException):
	"""Common base for the depgraph's internal control-flow exceptions
	(see the _unknown_internal_error and _serialize_tasks_retry
	subclasses declared alongside this class)."""

	def __init__(self, value=""):
		# Old-style portage exception hierarchy: call the base
		# initializer explicitly rather than via super().
		portage.exception.PortageException.__init__(self, value)
8868 class _unknown_internal_error(_internal_exception):
8870 Used by the depgraph internally to terminate graph creation.
8871 The specific reason for the failure should have been dumped
8872 to stderr, unfortunately, the exact reason for the failure
8876 class _serialize_tasks_retry(_internal_exception):
8878 This is raised by the _serialize_tasks() method when it needs to
8879 be called again for some reason. The only case that it's currently
8880 used for is when neglected dependencies need to be added to the
8881 graph in order to avoid making a potentially unsafe decision.
8884 class _dep_check_composite_db(portage.dbapi):
8886 A dbapi-like interface that is optimized for use in dep_check() calls.
8887 This is built on top of the existing depgraph package selection logic.
8888 Some packages that have been added to the graph may be masked from this
8889 view in order to influence the atom preference selection that occurs
8892 def __init__(self, depgraph, root):
8893 portage.dbapi.__init__(self)
8894 self._depgraph = depgraph
8896 self._match_cache = {}
8897 self._cpv_pkg_map = {}
def _clear_cache(self):
	"""Discard all memoized state: the per-atom match() results and
	the cpv -> Package mapping used by aux_get()."""
	for cache in (self._match_cache, self._cpv_pkg_map):
		cache.clear()
8903 def match(self, atom):
8904 ret = self._match_cache.get(atom)
8909 atom = self._dep_expand(atom)
8910 pkg, existing = self._depgraph._select_package(self._root, atom)
8914 # Return the highest available from select_package() as well as
8915 # any matching slots in the graph db.
8917 slots.add(pkg.metadata["SLOT"])
8918 atom_cp = portage.dep_getkey(atom)
8919 if pkg.cp.startswith("virtual/"):
8920 # For new-style virtual lookahead that occurs inside
8921 # dep_check(), examine all slots. This is needed
8922 # so that newer slots will not unnecessarily be pulled in
8923 # when a satisfying lower slot is already installed. For
8924 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8925 # there's no need to pull in a newer slot to satisfy a
8926 # virtual/jdk dependency.
8927 for db, pkg_type, built, installed, db_keys in \
8928 self._depgraph._filtered_trees[self._root]["dbs"]:
8929 for cpv in db.match(atom):
8930 if portage.cpv_getkey(cpv) != pkg.cp:
8932 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8934 if self._visible(pkg):
8935 self._cpv_pkg_map[pkg.cpv] = pkg
8937 slots.remove(pkg.metadata["SLOT"])
8939 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8940 pkg, existing = self._depgraph._select_package(
8941 self._root, slot_atom)
8944 if not self._visible(pkg):
8946 self._cpv_pkg_map[pkg.cpv] = pkg
8949 self._cpv_sort_ascending(ret)
8950 self._match_cache[orig_atom] = ret
8953 def _visible(self, pkg):
8954 if pkg.installed and "selective" not in self._depgraph.myparams:
8956 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8957 except (StopIteration, portage.exception.InvalidDependString):
8964 self._depgraph.pkgsettings[pkg.root], pkg):
8966 except portage.exception.InvalidDependString:
8968 in_graph = self._depgraph._slot_pkg_map[
8969 self._root].get(pkg.slot_atom)
8970 if in_graph is None:
8971 # Mask choices for packages which are not the highest visible
8972 # version within their slot (since they usually trigger slot
8974 highest_visible, in_graph = self._depgraph._select_package(
8975 self._root, pkg.slot_atom)
8976 if pkg != highest_visible:
8978 elif in_graph != pkg:
8979 # Mask choices for packages that would trigger a slot
8980 # conflict with a previously selected package.
8984 def _dep_expand(self, atom):
8986 This is only needed for old installed packages that may
8987 contain atoms that are not fully qualified with a specific
8988 category. Emulate the cpv_expand() function that's used by
8989 dbapi.match() in cases like this. If there are multiple
8990 matches, it's often due to a new-style virtual that has
8991 been added, so try to filter those out to avoid raising
8994 root_config = self._depgraph.roots[self._root]
8996 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8997 if len(expanded_atoms) > 1:
8998 non_virtual_atoms = []
8999 for x in expanded_atoms:
9000 if not portage.dep_getkey(x).startswith("virtual/"):
9001 non_virtual_atoms.append(x)
9002 if len(non_virtual_atoms) == 1:
9003 expanded_atoms = non_virtual_atoms
9004 if len(expanded_atoms) > 1:
9005 # compatible with portage.cpv_expand()
9006 raise portage.exception.AmbiguousPackageName(
9007 [portage.dep_getkey(x) for x in expanded_atoms])
9009 atom = expanded_atoms[0]
9011 null_atom = insert_category_into_atom(atom, "null")
9012 null_cp = portage.dep_getkey(null_atom)
9013 cat, atom_pn = portage.catsplit(null_cp)
9014 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9016 # Allow the resolver to choose which virtual.
9017 atom = insert_category_into_atom(atom, "virtual")
9019 atom = insert_category_into_atom(atom, "null")
def aux_get(self, cpv, wants):
	"""Emulate dbapi.aux_get() on top of the memoized Package metadata.

	@param cpv: key into self._cpv_pkg_map
	@param wants: iterable of metadata key names
	@return: list of metadata values in the same order as wants, with
		an empty string substituted for any missing key
	"""
	metadata = self._cpv_pkg_map[cpv].metadata
	result = []
	for key in wants:
		result.append(metadata.get(key, ""))
	return result
9026 class RepoDisplay(object):
9027 def __init__(self, roots):
9028 self._shown_repos = {}
9029 self._unknown_repo = False
9031 for root_config in roots.itervalues():
9032 portdir = root_config.settings.get("PORTDIR")
9034 repo_paths.add(portdir)
9035 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9037 repo_paths.update(overlays.split())
9038 repo_paths = list(repo_paths)
9039 self._repo_paths = repo_paths
9040 self._repo_paths_real = [ os.path.realpath(repo_path) \
9041 for repo_path in repo_paths ]
9043 # pre-allocate index for PORTDIR so that it always has index 0.
9044 for root_config in roots.itervalues():
9045 portdb = root_config.trees["porttree"].dbapi
9046 portdir = portdb.porttree_root
9048 self.repoStr(portdir)
9050 def repoStr(self, repo_path_real):
9053 real_index = self._repo_paths_real.index(repo_path_real)
9054 if real_index == -1:
9056 self._unknown_repo = True
9058 shown_repos = self._shown_repos
9059 repo_paths = self._repo_paths
9060 repo_path = repo_paths[real_index]
9061 index = shown_repos.get(repo_path)
9063 index = len(shown_repos)
9064 shown_repos[repo_path] = index
9070 shown_repos = self._shown_repos
9071 unknown_repo = self._unknown_repo
9072 if shown_repos or self._unknown_repo:
9073 output.append("Portage tree and overlays:\n")
9074 show_repo_paths = list(shown_repos)
9075 for repo_path, repo_index in shown_repos.iteritems():
9076 show_repo_paths[repo_index] = repo_path
9078 for index, repo_path in enumerate(show_repo_paths):
9079 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9081 output.append(" "+teal("[?]") + \
9082 " indicates that the source repository could not be determined\n")
9083 return "".join(output)
9085 class PackageCounters(object):
9095 self.blocks_satisfied = 0
9097 self.restrict_fetch = 0
9098 self.restrict_fetch_satisfied = 0
9099 self.interactive = 0
9102 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9105 myoutput.append("Total: %s package" % total_installs)
9106 if total_installs != 1:
9107 myoutput.append("s")
9108 if total_installs != 0:
9109 myoutput.append(" (")
9110 if self.upgrades > 0:
9111 details.append("%s upgrade" % self.upgrades)
9112 if self.upgrades > 1:
9114 if self.downgrades > 0:
9115 details.append("%s downgrade" % self.downgrades)
9116 if self.downgrades > 1:
9119 details.append("%s new" % self.new)
9120 if self.newslot > 0:
9121 details.append("%s in new slot" % self.newslot)
9122 if self.newslot > 1:
9125 details.append("%s reinstall" % self.reinst)
9129 details.append("%s uninstall" % self.uninst)
9132 if self.interactive > 0:
9133 details.append("%s %s" % (self.interactive,
9134 colorize("WARN", "interactive")))
9135 myoutput.append(", ".join(details))
9136 if total_installs != 0:
9137 myoutput.append(")")
9138 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9139 if self.restrict_fetch:
9140 myoutput.append("\nFetch Restriction: %s package" % \
9141 self.restrict_fetch)
9142 if self.restrict_fetch > 1:
9143 myoutput.append("s")
9144 if self.restrict_fetch_satisfied < self.restrict_fetch:
9145 myoutput.append(bad(" (%s unsatisfied)") % \
9146 (self.restrict_fetch - self.restrict_fetch_satisfied))
9148 myoutput.append("\nConflict: %s block" % \
9151 myoutput.append("s")
9152 if self.blocks_satisfied < self.blocks:
9153 myoutput.append(bad(" (%s unsatisfied)") % \
9154 (self.blocks - self.blocks_satisfied))
9155 return "".join(myoutput)
9157 class PollSelectAdapter(PollConstants):
9160 Use select to emulate a poll object, for
9161 systems that don't support poll().
9165 self._registered = {}
9166 self._select_args = [[], [], []]
9168 def register(self, fd, *args):
9170 Only POLLIN is currently supported!
9174 "register expected at most 2 arguments, got " + \
9175 repr(1 + len(args)))
9177 eventmask = PollConstants.POLLIN | \
9178 PollConstants.POLLPRI | PollConstants.POLLOUT
9182 self._registered[fd] = eventmask
9183 self._select_args = None
def unregister(self, fd):
	"""Stop watching *fd*.

	Invalidates the cached select() argument lists before dropping the
	descriptor, so the next poll() call rebuilds them.  Raises KeyError
	if fd was never registered.
	"""
	self._select_args = None
	self._registered.pop(fd)
9189 def poll(self, *args):
9192 "poll expected at most 2 arguments, got " + \
9193 repr(1 + len(args)))
9199 select_args = self._select_args
9200 if select_args is None:
9201 select_args = [self._registered.keys(), [], []]
9203 if timeout is not None:
9204 select_args = select_args[:]
9205 # Translate poll() timeout args to select() timeout args:
9207 # | units | value(s) for indefinite block
9208 # ---------|--------------|------------------------------
9209 # poll | milliseconds | omitted, negative, or None
9210 # ---------|--------------|------------------------------
9211 # select | seconds | omitted
9212 # ---------|--------------|------------------------------
9214 if timeout is not None and timeout < 0:
9216 if timeout is not None:
9217 select_args.append(timeout / 1000)
9219 select_events = select.select(*select_args)
9221 for fd in select_events[0]:
9222 poll_events.append((fd, PollConstants.POLLIN))
9225 class SequentialTaskQueue(SlotObject):
9227 __slots__ = ("max_jobs", "running_tasks") + \
9228 ("_dirty", "_scheduling", "_task_queue")
9230 def __init__(self, **kwargs):
9231 SlotObject.__init__(self, **kwargs)
9232 self._task_queue = deque()
9233 self.running_tasks = set()
9234 if self.max_jobs is None:
9238 def add(self, task):
9239 self._task_queue.append(task)
9242 def addFront(self, task):
9243 self._task_queue.appendleft(task)
9254 if self._scheduling:
9255 # Ignore any recursive schedule() calls triggered via
9256 # self._task_exit().
9259 self._scheduling = True
9261 task_queue = self._task_queue
9262 running_tasks = self.running_tasks
9263 max_jobs = self.max_jobs
9264 state_changed = False
9266 while task_queue and \
9267 (max_jobs is True or len(running_tasks) < max_jobs):
9268 task = task_queue.popleft()
9269 cancelled = getattr(task, "cancelled", None)
9271 running_tasks.add(task)
9272 task.addExitListener(self._task_exit)
9274 state_changed = True
9277 self._scheduling = False
9279 return state_changed
9281 def _task_exit(self, task):
9283 Since we can always rely on exit listeners being called, the set of
9284 running tasks is always pruned automatically and there is never any need
9285 to actively prune it.
9287 self.running_tasks.remove(task)
9288 if self._task_queue:
9292 self._task_queue.clear()
9293 running_tasks = self.running_tasks
9294 while running_tasks:
9295 task = running_tasks.pop()
9296 task.removeExitListener(self._task_exit)
def __nonzero__(self):
	"""True while any task is still queued or running."""
	if self._task_queue:
		return True
	return bool(self.running_tasks)
9304 return len(self._task_queue) + len(self.running_tasks)
9306 _can_poll_device = None
9308 def can_poll_device():
9310 Test if it's possible to use poll() on a device such as a pty. This
9311 is known to fail on Darwin.
9313 @returns: True if poll() on a device succeeds, False otherwise.
9316 global _can_poll_device
9317 if _can_poll_device is not None:
9318 return _can_poll_device
9320 if not hasattr(select, "poll"):
9321 _can_poll_device = False
9322 return _can_poll_device
9325 dev_null = open('/dev/null', 'rb')
9327 _can_poll_device = False
9328 return _can_poll_device
9331 p.register(dev_null.fileno(), PollConstants.POLLIN)
9333 invalid_request = False
9334 for f, event in p.poll():
9335 if event & PollConstants.POLLNVAL:
9336 invalid_request = True
9340 _can_poll_device = not invalid_request
9341 return _can_poll_device
def create_poll_instance():
	"""
	Return a poll object: select.poll() when the platform's poll()
	is usable on devices, otherwise a PollSelectAdapter fallback
	(poll() on devices is known to be broken on some systems, e.g.
	Darwin -- see can_poll_device()).
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
9353 getloadavg = getattr(os, "getloadavg", None)
9354 if getloadavg is None:
9357 Uses /proc/loadavg to emulate os.getloadavg().
9358 Raises OSError if the load average was unobtainable.
9361 loadavg_str = open('/proc/loadavg').readline()
9363 # getloadavg() is only supposed to raise OSError, so convert
9364 raise OSError('unknown')
9365 loadavg_split = loadavg_str.split()
9366 if len(loadavg_split) < 3:
9367 raise OSError('unknown')
9371 loadavg_floats.append(float(loadavg_split[i]))
9373 raise OSError('unknown')
9374 return tuple(loadavg_floats)
9376 class PollScheduler(object):
9378 class _sched_iface_class(SlotObject):
9379 __slots__ = ("register", "schedule", "unregister")
9383 self._max_load = None
9385 self._poll_event_queue = []
9386 self._poll_event_handlers = {}
9387 self._poll_event_handler_ids = {}
9388 # Increment id for each new handler.
9389 self._event_handler_id = 0
9390 self._poll_obj = create_poll_instance()
9391 self._scheduling = False
9393 def _schedule(self):
9395 Calls _schedule_tasks() and automatically returns early from
9396 any recursive calls to this method that the _schedule_tasks()
9397 call might trigger. This makes _schedule() safe to call from
9398 inside exit listeners.
9400 if self._scheduling:
9402 self._scheduling = True
9404 return self._schedule_tasks()
9406 self._scheduling = False
9408 def _running_job_count(self):
9411 def _can_add_job(self):
9412 max_jobs = self._max_jobs
9413 max_load = self._max_load
9415 if self._max_jobs is not True and \
9416 self._running_job_count() >= self._max_jobs:
9419 if max_load is not None and \
9420 (max_jobs is True or max_jobs > 1) and \
9421 self._running_job_count() >= 1:
9423 avg1, avg5, avg15 = getloadavg()
9427 if avg1 >= max_load:
9432 def _poll(self, timeout=None):
9434 All poll() calls pass through here. The poll events
9435 are added directly to self._poll_event_queue.
9436 In order to avoid endless blocking, this raises
9437 StopIteration if timeout is None and there are
9438 no file descriptors to poll.
9440 if not self._poll_event_handlers:
9442 if timeout is None and \
9443 not self._poll_event_handlers:
9444 raise StopIteration(
9445 "timeout is None and there are no poll() event handlers")
9447 # The following error is known to occur with Linux kernel versions
9450 # select.error: (4, 'Interrupted system call')
9452 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9453 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9454 # without any events.
9457 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9459 except select.error, e:
9460 writemsg_level("\n!!! select error: %s\n" % (e,),
9461 level=logging.ERROR, noiselevel=-1)
9463 if timeout is not None:
9466 def _next_poll_event(self, timeout=None):
9468 Since the _schedule_wait() loop is called by event
9469 handlers from _poll_loop(), maintain a central event
9470 queue for both of them to share events from a single
9471 poll() call. In order to avoid endless blocking, this
9472 raises StopIteration if timeout is None and there are
9473 no file descriptors to poll.
9475 if not self._poll_event_queue:
9477 return self._poll_event_queue.pop()
9479 def _poll_loop(self):
9481 event_handlers = self._poll_event_handlers
9482 event_handled = False
9485 while event_handlers:
9486 f, event = self._next_poll_event()
9487 handler, reg_id = event_handlers[f]
9489 event_handled = True
9490 except StopIteration:
9491 event_handled = True
9493 if not event_handled:
9494 raise AssertionError("tight loop")
9496 def _schedule_yield(self):
9498 Schedule for a short period of time chosen by the scheduler based
9499 on internal state. Synchronous tasks should call this periodically
9500 in order to allow the scheduler to service pending poll events. The
9501 scheduler will call poll() exactly once, without blocking, and any
9502 resulting poll events will be serviced.
9504 event_handlers = self._poll_event_handlers
9507 if not event_handlers:
9508 return bool(events_handled)
9510 if not self._poll_event_queue:
9514 while event_handlers and self._poll_event_queue:
9515 f, event = self._next_poll_event()
9516 handler, reg_id = event_handlers[f]
9519 except StopIteration:
9522 return bool(events_handled)
9524 def _register(self, f, eventmask, handler):
9527 @return: A unique registration id, for use in schedule() or
9530 if f in self._poll_event_handlers:
9531 raise AssertionError("fd %d is already registered" % f)
9532 self._event_handler_id += 1
9533 reg_id = self._event_handler_id
9534 self._poll_event_handler_ids[reg_id] = f
9535 self._poll_event_handlers[f] = (handler, reg_id)
9536 self._poll_obj.register(f, eventmask)
def _unregister(self, reg_id):
	"""Remove the event handler registered under *reg_id* and stop
	polling its file descriptor."""
	fd = self._poll_event_handler_ids[reg_id]
	self._poll_obj.unregister(fd)
	# Drop both directions of the fd <-> reg_id mapping.
	del self._poll_event_handlers[fd]
	del self._poll_event_handler_ids[reg_id]
9545 def _schedule_wait(self, wait_ids):
9547 Schedule until wait_id is not longer registered
9550 @param wait_id: a task id to wait for
9552 event_handlers = self._poll_event_handlers
9553 handler_ids = self._poll_event_handler_ids
9554 event_handled = False
9556 if isinstance(wait_ids, int):
9557 wait_ids = frozenset([wait_ids])
9560 while wait_ids.intersection(handler_ids):
9561 f, event = self._next_poll_event()
9562 handler, reg_id = event_handlers[f]
9564 event_handled = True
9565 except StopIteration:
9566 event_handled = True
9568 return event_handled
9570 class QueueScheduler(PollScheduler):
9573 Add instances of SequentialTaskQueue and then call run(). The
9574 run() method returns when no tasks remain.
9577 def __init__(self, max_jobs=None, max_load=None):
9578 PollScheduler.__init__(self)
9580 if max_jobs is None:
9583 self._max_jobs = max_jobs
9584 self._max_load = max_load
9585 self.sched_iface = self._sched_iface_class(
9586 register=self._register,
9587 schedule=self._schedule_wait,
9588 unregister=self._unregister)
9591 self._schedule_listeners = []
9594 self._queues.append(q)
def remove(self, q):
	"""Detach task queue *q* from this scheduler.

	Raises ValueError if q was never added.
	"""
	self._queues.remove(q)
9601 while self._schedule():
9604 while self._running_job_count():
9607 def _schedule_tasks(self):
9610 @returns: True if there may be remaining tasks to schedule,
9613 while self._can_add_job():
9614 n = self._max_jobs - self._running_job_count()
9618 if not self._start_next_job(n):
9621 for q in self._queues:
9626 def _running_job_count(self):
9628 for q in self._queues:
9629 job_count += len(q.running_tasks)
9630 self._jobs = job_count
9633 def _start_next_job(self, n=1):
9635 for q in self._queues:
9636 initial_job_count = len(q.running_tasks)
9638 final_job_count = len(q.running_tasks)
9639 if final_job_count > initial_job_count:
9640 started_count += (final_job_count - initial_job_count)
9641 if started_count >= n:
9643 return started_count
class TaskScheduler(object):
	"""
	A simple way to handle scheduling of AsynchrousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# A single sequential queue feeds a QueueScheduler, which
		# performs the actual polling and job-count bookkeeping.
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		# Expose the underlying scheduler's interface directly.
		self.run = self._scheduler.run
		self.sched_iface = self._scheduler.sched_iface
		self._scheduler.add(self._queue)

	def add(self, task):
		"""Enqueue *task* for execution by run()."""
		self._queue.add(task)
9663 class JobStatusDisplay(object):
9665 _bound_properties = ("curval", "failed", "running")
9666 _jobs_column_width = 48
9668 # Don't update the display unless at least this much
9669 # time has passed, in units of seconds.
9670 _min_display_latency = 2
9672 _default_term_codes = {
9678 _termcap_name_map = {
9679 'carriage_return' : 'cr',
9684 def __init__(self, out=sys.stdout, quiet=False):
9685 object.__setattr__(self, "out", out)
9686 object.__setattr__(self, "quiet", quiet)
9687 object.__setattr__(self, "maxval", 0)
9688 object.__setattr__(self, "merges", 0)
9689 object.__setattr__(self, "_changed", False)
9690 object.__setattr__(self, "_displayed", False)
9691 object.__setattr__(self, "_last_display_time", 0)
9692 object.__setattr__(self, "width", 80)
9695 isatty = hasattr(out, "isatty") and out.isatty()
9696 object.__setattr__(self, "_isatty", isatty)
9697 if not isatty or not self._init_term():
9699 for k, capname in self._termcap_name_map.iteritems():
9700 term_codes[k] = self._default_term_codes[capname]
9701 object.__setattr__(self, "_term_codes", term_codes)
9702 encoding = sys.getdefaultencoding()
9703 for k, v in self._term_codes.items():
9704 if not isinstance(v, str):
9705 self._term_codes[k] = v.decode(encoding, 'replace')
9707 def _init_term(self):
9709 Initialize term control codes.
9711 @returns: True if term codes were successfully initialized,
9715 term_type = os.environ.get("TERM", "vt100")
9721 curses.setupterm(term_type, self.out.fileno())
9722 tigetstr = curses.tigetstr
9723 except curses.error:
9728 if tigetstr is None:
9732 for k, capname in self._termcap_name_map.iteritems():
9733 code = tigetstr(capname)
9735 code = self._default_term_codes[capname]
9736 term_codes[k] = code
9737 object.__setattr__(self, "_term_codes", term_codes)
def _format_msg(self, msg):
	"""Return *msg* decorated with the standard '>>> ' output prefix."""
	template = ">>> %s"
	return template % msg
9745 self._term_codes['carriage_return'] + \
9746 self._term_codes['clr_eol'])
9748 self._displayed = False
9750 def _display(self, line):
9751 self.out.write(line)
9753 self._displayed = True
9755 def _update(self, msg):
9758 if not self._isatty:
9759 out.write(self._format_msg(msg) + self._term_codes['newline'])
9761 self._displayed = True
9767 self._display(self._format_msg(msg))
9769 def displayMessage(self, msg):
9771 was_displayed = self._displayed
9773 if self._isatty and self._displayed:
9776 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9778 self._displayed = False
9781 self._changed = True
9787 for name in self._bound_properties:
9788 object.__setattr__(self, name, 0)
9791 self.out.write(self._term_codes['newline'])
9793 self._displayed = False
9795 def __setattr__(self, name, value):
9796 old_value = getattr(self, name)
9797 if value == old_value:
9799 object.__setattr__(self, name, value)
9800 if name in self._bound_properties:
9801 self._property_change(name, old_value, value)
9803 def _property_change(self, name, old_value, new_value):
9804 self._changed = True
9807 def _load_avg_str(self):
9822 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9826 Display status on stdout, but only if something has
9827 changed since the last call.
9833 current_time = time.time()
9834 time_delta = current_time - self._last_display_time
9835 if self._displayed and \
9837 if not self._isatty:
9839 if time_delta < self._min_display_latency:
9842 self._last_display_time = current_time
9843 self._changed = False
9844 self._display_status()
9846 def _display_status(self):
9847 # Don't use len(self._completed_tasks) here since that also
9848 # can include uninstall tasks.
9849 curval_str = str(self.curval)
9850 maxval_str = str(self.maxval)
9851 running_str = str(self.running)
9852 failed_str = str(self.failed)
9853 load_avg_str = self._load_avg_str()
9855 color_output = StringIO()
9856 plain_output = StringIO()
9857 style_file = portage.output.ConsoleStyleFile(color_output)
9858 style_file.write_listener = plain_output
9859 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9860 style_writer.style_listener = style_file.new_styles
9861 f = formatter.AbstractFormatter(style_writer)
9863 number_style = "INFORM"
9864 f.add_literal_data("Jobs: ")
9865 f.push_style(number_style)
9866 f.add_literal_data(curval_str)
9868 f.add_literal_data(" of ")
9869 f.push_style(number_style)
9870 f.add_literal_data(maxval_str)
9872 f.add_literal_data(" complete")
9875 f.add_literal_data(", ")
9876 f.push_style(number_style)
9877 f.add_literal_data(running_str)
9879 f.add_literal_data(" running")
9882 f.add_literal_data(", ")
9883 f.push_style(number_style)
9884 f.add_literal_data(failed_str)
9886 f.add_literal_data(" failed")
9888 padding = self._jobs_column_width - len(plain_output.getvalue())
9890 f.add_literal_data(padding * " ")
9892 f.add_literal_data("Load avg: ")
9893 f.add_literal_data(load_avg_str)
9895 # Truncate to fit width, to avoid making the terminal scroll if the
9896 # line overflows (happens when the load average is large).
9897 plain_output = plain_output.getvalue()
9898 if self._isatty and len(plain_output) > self.width:
9899 # Use plain_output here since it's easier to truncate
9900 # properly than the color output which contains console
9902 self._update(plain_output[:self.width])
9904 self._update(color_output.getvalue())
9906 xtermTitle(" ".join(plain_output.split()))
9908 class Scheduler(PollScheduler):
9910 _opts_ignore_blockers = \
9911 frozenset(["--buildpkgonly",
9912 "--fetchonly", "--fetch-all-uri",
9913 "--nodeps", "--pretend"])
9915 _opts_no_background = \
9916 frozenset(["--pretend",
9917 "--fetchonly", "--fetch-all-uri"])
9919 _opts_no_restart = frozenset(["--buildpkgonly",
9920 "--fetchonly", "--fetch-all-uri", "--pretend"])
9922 _bad_resume_opts = set(["--ask", "--changelog",
9923 "--resume", "--skipfirst"])
9925 _fetch_log = "/var/log/emerge-fetch.log"
9927 class _iface_class(SlotObject):
9928 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9929 "dblinkElog", "fetch", "register", "schedule",
9930 "scheduleSetup", "scheduleUnpack", "scheduleYield",
class _fetch_iface_class(SlotObject):
	# Bundle handed to fetch tasks: the shared fetch log path and a
	# callable for scheduling on the serialized fetch queue.
	__slots__ = ("log_file", "schedule")
# Container holding one SequentialTaskQueue per purpose; attribute
# names carry no prefix ("merge", "jobs", "fetch", "unpack").
_task_queues_class = slot_dict_class(
	("merge", "jobs", "fetch", "unpack"), prefix="")
class _build_opts_class(SlotObject):
	# Boolean mirror of the corresponding --<opt> command line flags.
	__slots__ = ("buildpkg", "buildpkgonly",
		"fetch_all_uri", "fetchonly", "pretend")
class _binpkg_opts_class(SlotObject):
	# Boolean mirror of the binary-package related command line flags.
	__slots__ = ("fetchonly", "getbinpkg", "pretend")
class _pkg_count_class(SlotObject):
	# Merge progress: curval packages done out of maxval total.
	__slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
	# Thin wrapper around emergelog() that carries the xterm_titles flag.
	__slots__ = ("xterm_titles",)

	def log(self, *pargs, **kwargs):
		"""Forward to emergelog(), suppressing short_msg when xterm
		titles are disabled."""
		if not self.xterm_titles:
			# Avoid interference with the scheduler's status display.
			kwargs.pop("short_msg", None)
		emergelog(self.xterm_titles, *pargs, **kwargs)
class _failed_pkg(SlotObject):
	# Record of a failed merge: the package, where it was built,
	# its build log, and the failing exit status.
	__slots__ = ("build_dir", "build_log", "pkg", "returncode")
9961 class _ConfigPool(object):
9962 """Interface for a task to temporarily allocate a config
9963 instance from a pool. This allows a task to be constructed
9964 long before the config instance actually becomes needed, like
9965 when prefetchers are constructed for the whole merge list."""
9966 __slots__ = ("_root", "_allocate", "_deallocate")
9967 def __init__(self, root, allocate, deallocate):
9969 self._allocate = allocate
9970 self._deallocate = deallocate
9972 return self._allocate(self._root)
9973 def deallocate(self, settings):
9974 self._deallocate(settings)
9976 class _unknown_internal_error(portage.exception.PortageException):
9978 Used internally to terminate scheduling. The specific reason for
9979 the failure should have been dumped to stderr.
9981 def __init__(self, value=""):
9982 portage.exception.PortageException.__init__(self, value)
9984 def __init__(self, settings, trees, mtimedb, myopts,
9985 spinner, mergelist, favorites, digraph):
9986 PollScheduler.__init__(self)
9987 self.settings = settings
9988 self.target_root = settings["ROOT"]
9990 self.myopts = myopts
9991 self._spinner = spinner
9992 self._mtimedb = mtimedb
9993 self._mergelist = mergelist
9994 self._favorites = favorites
9995 self._args_set = InternalPackageSet(favorites)
9996 self._build_opts = self._build_opts_class()
9997 for k in self._build_opts.__slots__:
9998 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9999 self._binpkg_opts = self._binpkg_opts_class()
10000 for k in self._binpkg_opts.__slots__:
10001 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10004 self._logger = self._emerge_log_class()
10005 self._task_queues = self._task_queues_class()
10006 for k in self._task_queues.allowed_keys:
10007 setattr(self._task_queues, k,
10008 SequentialTaskQueue())
10010 # Holds merges that will wait to be executed when no builds are
10011 # executing. This is useful for system packages since dependencies
10012 # on system packages are frequently unspecified.
10013 self._merge_wait_queue = []
10014 # Holds merges that have been transfered from the merge_wait_queue to
10015 # the actual merge queue. They are removed from this list upon
10016 # completion. Other packages can start building only when this list is
10018 self._merge_wait_scheduled = []
10020 # Holds system packages and their deep runtime dependencies. Before
10021 # being merged, these packages go to merge_wait_queue, to be merged
10022 # when no other packages are building.
10023 self._deep_system_deps = set()
10025 # Holds packages to merge which will satisfy currently unsatisfied
10026 # deep runtime dependencies of system packages. If this is not empty
10027 # then no parallel builds will be spawned until it is empty. This
10028 # minimizes the possibility that a build will fail due to the system
10029 # being in a fragile state. For example, see bug #259954.
10030 self._unsatisfied_system_deps = set()
10032 self._status_display = JobStatusDisplay()
10033 self._max_load = myopts.get("--load-average")
10034 max_jobs = myopts.get("--jobs")
10035 if max_jobs is None:
10037 self._set_max_jobs(max_jobs)
10039 # The root where the currently running
10040 # portage instance is installed.
10041 self._running_root = trees["/"]["root_config"]
10043 if settings.get("PORTAGE_DEBUG", "") == "1":
10045 self.pkgsettings = {}
10046 self._config_pool = {}
10047 self._blocker_db = {}
10049 self._config_pool[root] = []
10050 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10052 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10053 schedule=self._schedule_fetch)
10054 self._sched_iface = self._iface_class(
10055 dblinkEbuildPhase=self._dblink_ebuild_phase,
10056 dblinkDisplayMerge=self._dblink_display_merge,
10057 dblinkElog=self._dblink_elog,
10058 fetch=fetch_iface, register=self._register,
10059 schedule=self._schedule_wait,
10060 scheduleSetup=self._schedule_setup,
10061 scheduleUnpack=self._schedule_unpack,
10062 scheduleYield=self._schedule_yield,
10063 unregister=self._unregister)
10065 self._prefetchers = weakref.WeakValueDictionary()
10066 self._pkg_queue = []
10067 self._completed_tasks = set()
10069 self._failed_pkgs = []
10070 self._failed_pkgs_all = []
10071 self._failed_pkgs_die_msgs = []
10072 self._post_mod_echo_msgs = []
10073 self._parallel_fetch = False
10074 merge_count = len([x for x in mergelist \
10075 if isinstance(x, Package) and x.operation == "merge"])
10076 self._pkg_count = self._pkg_count_class(
10077 curval=0, maxval=merge_count)
10078 self._status_display.maxval = self._pkg_count.maxval
10080 # The load average takes some time to respond when new
10081 # jobs are added, so we need to limit the rate of adding
10083 self._job_delay_max = 10
10084 self._job_delay_factor = 1.0
10085 self._job_delay_exp = 1.5
10086 self._previous_job_start_time = None
10088 self._set_digraph(digraph)
10090 # This is used to memoize the _choose_pkg() result when
10091 # no packages can be chosen until one of the existing
10093 self._choose_pkg_return_early = False
10095 features = self.settings.features
10096 if "parallel-fetch" in features and \
10097 not ("--pretend" in self.myopts or \
10098 "--fetch-all-uri" in self.myopts or \
10099 "--fetchonly" in self.myopts):
10100 if "distlocks" not in features:
10101 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10102 portage.writemsg(red("!!!")+" parallel-fetching " + \
10103 "requires the distlocks feature enabled"+"\n",
10105 portage.writemsg(red("!!!")+" you have it disabled, " + \
10106 "thus parallel-fetching is being disabled"+"\n",
10108 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10109 elif len(mergelist) > 1:
10110 self._parallel_fetch = True
10112 if self._parallel_fetch:
10113 # clear out existing fetch log if it exists
10115 open(self._fetch_log, 'w')
10116 except EnvironmentError:
10119 self._running_portage = None
10120 portage_match = self._running_root.trees["vartree"].dbapi.match(
10121 portage.const.PORTAGE_PACKAGE_ATOM)
10123 cpv = portage_match.pop()
10124 self._running_portage = self._pkg(cpv, "installed",
10125 self._running_root, installed=True)
10127 def _poll(self, timeout=None):
10129 PollScheduler._poll(self, timeout=timeout)
def _set_max_jobs(self, max_jobs):
	"""Record a new job-count limit and propagate it to the build
	("jobs") task queue so both stay in agreement."""
	self._max_jobs = max_jobs
	self._task_queues.jobs.max_jobs = max_jobs
10135 def _background_mode(self):
10137 Check if background mode is enabled and adjust states as necessary.
10140 @returns: True if background mode is enabled, False otherwise.
10142 background = (self._max_jobs is True or \
10143 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10144 not bool(self._opts_no_background.intersection(self.myopts))
10147 interactive_tasks = self._get_interactive_tasks()
10148 if interactive_tasks:
10150 writemsg_level(">>> Sending package output to stdio due " + \
10151 "to interactive package(s):\n",
10152 level=logging.INFO, noiselevel=-1)
10154 for pkg in interactive_tasks:
10155 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10156 if pkg.root != "/":
10157 pkg_str += " for " + pkg.root
10158 msg.append(pkg_str)
10160 writemsg_level("".join("%s\n" % (l,) for l in msg),
10161 level=logging.INFO, noiselevel=-1)
10162 if self._max_jobs is True or self._max_jobs > 1:
10163 self._set_max_jobs(1)
10164 writemsg_level(">>> Setting --jobs=1 due " + \
10165 "to the above interactive package(s)\n",
10166 level=logging.INFO, noiselevel=-1)
10168 self._status_display.quiet = \
10169 not background or \
10170 ("--quiet" in self.myopts and \
10171 "--verbose" not in self.myopts)
10173 self._logger.xterm_titles = \
10174 "notitles" not in self.settings.features and \
10175 self._status_display.quiet
10179 def _get_interactive_tasks(self):
10180 from portage import flatten
10181 from portage.dep import use_reduce, paren_reduce
10182 interactive_tasks = []
10183 for task in self._mergelist:
10184 if not (isinstance(task, Package) and \
10185 task.operation == "merge"):
10188 properties = flatten(use_reduce(paren_reduce(
10189 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10190 except portage.exception.InvalidDependString, e:
10191 show_invalid_depstring_notice(task,
10192 task.metadata["PROPERTIES"], str(e))
10193 raise self._unknown_internal_error()
10194 if "interactive" in properties:
10195 interactive_tasks.append(task)
10196 return interactive_tasks
10198 def _set_digraph(self, digraph):
10199 if "--nodeps" in self.myopts or \
10200 (self._max_jobs is not True and self._max_jobs < 2):
10202 self._digraph = None
10205 self._digraph = digraph
10206 self._find_system_deps()
10207 self._prune_digraph()
10208 self._prevent_builddir_collisions()
def _find_system_deps(self):
	"""
	Find system packages and their deep runtime dependencies. Before being
	merged, these packages go to merge_wait_queue, to be merged when no
	other packages are building.
	"""
	found = _find_deep_system_runtime_deps(self._digraph)
	deps = self._deep_system_deps
	deps.clear()
	# Only packages actually scheduled for merging are of interest.
	for pkg in found:
		if pkg.operation == "merge":
			deps.add(pkg)
10223 def _prune_digraph(self):
10225 Prune any root nodes that are irrelevant.
10228 graph = self._digraph
10229 completed_tasks = self._completed_tasks
10230 removed_nodes = set()
10232 for node in graph.root_nodes():
10233 if not isinstance(node, Package) or \
10234 (node.installed and node.operation == "nomerge") or \
10236 node in completed_tasks:
10237 removed_nodes.add(node)
10239 graph.difference_update(removed_nodes)
10240 if not removed_nodes:
10242 removed_nodes.clear()
	def _prevent_builddir_collisions(self):
		"""
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of it's build dir locks from
		the main process.

		NOTE(review): sampled fragment — the cpv_map initialization and
		two continue statements are missing from this view.
		"""
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
				continue
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
			# Same cpv already scheduled: serialize via buildtime edges.
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# NOTE(review): upstream guards this with `if pargs:`; that
			# line is missing from this sampled view. The first positional
			# argument is stored as the failure status.
			self.status = pargs[0]
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.
		"""
		self._task_queues.fetch.addFront(fetcher)
	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		self._task_queues.merge.addFront(setup_phase)
	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		"""
		self._task_queues.unpack.add(unpack_phase)
10300 def _find_blockers(self, new_pkg):
10302 Returns a callable which should be called only when
10303 the vdb lock has been acquired.
10305 def get_blockers():
10306 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10307 return get_blockers
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		# Build a list of dblink objects for installed packages that
		# block new_pkg.
		# NOTE(review): sampled fragment — the early return inside the
		# first guard and two continue statements are missing from this
		# view.
		if self._opts_ignore_blockers.intersection(self.myopts):

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).

		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# Skip packages in the same slot or with the same cpv —
			# those are replaced, not blocked.
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		return blocker_dblinks
10337 def _dblink_pkg(self, pkg_dblink):
10338 cpv = pkg_dblink.mycpv
10339 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10340 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10341 installed = type_name == "installed"
10342 return self._pkg(cpv, type_name, root_config, installed=installed)
	def _append_to_log_path(self, log_path, msg):
		"""Append msg to the log file at log_path.

		NOTE(review): sampled fragment — the write and close of this
		handle are missing from this view.
		"""
		f = open(log_path, 'a')
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		"""Route elog messages from a dblink to either the console or
		the package's log file (when running in the background).

		NOTE(review): sampled fragment — the out/log_file initialization,
		the message loop and the try/finally close are missing.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")

		background = self._background

		if background and log_path is not None:
			log_file = open(log_path, 'a')

		func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)

		if log_file is not None:
	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		"""Display a merge-time message, writing to the console and/or
		the package log file depending on background mode.

		NOTE(review): sampled fragment — the early return / else:
		separating the two writemsg_level calls is missing from this view.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background

		if log_path is None:
			# No log file: show on the console unless backgrounded and
			# below WARN level.
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
		portage.util.writemsg_level(msg,
			level=level, noiselevel=noiselevel)
		self._append_to_log_path(log_path, msg)
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.
		"""
		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")

		ebuild_phase = EbuildPhase(background=background,
			pkg=pkg, phase=phase, scheduler=scheduler,
			settings=settings, tree=pkg_dblink.treetype)
		ebuild_phase.start()
		# Block until the phase finishes; its exit code is forwarded to
		# the dblink caller.
		ebuild_phase.wait()

		return ebuild_phase.returncode
	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		# NOTE(review): sampled fragment — the early return, a continue,
		# and the final return statements are missing from this view.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:

		shown_verifying_msg = False
		quiet_settings = {}
		# One quiet config clone per root keeps digestcheck output terse.
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":

			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")

			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
	def _add_prefetchers(self):
		"""Queue background fetchers for all but the first package in
		the merge list.

		NOTE(review): sampled fragment — the early return inside the
		first guard is missing from this view.
		"""
		if not self._parallel_fetch:

		if self._parallel_fetch:
			self._status_msg("Starting parallel fetch")

			prefetchers = self._prefetchers
			getbinpkg = "--getbinpkg" in self.myopts

			# In order to avoid "waiting for lock" messages
			# at the beginning, which annoy users, never
			# spawn a prefetcher for the first package.
			for pkg in self._mergelist[1:]:
				prefetcher = self._create_prefetcher(pkg)
				if prefetcher is not None:
					self._task_queues.fetch.add(prefetcher)
					prefetchers[pkg] = prefetcher
	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable

		NOTE(review): sampled fragment — the prefetcher = None default
		and the final return are missing from this view.
		"""
		if not isinstance(pkg, Package):

		elif pkg.type_name == "ebuild":

			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)

		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):

			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)
	def _is_restart_scheduled(self):
		"""
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.
		@rtype: bool
		@returns: True if a restart is scheduled, False otherwise.

		NOTE(review): sampled fragment — the return statements are
		missing from this view.
		"""
		if self._opts_no_restart.intersection(self.myopts):

		mergelist = self._mergelist

		for i, pkg in enumerate(mergelist):
			# Only a non-final portage upgrade triggers a restart.
			if self._is_restart_necessary(pkg) and \
				i != len(mergelist) - 1:
	def _is_restart_necessary(self, pkg):
		"""
		@return: True if merging the given package
			requires restart, False otherwise.

		NOTE(review): sampled fragment — the return statements are
		missing from this view.
		"""
		# Figure out if we need a restart.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			if self._running_portage:
				return pkg.cpv != self._running_portage.cpv
	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.

		NOTE(review): sampled fragment — the early returns and the
		flag-vs-valued option branch are missing from this view.
		"""
		if self._opts_no_restart.intersection(self.myopts):

		if not self._is_restart_necessary(pkg):

		if pkg == self._mergelist[-1]:

		self._main_loop_cleanup()

		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts

		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
			"portage version.")

		# Drop the just-merged portage package from the resume list
		# before handing off to the new emerge via execv().
		mtimedb["resume"]["mergelist"].remove(list(pkg))

		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				# NOTE(review): upstream chooses between these two
				# appends based on whether myarg is True (flag) or a
				# value; that branch is missing from this view.
				mynewargv.append(myopt)

				mynewargv.append(myopt +"="+ str(myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
		# NOTE(review): headerless fragment of the Scheduler's merge entry
		# point; the def line, try/except scaffolding, breaks, returns and
		# several else: branches are missing from this sampled view.
		if "--resume" in self.myopts:
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

		self._background = self._background_mode()
		except self._unknown_internal_error:

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()

			if self._background:
				# Settings objects are read-only while locked, hence the
				# unlock/modify/lock cycle.
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		rval = self._check_manifests()
		if rval != os.EX_OK:

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		rval = self._merge()
		if rval == os.EX_OK or fetchonly or not keep_going:

		if "resume" not in mtimedb:

		mergelist = self._mtimedb["resume"].get("mergelist")

		if not failed_pkgs:

		for failed_pkg in failed_pkgs:
			mergelist.remove(list(failed_pkg.pkg))

		self._failed_pkgs_all.extend(failed_pkgs)

		if not self._calc_resume_list():

		clear_caches(self.trees)
		if not self._mergelist:

		self._save_resume_list()
		self._pkg_count.curval = 0
		self._pkg_count.maxval = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

		self._failed_pkgs_all.extend(failed_pkgs)

		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show it's
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			build_dir = failed_pkg.build_dir

			log_paths = [failed_pkg.build_log]

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				log_file = open(log_path)

			if log_file is not None:
				for line in log_file:
					writemsg_level(line, noiselevel=-1)
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			printer = portage.output.EOutput()
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]

				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))

				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:

					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:

		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and "--keep-going" in self.myopts):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"

				msg = "The following package has " + \
					"failed to build or install:"
			prefix = bad(" * ")
			writemsg(prefix + "\n", noiselevel=-1)
			from textwrap import wrap
			for line in wrap(msg, 72):
				writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)
			for failed_pkg in self._failed_pkgs_all:
				writemsg("%s\t%s\n" % (prefix,
					colorize("INFORM", str(failed_pkg.pkg))),
			writemsg(prefix + "\n", noiselevel=-1)
	def _elog_listener(self, mysettings, key, logentries, fulltext):
		"""Collect ERROR-level elog entries so they can be shown in the
		post-merge failure summary.

		NOTE(review): upstream guards the append with `if errors:`; that
		line is missing from this sampled view.
		"""
		errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
		self._failed_pkgs_die_msgs.append(
			(mysettings, key, errors))
	def _locate_failure_log(self, failed_pkg):
		"""Return the path of a useful log file for failed_pkg, if any.

		NOTE(review): sampled fragment — the existence checks around
		os.stat and the return statements are missing from this view.
		"""
		build_dir = failed_pkg.build_dir

		log_paths = [failed_pkg.build_log]

		for log_path in log_paths:

			log_size = os.stat(log_path).st_size
	def _add_packages(self):
		"""Seed the scheduler's package queue from the merge list."""
		pkg_queue = self._pkg_queue
		for pkg in self._mergelist:
			if isinstance(pkg, Package):
				pkg_queue.append(pkg)
			elif isinstance(pkg, Blocker):
				# NOTE(review): the handling of Blocker entries is
				# missing from this sampled view.
	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.

		NOTE(review): sampled fragment — the early return and the returns
		of the nested priority filter are missing from this view.
		"""
		graph = self._digraph

		pkg = merge.merge.pkg

		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root != '/':

		completed_tasks = self._completed_tasks
		unsatisfied = self._unsatisfied_system_deps

		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			if isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post):

		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for child in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':

			if child.operation == 'merge' and \
				child not in completed_tasks:
				unsatisfied.add(child)
	def _merge_wait_exit_handler(self, task):
		"""Exit hook for merges scheduled via the merge-wait queue:
		de-register the task, then run normal merge-exit handling."""
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)
	def _merge_exit(self, merge):
		"""Common exit handler for merge tasks: update failure/progress
		bookkeeping and recycle the config instance.

		NOTE(review): sampled fragment — upstream's trailing call that
		resumes scheduling is missing from this view.
		"""
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		# Installed-package (uninstall) tasks don't advance the counter.
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
	def _do_merge_exit(self, merge):
		"""Record the result of a finished merge: on failure, log it and
		update failure counters; on success, mark the task (and any
		replaced package's uninstall task) complete and trim the resume
		list.

		NOTE(review): sampled fragment — the pkg= argument of the failure
		record, the early return after failure, and mtimedb.commit() are
		missing from this view.
		"""
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

			self._status_display.failed = len(self._failed_pkgs)

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark it's uninstall
			# task complete (if any).
			uninst_hash_key = \
				("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)

		self._restart_if_necessary(pkg)

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
	def _build_exit(self, build):
		"""Exit handler for build tasks: on success, schedule the merge
		(deferred for deep system deps); on failure, record it.

		NOTE(review): sampled fragment — the else: branches separating
		success/failure and deferred/immediate merge, plus the job-count
		decrement, are missing from this view, so the nesting shown here
		is approximate.
		"""
		if build.returncode == os.EX_OK:

			merge = PackageMerge(merge=build)
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)

				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)

			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

			self._status_display.failed = len(self._failed_pkgs)
		self._deallocate_config(build.settings)

		self._status_display.running = self._jobs
	def _extract_exit(self, build):
		# Binary package extraction finishes the same way a build does.
		self._build_exit(build)
10907 def _task_complete(self, pkg):
10908 self._completed_tasks.add(pkg)
10909 self._unsatisfied_system_deps.discard(pkg)
10910 self._choose_pkg_return_early = False
		# NOTE(review): headerless fragment of the Scheduler's internal
		# _merge routine; the def line, the try/finally around the main
		# loop, and the final return are missing from this sampled view.
		self._add_prefetchers()
		self._add_packages()
		pkg_queue = self._pkg_queue
		failed_pkgs = self._failed_pkgs
		# Silence lock messages and install the elog listener for the
		# duration of the main loop; both are restored below.
		portage.locks._quiet = self._background
		portage.elog._emerge_elog_listener = self._elog_listener

		self._main_loop_cleanup()
		portage.locks._quiet = False
		portage.elog._emerge_elog_listener = None

		rval = failed_pkgs[-1].returncode
10933 def _main_loop_cleanup(self):
10934 del self._pkg_queue[:]
10935 self._completed_tasks.clear()
10936 self._deep_system_deps.clear()
10937 self._unsatisfied_system_deps.clear()
10938 self._choose_pkg_return_early = False
10939 self._status_display.reset()
10940 self._digraph = None
10941 self._task_queues.fetch.clear()
	def _choose_pkg(self):
		"""
		Choose a task that has all it's dependencies satisfied.

		NOTE(review): sampled fragment — several early returns, the
		chosen_pkg initialization/assignment and the final return are
		missing from this view.
		"""
		if self._choose_pkg_return_early:

		if self._digraph is None:
			if (self._jobs or self._task_queues.merge) and \
				not ("--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)):
				self._choose_pkg_return_early = True
			return self._pkg_queue.pop(0)

		if not (self._jobs or self._task_queues.merge):
			return self._pkg_queue.pop(0)

		self._prune_digraph()

		# Packages later in the queue merge after pkg anyway, so their
		# dependence is ignored (see _dependent_on_scheduled_merges).
		later = set(self._pkg_queue)
		for pkg in self._pkg_queue:

			if not self._dependent_on_scheduled_merges(pkg, later):

		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)

		if chosen_pkg is None:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			# completes.
			self._choose_pkg_return_early = True
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given packages deep dependencies
		to see if it contains any scheduled merges.
		@param pkg: a package to check dependencies for
		@param later: packages for which dependence should be ignored
			since they will be merged later than pkg anyway and therefore
			delaying the merge of pkg will not result in a more optimal
			merge order
		@rtype: bool
		@returns: True if the package is dependent, False otherwise.

		NOTE(review): sampled fragment — the traversal loop header, the
		`dependent` flag handling and parts of the guard condition are
		missing from this view.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks

		traversed_nodes = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		node_stack = direct_deps
		# Frozen for fast membership tests during traversal.
		direct_deps = frozenset(direct_deps)

			node = node_stack.pop()
			if node in traversed_nodes:

			traversed_nodes.add(node)
			if not ((node.installed and node.operation == "nomerge") or \
				(node.operation == "uninstall" and \
				node not in direct_deps) or \
				node in completed_tasks or \
				node_stack.extend(graph.child_nodes(node))
	def _allocate_config(self, root):
		"""
		Allocate a unique config instance for a task in order
		to prevent interference between parallel tasks.

		NOTE(review): sampled fragment — the else: separating pool reuse
		from the fresh clone is missing from this view.
		"""
		if self._config_pool[root]:
			temp_settings = self._config_pool[root].pop()

			temp_settings = portage.config(clone=self.pkgsettings[root])
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
		# performance reasons, call it here to make sure all settings from the
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
		temp_settings.reload()
		temp_settings.reset()
		return temp_settings
11038 def _deallocate_config(self, settings):
11039 self._config_pool[settings["ROOT"]].append(settings)
	def _main_loop(self):
		"""Drive scheduling and polling until all work is done.

		NOTE(review): sampled fragment — the bodies of the polling
		branches and the drain loop around the final checks are missing
		from this view.
		"""
		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)

		merge_queue = self._task_queues.merge

		while self._schedule():
			if self._poll_event_handlers:

		if not (self._jobs or merge_queue):

		if self._poll_event_handlers:
11062 def _keep_scheduling(self):
11063 return bool(self._pkg_queue and \
11064 not (self._failed_pkgs and not self._build_opts.fetchonly))
	def _schedule_tasks(self):
		"""One scheduling pass: flush waiting merges, try to start new
		tasks, and return whether scheduling should continue.

		NOTE(review): sampled fragment — the state-change loop around the
		two _schedule_tasks_imp() calls and the queue scheduling body are
		missing from this view.
		"""
		# When the number of jobs drops to zero, process all waiting merges.
		if not self._jobs and self._merge_wait_queue:
			for task in self._merge_wait_queue:
				task.addExitListener(self._merge_wait_exit_handler)
				self._task_queues.merge.add(task)
			self._status_display.merges = len(self._task_queues.merge)
			self._merge_wait_scheduled.extend(self._merge_wait_queue)
			del self._merge_wait_queue[:]

		self._schedule_tasks_imp()
		self._status_display.display()

		for q in self._task_queues.values():

		# Cancel prefetchers if they're the only reason
		# the main poll loop is still running.
		if self._failed_pkgs and not self._build_opts.fetchonly and \
			not (self._jobs or self._task_queues.merge) and \
			self._task_queues.fetch:
			self._task_queues.fetch.clear()

		self._schedule_tasks_imp()
		self._status_display.display()

		return self._keep_scheduling()
	def _job_delay(self):
		"""
		@rtype: bool
		@returns: True if job scheduling should be delayed, False otherwise.

		NOTE(review): sampled fragment — the return statements are
		missing from this view.
		"""
		if self._jobs and self._max_load is not None:

			current_time = time.time()

			# Back-off grows with the number of running jobs, capped at
			# _job_delay_max.
			delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
			if delay > self._job_delay_max:
				delay = self._job_delay_max
			if (current_time - self._previous_job_start_time) < delay:
	def _schedule_tasks_imp(self):
		"""
		@rtype: bool
		@returns: True if state changed, False otherwise.

		NOTE(review): heavily sampled fragment — the scheduling loop
		header, the state_change bookkeeping, and the branches selecting
		between merge-only, extract and build task handling are missing
		from this view; the flat layout below is an artifact of that.
		"""
		if not self._keep_scheduling():
			return bool(state_change)

		if self._choose_pkg_return_early or \
			self._merge_wait_scheduled or \
			(self._jobs and self._unsatisfied_system_deps) or \
			not self._can_add_job() or \
			return bool(state_change)

		pkg = self._choose_pkg()

		return bool(state_change)

		if not pkg.installed:
			self._pkg_count.curval += 1

		task = self._task(pkg)

		merge = PackageMerge(merge=task)
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)

		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._extract_exit)
		self._task_queues.jobs.add(task)

		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._build_exit)
		self._task_queues.jobs.add(task)

		return bool(state_change)
	def _task(self, pkg):
		"""Build a MergeListItem task for pkg, locating any installed
		package in the same slot that it will replace.

		NOTE(review): sampled fragment — the guard checking that
		previous_cpv is non-empty and the final `return task` are missing
		from this view.
		"""
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = vardb.match(pkg.slot_atom)

				previous_cpv = previous_cpv.pop()
				pkg_to_replace = self._pkg(previous_cpv,
					"installed", pkg.root_config, installed=True)

		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)
	def _failed_pkg_msg(self, failed_pkg, action, preposition):
		"""Report a failed build/install in the status display, with the
		log file location appended when one can be located.

		@param action: verb for the message (e.g. "install", "emerge")
		@param preposition: joins the verb to the root (e.g. "to", "for")
		"""
		pkg = failed_pkg.pkg
		msg = "%s to %s %s" % \
			(bad("Failed"), action, colorize("INFORM", pkg.cpv))
		if pkg.root != "/":
			msg += " %s %s" % (preposition, pkg.root)

		log_path = self._locate_failure_log(failed_pkg)
		if log_path is not None:
			msg += ", Log file:"
		self._status_msg(msg)

		if log_path is not None:
			self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the resposibility of generating \r and \n control characters,
		to guarantee that lines are created or erased when necessary and
		appropriate.

		@type msg: str
		@param msg: a brief status message (no newlines allowed)
		"""
		if not self._background:
			# In the foreground, start on a fresh line first.
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
	def _save_resume_list(self):
		"""
		Do this before verifying the ebuild Manifests since it might
		be possible for the user to use --resume --skipfirst get past
		a non-essential package with a broken digest.

		NOTE(review): sampled fragment — the mtimedb.commit() upstream
		performs after this assignment is missing from this view.
		"""
		mtimedb = self._mtimedb
		# Only real merge operations belong in the resume list.
		mtimedb["resume"]["mergelist"] = [list(x) \
			for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"]
	def _calc_resume_list(self):
		"""
		Use the current resume list to calculate a new one,
		dropping any packages with unsatisfied deps.
		@rtype: bool
		@returns: True if successful, False otherwise.

		NOTE(review): heavily sampled fragment — try/except scaffolding,
		else: branches, continues and return statements are missing from
		this view; the flat layout below is an artifact of that.
		"""
		print colorize("GOOD", "*** Resuming merge...")

		if self._show_list():
			if "--tree" in self.myopts:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be merged, in reverse order:\n\n"))

				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be merged, in order:\n\n"))

		show_spinner = "--quiet" not in self.myopts and \
			"--nodeps" not in self.myopts

			print "Calculating dependencies  ",

		myparams = create_depgraph_params(self.myopts, None)

			success, mydepgraph, dropped_tasks = resume_depgraph(
				self.settings, self.trees, self._mtimedb, self.myopts,
				myparams, self._spinner)
		except depgraph.UnsatisfiedResumeDep, exc:
			# rename variable to avoid python-3.0 error:
			# SyntaxError: can not delete variable 'e' referenced in nested
			mydepgraph = e.depgraph
			dropped_tasks = set()

			print "\b\b... done!"

			def unsatisfied_resume_dep_msg():
				mydepgraph.display_problems()
				out = portage.output.EOutput()
				out.eerror("One or more packages are either masked or " + \
					"have missing dependencies:")

				show_parents = set()
				for dep in e.value:
					if dep.parent in show_parents:

					show_parents.add(dep.parent)
					if dep.atom is None:
						out.eerror(indent + "Masked package:")
						out.eerror(2 * indent + str(dep.parent))

						out.eerror(indent + str(dep.atom) + " pulled in by:")
						out.eerror(2 * indent + str(dep.parent))

				msg = "The resume list contains packages " + \
					"that are either masked or have " + \
					"unsatisfied dependencies. " + \
					"Please restart/continue " + \
					"the operation manually, or use --skipfirst " + \
					"to skip the first package in the list and " + \
					"any other packages that may be " + \
					"masked or have missing dependencies."
				for line in textwrap.wrap(msg, 72):

			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

		if success and self._show_list():
			mylist = mydepgraph.altlist()

				if "--tree" in self.myopts:

				mydepgraph.display(mylist, favorites=self._favorites)

			self._post_mod_echo_msgs.append(mydepgraph.display_problems)

			mydepgraph.display_problems()

		mylist = mydepgraph.altlist()
		# break_refs() releases the depgraph's internal references so
		# the objects it produced can outlive it.
		mydepgraph.break_refs(mylist)
		mydepgraph.break_refs(dropped_tasks)
		self._mergelist = mylist
		self._set_digraph(mydepgraph.schedulerGraph())

		for task in dropped_tasks:
			if not (isinstance(task, Package) and task.operation == "merge"):

			msg = "emerge --keep-going:" + \

			if pkg.root != "/":
				msg += " for %s" % (pkg.root,)
			msg += " dropped due to unsatisfied dependency."
			for line in textwrap.wrap(msg, msg_width):
				eerror(line, phase="other", key=pkg.cpv)
			settings = self.pkgsettings[pkg.root]
			# Ensure that log collection from $T is disabled inside
			# elog_process(), since any logs that might exist are
			settings.pop("T", None)
			portage.elog.elog_process(pkg.cpv, settings)
			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
	def _show_list(self):
		"""Return whether the merge list should be displayed, based on
		the current options.

		NOTE(review): sampled fragment — the return statements are
		missing from this view.
		"""
		myopts = self.myopts
		if "--quiet" not in myopts and \
			("--ask" in myopts or "--tree" in myopts or \
			"--verbose" in myopts):
	def _world_atom(self, pkg):
		"""
		Add the package to the world file, but only if
		it's supposed to be added. Otherwise, do nothing.

		NOTE(review): sampled fragment — the early returns, the lock()
		call, the try/finally around world-file access and the else:
		before the failure message are missing from this view.
		"""
		if set(("--buildpkgonly", "--fetchonly",
			"--oneshot", "--onlydeps",
			"--pretend")).intersection(self.myopts):

		if pkg.root != self.target_root:

		args_set = self._args_set
		if not args_set.findAtomForPackage(pkg):

		logger = self._logger
		pkg_count = self._pkg_count
		root_config = pkg.root_config
		world_set = root_config.sets["world"]
		world_locked = False
		if hasattr(world_set, "lock"):
			world_locked = True

			if hasattr(world_set, "load"):
				world_set.load() # maybe it's changed on disk

			atom = create_world_atom(pkg, args_set, root_config)

				if hasattr(world_set, "add"):
					self._status_msg(('Recording %s in "world" ' + \
						'favorites file...') % atom)
					logger.log(" === (%s of %s) Updating world file (%s)" % \
						(pkg_count.curval, pkg_count.maxval, pkg.cpv))
					world_set.add(atom)

					writemsg_level('\n!!! Unable to record %s in "world"\n' % \
						(atom,), level=logging.WARN, noiselevel=-1)
	def _pkg(self, cpv, type_name, root_config, installed=False):
		"""
		Get a package instance from the cache, or create a new
		one if necessary. Raises KeyError from aux_get if it
		failures for some reason (package does not exist or is
		unavailable).

		NOTE(review): sampled fragment — the branch selecting between
		"merge"/"nomerge" operations, the return of the cached instance
		and the final `return pkg` are missing from this view.
		"""
		operation = "merge"

			operation = "nomerge"

		if self._digraph is not None:
			# Reuse existing instance when available.
			pkg = self._digraph.get(
				(type_name, root_config.root, cpv, operation))
			if pkg is not None:

		tree_type = depgraph.pkg_tree_map[type_name]
		db = root_config.trees[tree_type].dbapi
		db_keys = list(self.trees[root_config.root][
			tree_type].dbapi._aux_cache_keys)
		metadata = izip(db_keys, db.aux_get(cpv, db_keys))
		pkg = Package(cpv=cpv, metadata=metadata,
			root_config=root_config, installed=installed)
		if type_name == "ebuild":
			settings = self.pkgsettings[root_config.root]
			settings.setcpv(pkg)
			pkg.metadata["USE"] = settings["PORTAGE_USE"]
11445 class MetadataRegen(PollScheduler):
11447 def __init__(self, portdb, max_jobs=None, max_load=None):
11448 PollScheduler.__init__(self)
11449 self._portdb = portdb
11451 if max_jobs is None:
11454 self._max_jobs = max_jobs
11455 self._max_load = max_load
11456 self._sched_iface = self._sched_iface_class(
11457 register=self._register,
11458 schedule=self._schedule_wait,
11459 unregister=self._unregister)
11461 self._valid_pkgs = set()
11462 self._process_iter = self._iter_metadata_processes()
11463 self.returncode = os.EX_OK
11464 self._error_count = 0
11466 def _iter_metadata_processes(self):
11467 portdb = self._portdb
11468 valid_pkgs = self._valid_pkgs
11469 every_cp = portdb.cp_all()
11470 every_cp.sort(reverse=True)
11473 cp = every_cp.pop()
11474 portage.writemsg_stdout("Processing %s\n" % cp)
11475 cpv_list = portdb.cp_list(cp)
11476 for cpv in cpv_list:
11477 valid_pkgs.add(cpv)
11478 ebuild_path, repo_path = portdb.findname2(cpv)
11479 metadata_process = portdb._metadata_process(
11480 cpv, ebuild_path, repo_path)
11481 if metadata_process is None:
11483 yield metadata_process
11487 portdb = self._portdb
11488 from portage.cache.cache_errors import CacheError
11491 for mytree in portdb.porttrees:
11493 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11494 except CacheError, e:
11495 portage.writemsg("Error listing cache entries for " + \
11496 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11501 while self._schedule():
11508 for y in self._valid_pkgs:
11509 for mytree in portdb.porttrees:
11510 if portdb.findname2(y, mytree=mytree)[0]:
11511 dead_nodes[mytree].discard(y)
11513 for mytree, nodes in dead_nodes.iteritems():
11514 auxdb = portdb.auxdb[mytree]
11518 except (KeyError, CacheError):
11521 def _schedule_tasks(self):
11524 @returns: True if there may be remaining tasks to schedule,
11527 while self._can_add_job():
11529 metadata_process = self._process_iter.next()
11530 except StopIteration:
11534 metadata_process.scheduler = self._sched_iface
11535 metadata_process.addExitListener(self._metadata_exit)
11536 metadata_process.start()
11539 def _metadata_exit(self, metadata_process):
11541 if metadata_process.returncode != os.EX_OK:
11542 self.returncode = 1
11543 self._error_count += 1
11544 self._valid_pkgs.discard(metadata_process.cpv)
11545 portage.writemsg("Error processing %s, continuing...\n" % \
11546 (metadata_process.cpv,))
11549 class UninstallFailure(portage.exception.PortageException):
11551 An instance of this class is raised by unmerge() when
11552 an uninstallation fails.
11555 def __init__(self, *pargs):
11556 portage.exception.PortageException.__init__(self, pargs)
11558 self.status = pargs[0]
11560 def unmerge(root_config, myopts, unmerge_action,
11561 unmerge_files, ldpath_mtimes, autoclean=0,
11562 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11563 scheduler=None, writemsg_level=portage.util.writemsg_level):
11565 quiet = "--quiet" in myopts
11566 settings = root_config.settings
11567 sets = root_config.sets
11568 vartree = root_config.trees["vartree"]
11569 candidate_catpkgs=[]
11571 xterm_titles = "notitles" not in settings.features
11572 out = portage.output.EOutput()
11574 db_keys = list(vartree.dbapi._aux_cache_keys)
11577 pkg = pkg_cache.get(cpv)
11579 pkg = Package(cpv=cpv, installed=True,
11580 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11581 root_config=root_config,
11582 type_name="installed")
11583 pkg_cache[cpv] = pkg
11586 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11588 # At least the parent needs to exist for the lock file.
11589 portage.util.ensure_dirs(vdb_path)
11590 except portage.exception.PortageException:
11594 if os.access(vdb_path, os.W_OK):
11595 vdb_lock = portage.locks.lockdir(vdb_path)
11596 realsyslist = sets["system"].getAtoms()
11598 for x in realsyslist:
11599 mycp = portage.dep_getkey(x)
11600 if mycp in settings.getvirtuals():
11602 for provider in settings.getvirtuals()[mycp]:
11603 if vartree.dbapi.match(provider):
11604 providers.append(provider)
11605 if len(providers) == 1:
11606 syslist.extend(providers)
11608 syslist.append(mycp)
11610 mysettings = portage.config(clone=settings)
11612 if not unmerge_files:
11613 if unmerge_action == "unmerge":
11615 print bold("emerge unmerge") + " can only be used with specific package names"
11621 localtree = vartree
11622 # process all arguments and add all
11623 # valid db entries to candidate_catpkgs
11625 if not unmerge_files:
11626 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11628 #we've got command-line arguments
11629 if not unmerge_files:
11630 print "\nNo packages to unmerge have been provided.\n"
11632 for x in unmerge_files:
11633 arg_parts = x.split('/')
11634 if x[0] not in [".","/"] and \
11635 arg_parts[-1][-7:] != ".ebuild":
11636 #possible cat/pkg or dep; treat as such
11637 candidate_catpkgs.append(x)
11638 elif unmerge_action in ["prune","clean"]:
11639 print "\n!!! Prune and clean do not accept individual" + \
11640 " ebuilds as arguments;\n skipping.\n"
11643 # it appears that the user is specifying an installed
11644 # ebuild and we're in "unmerge" mode, so it's ok.
11645 if not os.path.exists(x):
11646 print "\n!!! The path '"+x+"' doesn't exist.\n"
11649 absx = os.path.abspath(x)
11650 sp_absx = absx.split("/")
11651 if sp_absx[-1][-7:] == ".ebuild":
11653 absx = "/".join(sp_absx)
11655 sp_absx_len = len(sp_absx)
11657 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11658 vdb_len = len(vdb_path)
11660 sp_vdb = vdb_path.split("/")
11661 sp_vdb_len = len(sp_vdb)
11663 if not os.path.exists(absx+"/CONTENTS"):
11664 print "!!! Not a valid db dir: "+str(absx)
11667 if sp_absx_len <= sp_vdb_len:
11668 # The Path is shorter... so it can't be inside the vdb.
11671 print "\n!!!",x,"cannot be inside "+ \
11672 vdb_path+"; aborting.\n"
11675 for idx in range(0,sp_vdb_len):
11676 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11679 print "\n!!!", x, "is not inside "+\
11680 vdb_path+"; aborting.\n"
11683 print "="+"/".join(sp_absx[sp_vdb_len:])
11684 candidate_catpkgs.append(
11685 "="+"/".join(sp_absx[sp_vdb_len:]))
11688 if (not "--quiet" in myopts):
11690 if settings["ROOT"] != "/":
11691 writemsg_level(darkgreen(newline+ \
11692 ">>> Using system located in ROOT tree %s\n" % \
11695 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11696 not ("--quiet" in myopts):
11697 writemsg_level(darkgreen(newline+\
11698 ">>> These are the packages that would be unmerged:\n"))
11700 # Preservation of order is required for --depclean and --prune so
11701 # that dependencies are respected. Use all_selected to eliminate
11702 # duplicate packages since the same package may be selected by
11705 all_selected = set()
11706 for x in candidate_catpkgs:
11707 # cycle through all our candidate deps and determine
11708 # what will and will not get unmerged
11710 mymatch = vartree.dbapi.match(x)
11711 except portage.exception.AmbiguousPackageName, errpkgs:
11712 print "\n\n!!! The short ebuild name \"" + \
11713 x + "\" is ambiguous. Please specify"
11714 print "!!! one of the following fully-qualified " + \
11715 "ebuild names instead:\n"
11716 for i in errpkgs[0]:
11717 print " " + green(i)
11721 if not mymatch and x[0] not in "<>=~":
11722 mymatch = localtree.dep_match(x)
11724 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11725 (x, unmerge_action), noiselevel=-1)
11729 {"protected": set(), "selected": set(), "omitted": set()})
11730 mykey = len(pkgmap) - 1
11731 if unmerge_action=="unmerge":
11733 if y not in all_selected:
11734 pkgmap[mykey]["selected"].add(y)
11735 all_selected.add(y)
11736 elif unmerge_action == "prune":
11737 if len(mymatch) == 1:
11739 best_version = mymatch[0]
11740 best_slot = vartree.getslot(best_version)
11741 best_counter = vartree.dbapi.cpv_counter(best_version)
11742 for mypkg in mymatch[1:]:
11743 myslot = vartree.getslot(mypkg)
11744 mycounter = vartree.dbapi.cpv_counter(mypkg)
11745 if (myslot == best_slot and mycounter > best_counter) or \
11746 mypkg == portage.best([mypkg, best_version]):
11747 if myslot == best_slot:
11748 if mycounter < best_counter:
11749 # On slot collision, keep the one with the
11750 # highest counter since it is the most
11751 # recently installed.
11753 best_version = mypkg
11755 best_counter = mycounter
11756 pkgmap[mykey]["protected"].add(best_version)
11757 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11758 if mypkg != best_version and mypkg not in all_selected)
11759 all_selected.update(pkgmap[mykey]["selected"])
11761 # unmerge_action == "clean"
11763 for mypkg in mymatch:
11764 if unmerge_action == "clean":
11765 myslot = localtree.getslot(mypkg)
11767 # since we're pruning, we don't care about slots
11768 # and put all the pkgs in together
11770 if myslot not in slotmap:
11771 slotmap[myslot] = {}
11772 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11774 for mypkg in vartree.dbapi.cp_list(
11775 portage.dep_getkey(mymatch[0])):
11776 myslot = vartree.getslot(mypkg)
11777 if myslot not in slotmap:
11778 slotmap[myslot] = {}
11779 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11781 for myslot in slotmap:
11782 counterkeys = slotmap[myslot].keys()
11783 if not counterkeys:
11786 pkgmap[mykey]["protected"].add(
11787 slotmap[myslot][counterkeys[-1]])
11788 del counterkeys[-1]
11790 for counter in counterkeys[:]:
11791 mypkg = slotmap[myslot][counter]
11792 if mypkg not in mymatch:
11793 counterkeys.remove(counter)
11794 pkgmap[mykey]["protected"].add(
11795 slotmap[myslot][counter])
11797 #be pretty and get them in order of merge:
11798 for ckey in counterkeys:
11799 mypkg = slotmap[myslot][ckey]
11800 if mypkg not in all_selected:
11801 pkgmap[mykey]["selected"].add(mypkg)
11802 all_selected.add(mypkg)
11803 # ok, now the last-merged package
11804 # is protected, and the rest are selected
11805 numselected = len(all_selected)
11806 if global_unmerge and not numselected:
11807 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11810 if not numselected:
11811 portage.writemsg_stdout(
11812 "\n>>> No packages selected for removal by " + \
11813 unmerge_action + "\n")
11817 vartree.dbapi.flush_cache()
11818 portage.locks.unlockdir(vdb_lock)
11820 from portage.sets.base import EditablePackageSet
11822 # generate a list of package sets that are directly or indirectly listed in "world",
11823 # as there is no persistent list of "installed" sets
11824 installed_sets = ["world"]
11829 pos = len(installed_sets)
11830 for s in installed_sets[pos - 1:]:
11833 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11836 installed_sets += candidates
11837 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11840 # we don't want to unmerge packages that are still listed in user-editable package sets
11841 # listed in "world" as they would be remerged on the next update of "world" or the
11842 # relevant package sets.
11843 unknown_sets = set()
11844 for cp in xrange(len(pkgmap)):
11845 for cpv in pkgmap[cp]["selected"].copy():
11849 # It could have been uninstalled
11850 # by a concurrent process.
11853 if unmerge_action != "clean" and \
11854 root_config.root == "/" and \
11855 portage.match_from_list(
11856 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11857 msg = ("Not unmerging package %s since there is no valid " + \
11858 "reason for portage to unmerge itself.") % (pkg.cpv,)
11859 for line in textwrap.wrap(msg, 75):
11861 # adjust pkgmap so the display output is correct
11862 pkgmap[cp]["selected"].remove(cpv)
11863 all_selected.remove(cpv)
11864 pkgmap[cp]["protected"].add(cpv)
11868 for s in installed_sets:
11869 # skip sets that the user requested to unmerge, and skip world
11870 # unless we're unmerging a package set (as the package would be
11871 # removed from "world" later on)
11872 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11876 if s in unknown_sets:
11878 unknown_sets.add(s)
11879 out = portage.output.EOutput()
11880 out.eerror(("Unknown set '@%s' in " + \
11881 "%svar/lib/portage/world_sets") % \
11882 (s, root_config.root))
11885 # only check instances of EditablePackageSet as other classes are generally used for
11886 # special purposes and can be ignored here (and are usually generated dynamically, so the
11887 # user can't do much about them anyway)
11888 if isinstance(sets[s], EditablePackageSet):
11890 # This is derived from a snippet of code in the
11891 # depgraph._iter_atoms_for_pkg() method.
11892 for atom in sets[s].iterAtomsForPackage(pkg):
11893 inst_matches = vartree.dbapi.match(atom)
11894 inst_matches.reverse() # descending order
11896 for inst_cpv in inst_matches:
11898 inst_pkg = _pkg(inst_cpv)
11900 # It could have been uninstalled
11901 # by a concurrent process.
11904 if inst_pkg.cp != atom.cp:
11906 if pkg >= inst_pkg:
11907 # This is descending order, and we're not
11908 # interested in any versions <= pkg given.
11910 if pkg.slot_atom != inst_pkg.slot_atom:
11911 higher_slot = inst_pkg
11913 if higher_slot is None:
11917 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11918 #print colorize("WARN", "but still listed in the following package sets:")
11919 #print " %s\n" % ", ".join(parents)
11920 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11921 print colorize("WARN", "still referenced by the following package sets:")
11922 print " %s\n" % ", ".join(parents)
11923 # adjust pkgmap so the display output is correct
11924 pkgmap[cp]["selected"].remove(cpv)
11925 all_selected.remove(cpv)
11926 pkgmap[cp]["protected"].add(cpv)
11930 numselected = len(all_selected)
11931 if not numselected:
11933 "\n>>> No packages selected for removal by " + \
11934 unmerge_action + "\n")
11937 # Unmerge order only matters in some cases
11941 selected = d["selected"]
11944 cp = portage.cpv_getkey(iter(selected).next())
11945 cp_dict = unordered.get(cp)
11946 if cp_dict is None:
11948 unordered[cp] = cp_dict
11951 for k, v in d.iteritems():
11952 cp_dict[k].update(v)
11953 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11955 for x in xrange(len(pkgmap)):
11956 selected = pkgmap[x]["selected"]
11959 for mytype, mylist in pkgmap[x].iteritems():
11960 if mytype == "selected":
11962 mylist.difference_update(all_selected)
11963 cp = portage.cpv_getkey(iter(selected).next())
11964 for y in localtree.dep_match(cp):
11965 if y not in pkgmap[x]["omitted"] and \
11966 y not in pkgmap[x]["selected"] and \
11967 y not in pkgmap[x]["protected"] and \
11968 y not in all_selected:
11969 pkgmap[x]["omitted"].add(y)
11970 if global_unmerge and not pkgmap[x]["selected"]:
11971 #avoid cluttering the preview printout with stuff that isn't getting unmerged
11973 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11974 writemsg_level(colorize("BAD","\a\n\n!!! " + \
11975 "'%s' is part of your system profile.\n" % cp),
11976 level=logging.WARNING, noiselevel=-1)
11977 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11978 "be damaging to your system.\n\n"),
11979 level=logging.WARNING, noiselevel=-1)
11980 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11981 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11982 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11984 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11986 writemsg_level(bold(cp) + ": ", noiselevel=-1)
11987 for mytype in ["selected","protected","omitted"]:
11989 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11990 if pkgmap[x][mytype]:
11991 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11992 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
11993 for pn, ver, rev in sorted_pkgs:
11997 myversion = ver + "-" + rev
11998 if mytype == "selected":
12000 colorize("UNMERGE_WARN", myversion + " "),
12004 colorize("GOOD", myversion + " "), noiselevel=-1)
12006 writemsg_level("none ", noiselevel=-1)
12008 writemsg_level("\n", noiselevel=-1)
12010 writemsg_level("\n", noiselevel=-1)
12012 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12013 " packages are slated for removal.\n")
12014 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12015 " and " + colorize("GOOD", "'omitted'") + \
12016 " packages will not be removed.\n\n")
12018 if "--pretend" in myopts:
12019 #we're done... return
12021 if "--ask" in myopts:
12022 if userquery("Would you like to unmerge these packages?")=="No":
12023 # enter pretend mode for correct formatting of results
12024 myopts["--pretend"] = True
12029 #the real unmerging begins, after a short delay....
12030 if clean_delay and not autoclean:
12031 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12033 for x in xrange(len(pkgmap)):
12034 for y in pkgmap[x]["selected"]:
12035 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12036 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12037 mysplit = y.split("/")
12039 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12040 mysettings, unmerge_action not in ["clean","prune"],
12041 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12042 scheduler=scheduler)
12044 if retval != os.EX_OK:
12045 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12047 raise UninstallFailure(retval)
12050 if clean_world and hasattr(sets["world"], "cleanPackage"):
12051 sets["world"].cleanPackage(vartree.dbapi, y)
12052 emergelog(xterm_titles, " >>> unmerge success: "+y)
12053 if clean_world and hasattr(sets["world"], "remove"):
12054 for s in root_config.setconfig.active:
12055 sets["world"].remove(SETPREFIX+s)
12058 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12060 if os.path.exists("/usr/bin/install-info"):
12061 out = portage.output.EOutput()
12066 inforoot=normpath(root+z)
12067 if os.path.isdir(inforoot):
12068 infomtime = long(os.stat(inforoot).st_mtime)
12069 if inforoot not in prev_mtimes or \
12070 prev_mtimes[inforoot] != infomtime:
12071 regen_infodirs.append(inforoot)
12073 if not regen_infodirs:
12074 portage.writemsg_stdout("\n")
12075 out.einfo("GNU info directory index is up-to-date.")
12077 portage.writemsg_stdout("\n")
12078 out.einfo("Regenerating GNU info directory index...")
12080 dir_extensions = ("", ".gz", ".bz2")
12084 for inforoot in regen_infodirs:
12088 if not os.path.isdir(inforoot) or \
12089 not os.access(inforoot, os.W_OK):
12092 file_list = os.listdir(inforoot)
12094 dir_file = os.path.join(inforoot, "dir")
12095 moved_old_dir = False
12096 processed_count = 0
12097 for x in file_list:
12098 if x.startswith(".") or \
12099 os.path.isdir(os.path.join(inforoot, x)):
12101 if x.startswith("dir"):
12103 for ext in dir_extensions:
12104 if x == "dir" + ext or \
12105 x == "dir" + ext + ".old":
12110 if processed_count == 0:
12111 for ext in dir_extensions:
12113 os.rename(dir_file + ext, dir_file + ext + ".old")
12114 moved_old_dir = True
12115 except EnvironmentError, e:
12116 if e.errno != errno.ENOENT:
12119 processed_count += 1
12120 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12121 existsstr="already exists, for file `"
12123 if re.search(existsstr,myso):
12124 # Already exists... Don't increment the count for this.
12126 elif myso[:44]=="install-info: warning: no info dir entry in ":
12127 # This info file doesn't contain a DIR-header: install-info produces this
12128 # (harmless) warning (the --quiet switch doesn't seem to work).
12129 # Don't increment the count for this.
12132 badcount=badcount+1
12133 errmsg += myso + "\n"
12136 if moved_old_dir and not os.path.exists(dir_file):
12137 # We didn't generate a new dir file, so put the old file
12138 # back where it was originally found.
12139 for ext in dir_extensions:
12141 os.rename(dir_file + ext + ".old", dir_file + ext)
12142 except EnvironmentError, e:
12143 if e.errno != errno.ENOENT:
12147 # Clean dir.old cruft so that they don't prevent
12148 # unmerge of otherwise empty directories.
12149 for ext in dir_extensions:
12151 os.unlink(dir_file + ext + ".old")
12152 except EnvironmentError, e:
12153 if e.errno != errno.ENOENT:
12157 #update mtime so we can potentially avoid regenerating.
12158 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12161 out.eerror("Processed %d info files; %d errors." % \
12162 (icount, badcount))
12163 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12166 out.einfo("Processed %d info files." % (icount,))
12169 def display_news_notification(root_config, myopts):
12170 target_root = root_config.root
12171 trees = root_config.trees
12172 settings = trees["vartree"].settings
12173 portdb = trees["porttree"].dbapi
12174 vardb = trees["vartree"].dbapi
12175 NEWS_PATH = os.path.join("metadata", "news")
12176 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12177 newsReaderDisplay = False
12178 update = "--pretend" not in myopts
12180 for repo in portdb.getRepositories():
12181 unreadItems = checkUpdatedNewsItems(
12182 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12184 if not newsReaderDisplay:
12185 newsReaderDisplay = True
12187 print colorize("WARN", " * IMPORTANT:"),
12188 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12191 if newsReaderDisplay:
12192 print colorize("WARN", " *"),
12193 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12196 def display_preserved_libs(vardbapi):
12199 # Ensure the registry is consistent with existing files.
12200 vardbapi.plib_registry.pruneNonExisting()
12202 if vardbapi.plib_registry.hasEntries():
12204 print colorize("WARN", "!!!") + " existing preserved libs:"
12205 plibdata = vardbapi.plib_registry.getPreservedLibs()
12206 linkmap = vardbapi.linkmap
12209 linkmap_broken = False
12213 except portage.exception.CommandNotFound, e:
12214 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12215 level=logging.ERROR, noiselevel=-1)
12217 linkmap_broken = True
12219 search_for_owners = set()
12220 for cpv in plibdata:
12221 internal_plib_keys = set(linkmap._obj_key(f) \
12222 for f in plibdata[cpv])
12223 for f in plibdata[cpv]:
12224 if f in consumer_map:
12227 for c in linkmap.findConsumers(f):
12228 # Filter out any consumers that are also preserved libs
12229 # belonging to the same package as the provider.
12230 if linkmap._obj_key(c) not in internal_plib_keys:
12231 consumers.append(c)
12233 consumer_map[f] = consumers
12234 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12236 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12238 for cpv in plibdata:
12239 print colorize("WARN", ">>>") + " package: %s" % cpv
12241 for f in plibdata[cpv]:
12242 obj_key = linkmap._obj_key(f)
12243 alt_paths = samefile_map.get(obj_key)
12244 if alt_paths is None:
12246 samefile_map[obj_key] = alt_paths
12249 for alt_paths in samefile_map.itervalues():
12250 alt_paths = sorted(alt_paths)
12251 for p in alt_paths:
12252 print colorize("WARN", " * ") + " - %s" % (p,)
12254 consumers = consumer_map.get(f, [])
12255 for c in consumers[:MAX_DISPLAY]:
12256 print colorize("WARN", " * ") + " used by %s (%s)" % \
12257 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12258 if len(consumers) == MAX_DISPLAY + 1:
12259 print colorize("WARN", " * ") + " used by %s (%s)" % \
12260 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12261 for x in owners.get(consumers[MAX_DISPLAY], [])))
12262 elif len(consumers) > MAX_DISPLAY:
12263 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12264 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12267 def _flush_elog_mod_echo():
12269 Dump the mod_echo output now so that our other
12270 notifications are shown last.
12272 @returns: True if messages were shown, False otherwise.
12274 messages_shown = False
12276 from portage.elog import mod_echo
12277 except ImportError:
12278 pass # happens during downgrade to a version without the module
12280 messages_shown = bool(mod_echo._items)
12281 mod_echo.finalize()
12282 return messages_shown
12284 def post_emerge(root_config, myopts, mtimedb, retval):
12286 Misc. things to run at the end of a merge session.
12289 Update Config Files
12292 Display preserved libs warnings
12295 @param trees: A dictionary mapping each ROOT to it's package databases
12297 @param mtimedb: The mtimeDB to store data needed across merge invocations
12298 @type mtimedb: MtimeDB class instance
12299 @param retval: Emerge's return value
12303 1. Calls sys.exit(retval)
12306 target_root = root_config.root
12307 trees = { target_root : root_config.trees }
12308 vardbapi = trees[target_root]["vartree"].dbapi
12309 settings = vardbapi.settings
12310 info_mtimes = mtimedb["info"]
12312 # Load the most current variables from ${ROOT}/etc/profile.env
12315 settings.regenerate()
12318 config_protect = settings.get("CONFIG_PROTECT","").split()
12319 infodirs = settings.get("INFOPATH","").split(":") + \
12320 settings.get("INFODIR","").split(":")
12324 if retval == os.EX_OK:
12325 exit_msg = " *** exiting successfully."
12327 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12328 emergelog("notitles" not in settings.features, exit_msg)
12330 _flush_elog_mod_echo()
12332 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12333 if "--pretend" in myopts or (counter_hash is not None and \
12334 counter_hash == vardbapi._counter_hash()):
12335 display_news_notification(root_config, myopts)
12336 # If vdb state has not changed then there's nothing else to do.
12339 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12340 portage.util.ensure_dirs(vdb_path)
12342 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12343 vdb_lock = portage.locks.lockdir(vdb_path)
12347 if "noinfo" not in settings.features:
12348 chk_updated_info_files(target_root,
12349 infodirs, info_mtimes, retval)
12353 portage.locks.unlockdir(vdb_lock)
12355 chk_updated_cfg_files(target_root, config_protect)
12357 display_news_notification(root_config, myopts)
12358 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12359 display_preserved_libs(vardbapi)
12364 def chk_updated_cfg_files(target_root, config_protect):
12366 #number of directories with some protect files in them
12368 for x in config_protect:
12369 x = os.path.join(target_root, x.lstrip(os.path.sep))
12370 if not os.access(x, os.W_OK):
12371 # Avoid Permission denied errors generated
12375 mymode = os.lstat(x).st_mode
12378 if stat.S_ISLNK(mymode):
12379 # We want to treat it like a directory if it
12380 # is a symlink to an existing directory.
12382 real_mode = os.stat(x).st_mode
12383 if stat.S_ISDIR(real_mode):
12387 if stat.S_ISDIR(mymode):
12388 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12390 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12391 os.path.split(x.rstrip(os.path.sep))
12392 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12393 a = commands.getstatusoutput(mycommand)
12395 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12397 # Show the error message alone, sending stdout to /dev/null.
12398 os.system(mycommand + " 1>/dev/null")
12400 files = a[1].split('\0')
12401 # split always produces an empty string as the last element
12402 if files and not files[-1]:
12406 print "\n"+colorize("WARN", " * IMPORTANT:"),
12407 if stat.S_ISDIR(mymode):
12408 print "%d config files in '%s' need updating." % \
12411 print "config file '%s' needs updating." % x
12414 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12415 " section of the " + bold("emerge")
12416 print " "+yellow("*")+" man page to learn how to update config files."
12418 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12421 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12422 Returns the number of unread (yet relevent) items.
12424 @param portdb: a portage tree database
12425 @type portdb: pordbapi
12426 @param vardb: an installed package database
12427 @type vardb: vardbapi
12430 @param UNREAD_PATH:
12436 1. The number of unread but relevant news items.
12439 from portage.news import NewsManager
12440 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12441 return manager.getUnreadItems( repo_id, update=update )
12443 def insert_category_into_atom(atom, category):
12444 alphanum = re.search(r'\w', atom)
12446 ret = atom[:alphanum.start()] + "%s/" % category + \
12447 atom[alphanum.start():]
12452 def is_valid_package_atom(x):
12454 alphanum = re.search(r'\w', x)
12456 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12457 return portage.isvalidatom(x)
12459 def show_blocker_docs_link():
12461 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12462 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12464 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12467 def show_mask_docs():
12468 print "For more information, see the MASKED PACKAGES section in the emerge"
12469 print "man page or refer to the Gentoo Handbook."
# Synchronize the local Portage tree (PORTDIR) with the configured SYNC
# source, then refresh metadata and run post-sync hooks.
#
# Parameters:
#   settings  -- portage.config instance (reads PORTDIR, SYNC, FEATURES,
#                PORTAGE_RSYNC_* variables)
#   trees     -- per-ROOT tree/dbapi mapping; reloaded after a sync
#   mtimedb   -- mtime database; "updates" key is consumed by
#                portage._global_updates()
#   myopts    -- emerge command-line option dict (--quiet, --verbose,
#                --debug, --ask)
#   myaction  -- the emerge action name; "metadata" skips the actual
#                transfer and only regenerates the cache
#
# Dispatches on the sync method: an existing .git checkout is updated via
# `git pull`; rsync:// URIs use /usr/bin/rsync with timestamp probing and
# retries; cvs:// URIs use a cvs checkout/update; anything else is an error.
# NOTE(review): this listing is an elided excerpt -- many original lines
# are missing, so the control flow shown here is not contiguous.
12471 def action_sync(settings, trees, mtimedb, myopts, myaction):
12472 xterm_titles = "notitles" not in settings.features
12473 emergelog(xterm_titles, " === sync")
12474 myportdir = settings.get("PORTDIR", None)
12475 out = portage.output.EOutput()
12477 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Normalize PORTDIR (strip trailing slash) and create it if absent.
12479 if myportdir[-1]=="/":
12480 myportdir=myportdir[:-1]
12482 st = os.stat(myportdir)
12486 print ">>>",myportdir,"not found, creating it."
12487 os.makedirs(myportdir,0755)
12488 st = os.stat(myportdir)
12491 spawn_kwargs["env"] = settings.environ()
# FEATURES=usersync: if running with sufficient privileges (secpass >= 2)
# and PORTDIR is owned by a different uid/gid, drop privileges for the
# spawned sync commands so ownership of the tree is preserved.
12492 if 'usersync' in settings.features and \
12493 portage.data.secpass >= 2 and \
12494 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12495 st.st_gid != os.getgid() and st.st_mode & 0070):
12497 homedir = pwd.getpwuid(st.st_uid).pw_dir
12501 # Drop privileges when syncing, in order to match
12502 # existing uid/gid settings.
12503 spawn_kwargs["uid"] = st.st_uid
12504 spawn_kwargs["gid"] = st.st_gid
12505 spawn_kwargs["groups"] = [st.st_gid]
12506 spawn_kwargs["env"]["HOME"] = homedir
12508 if not st.st_mode & 0020:
12509 umask = umask | 0020
12510 spawn_kwargs["umask"] = umask
12512 syncuri = settings.get("SYNC", "").strip()
12514 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12515 noiselevel=-1, level=logging.ERROR)
# Detect any VCS control directory at the top of PORTDIR; used both for
# choosing the git path and for refusing to rsync over a working copy.
12518 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12519 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12522 dosyncuri = syncuri
12523 updatecache_flg = False
12524 if myaction == "metadata":
12525 print "skipping sync"
12526 updatecache_flg = True
12527 elif ".git" in vcs_dirs:
12528 # Update existing git repository, and ignore the syncuri. We are
12529 # going to trust the user and assume that the user is in the branch
12530 # that he/she wants updated. We'll let the user manage branches with
12532 if portage.process.find_binary("git") is None:
12533 msg = ["Command not found: git",
12534 "Type \"emerge dev-util/git\" to enable git support."]
12536 writemsg_level("!!! %s\n" % l,
12537 level=logging.ERROR, noiselevel=-1)
12539 msg = ">>> Starting git pull in %s..." % myportdir
12540 emergelog(xterm_titles, msg )
12541 writemsg_level(msg + "\n")
12542 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12543 (portage._shell_quote(myportdir),), **spawn_kwargs)
12544 if exitcode != os.EX_OK:
12545 msg = "!!! git pull error in %s." % myportdir
12546 emergelog(xterm_titles, msg)
12547 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12549 msg = ">>> Git pull in %s successful" % myportdir
12550 emergelog(xterm_titles, msg)
12551 writemsg_level(msg + "\n")
# git discards mtimes, so restore them from the metadata cache before
# allowing the cache transfer to proceed.
12552 exitcode = git_sync_timestamps(settings, myportdir)
12553 if exitcode == os.EX_OK:
12554 updatecache_flg = True
12555 elif syncuri[:8]=="rsync://":
# Refuse to rsync --delete over a checkout that is under revision control.
12556 for vcs_dir in vcs_dirs:
12557 writemsg_level(("!!! %s appears to be under revision " + \
12558 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12559 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12561 if not os.path.exists("/usr/bin/rsync"):
12562 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12563 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: either the hardcoded defaults below, or
# the user's PORTAGE_RSYNC_OPTS validated for required options.
12568 if settings["PORTAGE_RSYNC_OPTS"] == "":
12569 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12570 rsync_opts.extend([
12571 "--recursive", # Recurse directories
12572 "--links", # Consider symlinks
12573 "--safe-links", # Ignore links outside of tree
12574 "--perms", # Preserve permissions
12575 "--times", # Preserive mod times
12576 "--compress", # Compress the data transmitted
12577 "--force", # Force deletion on non-empty dirs
12578 "--whole-file", # Don't do block transfers, only entire files
12579 "--delete", # Delete files that aren't in the master tree
12580 "--stats", # Show final statistics about what was transfered
12581 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12582 "--exclude=/distfiles", # Exclude distfiles from consideration
12583 "--exclude=/local", # Exclude local from consideration
12584 "--exclude=/packages", # Exclude packages from consideration
12588 # The below validation is not needed when using the above hardcoded
12591 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12593 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12594 for opt in ("--recursive", "--times"):
12595 if opt not in rsync_opts:
12596 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12597 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12598 rsync_opts.append(opt)
12600 for exclude in ("distfiles", "local", "packages"):
12601 opt = "--exclude=/%s" % exclude
12602 if opt not in rsync_opts:
12603 portage.writemsg(yellow("WARNING:") + \
12604 " adding required option %s not included in " % opt + \
12605 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12606 rsync_opts.append(opt)
# Extra required options when syncing from the official Gentoo mirrors.
12608 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12609 def rsync_opt_startswith(opt_prefix):
12610 for x in rsync_opts:
12611 if x.startswith(opt_prefix):
12615 if not rsync_opt_startswith("--timeout="):
12616 rsync_opts.append("--timeout=%d" % mytimeout)
12618 for opt in ("--compress", "--whole-file"):
12619 if opt not in rsync_opts:
12620 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12621 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12622 rsync_opts.append(opt)
12624 if "--quiet" in myopts:
12625 rsync_opts.append("--quiet") # Shut up a lot
12627 rsync_opts.append("--verbose") # Print filelist
12629 if "--verbose" in myopts:
12630 rsync_opts.append("--progress") # Progress meter for each file
12632 if "--debug" in myopts:
12633 rsync_opts.append("--checksum") # Force checksum on all files
12635 # Real local timestamp file.
12636 servertimestampfile = os.path.join(
12637 myportdir, "metadata", "timestamp.chk")
12639 content = portage.util.grabfile(servertimestampfile)
# Parse the local tree timestamp (RFC-822-ish format used by the mirrors).
12643 mytimestamp = time.mktime(time.strptime(content[0],
12644 "%a, %d %b %Y %H:%M:%S +0000"))
12645 except (OverflowError, ValueError):
12650 rsync_initial_timeout = \
12651 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12653 rsync_initial_timeout = 15
12656 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12657 except SystemExit, e:
12658 raise # Needed else can't exit
12660 maxretries=3 #default number of retries
# Split the rsync URI into optional user@, hostname and optional :port.
12663 user_name, hostname, port = re.split(
12664 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12667 if user_name is None:
12669 updatecache_flg=True
12670 all_rsync_opts = set(rsync_opts)
12671 extra_rsync_opts = shlex.split(
12672 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12673 all_rsync_opts.update(extra_rsync_opts)
# Choose the address family; -4/--ipv4 or -6/--ipv6 in the combined
# option set overrides the IPv4 default.
12674 family = socket.AF_INET
12675 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12676 family = socket.AF_INET
12677 elif socket.has_ipv6 and \
12678 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12679 family = socket.AF_INET6
# Sentinel exit codes (distinct from real rsync exit codes).
12681 SERVER_OUT_OF_DATE = -1
12682 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname to concrete addresses and randomize the
# order so load spreads across the mirror rotation.
12688 for addrinfo in socket.getaddrinfo(
12689 hostname, None, family, socket.SOCK_STREAM):
12690 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12691 # IPv6 addresses need to be enclosed in square brackets
12692 ips.append("[%s]" % addrinfo[4][0])
12694 ips.append(addrinfo[4][0])
12695 from random import shuffle
12697 except SystemExit, e:
12698 raise # Needed else can't exit
12699 except Exception, e:
12700 print "Notice:",str(e)
# Substitute a resolved IP for the hostname in the sync URI.
12705 dosyncuri = syncuri.replace(
12706 "//" + user_name + hostname + port + "/",
12707 "//" + user_name + ips[0] + port + "/", 1)
12708 except SystemExit, e:
12709 raise # Needed else can't exit
12710 except Exception, e:
12711 print "Notice:",str(e)
12715 if "--ask" in myopts:
12716 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12721 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12722 if "--quiet" not in myopts:
12723 print ">>> Starting rsync with "+dosyncuri+"..."
12725 emergelog(xterm_titles,
12726 ">>> Starting retry %d of %d with %s" % \
12727 (retries,maxretries,dosyncuri))
12728 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12730 if mytimestamp != 0 and "--quiet" not in myopts:
12731 print ">>> Checking server timestamp ..."
12733 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12735 if "--debug" in myopts:
12738 exitcode = os.EX_OK
12739 servertimestamp = 0
12740 # Even if there's no timestamp available locally, fetch the
12741 # timestamp anyway as an initial probe to verify that the server is
12742 # responsive. This protects us from hanging indefinitely on a
12743 # connection attempt to an unresponsive server which rsync's
12744 # --timeout option does not prevent.
12746 # Temporary file for remote server timestamp comparison.
12747 from tempfile import mkstemp
12748 fd, tmpservertimestampfile = mkstemp()
12750 mycommand = rsynccommand[:]
12751 mycommand.append(dosyncuri.rstrip("/") + \
12752 "/metadata/timestamp.chk")
12753 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the probe's connect phase, which rsync --timeout does
# not cover; the handler raises PortageException to abort the wait.
12757 def timeout_handler(signum, frame):
12758 raise portage.exception.PortageException("timed out")
12759 signal.signal(signal.SIGALRM, timeout_handler)
12760 # Timeout here in case the server is unresponsive. The
12761 # --timeout rsync option doesn't apply to the initial
12762 # connection attempt.
12763 if rsync_initial_timeout:
12764 signal.alarm(rsync_initial_timeout)
12766 mypids.extend(portage.process.spawn(
12767 mycommand, env=settings.environ(), returnpid=True))
12768 exitcode = os.waitpid(mypids[0], 0)[1]
12769 content = portage.grabfile(tmpservertimestampfile)
12771 if rsync_initial_timeout:
12774 os.unlink(tmpservertimestampfile)
12777 except portage.exception.PortageException, e:
# The alarm fired: reap the probe child if it is still running.
12781 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12782 os.kill(mypids[0], signal.SIGTERM)
12783 os.waitpid(mypids[0], 0)
12784 # This is the same code rsync uses for timeout.
# Convert the raw os.waitpid() status into a shell-style exit code.
12787 if exitcode != os.EX_OK:
12788 if exitcode & 0xff:
12789 exitcode = (exitcode & 0xff) << 8
12791 exitcode = exitcode >> 8
12793 portage.process.spawned_pids.remove(mypids[0])
12796 servertimestamp = time.mktime(time.strptime(
12797 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12798 except (OverflowError, ValueError):
12800 del mycommand, mypids, content
# Compare server vs. local timestamps: equal -> nothing to do; older ->
# flag the server out of date; newer/unknown -> run the real transfer.
12801 if exitcode == os.EX_OK:
12802 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12803 emergelog(xterm_titles,
12804 ">>> Cancelling sync -- Already current.")
12807 print ">>> Timestamps on the server and in the local repository are the same."
12808 print ">>> Cancelling all further sync action. You are already up to date."
12810 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12814 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12815 emergelog(xterm_titles,
12816 ">>> Server out of date: %s" % dosyncuri)
12819 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12821 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12824 exitcode = SERVER_OUT_OF_DATE
12825 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12827 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12828 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12829 if exitcode in [0,1,3,4,11,14,20,21]:
12831 elif exitcode in [1,3,4,11,14,20,21]:
12834 # Code 2 indicates protocol incompatibility, which is expected
12835 # for servers with protocol < 29 that don't support
12836 # --prune-empty-directories. Retry for a server that supports
12837 # at least rsync protocol version 29 (>=rsync-2.6.4).
12842 if retries<=maxretries:
12843 print ">>> Retrying..."
12848 updatecache_flg=False
12849 exitcode = EXCEEDED_MAX_RETRIES
# Report the final rsync outcome, mapping exit codes to user guidance.
12853 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12854 elif exitcode == SERVER_OUT_OF_DATE:
12856 elif exitcode == EXCEEDED_MAX_RETRIES:
12858 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12863 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12864 msg.append("that your SYNC statement is proper.")
12865 msg.append("SYNC=" + settings["SYNC"])
12867 msg.append("Rsync has reported that there is a File IO error. Normally")
12868 msg.append("this means your disk is full, but can be caused by corruption")
12869 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12870 msg.append("and try again after the problem has been fixed.")
12871 msg.append("PORTDIR=" + settings["PORTDIR"])
12873 msg.append("Rsync was killed before it finished.")
12875 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12876 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12877 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12878 msg.append("temporary problem unless complications exist with your network")
12879 msg.append("(and possibly your system's filesystem) configuration.")
# CVS sync: initial checkout of the gentoo-x86 module, or an update of an
# existing CVS working copy.
12883 elif syncuri[:6]=="cvs://":
12884 if not os.path.exists("/usr/bin/cvs"):
12885 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12886 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12888 cvsroot=syncuri[6:]
12889 cvsdir=os.path.dirname(myportdir)
12890 if not os.path.exists(myportdir+"/CVS"):
12892 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12893 if os.path.exists(cvsdir+"/gentoo-x86"):
12894 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12897 os.rmdir(myportdir)
12899 if e.errno != errno.ENOENT:
12901 "!!! existing '%s' directory; exiting.\n" % myportdir)
12904 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12905 print "!!! cvs checkout error; exiting."
12907 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12910 print ">>> Starting cvs update with "+syncuri+"..."
12911 retval = portage.process.spawn_bash(
12912 "cd %s; cvs -z0 -q update -dP" % \
12913 (portage._shell_quote(myportdir),), **spawn_kwargs)
12914 if retval != os.EX_OK:
12916 dosyncuri = syncuri
12918 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12919 noiselevel=-1, level=logging.ERROR)
# Post-sync: only transfer the metadata cache when FEATURES contains
# metadata-transfer (or when explicitly running the metadata action).
12922 if updatecache_flg and \
12923 myaction != "metadata" and \
12924 "metadata-transfer" not in settings.features:
12925 updatecache_flg = False
12927 # Reload the whole config from scratch.
12928 settings, trees, mtimedb = load_emerge_config(trees=trees)
12929 root_config = trees[settings["ROOT"]]["root_config"]
12930 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12932 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12933 action_metadata(settings, portdb, myopts)
12935 if portage._global_updates(trees, mtimedb["updates"]):
12937 # Reload the whole config from scratch.
12938 settings, trees, mtimedb = load_emerge_config(trees=trees)
12939 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12940 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version in the tree against the
# installed one so we can advise the user to upgrade portage first.
12942 mybestpv = portdb.xmatch("bestmatch-visible",
12943 portage.const.PORTAGE_PACKAGE_ATOM)
12944 mypvs = portage.best(
12945 trees[settings["ROOT"]]["vartree"].dbapi.match(
12946 portage.const.PORTAGE_PACKAGE_ATOM))
12948 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's executable post_sync hook, passing the URI that was used.
12950 if myaction != "metadata":
12951 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12952 retval = portage.process.spawn(
12953 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12954 dosyncuri], env=settings.environ())
12955 if retval != os.EX_OK:
12956 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12958 if(mybestpv != mypvs) and not "--quiet" in myopts:
12960 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12961 print red(" * ")+"that you update portage now, before any other packages are updated."
12963 print red(" * ")+"To update portage, run 'emerge portage' now."
12966 display_news_notification(root_config, myopts)
# Restore file mtimes from the metadata cache after a git sync.
#
# Parameters:
#   settings -- portage.config instance (provides load_best_module)
#   portdir  -- path of the git-managed Portage tree
#
# Returns an exit code (os.EX_OK on success per the caller in
# action_sync).  Files reported as modified by `git diff-index` relative
# to HEAD are left untouched, since their cache timestamps cannot be
# trusted.  NOTE(review): this listing is an elided excerpt; some
# original lines are missing.
12969 def git_sync_timestamps(settings, portdir):
12971 Since git doesn't preserve timestamps, synchronize timestamps between
12972 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12973 for a given file as long as the file in the working tree is not modified
12974 (relative to HEAD).
12976 cache_dir = os.path.join(portdir, "metadata", "cache")
12977 if not os.path.isdir(cache_dir):
12979 writemsg_level(">>> Synchronizing timestamps...\n")
12981 from portage.cache.cache_errors import CacheError
# Open the metadata cache read-only via the configured cache module.
12983 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12984 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12985 except CacheError, e:
12986 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12987 level=logging.ERROR, noiselevel=-1)
# Collect the names of all eclasses (filenames minus the ".eclass" suffix).
12990 ec_dir = os.path.join(portdir, "eclass")
12992 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12993 if f.endswith(".eclass"))
12995 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12996 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are modified relative to HEAD; those are
# excluded from timestamp restoration below.
12999 args = [portage.const.BASH_BINARY, "-c",
13000 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13001 portage._shell_quote(portdir)]
13003 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13004 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13006 if rval != os.EX_OK:
13009 modified_eclasses = set(ec for ec in ec_names \
13010 if os.path.join("eclass", ec + ".eclass") in modified_files)
# Tracks eclasses whose mtime has already been restored, so conflicting
# mtimes recorded by different cache entries can be detected.
13012 updated_ec_mtimes = {}
13014 for cpv in cache_db:
13015 cpv_split = portage.catpkgsplit(cpv)
13016 if cpv_split is None:
13017 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13018 level=logging.ERROR, noiselevel=-1)
13021 cat, pn, ver, rev = cpv_split
13022 cat, pf = portage.catsplit(cpv)
13023 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Skip ebuilds with local modifications; their cached mtime is stale.
13024 if relative_eb_path in modified_files:
# Pull the recorded ebuild mtime and per-eclass mtimes from the cache.
13028 cache_entry = cache_db[cpv]
13029 eb_mtime = cache_entry.get("_mtime_")
13030 ec_mtimes = cache_entry.get("_eclasses_")
13032 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13033 level=logging.ERROR, noiselevel=-1)
13035 except CacheError, e:
13036 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13037 (cpv, e), level=logging.ERROR, noiselevel=-1)
13040 if eb_mtime is None:
13041 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13042 level=logging.ERROR, noiselevel=-1)
13046 eb_mtime = long(eb_mtime)
13048 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13049 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13052 if ec_mtimes is None:
13053 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13054 level=logging.ERROR, noiselevel=-1)
# Any locally-modified eclass invalidates this entry's recorded mtimes.
13057 if modified_eclasses.intersection(ec_mtimes):
13060 missing_eclasses = set(ec_mtimes).difference(ec_names)
13061 if missing_eclasses:
13062 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13063 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13067 eb_path = os.path.join(portdir, relative_eb_path)
13069 current_eb_mtime = os.stat(eb_path)
13071 writemsg_level("!!! Missing ebuild: %s\n" % \
13072 (cpv,), level=logging.ERROR, noiselevel=-1)
# Detect cache entries that disagree about an eclass's mtime; such
# entries are flagged inconsistent and skipped.
13075 inconsistent = False
13076 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13077 updated_mtime = updated_ec_mtimes.get(ec)
13078 if updated_mtime is not None and updated_mtime != ec_mtime:
13079 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13080 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13081 inconsistent = True
# Apply the cached mtimes: first to the ebuild, then to each eclass that
# has not already been updated in this pass.
13087 if current_eb_mtime != eb_mtime:
13088 os.utime(eb_path, (eb_mtime, eb_mtime))
13090 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13091 if ec in updated_ec_mtimes:
13093 ec_path = os.path.join(ec_dir, ec + ".eclass")
13094 current_mtime = long(os.stat(ec_path).st_mtime)
13095 if current_mtime != ec_mtime:
13096 os.utime(ec_path, (ec_mtime, ec_mtime))
13097 updated_ec_mtimes[ec] = ec_mtime
# Transfer the pregenerated metadata cache (PORTDIR/metadata/cache) into
# the local depcache (settings.depcachedir) via mirror_cache().
#
# Parameters:
#   settings -- portage.config instance (depcachedir, PORTDIR,
#               load_best_module)
#   portdb   -- portdbapi whose auxdb receives the mirrored entries
#   myopts   -- emerge option dict; --quiet selects a silent generator
#               instead of the percentage display
#
# NOTE(review): this listing is an elided excerpt; some lines (e.g. the
# early-return after the sanity check) are missing.
13101 def action_metadata(settings, portdb, myopts):
13102 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Relax the umask so the generated cache is group-accessible.
13103 old_umask = os.umask(0002)
13104 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on an obviously-wrong PORTAGE_DEPCACHEDIR: mirroring
# into a primary root directory would be destructive.
13105 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13106 "/lib", "/opt", "/proc", "/root", "/sbin",
13107 "/sys", "/tmp", "/usr", "/var"]:
13108 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13109 "ROOT DIRECTORY ON YOUR SYSTEM."
13110 print >> sys.stderr, \
13111 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13113 if not os.path.exists(cachedir):
# Source side of the mirror: the tree's metadata/cache, opened read-only
# through the configured metadata cache module.
13116 ec = portage.eclass_cache.cache(portdb.porttree_root)
13117 myportdir = os.path.realpath(settings["PORTDIR"])
13118 cm = settings.load_best_module("portdbapi.metadbmodule")(
13119 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13121 from portage.cache import util
# Progress reporter: iterates all cpvs while printing a percentage that
# overwrites itself with backspaces on stdout.
13123 class percentage_noise_maker(util.quiet_mirroring):
13124 def __init__(self, dbapi):
13126 self.cp_all = dbapi.cp_all()
13127 l = len(self.cp_all)
13128 self.call_update_min = 100000000
13129 self.min_cp_all = l/100.0
13133 def __iter__(self):
13134 for x in self.cp_all:
13136 if self.count > self.min_cp_all:
13137 self.call_update_min = 0
13139 for y in self.dbapi.cp_list(x):
13141 self.call_update_mine = 0
13143 def update(self, *arg):
# pstr holds the integer percentage printed so far; non-numeric
# initial value falls back to 1.
13144 try: self.pstr = int(self.pstr) + 1
13145 except ValueError: self.pstr = 1
13146 sys.stdout.write("%s%i%%" % \
13147 ("\b" * (len(str(self.pstr))+1), self.pstr))
13149 self.call_update_min = 10000000
13151 def finish(self, *arg):
13152 sys.stdout.write("\b\b\b\b100%\n")
# --quiet: plain cpv generator and a silent reporter; otherwise the
# percentage reporter serves as both source iterator and reporter.
13155 if "--quiet" in myopts:
13156 def quicky_cpv_generator(cp_all_list):
13157 for x in cp_all_list:
13158 for y in portdb.cp_list(x):
13160 source = quicky_cpv_generator(portdb.cp_all())
13161 noise_maker = portage.cache.util.quiet_mirroring()
13163 noise_maker = source = percentage_noise_maker(portdb)
13164 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13165 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask before returning.
13168 os.umask(old_umask)
# Regenerate metadata cache entries for the whole tree using
# MetadataRegen, with parallelism bounded by max_jobs/max_load.
# Returns the MetadataRegen return code.
# NOTE(review): elided excerpt; lines between stdin close and the
# MetadataRegen run are missing.
13170 def action_regen(settings, portdb, max_jobs, max_load):
13171 xterm_titles = "notitles" not in settings.features
13172 emergelog(xterm_titles, " === regen")
13173 #regenerate cache entries
13174 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block waiting for input.
13176 os.close(sys.stdin.fileno())
13177 except SystemExit, e:
13178 raise # Needed else can't exit
13183 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13186 portage.writemsg_stdout("done!\n")
13187 return regen.returncode
# Run the pkg_config() phase of one installed package.
#
# Parameters:
#   settings -- portage.config instance
#   trees    -- per-ROOT tree mapping; the vartree dbapi resolves the atom
#   myopts   -- emerge option dict (--ask enables interactive selection)
#   myfiles  -- command-line arguments; exactly one package atom expected
#
# Matches the atom against installed packages, disambiguates multiple
# matches (interactively with --ask), then runs the ebuild "config" phase
# followed by "clean" on success.  NOTE(review): elided excerpt; the
# error-exit lines after most messages are missing.
13189 def action_config(settings, trees, myopts, myfiles):
13190 if len(myfiles) != 1:
13191 print red("!!! config can only take a single package atom at this time\n")
13193 if not is_valid_package_atom(myfiles[0]):
13194 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13196 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13197 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Resolve the atom against installed packages (vartree).
13201 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13202 except portage.exception.AmbiguousPackageName, e:
13203 # Multiple matches thrown from cpv_expand
13206 print "No packages found.\n"
13208 elif len(pkgs) > 1:
# Multiple installed versions match: with --ask, present a numbered
# menu (plus "X" to abort); without --ask, just list them and bail.
13209 if "--ask" in myopts:
13211 print "Please select a package to configure:"
13215 options.append(str(idx))
13216 print options[-1]+") "+pkg
13218 options.append("X")
13219 idx = userquery("Selection?", options)
13222 pkg = pkgs[int(idx)-1]
13224 print "The following packages available:"
13227 print "\nPlease use a specific atom or the --ask option."
13233 if "--ask" in myopts:
13234 if userquery("Ready to configure "+pkg+"?") == "No":
13237 print "Configuring pkg..."
# Run the "config" ebuild phase against the installed package, then
# clean up its temporary build directory on success.
13239 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13240 mysettings = portage.config(clone=settings)
13241 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13242 debug = mysettings.get("PORTAGE_DEBUG") == "1"
13243 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13245 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13246 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13247 if retval == os.EX_OK:
13248 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13249 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Print `emerge --info` output: system settings (versions, key make.conf
# variables, USE flags) and, for any package atoms given in myfiles,
# per-package build-time settings that differ from the current config.
#
# Parameters:
#   settings -- portage.config instance
#   trees    -- per-ROOT tree mapping (vartree/porttree dbapis)
#   myopts   -- emerge option dict (--verbose dumps all variables,
#               --debug prints module cvs ids)
#   myfiles  -- optional package atoms to report build settings for
#
# NOTE(review): elided excerpt; several lines (header_width assignment,
# some loop bodies) are missing from this listing.
13252 def action_info(settings, trees, myopts, myfiles):
13253 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13254 settings.profile_path, settings["CHOST"],
13255 trees[settings["ROOT"]]["vartree"].dbapi)
13257 header_title = "System Settings"
13259 print header_width * "="
13260 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13261 print header_width * "="
13262 print "System uname: "+platform.platform(aliased=1)
# Report when the tree was last synced, from metadata/timestamp.chk.
13264 lastSync = portage.grabfile(os.path.join(
13265 settings["PORTDIR"], "metadata", "timestamp.chk"))
13266 print "Timestamp of tree:",
# Report distcc/ccache versions and whether each FEATURE is enabled.
13272 output=commands.getstatusoutput("distcc --version")
13274 print str(output[1].split("\n",1)[0]),
13275 if "distcc" in settings.features:
13280 output=commands.getstatusoutput("ccache -V")
13282 print str(output[1].split("\n",1)[0]),
13283 if "ccache" in settings.features:
# Versions of key toolchain packages, extended by the profile's
# info_pkgs list.
13288 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13289 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13290 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13291 myvars = portage.util.unique_array(myvars)
13295 if portage.isvalidatom(x):
13296 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13297 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13298 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13300 for pn, ver, rev in pkg_matches:
13302 pkgs.append(ver + "-" + rev)
13306 pkgs = ", ".join(pkgs)
13307 print "%-20s %s" % (x+":", pkgs)
13309 print "%-20s %s" % (x+":", "[NOT VALID]")
13311 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: everything with --verbose, otherwise the standard set
# plus the profile's info_vars list.
13313 if "--verbose" in myopts:
13314 myvars=settings.keys()
13316 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13317 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13318 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13319 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13321 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13323 myvars = portage.util.unique_array(myvars)
13329 print '%s="%s"' % (x, settings[x])
# USE is printed specially: USE_EXPAND-derived flags are pulled out of
# the plain USE listing and shown under their own variables.
13331 use = set(settings["USE"].split())
13332 use_expand = settings["USE_EXPAND"].split()
13334 for varname in use_expand:
13335 flag_prefix = varname.lower() + "_"
13336 for f in list(use):
13337 if f.startswith(flag_prefix):
13341 print 'USE="%s"' % " ".join(use),
13342 for varname in use_expand:
13343 myval = settings.get(varname)
13345 print '%s="%s"' % (varname, myval),
13348 unset_vars.append(x)
13350 print "Unset: "+", ".join(unset_vars)
# --debug: show the cvs id string of every portage submodule.
13353 if "--debug" in myopts:
13354 for x in dir(portage):
13355 module = getattr(portage, x)
13356 if "cvs_id_string" in dir(module):
13357 print "%s: %s" % (str(x), str(module.cvs_id_string))
13359 # See if we can find any packages installed matching the strings
13360 # passed on the command line
13362 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13363 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13365 mypkgs.extend(vardb.match(x))
13367 # If some packages were found...
13369 # Get our global settings (we only print stuff if it varies from
13370 # the current config)
13371 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13372 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13374 pkgsettings = portage.config(clone=settings)
13376 for myvar in mydesiredvars:
13377 global_vals[myvar] = set(settings.get(myvar, "").split())
13379 # Loop through each package
13380 # Only print settings if they differ from global settings
13381 header_title = "Package Settings"
13382 print header_width * "="
13383 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13384 print header_width * "="
13385 from portage.output import EOutput
13388 # Get all package specific variables
13389 auxvalues = vardb.aux_get(pkg, auxkeys)
13391 for i in xrange(len(auxkeys)):
13392 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13394 for myvar in mydesiredvars:
13395 # If the package variable doesn't match the
13396 # current global variable, something has changed
13397 # so set diff_found so we know to print
13398 if valuesmap[myvar] != global_vals[myvar]:
13399 diff_values[myvar] = valuesmap[myvar]
# Compare the recorded USE (restricted to IUSE) against what the
# current config would use for the same cpv.
13400 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13401 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13402 pkgsettings.reset()
13403 # If a matching ebuild is no longer available in the tree, maybe it
13404 # would make sense to compare against the flags for the best
13405 # available version with the same slot?
13407 if portdb.cpv_exists(pkg):
13409 pkgsettings.setcpv(pkg, mydb=mydb)
13410 if valuesmap["IUSE"].intersection(
13411 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13412 diff_values["USE"] = valuesmap["USE"]
13413 # If a difference was found, print the info for
13416 # Print package info
13417 print "%s was built with the following:" % pkg
13418 for myvar in mydesiredvars + ["USE"]:
13419 if myvar in diff_values:
13420 mylist = list(diff_values[myvar])
13422 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Also run the ebuild "info" phase if the package provides one.
13424 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13425 ebuildpath = vardb.findname(pkg)
13426 if not ebuildpath or not os.path.exists(ebuildpath):
13427 out.ewarn("No ebuild found for '%s'" % pkg)
13429 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13430 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13431 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Run `emerge --search`: execute each search term through a search
# instance and print the accumulated results.  Options control
# description search (--searchdesc), verbosity, and binary-package
# awareness (--usepkg/--usepkgonly).  An invalid regular expression in a
# term is reported via re.error.
# NOTE(review): elided excerpt; a few lines are missing.
13434 def action_search(root_config, myopts, myfiles, spinner):
13436 print "emerge: no search terms provided."
13438 searchinstance = search(root_config,
13439 spinner, "--searchdesc" in myopts,
13440 "--quiet" not in myopts, "--usepkg" in myopts,
13441 "--usepkgonly" in myopts)
13442 for mysearch in myfiles:
13444 searchinstance.execute(mysearch)
13445 except re.error, comment:
13446 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13448 searchinstance.output()
13450 def action_depclean(settings, trees, ldpath_mtimes,
13451 myopts, action, myfiles, spinner):
13452 # Kill packages that aren't explicitly merged or are required as a
13453 # dependency of another package. World file is explicit.
13455 # Global depclean or prune operations are not very safe when there are
13456 # missing dependencies since it's unknown how badly incomplete
13457 # the dependency graph is, and we might accidentally remove packages
13458 # that should have been pulled into the graph. On the other hand, it's
13459 # relatively safe to ignore missing deps when only asked to remove
13460 # specific packages.
13461 allow_missing_deps = len(myfiles) > 0
13464 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13465 msg.append("mistakes. Packages that are part of the world set will always\n")
13466 msg.append("be kept. They can be manually added to this set with\n")
13467 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13468 msg.append("package.provided (see portage(5)) will be removed by\n")
13469 msg.append("depclean, even if they are part of the world set.\n")
13471 msg.append("As a safety measure, depclean will not remove any packages\n")
13472 msg.append("unless *all* required dependencies have been resolved. As a\n")
13473 msg.append("consequence, it is often necessary to run %s\n" % \
13474 good("`emerge --update"))
13475 msg.append(good("--newuse --deep @system @world`") + \
13476 " prior to depclean.\n")
13478 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13479 portage.writemsg_stdout("\n")
13481 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13483 xterm_titles = "notitles" not in settings.features
13484 myroot = settings["ROOT"]
13485 root_config = trees[myroot]["root_config"]
13486 getSetAtoms = root_config.setconfig.getSetAtoms
13487 vardb = trees[myroot]["vartree"].dbapi
13489 required_set_names = ("system", "world")
13493 for s in required_set_names:
13494 required_sets[s] = InternalPackageSet(
13495 initial_atoms=getSetAtoms(s))
13498 # When removing packages, use a temporary version of world
13499 # which excludes packages that are intended to be eligible for
13501 world_temp_set = required_sets["world"]
13502 system_set = required_sets["system"]
13504 if not system_set or not world_temp_set:
13507 writemsg_level("!!! You have no system list.\n",
13508 level=logging.ERROR, noiselevel=-1)
13510 if not world_temp_set:
13511 writemsg_level("!!! You have no world file.\n",
13512 level=logging.WARNING, noiselevel=-1)
13514 writemsg_level("!!! Proceeding is likely to " + \
13515 "break your installation.\n",
13516 level=logging.WARNING, noiselevel=-1)
13517 if "--pretend" not in myopts:
13518 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13520 if action == "depclean":
13521 emergelog(xterm_titles, " >>> depclean")
13524 args_set = InternalPackageSet()
13527 if not is_valid_package_atom(x):
13528 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13529 level=logging.ERROR, noiselevel=-1)
13530 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13533 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13534 except portage.exception.AmbiguousPackageName, e:
13535 msg = "The short ebuild name \"" + x + \
13536 "\" is ambiguous. Please specify " + \
13537 "one of the following " + \
13538 "fully-qualified ebuild names instead:"
13539 for line in textwrap.wrap(msg, 70):
13540 writemsg_level("!!! %s\n" % (line,),
13541 level=logging.ERROR, noiselevel=-1)
13543 writemsg_level(" %s\n" % colorize("INFORM", i),
13544 level=logging.ERROR, noiselevel=-1)
13545 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13548 matched_packages = False
13551 matched_packages = True
13553 if not matched_packages:
13554 writemsg_level(">>> No packages selected for removal by %s\n" % \
13558 writemsg_level("\nCalculating dependencies ")
13559 resolver_params = create_depgraph_params(myopts, "remove")
13560 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13561 vardb = resolver.trees[myroot]["vartree"].dbapi
13563 if action == "depclean":
13566 # Pull in everything that's installed but not matched
13567 # by an argument atom since we don't want to clean any
13568 # package if something depends on it.
13570 world_temp_set.clear()
13575 if args_set.findAtomForPackage(pkg) is None:
13576 world_temp_set.add("=" + pkg.cpv)
13578 except portage.exception.InvalidDependString, e:
13579 show_invalid_depstring_notice(pkg,
13580 pkg.metadata["PROVIDE"], str(e))
13582 world_temp_set.add("=" + pkg.cpv)
13585 elif action == "prune":
13587 # Pull in everything that's installed since we don't
13588 # to prune a package if something depends on it.
13589 world_temp_set.clear()
13590 world_temp_set.update(vardb.cp_all())
13594 # Try to prune everything that's slotted.
13595 for cp in vardb.cp_all():
13596 if len(vardb.cp_list(cp)) > 1:
13599 # Remove atoms from world that match installed packages
13600 # that are also matched by argument atoms, but do not remove
13601 # them if they match the highest installed version.
13604 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13605 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13606 raise AssertionError("package expected in matches: " + \
13607 "cp = %s, cpv = %s matches = %s" % \
13608 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13610 highest_version = pkgs_for_cp[-1]
13611 if pkg == highest_version:
13612 # pkg is the highest version
13613 world_temp_set.add("=" + pkg.cpv)
13616 if len(pkgs_for_cp) <= 1:
13617 raise AssertionError("more packages expected: " + \
13618 "cp = %s, cpv = %s matches = %s" % \
13619 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13622 if args_set.findAtomForPackage(pkg) is None:
13623 world_temp_set.add("=" + pkg.cpv)
13625 except portage.exception.InvalidDependString, e:
13626 show_invalid_depstring_notice(pkg,
13627 pkg.metadata["PROVIDE"], str(e))
13629 world_temp_set.add("=" + pkg.cpv)
13633 for s, package_set in required_sets.iteritems():
13634 set_atom = SETPREFIX + s
13635 set_arg = SetArg(arg=set_atom, set=package_set,
13636 root_config=resolver.roots[myroot])
13637 set_args[s] = set_arg
13638 for atom in set_arg.set:
13639 resolver._dep_stack.append(
13640 Dependency(atom=atom, root=myroot, parent=set_arg))
13641 resolver.digraph.add(set_arg, None)
13643 success = resolver._complete_graph()
13644 writemsg_level("\b\b... done!\n")
13646 resolver.display_problems()
# Closure over the enclosing depclean/prune scope: reports resolver deps
# that could not be satisfied during graph completion.
# NOTE(review): this is a numbered listing with missing lines (e.g. the
# function's return statements are not visible here); comments below
# describe only the lines that are present — confirm against the full file.
13651 def unresolved_deps():
13653 unresolvable = set()
# Only deps with a Package parent and a priority stronger than SOFT
# count as unresolvable.
13654 for dep in resolver._initially_unsatisfied_deps:
13655 if isinstance(dep.parent, Package) and \
13656 (dep.priority > UnmergeDepPriority.SOFT):
13657 unresolvable.add((dep.atom, dep.parent.cpv))
13659 if not unresolvable:
# Print the error only when missing deps are not explicitly allowed
# (allow_missing_deps is presumably set by --nodeps; verify in caller).
13662 if unresolvable and not allow_missing_deps:
13663 prefix = bad(" * ")
13665 msg.append("Dependencies could not be completely resolved due to")
13666 msg.append("the following required packages not being installed:")
13668 for atom, parent in unresolvable:
13669 msg.append(" %s pulled in by:" % (atom,))
13670 msg.append(" %s" % (parent,))
13672 msg.append("Have you forgotten to run " + \
13673 good("`emerge --update --newuse --deep @system @world`") + " prior")
13674 msg.append(("to %s? It may be necessary to manually " + \
13675 "uninstall packages that no longer") % action)
13676 msg.append("exist in the portage tree since " + \
13677 "it may not be possible to satisfy their")
13678 msg.append("dependencies. Also, be aware of " + \
13679 "the --with-bdeps option that is documented")
13680 msg.append("in " + good("`man emerge`") + ".")
# Extra hint: prune (unlike depclean) can reasonably be run with --nodeps.
13681 if action == "prune":
13683 msg.append("If you would like to ignore " + \
13684 "dependencies then use %s." % good("--nodeps"))
13685 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13686 level=logging.ERROR, noiselevel=-1)
13690 if unresolved_deps():
13693 graph = resolver.digraph.copy()
13694 required_pkgs_total = 0
13696 if isinstance(node, Package):
13697 required_pkgs_total += 1
# Closure: print the packages that pull child_node into the dependency
# graph (used in --verbose mode to explain why a package is kept).
# NOTE(review): numbered listing with gaps — the lines initializing
# parent_strs/msg and any sorting are not visible here.
13699 def show_parents(child_node):
13700 parent_nodes = graph.parent_nodes(child_node)
13701 if not parent_nodes:
13702 # With --prune, the highest version can be pulled in without any
13703 # real parent since all installed packages are pulled in. In that
13704 # case there's nothing to show here.
# Non-Package nodes (e.g. set args) have no cpv, so fall back to str(node).
13707 for node in parent_nodes:
13708 parent_strs.append(str(getattr(node, "cpv", node)))
13711 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13712 for parent_str in parent_strs:
13713 msg.append(" %s\n" % (parent_str,))
13715 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Old-style cmp comparator, fed to sorted() through cmp_sort_key below.
# NOTE(review): the return lines (-1/0/1) are missing from this numbered
# listing; only the comparison structure is visible.
13717 def cmp_pkg_cpv(pkg1, pkg2):
13718 """Sort Package instances by cpv."""
13719 if pkg1.cpv > pkg2.cpv:
13721 elif pkg1.cpv == pkg2.cpv:
# Closure: build the list of installed packages eligible for removal —
# those not reachable in the (completed) dependency graph. Behavior
# branches on the enclosing `action` ("depclean" vs "prune").
# NOTE(review): numbered listing with gaps — e.g. the `try:` matching the
# visible `except` and several branch bodies are not shown here.
13726 def create_cleanlist():
13727 pkgs_to_remove = []
13729 if action == "depclean":
# Sort for deterministic output order.
13732 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13735 arg_atom = args_set.findAtomForPackage(pkg)
13736 except portage.exception.InvalidDependString:
13737 # this error has already been displayed by now
# A package not in the graph has nothing depending on it -> removable;
# otherwise, with --verbose, presumably show why it is kept (gap here).
13741 if pkg not in graph:
13742 pkgs_to_remove.append(pkg)
13743 elif "--verbose" in myopts:
13747 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13748 if pkg not in graph:
13749 pkgs_to_remove.append(pkg)
13750 elif "--verbose" in myopts:
13753 elif action == "prune":
13754 # Prune really uses all installed instead of world. It's not
13755 # a real reverse dependency so don't display it as such.
13756 graph.remove(set_args["world"])
# For prune, only packages matched by the argument atoms are candidates.
13758 for atom in args_set:
13759 for pkg in vardb.match_pkgs(atom):
13760 if pkg not in graph:
13761 pkgs_to_remove.append(pkg)
13762 elif "--verbose" in myopts:
# Nothing selected: tell the user and hint at the relevant options.
13765 if not pkgs_to_remove:
13767 ">>> No packages selected for removal by %s\n" % action)
13768 if "--verbose" not in myopts:
13770 ">>> To see reverse dependencies, use %s\n" % \
13772 if action == "prune":
13774 ">>> To ignore dependencies, use %s\n" % \
13777 return pkgs_to_remove
13779 cleanlist = create_cleanlist()
13782 clean_set = set(cleanlist)
13784 # Check if any of these package are the sole providers of libraries
13785 # with consumers that have not been selected for removal. If so, these
13786 # packages and any dependencies need to be added to the graph.
13787 real_vardb = trees[myroot]["vartree"].dbapi
13788 linkmap = real_vardb.linkmap
13789 liblist = linkmap.listLibraryObjects()
13790 consumer_cache = {}
13791 provider_cache = {}
13795 writemsg_level(">>> Checking for lib consumers...\n")
13797 for pkg in cleanlist:
13798 pkg_dblink = real_vardb._dblink(pkg.cpv)
13799 provided_libs = set()
13801 for lib in liblist:
13802 if pkg_dblink.isowner(lib, myroot):
13803 provided_libs.add(lib)
13805 if not provided_libs:
13809 for lib in provided_libs:
13810 lib_consumers = consumer_cache.get(lib)
13811 if lib_consumers is None:
13812 lib_consumers = linkmap.findConsumers(lib)
13813 consumer_cache[lib] = lib_consumers
13815 consumers[lib] = lib_consumers
13820 for lib, lib_consumers in consumers.items():
13821 for consumer_file in list(lib_consumers):
13822 if pkg_dblink.isowner(consumer_file, myroot):
13823 lib_consumers.remove(consumer_file)
13824 if not lib_consumers:
13830 for lib, lib_consumers in consumers.iteritems():
13832 soname = soname_cache.get(lib)
13834 soname = linkmap.getSoname(lib)
13835 soname_cache[lib] = soname
13837 consumer_providers = []
13838 for lib_consumer in lib_consumers:
13839 providers = provider_cache.get(lib)
13840 if providers is None:
13841 providers = linkmap.findProviders(lib_consumer)
13842 provider_cache[lib_consumer] = providers
13843 if soname not in providers:
13844 # Why does this happen?
13846 consumer_providers.append(
13847 (lib_consumer, providers[soname]))
13849 consumers[lib] = consumer_providers
13851 consumer_map[pkg] = consumers
13855 search_files = set()
13856 for consumers in consumer_map.itervalues():
13857 for lib, consumer_providers in consumers.iteritems():
13858 for lib_consumer, providers in consumer_providers:
13859 search_files.add(lib_consumer)
13860 search_files.update(providers)
13862 writemsg_level(">>> Assigning files to packages...\n")
13863 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13865 for pkg, consumers in consumer_map.items():
13866 for lib, consumer_providers in consumers.items():
13867 lib_consumers = set()
13869 for lib_consumer, providers in consumer_providers:
13870 owner_set = file_owners.get(lib_consumer)
13871 provider_dblinks = set()
13872 provider_pkgs = set()
13874 if len(providers) > 1:
13875 for provider in providers:
13876 provider_set = file_owners.get(provider)
13877 if provider_set is not None:
13878 provider_dblinks.update(provider_set)
13880 if len(provider_dblinks) > 1:
13881 for provider_dblink in provider_dblinks:
13882 pkg_key = ("installed", myroot,
13883 provider_dblink.mycpv, "nomerge")
13884 if pkg_key not in clean_set:
13885 provider_pkgs.add(vardb.get(pkg_key))
13890 if owner_set is not None:
13891 lib_consumers.update(owner_set)
13893 for consumer_dblink in list(lib_consumers):
13894 if ("installed", myroot, consumer_dblink.mycpv,
13895 "nomerge") in clean_set:
13896 lib_consumers.remove(consumer_dblink)
13900 consumers[lib] = lib_consumers
13904 del consumer_map[pkg]
13907 # TODO: Implement a package set for rebuilding consumer packages.
13909 msg = "In order to avoid breakage of link level " + \
13910 "dependencies, one or more packages will not be removed. " + \
13911 "This can be solved by rebuilding " + \
13912 "the packages that pulled them in."
13914 prefix = bad(" * ")
13915 from textwrap import wrap
13916 writemsg_level("".join(prefix + "%s\n" % line for \
13917 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13920 for pkg, consumers in consumer_map.iteritems():
13921 unique_consumers = set(chain(*consumers.values()))
13922 unique_consumers = sorted(consumer.mycpv \
13923 for consumer in unique_consumers)
13925 msg.append(" %s pulled in by:" % (pkg.cpv,))
13926 for consumer in unique_consumers:
13927 msg.append(" %s" % (consumer,))
13929 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13930 level=logging.WARNING, noiselevel=-1)
13932 # Add lib providers to the graph as children of lib consumers,
13933 # and also add any dependencies pulled in by the provider.
13934 writemsg_level(">>> Adding lib providers to graph...\n")
13936 for pkg, consumers in consumer_map.iteritems():
13937 for consumer_dblink in set(chain(*consumers.values())):
13938 consumer_pkg = vardb.get(("installed", myroot,
13939 consumer_dblink.mycpv, "nomerge"))
13940 if not resolver._add_pkg(pkg,
13941 Dependency(parent=consumer_pkg,
13942 priority=UnmergeDepPriority(runtime=True),
13944 resolver.display_problems()
13947 writemsg_level("\nCalculating dependencies ")
13948 success = resolver._complete_graph()
13949 writemsg_level("\b\b... done!\n")
13950 resolver.display_problems()
13953 if unresolved_deps():
13956 graph = resolver.digraph.copy()
13957 required_pkgs_total = 0
13959 if isinstance(node, Package):
13960 required_pkgs_total += 1
13961 cleanlist = create_cleanlist()
13964 clean_set = set(cleanlist)
13966 # Use a topological sort to create an unmerge order such that
13967 # each package is unmerged before it's dependencies. This is
13968 # necessary to avoid breaking things that may need to run
13969 # during pkg_prerm or pkg_postrm phases.
13971 # Create a new graph to account for dependencies between the
13972 # packages being unmerged.
13976 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13977 runtime = UnmergeDepPriority(runtime=True)
13978 runtime_post = UnmergeDepPriority(runtime_post=True)
13979 buildtime = UnmergeDepPriority(buildtime=True)
13981 "RDEPEND": runtime,
13982 "PDEPEND": runtime_post,
13983 "DEPEND": buildtime,
13986 for node in clean_set:
13987 graph.add(node, None)
13989 node_use = node.metadata["USE"].split()
13990 for dep_type in dep_keys:
13991 depstr = node.metadata[dep_type]
13995 portage.dep._dep_check_strict = False
13996 success, atoms = portage.dep_check(depstr, None, settings,
13997 myuse=node_use, trees=resolver._graph_trees,
14000 portage.dep._dep_check_strict = True
14002 # Ignore invalid deps of packages that will
14003 # be uninstalled anyway.
14006 priority = priority_map[dep_type]
14008 if not isinstance(atom, portage.dep.Atom):
14009 # Ignore invalid atoms returned from dep_check().
14013 matches = vardb.match_pkgs(atom)
14016 for child_node in matches:
14017 if child_node in clean_set:
14018 graph.add(child_node, node, priority=priority)
14021 if len(graph.order) == len(graph.root_nodes()):
14022 # If there are no dependencies between packages
14023 # let unmerge() group them by cat/pn.
14025 cleanlist = [pkg.cpv for pkg in graph.order]
14027 # Order nodes from lowest to highest overall reference count for
14028 # optimal root node selection.
14029 node_refcounts = {}
14030 for node in graph.order:
14031 node_refcounts[node] = len(graph.parent_nodes(node))
14032 def cmp_reference_count(node1, node2):
14033 return node_refcounts[node1] - node_refcounts[node2]
14034 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14036 ignore_priority_range = [None]
14037 ignore_priority_range.extend(
14038 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14039 while not graph.empty():
14040 for ignore_priority in ignore_priority_range:
14041 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14045 raise AssertionError("no root nodes")
14046 if ignore_priority is not None:
14047 # Some deps have been dropped due to circular dependencies,
14048 # so only pop one node in order do minimize the number that
14053 cleanlist.append(node.cpv)
14055 unmerge(root_config, myopts, "unmerge", cleanlist,
14056 ldpath_mtimes, ordered=ordered)
14058 if action == "prune":
14061 if not cleanlist and "--quiet" in myopts:
14064 print "Packages installed: "+str(len(vardb.cpv_all()))
14065 print "Packages in world: " + \
14066 str(len(root_config.sets["world"].getAtoms()))
14067 print "Packages in system: " + \
14068 str(len(root_config.sets["system"].getAtoms()))
14069 print "Required packages: "+str(required_pkgs_total)
14070 if "--pretend" in myopts:
14071 print "Number to remove: "+str(len(cleanlist))
14073 print "Number removed: "+str(len(cleanlist))
# NOTE(review): numbered listing with gaps — the docstring quotes, the
# `try:` matching the visible `except`, and several loop/branch lines are
# missing; comments added here cover only what is visible.
14075 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14077 Construct a depgraph for the given resume list. This will raise
14078 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14080 @returns: (success, depgraph, dropped_tasks)
14083 skip_unsatisfied = True
14084 mergelist = mtimedb["resume"]["mergelist"]
14085 dropped_tasks = set()
# Loop (header not visible): retry loading the resume command, pruning
# unsatisfied entries from the mergelist on each UnsatisfiedResumeDep.
14087 mydepgraph = depgraph(settings, trees,
14088 myopts, myparams, spinner)
14090 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14091 skip_masked=skip_masked)
14092 except depgraph.UnsatisfiedResumeDep, e:
14093 if not skip_unsatisfied:
14096 graph = mydepgraph.digraph
# Seed the worklist with the direct parents of unsatisfied deps; a dict
# is used as an identity-preserving set.
14097 unsatisfied_parents = dict((dep.parent, dep.parent) \
14098 for dep in e.value)
14099 traversed_nodes = set()
14100 unsatisfied_stack = list(unsatisfied_parents)
# Transitively mark parents whose own deps become unsatisfied once a
# child is dropped.
14101 while unsatisfied_stack:
14102 pkg = unsatisfied_stack.pop()
14103 if pkg in traversed_nodes:
14105 traversed_nodes.add(pkg)
14107 # If this package was pulled in by a parent
14108 # package scheduled for merge, removing this
14109 # package may cause the parent package's
14110 # dependency to become unsatisfied.
14111 for parent_node in graph.parent_nodes(pkg):
14112 if not isinstance(parent_node, Package) \
14113 or parent_node.operation not in ("merge", "nomerge"):
14116 graph.child_nodes(parent_node,
14117 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14118 if pkg in unsatisfied:
14119 unsatisfied_parents[parent_node] = parent_node
14120 unsatisfied_stack.append(parent_node)
# Keep only mergelist entries (4-element lists, keyed as tuples) that
# are not among the unsatisfied parents.
14122 pruned_mergelist = []
14123 for x in mergelist:
14124 if isinstance(x, list) and \
14125 tuple(x) not in unsatisfied_parents:
14126 pruned_mergelist.append(x)
14128 # If the mergelist doesn't shrink then this loop is infinite.
14129 if len(pruned_mergelist) == len(mergelist):
14130 # This happens if a package can't be dropped because
14131 # it's already installed, but it has unsatisfied PDEPEND.
14133 mergelist[:] = pruned_mergelist
14135 # Exclude installed packages that have been removed from the graph due
14136 # to failure to build/install runtime dependencies after the dependent
14137 # package has already been installed.
14138 dropped_tasks.update(pkg for pkg in \
14139 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs avoids keeping the pruned packages alive via back-references.
14140 mydepgraph.break_refs(unsatisfied_parents)
14142 del e, graph, traversed_nodes, \
14143 unsatisfied_parents, unsatisfied_stack
14147 return (success, mydepgraph, dropped_tasks)
14149 def action_build(settings, trees, mtimedb,
14150 myopts, myaction, myfiles, spinner):
14152 # validate the state of the resume data
14153 # so that we can make assumptions later.
14154 for k in ("resume", "resume_backup"):
14155 if k not in mtimedb:
14157 resume_data = mtimedb[k]
14158 if not isinstance(resume_data, dict):
14161 mergelist = resume_data.get("mergelist")
14162 if not isinstance(mergelist, list):
14165 for x in mergelist:
14166 if not (isinstance(x, list) and len(x) == 4):
14168 pkg_type, pkg_root, pkg_key, pkg_action = x
14169 if pkg_root not in trees:
14170 # Current $ROOT setting differs,
14171 # so the list must be stale.
14177 resume_opts = resume_data.get("myopts")
14178 if not isinstance(resume_opts, (dict, list)):
14181 favorites = resume_data.get("favorites")
14182 if not isinstance(favorites, list):
14187 if "--resume" in myopts and \
14188 ("resume" in mtimedb or
14189 "resume_backup" in mtimedb):
14191 if "resume" not in mtimedb:
14192 mtimedb["resume"] = mtimedb["resume_backup"]
14193 del mtimedb["resume_backup"]
14195 # "myopts" is a list for backward compatibility.
14196 resume_opts = mtimedb["resume"].get("myopts", [])
14197 if isinstance(resume_opts, list):
14198 resume_opts = dict((k,True) for k in resume_opts)
14199 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14200 resume_opts.pop(opt, None)
14201 myopts.update(resume_opts)
14203 if "--debug" in myopts:
14204 writemsg_level("myopts %s\n" % (myopts,))
14206 # Adjust config according to options of the command being resumed.
14207 for myroot in trees:
14208 mysettings = trees[myroot]["vartree"].settings
14209 mysettings.unlock()
14210 adjust_config(myopts, mysettings)
14212 del myroot, mysettings
14214 ldpath_mtimes = mtimedb["ldpath"]
14217 buildpkgonly = "--buildpkgonly" in myopts
14218 pretend = "--pretend" in myopts
14219 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14220 ask = "--ask" in myopts
14221 nodeps = "--nodeps" in myopts
14222 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14223 tree = "--tree" in myopts
14224 if nodeps and tree:
14226 del myopts["--tree"]
14227 portage.writemsg(colorize("WARN", " * ") + \
14228 "--tree is broken with --nodeps. Disabling...\n")
14229 debug = "--debug" in myopts
14230 verbose = "--verbose" in myopts
14231 quiet = "--quiet" in myopts
14232 if pretend or fetchonly:
14233 # make the mtimedb readonly
14234 mtimedb.filename = None
14235 if "--digest" in myopts:
14236 msg = "The --digest option can prevent corruption from being" + \
14237 " noticed. The `repoman manifest` command is the preferred" + \
14238 " way to generate manifests and it is capable of doing an" + \
14239 " entire repository or category at once."
14240 prefix = bad(" * ")
14241 writemsg(prefix + "\n")
14242 from textwrap import wrap
14243 for line in wrap(msg, 72):
14244 writemsg("%s%s\n" % (prefix, line))
14245 writemsg(prefix + "\n")
14247 if "--quiet" not in myopts and \
14248 ("--pretend" in myopts or "--ask" in myopts or \
14249 "--tree" in myopts or "--verbose" in myopts):
14251 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14253 elif "--buildpkgonly" in myopts:
14257 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14259 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14263 print darkgreen("These are the packages that would be %s, in order:") % action
14266 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14267 if not show_spinner:
14268 spinner.update = spinner.update_quiet
14271 favorites = mtimedb["resume"].get("favorites")
14272 if not isinstance(favorites, list):
14276 print "Calculating dependencies ",
14277 myparams = create_depgraph_params(myopts, myaction)
14279 resume_data = mtimedb["resume"]
14280 mergelist = resume_data["mergelist"]
14281 if mergelist and "--skipfirst" in myopts:
14282 for i, task in enumerate(mergelist):
14283 if isinstance(task, list) and \
14284 task and task[-1] == "merge":
14291 success, mydepgraph, dropped_tasks = resume_depgraph(
14292 settings, trees, mtimedb, myopts, myparams, spinner)
14293 except (portage.exception.PackageNotFound,
14294 depgraph.UnsatisfiedResumeDep), e:
14295 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14296 mydepgraph = e.depgraph
14299 from textwrap import wrap
14300 from portage.output import EOutput
14303 resume_data = mtimedb["resume"]
14304 mergelist = resume_data.get("mergelist")
14305 if not isinstance(mergelist, list):
14307 if mergelist and debug or (verbose and not quiet):
14308 out.eerror("Invalid resume list:")
14311 for task in mergelist:
14312 if isinstance(task, list):
14313 out.eerror(indent + str(tuple(task)))
14316 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14317 out.eerror("One or more packages are either masked or " + \
14318 "have missing dependencies:")
14321 for dep in e.value:
14322 if dep.atom is None:
14323 out.eerror(indent + "Masked package:")
14324 out.eerror(2 * indent + str(dep.parent))
14327 out.eerror(indent + str(dep.atom) + " pulled in by:")
14328 out.eerror(2 * indent + str(dep.parent))
14330 msg = "The resume list contains packages " + \
14331 "that are either masked or have " + \
14332 "unsatisfied dependencies. " + \
14333 "Please restart/continue " + \
14334 "the operation manually, or use --skipfirst " + \
14335 "to skip the first package in the list and " + \
14336 "any other packages that may be " + \
14337 "masked or have missing dependencies."
14338 for line in wrap(msg, 72):
14340 elif isinstance(e, portage.exception.PackageNotFound):
14341 out.eerror("An expected package is " + \
14342 "not available: %s" % str(e))
14344 msg = "The resume list contains one or more " + \
14345 "packages that are no longer " + \
14346 "available. Please restart/continue " + \
14347 "the operation manually."
14348 for line in wrap(msg, 72):
14352 print "\b\b... done!"
14356 portage.writemsg("!!! One or more packages have been " + \
14357 "dropped due to\n" + \
14358 "!!! masking or unsatisfied dependencies:\n\n",
14360 for task in dropped_tasks:
14361 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14362 portage.writemsg("\n", noiselevel=-1)
14365 if mydepgraph is not None:
14366 mydepgraph.display_problems()
14367 if not (ask or pretend):
14368 # delete the current list and also the backup
14369 # since it's probably stale too.
14370 for k in ("resume", "resume_backup"):
14371 mtimedb.pop(k, None)
14376 if ("--resume" in myopts):
14377 print darkgreen("emerge: It seems we have nothing to resume...")
14380 myparams = create_depgraph_params(myopts, myaction)
14381 if "--quiet" not in myopts and "--nodeps" not in myopts:
14382 print "Calculating dependencies ",
14384 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14386 retval, favorites = mydepgraph.select_files(myfiles)
14387 except portage.exception.PackageNotFound, e:
14388 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14390 except portage.exception.PackageSetNotFound, e:
14391 root_config = trees[settings["ROOT"]]["root_config"]
14392 display_missing_pkg_set(root_config, e.value)
14395 print "\b\b... done!"
14397 mydepgraph.display_problems()
14400 if "--pretend" not in myopts and \
14401 ("--ask" in myopts or "--tree" in myopts or \
14402 "--verbose" in myopts) and \
14403 not ("--quiet" in myopts and "--ask" not in myopts):
14404 if "--resume" in myopts:
14405 mymergelist = mydepgraph.altlist()
14406 if len(mymergelist) == 0:
14407 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14409 favorites = mtimedb["resume"]["favorites"]
14410 retval = mydepgraph.display(
14411 mydepgraph.altlist(reversed=tree),
14412 favorites=favorites)
14413 mydepgraph.display_problems()
14414 if retval != os.EX_OK:
14416 prompt="Would you like to resume merging these packages?"
14418 retval = mydepgraph.display(
14419 mydepgraph.altlist(reversed=("--tree" in myopts)),
14420 favorites=favorites)
14421 mydepgraph.display_problems()
14422 if retval != os.EX_OK:
14425 for x in mydepgraph.altlist():
14426 if isinstance(x, Package) and x.operation == "merge":
14430 sets = trees[settings["ROOT"]]["root_config"].sets
14431 world_candidates = None
14432 if "--noreplace" in myopts and \
14433 not oneshot and favorites:
14434 # Sets that are not world candidates are filtered
14435 # out here since the favorites list needs to be
14436 # complete for depgraph.loadResumeCommand() to
14437 # operate correctly.
14438 world_candidates = [x for x in favorites \
14439 if not (x.startswith(SETPREFIX) and \
14440 not sets[x[1:]].world_candidate)]
14441 if "--noreplace" in myopts and \
14442 not oneshot and world_candidates:
14444 for x in world_candidates:
14445 print " %s %s" % (good("*"), x)
14446 prompt="Would you like to add these packages to your world favorites?"
14447 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14448 prompt="Nothing to merge; would you like to auto-clean packages?"
14451 print "Nothing to merge; quitting."
14454 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14455 prompt="Would you like to fetch the source files for these packages?"
14457 prompt="Would you like to merge these packages?"
14459 if "--ask" in myopts and userquery(prompt) == "No":
14464 # Don't ask again (e.g. when auto-cleaning packages after merge)
14465 myopts.pop("--ask", None)
14467 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14468 if ("--resume" in myopts):
14469 mymergelist = mydepgraph.altlist()
14470 if len(mymergelist) == 0:
14471 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14473 favorites = mtimedb["resume"]["favorites"]
14474 retval = mydepgraph.display(
14475 mydepgraph.altlist(reversed=tree),
14476 favorites=favorites)
14477 mydepgraph.display_problems()
14478 if retval != os.EX_OK:
14481 retval = mydepgraph.display(
14482 mydepgraph.altlist(reversed=("--tree" in myopts)),
14483 favorites=favorites)
14484 mydepgraph.display_problems()
14485 if retval != os.EX_OK:
14487 if "--buildpkgonly" in myopts:
14488 graph_copy = mydepgraph.digraph.clone()
14489 removed_nodes = set()
14490 for node in list(graph_copy.order):
14491 if not isinstance(node, Package) or \
14492 node.operation == "nomerge":
14493 removed_nodes.add(node)
14494 graph_copy.difference_update(removed_nodes)
14495 if not graph_copy.hasallzeros(ignore_priority = \
14496 DepPrioritySatisfiedRange.ignore_medium):
14497 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14498 print "!!! You have to merge the dependencies before you can build this package.\n"
14501 if "--buildpkgonly" in myopts:
14502 graph_copy = mydepgraph.digraph.clone()
14503 removed_nodes = set()
14504 for node in list(graph_copy.order):
14505 if not isinstance(node, Package) or \
14506 node.operation == "nomerge":
14507 removed_nodes.add(node)
14508 graph_copy.difference_update(removed_nodes)
14509 if not graph_copy.hasallzeros(ignore_priority = \
14510 DepPrioritySatisfiedRange.ignore_medium):
14511 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14512 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14515 if ("--resume" in myopts):
14516 favorites=mtimedb["resume"]["favorites"]
14517 mymergelist = mydepgraph.altlist()
14518 mydepgraph.break_refs(mymergelist)
14519 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14520 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14521 del mydepgraph, mymergelist
14522 clear_caches(trees)
14524 retval = mergetask.merge()
14525 merge_count = mergetask.curval
14527 if "resume" in mtimedb and \
14528 "mergelist" in mtimedb["resume"] and \
14529 len(mtimedb["resume"]["mergelist"]) > 1:
14530 mtimedb["resume_backup"] = mtimedb["resume"]
14531 del mtimedb["resume"]
14533 mtimedb["resume"]={}
14534 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14535 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14536 # a list type for options.
14537 mtimedb["resume"]["myopts"] = myopts.copy()
14539 # Convert Atom instances to plain str.
14540 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14542 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14543 for pkgline in mydepgraph.altlist():
14544 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14545 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14546 tmpsettings = portage.config(clone=settings)
14548 if settings.get("PORTAGE_DEBUG", "") == "1":
14550 retval = portage.doebuild(
14551 y, "digest", settings["ROOT"], tmpsettings, edebug,
14552 ("--pretend" in myopts),
14553 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14556 pkglist = mydepgraph.altlist()
14557 mydepgraph.saveNomergeFavorites()
14558 mydepgraph.break_refs(pkglist)
14559 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14560 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14561 del mydepgraph, pkglist
14562 clear_caches(trees)
14564 retval = mergetask.merge()
14565 merge_count = mergetask.curval
14567 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14568 if "yes" == settings.get("AUTOCLEAN"):
14569 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14570 unmerge(trees[settings["ROOT"]]["root_config"],
14571 myopts, "clean", [],
14572 ldpath_mtimes, autoclean=1)
14574 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14575 + " AUTOCLEAN is disabled. This can cause serious"
14576 + " problems due to overlapping packages.\n")
14577 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that two mutually-exclusive emerge actions were requested.
# NOTE(review): the numbered listing skips the line(s) after 14583 —
# presumably a sys.exit() follows; confirm against the full file.
14581 def multiple_actions(action1, action2):
14582 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14583 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# NOTE(review): numbered listing with gaps — the docstring quotes, loop
# header, and several initializations (new_args, job_count, saved_opts)
# are not visible; comments describe only the visible lines.
14586 def insert_optional_args(args):
14588 Parse optional arguments and insert a value if one has
14589 not been provided. This is done before feeding the args
14590 to the optparse parser since that parser does not support
14591 this feature natively.
14595 jobs_opts = ("-j", "--jobs")
# Process args left to right by popping from a reversed copy.
14596 arg_stack = args[:]
14597 arg_stack.reverse()
14599 arg = arg_stack.pop()
# A short -j may be bundled with other short options (e.g. "-aj").
14601 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14602 if not (short_job_opt or arg in jobs_opts):
14603 new_args.append(arg)
14606 # Insert an empty placeholder in order to
14607 # satisfy the requirements of optparse.
14609 new_args.append("--jobs")
# Attached value forms: "-jN" carries the count directly; "-xyzj" style
# bundles save the other short opts for re-emission below.
14612 if short_job_opt and len(arg) > 2:
14613 if arg[:2] == "-j":
14615 job_count = int(arg[2:])
14617 saved_opts = arg[2:]
14620 saved_opts = arg[1:].replace("j", "")
# Detached value form: "-j N" — peek at the next arg for an integer.
14622 if job_count is None and arg_stack:
14624 job_count = int(arg_stack[-1])
14628 # Discard the job count from the stack
14629 # since we're consuming it here.
14632 if job_count is None:
14633 # unlimited number of jobs
14634 new_args.append("True")
14636 new_args.append(str(job_count))
# Re-emit any short options that were bundled with -j.
14638 if saved_opts is not None:
14639 new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
    """Parse the emerge command line into (myaction, myopts, myfiles).

    NOTE(review): this copy of the file is missing many lines of this
    function (the myaction/myopts/myfiles initialization, most keys and
    braces of the argument_options dict, the try: headers matching the
    conversions below, and several if/else branches).  Gap comments
    mark the visible holes; confirm against a pristine copy.
    """
    # actions/options/shortmapping are module-level tables defined
    # elsewhere in this file.
    global actions, options, shortmapping

    # Long options accepted under an alternate spelling.
    longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
    # Options that take an argument; values are optparse add_option
    # kwargs.  (gap: the option-name keys and several braces are lost)
    argument_options = {
        "help":"specify the location for portage configuration files",
        "help":"enable or disable color output",
        "choices":("y", "n")
        "help" : "Specifies the number of packages to build " + \
        "--load-average": {
        "help" :"Specifies that no new builds should be started " + \
        "if there are other builds running and the load average " + \
        "is at least LOAD (a floating-point number).",
        "help":"include unnecessary build time dependencies",
        "choices":("y", "n")
        "help":"specify conditions to trigger package reinstallation",
        "choices":["changed-use"]

    from optparse import OptionParser
    parser = OptionParser()
    if parser.has_option("--help"):
        # emerge implements its own --help output.
        parser.remove_option("--help")

    # Boolean action flags (--sync, --depclean, ...).
    for action_opt in actions:
        parser.add_option("--" + action_opt, action="store_true",
            dest=action_opt.replace("-", "_"), default=False)
    # Boolean behaviour flags (--pretend, --verbose, ...).
    for myopt in options:
        parser.add_option(myopt, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    # Short aliases, e.g. -p for --pretend.
    for shortopt, longopt in shortmapping.iteritems():
        parser.add_option("-" + shortopt, action="store_true",
            dest=longopt.lstrip("--").replace("-", "_"), default=False)
    for myalias, myopt in longopt_aliases.iteritems():
        parser.add_option(myalias, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)

    for myopt, kwargs in argument_options.iteritems():
        parser.add_option(myopt,
            dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

    # Give optional arguments (e.g. bare --jobs) explicit values before
    # optparse sees them.
    tmpcmdline = insert_optional_args(tmpcmdline)

    myoptions, myargs = parser.parse_args(args=tmpcmdline)

    # --jobs: the "True" placeholder means unlimited jobs.
    if myoptions.jobs == "True":
        # gap: jobs = True assignment and the else/try headers are lost
        jobs = int(myoptions.jobs)
    # gap: the validation condition continues on a lost line
    if jobs is not True and \
        writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
            (myoptions.jobs,), noiselevel=-1)
    myoptions.jobs = jobs

    # --load-average must be a positive float; <= 0 disables the limit.
    if myoptions.load_average:
        load_average = float(myoptions.load_average)
        if load_average <= 0.0:
            load_average = None
        writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
            (myoptions.load_average,), noiselevel=-1)
        myoptions.load_average = load_average

    # Copy set boolean flags into the myopts dict, keyed by option text.
    for myopt in options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
        myopts[myopt] = True

    for myopt in argument_options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)

    # Exactly one action may be requested; a second is an error.
    for action_opt in actions:
        v = getattr(myoptions, action_opt.replace("-", "_"))
        multiple_actions(myaction, action_opt)
        myaction = action_opt

    return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
    """Run the settings validation hook for every configured root."""
    for root_trees in trees.values():
        root_trees["vartree"].settings.validate()
def clear_caches(trees):
    """Flush per-root db caches and portage's global directory cache.

    Drops the aux caches of the port/bin trees, the bintree's own
    cache, the vartree linkmap cache, and finally portage.dircache.
    """
    for root_trees in trees.itervalues():
        porttree_db = root_trees["porttree"].dbapi
        bintree_db = root_trees["bintree"].dbapi
        porttree_db.melt()
        porttree_db._aux_cache.clear()
        bintree_db._aux_cache.clear()
        bintree_db._clear_cache()
        root_trees["vartree"].dbapi.linkmap._clear_cache()
    portage.dircache.clear()
def load_emerge_config(trees=None):
    """Build and return (settings, trees, mtimedb) from the environment.

    NOTE(review): this copy is missing lines (the kwargs dict the
    env-var loop fills, and parts of the per-root loops near the end).
    """
    # Collect create_trees() keyword arguments from the environment.
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        v = os.environ.get(envvar, None)
        if v and v.strip():
            # gap: kwargs[k] = v presumably lost here -- confirm
    trees = portage.create_trees(trees=trees, **kwargs)

    # Attach a RootConfig (settings + set configuration) to each root.
    for root, root_trees in trees.iteritems():
        settings = root_trees["vartree"].settings
        setconfig = load_default_config(settings, root_trees)
        root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

    settings = trees["/"]["vartree"].settings

    for myroot in trees:
        # gap: the condition selecting a non-"/" root is lost here
        settings = trees[myroot]["vartree"].settings

    mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
    mtimedb = portage.MtimeDB(mtimedbfile)
    return settings, trees, mtimedb
def adjust_config(myopts, settings):
    """Make emerge specific adjustments to the config."""
    # NOTE(review): this copy of the file is missing lines in this
    # function (default-value assignments, the try: headers matching the
    # except clauses below, and some else: branches).  Gap comments mark
    # the visible holes.

    # To enhance usability, make some vars case insensitive by forcing them to
    # lower case.
    for myvar in ("AUTOCLEAN", "NOCOLOR"):
        if myvar in settings:
            settings[myvar] = settings[myvar].lower()
            settings.backup_changes(myvar)

    # Kill noauto as it will break merges otherwise.
    if "noauto" in settings.features:
        while "noauto" in settings.features:
            settings.features.remove("noauto")
        settings["FEATURES"] = " ".join(settings.features)
        settings.backup_changes("FEATURES")

    # gap: CLEAN_DELAY default assignment and try: lost here
        CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
            settings["CLEAN_DELAY"], noiselevel=-1)
    # Persist the parsed value back into settings as a string.
    settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
    settings.backup_changes("CLEAN_DELAY")

    EMERGE_WARNING_DELAY = 10
    # gap: try: lost here
        EMERGE_WARNING_DELAY = int(settings.get(
            "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
            settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
    settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
    settings.backup_changes("EMERGE_WARNING_DELAY")

    if "--quiet" in myopts:
        settings["PORTAGE_QUIET"]="1"
        settings.backup_changes("PORTAGE_QUIET")

    if "--verbose" in myopts:
        settings["PORTAGE_VERBOSE"] = "1"
        settings.backup_changes("PORTAGE_VERBOSE")

    # Set so that configs will be merged regardless of remembered status
    if ("--noconfmem" in myopts):
        settings["NOCONFMEM"]="1"
        settings.backup_changes("NOCONFMEM")

    # Set various debug markers... They should be merged somehow.
    # gap: PORTAGE_DEBUG default assignment and try: lost here
        PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
        if PORTAGE_DEBUG not in (0, 1):
            portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
                PORTAGE_DEBUG, noiselevel=-1)
            portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
            settings["PORTAGE_DEBUG"], noiselevel=-1)

    if "--debug" in myopts:
        # gap: a PORTAGE_DEBUG assignment presumably lost here -- confirm
        settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
        settings.backup_changes("PORTAGE_DEBUG")

    if settings.get("NOCOLOR") not in ("yes","true"):
        portage.output.havecolor = 1

    """The explicit --color < y | n > option overrides the NOCOLOR environment
    variable and stdout auto-detection."""
    if "--color" in myopts:
        if "y" == myopts["--color"]:
            portage.output.havecolor = 1
            settings["NOCOLOR"] = "false"
        # gap: else: lost here
            portage.output.havecolor = 0
            settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
    elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
        portage.output.havecolor = 0
        settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
    # NOTE(review): the body of this function is missing from this copy
    # of the file.  Judging from the nice()/ionice() helpers defined just
    # below, it presumably invokes them -- confirm against a pristine copy.
def nice(settings):
    """Renice the current process according to PORTAGE_NICENESS (default 0)."""
    # NOTE(review): the try: line pairing with the except below is
    # missing from this copy of the file.
        os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
    except (OSError, ValueError), e:
        # Best-effort: report the failure and keep going.
        out = portage.output.EOutput()
        out.eerror("Failed to change nice value to '%s'" % \
            settings["PORTAGE_NICENESS"])
        out.eerror("%s\n" % str(e))
def ionice(settings):
    """Run the user-configured PORTAGE_IONICE_COMMAND for this process.

    NOTE(review): several lines are missing from this copy (the guard
    for an unset command, the try: pairing with the except below, and
    the early return in the CommandNotFound branch).
    """
    ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
    ionice_cmd = shlex.split(ionice_cmd)
    from portage.util import varexpand
    # Substitute ${PID} in the configured command with our own pid.
    variables = {"PID" : str(os.getpid())}
    cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
    # gap: a try: presumably guards the spawn below
        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        # The OS kernel probably doesn't support ionice,
        # so return silently.
    if rval != os.EX_OK:
        out = portage.output.EOutput()
        out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
        out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
    """Log an error listing the available sets when set_name is unknown.

    NOTE(review): the `msg = []` initialization is missing from this copy.
    """
    msg.append(("emerge: There are no sets to satisfy '%s'. " + \
        "The following sets exist:") % \
        colorize("INFORM", set_name))
    # One indented line per available set name.
    for s in sorted(root_config.sets):
        msg.append(" %s" % s)
    writemsg_level("".join("%s\n" % l for l in msg),
        level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
    """Expand @set arguments in myfiles; returns (newargs, retval).

    Handles per-set option syntax (@set{key=val,...}), simple set algebra
    on the command line, and replaces set tokens by their member atoms
    unless myaction is in do_not_expand.

    NOTE(review): this copy of the file has lost many lines of this
    function (retval/newargs initialization, the ARG_START/ARG_END and
    IS_OPERATOR constants, several loop and if/else/try headers, and the
    error returns).  Gap comments below mark the visible holes.
    """
    setconfig = root_config.setconfig

    sets = setconfig.getSets()

    # In order to know exactly which atoms/sets should be added to the
    # world file, the depgraph performs set expansion later. It will get
    # confused about where the atoms came from if it's not allowed to
    # expand them itself.
    do_not_expand = (None, )
    # gap: the loop normalizing bare "system"/"world" tokens into
    # SETPREFIX form starts on a lost line.
    if a in ("system", "world"):
        newargs.append(SETPREFIX+a)

    # separators for set arguments
    # gap: ARG_START / ARG_END definitions lost here
    # WARNING: all operators must be of equal length
    # gap: IS_OPERATOR definition lost here
    DIFF_OPERATOR = "-@"
    UNION_OPERATOR = "+@"

    for i in range(0, len(myfiles)):
        if myfiles[i].startswith(SETPREFIX):
            x = myfiles[i][len(SETPREFIX):]
            # Look for an embedded option block between ARG_START/ARG_END.
            start = x.find(ARG_START)
            end = x.find(ARG_END)
            if start > 0 and start < end:
                namepart = x[:start]
                argpart = x[start+1:end]

                # TODO: implement proper quoting
                args = argpart.split(",")
                # key=value options; bare words become "True" flags.
                k, v = a.split("=", 1)
                options[a] = "True"
                setconfig.update(namepart, options)
                newset += (x[:start-len(namepart)]+namepart)
                x = x[end+len(ARG_END):]
            myfiles[i] = SETPREFIX+newset

    sets = setconfig.getSets()

    # display errors that occured while loading the SetConfig instance
    for e in setconfig.errors:
        print colorize("BAD", "Error during set creation: %s" % e)

    # emerge relies on the existance of sets with names "world" and "system"
    required_sets = ("world", "system")

    # gap: missing_sets initialization and the membership test lost here
    for s in required_sets:
        missing_sets.append(s)

    # Build an English list: 'a', 'b', and 'c' / 'a' and 'b' / 'a'.
    if len(missing_sets) > 2:
        missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
        missing_sets_str += ', and "%s"' % missing_sets[-1]
    elif len(missing_sets) == 2:
        missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
    missing_sets_str = '"%s"' % missing_sets[-1]
    msg = ["emerge: incomplete set configuration, " + \
        "missing set(s): %s" % missing_sets_str]
    msg.append(" sets defined: %s" % ", ".join(sets))
    msg.append(" This usually means that '%s'" % \
        (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
    msg.append(" is missing or corrupt.")
    writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

    unmerge_actions = ("unmerge", "prune", "clean", "depclean")

    # gap: the `for a in myfiles:` loop header is lost here
    if a.startswith(SETPREFIX):
        # support simple set operations (intersection, difference and union)
        # on the commandline. Expressions are evaluated strictly left-to-right
        if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
            expression = a[len(SETPREFIX):]
            # Peel operators off right-to-left, collecting operands so the
            # final evaluation below runs left-to-right.
            while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
                is_pos = expression.rfind(IS_OPERATOR)
                diff_pos = expression.rfind(DIFF_OPERATOR)
                union_pos = expression.rfind(UNION_OPERATOR)
                op_pos = max(is_pos, diff_pos, union_pos)
                s1 = expression[:op_pos]
                s2 = expression[op_pos+len(IS_OPERATOR):]
                op = expression[op_pos:op_pos+len(IS_OPERATOR)]
                display_missing_pkg_set(root_config, s2)
                expr_sets.insert(0, s2)
                expr_ops.insert(0, op)
            if not expression in sets:
                display_missing_pkg_set(root_config, expression)
            expr_sets.insert(0, expression)
            result = set(setconfig.getSetAtoms(expression))
            # Apply the recorded operators pairwise, left to right.
            for i in range(0, len(expr_ops)):
                s2 = setconfig.getSetAtoms(expr_sets[i+1])
                if expr_ops[i] == IS_OPERATOR:
                    result.intersection_update(s2)
                elif expr_ops[i] == DIFF_OPERATOR:
                    result.difference_update(s2)
                elif expr_ops[i] == UNION_OPERATOR:
                    # gap: the union update and an else: presumably lost here
                    raise NotImplementedError("unknown set operator %s" % expr_ops[i])
            newargs.extend(result)
            # Plain @set reference (no operators).
            s = a[len(SETPREFIX):]
            display_missing_pkg_set(root_config, s)
            setconfig.active.append(s)
            # gap: a try: presumably guards getSetAtoms below
            set_atoms = setconfig.getSetAtoms(s)
        except portage.exception.PackageSetNotFound, e:
            writemsg_level(("emerge: the given set '%s' " + \
                "contains a non-existent set named '%s'.\n") % \
                (s, e), level=logging.ERROR, noiselevel=-1)
        if myaction in unmerge_actions and \
            not sets[s].supportsOperation("unmerge"):
            sys.stderr.write("emerge: the given set '%s' does " % s + \
                "not support unmerge operations\n")
        elif not set_atoms:
            print "emerge: '%s' is an empty set" % s
        elif myaction not in do_not_expand:
            newargs.extend(set_atoms)
        # gap: else-branch keeping the raw set token presumably lost
        newargs.append(SETPREFIX+s)
        for e in sets[s].errors:
    return (newargs, retval)
def repo_name_check(trees):
    """Warn about repositories lacking a profiles/repo_name entry.

    Returns True when at least one repository is missing a name.

    NOTE(review): this copy is missing the `for r in repos:` loop header
    above the discard() call, and the `msg = []` initialization.
    """
    missing_repo_names = set()
    for root, root_trees in trees.iteritems():
        if "porttree" in root_trees:
            portdb = root_trees["porttree"].dbapi
            # Start by assuming every configured tree lacks a name, then
            # discard the paths of repositories that declare one.
            missing_repo_names.update(portdb.porttrees)
            repos = portdb.getRepositories()
            missing_repo_names.discard(portdb.getRepositoryPath(r))
            if portdb.porttree_root in missing_repo_names and \
                not os.path.exists(os.path.join(
                portdb.porttree_root, "profiles")):
                # This is normal if $PORTDIR happens to be empty,
                # so don't warn about it.
                missing_repo_names.remove(portdb.porttree_root)

    if missing_repo_names:
        msg.append("WARNING: One or more repositories " + \
            "have missing repo_name entries:")
        for p in missing_repo_names:
            msg.append("\t%s/profiles/repo_name" % (p,))
        msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
            "should be a plain text file containing a unique " + \
            "name for the repository on the first line.", 70))
        writemsg_level("".join("%s\n" % l for l in msg),
            level=logging.WARNING, noiselevel=-1)

    return bool(missing_repo_names)
def config_protect_check(trees):
    """Warn when CONFIG_PROTECT is unset for any configured root.

    NOTE(review): a line appears to be missing between the two msg
    statements below (presumably a guard that only appends the root
    suffix for non-default roots) -- confirm against a pristine copy.
    """
    for root, root_trees in trees.iteritems():
        if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
            msg = "!!! CONFIG_PROTECT is empty"
            msg += " for '%s'" % root
            writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """Tell the user a short package name matched multiple categories.

    NOTE(review): lines are missing in this copy (the else: pairing
    with the --quiet branch, the closing of the
    insert_category_into_atom() call, and the body of the final loop).
    """
    if "--quiet" in myopts:
        # Quiet mode: just list the fully-qualified candidates.
        print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
        print "!!! one of the following fully-qualified ebuild names instead:\n"
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            print " " + colorize("INFORM", cp)
    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
    print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
    print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
    """Verify every root has a valid profile before other actions run.

    NOTE(review): the return statements are missing from this copy (the
    early-exit branches for allowed actions and the final success/error
    returns) -- confirm against a pristine copy.
    """
    # info/sync and --version/--help are allowed even with a broken
    # profile (see the message text below).
    if myaction in ("info", "sync"):
    elif "--version" in myopts or "--help" in myopts:
    for root, root_trees in trees.iteritems():
        if root_trees["root_config"].settings.profiles:
        # generate some profile related warning messages
        validate_ebuild_environment(trees)
        msg = "If you have just changed your profile configuration, you " + \
            "should revert back to the previous configuration. Due to " + \
            "your current profile being invalid, allowed actions are " + \
            "limited to --help, --info, --sync, and --version."
        writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
            level=logging.ERROR, noiselevel=-1)
15197 global portage # NFC why this is necessary now - genone
15198 portage._disable_legacy_globals()
15199 # Disable color until we're sure that it should be enabled (after
15200 # EMERGE_DEFAULT_OPTS has been parsed).
15201 portage.output.havecolor = 0
15202 # This first pass is just for options that need to be known as early as
15203 # possible, such as --config-root. They will be parsed again later,
15204 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15205 # the value of --config-root).
15206 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15207 if "--debug" in myopts:
15208 os.environ["PORTAGE_DEBUG"] = "1"
15209 if "--config-root" in myopts:
15210 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15212 # Portage needs to ensure a sane umask for the files it creates.
15214 settings, trees, mtimedb = load_emerge_config()
15215 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15216 rval = profile_check(trees, myaction, myopts)
15217 if rval != os.EX_OK:
15220 if portage._global_updates(trees, mtimedb["updates"]):
15222 # Reload the whole config from scratch.
15223 settings, trees, mtimedb = load_emerge_config(trees=trees)
15224 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15226 xterm_titles = "notitles" not in settings.features
15229 if "--ignore-default-opts" not in myopts:
15230 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15231 tmpcmdline.extend(sys.argv[1:])
15232 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15234 if "--digest" in myopts:
15235 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15236 # Reload the whole config from scratch so that the portdbapi internal
15237 # config is updated with new FEATURES.
15238 settings, trees, mtimedb = load_emerge_config(trees=trees)
15239 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15241 for myroot in trees:
15242 mysettings = trees[myroot]["vartree"].settings
15243 mysettings.unlock()
15244 adjust_config(myopts, mysettings)
15245 if "--pretend" not in myopts:
15246 mysettings["PORTAGE_COUNTER_HASH"] = \
15247 trees[myroot]["vartree"].dbapi._counter_hash()
15248 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15250 del myroot, mysettings
15252 apply_priorities(settings)
15254 spinner = stdout_spinner()
15255 if "candy" in settings.features:
15256 spinner.update = spinner.update_scroll
15258 if "--quiet" not in myopts:
15259 portage.deprecated_profile_check(settings=settings)
15260 repo_name_check(trees)
15261 config_protect_check(trees)
15263 eclasses_overridden = {}
15264 for mytrees in trees.itervalues():
15265 mydb = mytrees["porttree"].dbapi
15266 # Freeze the portdbapi for performance (memoize all xmatch results).
15268 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15271 if eclasses_overridden and \
15272 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15273 prefix = bad(" * ")
15274 if len(eclasses_overridden) == 1:
15275 writemsg(prefix + "Overlay eclass overrides " + \
15276 "eclass from PORTDIR:\n", noiselevel=-1)
15278 writemsg(prefix + "Overlay eclasses override " + \
15279 "eclasses from PORTDIR:\n", noiselevel=-1)
15280 writemsg(prefix + "\n", noiselevel=-1)
15281 for eclass_name in sorted(eclasses_overridden):
15282 writemsg(prefix + " '%s/%s.eclass'\n" % \
15283 (eclasses_overridden[eclass_name], eclass_name),
15285 writemsg(prefix + "\n", noiselevel=-1)
15286 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15287 "because it will trigger invalidation of cached ebuild metadata " + \
15288 "that is distributed with the portage tree. If you must " + \
15289 "override eclasses from PORTDIR then you are advised to add " + \
15290 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15291 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15292 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15293 "you would like to disable this warning."
15294 from textwrap import wrap
15295 for line in wrap(msg, 72):
15296 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15298 if "moo" in myfiles:
15301 Larry loves Gentoo (""" + platform.system() + """)
15303 _______________________
15304 < Have you mooed today? >
15305 -----------------------
15315 ext = os.path.splitext(x)[1]
15316 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15317 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15320 root_config = trees[settings["ROOT"]]["root_config"]
15321 if myaction == "list-sets":
15322 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15326 # only expand sets for actions taking package arguments
15327 oldargs = myfiles[:]
15328 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15329 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15330 if retval != os.EX_OK:
15333 # Need to handle empty sets specially, otherwise emerge will react
15334 # with the help message for empty argument lists
15335 if oldargs and not myfiles:
15336 print "emerge: no targets left after set expansion"
15339 if ("--tree" in myopts) and ("--columns" in myopts):
15340 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15343 if ("--quiet" in myopts):
15344 spinner.update = spinner.update_quiet
15345 portage.util.noiselimit = -1
15347 # Always create packages if FEATURES=buildpkg
15348 # Imply --buildpkg if --buildpkgonly
15349 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15350 if "--buildpkg" not in myopts:
15351 myopts["--buildpkg"] = True
15353 # Also allow -S to invoke search action (-sS)
15354 if ("--searchdesc" in myopts):
15355 if myaction and myaction != "search":
15356 myfiles.append(myaction)
15357 if "--search" not in myopts:
15358 myopts["--search"] = True
15359 myaction = "search"
15361 # Always try and fetch binary packages if FEATURES=getbinpkg
15362 if ("getbinpkg" in settings.features):
15363 myopts["--getbinpkg"] = True
15365 if "--buildpkgonly" in myopts:
15366 # --buildpkgonly will not merge anything, so
15367 # it cancels all binary package options.
15368 for opt in ("--getbinpkg", "--getbinpkgonly",
15369 "--usepkg", "--usepkgonly"):
15370 myopts.pop(opt, None)
15372 if "--fetch-all-uri" in myopts:
15373 myopts["--fetchonly"] = True
15375 if "--skipfirst" in myopts and "--resume" not in myopts:
15376 myopts["--resume"] = True
15378 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15379 myopts["--usepkgonly"] = True
15381 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15382 myopts["--getbinpkg"] = True
15384 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15385 myopts["--usepkg"] = True
15387 # Also allow -K to apply --usepkg/-k
15388 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15389 myopts["--usepkg"] = True
15391 # Allow -p to remove --ask
15392 if ("--pretend" in myopts) and ("--ask" in myopts):
15393 print ">>> --pretend disables --ask... removing --ask from options."
15394 del myopts["--ask"]
15396 # forbid --ask when not in a terminal
15397 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15398 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15399 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15403 if settings.get("PORTAGE_DEBUG", "") == "1":
15404 spinner.update = spinner.update_quiet
15406 if "python-trace" in settings.features:
15407 import portage.debug
15408 portage.debug.set_trace(True)
15410 if not ("--quiet" in myopts):
15411 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15412 spinner.update = spinner.update_basic
15414 if "--version" in myopts:
15415 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15416 settings.profile_path, settings["CHOST"],
15417 trees[settings["ROOT"]]["vartree"].dbapi)
15419 elif "--help" in myopts:
15420 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15423 if "--debug" in myopts:
15424 print "myaction", myaction
15425 print "myopts", myopts
15427 if not myaction and not myfiles and "--resume" not in myopts:
15428 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15431 pretend = "--pretend" in myopts
15432 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15433 buildpkgonly = "--buildpkgonly" in myopts
15435 # check if root user is the current user for the actions where emerge needs this
15436 if portage.secpass < 2:
15437 # We've already allowed "--version" and "--help" above.
15438 if "--pretend" not in myopts and myaction not in ("search","info"):
15439 need_superuser = not \
15441 (buildpkgonly and secpass >= 1) or \
15442 myaction in ("metadata", "regen") or \
15443 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15444 if portage.secpass < 1 or \
15447 access_desc = "superuser"
15449 access_desc = "portage group"
15450 # Always show portage_group_warning() when only portage group
15451 # access is required but the user is not in the portage group.
15452 from portage.data import portage_group_warning
15453 if "--ask" in myopts:
15454 myopts["--pretend"] = True
15455 del myopts["--ask"]
15456 print ("%s access is required... " + \
15457 "adding --pretend to options.\n") % access_desc
15458 if portage.secpass < 1 and not need_superuser:
15459 portage_group_warning()
15461 sys.stderr.write(("emerge: %s access is " + \
15462 "required.\n\n") % access_desc)
15463 if portage.secpass < 1 and not need_superuser:
15464 portage_group_warning()
15467 disable_emergelog = False
15468 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15470 disable_emergelog = True
15472 if myaction in ("search", "info"):
15473 disable_emergelog = True
15474 if disable_emergelog:
15475 """ Disable emergelog for everything except build or unmerge
15476 operations. This helps minimize parallel emerge.log entries that can
15477 confuse log parsers. We especially want it disabled during
15478 parallel-fetch, which uses --resume --fetchonly."""
15480 def emergelog(*pargs, **kargs):
15483 if not "--pretend" in myopts:
15484 emergelog(xterm_titles, "Started emerge on: "+\
15485 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15488 myelogstr=" ".join(myopts)
15490 myelogstr+=" "+myaction
15492 myelogstr += " " + " ".join(oldargs)
15493 emergelog(xterm_titles, " *** emerge " + myelogstr)
15496 def emergeexitsig(signum, frame):
15497 signal.signal(signal.SIGINT, signal.SIG_IGN)
15498 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15499 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15500 sys.exit(100+signum)
15501 signal.signal(signal.SIGINT, emergeexitsig)
15502 signal.signal(signal.SIGTERM, emergeexitsig)
15505 """This gets out final log message in before we quit."""
15506 if "--pretend" not in myopts:
15507 emergelog(xterm_titles, " *** terminating.")
15508 if "notitles" not in settings.features:
15510 portage.atexit_register(emergeexit)
15512 if myaction in ("config", "metadata", "regen", "sync"):
15513 if "--pretend" in myopts:
15514 sys.stderr.write(("emerge: The '%s' action does " + \
15515 "not support '--pretend'.\n") % myaction)
15518 if "sync" == myaction:
15519 return action_sync(settings, trees, mtimedb, myopts, myaction)
15520 elif "metadata" == myaction:
15521 action_metadata(settings, portdb, myopts)
15522 elif myaction=="regen":
15523 validate_ebuild_environment(trees)
15524 return action_regen(settings, portdb, myopts.get("--jobs"),
15525 myopts.get("--load-average"))
15527 elif "config"==myaction:
15528 validate_ebuild_environment(trees)
15529 action_config(settings, trees, myopts, myfiles)
15532 elif "search"==myaction:
15533 validate_ebuild_environment(trees)
15534 action_search(trees[settings["ROOT"]]["root_config"],
15535 myopts, myfiles, spinner)
15536 elif myaction in ("clean", "unmerge") or \
15537 (myaction == "prune" and "--nodeps" in myopts):
15538 validate_ebuild_environment(trees)
15540 # Ensure atoms are valid before calling unmerge().
15541 # For backward compat, leading '=' is not required.
15543 if is_valid_package_atom(x) or \
15544 is_valid_package_atom("=" + x):
15547 msg.append("'%s' is not a valid package atom." % (x,))
15548 msg.append("Please check ebuild(5) for full details.")
15549 writemsg_level("".join("!!! %s\n" % line for line in msg),
15550 level=logging.ERROR, noiselevel=-1)
15553 # When given a list of atoms, unmerge
15554 # them in the order given.
15555 ordered = myaction == "unmerge"
15556 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15557 mtimedb["ldpath"], ordered=ordered):
15558 if not (buildpkgonly or fetchonly or pretend):
15559 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15561 elif myaction in ("depclean", "info", "prune"):
15563 # Ensure atoms are valid before calling unmerge().
15564 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15567 if is_valid_package_atom(x):
15569 valid_atoms.append(
15570 portage.dep_expand(x, mydb=vardb, settings=settings))
15571 except portage.exception.AmbiguousPackageName, e:
15572 msg = "The short ebuild name \"" + x + \
15573 "\" is ambiguous. Please specify " + \
15574 "one of the following " + \
15575 "fully-qualified ebuild names instead:"
15576 for line in textwrap.wrap(msg, 70):
15577 writemsg_level("!!! %s\n" % (line,),
15578 level=logging.ERROR, noiselevel=-1)
15580 writemsg_level(" %s\n" % colorize("INFORM", i),
15581 level=logging.ERROR, noiselevel=-1)
15582 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15586 msg.append("'%s' is not a valid package atom." % (x,))
15587 msg.append("Please check ebuild(5) for full details.")
15588 writemsg_level("".join("!!! %s\n" % line for line in msg),
15589 level=logging.ERROR, noiselevel=-1)
15592 if myaction == "info":
15593 return action_info(settings, trees, myopts, valid_atoms)
15595 validate_ebuild_environment(trees)
15596 action_depclean(settings, trees, mtimedb["ldpath"],
15597 myopts, myaction, valid_atoms, spinner)
15598 if not (buildpkgonly or fetchonly or pretend):
15599 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15600 # "update", "system", or just process files:
15602 validate_ebuild_environment(trees)
15603 if "--pretend" not in myopts:
15604 display_news_notification(root_config, myopts)
15605 retval = action_build(settings, trees, mtimedb,
15606 myopts, myaction, myfiles, spinner)
15607 root_config = trees[settings["ROOT"]]["root_config"]
15608 post_emerge(root_config, myopts, mtimedb, retval)