2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
70 class stdout_spinner(object):
# Progress indicator written to stdout while long operations run.
# Three display modes (dots, scrolling message, twirling bar) plus a
# quiet no-op; self.update is bound to the active mode's method.
# NOTE(review): this numbered listing is elided (gaps in the embedded
# line numbers) — several statements of this class are not visible here.
# Messages for update_scroll(); one is chosen pseudo-randomly per run.
72 "Gentoo Rocks ("+platform.system()+")",
73 "Thank you for using Gentoo. :)",
74 "Are you actually trying to read this?",
75 "How many times have you stared at this?",
76 "We are generating the cache right now",
77 "You are paying too much attention.",
78 "A theory is better than its explanation.",
79 "Phasers locked on target, Captain.",
80 "Thrashing is just virtual crashing.",
81 "To be is to program.",
82 "Real Users hate Real Programmers.",
83 "When all else fails, read the instructions.",
84 "Functionality breeds Contempt.",
85 "The future lies ahead.",
86 "3.1415926535897932384626433832795028841971694",
87 "Sometimes insanity is the only alternative.",
88 "Inaccuracy saves a world of explanation.",
# Characters cycled by update_twirl().
91 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default mode is the twirl spinner; pick a scroll message seeded from
# the current time so different runs show different messages.
95 self.update = self.update_twirl
96 self.scroll_sequence = self.scroll_msgs[
97 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between actual writes; see _return_early().
99 self.min_display_latency = 0.05
101 def _return_early(self):
# Rate limiter shared by all update_* methods.
103 Flushing output to the tty too frequently wastes cpu time. Therefore,
104 each update* method should return without doing any output when this
107 cur_time = time.time()
108 if cur_time - self.last_update < self.min_display_latency:
110 self.last_update = cur_time
113 def update_basic(self):
# Print a dot every 100th call, with a leading space at wrap-around.
114 self.spinpos = (self.spinpos + 1) % 500
115 if self._return_early():
117 if (self.spinpos % 100) == 0:
118 if self.spinpos == 0:
119 sys.stdout.write(". ")
121 sys.stdout.write(".")
124 def update_scroll(self):
# Scroll the chosen message right-to-left, then left-to-right:
# the second half of the 2*len cycle replays characters in reverse.
125 if self._return_early():
127 if(self.spinpos >= len(self.scroll_sequence)):
128 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135 def update_twirl(self):
# Classic /-\| twirl: backspace over the previous character.
136 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137 if self._return_early():
139 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op variant used with --quiet / --nospinner.
142 def update_quiet(self):
145 def userquery(prompt, responses=None, colours=None):
146 """Displays a prompt and a set of responses, then waits for a response
147 which is checked against the responses and the first to match is
148 returned. An empty response will match the first value in responses. The
149 input buffer is *not* cleared prior to the prompt!
152 responses: a List of Strings.
153 colours: a List of Functions taking and returning a String, used to
154 process the responses for display. Typically these will be functions
155 like red() but could be e.g. lambda x: "DisplayString".
156 If responses is omitted, defaults to ["Yes", "No"], [green, red].
157 If only colours is omitted, defaults to [bold, ...].
159 Returns a member of the List responses. (If called without optional
160 arguments, returns "Yes" or "No".)
161 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
# Default to a Yes/No question with the standard prompt colours.
163 if responses is None:
164 responses = ["Yes", "No"]
166 create_color_func("PROMPT_CHOICE_DEFAULT"),
167 create_color_func("PROMPT_CHOICE_OTHER")
169 elif colours is None:
# Repeat the colour list so it is at least as long as responses.
171 colours=(colours*len(responses))[:len(responses)]
# Re-prompt until input matches a response prefix (case-insensitive).
175 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176 for key in responses:
177 # An empty response will match the first value in responses.
178 if response.upper()==key[:len(response)].upper():
180 print "Sorry, response '%s' not understood." % response,
# Ctrl-D / Ctrl-C abort the query instead of raising a traceback.
181 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary for emerge: valid action words, long options,
# and the single-letter -> long-option mapping.
# NOTE(review): the listing is elided here; several entries of each
# collection are not visible in this view.
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen", "search",
192 "--ask", "--alphabetical",
193 "--buildpkg", "--buildpkgonly",
194 "--changelog", "--columns",
199 "--fetchonly", "--fetch-all-uri",
200 "--getbinpkg", "--getbinpkgonly",
201 "--help", "--ignore-default-opts",
204 "--newuse", "--nocolor",
205 "--nodeps", "--noreplace",
206 "--nospinner", "--oneshot",
207 "--onlydeps", "--pretend",
208 "--quiet", "--resume",
209 "--searchdesc", "--selective",
213 "--usepkg", "--usepkgonly",
214 "--verbose", "--version"
# Short-option letters mapped to their long equivalents.
220 "b":"--buildpkg", "B":"--buildpkgonly",
221 "c":"--clean", "C":"--unmerge",
222 "d":"--debug", "D":"--deep",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "k":"--usepkg", "K":"--usepkgonly",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps", "O":"--nodeps",
231 "p":"--pretend", "P":"--prune",
233 "s":"--search", "S":"--searchdesc",
236 "v":"--verbose", "V":"--version"
239 def emergelog(xterm_titles, mystr, short_msg=None):
# Append a timestamped entry to /var/log/emerge.log and optionally
# mirror a short form of the message to the xterm title bar.
# Errors are reported to stderr but never propagated to the caller.
240 if xterm_titles and short_msg:
241 if "HOSTNAME" in os.environ:
242 short_msg = os.environ["HOSTNAME"]+": "+short_msg
243 xtermTitle(short_msg)
245 file_path = "/var/log/emerge.log"
246 mylogfile = open(file_path, "a")
# Keep the log owned by the portage user/group regardless of who runs us.
247 portage.util.apply_secpass_permissions(file_path,
248 uid=portage.portage_uid, gid=portage.portage_gid,
252 mylock = portage.locks.lockfile(mylogfile)
253 # seek because we may have gotten held up by the lock.
254 # if so, we may not be positioned at the end of the file.
# Timestamp truncated to whole seconds ([:10]).
256 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
260 portage.locks.unlockfile(mylock)
# Best-effort logging: swallow I/O and portage errors, just report them.
262 except (IOError,OSError,portage.exception.PortageException), e:
264 print >> sys.stderr, "emergelog():",e
266 def countdown(secs=5, doing="Starting"):
# Print a visible countdown before a destructive/irreversible action,
# giving the user a window to hit Control-C.
268 print ">>> Waiting",secs,"seconds before starting..."
269 print ">>> (Control-C to abort)...\n"+doing+" in: ",
273 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
278 # formats a size given in bytes nicely
# Returns a human-readable kB string; rounds partial kB up and inserts
# a thousands separator. Strings are handled specially (listing elided).
279 def format_size(mysize):
280 if isinstance(mysize, basestring):
282 if 0 != mysize % 1024:
283 # Always round up to the next kB so that it doesn't show 0 kB when
284 # some small file still needs to be fetched.
285 mysize += 1024 - mysize % 1024
286 mystr=str(mysize/1024)
# Insert a comma as thousands separator (mycount set on an elided line).
290 mystr=mystr[:mycount]+","+mystr[mycount:]
294 def getgccversion(chost):
297 return: the current in-use gcc version
300 gcc_ver_command = 'gcc -dumpversion'
301 gcc_ver_prefix = 'gcc-'
303 gcc_not_found_error = red(
304 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305 "!!! to update the environment of this terminal and possibly\n" +
306 "!!! other terminals also.\n"
# Strategy: try gcc-config first, then the CHOST-prefixed gcc, then a
# bare gcc; return the first one that succeeds.
309 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313 mystatus, myoutput = commands.getstatusoutput(
314 chost + "-" + gcc_ver_command)
315 if mystatus == os.EX_OK:
316 return gcc_ver_prefix + myoutput
318 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319 if mystatus == os.EX_OK:
320 return gcc_ver_prefix + myoutput
# Nothing worked: warn the user and return a placeholder.
322 portage.writemsg(gcc_not_found_error, noiselevel=-1)
323 return "[unavailable]"
325 def getportageversion(portdir, target_root, profile, chost, vardb):
# Build the one-line version banner shown by `emerge --version`:
# portage version plus profile, gcc, libc and kernel/arch info.
326 profilever = "unavailable"
# Express the profile path relative to $PORTDIR/profiles when possible;
# otherwise fall back to the raw symlink target (prefixed with "!").
328 realpath = os.path.realpath(profile)
329 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
330 if realpath.startswith(basepath):
331 profilever = realpath[1 + len(basepath):]
334 profilever = "!" + os.readlink(profile)
337 del realpath, basepath
# Determine the installed libc version from the virtuals.
340 libclist = vardb.match("virtual/libc")
341 libclist += vardb.match("virtual/glibc")
342 libclist = portage.util.unique_array(libclist)
344 xs=portage.catpkgsplit(x)
346 libcver+=","+"-".join(xs[1:])
348 libcver="-".join(xs[1:])
350 libcver="unavailable"
352 gccver = getgccversion(chost)
353 unameout=platform.release()+" "+platform.machine()
355 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357 def create_depgraph_params(myopts, myaction):
358 #configure emerge engine parameters
# Translate command-line options/action into the set of depgraph
# parameter flags documented below. Returns that set.
360 # self: include _this_ package regardless of if it is merged.
361 # selective: exclude the package if it is merged
362 # recurse: go into the dependencies
363 # deep: go into the dependencies of already merged packages
364 # empty: pretend nothing is merged
365 # complete: completely account for all known dependencies
366 # remove: build graph for use in removing packages
367 myparams = set(["recurse"])
369 if myaction == "remove":
370 myparams.add("remove")
371 myparams.add("complete")
# Any option that avoids reinstalling merged packages implies "selective".
374 if "--update" in myopts or \
375 "--newuse" in myopts or \
376 "--reinstall" in myopts or \
377 "--noreplace" in myopts:
378 myparams.add("selective")
# --emptytree overrides selectivity: rebuild everything from scratch.
379 if "--emptytree" in myopts:
380 myparams.add("empty")
381 myparams.discard("selective")
382 if "--nodeps" in myopts:
383 myparams.discard("recurse")
384 if "--deep" in myopts:
386 if "--complete-graph" in myopts:
387 myparams.add("complete")
390 # search functionality
391 class search(object):
# Implements `emerge --search` / `--searchdesc`: matches package names,
# descriptions and package sets across the ebuild, binary and installed
# package databases, then pretty-prints the results.
# NOTE(review): the listing is elided (line-number gaps); some
# statements of this class are not visible in this view.
402 def __init__(self, root_config, spinner, searchdesc,
403 verbose, usepkg, usepkgonly):
404 """Searches the available and installed packages for the supplied search key.
405 The list of available and installed packages is created at object instantiation.
406 This makes successive searches faster."""
407 self.settings = root_config.settings
408 self.vartree = root_config.trees["vartree"]
409 self.spinner = spinner
410 self.verbose = verbose
411 self.searchdesc = searchdesc
412 self.root_config = root_config
413 self.setconfig = root_config.setconfig
414 self.matches = {"pkg" : []}
# self.portdb is a stand-in object whose db methods delegate to the
# _-prefixed multiplexing wrappers below, so searches span all dbs.
419 self.portdb = fake_portdb
420 for attrib in ("aux_get", "cp_all",
421 "xmatch", "findname", "getFetchMap"):
422 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
# Select which databases participate, based on --usepkg/--usepkgonly.
426 portdb = root_config.trees["porttree"].dbapi
427 bindb = root_config.trees["bintree"].dbapi
428 vardb = root_config.trees["vartree"].dbapi
430 if not usepkgonly and portdb._have_root_eclass_dir:
431 self._dbs.append(portdb)
433 if (usepkg or usepkgonly) and bindb.cp_all():
434 self._dbs.append(bindb)
436 self._dbs.append(vardb)
437 self._portdb = portdb
# Union of category/package names across all participating dbs.
442 cp_all.update(db.cp_all())
443 return list(sorted(cp_all))
445 def _aux_get(self, *args, **kwargs):
# Delegate metadata lookup to the first db that can answer it.
448 return db.aux_get(*args, **kwargs)
453 def _findname(self, *args, **kwargs):
455 if db is not self._portdb:
456 # We don't want findname to return anything
457 # unless it's an ebuild in a portage tree.
458 # Otherwise, it's already built and we don't
461 func = getattr(db, "findname", None)
463 value = func(*args, **kwargs)
468 def _getFetchMap(self, *args, **kwargs):
# Delegate SRC_URI fetch-map lookup to the first db providing it.
470 func = getattr(db, "getFetchMap", None)
472 value = func(*args, **kwargs)
477 def _visible(self, db, cpv, metadata):
# Visibility check wrapping the global visible() helper with a Package
# instance of the appropriate type for the originating db.
478 installed = db is self.vartree.dbapi
479 built = installed or db is not self._portdb
482 pkg_type = "installed"
485 return visible(self.settings,
486 Package(type_name=pkg_type, root_config=self.root_config,
487 cpv=cpv, built=built, installed=installed, metadata=metadata))
489 def _xmatch(self, level, atom):
491 This method does not expand old-style virtuals because it
492 is restricted to returning matches for a single ${CATEGORY}/${PN}
493 and old-style virtual matches are unreliable for that when querying
494 multiple package databases. If necessary, old-style virtuals
495 can be performed on atoms prior to calling this method.
497 cp = portage.dep_getkey(atom)
498 if level == "match-all":
# Union of matches across all dbs, restricted to this cp.
501 if hasattr(db, "xmatch"):
502 matches.update(db.xmatch(level, atom))
504 matches.update(db.match(atom))
505 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506 db._cpv_sort_ascending(result)
507 elif level == "match-visible":
# As above, but dbs without xmatch get a per-cpv visibility filter.
510 if hasattr(db, "xmatch"):
511 matches.update(db.xmatch(level, atom))
513 db_keys = list(db._aux_cache_keys)
514 for cpv in db.match(atom):
515 metadata = izip(db_keys,
516 db.aux_get(cpv, db_keys))
517 if not self._visible(db, cpv, metadata):
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
522 elif level == "bestmatch-visible":
# Keep the single highest visible version seen in any db.
525 if hasattr(db, "xmatch"):
526 cpv = db.xmatch("bestmatch-visible", atom)
527 if not cpv or portage.cpv_getkey(cpv) != cp:
529 if not result or cpv == portage.best([cpv, result]):
532 db_keys = Package.metadata_keys
533 # break out of this loop with highest visible
534 # match, checked in descending order
535 for cpv in reversed(db.match(atom)):
536 if portage.cpv_getkey(cpv) != cp:
538 metadata = izip(db_keys,
539 db.aux_get(cpv, db_keys))
540 if not self._visible(db, cpv, metadata):
542 if not result or cpv == portage.best([cpv, result]):
# Any other xmatch level is unsupported by this multiplexer.
546 raise NotImplementedError(level)
549 def execute(self,searchkey):
550 """Performs the search for the supplied search key"""
552 self.searchkey=searchkey
553 self.packagematches = []
556 self.matches = {"pkg":[], "desc":[], "set":[]}
559 self.matches = {"pkg":[], "set":[]}
560 print "Searching... ",
# Leading '%' means regex search; leading '@' restricts to sets.
563 if self.searchkey.startswith('%'):
565 self.searchkey = self.searchkey[1:]
566 if self.searchkey.startswith('@'):
568 self.searchkey = self.searchkey[1:]
570 self.searchre=re.compile(self.searchkey,re.I)
572 self.searchre=re.compile(re.escape(self.searchkey), re.I)
# Pass 1: match against package names.
573 for package in self.portdb.cp_all():
574 self.spinner.update()
577 match_string = package[:]
579 match_string = package.split("/")[-1]
582 if self.searchre.search(match_string):
# No visible match means the package is entirely masked.
583 if not self.portdb.xmatch("match-visible", package):
585 self.matches["pkg"].append([package,masked])
586 elif self.searchdesc: # DESCRIPTION searching
587 full_package = self.portdb.xmatch("bestmatch-visible", package)
589 #no match found; we don't want to query description
590 full_package = portage.best(
591 self.portdb.xmatch("match-all", package))
597 full_desc = self.portdb.aux_get(
598 full_package, ["DESCRIPTION"])[0]
600 print "emerge: search: aux_get() failed, skipping"
602 if self.searchre.search(full_desc):
603 self.matches["desc"].append([full_package,masked])
# Pass 2: match against package sets (names and, optionally, their
# DESCRIPTION metadata).
605 self.sdict = self.setconfig.getSets()
606 for setname in self.sdict:
607 self.spinner.update()
609 match_string = setname
611 match_string = setname.split("/")[-1]
613 if self.searchre.search(match_string):
614 self.matches["set"].append([setname, False])
615 elif self.searchdesc:
616 if self.searchre.search(
617 self.sdict[setname].getMetadata("DESCRIPTION")):
618 self.matches["set"].append([setname, False])
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
# Helper used for direct cp additions (listing elided around here).
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
# Set matches: print name plus the set's DESCRIPTION metadata.
656 print green("*")+" "+white(match)
657 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
661 desc, homepage, license = self.portdb.aux_get(
662 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664 print "emerge: search: aux_get() failed, skipping"
667 print green("*")+" "+white(match)+" "+red("[ Masked ]")
669 print green("*")+" "+white(match)
670 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
# Verbose mode: compute the total download size of the distfiles.
674 mycat = match.split("/")[0]
675 mypkg = match.split("/")[1]
676 mycpv = match + "-" + myversion
677 myebuild = self.portdb.findname(mycpv)
679 pkgdir = os.path.dirname(myebuild)
680 from portage import manifest
681 mf = manifest.Manifest(
682 pkgdir, self.settings["DISTDIR"])
684 uri_map = self.portdb.getFetchMap(mycpv)
685 except portage.exception.InvalidDependString, e:
686 file_size_str = "Unknown (%s)" % (e,)
690 mysum[0] = mf.getDistfilesSize(uri_map)
692 file_size_str = "Unknown (missing " + \
693 "digest for %s)" % (e,)
# Binary packages: report the size of the package file itself.
698 if db is not vardb and \
699 db.cpv_exists(mycpv):
701 if not myebuild and hasattr(db, "bintree"):
702 myebuild = db.bintree.getname(mycpv)
704 mysum[0] = os.stat(myebuild).st_size
709 if myebuild and file_size_str is None:
# Format byte count as kB with a thousands separator.
710 mystr = str(mysum[0] / 1024)
714 mystr = mystr[:mycount] + "," + mystr[mycount:]
715 file_size_str = mystr + " kB"
719 print " ", darkgreen("Latest version available:"),myversion
720 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
723 (darkgreen("Size of files:"), file_size_str)
724 print " ", darkgreen("Homepage:")+" ",homepage
725 print " ", darkgreen("Description:")+" ",desc
726 print " ", darkgreen("License:")+" ",license
731 def getInstallationStatus(self,package):
# Report the installed version of `package`, or "[ Not Installed ]".
732 installed_package = self.vartree.dep_bestmatch(package)
734 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736 result = darkgreen("Latest version installed:")+" "+version
738 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741 def getVersion(self,full_package,detail):
# Extract the version (and -rN revision, unless it is r0) from a cpv.
742 if len(full_package) > 1:
743 package_parts = portage.catpkgsplit(full_package)
744 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745 result = package_parts[2]+ "-" + package_parts[3]
747 result = package_parts[2]
752 class RootConfig(object):
753 """This is used internally by depgraph to track information about a
# Maps Package.type_name values to the tree name holding that type.
757 "ebuild" : "porttree",
758 "binary" : "bintree",
759 "installed" : "vartree"
# Build the inverse (tree name -> package type) mapping.
763 for k, v in pkg_tree_map.iteritems():
766 def __init__(self, settings, trees, setconfig):
768 self.settings = settings
769 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770 self.root = self.settings["ROOT"]
771 self.setconfig = setconfig
772 self.sets = self.setconfig.getSets()
773 self.visible_pkgs = PackageVirtualDbapi(self.settings)
775 def create_world_atom(pkg, args_set, root_config):
776 """Create a new atom for the world file if one does not exist. If the
777 argument atom is precise enough to identify a specific slot then a slot
778 atom will be returned. Atoms that are in the system set may also be stored
779 in world since system atoms can only match one slot while world atoms can
780 be greedy with respect to slots. Unslotted system packages will not be
# NOTE(review): listing is elided (line-number gaps); some statements
# of this function are not visible in this view.
783 arg_atom = args_set.findAtomForPackage(pkg)
786 cp = portage.dep_getkey(arg_atom)
788 sets = root_config.sets
789 portdb = root_config.trees["porttree"].dbapi
790 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" if more than one SLOT exists, or the single
# SLOT is something other than "0".
791 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792 for cpv in portdb.match(cp))
793 slotted = len(available_slots) > 1 or \
794 (len(available_slots) == 1 and "0" not in available_slots)
796 # check the vdb in case this is multislot
797 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798 for cpv in vardb.match(cp))
799 slotted = len(available_slots) > 1 or \
800 (len(available_slots) == 1 and "0" not in available_slots)
801 if slotted and arg_atom != cp:
802 # If the user gave a specific atom, store it as a
803 # slot atom in the world file.
804 slot_atom = pkg.slot_atom
806 # For USE=multislot, there are a couple of cases to
809 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810 # unknown value, so just record an unslotted atom.
812 # 2) SLOT comes from an installed package and there is no
813 # matching SLOT in the portage tree.
815 # Make sure that the slot atom is available in either the
816 # portdb or the vardb, since otherwise the user certainly
817 # doesn't want the SLOT atom recorded in the world file
818 # (case 1 above). If it's only available in the vardb,
819 # the user may be trying to prevent a USE=multislot
820 # package from being removed by --depclean (case 2 above).
823 if not portdb.match(slot_atom):
824 # SLOT seems to come from an installed multislot package
826 # If there is no installed package matching the SLOT atom,
827 # it probably changed SLOT spontaneously due to USE=multislot,
828 # so just record an unslotted atom.
829 if vardb.match(slot_atom):
830 # Now verify that the argument is precise
831 # enough to identify a specific slot.
832 matches = mydb.match(arg_atom)
833 matched_slots = set()
835 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836 if len(matched_slots) == 1:
837 new_world_atom = slot_atom
839 if new_world_atom == sets["world"].findAtomForPackage(pkg):
840 # Both atoms would be identical, so there's nothing to add.
843 # Unlike world atoms, system atoms are not greedy for slots, so they
844 # can't be safely excluded from world if they are slotted.
845 system_atom = sets["system"].findAtomForPackage(pkg)
847 if not portage.dep_getkey(system_atom).startswith("virtual/"):
849 # System virtuals aren't safe to exclude from world since they can
850 # match multiple old-style virtuals but only one of them will be
851 # pulled in by update or depclean.
852 providers = portdb.mysettings.getvirtuals().get(
853 portage.dep_getkey(system_atom))
854 if providers and len(providers) == 1 and providers[0] == cp:
856 return new_world_atom
858 def filter_iuse_defaults(iuse):
# Generator stripping the EAPI-1 "+"/"-" default prefixes from IUSE flags.
860 if flag.startswith("+") or flag.startswith("-"):
865 class SlotObject(object):
# Lightweight base class: attributes are declared via __slots__ and
# initialized from keyword arguments; also provides a shallow copy.
866 __slots__ = ("__weakref__",)
868 def __init__(self, **kwargs):
# Walk the MRO manually so slots declared on every ancestor class
# are initialized from kwargs (missing ones default to None).
869 classes = [self.__class__]
874 classes.extend(c.__bases__)
875 slots = getattr(c, "__slots__", None)
879 myvalue = kwargs.get(myattr, None)
880 setattr(self, myattr, myvalue)
884 Create a new instance and copy all attributes
885 defined from __slots__ (including those from
888 obj = self.__class__()
# Same class walk as __init__, copying attribute values across.
890 classes = [self.__class__]
895 classes.extend(c.__bases__)
896 slots = getattr(c, "__slots__", None)
900 setattr(obj, myattr, getattr(self, myattr))
904 class AbstractDepPriority(SlotObject):
# Base class for dependency priorities. Subclasses provide __int__();
# all rich comparisons delegate to that integer value so priorities
# compare naturally against each other and against plain ints.
905 __slots__ = ("buildtime", "runtime", "runtime_post")
907 def __lt__(self, other):
908 return self.__int__() < other
910 def __le__(self, other):
911 return self.__int__() <= other
913 def __eq__(self, other):
914 return self.__int__() == other
916 def __ne__(self, other):
917 return self.__int__() != other
919 def __gt__(self, other):
920 return self.__int__() > other
922 def __ge__(self, other):
923 return self.__int__() >= other
# Shallow copy is sufficient since all slot values are immutable here.
927 return copy.copy(self)
929 class DepPriority(AbstractDepPriority):
# Priority for normal (non-unmerge) dependency edges; adds flags for
# satisfied/optional/rebuild state on top of the abstract base.
931 __slots__ = ("satisfied", "optional", "rebuild")
# Partial view of the string representation (listing elided).
943 if self.runtime_post:
944 return "runtime_post"
947 class BlockerDepPriority(DepPriority):
# Shared singleton priority used for blocker edges.
955 BlockerDepPriority.instance = BlockerDepPriority()
957 class UnmergeDepPriority(AbstractDepPriority):
958 __slots__ = ("optional", "satisfied",)
960 Combination of properties Priority Category
965 (none of the above) -2 SOFT
# __int__ mapping (partially elided in this listing).
975 if self.runtime_post:
# String form: anything above the SOFT threshold is "hard".
982 myvalue = self.__int__()
983 if myvalue > self.SOFT:
987 class DepPriorityNormalRange(object):
# Defines the ordered range of "ignore" predicates used when the
# dependency resolver needs to break cycles: each classmethod returns
# True if the given priority may be ignored at that softness level.
989 DepPriority properties Index Category
993 runtime_post 2 MEDIUM_SOFT
995 (none of the above) 0 NONE
1003 def _ignore_optional(cls, priority):
# Non-DepPriority instances are never ignorable.
1004 if priority.__class__ is not DepPriority:
1006 return bool(priority.optional)
1009 def _ignore_runtime_post(cls, priority):
1010 if priority.__class__ is not DepPriority:
1012 return bool(priority.optional or priority.runtime_post)
1015 def _ignore_runtime(cls, priority):
1016 if priority.__class__ is not DepPriority:
1018 return not priority.buildtime
# Aliases naming the predicate for each softness category.
1020 ignore_medium = _ignore_runtime
1021 ignore_medium_soft = _ignore_runtime_post
1022 ignore_soft = _ignore_optional
# Predicates ordered from least to most aggressive.
1024 DepPriorityNormalRange.ignore_priority = (
1026 DepPriorityNormalRange._ignore_optional,
1027 DepPriorityNormalRange._ignore_runtime_post,
1028 DepPriorityNormalRange._ignore_runtime
1031 class DepPrioritySatisfiedRange(object):
# Like DepPriorityNormalRange, but additionally distinguishes
# already-satisfied dependencies, which are softer and may be ignored
# earlier when breaking cycles.
1033 DepPriority Index Category
1035 not satisfied and buildtime HARD
1036 not satisfied and runtime 7 MEDIUM
1037 not satisfied and runtime_post 6 MEDIUM_SOFT
1038 satisfied and buildtime and rebuild 5 SOFT
1039 satisfied and buildtime 4 SOFT
1040 satisfied and runtime 3 SOFT
1041 satisfied and runtime_post 2 SOFT
1043 (none of the above) 0 NONE
1051 def _ignore_optional(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional)
1057 def _ignore_satisfied_runtime_post(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 if priority.optional:
1062 if not priority.satisfied:
1064 return bool(priority.runtime_post)
1067 def _ignore_satisfied_runtime(cls, priority):
1068 if priority.__class__ is not DepPriority:
1070 if priority.optional:
1072 if not priority.satisfied:
1074 return not priority.buildtime
1077 def _ignore_satisfied_buildtime(cls, priority):
1078 if priority.__class__ is not DepPriority:
1080 if priority.optional:
1082 if not priority.satisfied:
# Satisfied buildtime deps are ignorable unless flagged for rebuild.
1084 if priority.buildtime:
1085 return not priority.rebuild
1089 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090 if priority.__class__ is not DepPriority:
1092 if priority.optional:
1094 return bool(priority.satisfied)
1097 def _ignore_runtime_post(cls, priority):
1098 if priority.__class__ is not DepPriority:
1100 return bool(priority.optional or \
1101 priority.satisfied or \
1102 priority.runtime_post)
1105 def _ignore_runtime(cls, priority):
1106 if priority.__class__ is not DepPriority:
1108 return bool(priority.satisfied or \
1109 not priority.buildtime)
# Aliases naming the predicate for each softness category.
1111 ignore_medium = _ignore_runtime
1112 ignore_medium_soft = _ignore_runtime_post
1113 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Predicates ordered from least to most aggressive.
1115 DepPrioritySatisfiedRange.ignore_priority = (
1117 DepPrioritySatisfiedRange._ignore_optional,
1118 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122 DepPrioritySatisfiedRange._ignore_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_runtime
1126 def _find_deep_system_runtime_deps(graph):
# Depth-first traversal collecting all Package nodes reachable from
# system-set packages via runtime dependency edges only. Returns the
# resulting set; uninstall operations and non-Package nodes are skipped.
1127 deep_system_deps = set()
# Seed the stack with system-set packages present in the graph.
1130 if not isinstance(node, Package) or \
1131 node.operation == 'uninstall':
1133 if node.root_config.sets['system'].findAtomForPackage(node):
1134 node_stack.append(node)
1136 def ignore_priority(priority):
1138 Ignore non-runtime priorities.
1140 if isinstance(priority, DepPriority) and \
1141 (priority.runtime or priority.runtime_post):
# Iterative DFS; the visited-set check prevents revisiting nodes.
1146 node = node_stack.pop()
1147 if node in deep_system_deps:
1149 deep_system_deps.add(node)
1150 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151 if not isinstance(child, Package) or \
1152 child.operation == 'uninstall':
1154 node_stack.append(child)
1156 return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This is implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do it's
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Intialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
    def sync(self, acquire_lock=1):
        """
        Call this method to synchronize state with the real vardb
        after one or more packages may have been installed or
        uninstalled.

        @param acquire_lock: when true (default), lock the vdb directory
            while reading, if it is writable by this process.

        NOTE(review): try/except/finally scaffolding and several early
        returns are elided in this excerpt; visible statements preserved.
        """
        vdb_path = os.path.join(self.root, portage.VDB_PATH)
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
        except portage.exception.PortageException:
        if acquire_lock and os.access(vdb_path, os.W_OK):
            vdb_lock = portage.locks.lockdir(vdb_path)
        # Release the lock as soon as the snapshot below is taken.
        portage.locks.unlockdir(vdb_lock)
        real_vardb = self._root_config.trees["vartree"].dbapi
        current_cpv_set = frozenset(real_vardb.cpv_all())
        pkg_vardb = self.dbapi
        aux_get_history = self._aux_get_history

        # Remove any packages that have been uninstalled.
        for pkg in list(pkg_vardb):
            if pkg.cpv not in current_cpv_set:
                pkg_vardb.cpv_remove(pkg)
                aux_get_history.discard(pkg.cpv)

        # Validate counters and timestamps.
        validation_keys = ["COUNTER", "_mtime_"]
        for cpv in current_cpv_set:
            pkg_hash_key = ("installed", root, cpv, "nomerge")
            pkg = pkg_vardb.get(pkg_hash_key)
            counter, mtime = real_vardb.aux_get(cpv, validation_keys)
            counter = long(counter)
            # Drop the cached entry when its COUNTER or mtime no longer
            # matches the real vardb (rest of the condition elided here).
            if counter != pkg.counter or \
                pkg_vardb.cpv_remove(pkg)
                aux_get_history.discard(pkg.cpv)
            # Re-read and re-inject the package from the real vardb.
            pkg = self._pkg(cpv)
            # Track the highest counter seen per slot atom.
            other_counter = slot_counters.get(pkg.slot_atom)
            if other_counter is not None:
                if other_counter > pkg.counter:
            slot_counters[pkg.slot_atom] = pkg.counter
            pkg_vardb.cpv_inject(pkg)

        real_vardb.flush_cache()
    def _pkg(self, cpv):
        """
        Build an installed-type Package instance for *cpv* using metadata
        read from the real vardb.

        NOTE(review): the COUNTER-normalization try/except and the final
        `return pkg` are elided in this excerpt.
        """
        root_config = self._root_config
        real_vardb = root_config.trees["vartree"].dbapi
        pkg = Package(cpv=cpv, installed=True,
            metadata=izip(self._db_keys,
            real_vardb.aux_get(cpv, self._db_keys)),
            root_config=root_config,
            type_name="installed")
        # Normalize COUNTER to a canonical integer string.
        mycounter = long(pkg.metadata["COUNTER"])
        pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
    """
    Read and parse all update commands (package moves / slotmoves) from
    *portdir*/profiles/updates.

    NOTE(review): the accumulator initialization, the DirectoryNotFound
    early-return body, and the final return are elided in this excerpt.
    """
    from portage.update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
    rawupdates = grab_updates(updpath)
    except portage.exception.DirectoryNotFound:
    for mykey, mystat, mycontent in rawupdates:
        # parse_updates returns (commands, errors); errors are ignored here.
        commands, errors = parse_updates(mycontent)
        upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
    """
    Apply profile update commands (package moves / slotmoves) to the
    dependency metadata of a single package in the given dbapi.

    @param mycpv: cpv of the package whose metadata should be updated
    @param mydb: a dbapi instance providing aux_get() and aux_update()
    @param mycommands: parsed update commands, as returned by
        grab_global_updates()
    """
    from portage.update import update_dbentries
    aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
    updates = update_dbentries(mycommands, aux_dict)
    # Only write back when update_dbentries actually changed something;
    # an unconditional aux_update() would needlessly rewrite metadata.
    if updates:
        mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
    """
    Check if a package is visible. This can raise an InvalidDependString
    exception if LICENSE is invalid.
    TODO: optionally generate a list of masking reasons

    @returns: True if the package is visible, False otherwise.

    NOTE(review): every `return False` / `return True` body is elided in
    this excerpt; only the visibility conditions themselves are visible.
    """
    # A missing SLOT indicates corrupt metadata.
    if not pkg.metadata["SLOT"]:
    if not pkg.installed:
        # CHOST acceptance only matters for packages not yet installed.
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
    eapi = pkg.metadata["EAPI"]
    if not portage.eapi_is_supported(eapi):
    if not pkg.installed:
        if portage._eapi_is_deprecated(eapi):
    if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
    if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
    if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
    # License check may raise InvalidDependString (see docstring).
    if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
    except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
    """
    Collect human-readable masking reasons for *pkg*.

    NOTE(review): the mreasons initialization and final return are elided
    in this excerpt.
    """
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)
    if not pkg.installed:
        # Unaccepted CHOST is only a mask reason for uninstalled packages.
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])
    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """
    Fetch metadata for *cpv* from *db* and compute its masking reasons.

    @returns: (metadata, mreasons) tuple; metadata is None on aux_get
        failure (reported as "corruption").

    NOTE(review): the try/except around aux_get and the else: branch are
    elided in this excerpt.
    """
    metadata = dict(izip(db_keys,
        db.aux_get(cpv, db_keys)))
    if metadata and not built:
        # For unbuilt ebuilds, compute USE from the current config.
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
        metadata['CHOST'] = pkgsettings.get('CHOST', '')
    if metadata is None:
        mreasons = ["corruption"]
    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)
    mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """
    Print each masked package with its mask reasons, the package.mask
    comment (once per distinct comment) and license file locations (once
    per distinct license).

    @returns: True if any package was masked due to an unsupported EAPI.

    NOTE(review): shown_cpvs initialization, several continue/print lines
    and the license-display conditional are elided in this excerpt.
    """
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
        comment, filename = None, None
        if "package.mask" in mreasons:
            # Look up the comment block and file that masked this cpv.
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
        missing_licenses = \
            pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
        if comment and comment not in shown_comments:
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            # Show each license's location only once.
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.") % (l, l_path)
            shown_licenses.add(l)
    return have_eapi_mask
class Task(SlotObject):
    """
    Base class for hashable tasks: identity, equality and container
    behavior are all delegated to a tuple hash key supplied by the
    subclass via _get_hash_key().

    NOTE(review): several dunder method headers (__hash__, __len__,
    __iter__, __str__) and some return lines are elided in this excerpt;
    the surviving statements are reproduced unchanged.
    """
    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        # Subclasses must populate self._hash_key lazily.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            raise NotImplementedError(self)

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

    # [def __hash__(self): header elided]
        # Cache the hash since the key tuple is immutable once computed.
        hash_value = getattr(self, "_hash_value", None)
        if hash_value is None:
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

    # [def __len__(self): header elided]
        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

    # [def __iter__(self): header elided]
        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

    # [def __str__(self): header elided]
        return str(self._get_hash_key())
class Blocker(Task):
    """
    Represents a blocker atom ("!cat/pkg") for a given root; hashable via
    the ("blocks", root, atom, eapi) key.
    """
    # Inherit Task's cached hash (defining __eq__ would otherwise
    # suppress it).
    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Cache the category/package key of the blocker atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # NOTE(review): the `self._hash_key = \` assignment line is
            # elided in this excerpt.
            ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
class Package(Task):
    """
    A hashable representation of one package instance (ebuild, binary or
    installed), carrying its metadata and derived attributes (cp,
    slot_atom, category, version split, ...).

    NOTE(review): this excerpt elides the metadata_keys assignment header,
    the _use class header, parts of _iuse.__init__, and the bodies of the
    rich-comparison returns; surviving statements are reproduced unchanged.
    """
    __hash__ = Task.__hash__
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root_config", "type_name",
        "category", "counter", "cp", "cpv_split",
        "inherited", "iuse", "mtime",
        "pf", "pv_split", "root", "slot", "slot_atom", "use")

    # [metadata_keys = [ ... header elided] — aux_get keys cached per package.
        "CHOST", "COUNTER", "DEPEND", "EAPI",
        "INHERITED", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.root = self.root_config.root
        # Wrap metadata so attribute caches stay in sync with updates.
        self.metadata = _PackageMetadataWrapper(self, self.metadata)
        self.cp = portage.cpv_getkey(self.cpv)
        self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]

    # [class _use(object): header elided] — immutable enabled-USE wrapper.
        __slots__ = ("__weakref__", "enabled")

        def __init__(self, use):
            self.enabled = frozenset(use)

    class _iuse(object):
        """Parsed IUSE: enabled/disabled defaults plus a lazy match regex."""

        __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

        def __init__(self, tokens, iuse_implicit):
            self.tokens = tuple(tokens)
            self.iuse_implicit = iuse_implicit
            # [token classification loop partially elided: "+"/"-" prefixes]
            enabled.append(x[1:])
            disabled.append(x[1:])
            self.enabled = frozenset(enabled)
            self.disabled = frozenset(disabled)
            self.all = frozenset(chain(enabled, disabled, other))

        def __getattribute__(self, name):
            # [try:/"regex" fast-path lines partially elided]
                return object.__getattribute__(self, "regex")
            except AttributeError:
                # Build the regex lazily on first access.
                all = object.__getattribute__(self, "all")
                iuse_implicit = object.__getattribute__(self, "iuse_implicit")
                # Escape anything except ".*" which is supposed
                # to pass through from _get_implicit_iuse()
                regex = (re.escape(x) for x in chain(all, iuse_implicit))
                regex = "^(%s)$" % "|".join(regex)
                regex = regex.replace("\\.\\*", ".*")
                self.regex = re.compile(regex)
            return object.__getattribute__(self, name)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # Default operation is "merge"; installed/onlydeps packages
            # are "nomerge".
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            # [self._hash_key = \ assignment line elided]
            (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    # Rich comparisons order same-cp packages by version via pkgcmp;
    # the return statements are elided in this excerpt.
    def __lt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

    def __le__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

    def __gt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

    def __ge__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Full set of metadata keys the wrapper class may hold: every auxdb key
# except the UNUSED_* placeholders and the obsolete CDEPEND, plus the
# keys Package itself caches.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
    if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict base class restricted to the keys above (memory savings).
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
    """
    Detect metadata updates and synchronize Package attributes.

    Writes to any key in _wrapped_keys are mirrored into the owning
    Package instance via the corresponding _set_<key>() hook.
    """

    __slots__ = ("_pkg",)
    _wrapped_keys = frozenset(
        ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

    def __init__(self, pkg, metadata):
        _PackageMetadataWrapperBase.__init__(self)
        # [self._pkg = pkg assignment elided in this excerpt]
        self.update(metadata)

    def __setitem__(self, k, v):
        _PackageMetadataWrapperBase.__setitem__(self, k, v)
        # Dispatch to the matching _set_* hook for synchronized keys.
        if k in self._wrapped_keys:
            getattr(self, "_set_" + k.lower())(k, v)

    def _set_inherited(self, k, v):
        # Normalize whitespace-separated string to a frozenset.
        if isinstance(v, basestring):
            v = frozenset(v.split())
        self._pkg.inherited = v

    def _set_iuse(self, k, v):
        self._pkg.iuse = self._pkg._iuse(
            v.split(), self._pkg.root_config.iuse_implicit)

    def _set_slot(self, k, v):
        # [body elided in this excerpt — presumably self._pkg.slot = v]

    def _set_use(self, k, v):
        self._pkg.use = self._pkg._use(v.split())

    def _set_counter(self, k, v):
        # [string-to-long conversion and error handling elided]
        if isinstance(v, basestring):
        self._pkg.counter = v

    def _set__mtime_(self, k, v):
        # [string-to-long conversion and assignment elided]
        if isinstance(v, basestring):
class EbuildFetchonly(SlotObject):
    """
    Fetch (or pretend-fetch) the sources of a single ebuild synchronously,
    honoring RESTRICT=fetch by running inside a private build directory so
    pkg_nofetch can be spawned.

    NOTE(review): the execute() and _execute() method headers, the
    try/except/finally scaffolding and several return lines are elided in
    this excerpt; surviving statements are reproduced unchanged.
    """
    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

    # [def execute(self): header and `pkg = self.pkg` elided]
        settings = self.settings
        portdb = pkg.root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        settings.setcpv(pkg)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        use_cache = 1 # always true
        portage.doebuild_environment(ebuild_path, "fetch",
            settings["ROOT"], settings, debug, use_cache, portdb)
        restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
        # RESTRICT=fetch needs a build dir so pkg_nofetch can run.
        rval = self._execute_with_builddir()
        rval = portage.doebuild(ebuild_path, "fetch",
            settings["ROOT"], settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")
        if rval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

    def _execute_with_builddir(self):
        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        # [try: elided]
        private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        # [except EnvironmentError, e: elided]
        if e.errno != portage.exception.PermissionDenied.errno:
        raise portage.exception.PermissionDenied(global_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        # [try/finally around _execute elided]
        retval = self._execute()
        # Restore the global tmpdir and discard the private one.
        settings["PORTAGE_TMPDIR"] = global_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        shutil.rmtree(private_tmpdir)

    # [def _execute(self): header and `pkg = self.pkg` elided]
        settings = self.settings
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
        retval = portage.doebuild(ebuild_path, "fetch",
            self.settings["ROOT"], self.settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")
        if retval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)
        # Flush any elog messages produced (e.g. by pkg_nofetch).
        portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """

    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    # NOTE(review): the loop assigning fallback bit values (for k in
    # names: ...) is elided in this excerpt; only its body survives.
    # Falls back to a locally generated value when select lacks the name.
        locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Sublasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

    # NOTE(review): the start/_start/isAlive/poll/_poll/wait/_wait/cancel
    # method headers and parts of their bodies are elided in this excerpt;
    # the surviving statements appear below unchanged.
        """
        Start an asynchronous task and then return as soon as possible.
        """
        raise NotImplementedError(self)
        return self.returncode is None
        return self.returncode
        if self.returncode is None:
        return self.returncode
        return self.returncode
        self.cancelled = True

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        if self._start_listeners is None:
            # [return elided]
        self._start_listeners.remove(f)

    def _start_hook(self):
        # Fire-and-clear the start listeners exactly once.
        if self._start_listeners is not None:
            start_listeners = self._start_listeners
            self._start_listeners = None
            for f in start_listeners:
                # [f(self) call elided]

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        if self._exit_listeners is None:
            # Listeners may already have been moved to the stack by
            # _wait_hook(); allow removal from there too.
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None

            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
    """
    Base class for tasks driven by the scheduler's poll() loop; tracks
    which poll events should trigger unregistration.
    """

    # NOTE(review): part of the __slots__ continuation and of
    # _registered_events is elided in this excerpt.
    __slots__ = ("scheduler",) + \

    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \

    def _unregister(self):
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        # Drop registration on error events or hangup (EOF).
        if self._registered:
            if event & self._exceptional_events:
                # [error handling body elided]
            elif event & PollConstants.POLLHUP:
                # [unregister/wait body elided]
class PipeReader(AbstractPollTask):

    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    main thread.

    NOTE(review): several method headers (_start, isAlive, cancel, _wait,
    getvalue, close) are elided in this excerpt.
    """

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

    # [def _start(self): header elided]
        self._reg_ids = set()
        self._read_data = []
        for k, f in self.input_files.iteritems():
            # Switch each input to non-blocking mode before registering.
            fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
            self._reg_ids.add(self.scheduler.register(f.fileno(),
                self._registered_events, self._output_handler))
        self._registered = True

    # [def isAlive(self): header elided]
        return self._registered

    # [def cancel(self): header elided]
        if self.returncode is None:
        self.cancelled = True

    # [def _wait(self): header elided]
        if self.returncode is not None:
            return self.returncode
        if self._registered:
            # Drive the poll loop until all inputs reach EOF.
            self.scheduler.schedule(self._reg_ids)
        self.returncode = os.EX_OK
        return self.returncode

    # [def getvalue(self): header elided]
        """Retrieve the entire contents"""
        if sys.hexversion >= 0x3000000:
            return bytes().join(self._read_data)
        return "".join(self._read_data)

    # [def close(self): header elided]
        """Free the memory buffer."""
        self._read_data = None

    def _output_handler(self, fd, event):
        # [reads partially elided: EOF/EAGAIN handling not visible]
        if event & PollConstants.POLLIN:
            for f in self.input_files.itervalues():
                if fd == f.fileno():
            buf = array.array('B')
            buf.fromfile(f, self._bufsize)
            self._read_data.append(buf.tostring())
        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None

        if self.input_files is not None:
            for f in self.input_files.itervalues():
                # [f.close() elided]
            self.input_files = None
class CompositeTask(AsynchronousTask):
    """
    A task implemented as a chain of sub-tasks; exactly one sub-task
    (self._current_task) is active at a time and its exit handler
    advances to the next.

    NOTE(review): the isAlive/cancel/_poll/_wait method headers and parts
    of their bodies are elided in this excerpt.
    """

    __slots__ = ("scheduler",) + ("_current_task",)

    # [def isAlive(self): header elided]
        return self._current_task is not None

    # [def cancel(self): header elided]
        self.cancelled = True
        if self._current_task is not None:
            self._current_task.cancel()

    # [def _poll(self): header elided]
        """
        This does a loop calling self._current_task.poll()
        repeatedly as long as the value of self._current_task
        keeps changing. It calls poll() a maximum of one time
        for a given self._current_task instance. This is useful
        since calling poll() on a task can trigger advance to
        the next task could eventually lead to the returncode
        being set in cases when polling only a single task would
        not have the same effect.
        """
        # [loop header elided]
        task = self._current_task
        if task is None or task is prev:
            # don't poll the same task more than once
        return self.returncode

    # [def _wait(self): header and loop elided]
        task = self._current_task
        # don't wait for the same task more than once
        # Before the task.wait() method returned, an exit
        # listener should have set self._current_task to either
        # a different task or None. Something is wrong.
        raise AssertionError("self._current_task has not " + \
            "changed since calling wait", self, task)
        return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs in exit-handler wiring.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.
        """
        self._final_exit(task)
        # [return self.wait() elided]

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a sub-task.
        """
        task.addExitListener(exit_handler)
        self._current_task = task
        # [task.start() elided]
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        # FIFO of pending sub-tasks.
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

    # [def _start(self): header elided]
        self._start_next_task()

    # [def cancel(self): header elided]
        self._task_queue.clear()
        CompositeTask.cancel(self)

    def _start_next_task(self):
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        # Stop on failure, advance on success, finish when the queue
        # is empty. NOTE(review): the wait() calls and the else: line
        # are elided in this excerpt.
        if self._default_exit(task) != os.EX_OK:
        elif self._task_queue:
            self._start_next_task()
        self._final_exit(task)
class SubProcess(AbstractPollTask):
    """
    Base class for tasks backed by a forked child process, monitored via
    a file descriptor registered with the scheduler.

    NOTE(review): the _poll/cancel/isAlive/_wait method headers, the
    try/except scaffolding around os.waitpid/os.kill, and several early
    returns are elided in this excerpt.
    """

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.
    # [_dummy_pipe_fd constant elided]

    # [def _poll(self): header elided]
        if self.returncode is not None:
            return self.returncode
        if self.pid is None:
            return self.returncode
        if self._registered:
            return self.returncode
        # Non-blocking reap; ECHILD means some other code already reaped.
        retval = os.waitpid(self.pid, os.WNOHANG)
        if e.errno != errno.ECHILD:
        retval = (self.pid, 1)
        if retval == (0, 0):
        self._set_returncode(retval)
        return self.returncode

    # [def cancel(self): header elided]
        os.kill(self.pid, signal.SIGTERM)
        # ESRCH: process already gone, which is fine for cancel.
        if e.errno != errno.ESRCH:
        self.cancelled = True
        if self.pid is not None:
        return self.returncode

    # [def isAlive(self): header elided]
        return self.pid is not None and \
            self.returncode is None

    # [def _wait(self): header elided]
        if self.returncode is not None:
            return self.returncode

        if self._registered:
            # Drive the poll loop until the pipe reports EOF.
            self.scheduler.schedule(self._reg_id)
            if self.returncode is not None:
                return self.returncode

        # Blocking reap as the final fallback.
        wait_retval = os.waitpid(self.pid, 0)
        if e.errno != errno.ECHILD:
        self._set_returncode((self.pid, 1))
        self._set_returncode(wait_retval)

        return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)
            # [self._reg_id = None elided]

        if self._files is not None:
            for f in self._files.itervalues():
                # [f.close() / self._files = None elided]

    def _set_returncode(self, wait_retval):
        # Convert the waitpid() status word into a conventional exit code.
        retval = wait_retval[1]

        if retval != os.EX_OK:
            # [WIFSIGNALED branch partially elided]
            retval = (retval & 0xff) << 8
            retval = retval >> 8

        self.returncode = retval
class SpawnProcess(SubProcess):

    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    argument to spawn().

    NOTE(review): the _start() method header, several branch/assignment
    lines and parts of the output handlers are elided in this excerpt;
    the surviving statements are reproduced unchanged.
    """

    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
        "uid", "gid", "groups", "umask", "logfile",
        "path_lookup", "pre_exec")

    # [__slots__ continuation elided]
    __slots__ = ("args",) + \

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

    # [def _start(self): header elided]
        if self.fd_pipes is None:
            # [default fd_pipes initialization elided]
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
                # [sys.stdout.flush() elided]
            if fd == sys.stderr.fileno():
                # [sys.stderr.flush() elided]

        logfile = self.logfile
        self._files = self._files_dict()
        master_fd, slave_fd = self._pipe(fd_pipes)
        # Reads from the master end must never block the poll loop.
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes_orig = fd_pipes.copy()
        # [background/foreground branch lines elided]
        # TODO: Use job control functions like tcsetpgrp() to control
        # access to stdin. Until then, use /dev/null so that any
        # attempts to read from stdin will immediately return EOF
        # instead of blocking indefinitely.
        null_input = open('/dev/null', 'rb')
        fd_pipes[0] = null_input.fileno()
        fd_pipes[0] = fd_pipes_orig[0]
        files.process = os.fdopen(master_fd, 'rb')
        if logfile is not None:
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd
            files.log = open(logfile, mode='ab')
            # Log files should be readable/writable by the portage user.
            portage.util.apply_secpass_permissions(logfile,
                uid=portage.portage_uid, gid=portage.portage_gid,
            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
            output_handler = self._output_handler
        # [else: branch header elided]
            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        # Collect spawn() keyword arguments from matching attributes.
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        # logfile is handled above, not passed through to spawn().
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
            self._registered_events, output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)

        if null_input is not None:
            # [null_input.close() elided]
        if isinstance(retval, int):
            # spawn failed; retval is the error code.
            self.returncode = retval
        self.pid = retval[0]
        # This class reaps the child itself; remove it from the global list.
        portage.process.spawned_pids.remove(self.pid)

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        # [return os.pipe() elided]

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):
        # Copy child output to stdout (unless backgrounded) and log file.
        # [EOF/unregister handling partially elided]
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            buf.fromfile(files.process, self._bufsize)
            if not self.background:
                buf.tofile(files.stdout)
                files.stdout.flush()
            buf.tofile(files.log)
        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """
        # [EOF/unregister handling partially elided]
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            buf.fromfile(self._files.process, self._bufsize)
        self._unregister_if_appropriate(event)
        return self._registered
class MiscFunctionsProcess(SpawnProcess):
    """
    Spawns misc-functions.sh with an existing ebuild environment.
    """

    __slots__ = ("commands", "phase", "pkg", "settings")

    # [def _start(self): header elided]
        settings = self.settings
        # Let misc-functions.sh manage EBUILD_PHASE itself.
        settings.pop("EBUILD_PHASE", None)
        portage_bin_path = settings["PORTAGE_BIN_PATH"]
        misc_sh_binary = os.path.join(portage_bin_path,
            os.path.basename(portage.const.MISC_SH_BINARY))
        self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
        self.logfile = settings.get("PORTAGE_LOG_FILE")
        # Remove a stale exit-status file before spawning.
        portage._doebuild_exit_status_unlink(
            settings.get("EBUILD_EXIT_STATUS_FILE"))
        SpawnProcess._start(self)

    def _spawn(self, args, **kwargs):
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        # Run through portage.spawn so the ebuild environment applies.
        return portage.spawn(" ".join(args), settings,
            debug=debug, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Override the raw exit code with the phase's recorded status.
        self.returncode = portage._doebuild_exit_status_check_and_log(
            self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    """
    Asynchronously fetch an ebuild's sources by spawning the `ebuild`
    helper with the appropriate fetch phase.

    NOTE(review): the __slots__ continuation, the _start() header, the
    phase selection and several conditional lines are elided in this
    excerpt; surviving statements are reproduced unchanged.
    """

    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

    # [def _start(self): header elided]
        root_config = self.pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.config_pool.allocate()
        settings.setcpv(self.pkg)

        # In prefetch mode, logging goes to emerge-fetch.log and the builddir
        # should not be touched since otherwise it could interfere with
        # another instance of the same cpv concurrently being built for a
        # different $ROOT (currently, builds only cooperate with prefetchers
        # that are spawned for the same $ROOT).
        if not self.prefetch:
            self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
            self._build_dir.lock()
            self._build_dir.clean()
            portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
            if self.logfile is None:
                self.logfile = settings.get("PORTAGE_LOG_FILE")

        # If any incremental variables have been overridden
        # via the environment, those values need to be passed
        # along here so that they are correctly considered by
        # the config instance in the subproccess.
        fetch_env = os.environ.copy()

        nocolor = settings.get("NOCOLOR")
        if nocolor is not None:
            fetch_env["NOCOLOR"] = nocolor

        fetch_env["PORTAGE_NICENESS"] = "0"
        # [prefetch conditional elided]
        fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

        ebuild_binary = os.path.join(
            settings["PORTAGE_BIN_PATH"], "ebuild")

        # [phase selection elided]
        fetch_args = [ebuild_binary, ebuild_path, phase]
        debug = settings.get("PORTAGE_DEBUG") == "1"
        # [if debug: elided]
        fetch_args.append("--debug")

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
            # [return os.pipe() elided]
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                # [elog_out = None elided]
                if self.logfile is not None:
                    # [background conditional elided]
                    elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                if elog_out is not None:
                    # [elog_out.close() elided]
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
            features = self._build_dir.settings.features
            if self.returncode == os.EX_OK:
                # [keepwork conditional elided]
                self._build_dir.clean()
            self._build_dir.unlock()
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
# Lock manager for a package's build directory ($PORTAGE_BUILDDIR).
# lock() also briefly locks the parent category dir while ensuring the
# directories exist; unlock() releases the dir lock and (apparently)
# tidies the category dir.  clean() removes the build dir outright via
# shutil.rmtree unless FEATURES has keepwork/keeptemp.
# NOTE(review): this view elides several original lines, including the
# "def" headers and docstring delimiters for lock()/clean()/unlock()
# and some try/else lines -- code below is byte-identical to what is
# visible; confirm structure against the full file before editing.
2581 class EbuildBuildDir(SlotObject):
2583 __slots__ = ("dir_path", "pkg", "settings",
2584 "locked", "_catdir", "_lock_obj")
2586 def __init__(self, **kwargs):
2587 SlotObject.__init__(self, **kwargs)
# -- lock() (its def line and docstring open are elided in this view) --
2592 This raises an AlreadyLocked exception if lock() is called
2593 while a lock is already held. In order to avoid this, call
2594 unlock() or check whether the "locked" attribute is True
2595 or False before calling lock().
2597 if self._lock_obj is not None:
2598 raise self.AlreadyLocked((self._lock_obj,))
2600 dir_path = self.dir_path
2601 if dir_path is None:
# When no explicit dir_path was given, derive PORTAGE_BUILDDIR by
# running doebuild_environment for the "setup" phase.
2602 root_config = self.pkg.root_config
2603 portdb = root_config.trees["porttree"].dbapi
2604 ebuild_path = portdb.findname(self.pkg.cpv)
2605 settings = self.settings
2606 settings.setcpv(self.pkg)
2607 debug = settings.get("PORTAGE_DEBUG") == "1"
2608 use_cache = 1 # always true
2609 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2610 self.settings, debug, use_cache, portdb)
2611 dir_path = self.settings["PORTAGE_BUILDDIR"]
2613 catdir = os.path.dirname(dir_path)
2614 self._catdir = catdir
# Create the grandparent dir, lock the category dir while creating it,
# then take the real build-dir lock and drop the category lock.
2616 portage.util.ensure_dirs(os.path.dirname(catdir),
2617 gid=portage.portage_gid,
2621 catdir_lock = portage.locks.lockdir(catdir)
2622 portage.util.ensure_dirs(catdir,
2623 gid=portage.portage_gid,
2625 self._lock_obj = portage.locks.lockdir(dir_path)
2627 self.locked = self._lock_obj is not None
2628 if catdir_lock is not None:
2629 portage.locks.unlockdir(catdir_lock)
# -- clean() (def line elided in this view) --
2632 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2633 by keepwork or keeptemp in FEATURES."""
2634 settings = self.settings
2635 features = settings.features
2636 if not ("keepwork" in features or "keeptemp" in features):
2638 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
# Best-effort removal: a missing dir (ENOENT) is fine, anything else
# presumably re-raises (the raise line is elided in this view).
2639 except EnvironmentError, e:
2640 if e.errno != errno.ENOENT:
# -- unlock() (def line elided in this view) --
2645 if self._lock_obj is None:
2648 portage.locks.unlockdir(self._lock_obj)
2649 self._lock_obj = None
2652 catdir = self._catdir
2655 catdir_lock = portage.locks.lockdir(catdir)
# Category-dir cleanup tolerates the usual benign races.
2661 if e.errno not in (errno.ENOENT,
2662 errno.ENOTEMPTY, errno.EEXIST):
2665 portage.locks.unlockdir(catdir_lock)
2667 class AlreadyLocked(portage.exception.PortageException):
# Composite task that drives a full from-source build: optional
# prefetch wait -> fetch -> EbuildExecuter (build phases) -> optional
# binary packaging -> merge.  Holds the build-dir lock from _fetch_exit
# until _unlock_builddir().
# NOTE(review): this view elides many original lines (def headers,
# try/else/return lines); code below is byte-identical to what is
# visible -- confirm against the full file before editing.
2670 class EbuildBuild(CompositeTask):
2672 __slots__ = ("args_set", "config_pool", "find_blockers",
2673 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2674 "prefetcher", "settings", "world_atom") + \
2675 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# -- _start() (def line elided in this view) --
2679 logger = self.logger
2682 settings = self.settings
2683 world_atom = self.world_atom
2684 root_config = pkg.root_config
2687 portdb = root_config.trees[tree].dbapi
2688 settings.setcpv(pkg)
2689 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2690 ebuild_path = portdb.findname(self.pkg.cpv)
2691 self._ebuild_path = ebuild_path
# If a background prefetcher is still running it holds the distfile
# lock in this same process, so synchronize via an exit listener
# rather than portage.locks.
2693 prefetcher = self.prefetcher
2694 if prefetcher is None:
2696 elif not prefetcher.isAlive():
2698 elif prefetcher.poll() is None:
2700 waiting_msg = "Fetching files " + \
2701 "in the background. " + \
2702 "To view fetch progress, run `tail -f " + \
2703 "/var/log/emerge-fetch.log` in another " + \
2705 msg_prefix = colorize("GOOD", " * ")
2706 from textwrap import wrap
2707 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2708 for line in wrap(waiting_msg, 65))
2709 if not self.background:
2710 writemsg(waiting_msg, noiselevel=-1)
2712 self._current_task = prefetcher
2713 prefetcher.addExitListener(self._prefetch_exit)
2716 self._prefetch_exit(prefetcher)
2718 def _prefetch_exit(self, prefetcher):
# Pretend/fetchonly path runs a synchronous EbuildFetchonly; the
# normal path starts an async EbuildFetcher.
2722 settings = self.settings
2725 fetcher = EbuildFetchonly(
2726 fetch_all=opts.fetch_all_uri,
2727 pkg=pkg, pretend=opts.pretend,
2729 retval = fetcher.execute()
2730 self.returncode = retval
2734 fetcher = EbuildFetcher(config_pool=self.config_pool,
2735 fetchall=opts.fetch_all_uri,
2736 fetchonly=opts.fetchonly,
2737 background=self.background,
2738 pkg=pkg, scheduler=self.scheduler)
2740 self._start_task(fetcher, self._fetch_exit)
2742 def _fetch_exit(self, fetcher):
2746 fetch_failed = False
2748 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2750 fetch_failed = self._default_exit(fetcher) != os.EX_OK
# On failure keep the fetch log around so errors can be reported; on
# success delete it so it is not mistaken for a build log.
2752 if fetch_failed and fetcher.logfile is not None and \
2753 os.path.exists(fetcher.logfile):
2754 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2756 if not fetch_failed and fetcher.logfile is not None:
2757 # Fetch was successful, so remove the fetch log.
2759 os.unlink(fetcher.logfile)
2763 if fetch_failed or opts.fetchonly:
2767 logger = self.logger
2769 pkg_count = self.pkg_count
2770 scheduler = self.scheduler
2771 settings = self.settings
2772 features = settings.features
2773 ebuild_path = self._ebuild_path
2774 system_set = pkg.root_config.sets["system"]
# Lock the build dir for the remainder of the build; released in
# _unlock_builddir() on any failure path or after merge.
2776 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2777 self._build_dir.lock()
2779 # Cleaning is triggered before the setup
2780 # phase, in portage.doebuild().
2781 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2782 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2783 short_msg = "emerge: (%s of %s) %s Clean" % \
2784 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2785 logger.log(msg, short_msg=short_msg)
2787 #buildsyspkg: Check if we need to _force_ binary package creation
2788 self._issyspkg = "buildsyspkg" in features and \
2789 system_set.findAtomForPackage(pkg) and \
2792 if opts.buildpkg or self._issyspkg:
2794 self._buildpkg = True
2796 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2797 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2798 short_msg = "emerge: (%s of %s) %s Compile" % \
2799 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2800 logger.log(msg, short_msg=short_msg)
2803 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2804 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2805 short_msg = "emerge: (%s of %s) %s Compile" % \
2806 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2807 logger.log(msg, short_msg=short_msg)
2809 build = EbuildExecuter(background=self.background, pkg=pkg,
2810 scheduler=scheduler, settings=settings)
2811 self._start_task(build, self._build_exit)
2813 def _unlock_builddir(self):
# Flush pending elog messages before dropping the lock.
2814 portage.elog.elog_process(self.pkg.cpv, self.settings)
2815 self._build_dir.unlock()
2817 def _build_exit(self, build):
2818 if self._default_exit(build) != os.EX_OK:
2819 self._unlock_builddir()
2824 buildpkg = self._buildpkg
2827 self._final_exit(build)
# Forced rescue-tarball packaging for system packages (buildsyspkg).
2832 msg = ">>> This is a system package, " + \
2833 "let's pack a rescue tarball.\n"
2835 log_path = self.settings.get("PORTAGE_LOG_FILE")
2836 if log_path is not None:
2837 log_file = open(log_path, 'a')
2843 if not self.background:
2844 portage.writemsg_stdout(msg, noiselevel=-1)
2846 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2847 scheduler=self.scheduler, settings=self.settings)
2849 self._start_task(packager, self._buildpkg_exit)
2851 def _buildpkg_exit(self, packager):
2853 Released build dir lock when there is a failure or
2854 when in buildpkgonly mode. Otherwise, the lock will
2855 be released when merge() is called.
2858 if self._default_exit(packager) != os.EX_OK:
2859 self._unlock_builddir()
2863 if self.opts.buildpkgonly:
2864 # Need to call "clean" phase for buildpkgonly mode
2865 portage.elog.elog_process(self.pkg.cpv, self.settings)
2867 clean_phase = EbuildPhase(background=self.background,
2868 pkg=self.pkg, phase=phase,
2869 scheduler=self.scheduler, settings=self.settings,
2871 self._start_task(clean_phase, self._clean_exit)
2874 # Continue holding the builddir lock until
2875 # after the package has been installed.
2876 self._current_task = None
2877 self.returncode = packager.returncode
2880 def _clean_exit(self, clean_phase):
2881 if self._final_exit(clean_phase) != os.EX_OK or \
2882 self.opts.buildpkgonly:
2883 self._unlock_builddir()
# -- install() (def line and docstring open elided in this view) --
2888 Install the package and then clean up and release locks.
2889 Only call this after the build has completed successfully
2890 and neither fetchonly nor buildpkgonly mode are enabled.
2893 find_blockers = self.find_blockers
2894 ldpath_mtimes = self.ldpath_mtimes
2895 logger = self.logger
2897 pkg_count = self.pkg_count
2898 settings = self.settings
2899 world_atom = self.world_atom
2900 ebuild_path = self._ebuild_path
2903 merge = EbuildMerge(find_blockers=self.find_blockers,
2904 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2905 pkg_count=pkg_count, pkg_path=ebuild_path,
2906 scheduler=self.scheduler,
2907 settings=settings, tree=tree, world_atom=world_atom)
2909 msg = " === (%s of %s) Merging (%s::%s)" % \
2910 (pkg_count.curval, pkg_count.maxval,
2911 pkg.cpv, ebuild_path)
2912 short_msg = "emerge: (%s of %s) %s Merge" % \
2913 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2914 logger.log(msg, short_msg=short_msg)
# The builddir lock is released after merge.execute() completes,
# whatever the outcome (the try line is elided in this view).
2917 rval = merge.execute()
2919 self._unlock_builddir()
# Runs the source-build phase chain for one ebuild:
# clean -> setup -> unpack -> (prepare, configure, compile, test,
# install), skipping prepare/configure for EAPI 0/1.  setup and unpack
# go through the scheduler's dedicated scheduleSetup/scheduleUnpack
# queues; unpack of live ebuilds is serialized to protect $DISTDIR.
# NOTE(review): several original lines are elided in this view (e.g.
# the _live_eclasses members, some def headers); code below is kept
# byte-identical to what is visible.
2923 class EbuildExecuter(CompositeTask):
2925 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2927 _phases = ("prepare", "configure", "compile", "test", "install")
2929 _live_eclasses = frozenset([
# -- _start() (def line elided in this view) --
2939 self._tree = "porttree"
2942 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2943 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2944 self._start_task(clean_phase, self._clean_phase_exit)
2946 def _clean_phase_exit(self, clean_phase):
2948 if self._default_exit(clean_phase) != os.EX_OK:
2953 scheduler = self.scheduler
2954 settings = self.settings
2957 # This initializes PORTAGE_LOG_FILE.
2958 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2960 setup_phase = EbuildPhase(background=self.background,
2961 pkg=pkg, phase="setup", scheduler=scheduler,
2962 settings=settings, tree=self._tree)
# setup goes through the scheduler's setup queue rather than
# _start_task, so the exit listener is wired manually.
2964 setup_phase.addExitListener(self._setup_exit)
2965 self._current_task = setup_phase
2966 self.scheduler.scheduleSetup(setup_phase)
2968 def _setup_exit(self, setup_phase):
2970 if self._default_exit(setup_phase) != os.EX_OK:
2974 unpack_phase = EbuildPhase(background=self.background,
2975 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2976 settings=self.settings, tree=self._tree)
2978 if self._live_eclasses.intersection(self.pkg.inherited):
2979 # Serialize $DISTDIR access for live ebuilds since
2980 # otherwise they can interfere with eachother.
2982 unpack_phase.addExitListener(self._unpack_exit)
2983 self._current_task = unpack_phase
2984 self.scheduler.scheduleUnpack(unpack_phase)
2987 self._start_task(unpack_phase, self._unpack_exit)
2989 def _unpack_exit(self, unpack_phase):
2991 if self._default_exit(unpack_phase) != os.EX_OK:
2995 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2998 phases = self._phases
2999 eapi = pkg.metadata["EAPI"]
3000 if eapi in ("0", "1"):
3001 # skip src_prepare and src_configure
# (elided here: presumably phases = phases[2:] -- TODO confirm)
3004 for phase in phases:
3005 ebuild_phases.add(EbuildPhase(background=self.background,
3006 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3007 settings=self.settings, tree=self._tree))
3009 self._start_task(ebuild_phases, self._default_final_exit)
# Spawns `ebuild ... depend` and reads the metadata it writes over a
# dedicated non-blocking pipe; on success the lines are zipped with
# portage.auxdbkeys and handed to metadata_callback.
# NOTE(review): this view elides several original lines (def headers,
# flush calls, unregister/wait lines); code below is byte-identical to
# what is visible.
3011 class EbuildMetadataPhase(SubProcess):
3014 Asynchronous interface for the ebuild "depend" phase which is
3015 used to extract metadata from the ebuild.
3018 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3019 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3022 _file_names = ("ebuild",)
3023 _files_dict = slot_dict_class(_file_names, prefix="")
# -- _start() (def line elided in this view) --
3027 settings = self.settings
3029 ebuild_path = self.ebuild_path
3030 debug = settings.get("PORTAGE_DEBUG") == "1"
3034 if self.fd_pipes is not None:
3035 fd_pipes = self.fd_pipes.copy()
3039 fd_pipes.setdefault(0, sys.stdin.fileno())
3040 fd_pipes.setdefault(1, sys.stdout.fileno())
3041 fd_pipes.setdefault(2, sys.stderr.fileno())
3043 # flush any pending output
3044 for fd in fd_pipes.itervalues():
3045 if fd == sys.stdout.fileno():
3047 if fd == sys.stderr.fileno():
3050 fd_pipes_orig = fd_pipes.copy()
3051 self._files = self._files_dict()
# The parent end is made non-blocking so the scheduler's poll loop can
# drain metadata output without stalling.
3054 master_fd, slave_fd = os.pipe()
3055 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3056 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3058 fd_pipes[self._metadata_fd] = slave_fd
3060 self._raw_metadata = []
3061 files.ebuild = os.fdopen(master_fd, 'r')
3062 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3063 self._registered_events, self._output_handler)
3064 self._registered = True
3066 retval = portage.doebuild(ebuild_path, "depend",
3067 settings["ROOT"], settings, debug,
3068 mydbapi=self.portdb, tree="porttree",
3069 fd_pipes=fd_pipes, returnpid=True)
3073 if isinstance(retval, int):
3074 # doebuild failed before spawning
3076 self.returncode = retval
# With returnpid=True, retval is a pid list; this pid is reaped here
# rather than by portage's global spawned_pids tracking.
3080 self.pid = retval[0]
3081 portage.process.spawned_pids.remove(self.pid)
3083 def _output_handler(self, fd, event):
3085 if event & PollConstants.POLLIN:
3086 self._raw_metadata.append(self._files.ebuild.read())
# An empty read means EOF (the handling lines are elided in this view).
3087 if not self._raw_metadata[-1]:
3091 self._unregister_if_appropriate(event)
3092 return self._registered
3094 def _set_returncode(self, wait_retval):
3095 SubProcess._set_returncode(self, wait_retval)
3096 if self.returncode == os.EX_OK:
3097 metadata_lines = "".join(self._raw_metadata).splitlines()
3098 if len(portage.auxdbkeys) != len(metadata_lines):
3099 # Don't trust bash's returncode if the
3100 # number of lines is incorrect.
3103 metadata = izip(portage.auxdbkeys, metadata_lines)
3104 self.metadata_callback(self.cpv, self.ebuild_path,
3105 self.repo_path, metadata, self.ebuild_mtime)
# SpawnProcess subclass that runs a single ebuild phase through
# portage.doebuild(), logging to PORTAGE_LOG_FILE for all phases
# except clean/cleanrm, and normalizing the exit status afterwards.
# NOTE(review): a few original lines are elided in this view (def
# _start header, tree assignment, return of rval); code below is
# byte-identical to what is visible.
3107 class EbuildProcess(SpawnProcess):
3109 __slots__ = ("phase", "pkg", "settings", "tree")
# -- _start() (def line elided in this view) --
3112 # Don't open the log file during the clean phase since the
3113 # open file can result in an nfs lock on $T/build.log which
3114 # prevents the clean phase from removing $T.
3115 if self.phase not in ("clean", "cleanrm"):
3116 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3117 SpawnProcess._start(self)
3119 def _pipe(self, fd_pipes):
# Always use a pty (or fallback pipe) sized to the fd mapped as stdout.
3120 stdout_pipe = fd_pipes.get(1)
3121 got_pty, master_fd, slave_fd = \
3122 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3123 return (master_fd, slave_fd)
3125 def _spawn(self, args, **kwargs):
3127 root_config = self.pkg.root_config
3129 mydbapi = root_config.trees[tree].dbapi
3130 settings = self.settings
3131 ebuild_path = settings["EBUILD"]
3132 debug = settings.get("PORTAGE_DEBUG") == "1"
3134 rval = portage.doebuild(ebuild_path, self.phase,
3135 root_config.root, settings, debug,
3136 mydbapi=mydbapi, tree=tree, **kwargs)
3140 def _set_returncode(self, wait_retval):
3141 SpawnProcess._set_returncode(self, wait_retval)
# The real phase result comes from the exit-status file, since bash's
# own returncode is not trusted here.
3143 if self.phase not in ("clean", "cleanrm"):
3144 self.returncode = portage._doebuild_exit_status_check_and_log(
3145 self.settings, self.phase, self.returncode)
3147 if self.phase == "test" and self.returncode != os.EX_OK and \
3148 "test-fail-continue" in self.settings.features:
3149 self.returncode = os.EX_OK
3151 portage._post_phase_userpriv_perms(self.settings)
# Wraps one EbuildProcess and, when the phase has registered
# post-phase commands, chains a MiscFunctionsProcess after it.  For
# the install phase it also audits the build log and fixes ownership.
# NOTE(review): several original lines are elided in this view (def
# _start header, try/finally around the log file, wait/return lines);
# code below is byte-identical to what is visible.
3153 class EbuildPhase(CompositeTask):
3155 __slots__ = ("background", "pkg", "phase",
3156 "scheduler", "settings", "tree")
3158 _post_phase_cmds = portage._post_phase_cmds
# -- _start() (def line elided in this view) --
3162 ebuild_process = EbuildProcess(background=self.background,
3163 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3164 settings=self.settings, tree=self.tree)
3166 self._start_task(ebuild_process, self._ebuild_exit)
3168 def _ebuild_exit(self, ebuild_process):
3170 if self.phase == "install":
3172 log_path = self.settings.get("PORTAGE_LOG_FILE")
3174 if self.background and log_path is not None:
3175 log_file = open(log_path, 'a')
# _check_build_log scans for QA warnings after install; output goes to
# the log file when running in the background.
3178 portage._check_build_log(self.settings, out=out)
3180 if log_file is not None:
3183 if self._default_exit(ebuild_process) != os.EX_OK:
3187 settings = self.settings
3189 if self.phase == "install":
3190 portage._post_src_install_uid_fix(settings)
3192 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3193 if post_phase_cmds is not None:
3194 post_phase = MiscFunctionsProcess(background=self.background,
3195 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3196 scheduler=self.scheduler, settings=settings)
3197 self._start_task(post_phase, self._post_phase_exit)
3200 self.returncode = ebuild_process.returncode
3201 self._current_task = None
3204 def _post_phase_exit(self, post_phase):
3205 if self._final_exit(post_phase) != os.EX_OK:
3206 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3208 self._current_task = None
# Runs the "package" phase to build a .tbz2 from the installed image,
# writing to a pid-suffixed temp file and injecting it into the
# bintree on success.
# NOTE(review): this view elides some original lines (docstring open,
# __init__/_start def headers, try/finally); code below is
# byte-identical to what is visible.
3212 class EbuildBinpkg(EbuildProcess):
3214 This assumes that src_install() has successfully completed.
3216 __slots__ = ("_binpkg_tmpfile",)
# -- __init__ / _start (def lines elided in this view) --
3219 self.phase = "package"
3220 self.tree = "porttree"
3222 root_config = pkg.root_config
3223 portdb = root_config.trees["porttree"].dbapi
3224 bintree = root_config.trees["bintree"]
3225 ebuild_path = portdb.findname(self.pkg.cpv)
3226 settings = self.settings
3227 debug = settings.get("PORTAGE_DEBUG") == "1"
# prevent_collision guards against clobbering an existing binary
# package for the same cpv.
3229 bintree.prevent_collision(pkg.cpv)
3230 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3231 pkg.cpv + ".tbz2." + str(os.getpid()))
3232 self._binpkg_tmpfile = binpkg_tmpfile
3233 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3234 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3237 EbuildProcess._start(self)
# The tmpfile setting is scrubbed again after the spawn (presumably in
# a finally clause -- the try line is elided in this view).
3239 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3241 def _set_returncode(self, wait_retval):
3242 EbuildProcess._set_returncode(self, wait_retval)
3245 bintree = pkg.root_config.trees["bintree"]
3246 binpkg_tmpfile = self._binpkg_tmpfile
3247 if self.returncode == os.EX_OK:
3248 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Thin synchronous wrapper around portage.merge() for an already-built
# image ($D), updating the world file and logging on success.
# NOTE(review): the execute() def header and its tail (return of
# retval, call to _log_success) are elided in this view; code below is
# byte-identical to what is visible.
3250 class EbuildMerge(SlotObject):
3252 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3253 "pkg", "pkg_count", "pkg_path", "pretend",
3254 "scheduler", "settings", "tree", "world_atom")
# -- execute() (def line elided in this view) --
3257 root_config = self.pkg.root_config
3258 settings = self.settings
3259 retval = portage.merge(settings["CATEGORY"],
3260 settings["PF"], settings["D"],
3261 os.path.join(settings["PORTAGE_BUILDDIR"],
3262 "build-info"), root_config.root, settings,
3263 myebuild=settings["EBUILD"],
3264 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3265 vartree=root_config.trees["vartree"],
3266 prev_mtimes=self.ldpath_mtimes,
3267 scheduler=self.scheduler,
3268 blockers=self.find_blockers)
# world_atom is the callback that records the package in the world set
# when the merge succeeds.
3270 if retval == os.EX_OK:
3271 self.world_atom(self.pkg)
3276 def _log_success(self):
3278 pkg_count = self.pkg_count
3279 pkg_path = self.pkg_path
3280 logger = self.logger
3281 if "noclean" not in self.settings.features:
3282 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3283 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3284 logger.log((" === (%s of %s) " + \
3285 "Post-Build Cleaning (%s::%s)") % \
3286 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3287 short_msg=short_msg)
3288 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3289 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Synchronous-in-start() task that unmerges one installed package via
# unmerge(), mapping UninstallFailure to the returncode and routing
# all messages through _writemsg_level (console and/or log file).
# NOTE(review): the _start def header, try line, and the log-file
# write/close tail are elided in this view; code below is
# byte-identical to what is visible.
3291 class PackageUninstall(AsynchronousTask):
3293 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# -- _start() (def and try lines elided in this view) --
3297 unmerge(self.pkg.root_config, self.opts, "unmerge",
3298 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3299 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3300 writemsg_level=self._writemsg_level)
3301 except UninstallFailure, e:
3302 self.returncode = e.status
3304 self.returncode = os.EX_OK
3307 def _writemsg_level(self, msg, level=0, noiselevel=0):
3309 log_path = self.settings.get("PORTAGE_LOG_FILE")
3310 background = self.background
# Without a log file, background mode suppresses sub-WARNING chatter;
# with one, the message also lands in the log (tail elided here).
3312 if log_path is None:
3313 if not (background and level < logging.WARNING):
3314 portage.util.writemsg_level(msg,
3315 level=level, noiselevel=noiselevel)
3318 portage.util.writemsg_level(msg,
3319 level=level, noiselevel=noiselevel)
3321 f = open(log_path, 'a')
# Composite task that installs a binary package: optional prefetch
# wait -> (optional) BinpkgFetcher -> BinpkgVerifier -> clean phase ->
# xpak metadata extraction -> setup phase -> BinpkgExtractorAsync ->
# merge.  Holds the build-dir lock except in pretend/fetchonly modes.
# NOTE(review): many original lines are elided in this view (def
# headers, try/else/return/wait lines); code below is byte-identical
# to what is visible -- confirm against the full file before editing.
3327 class Binpkg(CompositeTask):
3329 __slots__ = ("find_blockers",
3330 "ldpath_mtimes", "logger", "opts",
3331 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3332 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3333 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3335 def _writemsg_level(self, msg, level=0, noiselevel=0):
3337 if not self.background:
3338 portage.util.writemsg_level(msg,
3339 level=level, noiselevel=noiselevel)
3341 log_path = self.settings.get("PORTAGE_LOG_FILE")
3342 if log_path is not None:
3343 f = open(log_path, 'a')
# -- _start() (def line elided in this view) --
3352 settings = self.settings
3353 settings.setcpv(pkg)
3354 self._tree = "bintree"
3355 self._bintree = self.pkg.root_config.trees[self._tree]
3356 self._verify = not self.opts.pretend
# Lay out the working directories under PORTAGE_TMPDIR and prime the
# doebuild environment from the extracted .ebuild path.
3358 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3359 "portage", pkg.category, pkg.pf)
3360 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3361 pkg=pkg, settings=settings)
3362 self._image_dir = os.path.join(dir_path, "image")
3363 self._infloc = os.path.join(dir_path, "build-info")
3364 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3365 settings["EBUILD"] = self._ebuild_path
3366 debug = settings.get("PORTAGE_DEBUG") == "1"
3367 portage.doebuild_environment(self._ebuild_path, "setup",
3368 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3369 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3371 # The prefetcher has already completed or it
3372 # could be running now. If it's running now,
3373 # wait for it to complete since it holds
3374 # a lock on the file being fetched. The
3375 # portage.locks functions are only designed
3376 # to work between separate processes. Since
3377 # the lock is held by the current process,
3378 # use the scheduler and fetcher methods to
3379 # synchronize with the fetcher.
3380 prefetcher = self.prefetcher
3381 if prefetcher is None:
3383 elif not prefetcher.isAlive():
3385 elif prefetcher.poll() is None:
3387 waiting_msg = ("Fetching '%s' " + \
3388 "in the background. " + \
3389 "To view fetch progress, run `tail -f " + \
3390 "/var/log/emerge-fetch.log` in another " + \
3391 "terminal.") % prefetcher.pkg_path
3392 msg_prefix = colorize("GOOD", " * ")
3393 from textwrap import wrap
3394 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3395 for line in wrap(waiting_msg, 65))
3396 if not self.background:
3397 writemsg(waiting_msg, noiselevel=-1)
3399 self._current_task = prefetcher
3400 prefetcher.addExitListener(self._prefetch_exit)
3403 self._prefetch_exit(prefetcher)
3405 def _prefetch_exit(self, prefetcher):
3408 pkg_count = self.pkg_count
# In real (non-pretend/fetchonly) runs, lock the build dir and start
# from a clean slate before preparing directories.
3409 if not (self.opts.pretend or self.opts.fetchonly):
3410 self._build_dir.lock()
3412 shutil.rmtree(self._build_dir.dir_path)
3413 except EnvironmentError, e:
3414 if e.errno != errno.ENOENT:
3417 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3418 fetcher = BinpkgFetcher(background=self.background,
3419 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3420 pretend=self.opts.pretend, scheduler=self.scheduler)
3421 pkg_path = fetcher.pkg_path
3422 self._pkg_path = pkg_path
# Only fetch when --getbinpkg is set and the package is remote.
3424 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3426 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3427 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3428 short_msg = "emerge: (%s of %s) %s Fetch" % \
3429 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3430 self.logger.log(msg, short_msg=short_msg)
3431 self._start_task(fetcher, self._fetcher_exit)
3434 self._fetcher_exit(fetcher)
3436 def _fetcher_exit(self, fetcher):
3438 # The fetcher only has a returncode when
3439 # --getbinpkg is enabled.
3440 if fetcher.returncode is not None:
3441 self._fetched_pkg = True
3442 if self._default_exit(fetcher) != os.EX_OK:
3443 self._unlock_builddir()
3447 if self.opts.pretend:
3448 self._current_task = None
3449 self.returncode = os.EX_OK
# Verification is skipped when self._verify is false (branch lines
# elided in this view).
3457 logfile = self.settings.get("PORTAGE_LOG_FILE")
3458 verifier = BinpkgVerifier(background=self.background,
3459 logfile=logfile, pkg=self.pkg)
3460 self._start_task(verifier, self._verifier_exit)
3463 self._verifier_exit(verifier)
3465 def _verifier_exit(self, verifier):
3466 if verifier is not None and \
3467 self._default_exit(verifier) != os.EX_OK:
3468 self._unlock_builddir()
3472 logger = self.logger
3474 pkg_count = self.pkg_count
3475 pkg_path = self._pkg_path
# A freshly fetched package is injected into the bintree so its
# metadata/index are updated.
3477 if self._fetched_pkg:
3478 self._bintree.inject(pkg.cpv, filename=pkg_path)
3480 if self.opts.fetchonly:
3481 self._current_task = None
3482 self.returncode = os.EX_OK
3486 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3487 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3488 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3489 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3490 logger.log(msg, short_msg=short_msg)
3493 settings = self.settings
3494 ebuild_phase = EbuildPhase(background=self.background,
3495 pkg=pkg, phase=phase, scheduler=self.scheduler,
3496 settings=settings, tree=self._tree)
3498 self._start_task(ebuild_phase, self._clean_exit)
3500 def _clean_exit(self, clean_phase):
3501 if self._default_exit(clean_phase) != os.EX_OK:
3502 self._unlock_builddir()
# -- _unpack_metadata (approximate region; def line elided) --
3506 dir_path = self._build_dir.dir_path
3509 shutil.rmtree(dir_path)
3510 except (IOError, OSError), e:
3511 if e.errno != errno.ENOENT:
3515 infloc = self._infloc
3517 pkg_path = self._pkg_path
3520 for mydir in (dir_path, self._image_dir, infloc):
3521 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3522 gid=portage.data.portage_gid, mode=dir_mode)
3524 # This initializes PORTAGE_LOG_FILE.
3525 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3526 self._writemsg_level(">>> Extracting info\n")
# Unpack the xpak metadata segment into build-info, noting any
# missing required keys (the error handling is elided in this view).
3528 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3529 check_missing_metadata = ("CATEGORY", "PF")
3530 missing_metadata = set()
3531 for k in check_missing_metadata:
3532 v = pkg_xpak.getfile(k)
3534 missing_metadata.add(k)
3536 pkg_xpak.unpackinfo(infloc)
3537 for k in missing_metadata:
3545 f = open(os.path.join(infloc, k), 'wb')
3551 # Store the md5sum in the vdb.
3552 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3554 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3558 # This gives bashrc users an opportunity to do various things
3559 # such as remove binary packages after they're installed.
3560 settings = self.settings
3561 settings.setcpv(self.pkg)
3562 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3563 settings.backup_changes("PORTAGE_BINPKG_FILE")
3566 setup_phase = EbuildPhase(background=self.background,
3567 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3568 settings=settings, tree=self._tree)
# setup is routed through the scheduler's setup queue, like the
# source-build path in EbuildExecuter.
3570 setup_phase.addExitListener(self._setup_exit)
3571 self._current_task = setup_phase
3572 self.scheduler.scheduleSetup(setup_phase)
3574 def _setup_exit(self, setup_phase):
3575 if self._default_exit(setup_phase) != os.EX_OK:
3576 self._unlock_builddir()
3580 extractor = BinpkgExtractorAsync(background=self.background,
3581 image_dir=self._image_dir,
3582 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3583 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3584 self._start_task(extractor, self._extractor_exit)
3586 def _extractor_exit(self, extractor):
3587 if self._final_exit(extractor) != os.EX_OK:
3588 self._unlock_builddir()
3589 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3593 def _unlock_builddir(self):
# No lock is taken in pretend/fetchonly modes, so nothing to release.
3594 if self.opts.pretend or self.opts.fetchonly:
3596 portage.elog.elog_process(self.pkg.cpv, self.settings)
3597 self._build_dir.unlock()
# -- install() (def line elided in this view) --
3601 # This gives bashrc users an opportunity to do various things
3602 # such as remove binary packages after they're installed.
3603 settings = self.settings
3604 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3605 settings.backup_changes("PORTAGE_BINPKG_FILE")
3607 merge = EbuildMerge(find_blockers=self.find_blockers,
3608 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3609 pkg=self.pkg, pkg_count=self.pkg_count,
3610 pkg_path=self._pkg_path, scheduler=self.scheduler,
3611 settings=settings, tree=self._tree, world_atom=self.world_atom)
# PORTAGE_BINPKG_FILE is scrubbed and the lock dropped after merge,
# success or not (the try line is elided in this view).
3614 retval = merge.execute()
3616 settings.pop("PORTAGE_BINPKG_FILE", None)
3617 self._unlock_builddir()
# Downloads one binary package (.tbz2) from PORTAGE_BINHOST using the
# user's FETCHCOMMAND/RESUMECOMMAND, with optional distlocks locking
# of the target file and a post-fetch mtime sync against the remote
# index.
# NOTE(review): numerous original lines are elided in this view (def
# _start header, os.unlink of stale files, lock/unlock calls,
# try/except lines); code below is byte-identical to what is visible.
3620 class BinpkgFetcher(SpawnProcess):
3622 __slots__ = ("pkg", "pretend",
3623 "locked", "pkg_path", "_lock_obj")
3625 def __init__(self, **kwargs):
3626 SpawnProcess.__init__(self, **kwargs)
3628 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# -- _start() (def line elided in this view) --
3636 pretend = self.pretend
3637 bintree = pkg.root_config.trees["bintree"]
3638 settings = bintree.settings
3639 use_locks = "distlocks" in settings.features
3640 pkg_path = self.pkg_path
3643 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial file is already known-invalid in the
# bintree; otherwise any pre-existing file is removed (elided).
3646 exists = os.path.exists(pkg_path)
3647 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3648 if not (pretend or resume):
3649 # Remove existing file or broken symlink.
3655 # urljoin doesn't work correctly with
3656 # unrecognized protocols like sftp
3657 if bintree._remote_has_index:
3658 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3660 rel_uri = pkg.cpv + ".tbz2"
3661 uri = bintree._remote_base_uri.rstrip("/") + \
3662 "/" + rel_uri.lstrip("/")
3664 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3665 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and succeeds.
3668 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3669 self.returncode = os.EX_OK
# Build the fetch command: protocol-specific FETCHCOMMAND_<PROTO> /
# RESUMECOMMAND_<PROTO> wins, generic setting is the fallback.
3673 protocol = urlparse.urlparse(uri)[0]
3674 fcmd_prefix = "FETCHCOMMAND"
3676 fcmd_prefix = "RESUMECOMMAND"
3677 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3679 fcmd = settings.get(fcmd_prefix)
3682 "DISTDIR" : os.path.dirname(pkg_path),
3684 "FILE" : os.path.basename(pkg_path)
3687 fetch_env = dict(settings.iteritems())
3688 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3689 for x in shlex.split(fcmd)]
3691 if self.fd_pipes is None:
3693 fd_pipes = self.fd_pipes
3695 # Redirect all output to stdout since some fetchers like
3696 # wget pollute stderr (if portage detects a problem then it
3697 # can send it's own message to stderr).
3698 fd_pipes.setdefault(0, sys.stdin.fileno())
3699 fd_pipes.setdefault(1, sys.stdout.fileno())
3700 fd_pipes.setdefault(2, sys.stdout.fileno())
3702 self.args = fetch_args
3703 self.env = fetch_env
3704 SpawnProcess._start(self)
3706 def _set_returncode(self, wait_retval):
3707 SpawnProcess._set_returncode(self, wait_retval)
3708 if self.returncode == os.EX_OK:
3709 # If possible, update the mtime to match the remote package if
3710 # the fetcher didn't already do it automatically.
3711 bintree = self.pkg.root_config.trees["bintree"]
3712 if bintree._remote_has_index:
3713 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3714 if remote_mtime is not None:
3716 remote_mtime = long(remote_mtime)
3721 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3725 if remote_mtime != local_mtime:
3727 os.utime(self.pkg_path,
3728 (remote_mtime, remote_mtime))
# -- lock() (def line and docstring open elided in this view) --
3737 This raises an AlreadyLocked exception if lock() is called
3738 while a lock is already held. In order to avoid this, call
3739 unlock() or check whether the "locked" attribute is True
3740 or False before calling lock().
3742 if self._lock_obj is not None:
3743 raise self.AlreadyLocked((self._lock_obj,))
3745 self._lock_obj = portage.locks.lockfile(
3746 self.pkg_path, wantnewlockfile=1)
3749 class AlreadyLocked(portage.exception.PortageException):
# -- unlock() (def line elided in this view) --
3753 if self._lock_obj is None:
3755 portage.locks.unlockfile(self._lock_obj)
3756 self._lock_obj = None
# Synchronously digest-checks a fetched binary package inside start(),
# temporarily redirecting stdout/stderr to the log file in background
# mode; a failed check renames the file out of the way.
# NOTE(review): the start() def header, try lines, rval assignments
# and the noiselevel arguments of several writemsg calls are elided in
# this view; code below is byte-identical to what is visible.
3759 class BinpkgVerifier(AsynchronousTask):
3760 __slots__ = ("logfile", "pkg",)
# -- start() (def line and docstring open elided in this view) --
3764 Note: Unlike a normal AsynchronousTask.start() method,
3765 this one does all work is synchronously. The returncode
3766 attribute will be set before it returns.
3770 root_config = pkg.root_config
3771 bintree = root_config.trees["bintree"]
# Save the real streams so they can be restored after the check.
3773 stdout_orig = sys.stdout
3774 stderr_orig = sys.stderr
3776 if self.background and self.logfile is not None:
3777 log_file = open(self.logfile, 'a')
3779 if log_file is not None:
3780 sys.stdout = log_file
3781 sys.stderr = log_file
3783 bintree.digestCheck(pkg)
3784 except portage.exception.FileNotFound:
3785 writemsg("!!! Fetching Binary failed " + \
3786 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3788 except portage.exception.DigestException, e:
3789 writemsg("\n!!! Digest verification failed:\n",
3791 writemsg("!!! %s\n" % e.value[0],
3793 writemsg("!!! Reason: %s\n" % e.value[1],
3795 writemsg("!!! Got: %s\n" % e.value[2],
3797 writemsg("!!! Expected: %s\n" % e.value[3],
# A failed verification moves the corrupt file aside so a re-fetch
# can proceed cleanly.
3800 if rval != os.EX_OK:
3801 pkg_path = bintree.getname(pkg.cpv)
3802 head, tail = os.path.split(pkg_path)
3803 temp_filename = portage._checksum_failure_temp_file(head, tail)
3804 writemsg("File renamed to '%s'\n" % (temp_filename,),
3807 sys.stdout = stdout_orig
3808 sys.stderr = stderr_orig
3809 if log_file is not None:
3812 self.returncode = rval
# Composite task: fetch a binary package in the background, verify its
# digests, then inject it into the binary tree.
3815 class BinpkgPrefetcher(CompositeTask):
3817 __slots__ = ("pkg",) + \
3818 ("pkg_path", "_bintree",)
# _start: kick off the fetch; _fetcher_exit chains into verification.
3821 self._bintree = self.pkg.root_config.trees["bintree"]
3822 fetcher = BinpkgFetcher(background=self.background,
3823 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3824 scheduler=self.scheduler)
3825 self.pkg_path = fetcher.pkg_path
3826 self._start_task(fetcher, self._fetcher_exit)
3828 def _fetcher_exit(self, fetcher):
3830 if self._default_exit(fetcher) != os.EX_OK:
# Fetch succeeded: verify digests before injecting into the bintree.
3834 verifier = BinpkgVerifier(background=self.background,
3835 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3836 self._start_task(verifier, self._verifier_exit)
3838 def _verifier_exit(self, verifier):
3839 if self._default_exit(verifier) != os.EX_OK:
# Verified: register the fetched file with the binary tree index.
3843 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3845 self._current_task = None
3846 self.returncode = os.EX_OK
# Extracts a binary package image asynchronously via a bash pipeline.
3849 class BinpkgExtractorAsync(SpawnProcess):
3851 __slots__ = ("image_dir", "pkg", "pkg_path")
3853 _shell_binary = portage.const.BASH_BINARY
# Decompress the .tbz2 and unpack it into image_dir; both paths are
# shell-quoted before interpolation into the command string.
3856 self.args = [self._shell_binary, "-c",
3857 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3858 (portage._shell_quote(self.pkg_path),
3859 portage._shell_quote(self.image_dir))]
3861 self.env = self.pkg.root_config.settings.environ()
3862 SpawnProcess._start(self)
# One entry of the merge list: wraps either an EbuildBuild or a Binpkg
# task (or an uninstall) for a single package.
# NOTE(review): this extract elides many original lines (method def lines,
# conditionals) -- confirm structure against the full file before editing.
3864 class MergeListItem(CompositeTask):
3867 TODO: For parallel scheduling, everything here needs asynchronous
3868 execution support (start, poll, and wait methods).
3871 __slots__ = ("args_set",
3872 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3873 "find_blockers", "logger", "mtimedb", "pkg",
3874 "pkg_count", "pkg_to_replace", "prefetcher",
3875 "settings", "statusMessage", "world_atom") + \
3881 build_opts = self.build_opts
3884 # uninstall, executed by self.merge()
3885 self.returncode = os.EX_OK
# Pull frequently used attributes into locals for the status message
# and for constructing the build/binpkg task below.
3889 args_set = self.args_set
3890 find_blockers = self.find_blockers
3891 logger = self.logger
3892 mtimedb = self.mtimedb
3893 pkg_count = self.pkg_count
3894 scheduler = self.scheduler
3895 settings = self.settings
3896 world_atom = self.world_atom
3897 ldpath_mtimes = mtimedb["ldpath"]
# Build the "(N of M)" progress message shown to the user.
3899 action_desc = "Emerging"
3901 if pkg.type_name == "binary":
3902 action_desc += " binary"
3904 if build_opts.fetchonly:
3905 action_desc = "Fetching"
3907 msg = "%s (%s of %s) %s" % \
3909 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3910 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3911 colorize("GOOD", pkg.cpv))
# Mention the source repository when it differs from the main tree.
3913 portdb = pkg.root_config.trees["porttree"].dbapi
3914 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3915 if portdir_repo_name:
3916 pkg_repo_name = pkg.metadata.get("repository")
3917 if pkg_repo_name != portdir_repo_name:
3918 if not pkg_repo_name:
3919 pkg_repo_name = "unknown repo"
3920 msg += " from %s" % pkg_repo_name
3923 msg += " %s %s" % (preposition, pkg.root)
3925 if not build_opts.pretend:
3926 self.statusMessage(msg)
3927 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3928 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch on package type: source build vs. binary package install.
3930 if pkg.type_name == "ebuild":
3932 build = EbuildBuild(args_set=args_set,
3933 background=self.background,
3934 config_pool=self.config_pool,
3935 find_blockers=find_blockers,
3936 ldpath_mtimes=ldpath_mtimes, logger=logger,
3937 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3938 prefetcher=self.prefetcher, scheduler=scheduler,
3939 settings=settings, world_atom=world_atom)
3941 self._install_task = build
3942 self._start_task(build, self._default_final_exit)
3945 elif pkg.type_name == "binary":
3947 binpkg = Binpkg(background=self.background,
3948 find_blockers=find_blockers,
3949 ldpath_mtimes=ldpath_mtimes, logger=logger,
3950 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3951 prefetcher=self.prefetcher, settings=settings,
3952 scheduler=scheduler, world_atom=world_atom)
3954 self._install_task = binpkg
3955 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the wrapped install task.
3959 self._install_task.poll()
3960 return self.returncode
3963 self._install_task.wait()
3964 return self.returncode
# merge(): handles the uninstall case, then delegates install() to the
# wrapped task unless we are only fetching or building packages.
3969 build_opts = self.build_opts
3970 find_blockers = self.find_blockers
3971 logger = self.logger
3972 mtimedb = self.mtimedb
3973 pkg_count = self.pkg_count
3974 prefetcher = self.prefetcher
3975 scheduler = self.scheduler
3976 settings = self.settings
3977 world_atom = self.world_atom
3978 ldpath_mtimes = mtimedb["ldpath"]
3981 if not (build_opts.buildpkgonly or \
3982 build_opts.fetchonly or build_opts.pretend):
3984 uninstall = PackageUninstall(background=self.background,
3985 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3986 pkg=pkg, scheduler=scheduler, settings=settings)
3989 retval = uninstall.wait()
3990 if retval != os.EX_OK:
3994 if build_opts.fetchonly or \
3995 build_opts.buildpkgonly:
3996 return self.returncode
3998 retval = self._install_task.install()
# Wraps a MergeListItem's final merge step; prints a status line and
# records the merge() return code.
# NOTE(review): the if/else lines selecting between the Uninstalling and
# Installing branches are elided from this extract.
4001 class PackageMerge(AsynchronousTask):
4003 TODO: Implement asynchronous merge so that the scheduler can
4004 run while a merge is executing.
4007 __slots__ = ("merge",)
4011 pkg = self.merge.pkg
4012 pkg_count = self.merge.pkg_count
4015 action_desc = "Uninstalling"
4016 preposition = "from"
4018 action_desc = "Installing"
4021 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4024 msg += " %s %s" % (preposition, pkg.root)
# Suppress the status line for fetch-only / pretend / buildpkg-only runs.
4026 if not self.merge.build_opts.fetchonly and \
4027 not self.merge.build_opts.pretend and \
4028 not self.merge.build_opts.buildpkgonly:
4029 self.merge.statusMessage(msg)
# The synchronous merge supplies this task's return code.
4031 self.returncode = self.merge.merge()
class DependencyArg(object):
	"""Base class for emerge command-line arguments (atoms, packages,
	sets).  Records the raw argument string and the RootConfig it
	applies to."""

	def __init__(self, arg=None, root_config=None):
		"""
		@param arg: the raw argument string as given on the command line
		@param root_config: the RootConfig this argument applies to
		"""
		# Restored: the extract read self.arg (in __str__ and in the
		# SetArg subclass) without this assignment ever appearing.
		self.arg = arg
		self.root_config = root_config

	def __str__(self):
		# Display the raw argument text.
		return str(self.arg)
class AtomArg(DependencyArg):
	"""An argument given as a dependency atom.  Normalizes the atom to a
	portage.dep.Atom instance and exposes it as a single-atom set."""

	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# Restored: the extract read self.atom below without ever
		# assigning it from the parameter.
		self.atom = atom
		if not isinstance(self.atom, portage.dep.Atom):
			self.atom = portage.dep.Atom(self.atom)
		# A one-element tuple so the argument can be iterated like a set.
		self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""An argument that refers to one specific Package instance,
	matched via an exact "=<cpv>" atom."""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Build an exact-version atom for this package's cpv.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom, )
class SetArg(DependencyArg):
	"""An argument naming a package set (e.g. "@world").  Strips the
	set prefix from the raw argument to obtain the set name."""

	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# Restored: the 'set' parameter was accepted but never stored in
		# the extract; siblings (AtomArg, PackageArg) all define self.set.
		self.set = set
		self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
	"""A dependency edge in the graph: an atom required by a parent at a
	given depth/priority for a given root."""

	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# Apply defaults for the attributes callers commonly omit.
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
			# Restored: the dangling "if" in the extract had no body;
			# an unspecified depth means a direct (top-level) dep.
			self.depth = 0
4073 class BlockerCache(portage.cache.mappings.MutableMapping):
4074 """This caches blockers of installed packages so that dep_check does not
4075 have to be done for every single installed package on every invocation of
4076 emerge. The cache is invalidated whenever it is detected that something
4077 has changed that might alter the results of dep_check() calls:
4078 1) the set of installed packages (including COUNTER) has changed
4079 2) the old-style virtuals have changed
4082 # Number of uncached packages to trigger cache update, since
4083 # it's wasteful to update it for every vdb change.
4084 _cache_threshold = 5
# Lightweight per-package record: the vdb COUNTER at caching time plus
# the package's blocker atoms.
4086 class BlockerData(object):
4088 __slots__ = ("__weakref__", "atoms", "counter")
4090 def __init__(self, counter, atoms):
4091 self.counter = counter
# __init__: load the pickled cache from <myroot>/<CACHE_PATH>/vdb_blockers.pickle,
# then validate it, falling back to a fresh empty cache on any problem.
4094 def __init__(self, myroot, vardb):
4096 self._virtuals = vardb.settings.getvirtuals()
4097 self._cache_filename = os.path.join(myroot,
4098 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4099 self._cache_version = "1"
4100 self._cache_data = None
4101 self._modified = set()
4106 f = open(self._cache_filename, mode='rb')
4107 mypickle = pickle.Unpickler(f)
4108 self._cache_data = mypickle.load()
# A missing or truncated cache file is normal; only complain about
# actual unpickling errors.
4111 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4112 if isinstance(e, pickle.UnpicklingError):
4113 writemsg("!!! Error loading '%s': %s\n" % \
4114 (self._cache_filename, str(e)), noiselevel=-1)
# The cache is usable only if it is a dict of the expected version
# with a "blockers" dict inside.
4117 cache_valid = self._cache_data and \
4118 isinstance(self._cache_data, dict) and \
4119 self._cache_data.get("version") == self._cache_version and \
4120 isinstance(self._cache_data.get("blockers"), dict)
4122 # Validate all the atoms and counters so that
4123 # corruption is detected as soon as possible.
4124 invalid_items = set()
4125 for k, v in self._cache_data["blockers"].iteritems():
4126 if not isinstance(k, basestring):
4127 invalid_items.add(k)
4130 if portage.catpkgsplit(k) is None:
4131 invalid_items.add(k)
4133 except portage.exception.InvalidData:
4134 invalid_items.add(k)
4136 if not isinstance(v, tuple) or \
4138 invalid_items.add(k)
4141 if not isinstance(counter, (int, long)):
4142 invalid_items.add(k)
4144 if not isinstance(atoms, (list, tuple)):
4145 invalid_items.add(k)
4147 invalid_atom = False
4149 if not isinstance(atom, basestring):
4152 if atom[:1] != "!" or \
4153 not portage.isvalidatom(
4154 atom, allow_blockers=True):
4158 invalid_items.add(k)
# Drop every corrupt entry; if nothing survives, start over with an
# empty cache of the current version.
4161 for k in invalid_items:
4162 del self._cache_data["blockers"][k]
4163 if not self._cache_data["blockers"]:
4167 self._cache_data = {"version":self._cache_version}
4168 self._cache_data["blockers"] = {}
4169 self._cache_data["virtuals"] = self._virtuals
4170 self._modified.clear()
4173 """If the current user has permission and the internal blocker cache
4174 has been updated, save it to disk and mark it unmodified. This is called
4175 by emerge after it has processed blockers for all installed packages.
4176 Currently, the cache is only written if the user has superuser
4177 privileges (since that's required to obtain a lock), but all users
4178 have read access and benefit from faster blocker lookups (as long as
4179 the entire cache is still valid). The cache is stored as a pickled
4180 dict object with the following format:
4184 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4185 "virtuals" : vardb.settings.getvirtuals()
# Only write once enough entries changed to be worth the disk I/O.
4188 if len(self._modified) >= self._cache_threshold and \
# atomic_ofstream writes to a temp file and renames, so readers never
# see a partially written cache.
4191 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4192 pickle.dump(self._cache_data, f, -1)
4194 portage.util.apply_secpass_permissions(
4195 self._cache_filename, gid=portage.portage_gid, mode=0644)
# Best-effort: failure to persist the cache is not fatal.
4196 except (IOError, OSError), e:
4198 self._modified.clear()
# Mapping protocol: entries are stored as (counter, atoms-tuple) and
# surfaced as BlockerData instances.
4200 def __setitem__(self, cpv, blocker_data):
4202 Update the cache and mark it as modified for a future call to
4205 @param cpv: Package for which to cache blockers.
4207 @param blocker_data: An object with counter and atoms attributes.
4208 @type blocker_data: BlockerData
# Atoms are stringified so the pickled cache contains plain strings.
4210 self._cache_data["blockers"][cpv] = \
4211 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4212 self._modified.add(cpv)
# __iter__: guard against a cleared cache (seen under python-trace).
4215 if self._cache_data is None:
4216 # triggered by python-trace
4218 return iter(self._cache_data["blockers"])
4220 def __delitem__(self, cpv):
4221 del self._cache_data["blockers"][cpv]
# __getitem__: rehydrate the stored tuple into a BlockerData.
4223 def __getitem__(self, cpv):
4226 @returns: An object with counter and atoms attributes.
4228 return self.BlockerData(*self._cache_data["blockers"][cpv])
# Computes which installed packages block (or are blocked by) a new
# package, using a lazily created FakeVartree.
4230 class BlockerDB(object):
4232 def __init__(self, root_config):
4233 self._root_config = root_config
4234 self._vartree = root_config.trees["vartree"]
4235 self._portdb = root_config.trees["porttree"].dbapi
# Both are created lazily by _get_fake_vartree().
4237 self._dep_check_trees = None
4238 self._fake_vartree = None
# Create the FakeVartree on first use; afterwards just resync it so it
# reflects the current vdb state.
4240 def _get_fake_vartree(self, acquire_lock=0):
4241 fake_vartree = self._fake_vartree
4242 if fake_vartree is None:
4243 fake_vartree = FakeVartree(self._root_config,
4244 acquire_lock=acquire_lock)
4245 self._fake_vartree = fake_vartree
# dep_check() resolves both porttree and vartree lookups against
# the fake vartree.
4246 self._dep_check_trees = { self._vartree.root : {
4247 "porttree" : fake_vartree,
4248 "vartree" : fake_vartree,
4251 fake_vartree.sync(acquire_lock=acquire_lock)
# Returns the set of installed packages that block new_pkg, or that
# new_pkg itself blocks.  Uses BlockerCache to avoid re-running
# dep_check for unchanged installed packages.
4254 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4255 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4256 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4257 settings = self._vartree.settings
# Any cpv left in stale_cache after the loop is no longer installed.
4258 stale_cache = set(blocker_cache)
4259 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4260 dep_check_trees = self._dep_check_trees
4261 vardb = fake_vartree.dbapi
4262 installed_pkgs = list(vardb)
4264 for inst_pkg in installed_pkgs:
4265 stale_cache.discard(inst_pkg.cpv)
4266 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A COUNTER mismatch means the vdb entry changed since caching.
4267 if cached_blockers is not None and \
4268 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4269 cached_blockers = None
4270 if cached_blockers is not None:
4271 blocker_atoms = cached_blockers.atoms
4273 # Use aux_get() to trigger FakeVartree global
4274 # updates on *DEPEND when appropriate.
4275 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Installed packages may have deps that are no longer strictly
# valid, so relax strict checking for the duration of the call.
4277 portage.dep._dep_check_strict = False
4278 success, atoms = portage.dep_check(depstr,
4279 vardb, settings, myuse=inst_pkg.use.enabled,
4280 trees=dep_check_trees, myroot=inst_pkg.root)
4282 portage.dep._dep_check_strict = True
4284 pkg_location = os.path.join(inst_pkg.root,
4285 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4286 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4287 (pkg_location, atoms), noiselevel=-1)
# Only the blocker ("!...") atoms are cached.
4290 blocker_atoms = [atom for atom in atoms \
4291 if atom.startswith("!")]
4292 blocker_atoms.sort()
4293 counter = long(inst_pkg.metadata["COUNTER"])
4294 blocker_cache[inst_pkg.cpv] = \
4295 blocker_cache.BlockerData(counter, blocker_atoms)
# Drop cache entries for packages that were uninstalled.
4296 for cpv in stale_cache:
4297 del blocker_cache[cpv]
4298 blocker_cache.flush()
# Map each blocker atom back to the installed package(s) declaring it.
4300 blocker_parents = digraph()
4302 for pkg in installed_pkgs:
4303 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4304 blocker_atom = blocker_atom.lstrip("!")
4305 blocker_atoms.append(blocker_atom)
4306 blocker_parents.add(blocker_atom, pkg)
4308 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4309 blocking_pkgs = set()
4310 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4311 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4313 # Check for blockers in the other direction.
4314 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4316 portage.dep._dep_check_strict = False
4317 success, atoms = portage.dep_check(depstr,
4318 vardb, settings, myuse=new_pkg.use.enabled,
4319 trees=dep_check_trees, myroot=new_pkg.root)
4321 portage.dep._dep_check_strict = True
4323 # We should never get this far with invalid deps.
4324 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4327 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4330 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
# next() raising StopIteration means this installed package is not
# matched by any of new_pkg's blockers.
4331 for inst_pkg in installed_pkgs:
4333 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4334 except (portage.exception.InvalidDependString, StopIteration):
4336 blocking_pkgs.add(inst_pkg)
4338 return blocking_pkgs
# Prints a detailed error for an invalid/corrupt dependency string, with
# different advice depending on whether the package is already installed.
4340 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4342 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4343 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4344 p_type, p_root, p_key, p_status = parent_node
# "nomerge" == already installed: point the user at the vdb *DEPEND files.
4346 if p_status == "nomerge":
4347 category, pf = portage.catsplit(p_key)
4348 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4349 msg.append("Portage is unable to process the dependencies of the ")
4350 msg.append("'%s' package. " % p_key)
4351 msg.append("In order to correct this problem, the package ")
4352 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4353 msg.append("As a temporary workaround, the --nodeps option can ")
4354 msg.append("be used to ignore all dependencies. For reference, ")
4355 msg.append("the problematic dependencies can be found in the ")
4356 msg.append("*DEPEND files located in '%s/'." % pkg_location)
# Otherwise the ebuild itself is broken: tell the user to report it.
4358 msg.append("This package can not be installed. ")
4359 msg.append("Please notify the '%s' package maintainer " % p_key)
4360 msg.append("about this problem.")
4362 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4363 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4365 class PackageVirtualDbapi(portage.dbapi):
4367 A dbapi-like interface class that represents the state of the installed
4368 package database as new packages are installed, replacing any packages
4369 that previously existed in the same slot. The main difference between
4370 this class and fakedbapi is that this one uses Package instances
4371 internally (passed in via cpv_inject() and cpv_remove() calls).
4373 def __init__(self, settings):
4374 portage.dbapi.__init__(self)
4375 self.settings = settings
# _match_cache memoizes match() results per dependency string.
# NOTE(review): this extract elides the lines initializing _cp_map and
# _cpv_map, which the methods below rely on -- confirm in the full file.
4376 self._match_cache = {}
# clear(): drop all packages and cached matches.
4382 Remove all packages.
4386 self._cp_map.clear()
4387 self._cpv_map.clear()
# copy(): shallow-copy the maps, but duplicate each cp list so the
# copy's lists can be mutated independently.
4390 obj = PackageVirtualDbapi(self.settings)
4391 obj._match_cache = self._match_cache.copy()
4392 obj._cp_map = self._cp_map.copy()
4393 for k, v in obj._cp_map.iteritems():
4394 obj._cp_map[k] = v[:]
4395 obj._cpv_map = self._cpv_map.copy()
4399 return self._cpv_map.itervalues()
# __contains__ / get(): identity-sensitive lookups keyed by cpv.
4401 def __contains__(self, item):
4402 existing = self._cpv_map.get(item.cpv)
4403 if existing is not None and \
4408 def get(self, item, default=None):
4409 cpv = getattr(item, "cpv", None)
# Fall back to treating item as a (type_name, root, cpv, operation)
# tuple when it has no cpv attribute.
4413 type_name, root, cpv, operation = item
4415 existing = self._cpv_map.get(cpv)
4416 if existing is not None and \
4421 def match_pkgs(self, atom):
4422 return [self._cpv_map[cpv] for cpv in self.match(atom)]
# Invalidate derived caches after any inject/remove.
4424 def _clear_cache(self):
4425 if self._categories is not None:
4426 self._categories = None
4427 if self._match_cache:
4428 self._match_cache = {}
# match(): memoized wrapper around the base class matcher.
4430 def match(self, origdep, use_cache=1):
4431 result = self._match_cache.get(origdep)
4432 if result is not None:
4434 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4435 self._match_cache[origdep] = result
4438 def cpv_exists(self, cpv):
4439 return cpv in self._cpv_map
4441 def cp_list(self, mycp, use_cache=1):
4442 cachelist = self._match_cache.get(mycp)
4443 # cp_list() doesn't expand old-style virtuals
4444 if cachelist and cachelist[0].startswith(mycp):
4446 cpv_list = self._cp_map.get(mycp)
4447 if cpv_list is None:
4450 cpv_list = [pkg.cpv for pkg in cpv_list]
4451 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for virtuals, which may expand later.
4452 if not (not cpv_list and mycp.startswith("virtual/")):
4453 self._match_cache[mycp] = cpv_list
4457 return list(self._cp_map)
4460 return list(self._cpv_map)
# cpv_inject(): add a Package, evicting any existing package occupying
# the same cpv or the same slot.
4462 def cpv_inject(self, pkg):
4463 cp_list = self._cp_map.get(pkg.cp)
4466 self._cp_map[pkg.cp] = cp_list
4467 e_pkg = self._cpv_map.get(pkg.cpv)
4468 if e_pkg is not None:
4471 self.cpv_remove(e_pkg)
4472 for e_pkg in cp_list:
4473 if e_pkg.slot_atom == pkg.slot_atom:
4476 self.cpv_remove(e_pkg)
4479 self._cpv_map[pkg.cpv] = pkg
4482 def cpv_remove(self, pkg):
4483 old_pkg = self._cpv_map.get(pkg.cpv)
4486 self._cp_map[pkg.cp].remove(pkg)
4487 del self._cpv_map[pkg.cpv]
# aux_get/aux_update operate on the Package's metadata mapping; missing
# keys are returned as empty strings.
4490 def aux_get(self, cpv, wants):
4491 metadata = self._cpv_map[cpv].metadata
4492 return [metadata.get(x, "") for x in wants]
4494 def aux_update(self, cpv, values):
4495 self._cpv_map[cpv].metadata.update(values)
4498 class depgraph(object):
4500 pkg_tree_map = RootConfig.pkg_tree_map
4502 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4504 def __init__(self, settings, trees, myopts, myparams, spinner):
4505 self.settings = settings
4506 self.target_root = settings["ROOT"]
4507 self.myopts = myopts
4508 self.myparams = myparams
4510 if settings.get("PORTAGE_DEBUG", "") == "1":
4512 self.spinner = spinner
4513 self._running_root = trees["/"]["root_config"]
4514 self._opts_no_restart = Scheduler._opts_no_restart
4515 self.pkgsettings = {}
4516 # Maps slot atom to package for each Package added to the graph.
4517 self._slot_pkg_map = {}
4518 # Maps nodes to the reasons they were selected for reinstallation.
4519 self._reinstall_nodes = {}
4522 self._trees_orig = trees
4524 # Contains a filtered view of preferred packages that are selected
4525 # from available repositories.
4526 self._filtered_trees = {}
4527 # Contains installed packages and new packages that have been added
4529 self._graph_trees = {}
4530 # All Package instances
4531 self._pkg_cache = {}
4532 for myroot in trees:
4533 self.trees[myroot] = {}
4534 # Create a RootConfig instance that references
4535 # the FakeVartree instead of the real one.
4536 self.roots[myroot] = RootConfig(
4537 trees[myroot]["vartree"].settings,
4539 trees[myroot]["root_config"].setconfig)
4540 for tree in ("porttree", "bintree"):
4541 self.trees[myroot][tree] = trees[myroot][tree]
4542 self.trees[myroot]["vartree"] = \
4543 FakeVartree(trees[myroot]["root_config"],
4544 pkg_cache=self._pkg_cache)
4545 self.pkgsettings[myroot] = portage.config(
4546 clone=self.trees[myroot]["vartree"].settings)
4547 self._slot_pkg_map[myroot] = {}
4548 vardb = self.trees[myroot]["vartree"].dbapi
4549 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4550 "--buildpkgonly" not in self.myopts
4551 # This fakedbapi instance will model the state that the vdb will
4552 # have after new packages have been installed.
4553 fakedb = PackageVirtualDbapi(vardb.settings)
4554 if preload_installed_pkgs:
4556 self.spinner.update()
4557 # This triggers metadata updates via FakeVartree.
4558 vardb.aux_get(pkg.cpv, [])
4559 fakedb.cpv_inject(pkg)
4561 # Now that the vardb state is cached in our FakeVartree,
4562 # we won't be needing the real vartree cache for awhile.
4563 # To make some room on the heap, clear the vardbapi
4565 trees[myroot]["vartree"].dbapi._clear_cache()
4568 self.mydbapi[myroot] = fakedb
4571 graph_tree.dbapi = fakedb
4572 self._graph_trees[myroot] = {}
4573 self._filtered_trees[myroot] = {}
4574 # Substitute the graph tree for the vartree in dep_check() since we
4575 # want atom selections to be consistent with package selections
4576 # have already been made.
4577 self._graph_trees[myroot]["porttree"] = graph_tree
4578 self._graph_trees[myroot]["vartree"] = graph_tree
4579 def filtered_tree():
4581 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4582 self._filtered_trees[myroot]["porttree"] = filtered_tree
4584 # Passing in graph_tree as the vartree here could lead to better
4585 # atom selections in some cases by causing atoms for packages that
4586 # have been added to the graph to be preferred over other choices.
4587 # However, it can trigger atom selections that result in
4588 # unresolvable direct circular dependencies. For example, this
4589 # happens with gwydion-dylan which depends on either itself or
4590 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4591 # gwydion-dylan-bin needs to be selected in order to avoid a
4592 # an unresolvable direct circular dependency.
4594 # To solve the problem described above, pass in "graph_db" so that
4595 # packages that have been added to the graph are distinguishable
4596 # from other available packages and installed packages. Also, pass
4597 # the parent package into self._select_atoms() calls so that
4598 # unresolvable direct circular dependencies can be detected and
4599 # avoided when possible.
4600 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4601 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4604 portdb = self.trees[myroot]["porttree"].dbapi
4605 bindb = self.trees[myroot]["bintree"].dbapi
4606 vardb = self.trees[myroot]["vartree"].dbapi
4607 # (db, pkg_type, built, installed, db_keys)
4608 if "--usepkgonly" not in self.myopts:
4609 db_keys = list(portdb._aux_cache_keys)
4610 dbs.append((portdb, "ebuild", False, False, db_keys))
4611 if "--usepkg" in self.myopts:
4612 db_keys = list(bindb._aux_cache_keys)
4613 dbs.append((bindb, "binary", True, False, db_keys))
4614 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4615 dbs.append((vardb, "installed", True, True, db_keys))
4616 self._filtered_trees[myroot]["dbs"] = dbs
4617 if "--usepkg" in self.myopts:
4618 self.trees[myroot]["bintree"].populate(
4619 "--getbinpkg" in self.myopts,
4620 "--getbinpkgonly" in self.myopts)
4623 self.digraph=portage.digraph()
4624 # contains all sets added to the graph
4626 # contains atoms given as arguments
4627 self._sets["args"] = InternalPackageSet()
4628 # contains all atoms from all sets added to the graph, including
4629 # atoms given as arguments
4630 self._set_atoms = InternalPackageSet()
4631 self._atom_arg_map = {}
4632 # contains all nodes pulled in by self._set_atoms
4633 self._set_nodes = set()
4634 # Contains only Blocker -> Uninstall edges
4635 self._blocker_uninstalls = digraph()
4636 # Contains only Package -> Blocker edges
4637 self._blocker_parents = digraph()
4638 # Contains only irrelevant Package -> Blocker edges
4639 self._irrelevant_blockers = digraph()
4640 # Contains only unsolvable Package -> Blocker edges
4641 self._unsolvable_blockers = digraph()
4642 # Contains all Blocker -> Blocked Package edges
4643 self._blocked_pkgs = digraph()
4644 # Contains world packages that have been protected from
4645 # uninstallation but may not have been added to the graph
4646 # if the graph is not complete yet.
4647 self._blocked_world_pkgs = {}
4648 self._slot_collision_info = {}
4649 # Slot collision nodes are not allowed to block other packages since
4650 # blocker validation is only able to account for one package per slot.
4651 self._slot_collision_nodes = set()
4652 self._parent_atoms = {}
4653 self._slot_conflict_parent_atoms = set()
4654 self._serialized_tasks_cache = None
4655 self._scheduler_graph = None
4656 self._displayed_list = None
4657 self._pprovided_args = []
4658 self._missing_args = []
4659 self._masked_installed = set()
4660 self._unsatisfied_deps_for_display = []
4661 self._unsatisfied_blockers_for_display = None
4662 self._circular_deps_for_display = None
4663 self._dep_stack = []
4664 self._unsatisfied_deps = []
4665 self._initially_unsatisfied_deps = []
4666 self._ignored_deps = []
4667 self._required_set_names = set(["system", "world"])
4668 self._select_atoms = self._select_atoms_highest_available
4669 self._select_package = self._select_pkg_highest_available
4670 self._highest_pkg_cache = {}
4672 def _show_slot_collision_notice(self):
4673 """Show an informational message advising the user to mask one of the
4674 the packages. In some cases it may be possible to resolve this
4675 automatically, but support for backtracking (removal nodes that have
4676 already been selected) will be required in order to handle all possible
4680 if not self._slot_collision_info:
4683 self._show_merge_list()
4686 msg.append("\n!!! Multiple package instances within a single " + \
4687 "package slot have been pulled\n")
4688 msg.append("!!! into the dependency graph, resulting" + \
4689 " in a slot conflict:\n\n")
4691 # Max number of parents shown, to avoid flooding the display.
4693 explanation_columns = 70
4695 for (slot_atom, root), slot_nodes \
4696 in self._slot_collision_info.iteritems():
4697 msg.append(str(slot_atom))
4700 for node in slot_nodes:
4702 msg.append(str(node))
4703 parent_atoms = self._parent_atoms.get(node)
4706 # Prefer conflict atoms over others.
4707 for parent_atom in parent_atoms:
4708 if len(pruned_list) >= max_parents:
4710 if parent_atom in self._slot_conflict_parent_atoms:
4711 pruned_list.add(parent_atom)
4713 # If this package was pulled in by conflict atoms then
4714 # show those alone since those are the most interesting.
4716 # When generating the pruned list, prefer instances
4717 # of DependencyArg over instances of Package.
4718 for parent_atom in parent_atoms:
4719 if len(pruned_list) >= max_parents:
4721 parent, atom = parent_atom
4722 if isinstance(parent, DependencyArg):
4723 pruned_list.add(parent_atom)
4724 # Prefer Packages instances that themselves have been
4725 # pulled into collision slots.
4726 for parent_atom in parent_atoms:
4727 if len(pruned_list) >= max_parents:
4729 parent, atom = parent_atom
4730 if isinstance(parent, Package) and \
4731 (parent.slot_atom, parent.root) \
4732 in self._slot_collision_info:
4733 pruned_list.add(parent_atom)
4734 for parent_atom in parent_atoms:
4735 if len(pruned_list) >= max_parents:
4737 pruned_list.add(parent_atom)
4738 omitted_parents = len(parent_atoms) - len(pruned_list)
4739 parent_atoms = pruned_list
4740 msg.append(" pulled in by\n")
4741 for parent_atom in parent_atoms:
4742 parent, atom = parent_atom
4743 msg.append(2*indent)
4744 if isinstance(parent,
4745 (PackageArg, AtomArg)):
4746 # For PackageArg and AtomArg types, it's
4747 # redundant to display the atom attribute.
4748 msg.append(str(parent))
4750 # Display the specific atom from SetArg or
4752 msg.append("%s required by %s" % (atom, parent))
4755 msg.append(2*indent)
4756 msg.append("(and %d more)\n" % omitted_parents)
4758 msg.append(" (no parents)\n")
4760 explanation = self._slot_conflict_explanation(slot_nodes)
4763 msg.append(indent + "Explanation:\n\n")
4764 for line in textwrap.wrap(explanation, explanation_columns):
4765 msg.append(2*indent + line + "\n")
4768 sys.stderr.write("".join(msg))
4771 explanations_for_all = explanations == len(self._slot_collision_info)
4773 if explanations_for_all or "--quiet" in self.myopts:
4777 msg.append("It may be possible to solve this problem ")
4778 msg.append("by using package.mask to prevent one of ")
4779 msg.append("those packages from being selected. ")
4780 msg.append("However, it is also possible that conflicting ")
4781 msg.append("dependencies exist such that they are impossible to ")
4782 msg.append("satisfy simultaneously. If such a conflict exists in ")
4783 msg.append("the dependencies of two different packages, then those ")
4784 msg.append("packages can not be installed simultaneously.")
4786 from formatter import AbstractFormatter, DumbWriter
4787 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4789 f.add_flowing_data(x)
4793 msg.append("For more information, see MASKED PACKAGES ")
4794 msg.append("section in the emerge man page or refer ")
4795 msg.append("to the Gentoo Handbook.")
4797 f.add_flowing_data(x)
4801 def _slot_conflict_explanation(self, slot_nodes):
4803 When a slot conflict occurs due to USE deps, there are a few
4804 different cases to consider:
4806 1) New USE are correctly set but --newuse wasn't requested so an
4807 installed package with incorrect USE happened to get pulled
4808 into graph before the new one.
4810 2) New USE are incorrectly set but an installed package has correct
4811 USE so it got pulled into the graph, and a new instance also got
4812 pulled in due to --newuse or an upgrade.
4814 3) Multiple USE deps exist that can't be satisfied simultaneously,
4815 and multiple package instances got pulled into the same slot to
4816 satisfy the conflicting deps.
4818 Currently, explanations and suggested courses of action are generated
4819 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4822 if len(slot_nodes) != 2:
4823 # Suggestions are only implemented for
4824 # conflicts between two packages.
4827 all_conflict_atoms = self._slot_conflict_parent_atoms
4829 matched_atoms = None
4830 unmatched_node = None
4831 for node in slot_nodes:
4832 parent_atoms = self._parent_atoms.get(node)
4833 if not parent_atoms:
4834 # Normally, there are always parent atoms. If there are
4835 # none then something unexpected is happening and there's
4836 # currently no suggestion for this case.
4838 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4839 for parent_atom in conflict_atoms:
4840 parent, atom = parent_atom
4842 # Suggestions are currently only implemented for cases
4843 # in which all conflict atoms have USE deps.
4846 if matched_node is not None:
4847 # If conflict atoms match multiple nodes
4848 # then there's no suggestion.
4851 matched_atoms = conflict_atoms
4853 if unmatched_node is not None:
4854 # Neither node is matched by conflict atoms, and
4855 # there is no suggestion for this case.
4857 unmatched_node = node
4859 if matched_node is None or unmatched_node is None:
4860 # This shouldn't happen.
4863 if unmatched_node.installed and not matched_node.installed:
4864 return "New USE are correctly set, but --newuse wasn't" + \
4865 " requested, so an installed package with incorrect USE " + \
4866 "happened to get pulled into the dependency graph. " + \
4867 "In order to solve " + \
4868 "this, either specify the --newuse option or explicitly " + \
4869 " reinstall '%s'." % matched_node.slot_atom
4871 if matched_node.installed and not unmatched_node.installed:
4872 atoms = sorted(set(atom for parent, atom in matched_atoms))
4873 explanation = ("New USE for '%s' are incorrectly set. " + \
4874 "In order to solve this, adjust USE to satisfy '%s'") % \
4875 (matched_node.slot_atom, atoms[0])
4877 for atom in atoms[1:-1]:
4878 explanation += ", '%s'" % (atom,)
4881 explanation += " and '%s'" % (atoms[-1],)
4887 def _process_slot_conflicts(self):
4889 Process slot conflict data to identify specific atoms which
4890 lead to conflict. These atoms only match a subset of the
4891 packages that have been pulled into a given slot.
4893 for (slot_atom, root), slot_nodes \
4894 in self._slot_collision_info.iteritems():
4896 all_parent_atoms = set()
4897 for pkg in slot_nodes:
4898 parent_atoms = self._parent_atoms.get(pkg)
4899 if not parent_atoms:
4901 all_parent_atoms.update(parent_atoms)
4903 for pkg in slot_nodes:
4904 parent_atoms = self._parent_atoms.get(pkg)
4905 if parent_atoms is None:
4906 parent_atoms = set()
4907 self._parent_atoms[pkg] = parent_atoms
4908 for parent_atom in all_parent_atoms:
4909 if parent_atom in parent_atoms:
4911 # Use package set for matching since it will match via
4912 # PROVIDE when necessary, while match_from_list does not.
4913 parent, atom = parent_atom
4914 atom_set = InternalPackageSet(
4915 initial_atoms=(atom,))
4916 if atom_set.findAtomForPackage(pkg):
4917 parent_atoms.add(parent_atom)
4919 self._slot_conflict_parent_atoms.add(parent_atom)
4921 def _reinstall_for_flags(self, forced_flags,
4922 orig_use, orig_iuse, cur_use, cur_iuse):
4923 """Return a set of flags that trigger reinstallation, or None if there
4924 are no such flags."""
4925 if "--newuse" in self.myopts:
4926 flags = set(orig_iuse.symmetric_difference(
4927 cur_iuse).difference(forced_flags))
4928 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4929 cur_iuse.intersection(cur_use)))
4932 elif "changed-use" == self.myopts.get("--reinstall"):
4933 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4934 cur_iuse.intersection(cur_use))
4939 def _create_graph(self, allow_unsatisfied=False):
4940 dep_stack = self._dep_stack
4942 self.spinner.update()
4943 dep = dep_stack.pop()
4944 if isinstance(dep, Package):
4945 if not self._add_pkg_deps(dep,
4946 allow_unsatisfied=allow_unsatisfied):
4949 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4953 def _add_dep(self, dep, allow_unsatisfied=False):
4954 debug = "--debug" in self.myopts
4955 buildpkgonly = "--buildpkgonly" in self.myopts
4956 nodeps = "--nodeps" in self.myopts
4957 empty = "empty" in self.myparams
4958 deep = "deep" in self.myparams
4959 update = "--update" in self.myopts and dep.depth <= 1
4961 if not buildpkgonly and \
4963 dep.parent not in self._slot_collision_nodes:
4964 if dep.parent.onlydeps:
4965 # It's safe to ignore blockers if the
4966 # parent is an --onlydeps node.
4968 # The blocker applies to the root where
4969 # the parent is or will be installed.
4970 blocker = Blocker(atom=dep.atom,
4971 eapi=dep.parent.metadata["EAPI"],
4972 root=dep.parent.root)
4973 self._blocker_parents.add(blocker, dep.parent)
4975 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4976 onlydeps=dep.onlydeps)
4978 if dep.priority.optional:
4979 # This could be an unecessary build-time dep
4980 # pulled in by --with-bdeps=y.
4982 if allow_unsatisfied:
4983 self._unsatisfied_deps.append(dep)
4985 self._unsatisfied_deps_for_display.append(
4986 ((dep.root, dep.atom), {"myparent":dep.parent}))
4988 # In some cases, dep_check will return deps that shouldn't
4989 # be proccessed any further, so they are identified and
4990 # discarded here. Try to discard as few as possible since
4991 # discarded dependencies reduce the amount of information
4992 # available for optimization of merge order.
4993 if dep.priority.satisfied and \
4994 not dep_pkg.installed and \
4995 not (existing_node or empty or deep or update):
4997 if dep.root == self.target_root:
4999 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5000 except StopIteration:
5002 except portage.exception.InvalidDependString:
5003 if not dep_pkg.installed:
5004 # This shouldn't happen since the package
5005 # should have been masked.
5008 self._ignored_deps.append(dep)
5011 if not self._add_pkg(dep_pkg, dep):
	def _add_pkg(self, pkg, dep):
		# NOTE(review): this copy of _add_pkg appears to have lost several
		# statements (e.g. the 'try:' lines pairing the visible 'except'
		# clauses, an 'if existing_node:' guard, and 'return' statements);
		# compare against upstream portage before relying on it.
		myparent = dep.parent
		priority = dep.priority
		if priority is None:
			priority = DepPriority()
		"""
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
		Think --onlydeps, we need to ignore packages in that case.
		"""
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge
		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self.pkgsettings[pkg.root]

			# Collect command-line argument atoms matching this package.
			arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString, e:
			if not pkg.installed:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		if not pkg.onlydeps:
			if not pkg.installed and \
				"empty" not in self.myparams and \
				vardbapi.match(pkg.slot_atom):
				# Increase the priority of dependencies on packages that
				# are being rebuilt. This optimizes merge order so that
				# dependencies are rebuilt/updated as soon as possible,
				# which is needed especially when emerge is called by
				# revdep-rebuild since dependencies may be affected by ABI
				# breakage that has rendered them useless. Don't adjust
				# priority here when in "empty" mode since all packages
				# are being merged in that case.
				priority.rebuild = True

			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
			slot_collision = False
				existing_node_matches = pkg.cpv == existing_node.cpv
				if existing_node_matches and \
					pkg != existing_node and \
					dep.atom is not None:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					atom_set = InternalPackageSet(initial_atoms=[dep.atom])
					if not atom_set.findAtomForPackage(existing_node):
						existing_node_matches = False
				if existing_node_matches:
					# The existing node can be reused.
					for parent_atom in arg_atoms:
						parent, atom = parent_atom
						self.digraph.add(existing_node, parent,
						self._add_parent_atom(existing_node, parent_atom)
					# If a direct circular dependency is not an unsatisfied
					# buildtime dependency then drop it here since otherwise
					# it can skew the merge order calculation in an unwanted
					# way.
					if existing_node != myparent or \
						(priority.buildtime and not priority.satisfied):
						self.digraph.addnode(existing_node, myparent,
						if dep.atom is not None and dep.parent is not None:
							self._add_parent_atom(existing_node,
								(dep.parent, dep.atom))
					# A slot collision has occurred. Sometimes this coincides
					# with unresolvable blockers, so the slot collision will be
					# shown later if there are no unresolvable blockers.
					self._add_slot_conflict(pkg)
					slot_collision = True

				# Now add this node to the graph so that self.display()
				# can show use flags and --tree portage.output. This node is
				# only being partially added to the graph. It must not be
				# allowed to interfere with the other nodes that have been
				# added. Do not overwrite data for existing nodes in
				# self.mydbapi since that data will be used for blocker
				# Even though the graph is now invalid, continue to process
				# dependencies so that things like --fetchonly can still
				# function despite collisions.
			elif not previously_added:
				self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
				self.mydbapi[pkg.root].cpv_inject(pkg)
				self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

			if not pkg.installed:
				# Allow this package to satisfy old-style virtuals in case it
				# doesn't already. Any pre-existing providers will be preferred
					pkgsettings.setinst(pkg.cpv, pkg.metadata)
					# For consistency, also update the global virtuals.
					settings = self.roots[pkg.root].settings
					settings.setinst(pkg.cpv, pkg.metadata)
				except portage.exception.InvalidDependString, e:
					show_invalid_depstring_notice(
						pkg, pkg.metadata["PROVIDE"], str(e))

			self._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

			for parent_atom in arg_atoms:
				parent, atom = parent_atom
				self.digraph.add(pkg, parent, priority=priority)
				self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		We want to go deeper on a few occasions:
		Installing package A, we need to make sure package A's deps are met.
		emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		dep_stack = self._dep_stack
		if "recurse" not in self.myparams:
		elif pkg.installed and \
			"deep" not in self.myparams:
			# Installed packages without --deep: queue deps but only as
			# "ignored" deps, to be revisited if needed later.
			dep_stack = self._ignored_deps

		self.spinner.update()

		if not previously_added:
			dep_stack.append(pkg)
5183 def _add_parent_atom(self, pkg, parent_atom):
5184 parent_atoms = self._parent_atoms.get(pkg)
5185 if parent_atoms is None:
5186 parent_atoms = set()
5187 self._parent_atoms[pkg] = parent_atoms
5188 parent_atoms.add(parent_atom)
5190 def _add_slot_conflict(self, pkg):
5191 self._slot_collision_nodes.add(pkg)
5192 slot_key = (pkg.slot_atom, pkg.root)
5193 slot_nodes = self._slot_collision_info.get(slot_key)
5194 if slot_nodes is None:
5196 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5197 self._slot_collision_info[slot_key] = slot_nodes
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		# NOTE(review): this excerpt appears to be missing several lines
		# (e.g. 'edepend={}', the 'for k in depkeys:' header, 'try:' lines
		# and 'return' statements; 'jbigkey'/'myroot'/'mykey' are referenced
		# but their assignments are not visible); verify against upstream
		# portage before relying on it.
		mytype = pkg.type_name
		metadata = pkg.metadata
		myuse = pkg.use.enabled
		depth = pkg.depth + 1
		removal_action = "remove" in self.myparams

		# Collect the raw dependency strings for each dependency phase.
		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
			edepend[k] = metadata[k]

		if not pkg.built and \
			"--buildpkgonly" in self.myopts and \
			"deep" not in self.myparams and \
			"empty" not in self.myparams:
			# --buildpkgonly without deep/empty: runtime deps of a package
			# that is only built (not merged) need not be satisfied.
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""
		bdeps_optional = False

		if pkg.built and not removal_action:
			if self.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				bdeps_optional = True
				# built packages do not have build time dependencies.
				edepend["DEPEND"] = ""

		if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
			edepend["DEPEND"] = ""

			# Build-time deps are resolved against "/", runtime deps
			# against the target root.
			("/", edepend["DEPEND"],
				self._priority(buildtime=(not bdeps_optional),
				optional=bdeps_optional)),
			(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
			(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

		debug = "--debug" in self.myopts
		strict = mytype != "installed"
		for dep_root, dep_string, dep_priority in deps:
				print "Parent: ", jbigkey
				print "Depstring:", dep_string
				print "Priority:", dep_priority
			vardb = self.roots[dep_root].trees["vartree"].dbapi
				selected_atoms = self._select_atoms(dep_root,
					dep_string, myuse=myuse, parent=pkg, strict=strict,
					priority=dep_priority)
			except portage.exception.InvalidDependString, e:
				show_invalid_depstring_notice(jbigkey, dep_string, str(e))
				print "Candidates:", selected_atoms

			for atom in selected_atoms:
					atom = portage.dep.Atom(atom)

					mypriority = dep_priority.copy()
					if not atom.blocker and vardb.match(atom):
						# An installed package already satisfies this dep.
						mypriority.satisfied = True

					if not self._add_dep(Dependency(atom=atom,
						blocker=atom.blocker, depth=depth, parent=pkg,
						priority=mypriority, root=dep_root),
						allow_unsatisfied=allow_unsatisfied):

				except portage.exception.InvalidAtom, e:
					show_invalid_depstring_notice(
						pkg, dep_string, str(e))
					if not pkg.installed:

				print "Exiting...", jbigkey
		except portage.exception.AmbiguousPackageName, e:
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
				portage.writemsg(" %s\n" % cpv, noiselevel=-1)
			portage.writemsg("\n", noiselevel=-1)
			if mytype == "binary":
				"!!! This binary package cannot be installed: '%s'\n" % \
				mykey, noiselevel=-1)
			elif mytype == "ebuild":
				portdb = self.roots[myroot].trees["porttree"].dbapi
				myebuild, mylocation = portdb.findname2(mykey)
				portage.writemsg("!!! This ebuild cannot be installed: " + \
					"'%s'\n" % myebuild, noiselevel=-1)
			portage.writemsg("!!! Please notify the package maintainer " + \
				"that atoms must be fully-qualified.\n", noiselevel=-1)
5316 def _priority(self, **kwargs):
5317 if "remove" in self.myparams:
5318 priority_constructor = UnmergeDepPriority
5320 priority_constructor = DepPriority
5321 return priority_constructor(**kwargs)
5323 def _dep_expand(self, root_config, atom_without_category):
5325 @param root_config: a root config instance
5326 @type root_config: RootConfig
5327 @param atom_without_category: an atom without a category component
5328 @type atom_without_category: String
5330 @returns: a list of atoms containing categories (possibly empty)
5332 null_cp = portage.dep_getkey(insert_category_into_atom(
5333 atom_without_category, "null"))
5334 cat, atom_pn = portage.catsplit(null_cp)
5336 dbs = self._filtered_trees[root_config.root]["dbs"]
5338 for db, pkg_type, built, installed, db_keys in dbs:
5339 for cat in db.categories:
5340 if db.cp_list("%s/%s" % (cat, atom_pn)):
5344 for cat in categories:
5345 deps.append(insert_category_into_atom(
5346 atom_without_category, cat))
5349 def _have_new_virt(self, root, atom_cp):
5351 for db, pkg_type, built, installed, db_keys in \
5352 self._filtered_trees[root]["dbs"]:
5353 if db.cp_list(atom_cp):
5358 def _iter_atoms_for_pkg(self, pkg):
5359 # TODO: add multiple $ROOT support
5360 if pkg.root != self.target_root:
5362 atom_arg_map = self._atom_arg_map
5363 root_config = self.roots[pkg.root]
5364 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5365 atom_cp = portage.dep_getkey(atom)
5366 if atom_cp != pkg.cp and \
5367 self._have_new_virt(pkg.root, atom_cp):
5369 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5370 visible_pkgs.reverse() # descending order
5372 for visible_pkg in visible_pkgs:
5373 if visible_pkg.cp != atom_cp:
5375 if pkg >= visible_pkg:
5376 # This is descending order, and we're not
5377 # interested in any versions <= pkg given.
5379 if pkg.slot_atom != visible_pkg.slot_atom:
5380 higher_slot = visible_pkg
5382 if higher_slot is not None:
5384 for arg in atom_arg_map[(atom, pkg.root)]:
5385 if isinstance(arg, PackageArg) and \
	def select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
		appropriate depgraph and return a favorite list."""
		# NOTE(review): this copy of select_files appears to have lost many
		# lines (loop headers such as 'for x in myfiles:' / 'while args:',
		# 'try:' lines pairing the visible 'except' clauses, 'else:'
		# branches, 'continue'/'break'/'return' statements and several
		# assignments such as 'args = []', 'lookup_owners = []',
		# 'myfavorites=[]' and 'missing=0'); verify against upstream
		# portage before relying on it.
		debug = "--debug" in self.myopts
		root_config = self.roots[self.target_root]
		sets = root_config.sets
		getSetAtoms = root_config.setconfig.getSetAtoms
		myroot = self.target_root
		dbs = self._filtered_trees[myroot]["dbs"]
		vardb = self.trees[myroot]["vartree"].dbapi
		real_vardb = self._trees_orig[myroot]["vartree"].dbapi
		portdb = self.trees[myroot]["porttree"].dbapi
		bindb = self.trees[myroot]["bintree"].dbapi
		pkgsettings = self.pkgsettings[myroot]
		onlydeps = "--onlydeps" in self.myopts
			ext = os.path.splitext(x)[1]
			# Binary package argument (.tbz2): locate it under PKGDIR
			# when a bare file name was given.
				if not os.path.exists(x):
						os.path.join(pkgsettings["PKGDIR"], "All", x)):
						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
					elif os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], x)):
						x = os.path.join(pkgsettings["PKGDIR"], x)
						print "\n\n!!! Binary package '"+str(x)+"' does not exist."
						print "!!! Please ensure the tbz2 exists as specified.\n"
						return 0, myfavorites
				mytbz2=portage.xpak.tbz2(x)
				mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
				if os.path.realpath(x) != \
					os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
					print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
					return 0, myfavorites
				db_keys = list(bindb._aux_cache_keys)
				metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
				pkg = Package(type_name="binary", root_config=root_config,
					cpv=mykey, built=True, metadata=metadata,
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif ext==".ebuild":
				# Raw ebuild argument: it must live in a valid tree.
				ebuild_path = portage.util.normalize_path(os.path.abspath(x))
				pkgdir = os.path.dirname(ebuild_path)
				tree_root = os.path.dirname(os.path.dirname(pkgdir))
				cp = pkgdir[len(tree_root)+1:]
				e = portage.exception.PackageNotFound(
					("%s is not in a valid portage tree " + \
					"hierarchy or does not exist") % x)
				if not portage.isvalidatom(cp):
				cat = portage.catsplit(cp)[0]
				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
				if not portage.isvalidatom("="+mykey):
				ebuild_path = portdb.findname(mykey)
					if ebuild_path != os.path.join(os.path.realpath(tree_root),
						cp, os.path.basename(ebuild_path)):
						print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
						return 0, myfavorites
					if mykey not in portdb.xmatch(
						"match-visible", portage.dep_getkey(mykey)):
						print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
						print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
						print colorize("BAD", "*** page for details.")
						countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
					raise portage.exception.PackageNotFound(
						"%s is not in a valid portage tree hierarchy or does not exist" % x)
				db_keys = list(portdb._aux_cache_keys)
				metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
				pkg = Package(type_name="ebuild", root_config=root_config,
					cpv=mykey, metadata=metadata, onlydeps=onlydeps)
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif x.startswith(os.path.sep):
				# Absolute path argument: resolve to the owning package.
				if not x.startswith(myroot):
					portage.writemsg(("\n\n!!! '%s' does not start with" + \
						" $ROOT.\n") % x, noiselevel=-1)
				# Queue these up since it's most efficient to handle
				# multiple files in a single iter_owners() call.
				lookup_owners.append(x)
				if x in ("system", "world"):
				if x.startswith(SETPREFIX):
					s = x[len(SETPREFIX):]
						raise portage.exception.PackageSetNotFound(s)
					# Recursively expand sets so that containment tests in
					# self._get_parent_sets() properly match atoms in nested
					# sets (like if world contains system).
					expanded_set = InternalPackageSet(
						initial_atoms=getSetAtoms(s))
					self._sets[s] = expanded_set
					args.append(SetArg(arg=x, set=expanded_set,
						root_config=root_config))
				if not is_valid_package_atom(x):
					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
				# Don't expand categories or old-style virtuals here unless
				# necessary. Expansion of old-style virtuals here causes at
				# least the following problems:
				#   1) It's more difficult to determine which set(s) an atom
				#      came from, if any.
				#   2) It takes away freedom from the resolver to choose other
				#      possible expansions when necessary.
					args.append(AtomArg(arg=x, atom=x,
						root_config=root_config))
				expanded_atoms = self._dep_expand(root_config, x)
				installed_cp_set = set()
				for atom in expanded_atoms:
					atom_cp = portage.dep_getkey(atom)
					if vardb.cp_list(atom_cp):
						installed_cp_set.add(atom_cp)
				if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
					# Prefer the one expansion that is already installed.
					installed_cp = iter(installed_cp_set).next()
					expanded_atoms = [atom for atom in expanded_atoms \
						if portage.dep_getkey(atom) == installed_cp]
				if len(expanded_atoms) > 1:
					ambiguous_package_name(x, expanded_atoms, root_config,
						self.spinner, self.myopts)
					return False, myfavorites
					atom = expanded_atoms[0]
					null_atom = insert_category_into_atom(x, "null")
					null_cp = portage.dep_getkey(null_atom)
					cat, atom_pn = portage.catsplit(null_cp)
					virts_p = root_config.settings.get_virts_p().get(atom_pn)
						# Allow the depgraph to choose which virtual.
						atom = insert_category_into_atom(x, "virtual")
						atom = insert_category_into_atom(x, "null")
				args.append(AtomArg(arg=x, atom=atom,
					root_config=root_config))
			# Map the queued absolute paths back to owning packages.
			search_for_multiple = False
			if len(lookup_owners) > 1:
				search_for_multiple = True
			for x in lookup_owners:
				if not search_for_multiple and os.path.isdir(x):
					search_for_multiple = True
				relative_paths.append(x[len(myroot):])
			for pkg, relative_path in \
				real_vardb._owners.iter_owners(relative_paths):
				owners.add(pkg.mycpv)
				if not search_for_multiple:
				portage.writemsg(("\n\n!!! '%s' is not claimed " + \
					"by any package.\n") % lookup_owners[0], noiselevel=-1)
				slot = vardb.aux_get(cpv, ["SLOT"])[0]
					# portage now masks packages with missing slot, but it's
					# possible that one was installed by an older version
					atom = portage.cpv_getkey(cpv)
					atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
				args.append(AtomArg(arg=atom, atom=atom,
					root_config=root_config))
		if "--update" in self.myopts:
			# In some cases, the greedy slots behavior can pull in a slot that
			# the user would want to uninstall due to it being blocked by a
			# newer version in a different slot. Therefore, it's necessary to
			# detect and discard any that should be uninstalled. Each time
			# that arguments are updated, package selections are repeated in
			# order to ensure consistency with the current arguments:
			#   1) Initialize args
			#   2) Select packages and generate initial greedy atoms
			#   3) Update args with greedy atoms
			#   4) Select packages and generate greedy atoms again, while
			#      accounting for any blockers between selected packages
			#   5) Update args with revised greedy atoms
			self._set_args(args)
				greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom):
					AtomArg(arg=arg.arg, atom=atom,
						root_config=arg.root_config))
			self._set_args(greedy_args)
			# Revise greedy atoms, accounting for any blockers
			# between selected packages.
			revised_greedy_args = []
				revised_greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom,
					blocker_lookahead=True):
					revised_greedy_args.append(
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))
			args = revised_greedy_args
			del revised_greedy_args
		self._set_args(args)
		myfavorites = set(myfavorites)
			if isinstance(arg, (AtomArg, PackageArg)):
				myfavorites.add(arg.atom)
			elif isinstance(arg, SetArg):
				myfavorites.add(arg.arg)
		myfavorites = list(myfavorites)
		pprovideddict = pkgsettings.pprovideddict
			portage.writemsg("\n", noiselevel=-1)
		# Order needs to be preserved since a feature of --nodeps
		# is to allow the user to force a specific merge order.
			for atom in arg.set:
				self.spinner.update()
				dep = Dependency(atom=atom, onlydeps=onlydeps,
					root=myroot, parent=arg)
				atom_cp = portage.dep_getkey(atom)
					pprovided = pprovideddict.get(portage.dep_getkey(atom))
					if pprovided and portage.match_from_list(atom, pprovided):
						# A provided package has been specified on the command line.
						self._pprovided_args.append((arg, atom))
					if isinstance(arg, PackageArg):
						if not self._add_pkg(arg.package, dep) or \
							not self._create_graph():
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % arg.arg)
							return 0, myfavorites
						portage.writemsg(" Arg: %s\n Atom: %s\n" % \
							(arg, atom), noiselevel=-1)
					pkg, existing_node = self._select_package(
						myroot, atom, onlydeps=onlydeps)
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							self._unsatisfied_deps_for_display.append(
								((myroot, atom), {}))
							return 0, myfavorites
						self._missing_args.append((arg, atom))
					if atom_cp != pkg.cp:
						# For old-style virtuals, we need to repeat the
						# package.provided check against the selected package.
						expanded_atom = atom.replace(atom_cp, pkg.cp)
						pprovided = pprovideddict.get(pkg.cp)
							portage.match_from_list(expanded_atom, pprovided):
							# A provided package has been
							# specified on the command line.
							self._pprovided_args.append((arg, atom))
					if pkg.installed and "selective" not in self.myparams:
						self._unsatisfied_deps_for_display.append(
							((myroot, atom), {}))
						# Previous behavior was to bail out in this case, but
						# since the dep is satisfied by the installed package,
						# it's more friendly to continue building the graph
						# and just show a warning message. Therefore, only bail
						# out here if the atom is not from either the system or
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							return 0, myfavorites
					# Add the selected package to the graph as soon as possible
					# so that later dep_check() calls can use it as feedback
					# for making more consistent atom selections.
					if not self._add_pkg(pkg, dep):
						if isinstance(arg, SetArg):
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s from %s\n") % \
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % atom)
						return 0, myfavorites
				except portage.exception.MissingSignature, e:
					portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
					return 0, myfavorites
				except portage.exception.InvalidSignature, e:
					portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
					return 0, myfavorites
				except SystemExit, e:
					raise # Needed else can't exit
				except Exception, e:
					print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
					print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
		# Now that the root packages have been added to the graph,
		# process the dependencies.
		if not self._create_graph():
			return 0, myfavorites
		if "--usepkgonly" in self.myopts:
			for xs in self.digraph.all_nodes():
				if not isinstance(xs, Package):
				if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
					print "Missing binary for:",xs[2]
		except self._unknown_internal_error:
			return False, myfavorites
		# We're true here unless we are missing binaries.
		return (not missing,myfavorites)
5761 def _set_args(self, args):
5763 Create the "args" package set from atoms and packages given as
5764 arguments. This method can be called multiple times if necessary.
5765 The package selection cache is automatically invalidated, since
5766 arguments influence package selections.
5768 args_set = self._sets["args"]
5771 if not isinstance(arg, (AtomArg, PackageArg)):
5774 if atom in args_set:
5778 self._set_atoms.clear()
5779 self._set_atoms.update(chain(*self._sets.itervalues()))
5780 atom_arg_map = self._atom_arg_map
5781 atom_arg_map.clear()
5783 for atom in arg.set:
5784 atom_key = (atom, arg.root_config.root)
5785 refs = atom_arg_map.get(atom_key)
5788 atom_arg_map[atom_key] = refs
5792 # Invalidate the package selection cache, since
5793 # arguments influence package selections.
5794 self._highest_pkg_cache.clear()
5795 for trees in self._filtered_trees.itervalues():
5796 trees["porttree"].dbapi._clear_cache()
# Compute slot atoms for installed slots of atom's package that differ
# from the slot of the highest visible match; optionally (with
# blocker_lookahead) drop slots whose packages would block, or be
# blocked by, the highest match.
5798 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5800 Return a list of slot atoms corresponding to installed slots that
5801 differ from the slot of the highest visible match. When
5802 blocker_lookahead is True, slot atoms that would trigger a blocker
5803 conflict are automatically discarded, potentially allowing automatic
5804 uninstallation of older slots when appropriate.
5806 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5807 if highest_pkg is None:
5809 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of matching installed packages (same cp only).
5811 for cpv in vardb.match(atom):
5812 # don't mix new virtuals with old virtuals
5813 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5814 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5816 slots.add(highest_pkg.metadata["SLOT"])
# The highest match's own slot is not "greedy" — remove it.
5820 slots.remove(highest_pkg.metadata["SLOT"])
# For every remaining slot, select the best visible package in it;
# keep only lower-version packages of the same cp.
5823 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5824 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5825 if pkg is not None and \
5826 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5827 greedy_pkgs.append(pkg)
5830 if not blocker_lookahead:
5831 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: parse *DEPEND of each candidate (and the highest
# match) into per-package blocker-atom sets.
5834 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5835 for pkg in greedy_pkgs + [highest_pkg]:
5836 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5838 atoms = self._select_atoms(
5839 pkg.root, dep_str, pkg.use.enabled,
5840 parent=pkg, strict=True)
# Packages with unparseable deps simply get no blockers entry and are
# filtered out below.
5841 except portage.exception.InvalidDependString:
5843 blocker_atoms = (x for x in atoms if x.blocker)
5844 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5846 if highest_pkg not in blockers:
5849 # filter packages with invalid deps
5850 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5852 # filter packages that conflict with highest_pkg
5853 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5854 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5855 blockers[pkg].findAtomForPackage(highest_pkg))]
5860 # If two packages conflict, discard the lower version.
5861 discard_pkgs = set()
5862 greedy_pkgs.sort(reverse=True)
# Pairwise conflict scan: list is sorted highest-first, so pkg2 (the
# later element) is the lower version and is the one discarded.
5863 for i in xrange(len(greedy_pkgs) - 1):
5864 pkg1 = greedy_pkgs[i]
5865 if pkg1 in discard_pkgs:
5867 for j in xrange(i + 1, len(greedy_pkgs)):
5868 pkg2 = greedy_pkgs[j]
5869 if pkg2 in discard_pkgs:
5871 if blockers[pkg1].findAtomForPackage(pkg2) or \
5872 blockers[pkg2].findAtomForPackage(pkg1):
5874 discard_pkgs.add(pkg2)
5876 return [pkg.slot_atom for pkg in greedy_pkgs \
5877 if pkg not in discard_pkgs]
# Thin wrapper: force atom selection to use the graph-backed trees so
# that already-graphed / installed-and-not-replaced packages are
# preferred, then delegate to the normal selection routine.
5879 def _select_atoms_from_graph(self, *pargs, **kwargs):
5881 Prefer atoms matching packages that have already been
5882 added to the graph or those that are installed and have
5883 not been scheduled for replacement.
5885 kwargs["trees"] = self._graph_trees
5886 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check() over a dependency string and return the
# selected atoms. Temporarily relaxes the global
# portage.dep._dep_check_strict flag around the dep_check() call and
# restores it afterwards.
# NOTE(review): the embedded numbering shows elided lines here (likely
# the `if trees is None:` guard and a try/finally around dep_check) —
# confirm against upstream before editing.
5888 def _select_atoms_highest_available(self, root, depstring,
5889 myuse=None, parent=None, strict=True, trees=None, priority=None):
5890 """This will raise InvalidDependString if necessary. If trees is
5891 None then self._filtered_trees is used."""
5892 pkgsettings = self.pkgsettings[root]
5894 trees = self._filtered_trees
5895 if not getattr(priority, "buildtime", False):
5896 # The parent should only be passed to dep_check() for buildtime
5897 # dependencies since that's the only case when it's appropriate
5898 # to trigger the circular dependency avoidance code which uses it.
5899 # It's important not to trigger the same circular dependency
5900 # avoidance code for runtime dependencies since it's not needed
5901 # and it can promote an incorrect package choice.
# Stash the parent in the trees mapping so dep_check() can see it.
5905 if parent is not None:
5906 trees[root]["parent"] = parent
5908 portage.dep._dep_check_strict = False
5909 mycheck = portage.dep_check(depstring, None,
5910 pkgsettings, myuse=myuse,
5911 myroot=root, trees=trees)
# Cleanup: remove the stashed parent and restore strict checking.
5913 if parent is not None:
5914 trees[root].pop("parent")
5915 portage.dep._dep_check_strict = True
# On failure, mycheck[1] carries the error message; on success it
# carries the selected atoms.
5917 raise portage.exception.InvalidDependString(mycheck[1])
5918 selected_atoms = mycheck[1]
5919 return selected_atoms
# Explain to the user why an atom could not be satisfied: reports
# missing-USE / missing-IUSE reasons, masked packages (including an
# EAPI-upgrade hint), or "no ebuilds", then walks the digraph parents
# to show what pulled the dependency in.
5921 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5922 atom = portage.dep.Atom(atom)
5923 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Strip USE deps (and re-append the slot) so matching below ignores
# USE conditions; USE mismatches are diagnosed separately.
5924 atom_without_use = atom
5926 atom_without_use = portage.dep.remove_slot(atom)
5928 atom_without_use += ":" + atom.slot
5929 atom_without_use = portage.dep.Atom(atom_without_use)
5930 xinfo = '"%s"' % atom
5933 # Discard null/ from failed cpv_expand category expansion.
5934 xinfo = xinfo.replace("null/", "")
5935 masked_packages = []
5937 missing_licenses = []
5938 have_eapi_mask = False
5939 pkgsettings = self.pkgsettings[root]
5940 implicit_iuse = pkgsettings._get_implicit_iuse()
5941 root_config = self.roots[root]
5942 portdb = self.roots[root].trees["porttree"].dbapi
5943 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidate cpvs and
# classify each as USE-blocked or masked.
5944 for db, pkg_type, built, installed, db_keys in dbs:
5948 if hasattr(db, "xmatch"):
5949 cpv_list = db.xmatch("match-all", atom_without_use)
5951 cpv_list = db.match(atom_without_use)
5954 for cpv in cpv_list:
5955 metadata, mreasons = get_mask_info(root_config, cpv,
5956 pkgsettings, db, pkg_type, built, installed, db_keys)
5957 if metadata is not None:
5958 pkg = Package(built=built, cpv=cpv,
5959 installed=installed, metadata=metadata,
5960 root_config=root_config)
5961 if pkg.cp != atom.cp:
5962 # A cpv can be returned from dbapi.match() as an
5963 # old-style virtual match even in cases when the
5964 # package does not actually PROVIDE the virtual.
5965 # Filter out any such false matches here.
5966 if not atom_set.findAtomForPackage(pkg):
# Unmasked but the atom has USE deps: candidate for USE diagnosis.
5968 if atom.use and not mreasons:
5969 missing_use.append(pkg)
5971 masked_packages.append(
5972 (root_config, pkgsettings, cpv, metadata, mreasons))
# Turn each USE-blocked candidate into a human-readable reason:
# either flags missing from IUSE, or flags that must be toggled.
5974 missing_use_reasons = []
5975 missing_iuse_reasons = []
5976 for pkg in missing_use:
5977 use = pkg.use.enabled
5978 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5979 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5981 for x in atom.use.required:
5982 if iuse_re.match(x) is None:
5983 missing_iuse.append(x)
5986 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5987 missing_iuse_reasons.append((pkg, mreasons))
5989 need_enable = sorted(atom.use.enabled.difference(use))
5990 need_disable = sorted(atom.use.disabled.intersection(use))
5991 if need_enable or need_disable:
5993 changes.extend(colorize("red", "+" + x) \
5994 for x in need_enable)
5995 changes.extend(colorize("blue", "-" + x) \
5996 for x in need_disable)
5997 mreasons.append("Change USE: %s" % " ".join(changes))
5998 missing_use_reasons.append((pkg, mreasons))
6000 if missing_iuse_reasons and not missing_use_reasons:
6001 missing_use_reasons = missing_iuse_reasons
6002 elif missing_use_reasons:
6003 # Only show the latest version.
6004 del missing_use_reasons[1:]
6006 if missing_use_reasons:
6007 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6008 print "!!! One of the following packages is required to complete your request:"
6009 for pkg, mreasons in missing_use_reasons:
6010 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6012 elif masked_packages:
6014 colorize("BAD", "All ebuilds that could satisfy ") + \
6015 colorize("INFORM", xinfo) + \
6016 colorize("BAD", " have been masked.")
6017 print "!!! One of the following masked packages is required to complete your request:"
6018 have_eapi_mask = show_masked_packages(masked_packages)
# If anything was EAPI-masked, tell the user to upgrade portage.
6021 msg = ("The current version of portage supports " + \
6022 "EAPI '%s'. You must upgrade to a newer version" + \
6023 " of portage before EAPI masked packages can" + \
6024 " be installed.") % portage.const.EAPI
6025 from textwrap import wrap
6026 for line in wrap(msg, 75):
6031 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6033 # Show parent nodes and the argument that pulled them in.
6034 traversed_nodes = set()
6037 while node is not None:
6038 traversed_nodes.add(node)
6039 msg.append('(dependency required by "%s" [%s])' % \
6040 (colorize('INFORM', str(node.cpv)), node.type_name))
6041 # When traversing to parents, prefer arguments over packages
6042 # since arguments are root nodes. Never traverse the same
6043 # package twice, in order to prevent an infinite loop.
6044 selected_parent = None
6045 for parent in self.digraph.parent_nodes(node):
6046 if isinstance(parent, DependencyArg):
6047 msg.append('(dependency required by "%s" [argument])' % \
6048 (colorize('INFORM', str(parent))))
# An argument parent terminates the walk (it is a root node).
6049 selected_parent = None
6051 if parent not in traversed_nodes:
6052 selected_parent = parent
6053 node = selected_parent
# Cached front-end for package selection: consult
# self._highest_pkg_cache first, refreshing a stale entry when the
# cached pkg has since been added to the graph; on a miss, compute via
# _select_pkg_highest_available_imp and cache the result. Also injects
# visible packages into root_config.visible_pkgs.
6059 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6060 cache_key = (root, atom, onlydeps)
6061 ret = self._highest_pkg_cache.get(cache_key)
# Cache hit whose "existing" slot-graph entry may be stale: if the
# selected pkg is now in the slot map, update the cached tuple.
6064 if pkg and not existing:
6065 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6066 if existing and existing == pkg:
6067 # Update the cache to reflect that the
6068 # package has been added to the graph.
6070 self._highest_pkg_cache[cache_key] = ret
# Cache miss: do the real work and memoize.
6072 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6073 self._highest_pkg_cache[cache_key] = ret
# Track visibility: a visible, non-keyword-masked pkg is recorded in
# the root's visible_pkgs database.
6076 settings = pkg.root_config.settings
6077 if visible(settings, pkg) and not (pkg.installed and \
6078 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6079 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection algorithm: scan all dbs (ebuild, binary,
# installed) for matches to `atom`, apply visibility, keyword,
# --noreplace/--newuse/--reinstall and USE-dep filtering, prefer
# existing graph nodes, and return (best_match, existing_node).
# matched_packages is ordered by type preference; the LAST element
# wins (see the return at the bottom).
# NOTE(review): many lines are elided in this extraction (loop
# headers, else/continue/break statements) — the control flow below is
# only partially visible; restore from upstream before modifying.
6082 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6083 root_config = self.roots[root]
6084 pkgsettings = self.pkgsettings[root]
6085 dbs = self._filtered_trees[root]["dbs"]
6086 vardb = self.roots[root].trees["vartree"].dbapi
6087 portdb = self.roots[root].trees["porttree"].dbapi
6088 # List of acceptable packages, ordered by type preference.
6089 matched_packages = []
6090 highest_version = None
6091 if not isinstance(atom, portage.dep.Atom):
6092 atom = portage.dep.Atom(atom)
6094 atom_set = InternalPackageSet(initial_atoms=(atom,))
6095 existing_node = None
6097 usepkgonly = "--usepkgonly" in self.myopts
6098 empty = "empty" in self.myparams
6099 selective = "selective" in self.myparams
6101 noreplace = "--noreplace" in self.myopts
6102 # Behavior of the "selective" parameter depends on
6103 # whether or not a package matches an argument atom.
6104 # If an installed package provides an old-style
6105 # virtual that is no longer provided by an available
6106 # package, the installed package may match an argument
6107 # atom even though none of the available packages do.
6108 # Therefore, "selective" logic does not consider
6109 # whether or not an installed package matches an
6110 # argument atom. It only considers whether or not
6111 # available packages match argument atoms, which is
6112 # represented by the found_available_arg flag.
6113 found_available_arg = False
# Two passes: first look for nodes already in the graph, then fall
# back to a normal search.
6114 for find_existing_node in True, False:
6117 for db, pkg_type, built, installed, db_keys in dbs:
6120 if installed and not find_existing_node:
6121 want_reinstall = reinstall or empty or \
6122 (found_available_arg and not selective)
6123 if want_reinstall and matched_packages:
6125 if hasattr(db, "xmatch"):
6126 cpv_list = db.xmatch("match-all", atom)
6128 cpv_list = db.match(atom)
6130 # USE=multislot can make an installed package appear as if
6131 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6132 # won't do any good as long as USE=multislot is enabled since
6133 # the newly built package still won't have the expected slot.
6134 # Therefore, assume that such SLOT dependencies are already
6135 # satisfied rather than forcing a rebuild.
6136 if installed and not cpv_list and atom.slot:
6137 for cpv in db.match(atom.cp):
6138 slot_available = False
6139 for other_db, other_type, other_built, \
6140 other_installed, other_keys in dbs:
6143 other_db.aux_get(cpv, ["SLOT"])[0]:
6144 slot_available = True
6148 if not slot_available:
6150 inst_pkg = self._pkg(cpv, "installed",
6151 root_config, installed=installed)
6152 # Remove the slot from the atom and verify that
6153 # the package matches the resulting atom.
6154 atom_without_slot = portage.dep.remove_slot(atom)
6156 atom_without_slot += str(atom.use)
6157 atom_without_slot = portage.dep.Atom(atom_without_slot)
6158 if portage.match_from_list(
6159 atom_without_slot, [inst_pkg]):
6160 cpv_list = [inst_pkg.cpv]
6165 pkg_status = "merge"
6166 if installed or onlydeps:
6167 pkg_status = "nomerge"
6170 for cpv in cpv_list:
6171 # Make --noreplace take precedence over --newuse.
6172 if not installed and noreplace and \
6173 cpv in vardb.match(atom):
6174 # If the installed version is masked, it may
6175 # be necessary to look at lower versions,
6176 # in case there is a visible downgrade.
6178 reinstall_for_flags = None
6179 cache_key = (pkg_type, root, cpv, pkg_status)
6180 calculated_use = True
6181 pkg = self._pkg_cache.get(cache_key)
6183 calculated_use = False
# izip pairs db_keys with aux_get values to form a metadata mapping.
6185 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6188 pkg = Package(built=built, cpv=cpv,
6189 installed=installed, metadata=metadata,
6190 onlydeps=onlydeps, root_config=root_config,
6192 metadata = pkg.metadata
6194 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6195 if not built and ("?" in metadata["LICENSE"] or \
6196 "?" in metadata["PROVIDE"]):
6197 # This is avoided whenever possible because
6198 # it's expensive. It only needs to be done here
6199 # if it has an effect on visibility.
6200 pkgsettings.setcpv(pkg)
6201 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6202 calculated_use = True
6203 self._pkg_cache[pkg] = pkg
6205 if not installed or (built and matched_packages):
6206 # Only enforce visibility on installed packages
6207 # if there is at least one other visible package
6208 # available. By filtering installed masked packages
6209 # here, packages that have been masked since they
6210 # were installed can be automatically downgraded
6211 # to an unmasked version.
6213 if not visible(pkgsettings, pkg):
6215 except portage.exception.InvalidDependString:
6219 # Enable upgrade or downgrade to a version
6220 # with visible KEYWORDS when the installed
6221 # version is masked by KEYWORDS, but never
6222 # reinstall the same exact version only due
6223 # to a KEYWORDS mask.
6224 if built and matched_packages:
6226 different_version = None
6227 for avail_pkg in matched_packages:
6228 if not portage.dep.cpvequal(
6229 pkg.cpv, avail_pkg.cpv):
6230 different_version = avail_pkg
6232 if different_version is not None:
6235 pkgsettings._getMissingKeywords(
6236 pkg.cpv, pkg.metadata):
6239 # If the ebuild no longer exists or it's
6240 # keywords have been dropped, reject built
6241 # instances (installed or binary).
6242 # If --usepkgonly is enabled, assume that
6243 # the ebuild status should be ignored.
6247 pkg.cpv, "ebuild", root_config)
6248 except portage.exception.PackageNotFound:
6251 if not visible(pkgsettings, pkg_eb):
# Lazily compute PORTAGE_USE for ebuilds if not done above.
6254 if not pkg.built and not calculated_use:
6255 # This is avoided whenever possible because
6257 pkgsettings.setcpv(pkg)
6258 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6260 if pkg.cp != atom.cp:
6261 # A cpv can be returned from dbapi.match() as an
6262 # old-style virtual match even in cases when the
6263 # package does not actually PROVIDE the virtual.
6264 # Filter out any such false matches here.
6265 if not atom_set.findAtomForPackage(pkg):
# Track whether any AVAILABLE (non-installed) package matches an
# argument atom — feeds the "selective" logic documented above.
6269 if root == self.target_root:
6271 # Ebuild USE must have been calculated prior
6272 # to this point, in case atoms have USE deps.
6273 myarg = self._iter_atoms_for_pkg(pkg).next()
6274 except StopIteration:
6276 except portage.exception.InvalidDependString:
6278 # masked by corruption
6280 if not installed and myarg:
6281 found_available_arg = True
# USE-dep filtering for unbuilt packages: reject if required flags
# are not enabled or forbidden flags are enabled.
6283 if atom.use and not pkg.built:
6284 use = pkg.use.enabled
6285 if atom.use.enabled.difference(use):
6287 if atom.use.disabled.intersection(use):
6289 if pkg.cp == atom_cp:
6290 if highest_version is None:
6291 highest_version = pkg
6292 elif pkg > highest_version:
6293 highest_version = pkg
6294 # At this point, we've found the highest visible
6295 # match from the current repo. Any lower versions
6296 # from this repo are ignored, so this so the loop
6297 # will always end with a break statement below
6299 if find_existing_node:
6300 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6303 if portage.dep.match_from_list(atom, [e_pkg]):
6304 if highest_version and \
6305 e_pkg.cp == atom_cp and \
6306 e_pkg < highest_version and \
6307 e_pkg.slot_atom != highest_version.slot_atom:
6308 # There is a higher version available in a
6309 # different slot, so this existing node is
6313 matched_packages.append(e_pkg)
6314 existing_node = e_pkg
6316 # Compare built package to current config and
6317 # reject the built package if necessary.
6318 if built and not installed and \
6319 ("--newuse" in self.myopts or \
6320 "--reinstall" in self.myopts):
6321 iuses = pkg.iuse.all
6322 old_use = pkg.use.enabled
6324 pkgsettings.setcpv(myeb)
6326 pkgsettings.setcpv(pkg)
6327 now_use = pkgsettings["PORTAGE_USE"].split()
6328 forced_flags = set()
6329 forced_flags.update(pkgsettings.useforce)
6330 forced_flags.update(pkgsettings.usemask)
6332 if myeb and not usepkgonly:
6333 cur_iuse = myeb.iuse.all
6334 if self._reinstall_for_flags(forced_flags,
6338 # Compare current config to installed package
6339 # and do not reinstall if possible.
6340 if not installed and \
6341 ("--newuse" in self.myopts or \
6342 "--reinstall" in self.myopts) and \
6343 cpv in vardb.match(atom):
6344 pkgsettings.setcpv(pkg)
6345 forced_flags = set()
6346 forced_flags.update(pkgsettings.useforce)
6347 forced_flags.update(pkgsettings.usemask)
6348 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6349 old_iuse = set(filter_iuse_defaults(
6350 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6351 cur_use = pkgsettings["PORTAGE_USE"].split()
6352 cur_iuse = pkg.iuse.all
6353 reinstall_for_flags = \
6354 self._reinstall_for_flags(
6355 forced_flags, old_use, old_iuse,
6357 if reinstall_for_flags:
6361 matched_packages.append(pkg)
6362 if reinstall_for_flags:
6363 self._reinstall_nodes[pkg] = \
6367 if not matched_packages:
6370 if "--debug" in self.myopts:
6371 for pkg in matched_packages:
6372 portage.writemsg("%s %s\n" % \
6373 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6375 # Filter out any old-style virtual matches if they are
6376 # mixed with new-style virtual matches.
6377 cp = portage.dep_getkey(atom)
6378 if len(matched_packages) > 1 and \
6379 "virtual" == portage.catsplit(cp)[0]:
6380 for pkg in matched_packages:
6383 # Got a new-style virtual, so filter
6384 # out any old-style virtuals.
6385 matched_packages = [pkg for pkg in matched_packages \
6389 if len(matched_packages) > 1:
# Multiple candidates remain: keep only those equal to the best cpv.
6390 bestmatch = portage.best(
6391 [pkg.cpv for pkg in matched_packages])
6392 matched_packages = [pkg for pkg in matched_packages \
6393 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6395 # ordered by type preference ("ebuild" type is the last resort)
6396 return matched_packages[-1], existing_node
# Select from the graph-backed db only: returns the highest match plus
# the corresponding existing graph node (if any) for its slot.
# NOTE(review): the empty-match early-return path is elided from this
# extraction (gap between 6405 and 6408) — confirm against upstream.
6398 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6400 Select packages that have already been added to the graph or
6401 those that are installed and have not been scheduled for
6404 graph_db = self._graph_trees[root]["porttree"].dbapi
6405 matches = graph_db.match_pkgs(atom)
6408 pkg = matches[-1] # highest match
6409 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6410 return pkg, in_graph
# Opt-in (--complete-graph) pass that pulls the deep dependencies of
# the required sets (args/system/world) into the graph, in a restricted
# mode that only selects already-graphed or installed packages, so that
# initially-satisfied deep deps are not silently broken.
6412 def _complete_graph(self):
6414 Add any deep dependencies of required sets (args, system, world) that
6415 have not been pulled into the graph yet. This ensures that the graph
6416 is consistent such that initially satisfied deep dependencies are not
6417 broken in the new graph. Initially unsatisfied dependencies are
6418 irrelevant since we only want to avoid breaking dependencies that are
6421 Since this method can consume enough time to disturb users, it is
6422 currently only enabled by the --complete-graph option.
6424 if "--buildpkgonly" in self.myopts or \
6425 "recurse" not in self.myparams:
6428 if "complete" not in self.myparams:
6429 # Skip this to avoid consuming enough time to disturb users.
6432 # Put the depgraph into a mode that causes it to only
6433 # select packages that have already been added to the
6434 # graph or those that are installed and have not been
6435 # scheduled for replacement. Also, toggle the "deep"
6436 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selection strategies.
6438 self._select_atoms = self._select_atoms_from_graph
6439 self._select_package = self._select_pkg_from_graph
6440 already_deep = "deep" in self.myparams
6441 if not already_deep:
6442 self.myparams.add("deep")
6444 for root in self.roots:
6445 required_set_names = self._required_set_names.copy()
6446 if root == self.target_root and \
6447 (already_deep or "empty" in self.myparams):
6448 required_set_names.difference_update(self._sets)
6449 if not required_set_names and not self._ignored_deps:
6451 root_config = self.roots[root]
6452 setconfig = root_config.setconfig
6454 # Reuse existing SetArg instances when available.
6455 for arg in self.digraph.root_nodes():
6456 if not isinstance(arg, SetArg):
6458 if arg.root_config != root_config:
6460 if arg.name in required_set_names:
6462 required_set_names.remove(arg.name)
6463 # Create new SetArg instances only when necessary.
6464 for s in required_set_names:
6465 expanded_set = InternalPackageSet(
6466 initial_atoms=setconfig.getSetAtoms(s))
6467 atom = SETPREFIX + s
6468 args.append(SetArg(arg=atom, set=expanded_set,
6469 root_config=root_config))
6470 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom as a Dependency for graph traversal, plus any
# deps previously deferred into self._ignored_deps.
6472 for atom in arg.set:
6473 self._dep_stack.append(
6474 Dependency(atom=atom, root=root, parent=arg))
6475 if self._ignored_deps:
6476 self._dep_stack.extend(self._ignored_deps)
6477 self._ignored_deps = []
6478 if not self._create_graph(allow_unsatisfied=True):
6480 # Check the unsatisfied deps to see if any initially satisfied deps
6481 # will become unsatisfied due to an upgrade. Initially unsatisfied
6482 # deps are irrelevant since we only want to avoid breaking deps
6483 # that are initially satisfied.
6484 while self._unsatisfied_deps:
6485 dep = self._unsatisfied_deps.pop()
6486 matches = vardb.match_pkgs(dep.atom)
6488 self._initially_unsatisfied_deps.append(dep)
6490 # An scheduled installation broke a deep dependency.
6491 # Add the installed package to the graph so that it
6492 # will be appropriately reported as a slot collision
6493 # (possibly solvable via backtracking).
6494 pkg = matches[-1] # highest match
6495 if not self._add_pkg(pkg, dep):
6497 if not self._create_graph(allow_unsatisfied=True):
# Cached Package factory: fetch a "nomerge" Package instance from
# self._pkg_cache, or build one from the appropriate tree's aux_get
# metadata. For ebuilds, PORTAGE_USE and CHOST are computed from the
# root's settings. Raises PackageNotFound when aux_get fails.
6501 def _pkg(self, cpv, type_name, root_config, installed=False):
6503 Get a package instance from the cache, or create a new
6504 one if necessary. Raises KeyError from aux_get if it
6505 failures for some reason (package does not exist or is
6510 operation = "nomerge"
6511 pkg = self._pkg_cache.get(
6512 (type_name, root_config.root, cpv, operation))
6514 tree_type = self.pkg_tree_map[type_name]
6515 db = root_config.trees[tree_type].dbapi
# Use the ORIGINAL trees' aux-cache key list, not a FakeVartree's.
6516 db_keys = list(self._trees_orig[root_config.root][
6517 tree_type].dbapi._aux_cache_keys)
6519 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6521 raise portage.exception.PackageNotFound(cpv)
6522 pkg = Package(cpv=cpv, metadata=metadata,
6523 root_config=root_config, installed=installed)
6524 if type_name == "ebuild":
6525 settings = self.pkgsettings[root_config.root]
6526 settings.setcpv(pkg)
6527 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6528 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6529 self._pkg_cache[pkg] = pkg
# Validate every blocker atom against the initial (installed) and final
# (post-merge) package databases. Irrelevant blockers are discarded;
# resolvable ones produce scheduled "uninstall" tasks wired into the
# digraph with hard ordering deps; unresolvable ones are recorded in
# self._unsolvable_blockers. A per-root BlockerCache keyed by COUNTER
# avoids re-running dep_check for unchanged installed packages.
# NOTE(review): numerous lines are elided from this extraction
# (try/finally bodies, continue/break/return statements) — treat the
# visible control flow as partial.
6532 def validate_blockers(self):
6533 """Remove any blockers from the digraph that do not match any of the
6534 packages within the graph. If necessary, create hard deps to ensure
6535 correct merge order such that mutually blocking packages are never
6536 installed simultaneously."""
6538 if "--buildpkgonly" in self.myopts or \
6539 "--nodeps" in self.myopts:
6542 #if "deep" in self.myparams:
6544 # Pull in blockers from all installed packages that haven't already
6545 # been pulled into the depgraph. This is not enabled by default
6546 # due to the performance penalty that is incurred by all the
6547 # additional dep_check calls that are required.
6549 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6550 for myroot in self.trees:
6551 vardb = self.trees[myroot]["vartree"].dbapi
6552 portdb = self.trees[myroot]["porttree"].dbapi
6553 pkgsettings = self.pkgsettings[myroot]
6554 final_db = self.mydbapi[myroot]
6556 blocker_cache = BlockerCache(myroot, vardb)
# stale_cache tracks cache entries whose cpv is no longer installed;
# survivors are deleted at the end of the root loop.
6557 stale_cache = set(blocker_cache)
6560 stale_cache.discard(cpv)
6561 pkg_in_graph = self.digraph.contains(pkg)
6563 # Check for masked installed packages. Only warn about
6564 # packages that are in the graph in order to avoid warning
6565 # about those that will be automatically uninstalled during
6566 # the merge process or by --depclean.
6568 if pkg_in_graph and not visible(pkgsettings, pkg):
6569 self._masked_installed.add(pkg)
6571 blocker_atoms = None
6577 self._blocker_parents.child_nodes(pkg))
6582 self._irrelevant_blockers.child_nodes(pkg))
6585 if blockers is not None:
6586 blockers = set(str(blocker.atom) \
6587 for blocker in blockers)
6589 # If this node has any blockers, create a "nomerge"
6590 # node for it so that they can be enforced.
6591 self.spinner.update()
6592 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the installed instance changed since the
# cache entry was written — the entry is invalid.
6593 if blocker_data is not None and \
6594 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6597 # If blocker data from the graph is available, use
6598 # it to validate the cache and update the cache if
6600 if blocker_data is not None and \
6601 blockers is not None:
6602 if not blockers.symmetric_difference(
6603 blocker_data.atoms):
6607 if blocker_data is None and \
6608 blockers is not None:
6609 # Re-use the blockers from the graph.
6610 blocker_atoms = sorted(blockers)
6611 counter = long(pkg.metadata["COUNTER"])
6613 blocker_cache.BlockerData(counter, blocker_atoms)
6614 blocker_cache[pkg.cpv] = blocker_data
6618 blocker_atoms = blocker_data.atoms
6620 # Use aux_get() to trigger FakeVartree global
6621 # updates on *DEPEND when appropriate.
6622 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6623 # It is crucial to pass in final_db here in order to
6624 # optimize dep_check calls by eliminating atoms via
6625 # dep_wordreduce and dep_eval calls.
6627 portage.dep._dep_check_strict = False
6629 success, atoms = portage.dep_check(depstr,
6630 final_db, pkgsettings, myuse=pkg.use.enabled,
6631 trees=self._graph_trees, myroot=myroot)
6632 except Exception, e:
6633 if isinstance(e, SystemExit):
6635 # This is helpful, for example, if a ValueError
6636 # is thrown from cpv_expand due to multiple
6637 # matches (this can happen if an atom lacks a
6639 show_invalid_depstring_notice(
6640 pkg, depstr, str(e))
# Restore the strict flag regardless of outcome.
6644 portage.dep._dep_check_strict = True
6646 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6647 if replacement_pkg and \
6648 replacement_pkg[0].operation == "merge":
6649 # This package is being replaced anyway, so
6650 # ignore invalid dependencies so as not to
6651 # annoy the user too much (otherwise they'd be
6652 # forced to manually unmerge it first).
6654 show_invalid_depstring_notice(pkg, depstr, atoms)
# Extract and cache the "!" blocker atoms from the dep_check result.
6656 blocker_atoms = [myatom for myatom in atoms \
6657 if myatom.startswith("!")]
6658 blocker_atoms.sort()
6659 counter = long(pkg.metadata["COUNTER"])
6660 blocker_cache[cpv] = \
6661 blocker_cache.BlockerData(counter, blocker_atoms)
6664 for atom in blocker_atoms:
6665 blocker = Blocker(atom=portage.dep.Atom(atom),
6666 eapi=pkg.metadata["EAPI"], root=myroot)
6667 self._blocker_parents.add(blocker, pkg)
6668 except portage.exception.InvalidAtom, e:
6669 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6670 show_invalid_depstring_notice(
6671 pkg, depstr, "Invalid Atom: %s" % (e,))
# Purge entries for packages that are no longer installed.
6673 for cpv in stale_cache:
6674 del blocker_cache[cpv]
6675 blocker_cache.flush()
6678 # Discard any "uninstall" tasks scheduled by previous calls
6679 # to this method, since those tasks may not make sense given
6680 # the current graph state.
6681 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6682 if previous_uninstall_tasks:
6683 self._blocker_uninstalls = digraph()
6684 self.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each collected blocker against initial and
# final dbs, expanding old-style virtual blockers to their providers.
6686 for blocker in self._blocker_parents.leaf_nodes():
6687 self.spinner.update()
6688 root_config = self.roots[blocker.root]
6689 virtuals = root_config.settings.getvirtuals()
6690 myroot = blocker.root
6691 initial_db = self.trees[myroot]["vartree"].dbapi
6692 final_db = self.mydbapi[myroot]
6694 provider_virtual = False
6695 if blocker.cp in virtuals and \
6696 not self._have_new_virt(blocker.root, blocker.cp):
6697 provider_virtual = True
6699 if provider_virtual:
6701 for provider_entry in virtuals[blocker.cp]:
6703 portage.dep_getkey(provider_entry)
6704 atoms.append(blocker.atom.replace(
6705 blocker.cp, provider_cp))
6707 atoms = [blocker.atom]
6709 blocked_initial = []
6711 blocked_initial.extend(initial_db.match_pkgs(atom))
6715 blocked_final.extend(final_db.match_pkgs(atom))
# A blocker matching nothing in either db is irrelevant — remove it
# and any parents left without blockers.
6717 if not blocked_initial and not blocked_final:
6718 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6719 self._blocker_parents.remove(blocker)
6720 # Discard any parents that don't have any more blockers.
6721 for pkg in parent_pkgs:
6722 self._irrelevant_blockers.add(blocker, pkg)
6723 if not self._blocker_parents.child_nodes(pkg):
6724 self._blocker_parents.remove(pkg)
6726 for parent in self._blocker_parents.parent_nodes(blocker):
6727 unresolved_blocks = False
6728 depends_on_order = set()
6729 for pkg in blocked_initial:
6730 if pkg.slot_atom == parent.slot_atom:
6731 # TODO: Support blocks within slots in cases where it
6732 # might make sense. For example, a new version might
6733 # require that the old version be uninstalled at build
6736 if parent.installed:
6737 # Two currently installed packages conflict with
6738 # eachother. Ignore this case since the damage
6739 # is already done and this would be likely to
6740 # confuse users if displayed like a normal blocker.
6743 self._blocked_pkgs.add(pkg, blocker)
6745 if parent.operation == "merge":
6746 # Maybe the blocked package can be replaced or simply
6747 # unmerged to resolve this block.
6748 depends_on_order.add((pkg, parent))
6750 # None of the above blocker resolutions techniques apply,
6751 # so apparently this one is unresolvable.
6752 unresolved_blocks = True
6753 for pkg in blocked_final:
6754 if pkg.slot_atom == parent.slot_atom:
6755 # TODO: Support blocks within slots.
6757 if parent.operation == "nomerge" and \
6758 pkg.operation == "nomerge":
6759 # This blocker will be handled the next time that a
6760 # merge of either package is triggered.
6763 self._blocked_pkgs.add(pkg, blocker)
6765 # Maybe the blocking package can be
6766 # unmerged to resolve this block.
6767 if parent.operation == "merge" and pkg.installed:
6768 depends_on_order.add((pkg, parent))
6770 elif parent.operation == "nomerge":
6771 depends_on_order.add((parent, pkg))
6773 # None of the above blocker resolutions techniques apply,
6774 # so apparently this one is unresolvable.
6775 unresolved_blocks = True
6777 # Make sure we don't unmerge any package that have been pulled
6779 if not unresolved_blocks and depends_on_order:
6780 for inst_pkg, inst_task in depends_on_order:
6781 if self.digraph.contains(inst_pkg) and \
6782 self.digraph.parent_nodes(inst_pkg):
6783 unresolved_blocks = True
6786 if not unresolved_blocks and depends_on_order:
6787 for inst_pkg, inst_task in depends_on_order:
# Create an explicit "uninstall" task for the blocked installed
# package and order it before the merge via a hard dep edge.
6788 uninst_task = Package(built=inst_pkg.built,
6789 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6790 metadata=inst_pkg.metadata,
6791 operation="uninstall",
6792 root_config=inst_pkg.root_config,
6793 type_name=inst_pkg.type_name)
6794 self._pkg_cache[uninst_task] = uninst_task
6795 # Enforce correct merge order with a hard dep.
6796 self.digraph.addnode(uninst_task, inst_task,
6797 priority=BlockerDepPriority.instance)
6798 # Count references to this blocker so that it can be
6799 # invalidated after nodes referencing it have been
6801 self._blocker_uninstalls.addnode(uninst_task, blocker)
6802 if not unresolved_blocks and not depends_on_order:
6803 self._irrelevant_blockers.add(blocker, parent)
6804 self._blocker_parents.remove_edge(blocker, parent)
6805 if not self._blocker_parents.parent_nodes(blocker):
6806 self._blocker_parents.remove(blocker)
6807 if not self._blocker_parents.child_nodes(parent):
6808 self._blocker_parents.remove(parent)
6809 if unresolved_blocks:
6810 self._unsolvable_blockers.add(blocker, parent)
# Report whether the current options make unresolved blocker conflicts
# acceptable (no real merge to the live filesystem will occur).
# NOTE(review): the accumulator assignment/return lines are elided
# from this extraction — only the option scan is visible.
6814 def _accept_blocker_conflicts(self):
6816 for x in ("--buildpkgonly", "--fetchonly",
6817 "--fetch-all-uri", "--nodeps"):
6818 if x in self.myopts:
# Sort mygraph.order for better leaf-node selection: uninstall
# operations and deep system runtime deps are promoted, then nodes are
# ordered from highest to lowest parent (reference) count.
# NOTE(review): the comparator's return statements are elided in this
# extraction — only the branch conditions are visible.
6823 def _merge_order_bias(self, mygraph):
6825 For optimal leaf node selection, promote deep system runtime deps and
6826 order nodes from highest to lowest overall reference count.
# Precompute each node's parent count once (used as the sort weight).
6830 for node in mygraph.order:
6831 node_info[node] = len(mygraph.parent_nodes(node))
6832 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6834 def cmp_merge_preference(node1, node2):
# Uninstall operations sort ahead of everything else.
6836 if node1.operation == 'uninstall':
6837 if node2.operation == 'uninstall':
6841 if node2.operation == 'uninstall':
6842 if node1.operation == 'uninstall':
# Deep system runtime deps sort ahead of ordinary nodes.
6846 node1_sys = node1 in deep_system_deps
6847 node2_sys = node2 in deep_system_deps
6848 if node1_sys != node2_sys:
# Higher reference count sorts first (descending difference).
6853 return node_info[node2] - node_info[node1]
# cmp_sort_key adapts the cmp-style comparator for key-based sort.
6855 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge list, computing and caching it
# on first use. Conflict resolution and task serialization are retried
# until _serialize_tasks() stops raising _serialize_tasks_retry.
# NOTE(review): the `reversed` parameter shadows the builtin; it is part
# of the public interface so it cannot be renamed here.
# NOTE(review): the `try:` line and the loop/return tail are missing
# from this listing (line-number gaps).
6857 def altlist(self, reversed=False):
6859 while self._serialized_tasks_cache is None:
6860 self._resolve_conflicts()
6862 self._serialized_tasks_cache, self._scheduler_graph = \
6863 self._serialize_tasks()
6864 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cache.
6867 retlist = self._serialized_tasks_cache[:]
# Return the scheduler variant of the dependency graph (see docstring).
# Side effect: break_refs() is called on the graph's nodes, so this
# depgraph instance must not be used for further calculations afterward.
6872 def schedulerGraph(self):
6874 The scheduler graph is identical to the normal one except that
6875 uninstall edges are reversed in specific cases that require
6876 conflicting packages to be temporarily installed simultaneously.
6877 This is intended for use by the Scheduler in it's parallelization
6878 logic. It ensures that temporary simultaneous installation of
6879 conflicting packages is avoided when appropriate (especially for
6880 !!atom blockers), but allowed in specific cases that require it.
6882 Note that this method calls break_refs() which alters the state of
6883 internal Package instances such that this depgraph instance should
6884 not be used to perform any more calculations.
# NOTE(review): the line that populates self._scheduler_graph when it
# is None (presumably via altlist()) is missing from this listing.
6886 if self._scheduler_graph is None:
6888 self.break_refs(self._scheduler_graph.order)
6889 return self._scheduler_graph
# For each node that has a root_config attribute, swap the FakeVartree-
# backed RootConfig for the original one so Package instances no longer
# keep the depgraph alive on the heap.
6891 def break_refs(self, nodes):
6893 Take a mergelist like that returned from self.altlist() and
6894 break any references that lead back to the depgraph. This is
6895 useful if you want to hold references to packages without
6896 also holding the depgraph on the heap.
# NOTE(review): the `for node in nodes:` line is missing from this
# listing (line-number gap between 6896 and 6899).
6899 if hasattr(node, "root_config"):
6900 # The FakeVartree references the _package_cache which
6901 # references the depgraph. So that Package instances don't
6902 # hold the depgraph and FakeVartree on the heap, replace
6903 # the RootConfig that references the FakeVartree with the
6904 # original RootConfig instance which references the actual
# Re-point at the original trees' RootConfig for this node's root.
6906 node.root_config = \
6907 self._trees_orig[node.root_config.root]["root_config"]
# Run the pre-serialization passes: complete the graph, validate
# blockers, and process any slot conflicts. Raises
# _unknown_internal_error if graph completion or blocker validation
# fails (both are expected to succeed here).
6909 def _resolve_conflicts(self):
6910 if not self._complete_graph():
6911 raise self._unknown_internal_error()
6913 if not self.validate_blockers():
6914 raise self._unknown_internal_error()
# Slot conflicts only need processing when some were recorded.
6916 if self._slot_collision_info:
6917 self._process_slot_conflicts()
# Compute a merge/uninstall ordering for the dependency graph.
#
# Returns a tuple (retlist, scheduler_graph): retlist is the ordered
# list of Package / Blocker tasks, scheduler_graph is a copy of the
# digraph with uninstall edges adjusted for the Scheduler.
#
# Raises:
#   self._serialize_tasks_retry    -- when "complete" was just added to
#                                     myparams and the caller must redo
#                                     the calculation (see altlist()).
#   self._unknown_internal_error   -- on unsolvable blockers, slot
#                                     collisions, or circular deps.
#
# NOTE(review): this listing omits many interior lines (gaps in the
# leading line numbers): initializations such as asap_nodes/retlist,
# `else:`/`try:` lines, several `break`/`continue`/`return` statements,
# and the prefer_asap reset lines are not visible. Comments below only
# describe what the visible lines establish.
6919 def _serialize_tasks(self):
6921 if "--debug" in self.myopts:
6922 writemsg("\ndigraph:\n\n", noiselevel=-1)
6923 self.digraph.debug_print()
6924 writemsg("\n", noiselevel=-1)
# Work on copies: mygraph is consumed by node selection below, while
# scheduler_graph keeps (mostly) full edge information for the caller.
6926 scheduler_graph = self.digraph.copy()
6927 mygraph=self.digraph.copy()
6928 # Prune "nomerge" root nodes if nothing depends on them, since
6929 # otherwise they slow down merge order calculation. Don't remove
6930 # non-root nodes since they help optimize merge order in some cases
6931 # such as revdep-rebuild.
6932 removed_nodes = set()
6934 for node in mygraph.root_nodes():
6935 if not isinstance(node, Package) or \
6936 node.installed or node.onlydeps:
6937 removed_nodes.add(node)
6939 self.spinner.update()
6940 mygraph.difference_update(removed_nodes)
# Pruning roots can expose new prunable roots; loop until a pass
# removes nothing (the enclosing loop line is not visible here).
6941 if not removed_nodes:
6943 removed_nodes.clear()
6944 self._merge_order_bias(mygraph)
# cmp-style comparator for ordering nodes within a circular-dependency
# group; closes over priority_range set in the main loop below.
6945 def cmp_circular_bias(n1, n2):
6947 RDEPEND is stronger than PDEPEND and this function
6948 measures such a strength bias within a circular
6949 dependency relationship.
6951 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6952 ignore_priority=priority_range.ignore_medium_soft)
6953 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6954 ignore_priority=priority_range.ignore_medium_soft)
# NOTE(review): the return statements for this comparator are missing
# from the listing.
6955 if n1_n2_medium == n2_n1_medium:
6960 myblocker_uninstalls = self._blocker_uninstalls.copy()
6962 # Contains uninstall tasks that have been scheduled to
6963 # occur after overlapping blockers have been installed.
6964 scheduled_uninstalls = set()
6965 # Contains any Uninstall tasks that have been ignored
6966 # in order to avoid the circular deps code path. These
6967 # correspond to blocker conflicts that could not be
6969 ignored_uninstall_tasks = set()
6970 have_uninstall_task = False
6971 complete = "complete" in self.myparams
6974 def get_nodes(**kwargs):
6976 Returns leaf nodes excluding Uninstall instances
6977 since those should be executed as late as possible.
6979 return [node for node in mygraph.leaf_nodes(**kwargs) \
6980 if isinstance(node, Package) and \
6981 (node.operation != "uninstall" or \
6982 node in scheduled_uninstalls)]
6984 # sys-apps/portage needs special treatment if ROOT="/"
6985 running_root = self._running_root.root
6986 from portage.const import PORTAGE_PACKAGE_ATOM
6987 runtime_deps = InternalPackageSet(
6988 initial_atoms=[PORTAGE_PACKAGE_ATOM])
# Currently-installed portage (vartree) vs. the one the graph will
# install; if they differ, the replacement is merged as soon as
# possible (asap_nodes).
6989 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6990 PORTAGE_PACKAGE_ATOM)
6991 replacement_portage = self.mydbapi[running_root].match_pkgs(
6992 PORTAGE_PACKAGE_ATOM)
6995 running_portage = running_portage[0]
6997 running_portage = None
6999 if replacement_portage:
7000 replacement_portage = replacement_portage[0]
7002 replacement_portage = None
7004 if replacement_portage == running_portage:
7005 replacement_portage = None
7007 if replacement_portage is not None:
7008 # update from running_portage to replacement_portage asap
7009 asap_nodes.append(replacement_portage)
# Collect the running portage's RDEPEND atoms so its essential runtime
# deps are protected from uninstallation below.
7011 if running_portage is not None:
7013 portage_rdepend = self._select_atoms_highest_available(
7014 running_root, running_portage.metadata["RDEPEND"],
7015 myuse=running_portage.use.enabled,
7016 parent=running_portage, strict=False)
7017 except portage.exception.InvalidDependString, e:
7018 portage.writemsg("!!! Invalid RDEPEND in " + \
7019 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7020 (running_root, running_portage.cpv, e), noiselevel=-1)
7022 portage_rdepend = []
# Blocker atoms (leading "!") are excluded from the protected set.
7023 runtime_deps.update(atom for atom in portage_rdepend \
7024 if not atom.startswith("!"))
7026 def gather_deps(ignore_priority, mergeable_nodes,
7027 selected_nodes, node):
7029 Recursively gather a group of nodes that RDEPEND on
7030 eachother. This ensures that they are merged as a group
7031 and get their RDEPENDs satisfied as soon as possible.
# NOTE(review): the True/False return lines of this recursion are
# missing from the listing.
7033 if node in selected_nodes:
7035 if node not in mergeable_nodes:
7037 if node == replacement_portage and \
7038 mygraph.child_nodes(node,
7039 ignore_priority=priority_range.ignore_medium_soft):
7040 # Make sure that portage always has all of it's
7041 # RDEPENDs installed first.
7043 selected_nodes.add(node)
7044 for child in mygraph.child_nodes(node,
7045 ignore_priority=ignore_priority):
7046 if not gather_deps(ignore_priority,
7047 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally never ignore hard blocker edges.
7051 def ignore_uninst_or_med(priority):
7052 if priority is BlockerDepPriority.instance:
7054 return priority_range.ignore_medium(priority)
7056 def ignore_uninst_or_med_soft(priority):
7057 if priority is BlockerDepPriority.instance:
7059 return priority_range.ignore_medium_soft(priority)
7061 tree_mode = "--tree" in self.myopts
7062 # Tracks whether or not the current iteration should prefer asap_nodes
7063 # if available. This is set to False when the previous iteration
7064 # failed to select any nodes. It is reset whenever nodes are
7065 # successfully selected.
7068 # Controls whether or not the current iteration should drop edges that
7069 # are "satisfied" by installed packages, in order to solve circular
7070 # dependencies. The deep runtime dependencies of installed packages are
7071 # not checked in this case (bug #199856), so it must be avoided
7072 # whenever possible.
7073 drop_satisfied = False
7075 # State of variables for successive iterations that loosen the
7076 # criteria for node selection.
7078 # iteration prefer_asap drop_satisfied
7083 # If no nodes are selected on the last iteration, it is due to
7084 # unresolved blockers or circular dependencies.
# Main selection loop: repeatedly pick leaf nodes, progressively
# loosening criteria (prefer_asap, then drop_satisfied) when stuck.
7086 while not mygraph.empty():
7087 self.spinner.update()
7088 selected_nodes = None
7089 ignore_priority = None
7090 if drop_satisfied or (prefer_asap and asap_nodes):
7091 priority_range = DepPrioritySatisfiedRange
7093 priority_range = DepPriorityNormalRange
7094 if prefer_asap and asap_nodes:
7095 # ASAP nodes are merged before their soft deps. Go ahead and
7096 # select root nodes here if necessary, since it's typical for
7097 # the parent to have been removed from the graph already.
7098 asap_nodes = [node for node in asap_nodes \
7099 if mygraph.contains(node)]
7100 for node in asap_nodes:
7101 if not mygraph.child_nodes(node,
7102 ignore_priority=priority_range.ignore_soft):
7103 selected_nodes = [node]
7104 asap_nodes.remove(node)
# Normal path: try each ignore level from NONE up to MEDIUM_SOFT until
# some leaf nodes appear.
7106 if not selected_nodes and \
7107 not (prefer_asap and asap_nodes):
7108 for i in xrange(priority_range.NONE,
7109 priority_range.MEDIUM_SOFT + 1):
7110 ignore_priority = priority_range.ignore_priority[i]
7111 nodes = get_nodes(ignore_priority=ignore_priority)
7113 # If there is a mix of uninstall nodes with other
7114 # types, save the uninstall nodes for later since
7115 # sometimes a merge node will render an uninstall
7116 # node unnecessary (due to occupying the same slot),
7117 # and we want to avoid executing a separate uninstall
7118 # task in that case.
7120 good_uninstalls = []
7121 with_some_uninstalls_excluded = []
7123 if node.operation == "uninstall":
7124 slot_node = self.mydbapi[node.root
7125 ].match_pkgs(node.slot_atom)
7127 slot_node[0].operation == "merge":
7129 good_uninstalls.append(node)
7130 with_some_uninstalls_excluded.append(node)
7132 nodes = good_uninstalls
7133 elif with_some_uninstalls_excluded:
7134 nodes = with_some_uninstalls_excluded
7138 if ignore_priority is None and not tree_mode:
7139 # Greedily pop all of these nodes since no
7140 # relationship has been ignored. This optimization
7141 # destroys --tree output, so it's disabled in tree
7143 selected_nodes = nodes
7145 # For optimal merge order:
7146 # * Only pop one node.
7147 # * Removing a root node (node without a parent)
7148 # will not produce a leaf node, so avoid it.
7149 # * It's normal for a selected uninstall to be a
7150 # root node, so don't check them for parents.
7152 if node.operation == "uninstall" or \
7153 mygraph.parent_nodes(node):
7154 selected_nodes = [node]
# No individual leaf found: try to pull out a whole mutually-dependent
# group via gather_deps.
7160 if not selected_nodes:
7161 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7163 mergeable_nodes = set(nodes)
7164 if prefer_asap and asap_nodes:
7166 for i in xrange(priority_range.SOFT,
7167 priority_range.MEDIUM_SOFT + 1):
7168 ignore_priority = priority_range.ignore_priority[i]
7170 if not mygraph.parent_nodes(node):
7172 selected_nodes = set()
7173 if gather_deps(ignore_priority,
7174 mergeable_nodes, selected_nodes, node):
7177 selected_nodes = None
7181 if prefer_asap and asap_nodes and not selected_nodes:
7182 # We failed to find any asap nodes to merge, so ignore
7183 # them for the next iteration.
7187 if selected_nodes and ignore_priority is not None:
7188 # Try to merge ignored medium_soft deps as soon as possible
7189 # if they're not satisfied by installed packages.
7190 for node in selected_nodes:
7191 children = set(mygraph.child_nodes(node))
7192 soft = children.difference(
7193 mygraph.child_nodes(node,
7194 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7195 medium_soft = children.difference(
7196 mygraph.child_nodes(node,
7198 DepPrioritySatisfiedRange.ignore_medium_soft))
7199 medium_soft.difference_update(soft)
7200 for child in medium_soft:
7201 if child in selected_nodes:
7203 if child in asap_nodes:
7205 asap_nodes.append(child)
# Order a selected group so stronger (RDEPEND-ish) edges merge first.
7207 if selected_nodes and len(selected_nodes) > 1:
7208 if not isinstance(selected_nodes, list):
7209 selected_nodes = list(selected_nodes)
7210 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7212 if not selected_nodes and not myblocker_uninstalls.is_empty():
7213 # An Uninstall task needs to be executed in order to
7214 # avoid conflict if possible.
7217 priority_range = DepPrioritySatisfiedRange
7219 priority_range = DepPriorityNormalRange
7221 mergeable_nodes = get_nodes(
7222 ignore_priority=ignore_uninst_or_med)
# Among candidate uninstall tasks, pick the one whose parents have the
# fewest outstanding deps (tracked via min_parent_deps below).
7224 min_parent_deps = None
7226 for task in myblocker_uninstalls.leaf_nodes():
7227 # Do some sanity checks so that system or world packages
7228 # don't get uninstalled inappropriately here (only really
7229 # necessary when --complete-graph has not been enabled).
7231 if task in ignored_uninstall_tasks:
7234 if task in scheduled_uninstalls:
7235 # It's been scheduled but it hasn't
7236 # been executed yet due to dependence
7237 # on installation of blocking packages.
7240 root_config = self.roots[task.root]
7241 inst_pkg = self._pkg_cache[
7242 ("installed", task.root, task.cpv, "nomerge")]
7244 if self.digraph.contains(inst_pkg):
7247 forbid_overlap = False
7248 heuristic_overlap = False
7249 for blocker in myblocker_uninstalls.parent_nodes(task):
# EAPI 0/1 blockers have no !!atom syntax, so overlap
# tolerance must be guessed heuristically.
7250 if blocker.eapi in ("0", "1"):
7251 heuristic_overlap = True
7252 elif blocker.atom.blocker.overlap.forbid:
7253 forbid_overlap = True
7255 if forbid_overlap and running_root == task.root:
7258 if heuristic_overlap and running_root == task.root:
7259 # Never uninstall sys-apps/portage or it's essential
7260 # dependencies, except through replacement.
7262 runtime_dep_atoms = \
7263 list(runtime_deps.iterAtomsForPackage(task))
7264 except portage.exception.InvalidDependString, e:
7265 portage.writemsg("!!! Invalid PROVIDE in " + \
7266 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7267 (task.root, task.cpv, e), noiselevel=-1)
7271 # Don't uninstall a runtime dep if it appears
7272 # to be the only suitable one installed.
7274 vardb = root_config.trees["vartree"].dbapi
7275 for atom in runtime_dep_atoms:
7276 other_version = None
7277 for pkg in vardb.match_pkgs(atom):
# Same cpv + same COUNTER means this installed pkg
# IS the task itself, not an alternative provider.
7278 if pkg.cpv == task.cpv and \
7279 pkg.metadata["COUNTER"] == \
7280 task.metadata["COUNTER"]:
7284 if other_version is None:
7290 # For packages in the system set, don't take
7291 # any chances. If the conflict can't be resolved
7292 # by a normal replacement operation then abort.
7295 for atom in root_config.sets[
7296 "system"].iterAtomsForPackage(task):
7299 except portage.exception.InvalidDependString, e:
7300 portage.writemsg("!!! Invalid PROVIDE in " + \
7301 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7302 (task.root, task.cpv, e), noiselevel=-1)
7308 # Note that the world check isn't always
7309 # necessary since self._complete_graph() will
7310 # add all packages from the system and world sets to the
7311 # graph. This just allows unresolved conflicts to be
7312 # detected as early as possible, which makes it possible
7313 # to avoid calling self._complete_graph() when it is
7314 # unnecessary due to blockers triggering an abortion.
7316 # For packages in the world set, go ahead an uninstall
7317 # when necessary, as long as the atom will be satisfied
7318 # in the final state.
7319 graph_db = self.mydbapi[task.root]
7322 for atom in root_config.sets[
7323 "world"].iterAtomsForPackage(task):
7325 for pkg in graph_db.match_pkgs(atom):
# Remember which world atom blocks this pkg, for display later.
7332 self._blocked_world_pkgs[inst_pkg] = atom
7334 except portage.exception.InvalidDependString, e:
7335 portage.writemsg("!!! Invalid PROVIDE in " + \
7336 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7337 (task.root, task.cpv, e), noiselevel=-1)
7343 # Check the deps of parent nodes to ensure that
7344 # the chosen task produces a leaf node. Maybe
7345 # this can be optimized some more to make the
7346 # best possible choice, but the current algorithm
7347 # is simple and should be near optimal for most
7349 mergeable_parent = False
7351 for parent in mygraph.parent_nodes(task):
7352 parent_deps.update(mygraph.child_nodes(parent,
7353 ignore_priority=priority_range.ignore_medium_soft))
7354 if parent in mergeable_nodes and \
7355 gather_deps(ignore_uninst_or_med_soft,
7356 mergeable_nodes, set(), parent):
7357 mergeable_parent = True
7359 if not mergeable_parent:
7362 parent_deps.remove(task)
7363 if min_parent_deps is None or \
7364 len(parent_deps) < min_parent_deps:
7365 min_parent_deps = len(parent_deps)
7368 if uninst_task is not None:
7369 # The uninstall is performed only after blocking
7370 # packages have been merged on top of it. File
7371 # collisions between blocking packages are detected
7372 # and removed from the list of files to be uninstalled.
7373 scheduled_uninstalls.add(uninst_task)
7374 parent_nodes = mygraph.parent_nodes(uninst_task)
7376 # Reverse the parent -> uninstall edges since we want
7377 # to do the uninstall after blocking packages have
7378 # been merged on top of it.
7379 mygraph.remove(uninst_task)
7380 for blocked_pkg in parent_nodes:
7381 mygraph.add(blocked_pkg, uninst_task,
7382 priority=BlockerDepPriority.instance)
7383 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7384 scheduler_graph.add(blocked_pkg, uninst_task,
7385 priority=BlockerDepPriority.instance)
7387 # Reset the state variables for leaf node selection and
7388 # continue trying to select leaf nodes.
7390 drop_satisfied = False
7393 if not selected_nodes:
7394 # Only select root nodes as a last resort. This case should
7395 # only trigger when the graph is nearly empty and the only
7396 # remaining nodes are isolated (no parents or children). Since
7397 # the nodes must be isolated, ignore_priority is not needed.
7398 selected_nodes = get_nodes()
7400 if not selected_nodes and not drop_satisfied:
7401 drop_satisfied = True
7404 if not selected_nodes and not myblocker_uninstalls.is_empty():
7405 # If possible, drop an uninstall task here in order to avoid
7406 # the circular deps code path. The corresponding blocker will
7407 # still be counted as an unresolved conflict.
7409 for node in myblocker_uninstalls.leaf_nodes():
7411 mygraph.remove(node)
7416 ignored_uninstall_tasks.add(node)
7419 if uninst_task is not None:
7420 # Reset the state variables for leaf node selection and
7421 # continue trying to select leaf nodes.
7423 drop_satisfied = False
# All fallbacks exhausted: report a circular dependency.
7426 if not selected_nodes:
7427 self._circular_deps_for_display = mygraph
7428 raise self._unknown_internal_error()
7430 # At this point, we've succeeded in selecting one or more nodes, so
7431 # reset state variables for leaf node selection.
7433 drop_satisfied = False
7435 mygraph.difference_update(selected_nodes)
7437 for node in selected_nodes:
7438 if isinstance(node, Package) and \
7439 node.operation == "nomerge":
7442 # Handle interactions between blockers
7443 # and uninstallation tasks.
7444 solved_blockers = set()
7446 if isinstance(node, Package) and \
7447 "uninstall" == node.operation:
7448 have_uninstall_task = True
7451 vardb = self.trees[node.root]["vartree"].dbapi
7452 previous_cpv = vardb.match(node.slot_atom)
7454 # The package will be replaced by this one, so remove
7455 # the corresponding Uninstall task if necessary.
7456 previous_cpv = previous_cpv[0]
7458 ("installed", node.root, previous_cpv, "uninstall")
7460 mygraph.remove(uninst_task)
7464 if uninst_task is not None and \
7465 uninst_task not in ignored_uninstall_tasks and \
7466 myblocker_uninstalls.contains(uninst_task):
7467 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7468 myblocker_uninstalls.remove(uninst_task)
7469 # Discard any blockers that this Uninstall solves.
7470 for blocker in blocker_nodes:
7471 if not myblocker_uninstalls.child_nodes(blocker):
7472 myblocker_uninstalls.remove(blocker)
7473 solved_blockers.add(blocker)
7475 retlist.append(node)
7477 if (isinstance(node, Package) and \
7478 "uninstall" == node.operation) or \
7479 (uninst_task is not None and \
7480 uninst_task in scheduled_uninstalls):
7481 # Include satisfied blockers in the merge list
7482 # since the user might be interested and also
7483 # it serves as an indicator that blocking packages
7484 # will be temporarily installed simultaneously.
7485 for blocker in solved_blockers:
7486 retlist.append(Blocker(atom=blocker.atom,
7487 root=blocker.root, eapi=blocker.eapi,
# Anything still rooted in myblocker_uninstalls was never solved.
7490 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7491 for node in myblocker_uninstalls.root_nodes():
7492 unsolvable_blockers.add(node)
7494 for blocker in unsolvable_blockers:
7495 retlist.append(blocker)
7497 # If any Uninstall tasks need to be executed in order
7498 # to avoid a conflict, complete the graph with any
7499 # dependencies that may have been initially
7500 # neglected (to ensure that unsafe Uninstall tasks
7501 # are properly identified and blocked from execution).
7502 if have_uninstall_task and \
7504 not unsolvable_blockers:
7505 self.myparams.add("complete")
7506 raise self._serialize_tasks_retry("")
7508 if unsolvable_blockers and \
7509 not self._accept_blocker_conflicts():
7510 self._unsatisfied_blockers_for_display = unsolvable_blockers
7511 self._serialized_tasks_cache = retlist[:]
7512 self._scheduler_graph = scheduler_graph
7513 raise self._unknown_internal_error()
7515 if self._slot_collision_info and \
7516 not self._accept_blocker_conflicts():
7517 self._serialized_tasks_cache = retlist[:]
7518 self._scheduler_graph = scheduler_graph
7519 raise self._unknown_internal_error()
7521 return retlist, scheduler_graph
# Report a circular-dependency failure: prune acyclic root nodes,
# force --tree display of the remaining cycle members (with USE flags),
# then print the raw graph and advice. Mutates self.myopts
# (drops --quiet/--verbose, sets --tree) — acceptable since this is an
# error-exit path.
7523 def _show_circular_deps(self, mygraph):
7524 # No leaf nodes are available, so we have a circular
7525 # dependency panic situation. Reduce the noise level to a
7526 # minimum via repeated elimination of root nodes since they
7527 # have no parents and thus can not be part of a cycle.
# NOTE(review): the loop around this pruning (repeat until no root
# nodes remain) is not visible in this listing.
7529 root_nodes = mygraph.root_nodes(
7530 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7533 mygraph.difference_update(root_nodes)
7534 # Display the USE flags that are enabled on nodes that are part
7535 # of dependency cycles in case that helps the user decide to
7536 # disable some of them.
# Peel leaves off a scratch copy to get a depth-ish display order.
7538 tempgraph = mygraph.copy()
7539 while not tempgraph.empty():
7540 nodes = tempgraph.leaf_nodes()
# Cycle with no leaf: fall back to the first node in insertion order.
7542 node = tempgraph.order[0]
7545 display_order.append(node)
7546 tempgraph.remove(node)
7547 display_order.reverse()
7548 self.myopts.pop("--quiet", None)
7549 self.myopts.pop("--verbose", None)
7550 self.myopts["--tree"] = True
7551 portage.writemsg("\n\n", noiselevel=-1)
7552 self.display(display_order)
7553 prefix = colorize("BAD", " * ")
7554 portage.writemsg("\n", noiselevel=-1)
7555 portage.writemsg(prefix + "Error: circular dependencies:\n",
7557 portage.writemsg("\n", noiselevel=-1)
7558 mygraph.debug_print()
7559 portage.writemsg("\n", noiselevel=-1)
7560 portage.writemsg(prefix + "Note that circular dependencies " + \
7561 "can often be avoided by temporarily\n", noiselevel=-1)
7562 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7563 "optional dependencies.\n", noiselevel=-1)
# Display the cached merge list, unless it (or its reverse, as shown
# under --tree) has already been displayed — prevents redundant output
# from display_problems().
7565 def _show_merge_list(self):
7566 if self._serialized_tasks_cache is not None and \
7567 not (self._displayed_list and \
7568 (self._displayed_list == self._serialized_tasks_cache or \
7569 self._displayed_list == \
7570 list(reversed(self._serialized_tasks_cache)))):
# Copy before reversing so the cache itself is never mutated.
7571 display_list = self._serialized_tasks_cache[:]
7572 if "--tree" in self.myopts:
7573 display_list.reverse()
7574 self.display(display_list)
# Print an error explaining the given unsatisfied blockers, listing
# each conflicting package together with (a pruned selection of) the
# parents/atoms that pulled it in. Writes to stderr; shows the blocker
# docs link unless --quiet.
7576 def _show_unsatisfied_blockers(self, blockers):
7577 self._show_merge_list()
7578 msg = "Error: The above package list contains " + \
7579 "packages which cannot be installed " + \
7580 "at the same time on the same system."
7581 prefix = colorize("BAD", " * ")
7582 from textwrap import wrap
7583 portage.writemsg("\n", noiselevel=-1)
7584 for line in wrap(msg, 70):
7585 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7587 # Display the conflicting packages along with the packages
7588 # that pulled them in. This is helpful for troubleshooting
7589 # cases in which blockers don't solve automatically and
7590 # the reasons are not apparent from the normal merge list
# Map each conflicting pkg to the set of (parent, atom) pairs pulling
# it in; packages with no recorded parent may have come from @world.
7594 for blocker in blockers:
7595 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7596 self._blocker_parents.parent_nodes(blocker)):
7597 parent_atoms = self._parent_atoms.get(pkg)
7598 if not parent_atoms:
7599 atom = self._blocked_world_pkgs.get(pkg)
7600 if atom is not None:
7601 parent_atoms = set([("@world", atom)])
7603 conflict_pkgs[pkg] = parent_atoms
7606 # Reduce noise by pruning packages that are only
7607 # pulled in by other conflict packages.
7609 for pkg, parent_atoms in conflict_pkgs.iteritems():
7610 relevant_parent = False
7611 for parent, atom in parent_atoms:
7612 if parent not in conflict_pkgs:
7613 relevant_parent = True
7615 if not relevant_parent:
7616 pruned_pkgs.add(pkg)
7617 for pkg in pruned_pkgs:
7618 del conflict_pkgs[pkg]
7624 # Max number of parents shown, to avoid flooding the display.
7626 for pkg, parent_atoms in conflict_pkgs.iteritems():
7630 # Prefer packages that are not directly involved in a conflict.
# Two passes: first non-conflict parents, then fill up to max_parents
# with whatever remains.
7631 for parent_atom in parent_atoms:
7632 if len(pruned_list) >= max_parents:
7634 parent, atom = parent_atom
7635 if parent not in conflict_pkgs:
7636 pruned_list.add(parent_atom)
7638 for parent_atom in parent_atoms:
7639 if len(pruned_list) >= max_parents:
7641 pruned_list.add(parent_atom)
7643 omitted_parents = len(parent_atoms) - len(pruned_list)
7644 msg.append(indent + "%s pulled in by\n" % pkg)
7646 for parent_atom in pruned_list:
7647 parent, atom = parent_atom
7648 msg.append(2*indent)
7649 if isinstance(parent,
7650 (PackageArg, AtomArg)):
7651 # For PackageArg and AtomArg types, it's
7652 # redundant to display the atom attribute.
7653 msg.append(str(parent))
7655 # Display the specific atom from SetArg or
7657 msg.append("%s required by %s" % (atom, parent))
7661 msg.append(2*indent)
7662 msg.append("(and %d more)\n" % omitted_parents)
7666 sys.stderr.write("".join(msg))
7669 if "--quiet" not in self.myopts:
7670 show_blocker_docs_link()
7672 def display(self, mylist, favorites=[], verbosity=None):
7674 # This is used to prevent display_problems() from
7675 # redundantly displaying this exact same merge list
7676 # again via _show_merge_list().
7677 self._displayed_list = mylist
7679 if verbosity is None:
7680 verbosity = ("--quiet" in self.myopts and 1 or \
7681 "--verbose" in self.myopts and 3 or 2)
7682 favorites_set = InternalPackageSet(favorites)
7683 oneshot = "--oneshot" in self.myopts or \
7684 "--onlydeps" in self.myopts
7685 columns = "--columns" in self.myopts
7690 counters = PackageCounters()
7692 if verbosity == 1 and "--verbose" not in self.myopts:
7693 def create_use_string(*args):
7696 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7698 is_new, reinst_flags,
7699 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7700 alphabetical=("--alphabetical" in self.myopts)):
7708 cur_iuse = set(cur_iuse)
7709 enabled_flags = cur_iuse.intersection(cur_use)
7710 removed_iuse = set(old_iuse).difference(cur_iuse)
7711 any_iuse = cur_iuse.union(old_iuse)
7712 any_iuse = list(any_iuse)
7714 for flag in any_iuse:
7717 reinst_flag = reinst_flags and flag in reinst_flags
7718 if flag in enabled_flags:
7720 if is_new or flag in old_use and \
7721 (all_flags or reinst_flag):
7722 flag_str = red(flag)
7723 elif flag not in old_iuse:
7724 flag_str = yellow(flag) + "%*"
7725 elif flag not in old_use:
7726 flag_str = green(flag) + "*"
7727 elif flag in removed_iuse:
7728 if all_flags or reinst_flag:
7729 flag_str = yellow("-" + flag) + "%"
7732 flag_str = "(" + flag_str + ")"
7733 removed.append(flag_str)
7736 if is_new or flag in old_iuse and \
7737 flag not in old_use and \
7738 (all_flags or reinst_flag):
7739 flag_str = blue("-" + flag)
7740 elif flag not in old_iuse:
7741 flag_str = yellow("-" + flag)
7742 if flag not in iuse_forced:
7744 elif flag in old_use:
7745 flag_str = green("-" + flag) + "*"
7747 if flag in iuse_forced:
7748 flag_str = "(" + flag_str + ")"
7750 enabled.append(flag_str)
7752 disabled.append(flag_str)
7755 ret = " ".join(enabled)
7757 ret = " ".join(enabled + disabled + removed)
7759 ret = '%s="%s" ' % (name, ret)
7762 repo_display = RepoDisplay(self.roots)
7766 mygraph = self.digraph.copy()
7768 # If there are any Uninstall instances, add the corresponding
7769 # blockers to the digraph (useful for --tree display).
7771 executed_uninstalls = set(node for node in mylist \
7772 if isinstance(node, Package) and node.operation == "unmerge")
7774 for uninstall in self._blocker_uninstalls.leaf_nodes():
7775 uninstall_parents = \
7776 self._blocker_uninstalls.parent_nodes(uninstall)
7777 if not uninstall_parents:
7780 # Remove the corresponding "nomerge" node and substitute
7781 # the Uninstall node.
7782 inst_pkg = self._pkg_cache[
7783 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7785 mygraph.remove(inst_pkg)
7790 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7792 inst_pkg_blockers = []
7794 # Break the Package -> Uninstall edges.
7795 mygraph.remove(uninstall)
7797 # Resolution of a package's blockers
7798 # depend on it's own uninstallation.
7799 for blocker in inst_pkg_blockers:
7800 mygraph.add(uninstall, blocker)
7802 # Expand Package -> Uninstall edges into
7803 # Package -> Blocker -> Uninstall edges.
7804 for blocker in uninstall_parents:
7805 mygraph.add(uninstall, blocker)
7806 for parent in self._blocker_parents.parent_nodes(blocker):
7807 if parent != inst_pkg:
7808 mygraph.add(blocker, parent)
7810 # If the uninstall task did not need to be executed because
7811 # of an upgrade, display Blocker -> Upgrade edges since the
7812 # corresponding Blocker -> Uninstall edges will not be shown.
7814 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7815 if upgrade_node is not None and \
7816 uninstall not in executed_uninstalls:
7817 for blocker in uninstall_parents:
7818 mygraph.add(upgrade_node, blocker)
7820 unsatisfied_blockers = []
7825 if isinstance(x, Blocker) and not x.satisfied:
7826 unsatisfied_blockers.append(x)
7829 if "--tree" in self.myopts:
7830 depth = len(tree_nodes)
7831 while depth and graph_key not in \
7832 mygraph.child_nodes(tree_nodes[depth-1]):
7835 tree_nodes = tree_nodes[:depth]
7836 tree_nodes.append(graph_key)
7837 display_list.append((x, depth, True))
7838 shown_edges.add((graph_key, tree_nodes[depth-1]))
7840 traversed_nodes = set() # prevent endless circles
7841 traversed_nodes.add(graph_key)
7842 def add_parents(current_node, ordered):
7844 # Do not traverse to parents if this node is an
7845 # an argument or a direct member of a set that has
7846 # been specified as an argument (system or world).
7847 if current_node not in self._set_nodes:
7848 parent_nodes = mygraph.parent_nodes(current_node)
7850 child_nodes = set(mygraph.child_nodes(current_node))
7851 selected_parent = None
7852 # First, try to avoid a direct cycle.
7853 for node in parent_nodes:
7854 if not isinstance(node, (Blocker, Package)):
7856 if node not in traversed_nodes and \
7857 node not in child_nodes:
7858 edge = (current_node, node)
7859 if edge in shown_edges:
7861 selected_parent = node
7863 if not selected_parent:
7864 # A direct cycle is unavoidable.
7865 for node in parent_nodes:
7866 if not isinstance(node, (Blocker, Package)):
7868 if node not in traversed_nodes:
7869 edge = (current_node, node)
7870 if edge in shown_edges:
7872 selected_parent = node
7875 shown_edges.add((current_node, selected_parent))
7876 traversed_nodes.add(selected_parent)
7877 add_parents(selected_parent, False)
7878 display_list.append((current_node,
7879 len(tree_nodes), ordered))
7880 tree_nodes.append(current_node)
7882 add_parents(graph_key, True)
7884 display_list.append((x, depth, True))
7885 mylist = display_list
7886 for x in unsatisfied_blockers:
7887 mylist.append((x, 0, True))
7889 last_merge_depth = 0
7890 for i in xrange(len(mylist)-1,-1,-1):
7891 graph_key, depth, ordered = mylist[i]
7892 if not ordered and depth == 0 and i > 0 \
7893 and graph_key == mylist[i-1][0] and \
7894 mylist[i-1][1] == 0:
7895 # An ordered node got a consecutive duplicate when the tree was
7899 if ordered and graph_key[-1] != "nomerge":
7900 last_merge_depth = depth
7902 if depth >= last_merge_depth or \
7903 i < len(mylist) - 1 and \
7904 depth >= mylist[i+1][1]:
7907 from portage import flatten
7908 from portage.dep import use_reduce, paren_reduce
7909 # files to fetch list - avoids counting a same file twice
7910 # in size display (verbose mode)
7913 # Use this set to detect when all the "repoadd" strings are "[0]"
7914 # and disable the entire repo display in this case.
7917 for mylist_index in xrange(len(mylist)):
7918 x, depth, ordered = mylist[mylist_index]
7922 portdb = self.trees[myroot]["porttree"].dbapi
7923 bindb = self.trees[myroot]["bintree"].dbapi
7924 vardb = self.trees[myroot]["vartree"].dbapi
7925 vartree = self.trees[myroot]["vartree"]
7926 pkgsettings = self.pkgsettings[myroot]
7929 indent = " " * depth
7931 if isinstance(x, Blocker):
7933 blocker_style = "PKG_BLOCKER_SATISFIED"
7934 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7936 blocker_style = "PKG_BLOCKER"
7937 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7939 counters.blocks += 1
7941 counters.blocks_satisfied += 1
7942 resolved = portage.key_expand(
7943 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7944 if "--columns" in self.myopts and "--quiet" in self.myopts:
7945 addl += " " + colorize(blocker_style, resolved)
7947 addl = "[%s %s] %s%s" % \
7948 (colorize(blocker_style, "blocks"),
7949 addl, indent, colorize(blocker_style, resolved))
7950 block_parents = self._blocker_parents.parent_nodes(x)
7951 block_parents = set([pnode[2] for pnode in block_parents])
7952 block_parents = ", ".join(block_parents)
7954 addl += colorize(blocker_style,
7955 " (\"%s\" is blocking %s)") % \
7956 (str(x.atom).lstrip("!"), block_parents)
7958 addl += colorize(blocker_style,
7959 " (is blocking %s)") % block_parents
7960 if isinstance(x, Blocker) and x.satisfied:
7965 blockers.append(addl)
7968 pkg_merge = ordered and pkg_status == "merge"
7969 if not pkg_merge and pkg_status == "merge":
7970 pkg_status = "nomerge"
7971 built = pkg_type != "ebuild"
7972 installed = pkg_type == "installed"
7974 metadata = pkg.metadata
7976 repo_name = metadata["repository"]
7977 if pkg_type == "ebuild":
7978 ebuild_path = portdb.findname(pkg_key)
7979 if not ebuild_path: # shouldn't happen
7980 raise portage.exception.PackageNotFound(pkg_key)
7981 repo_path_real = os.path.dirname(os.path.dirname(
7982 os.path.dirname(ebuild_path)))
7984 repo_path_real = portdb.getRepositoryPath(repo_name)
7985 pkg_use = list(pkg.use.enabled)
7987 restrict = flatten(use_reduce(paren_reduce(
7988 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7989 except portage.exception.InvalidDependString, e:
7990 if not pkg.installed:
7991 show_invalid_depstring_notice(x,
7992 pkg.metadata["RESTRICT"], str(e))
7996 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7997 "fetch" in restrict:
8000 counters.restrict_fetch += 1
8001 if portdb.fetch_check(pkg_key, pkg_use):
8004 counters.restrict_fetch_satisfied += 1
8006 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8007 #param is used for -u, where you still *do* want to see when something is being upgraded.
8010 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8011 if vardb.cpv_exists(pkg_key):
8012 addl=" "+yellow("R")+fetch+" "
8015 counters.reinst += 1
8016 elif pkg_status == "uninstall":
8017 counters.uninst += 1
8018 # filter out old-style virtual matches
8019 elif installed_versions and \
8020 portage.cpv_getkey(installed_versions[0]) == \
8021 portage.cpv_getkey(pkg_key):
8022 myinslotlist = vardb.match(pkg.slot_atom)
8023 # If this is the first install of a new-style virtual, we
8024 # need to filter out old-style virtual matches.
8025 if myinslotlist and \
8026 portage.cpv_getkey(myinslotlist[0]) != \
8027 portage.cpv_getkey(pkg_key):
8030 myoldbest = myinslotlist[:]
8032 if not portage.dep.cpvequal(pkg_key,
8033 portage.best([pkg_key] + myoldbest)):
8035 addl += turquoise("U")+blue("D")
8037 counters.downgrades += 1
8040 addl += turquoise("U") + " "
8042 counters.upgrades += 1
8044 # New slot, mark it new.
8045 addl = " " + green("NS") + fetch + " "
8046 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8048 counters.newslot += 1
8050 if "--changelog" in self.myopts:
8051 inst_matches = vardb.match(pkg.slot_atom)
8053 changelogs.extend(self.calc_changelog(
8054 portdb.findname(pkg_key),
8055 inst_matches[0], pkg_key))
8057 addl = " " + green("N") + " " + fetch + " "
8066 forced_flags = set()
8067 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8068 forced_flags.update(pkgsettings.useforce)
8069 forced_flags.update(pkgsettings.usemask)
8071 cur_use = [flag for flag in pkg.use.enabled \
8072 if flag in pkg.iuse.all]
8073 cur_iuse = sorted(pkg.iuse.all)
8075 if myoldbest and myinslotlist:
8076 previous_cpv = myoldbest[0]
8078 previous_cpv = pkg.cpv
8079 if vardb.cpv_exists(previous_cpv):
8080 old_iuse, old_use = vardb.aux_get(
8081 previous_cpv, ["IUSE", "USE"])
8082 old_iuse = list(set(
8083 filter_iuse_defaults(old_iuse.split())))
8085 old_use = old_use.split()
8092 old_use = [flag for flag in old_use if flag in old_iuse]
8094 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8096 use_expand.reverse()
8097 use_expand_hidden = \
8098 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8100 def map_to_use_expand(myvals, forcedFlags=False,
8104 for exp in use_expand:
8107 for val in myvals[:]:
8108 if val.startswith(exp.lower()+"_"):
8109 if val in forced_flags:
8110 forced[exp].add(val[len(exp)+1:])
8111 ret[exp].append(val[len(exp)+1:])
8114 forced["USE"] = [val for val in myvals \
8115 if val in forced_flags]
8117 for exp in use_expand_hidden:
8123 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8124 # are the only thing that triggered reinstallation.
8125 reinst_flags_map = {}
8126 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8127 reinst_expand_map = None
8128 if reinstall_for_flags:
8129 reinst_flags_map = map_to_use_expand(
8130 list(reinstall_for_flags), removeHidden=False)
8131 for k in list(reinst_flags_map):
8132 if not reinst_flags_map[k]:
8133 del reinst_flags_map[k]
8134 if not reinst_flags_map.get("USE"):
8135 reinst_expand_map = reinst_flags_map.copy()
8136 reinst_expand_map.pop("USE", None)
8137 if reinst_expand_map and \
8138 not set(reinst_expand_map).difference(
8140 use_expand_hidden = \
8141 set(use_expand_hidden).difference(
8144 cur_iuse_map, iuse_forced = \
8145 map_to_use_expand(cur_iuse, forcedFlags=True)
8146 cur_use_map = map_to_use_expand(cur_use)
8147 old_iuse_map = map_to_use_expand(old_iuse)
8148 old_use_map = map_to_use_expand(old_use)
8151 use_expand.insert(0, "USE")
8153 for key in use_expand:
8154 if key in use_expand_hidden:
8156 verboseadd += create_use_string(key.upper(),
8157 cur_iuse_map[key], iuse_forced[key],
8158 cur_use_map[key], old_iuse_map[key],
8159 old_use_map[key], is_new,
8160 reinst_flags_map.get(key))
8165 if pkg_type == "ebuild" and pkg_merge:
8167 myfilesdict = portdb.getfetchsizes(pkg_key,
8168 useflags=pkg_use, debug=self.edebug)
8169 except portage.exception.InvalidDependString, e:
8170 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8171 show_invalid_depstring_notice(x, src_uri, str(e))
8174 if myfilesdict is None:
8175 myfilesdict="[empty/missing/bad digest]"
8177 for myfetchfile in myfilesdict:
8178 if myfetchfile not in myfetchlist:
8179 mysize+=myfilesdict[myfetchfile]
8180 myfetchlist.append(myfetchfile)
8182 counters.totalsize += mysize
8183 verboseadd += format_size(mysize)
8186 # assign index for a previous version in the same slot
8187 has_previous = False
8188 repo_name_prev = None
8189 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8191 slot_matches = vardb.match(slot_atom)
8194 repo_name_prev = vardb.aux_get(slot_matches[0],
8197 # now use the data to generate output
8198 if pkg.installed or not has_previous:
8199 repoadd = repo_display.repoStr(repo_path_real)
8201 repo_path_prev = None
8203 repo_path_prev = portdb.getRepositoryPath(
8205 if repo_path_prev == repo_path_real:
8206 repoadd = repo_display.repoStr(repo_path_real)
8208 repoadd = "%s=>%s" % (
8209 repo_display.repoStr(repo_path_prev),
8210 repo_display.repoStr(repo_path_real))
8212 repoadd_set.add(repoadd)
8214 xs = [portage.cpv_getkey(pkg_key)] + \
8215 list(portage.catpkgsplit(pkg_key)[2:])
8222 if "COLUMNWIDTH" in self.settings:
8224 mywidth = int(self.settings["COLUMNWIDTH"])
8225 except ValueError, e:
8226 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8228 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8229 self.settings["COLUMNWIDTH"], noiselevel=-1)
8231 oldlp = mywidth - 30
8234 # Convert myoldbest from a list to a string.
8238 for pos, key in enumerate(myoldbest):
8239 key = portage.catpkgsplit(key)[2] + \
8240 "-" + portage.catpkgsplit(key)[3]
8241 if key[-3:] == "-r0":
8243 myoldbest[pos] = key
8244 myoldbest = blue("["+", ".join(myoldbest)+"]")
8247 root_config = self.roots[myroot]
8248 system_set = root_config.sets["system"]
8249 world_set = root_config.sets["world"]
8254 pkg_system = system_set.findAtomForPackage(pkg)
8255 pkg_world = world_set.findAtomForPackage(pkg)
8256 if not (oneshot or pkg_world) and \
8257 myroot == self.target_root and \
8258 favorites_set.findAtomForPackage(pkg):
8259 # Maybe it will be added to world now.
8260 if create_world_atom(pkg, favorites_set, root_config):
8262 except portage.exception.InvalidDependString:
8263 # This is reported elsewhere if relevant.
8266 def pkgprint(pkg_str):
8269 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8271 return colorize("PKG_MERGE_WORLD", pkg_str)
8273 return colorize("PKG_MERGE", pkg_str)
8274 elif pkg_status == "uninstall":
8275 return colorize("PKG_UNINSTALL", pkg_str)
8278 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8280 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8282 return colorize("PKG_NOMERGE", pkg_str)
8285 properties = flatten(use_reduce(paren_reduce(
8286 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8287 except portage.exception.InvalidDependString, e:
8288 if not pkg.installed:
8289 show_invalid_depstring_notice(pkg,
8290 pkg.metadata["PROPERTIES"], str(e))
8294 interactive = "interactive" in properties
8295 if interactive and pkg.operation == "merge":
8296 addl = colorize("WARN", "I") + addl[1:]
8298 counters.interactive += 1
8303 if "--columns" in self.myopts:
8304 if "--quiet" in self.myopts:
8305 myprint=addl+" "+indent+pkgprint(pkg_cp)
8306 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8307 myprint=myprint+myoldbest
8308 myprint=myprint+darkgreen("to "+x[1])
8312 myprint = "[%s] %s%s" % \
8313 (pkgprint(pkg_status.ljust(13)),
8314 indent, pkgprint(pkg.cp))
8316 myprint = "[%s %s] %s%s" % \
8317 (pkgprint(pkg.type_name), addl,
8318 indent, pkgprint(pkg.cp))
8319 if (newlp-nc_len(myprint)) > 0:
8320 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8321 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8322 if (oldlp-nc_len(myprint)) > 0:
8323 myprint=myprint+" "*(oldlp-nc_len(myprint))
8324 myprint=myprint+myoldbest
8325 myprint += darkgreen("to " + pkg.root)
8328 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8330 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8331 myprint += indent + pkgprint(pkg_key) + " " + \
8332 myoldbest + darkgreen("to " + myroot)
8334 if "--columns" in self.myopts:
8335 if "--quiet" in self.myopts:
8336 myprint=addl+" "+indent+pkgprint(pkg_cp)
8337 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8338 myprint=myprint+myoldbest
8342 myprint = "[%s] %s%s" % \
8343 (pkgprint(pkg_status.ljust(13)),
8344 indent, pkgprint(pkg.cp))
8346 myprint = "[%s %s] %s%s" % \
8347 (pkgprint(pkg.type_name), addl,
8348 indent, pkgprint(pkg.cp))
8349 if (newlp-nc_len(myprint)) > 0:
8350 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8351 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8352 if (oldlp-nc_len(myprint)) > 0:
8353 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8354 myprint += myoldbest
8357 myprint = "[%s] %s%s %s" % \
8358 (pkgprint(pkg_status.ljust(13)),
8359 indent, pkgprint(pkg.cpv),
8362 myprint = "[%s %s] %s%s %s" % \
8363 (pkgprint(pkg_type), addl, indent,
8364 pkgprint(pkg.cpv), myoldbest)
8366 if columns and pkg.operation == "uninstall":
8368 p.append((myprint, verboseadd, repoadd))
8370 if "--tree" not in self.myopts and \
8371 "--quiet" not in self.myopts and \
8372 not self._opts_no_restart.intersection(self.myopts) and \
8373 pkg.root == self._running_root.root and \
8374 portage.match_from_list(
8375 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8376 not vardb.cpv_exists(pkg.cpv) and \
8377 "--quiet" not in self.myopts:
8378 if mylist_index < len(mylist) - 1:
8379 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8380 p.append(colorize("WARN", " then resume the merge."))
8383 show_repos = repoadd_set and repoadd_set != set(["0"])
8386 if isinstance(x, basestring):
8387 out.write("%s\n" % (x,))
8390 myprint, verboseadd, repoadd = x
8393 myprint += " " + verboseadd
8395 if show_repos and repoadd:
8396 myprint += " " + teal("[%s]" % repoadd)
8398 out.write("%s\n" % (myprint,))
8407 sys.stdout.write(str(repo_display))
8409 if "--changelog" in self.myopts:
8411 for revision,text in changelogs:
8412 print bold('*'+revision)
8413 sys.stdout.write(text)
# Public entry point for reporting dependency-graph problems (slot
# collisions, missing/masked packages, unsatisfied deps).  Warnings go to
# stderr; unsatisfied dependencies go to stdout so external parsers such
# as autounmask can consume them.
# NOTE(review): this file is a numbered listing with gaps between the
# embedded line numbers (e.g. 8437 -> 8442); the try/finally that restores
# sys.stdout after the redirect below is not visible here -- confirm
# against the full source.
8418 def display_problems(self):
8420 Display problems with the dependency graph such as slot collisions.
8421 This is called internally by display() to show the problems _after_
8422 the merge list where it is most likely to be seen, but if display()
8423 is not going to be called then this method should be called explicitly
8424 to ensure that the user is notified of problems with the graph.
8426 All output goes to stderr, except for unsatisfied dependencies which
8427 go to stdout for parsing by programs such as autounmask.
8430 # Note that show_masked_packages() sends its output to
8431 # stdout, and some programs such as autounmask parse the
8432 # output in cases when emerge bails out. However, when
8433 # show_masked_packages() is called for installed packages
8434 # here, the message is a warning that is more appropriate
8435 # to send to stderr, so temporarily redirect stdout to
8436 # stderr. TODO: Fix output code so there's a cleaner way
8437 # to redirect everything to stderr.
8442 sys.stdout = sys.stderr
8443 self._display_problems()
8449 # This goes to stdout for parsing by programs like autounmask.
8450 for pargs, kwargs in self._unsatisfied_deps_for_display:
8451 self._show_unsatisfied_dep(*pargs, **kwargs)
# Internal worker for display_problems(): emits, in order, circular-dep
# notices, blocker/slot-collision notices, world-file problems, missing
# ebuilds, package.provided conflicts, and masked installed packages.
# All messages here are written to stderr (the caller redirects stdout).
# NOTE(review): numbered listing with gaps (e.g. 8494 -> 8496); some
# branch bodies and else-clauses are among the missing lines.
8453 def _display_problems(self):
8454 if self._circular_deps_for_display is not None:
8455 self._show_circular_deps(
8456 self._circular_deps_for_display)
8458 # The user is only notified of a slot conflict if
8459 # there are no unresolvable blocker conflicts.
8460 if self._unsatisfied_blockers_for_display is not None:
8461 self._show_unsatisfied_blockers(
8462 self._unsatisfied_blockers_for_display)
8464 self._show_slot_collision_notice()
8466 # TODO: Add generic support for "set problem" handlers so that
8467 # the below warnings aren't special cases for world only.
8469 if self._missing_args:
8470 world_problems = False
8471 if "world" in self._sets:
8472 # Filter out indirect members of world (from nested sets)
8473 # since only direct members of world are desired here.
8474 world_set = self.roots[self.target_root].sets["world"]
8475 for arg, atom in self._missing_args:
8476 if arg.name == "world" and atom in world_set:
8477 world_problems = True
8481 sys.stderr.write("\n!!! Problems have been " + \
8482 "detected with your world file\n")
8483 sys.stderr.write("!!! Please run " + \
8484 green("emaint --check world")+"\n\n")
8486 if self._missing_args:
8487 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8488 " Ebuilds for the following packages are either all\n")
8489 sys.stderr.write(colorize("BAD", "!!!") + \
8490 " masked or don't exist:\n")
8491 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8492 self._missing_args) + "\n")
8494 if self._pprovided_args:
# Group requested-but-package.provided atoms by (arg, atom) so each is
# reported once, together with the sets/parents that pulled it in.
8496 for arg, atom in self._pprovided_args:
8497 if isinstance(arg, SetArg):
8499 arg_atom = (atom, atom)
8502 arg_atom = (arg.arg, atom)
8503 refs = arg_refs.setdefault(arg_atom, [])
8504 if parent not in refs:
8507 msg.append(bad("\nWARNING: "))
8508 if len(self._pprovided_args) > 1:
8509 msg.append("Requested packages will not be " + \
8510 "merged because they are listed in\n")
8512 msg.append("A requested package will not be " + \
8513 "merged because it is listed in\n")
8514 msg.append("package.provided:\n\n")
8515 problems_sets = set()
8516 for (arg, atom), refs in arg_refs.iteritems():
8519 problems_sets.update(refs)
8521 ref_string = ", ".join(["'%s'" % name for name in refs])
8522 ref_string = " pulled in by " + ref_string
8523 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8525 if "world" in problems_sets:
8526 msg.append("This problem can be solved in one of the following ways:\n\n")
8527 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8528 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8529 msg.append(" C) Remove offending entries from package.provided.\n\n")
8530 msg.append("The best course of action depends on the reason that an offending\n")
8531 msg.append("package.provided entry exists.\n\n")
8532 sys.stderr.write("".join(msg))
# Collect masking reasons for installed-but-masked packages and show
# them via show_masked_packages().
8534 masked_packages = []
8535 for pkg in self._masked_installed:
8536 root_config = pkg.root_config
8537 pkgsettings = self.pkgsettings[pkg.root]
8538 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8539 masked_packages.append((root_config, pkgsettings,
8540 pkg.cpv, pkg.metadata, mreasons))
8542 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8543 " The following installed packages are masked:\n")
8544 show_masked_packages(masked_packages)
# Return the ChangeLog entries between the installed version (`current`)
# and the version about to be merged (`next`), as (revision, text) pairs
# from find_changelog_tags().  Versions are normalized by stripping the
# category and any trailing "-r0".
# NOTE(review): numbered listing with gaps -- the early-return bodies
# (e.g. after 8549) and the try/except around the file read are partly
# missing here.
8548 def calc_changelog(self,ebuildpath,current,next):
8549 if ebuildpath == None or not os.path.exists(ebuildpath):
8551 current = '-'.join(portage.catpkgsplit(current)[1:])
8552 if current.endswith('-r0'):
8553 current = current[:-3]
8554 next = '-'.join(portage.catpkgsplit(next)[1:])
8555 if next.endswith('-r0'):
# ChangeLog lives next to the ebuild in the same package directory.
8557 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8559 changelog = open(changelogpath).read()
8560 except SystemExit, e:
8561 raise # Needed else can't exit
8564 divisions = self.find_changelog_tags(changelog)
8565 #print 'XX from',current,'to',next
8566 #for div,text in divisions: print 'XX',div
8567 # skip entries for all revisions above the one we are about to emerge
8568 for i in range(len(divisions)):
8569 if divisions[i][0]==next:
8570 divisions = divisions[i:]
8572 # find out how many entries we are going to display
8573 for i in range(len(divisions)):
8574 if divisions[i][0]==current:
8575 divisions = divisions[:i]
8578 # couldn't find the current revision in the list. display nothing
# Split a ChangeLog body into (release, entry_text) pairs by scanning for
# "*<version>" header lines (multiline regex).  Trailing ".ebuild" and
# "-r0" suffixes are stripped from the captured release tag.
# NOTE(review): numbered listing with gaps -- the loop construct, the
# no-match break (8587/8590 region) and the final return are missing.
8582 def find_changelog_tags(self,changelog):
# Header lines look like "* pkg-1.2.3 (date)"; re.M anchors ^ per line.
8586 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8588 if release is not None:
8589 divs.append((release,changelog))
8591 if release is not None:
# Text before the next header belongs to the previous release.
8592 divs.append((release,changelog[:match.start()]))
8593 changelog = changelog[match.end():]
8594 release = match.group(1)
8595 if release.endswith('.ebuild'):
8596 release = release[:-7]
8597 if release.endswith('-r0'):
8598 release = release[:-3]
# Record "nomerge" favorites (argument atoms already satisfied, plus any
# world-candidate sets) in the world file, unless running in a mode that
# must not touch it.  Locks/loads the world set when it supports that.
# NOTE(review): numbered listing with gaps -- the lock call body (after
# 8611), the unlock (presumably in a finally), and several branch bodies
# are among the missing lines.
8600 def saveNomergeFavorites(self):
8601 """Find atoms in favorites that are not in the mergelist and add them
8602 to the world file if necessary."""
# Bail out for any option under which the world file must stay untouched.
8603 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8604 "--oneshot", "--onlydeps", "--pretend"):
8605 if x in self.myopts:
8607 root_config = self.roots[self.target_root]
8608 world_set = root_config.sets["world"]
8610 world_locked = False
8611 if hasattr(world_set, "lock"):
8615 if hasattr(world_set, "load"):
8616 world_set.load() # maybe it's changed on disk
8618 args_set = self._sets["args"]
8619 portdb = self.trees[self.target_root]["porttree"].dbapi
8620 added_favorites = set()
8621 for x in self._set_nodes:
8622 pkg_type, root, pkg_key, pkg_status = x
8623 if pkg_status != "nomerge":
8627 myfavkey = create_world_atom(x, args_set, root_config)
8629 if myfavkey in added_favorites:
8631 added_favorites.add(myfavkey)
8632 except portage.exception.InvalidDependString, e:
8633 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8634 (pkg_key, str(e)), noiselevel=-1)
8635 writemsg("!!! see '%s'\n\n" % os.path.join(
8636 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record any named sets that are eligible world candidates.
8639 for k in self._sets:
8640 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8645 all_added.append(SETPREFIX + k)
8646 all_added.extend(added_favorites)
8649 print ">>> Recording %s in \"world\" favorites file..." % \
8650 colorize("INFORM", str(a))
8652 world_set.update(all_added)
# Rebuild the dependency graph from saved --resume data: validate each
# mergelist entry, reconstruct Package instances from cached metadata,
# inject them into the fake db, then re-run graph creation and verify
# that no required deps were dropped (raises UnsatisfiedResumeDep).
# NOTE(review): numbered listing with gaps -- early-return bodies, the
# loop header over mergelist entries, several try/except frames and the
# final return are among the missing lines; do not assume contiguity.
8657 def loadResumeCommand(self, resume_data, skip_masked=False):
8659 Add a resume command to the graph and validate it in the process. This
8660 will raise a PackageNotFound exception if a package is not available.
8663 if not isinstance(resume_data, dict):
8666 mergelist = resume_data.get("mergelist")
8667 if not isinstance(mergelist, list):
8670 fakedb = self.mydbapi
8672 serialized_tasks = []
# Each resume entry must be a 4-item [pkg_type, root, cpv, action] list.
8675 if not (isinstance(x, list) and len(x) == 4):
8677 pkg_type, myroot, pkg_key, action = x
8678 if pkg_type not in self.pkg_tree_map:
8680 if action != "merge":
8682 tree_type = self.pkg_tree_map[pkg_type]
8683 mydb = trees[myroot][tree_type].dbapi
8684 db_keys = list(self._trees_orig[myroot][
8685 tree_type].dbapi._aux_cache_keys)
8687 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8689 # It does not exist or it is corrupt.
8690 if action == "uninstall":
8692 raise portage.exception.PackageNotFound(pkg_key)
8693 installed = action == "uninstall"
8694 built = pkg_type != "ebuild"
8695 root_config = self.roots[myroot]
8696 pkg = Package(built=built, cpv=pkg_key,
8697 installed=installed, metadata=metadata,
8698 operation=action, root_config=root_config,
# Ebuilds get their USE recalculated from current settings rather than
# trusting the saved metadata.
8700 if pkg_type == "ebuild":
8701 pkgsettings = self.pkgsettings[myroot]
8702 pkgsettings.setcpv(pkg)
8703 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8704 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8705 self._pkg_cache[pkg] = pkg
8707 root_config = self.roots[pkg.root]
8708 if "merge" == pkg.operation and \
8709 not visible(root_config.settings, pkg):
8711 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8713 self._unsatisfied_deps_for_display.append(
8714 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8716 fakedb[myroot].cpv_inject(pkg)
8717 serialized_tasks.append(pkg)
8718 self.spinner.update()
8720 if self._unsatisfied_deps_for_display:
# With --nodeps (or an empty list) the saved order is trusted as-is;
# otherwise the graph is rebuilt below in selective+deep mode.
8723 if not serialized_tasks or "--nodeps" in self.myopts:
8724 self._serialized_tasks_cache = serialized_tasks
8725 self._scheduler_graph = self.digraph
8727 self._select_package = self._select_pkg_from_graph
8728 self.myparams.add("selective")
8729 # Always traverse deep dependencies in order to account for
8730 # potentially unsatisfied dependencies of installed packages.
8731 # This is necessary for correct --keep-going or --resume operation
8732 # in case a package from a group of circularly dependent packages
8733 # fails. In this case, a package which has recently been installed
8734 # may have an unsatisfied circular dependency (pulled in by
8735 # PDEPEND, for example). So, even though a package is already
8736 # installed, it may not have all of it's dependencies satisfied, so
8737 # it may not be usable. If such a package is in the subgraph of
8738 # deep dependencies of a scheduled build, that build needs to
8739 # be cancelled. In order for this type of situation to be
8740 # recognized, deep traversal of dependencies is required.
8741 self.myparams.add("deep")
8743 favorites = resume_data.get("favorites")
8744 args_set = self._sets["args"]
8745 if isinstance(favorites, list):
8746 args = self._load_favorites(favorites)
8750 for task in serialized_tasks:
8751 if isinstance(task, Package) and \
8752 task.operation == "merge":
8753 if not self._add_pkg(task, None):
8756 # Packages for argument atoms need to be explicitly
8757 # added via _add_pkg() so that they are included in the
8758 # digraph (needed at least for --tree display).
8760 for atom in arg.set:
8761 pkg, existing_node = self._select_package(
8762 arg.root_config.root, atom)
8763 if existing_node is None and \
8765 if not self._add_pkg(pkg, Dependency(atom=atom,
8766 root=pkg.root, parent=arg)):
8769 # Allow unsatisfied deps here to avoid showing a masking
8770 # message for an unsatisfied dep that isn't necessarily
8772 if not self._create_graph(allow_unsatisfied=True):
# Keep only unsatisfied deps whose parent is actually scheduled to
# merge; deps of already-installed parents only count if they sit in
# the parent-subgraph of something that will be installed.
8775 unsatisfied_deps = []
8776 for dep in self._unsatisfied_deps:
8777 if not isinstance(dep.parent, Package):
8779 if dep.parent.operation == "merge":
8780 unsatisfied_deps.append(dep)
8783 # For unsatisfied deps of installed packages, only account for
8784 # them if they are in the subgraph of dependencies of a package
8785 # which is scheduled to be installed.
8786 unsatisfied_install = False
8788 dep_stack = self.digraph.parent_nodes(dep.parent)
8790 node = dep_stack.pop()
8791 if not isinstance(node, Package):
8793 if node.operation == "merge":
8794 unsatisfied_install = True
8796 if node in traversed:
8799 dep_stack.extend(self.digraph.parent_nodes(node))
8801 if unsatisfied_install:
8802 unsatisfied_deps.append(dep)
8804 if masked_tasks or unsatisfied_deps:
8805 # This probably means that a required package
8806 # was dropped via --skipfirst. It makes the
8807 # resume list invalid, so convert it to a
8808 # UnsatisfiedResumeDep exception.
8809 raise self.UnsatisfiedResumeDep(self,
8810 masked_tasks + unsatisfied_deps)
8811 self._serialized_tasks_cache = None
8814 except self._unknown_internal_error:
# Translate a saved favorites list (strings: "system"/"world", SETPREFIX
# set names, or plain atoms) back into SetArg/AtomArg instances so resumed
# packages can be matched to arguments during graph creation.
# NOTE(review): numbered listing with gaps -- the loop header over
# `favorites`, several continue/else bodies and the return are missing.
8819 def _load_favorites(self, favorites):
8821 Use a list of favorites to resume state from a
8822 previous select_files() call. This creates similar
8823 DependencyArg instances to those that would have
8824 been created by the original select_files() call.
8825 This allows Package instances to be matched with
8826 DependencyArg instances during graph creation.
8828 root_config = self.roots[self.target_root]
8829 getSetAtoms = root_config.setconfig.getSetAtoms
8830 sets = root_config.sets
8833 if not isinstance(x, basestring):
8835 if x in ("system", "world"):
8837 if x.startswith(SETPREFIX):
8838 s = x[len(SETPREFIX):]
8843 # Recursively expand sets so that containment tests in
8844 # self._get_parent_sets() properly match atoms in nested
8845 # sets (like if world contains system).
8846 expanded_set = InternalPackageSet(
8847 initial_atoms=getSetAtoms(s))
8848 self._sets[s] = expanded_set
8849 args.append(SetArg(arg=x, set=expanded_set,
8850 root_config=root_config))
8852 if not portage.isvalidatom(x):
8854 args.append(AtomArg(arg=x, atom=x,
8855 root_config=root_config))
8857 self._set_args(args)
# Raised by loadResumeCommand() when a resume list has become invalid
# (e.g. a required package was dropped via --skipfirst).  Carries a back
# reference to the depgraph so the caller can inspect the failed state.
8860 class UnsatisfiedResumeDep(portage.exception.PortageException):
8862 A dependency of a resume list is not installed. This
8863 can occur when a required package is dropped from the
8864 merge list via --skipfirst.
8866 def __init__(self, depgraph, value):
8867 portage.exception.PortageException.__init__(self, value)
8868 self.depgraph = depgraph
# Base class for exceptions used purely for internal depgraph control
# flow; see the _unknown_internal_error and _serialize_tasks_retry
# subclasses below.  Defaults to an empty message.
8870 class _internal_exception(portage.exception.PortageException):
8871 def __init__(self, value=""):
8872 portage.exception.PortageException.__init__(self, value)
# Control-flow exception: aborts graph creation after the real error has
# already been written to stderr (caught e.g. in loadResumeCommand()).
8874 class _unknown_internal_error(_internal_exception):
8876 Used by the depgraph internally to terminate graph creation.
8877 The specific reason for the failure should have been dumped
8878 to stderr, unfortunately, the exact reason for the failure
# Control-flow exception: signals that _serialize_tasks() must be re-run
# (currently only when neglected deps have to be added to the graph).
8882 class _serialize_tasks_retry(_internal_exception):
8884 This is raised by the _serialize_tasks() method when it needs to
8885 be called again for some reason. The only case that it's currently
8886 used for is when neglected dependencies need to be added to the
8887 graph in order to avoid making a potentially unsafe decision.
# dbapi facade over the depgraph's package-selection logic, used during
# dep_check() so atom preference decisions see the graph's choices.
# Caches match() results per atom and maps matched cpvs back to Package
# objects for aux_get().
# NOTE(review): numbered listing with gaps -- cache-hit early return in
# match(), several return statements in _visible(), and the try/except
# frames in _visible()/_dep_expand() are among the missing lines.
8890 class _dep_check_composite_db(portage.dbapi):
8892 A dbapi-like interface that is optimized for use in dep_check() calls.
8893 This is built on top of the existing depgraph package selection logic.
8894 Some packages that have been added to the graph may be masked from this
8895 view in order to influence the atom preference selection that occurs
8898 def __init__(self, depgraph, root):
8899 portage.dbapi.__init__(self)
8900 self._depgraph = depgraph
8902 self._match_cache = {}
8903 self._cpv_pkg_map = {}
# Drop both caches; called when graph state changes invalidate them.
8905 def _clear_cache(self):
8906 self._match_cache.clear()
8907 self._cpv_pkg_map.clear()
# Resolve an atom to the cpvs the depgraph would select, considering
# additional slots for new-style virtuals; results are cached per atom.
8909 def match(self, atom):
8910 ret = self._match_cache.get(atom)
8915 atom = self._dep_expand(atom)
8916 pkg, existing = self._depgraph._select_package(self._root, atom)
8920 # Return the highest available from select_package() as well as
8921 # any matching slots in the graph db.
8923 slots.add(pkg.metadata["SLOT"])
8924 atom_cp = portage.dep_getkey(atom)
8925 if pkg.cp.startswith("virtual/"):
8926 # For new-style virtual lookahead that occurs inside
8927 # dep_check(), examine all slots. This is needed
8928 # so that newer slots will not unnecessarily be pulled in
8929 # when a satisfying lower slot is already installed. For
8930 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8931 # there's no need to pull in a newer slot to satisfy a
8932 # virtual/jdk dependency.
8933 for db, pkg_type, built, installed, db_keys in \
8934 self._depgraph._filtered_trees[self._root]["dbs"]:
8935 for cpv in db.match(atom):
8936 if portage.cpv_getkey(cpv) != pkg.cp:
8938 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8940 if self._visible(pkg):
8941 self._cpv_pkg_map[pkg.cpv] = pkg
8943 slots.remove(pkg.metadata["SLOT"])
# Select a package for each remaining slot of the same cp.
8945 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8946 pkg, existing = self._depgraph._select_package(
8947 self._root, slot_atom)
8950 if not self._visible(pkg):
8952 self._cpv_pkg_map[pkg.cpv] = pkg
8955 self._cpv_sort_ascending(ret)
8956 self._match_cache[orig_atom] = ret
# Decide whether a package may be offered to dep_check(): installed
# packages require selective mode, and choices that would lose to a
# higher visible version or conflict with a graph selection are masked.
8959 def _visible(self, pkg):
8960 if pkg.installed and "selective" not in self._depgraph.myparams:
8962 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8963 except (StopIteration, portage.exception.InvalidDependString):
8970 self._depgraph.pkgsettings[pkg.root], pkg):
8972 except portage.exception.InvalidDependString:
8974 in_graph = self._depgraph._slot_pkg_map[
8975 self._root].get(pkg.slot_atom)
8976 if in_graph is None:
8977 # Mask choices for packages which are not the highest visible
8978 # version within their slot (since they usually trigger slot
8980 highest_visible, in_graph = self._depgraph._select_package(
8981 self._root, pkg.slot_atom)
8982 if pkg != highest_visible:
8984 elif in_graph != pkg:
8985 # Mask choices for packages that would trigger a slot
8986 # conflict with a previously selected package.
# Qualify category-less atoms (old installed packages), preferring a
# single non-virtual expansion; ambiguous results raise like cpv_expand.
8990 def _dep_expand(self, atom):
8992 This is only needed for old installed packages that may
8993 contain atoms that are not fully qualified with a specific
8994 category. Emulate the cpv_expand() function that's used by
8995 dbapi.match() in cases like this. If there are multiple
8996 matches, it's often due to a new-style virtual that has
8997 been added, so try to filter those out to avoid raising
9000 root_config = self._depgraph.roots[self._root]
9002 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9003 if len(expanded_atoms) > 1:
9004 non_virtual_atoms = []
9005 for x in expanded_atoms:
9006 if not portage.dep_getkey(x).startswith("virtual/"):
9007 non_virtual_atoms.append(x)
9008 if len(non_virtual_atoms) == 1:
9009 expanded_atoms = non_virtual_atoms
9010 if len(expanded_atoms) > 1:
9011 # compatible with portage.cpv_expand()
9012 raise portage.exception.AmbiguousPackageName(
9013 [portage.dep_getkey(x) for x in expanded_atoms])
9015 atom = expanded_atoms[0]
9017 null_atom = insert_category_into_atom(atom, "null")
9018 null_cp = portage.dep_getkey(null_atom)
9019 cat, atom_pn = portage.catsplit(null_cp)
9020 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9022 # Allow the resolver to choose which virtual.
9023 atom = insert_category_into_atom(atom, "virtual")
9025 atom = insert_category_into_atom(atom, "null")
# Serve metadata for cpvs previously returned by match(); missing keys
# yield empty strings.
9028 def aux_get(self, cpv, wants):
9029 metadata = self._cpv_pkg_map[cpv].metadata
9030 return [metadata.get(x, "") for x in wants]
9032 class RepoDisplay(object):
9033 def __init__(self, roots):
9034 self._shown_repos = {}
9035 self._unknown_repo = False
9037 for root_config in roots.itervalues():
9038 portdir = root_config.settings.get("PORTDIR")
9040 repo_paths.add(portdir)
9041 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9043 repo_paths.update(overlays.split())
9044 repo_paths = list(repo_paths)
9045 self._repo_paths = repo_paths
9046 self._repo_paths_real = [ os.path.realpath(repo_path) \
9047 for repo_path in repo_paths ]
9049 # pre-allocate index for PORTDIR so that it always has index 0.
9050 for root_config in roots.itervalues():
9051 portdb = root_config.trees["porttree"].dbapi
9052 portdir = portdb.porttree_root
9054 self.repoStr(portdir)
9056 def repoStr(self, repo_path_real):
9059 real_index = self._repo_paths_real.index(repo_path_real)
9060 if real_index == -1:
9062 self._unknown_repo = True
9064 shown_repos = self._shown_repos
9065 repo_paths = self._repo_paths
9066 repo_path = repo_paths[real_index]
9067 index = shown_repos.get(repo_path)
9069 index = len(shown_repos)
9070 shown_repos[repo_path] = index
9076 shown_repos = self._shown_repos
9077 unknown_repo = self._unknown_repo
9078 if shown_repos or self._unknown_repo:
9079 output.append("Portage tree and overlays:\n")
9080 show_repo_paths = list(shown_repos)
9081 for repo_path, repo_index in shown_repos.iteritems():
9082 show_repo_paths[repo_index] = repo_path
9084 for index, repo_path in enumerate(show_repo_paths):
9085 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9087 output.append(" "+teal("[?]") + \
9088 " indicates that the source repository could not be determined\n")
9089 return "".join(output)
9091 class PackageCounters(object):
9101 self.blocks_satisfied = 0
9103 self.restrict_fetch = 0
9104 self.restrict_fetch_satisfied = 0
9105 self.interactive = 0
9108 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9111 myoutput.append("Total: %s package" % total_installs)
9112 if total_installs != 1:
9113 myoutput.append("s")
9114 if total_installs != 0:
9115 myoutput.append(" (")
9116 if self.upgrades > 0:
9117 details.append("%s upgrade" % self.upgrades)
9118 if self.upgrades > 1:
9120 if self.downgrades > 0:
9121 details.append("%s downgrade" % self.downgrades)
9122 if self.downgrades > 1:
9125 details.append("%s new" % self.new)
9126 if self.newslot > 0:
9127 details.append("%s in new slot" % self.newslot)
9128 if self.newslot > 1:
9131 details.append("%s reinstall" % self.reinst)
9135 details.append("%s uninstall" % self.uninst)
9138 if self.interactive > 0:
9139 details.append("%s %s" % (self.interactive,
9140 colorize("WARN", "interactive")))
9141 myoutput.append(", ".join(details))
9142 if total_installs != 0:
9143 myoutput.append(")")
9144 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9145 if self.restrict_fetch:
9146 myoutput.append("\nFetch Restriction: %s package" % \
9147 self.restrict_fetch)
9148 if self.restrict_fetch > 1:
9149 myoutput.append("s")
9150 if self.restrict_fetch_satisfied < self.restrict_fetch:
9151 myoutput.append(bad(" (%s unsatisfied)") % \
9152 (self.restrict_fetch - self.restrict_fetch_satisfied))
9154 myoutput.append("\nConflict: %s block" % \
9157 myoutput.append("s")
9158 if self.blocks_satisfied < self.blocks:
9159 myoutput.append(bad(" (%s unsatisfied)") % \
9160 (self.blocks - self.blocks_satisfied))
9161 return "".join(myoutput)
9163 class PollSelectAdapter(PollConstants):
9166 Use select to emulate a poll object, for
9167 systems that don't support poll().
9171 self._registered = {}
9172 self._select_args = [[], [], []]
9174 def register(self, fd, *args):
9176 Only POLLIN is currently supported!
9180 "register expected at most 2 arguments, got " + \
9181 repr(1 + len(args)))
9183 eventmask = PollConstants.POLLIN | \
9184 PollConstants.POLLPRI | PollConstants.POLLOUT
9188 self._registered[fd] = eventmask
9189 self._select_args = None
9191 def unregister(self, fd):
9192 self._select_args = None
9193 del self._registered[fd]
9195 def poll(self, *args):
9198 "poll expected at most 2 arguments, got " + \
9199 repr(1 + len(args)))
9205 select_args = self._select_args
9206 if select_args is None:
9207 select_args = [self._registered.keys(), [], []]
9209 if timeout is not None:
9210 select_args = select_args[:]
9211 # Translate poll() timeout args to select() timeout args:
9213 # | units | value(s) for indefinite block
9214 # ---------|--------------|------------------------------
9215 # poll | milliseconds | omitted, negative, or None
9216 # ---------|--------------|------------------------------
9217 # select | seconds | omitted
9218 # ---------|--------------|------------------------------
9220 if timeout is not None and timeout < 0:
9222 if timeout is not None:
9223 select_args.append(timeout / 1000)
9225 select_events = select.select(*select_args)
9227 for fd in select_events[0]:
9228 poll_events.append((fd, PollConstants.POLLIN))
9231 class SequentialTaskQueue(SlotObject):
9233 __slots__ = ("max_jobs", "running_tasks") + \
9234 ("_dirty", "_scheduling", "_task_queue")
9236 def __init__(self, **kwargs):
9237 SlotObject.__init__(self, **kwargs)
9238 self._task_queue = deque()
9239 self.running_tasks = set()
9240 if self.max_jobs is None:
9244 def add(self, task):
9245 self._task_queue.append(task)
9248 def addFront(self, task):
9249 self._task_queue.appendleft(task)
9260 if self._scheduling:
9261 # Ignore any recursive schedule() calls triggered via
9262 # self._task_exit().
9265 self._scheduling = True
9267 task_queue = self._task_queue
9268 running_tasks = self.running_tasks
9269 max_jobs = self.max_jobs
9270 state_changed = False
9272 while task_queue and \
9273 (max_jobs is True or len(running_tasks) < max_jobs):
9274 task = task_queue.popleft()
9275 cancelled = getattr(task, "cancelled", None)
9277 running_tasks.add(task)
9278 task.addExitListener(self._task_exit)
9280 state_changed = True
9283 self._scheduling = False
9285 return state_changed
9287 def _task_exit(self, task):
9289 Since we can always rely on exit listeners being called, the set of
9290 running tasks is always pruned automatically and there is never any need
9291 to actively prune it.
9293 self.running_tasks.remove(task)
9294 if self._task_queue:
9298 self._task_queue.clear()
9299 running_tasks = self.running_tasks
9300 while running_tasks:
9301 task = running_tasks.pop()
9302 task.removeExitListener(self._task_exit)
9306 def __nonzero__(self):
9307 return bool(self._task_queue or self.running_tasks)
9310 return len(self._task_queue) + len(self.running_tasks)
9312 _can_poll_device = None
9314 def can_poll_device():
9316 Test if it's possible to use poll() on a device such as a pty. This
9317 is known to fail on Darwin.
9319 @returns: True if poll() on a device succeeds, False otherwise.
9322 global _can_poll_device
9323 if _can_poll_device is not None:
9324 return _can_poll_device
9326 if not hasattr(select, "poll"):
9327 _can_poll_device = False
9328 return _can_poll_device
9331 dev_null = open('/dev/null', 'rb')
9333 _can_poll_device = False
9334 return _can_poll_device
9337 p.register(dev_null.fileno(), PollConstants.POLLIN)
9339 invalid_request = False
9340 for f, event in p.poll():
9341 if event & PollConstants.POLLNVAL:
9342 invalid_request = True
9346 _can_poll_device = not invalid_request
9347 return _can_poll_device
def create_poll_instance():
	"""
	Create an instance of select.poll, or an instance of
	PollSelectAdapter if there is no poll() implementation or
	it is broken somehow.
	"""
	if can_poll_device():
		return select.poll()
	return PollSelectAdapter()
9359 getloadavg = getattr(os, "getloadavg", None)
9360 if getloadavg is None:
9363 Uses /proc/loadavg to emulate os.getloadavg().
9364 Raises OSError if the load average was unobtainable.
9367 loadavg_str = open('/proc/loadavg').readline()
9369 # getloadavg() is only supposed to raise OSError, so convert
9370 raise OSError('unknown')
9371 loadavg_split = loadavg_str.split()
9372 if len(loadavg_split) < 3:
9373 raise OSError('unknown')
9377 loadavg_floats.append(float(loadavg_split[i]))
9379 raise OSError('unknown')
9380 return tuple(loadavg_floats)
9382 class PollScheduler(object):
9384 class _sched_iface_class(SlotObject):
9385 __slots__ = ("register", "schedule", "unregister")
9389 self._max_load = None
9391 self._poll_event_queue = []
9392 self._poll_event_handlers = {}
9393 self._poll_event_handler_ids = {}
9394 # Increment id for each new handler.
9395 self._event_handler_id = 0
9396 self._poll_obj = create_poll_instance()
9397 self._scheduling = False
9399 def _schedule(self):
9401 Calls _schedule_tasks() and automatically returns early from
9402 any recursive calls to this method that the _schedule_tasks()
9403 call might trigger. This makes _schedule() safe to call from
9404 inside exit listeners.
9406 if self._scheduling:
9408 self._scheduling = True
9410 return self._schedule_tasks()
9412 self._scheduling = False
9414 def _running_job_count(self):
9417 def _can_add_job(self):
9418 max_jobs = self._max_jobs
9419 max_load = self._max_load
9421 if self._max_jobs is not True and \
9422 self._running_job_count() >= self._max_jobs:
9425 if max_load is not None and \
9426 (max_jobs is True or max_jobs > 1) and \
9427 self._running_job_count() >= 1:
9429 avg1, avg5, avg15 = getloadavg()
9433 if avg1 >= max_load:
9438 def _poll(self, timeout=None):
9440 All poll() calls pass through here. The poll events
9441 are added directly to self._poll_event_queue.
9442 In order to avoid endless blocking, this raises
9443 StopIteration if timeout is None and there are
9444 no file descriptors to poll.
9446 if not self._poll_event_handlers:
9448 if timeout is None and \
9449 not self._poll_event_handlers:
9450 raise StopIteration(
9451 "timeout is None and there are no poll() event handlers")
9453 # The following error is known to occur with Linux kernel versions
9456 # select.error: (4, 'Interrupted system call')
9458 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9459 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9460 # without any events.
9463 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9465 except select.error, e:
9466 writemsg_level("\n!!! select error: %s\n" % (e,),
9467 level=logging.ERROR, noiselevel=-1)
9469 if timeout is not None:
9472 def _next_poll_event(self, timeout=None):
9474 Since the _schedule_wait() loop is called by event
9475 handlers from _poll_loop(), maintain a central event
9476 queue for both of them to share events from a single
9477 poll() call. In order to avoid endless blocking, this
9478 raises StopIteration if timeout is None and there are
9479 no file descriptors to poll.
9481 if not self._poll_event_queue:
9483 return self._poll_event_queue.pop()
9485 def _poll_loop(self):
9487 event_handlers = self._poll_event_handlers
9488 event_handled = False
9491 while event_handlers:
9492 f, event = self._next_poll_event()
9493 handler, reg_id = event_handlers[f]
9495 event_handled = True
9496 except StopIteration:
9497 event_handled = True
9499 if not event_handled:
9500 raise AssertionError("tight loop")
9502 def _schedule_yield(self):
9504 Schedule for a short period of time chosen by the scheduler based
9505 on internal state. Synchronous tasks should call this periodically
9506 in order to allow the scheduler to service pending poll events. The
9507 scheduler will call poll() exactly once, without blocking, and any
9508 resulting poll events will be serviced.
9510 event_handlers = self._poll_event_handlers
9513 if not event_handlers:
9514 return bool(events_handled)
9516 if not self._poll_event_queue:
9520 while event_handlers and self._poll_event_queue:
9521 f, event = self._next_poll_event()
9522 handler, reg_id = event_handlers[f]
9525 except StopIteration:
9528 return bool(events_handled)
9530 def _register(self, f, eventmask, handler):
9533 @return: A unique registration id, for use in schedule() or
9536 if f in self._poll_event_handlers:
9537 raise AssertionError("fd %d is already registered" % f)
9538 self._event_handler_id += 1
9539 reg_id = self._event_handler_id
9540 self._poll_event_handler_ids[reg_id] = f
9541 self._poll_event_handlers[f] = (handler, reg_id)
9542 self._poll_obj.register(f, eventmask)
9545 def _unregister(self, reg_id):
9546 f = self._poll_event_handler_ids[reg_id]
9547 self._poll_obj.unregister(f)
9548 del self._poll_event_handlers[f]
9549 del self._poll_event_handler_ids[reg_id]
9551 def _schedule_wait(self, wait_ids):
9553 Schedule until wait_id is not longer registered
9556 @param wait_id: a task id to wait for
9558 event_handlers = self._poll_event_handlers
9559 handler_ids = self._poll_event_handler_ids
9560 event_handled = False
9562 if isinstance(wait_ids, int):
9563 wait_ids = frozenset([wait_ids])
9566 while wait_ids.intersection(handler_ids):
9567 f, event = self._next_poll_event()
9568 handler, reg_id = event_handlers[f]
9570 event_handled = True
9571 except StopIteration:
9572 event_handled = True
9574 return event_handled
9576 class QueueScheduler(PollScheduler):
9579 Add instances of SequentialTaskQueue and then call run(). The
9580 run() method returns when no tasks remain.
9583 def __init__(self, max_jobs=None, max_load=None):
9584 PollScheduler.__init__(self)
9586 if max_jobs is None:
9589 self._max_jobs = max_jobs
9590 self._max_load = max_load
9591 self.sched_iface = self._sched_iface_class(
9592 register=self._register,
9593 schedule=self._schedule_wait,
9594 unregister=self._unregister)
9597 self._schedule_listeners = []
9600 self._queues.append(q)
	def remove(self, q):
		"""Remove a previously added task queue from this scheduler."""
		self._queues.remove(q)
9607 while self._schedule():
9610 while self._running_job_count():
9613 def _schedule_tasks(self):
9616 @returns: True if there may be remaining tasks to schedule,
9619 while self._can_add_job():
9620 n = self._max_jobs - self._running_job_count()
9624 if not self._start_next_job(n):
9627 for q in self._queues:
9632 def _running_job_count(self):
9634 for q in self._queues:
9635 job_count += len(q.running_tasks)
9636 self._jobs = job_count
9639 def _start_next_job(self, n=1):
9641 for q in self._queues:
9642 initial_job_count = len(q.running_tasks)
9644 final_job_count = len(q.running_tasks)
9645 if final_job_count > initial_job_count:
9646 started_count += (final_job_count - initial_job_count)
9647 if started_count >= n:
9649 return started_count
class TaskScheduler(object):

	"""
	A simple way to handle scheduling of AsynchronousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# A single SequentialTaskQueue driven by a QueueScheduler does the
		# real work; run() and sched_iface are delegated to the scheduler.
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self.sched_iface = self._scheduler.sched_iface
		self.run = self._scheduler.run
		self._scheduler.add(self._queue)

	def add(self, task):
		# Tasks are only queued here; they start when run() is called.
		self._queue.add(task)
9669 class JobStatusDisplay(object):
9671 _bound_properties = ("curval", "failed", "running")
9672 _jobs_column_width = 48
9674 # Don't update the display unless at least this much
9675 # time has passed, in units of seconds.
9676 _min_display_latency = 2
9678 _default_term_codes = {
9684 _termcap_name_map = {
9685 'carriage_return' : 'cr',
9690 def __init__(self, out=sys.stdout, quiet=False):
9691 object.__setattr__(self, "out", out)
9692 object.__setattr__(self, "quiet", quiet)
9693 object.__setattr__(self, "maxval", 0)
9694 object.__setattr__(self, "merges", 0)
9695 object.__setattr__(self, "_changed", False)
9696 object.__setattr__(self, "_displayed", False)
9697 object.__setattr__(self, "_last_display_time", 0)
9698 object.__setattr__(self, "width", 80)
9701 isatty = hasattr(out, "isatty") and out.isatty()
9702 object.__setattr__(self, "_isatty", isatty)
9703 if not isatty or not self._init_term():
9705 for k, capname in self._termcap_name_map.iteritems():
9706 term_codes[k] = self._default_term_codes[capname]
9707 object.__setattr__(self, "_term_codes", term_codes)
9708 encoding = sys.getdefaultencoding()
9709 for k, v in self._term_codes.items():
9710 if not isinstance(v, str):
9711 self._term_codes[k] = v.decode(encoding, 'replace')
9713 def _init_term(self):
9715 Initialize term control codes.
9717 @returns: True if term codes were successfully initialized,
9721 term_type = os.environ.get("TERM", "vt100")
9727 curses.setupterm(term_type, self.out.fileno())
9728 tigetstr = curses.tigetstr
9729 except curses.error:
9734 if tigetstr is None:
9738 for k, capname in self._termcap_name_map.iteritems():
9739 code = tigetstr(capname)
9741 code = self._default_term_codes[capname]
9742 term_codes[k] = code
9743 object.__setattr__(self, "_term_codes", term_codes)
9746 def _format_msg(self, msg):
9747 return ">>> %s" % msg
9751 self._term_codes['carriage_return'] + \
9752 self._term_codes['clr_eol'])
9754 self._displayed = False
9756 def _display(self, line):
9757 self.out.write(line)
9759 self._displayed = True
9761 def _update(self, msg):
9764 if not self._isatty:
9765 out.write(self._format_msg(msg) + self._term_codes['newline'])
9767 self._displayed = True
9773 self._display(self._format_msg(msg))
9775 def displayMessage(self, msg):
9777 was_displayed = self._displayed
9779 if self._isatty and self._displayed:
9782 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9784 self._displayed = False
9787 self._changed = True
9793 for name in self._bound_properties:
9794 object.__setattr__(self, name, 0)
9797 self.out.write(self._term_codes['newline'])
9799 self._displayed = False
9801 def __setattr__(self, name, value):
9802 old_value = getattr(self, name)
9803 if value == old_value:
9805 object.__setattr__(self, name, value)
9806 if name in self._bound_properties:
9807 self._property_change(name, old_value, value)
9809 def _property_change(self, name, old_value, new_value):
9810 self._changed = True
9813 def _load_avg_str(self):
9828 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9832 Display status on stdout, but only if something has
9833 changed since the last call.
9839 current_time = time.time()
9840 time_delta = current_time - self._last_display_time
9841 if self._displayed and \
9843 if not self._isatty:
9845 if time_delta < self._min_display_latency:
9848 self._last_display_time = current_time
9849 self._changed = False
9850 self._display_status()
9852 def _display_status(self):
9853 # Don't use len(self._completed_tasks) here since that also
9854 # can include uninstall tasks.
9855 curval_str = str(self.curval)
9856 maxval_str = str(self.maxval)
9857 running_str = str(self.running)
9858 failed_str = str(self.failed)
9859 load_avg_str = self._load_avg_str()
9861 color_output = StringIO()
9862 plain_output = StringIO()
9863 style_file = portage.output.ConsoleStyleFile(color_output)
9864 style_file.write_listener = plain_output
9865 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9866 style_writer.style_listener = style_file.new_styles
9867 f = formatter.AbstractFormatter(style_writer)
9869 number_style = "INFORM"
9870 f.add_literal_data("Jobs: ")
9871 f.push_style(number_style)
9872 f.add_literal_data(curval_str)
9874 f.add_literal_data(" of ")
9875 f.push_style(number_style)
9876 f.add_literal_data(maxval_str)
9878 f.add_literal_data(" complete")
9881 f.add_literal_data(", ")
9882 f.push_style(number_style)
9883 f.add_literal_data(running_str)
9885 f.add_literal_data(" running")
9888 f.add_literal_data(", ")
9889 f.push_style(number_style)
9890 f.add_literal_data(failed_str)
9892 f.add_literal_data(" failed")
9894 padding = self._jobs_column_width - len(plain_output.getvalue())
9896 f.add_literal_data(padding * " ")
9898 f.add_literal_data("Load avg: ")
9899 f.add_literal_data(load_avg_str)
9901 # Truncate to fit width, to avoid making the terminal scroll if the
9902 # line overflows (happens when the load average is large).
9903 plain_output = plain_output.getvalue()
9904 if self._isatty and len(plain_output) > self.width:
9905 # Use plain_output here since it's easier to truncate
9906 # properly than the color output which contains console
9908 self._update(plain_output[:self.width])
9910 self._update(color_output.getvalue())
9912 xtermTitle(" ".join(plain_output.split()))
9914 class Scheduler(PollScheduler):
9916 _opts_ignore_blockers = \
9917 frozenset(["--buildpkgonly",
9918 "--fetchonly", "--fetch-all-uri",
9919 "--nodeps", "--pretend"])
9921 _opts_no_background = \
9922 frozenset(["--pretend",
9923 "--fetchonly", "--fetch-all-uri"])
9925 _opts_no_restart = frozenset(["--buildpkgonly",
9926 "--fetchonly", "--fetch-all-uri", "--pretend"])
9928 _bad_resume_opts = set(["--ask", "--changelog",
9929 "--resume", "--skipfirst"])
9931 _fetch_log = "/var/log/emerge-fetch.log"
9933 class _iface_class(SlotObject):
9934 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9935 "dblinkElog", "fetch", "register", "schedule",
9936 "scheduleSetup", "scheduleUnpack", "scheduleYield",
	class _fetch_iface_class(SlotObject):
		# log_file: path of the shared fetch log (self._fetch_log);
		# schedule: callable used to queue fetch tasks (_schedule_fetch).
		__slots__ = ("log_file", "schedule")
9942 _task_queues_class = slot_dict_class(
9943 ("merge", "jobs", "fetch", "unpack"), prefix="")
9945 class _build_opts_class(SlotObject):
9946 __slots__ = ("buildpkg", "buildpkgonly",
9947 "fetch_all_uri", "fetchonly", "pretend")
9949 class _binpkg_opts_class(SlotObject):
9950 __slots__ = ("fetchonly", "getbinpkg", "pretend")
	class _pkg_count_class(SlotObject):
		# curval: number of packages merged so far; maxval: total number
		# of merge operations in the mergelist.
		__slots__ = ("curval", "maxval")
	class _emerge_log_class(SlotObject):
		# xterm_titles: flag passed through to emergelog(); also gates
		# whether the short_msg keyword is honored.
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			"""Forward all arguments to emergelog()."""
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)
	class _failed_pkg(SlotObject):
		# Record describing a failed package: the Package instance, its
		# build dir and build log paths, and the failing returncode.
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
9967 class _ConfigPool(object):
9968 """Interface for a task to temporarily allocate a config
9969 instance from a pool. This allows a task to be constructed
9970 long before the config instance actually becomes needed, like
9971 when prefetchers are constructed for the whole merge list."""
9972 __slots__ = ("_root", "_allocate", "_deallocate")
9973 def __init__(self, root, allocate, deallocate):
9975 self._allocate = allocate
9976 self._deallocate = deallocate
9978 return self._allocate(self._root)
9979 def deallocate(self, settings):
9980 self._deallocate(settings)
9982 class _unknown_internal_error(portage.exception.PortageException):
9984 Used internally to terminate scheduling. The specific reason for
9985 the failure should have been dumped to stderr.
9987 def __init__(self, value=""):
9988 portage.exception.PortageException.__init__(self, value)
9990 def __init__(self, settings, trees, mtimedb, myopts,
9991 spinner, mergelist, favorites, digraph):
9992 PollScheduler.__init__(self)
9993 self.settings = settings
9994 self.target_root = settings["ROOT"]
9996 self.myopts = myopts
9997 self._spinner = spinner
9998 self._mtimedb = mtimedb
9999 self._mergelist = mergelist
10000 self._favorites = favorites
10001 self._args_set = InternalPackageSet(favorites)
10002 self._build_opts = self._build_opts_class()
10003 for k in self._build_opts.__slots__:
10004 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10005 self._binpkg_opts = self._binpkg_opts_class()
10006 for k in self._binpkg_opts.__slots__:
10007 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10010 self._logger = self._emerge_log_class()
10011 self._task_queues = self._task_queues_class()
10012 for k in self._task_queues.allowed_keys:
10013 setattr(self._task_queues, k,
10014 SequentialTaskQueue())
10016 # Holds merges that will wait to be executed when no builds are
10017 # executing. This is useful for system packages since dependencies
10018 # on system packages are frequently unspecified.
10019 self._merge_wait_queue = []
10020 # Holds merges that have been transfered from the merge_wait_queue to
10021 # the actual merge queue. They are removed from this list upon
10022 # completion. Other packages can start building only when this list is
10024 self._merge_wait_scheduled = []
10026 # Holds system packages and their deep runtime dependencies. Before
10027 # being merged, these packages go to merge_wait_queue, to be merged
10028 # when no other packages are building.
10029 self._deep_system_deps = set()
10031 # Holds packages to merge which will satisfy currently unsatisfied
10032 # deep runtime dependencies of system packages. If this is not empty
10033 # then no parallel builds will be spawned until it is empty. This
10034 # minimizes the possibility that a build will fail due to the system
10035 # being in a fragile state. For example, see bug #259954.
10036 self._unsatisfied_system_deps = set()
10038 self._status_display = JobStatusDisplay()
10039 self._max_load = myopts.get("--load-average")
10040 max_jobs = myopts.get("--jobs")
10041 if max_jobs is None:
10043 self._set_max_jobs(max_jobs)
10045 # The root where the currently running
10046 # portage instance is installed.
10047 self._running_root = trees["/"]["root_config"]
10049 if settings.get("PORTAGE_DEBUG", "") == "1":
10051 self.pkgsettings = {}
10052 self._config_pool = {}
10053 self._blocker_db = {}
10055 self._config_pool[root] = []
10056 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10058 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10059 schedule=self._schedule_fetch)
10060 self._sched_iface = self._iface_class(
10061 dblinkEbuildPhase=self._dblink_ebuild_phase,
10062 dblinkDisplayMerge=self._dblink_display_merge,
10063 dblinkElog=self._dblink_elog,
10064 fetch=fetch_iface, register=self._register,
10065 schedule=self._schedule_wait,
10066 scheduleSetup=self._schedule_setup,
10067 scheduleUnpack=self._schedule_unpack,
10068 scheduleYield=self._schedule_yield,
10069 unregister=self._unregister)
10071 self._prefetchers = weakref.WeakValueDictionary()
10072 self._pkg_queue = []
10073 self._completed_tasks = set()
10075 self._failed_pkgs = []
10076 self._failed_pkgs_all = []
10077 self._failed_pkgs_die_msgs = []
10078 self._post_mod_echo_msgs = []
10079 self._parallel_fetch = False
10080 merge_count = len([x for x in mergelist \
10081 if isinstance(x, Package) and x.operation == "merge"])
10082 self._pkg_count = self._pkg_count_class(
10083 curval=0, maxval=merge_count)
10084 self._status_display.maxval = self._pkg_count.maxval
10086 # The load average takes some time to respond when new
10087 # jobs are added, so we need to limit the rate of adding
10089 self._job_delay_max = 10
10090 self._job_delay_factor = 1.0
10091 self._job_delay_exp = 1.5
10092 self._previous_job_start_time = None
10094 self._set_digraph(digraph)
10096 # This is used to memoize the _choose_pkg() result when
10097 # no packages can be chosen until one of the existing
10099 self._choose_pkg_return_early = False
10101 features = self.settings.features
10102 if "parallel-fetch" in features and \
10103 not ("--pretend" in self.myopts or \
10104 "--fetch-all-uri" in self.myopts or \
10105 "--fetchonly" in self.myopts):
10106 if "distlocks" not in features:
10107 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10108 portage.writemsg(red("!!!")+" parallel-fetching " + \
10109 "requires the distlocks feature enabled"+"\n",
10111 portage.writemsg(red("!!!")+" you have it disabled, " + \
10112 "thus parallel-fetching is being disabled"+"\n",
10114 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10115 elif len(mergelist) > 1:
10116 self._parallel_fetch = True
10118 if self._parallel_fetch:
10119 # clear out existing fetch log if it exists
10121 open(self._fetch_log, 'w')
10122 except EnvironmentError:
10125 self._running_portage = None
10126 portage_match = self._running_root.trees["vartree"].dbapi.match(
10127 portage.const.PORTAGE_PACKAGE_ATOM)
10129 cpv = portage_match.pop()
10130 self._running_portage = self._pkg(cpv, "installed",
10131 self._running_root, installed=True)
10133 def _poll(self, timeout=None):
10135 PollScheduler._poll(self, timeout=timeout)
10137 def _set_max_jobs(self, max_jobs):
10138 self._max_jobs = max_jobs
10139 self._task_queues.jobs.max_jobs = max_jobs
10141 def _background_mode(self):
10143 Check if background mode is enabled and adjust states as necessary.
10146 @returns: True if background mode is enabled, False otherwise.
10148 background = (self._max_jobs is True or \
10149 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10150 not bool(self._opts_no_background.intersection(self.myopts))
10153 interactive_tasks = self._get_interactive_tasks()
10154 if interactive_tasks:
10156 writemsg_level(">>> Sending package output to stdio due " + \
10157 "to interactive package(s):\n",
10158 level=logging.INFO, noiselevel=-1)
10160 for pkg in interactive_tasks:
10161 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10162 if pkg.root != "/":
10163 pkg_str += " for " + pkg.root
10164 msg.append(pkg_str)
10166 writemsg_level("".join("%s\n" % (l,) for l in msg),
10167 level=logging.INFO, noiselevel=-1)
10168 if self._max_jobs is True or self._max_jobs > 1:
10169 self._set_max_jobs(1)
10170 writemsg_level(">>> Setting --jobs=1 due " + \
10171 "to the above interactive package(s)\n",
10172 level=logging.INFO, noiselevel=-1)
10174 self._status_display.quiet = \
10175 not background or \
10176 ("--quiet" in self.myopts and \
10177 "--verbose" not in self.myopts)
10179 self._logger.xterm_titles = \
10180 "notitles" not in self.settings.features and \
10181 self._status_display.quiet
10185 def _get_interactive_tasks(self):
10186 from portage import flatten
10187 from portage.dep import use_reduce, paren_reduce
10188 interactive_tasks = []
10189 for task in self._mergelist:
10190 if not (isinstance(task, Package) and \
10191 task.operation == "merge"):
10194 properties = flatten(use_reduce(paren_reduce(
10195 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10196 except portage.exception.InvalidDependString, e:
10197 show_invalid_depstring_notice(task,
10198 task.metadata["PROPERTIES"], str(e))
10199 raise self._unknown_internal_error()
10200 if "interactive" in properties:
10201 interactive_tasks.append(task)
10202 return interactive_tasks
10204 def _set_digraph(self, digraph):
10205 if "--nodeps" in self.myopts or \
10206 (self._max_jobs is not True and self._max_jobs < 2):
10208 self._digraph = None
10211 self._digraph = digraph
10212 self._find_system_deps()
10213 self._prune_digraph()
10214 self._prevent_builddir_collisions()
	def _find_system_deps(self):
		"""
		Find system packages and their deep runtime dependencies. Before being
		merged, these packages go to merge_wait_queue, to be merged when no
		other packages are building.
		"""
		deep_system_deps = self._deep_system_deps
		deep_system_deps.clear()
		deep_system_deps.update(
			_find_deep_system_runtime_deps(self._digraph))
		# Only packages that are actually being merged are relevant here.
		deep_system_deps.difference_update([pkg for pkg in \
			deep_system_deps if pkg.operation != "merge"])
10229 def _prune_digraph(self):
10231 Prune any root nodes that are irrelevant.
10234 graph = self._digraph
10235 completed_tasks = self._completed_tasks
10236 removed_nodes = set()
10238 for node in graph.root_nodes():
10239 if not isinstance(node, Package) or \
10240 (node.installed and node.operation == "nomerge") or \
10242 node in completed_tasks:
10243 removed_nodes.add(node)
10245 graph.difference_update(removed_nodes)
10246 if not removed_nodes:
10248 removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    the main process.
    """
    cpv_map = {}
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
            continue
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
            continue
        # Same cpv seen before: serialize the later merge behind each
        # earlier one with a buildtime edge so builddirs never collide.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    # Default exit status when no explicit status is supplied.
    status = 1

    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        if pargs:
            # First positional argument overrides the default status.
            self.status = pargs[0]
10284 def _schedule_fetch(self, fetcher):
10286 Schedule a fetcher on the fetch queue, in order to
10287 serialize access to the fetch log.
10289 self._task_queues.fetch.addFront(fetcher)
10291 def _schedule_setup(self, setup_phase):
10293 Schedule a setup phase on the merge queue, in order to
10294 serialize unsandboxed access to the live filesystem.
10296 self._task_queues.merge.addFront(setup_phase)
10299 def _schedule_unpack(self, unpack_phase):
10301 Schedule an unpack phase on the unpack queue, in order
10302 to serialize $DISTDIR access for live ebuilds.
10304 self._task_queues.unpack.add(unpack_phase)
10306 def _find_blockers(self, new_pkg):
10308 Returns a callable which should be called only when
10309 the vdb lock has been acquired.
10311 def get_blockers():
10312 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10313 return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """
    Return a list of dblink instances for installed packages that block
    new_pkg, or None when blockers are being ignored via options.

    @param acquire_lock: passed through to findInstalledBlockers; nonzero
        makes it acquire the vdb lock itself.
    """
    if self._opts_ignore_blockers.intersection(self.myopts):
        return None

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).
    import gc
    gc.collect()

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # A package never blocks its own slot or an identical cpv,
        # since the new instance simply replaces those.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
            continue
        if new_pkg.cpv == blocking_pkg.cpv:
            continue
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    gc.collect()

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """
    Map a dblink instance to the scheduler's Package instance for the
    same cpv/root/tree combination.
    """
    pkg_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    target_root_config = self.trees[pkg_dblink.myroot]["root_config"]
    return self._pkg(pkg_dblink.mycpv, pkg_type, target_root_config,
        installed=(pkg_type == "installed"))
10350 def _append_to_log_path(self, log_path, msg):
10351 f = open(log_path, 'a')
10357 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10359 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10362 background = self._background
10364 if background and log_path is not None:
10365 log_file = open(log_path, 'a')
10370 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10372 if log_file is not None:
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """
    Display a merge message from a dblink. Without a log file the message
    goes to the terminal (unless backgrounded and below WARN level); with
    a log file it is appended there, and echoed to the terminal only in
    foreground mode.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if log_path is None:
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    else:
        # NOTE(review): the else-branch scaffolding was elided in the
        # excerpt; reconstructed as foreground-echo + always-log.
        if not background:
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
        self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    # NOTE(review): log_path is read here but not visibly used below —
    # presumably consumed by elided lines; confirm against upstream.
    log_path = settings.get("PORTAGE_LOG_FILE")

    # Run the phase synchronously from the caller's point of view, but
    # through the scheduler so other tasks keep making progress.
    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    ebuild_phase.wait()

    return ebuild_phase.returncode
def _check_manifests(self):
    """
    Verify all the manifests now so that the user is notified of failure
    as soon as possible. Returns os.EX_OK on success (or when checking is
    not applicable), 1 on the first digest failure.
    """
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
        return os.EX_OK

    shown_verifying_msg = False
    quiet_settings = {}
    # Clone per-root settings with PORTAGE_QUIET forced on, so digest
    # checking doesn't spam the terminal.
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
            continue

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
            return 1

    return os.EX_OK
10446 def _add_prefetchers(self):
10448 if not self._parallel_fetch:
10451 if self._parallel_fetch:
10452 self._status_msg("Starting parallel fetch")
10454 prefetchers = self._prefetchers
10455 getbinpkg = "--getbinpkg" in self.myopts
10457 # In order to avoid "waiting for lock" messages
10458 # at the beginning, which annoy users, never
10459 # spawn a prefetcher for the first package.
10460 for pkg in self._mergelist[1:]:
10461 prefetcher = self._create_prefetcher(pkg)
10462 if prefetcher is not None:
10463 self._task_queues.fetch.add(prefetcher)
10464 prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    prefetcher = None

    if not isinstance(pkg, Package):
        # Blockers and other non-Package entries have nothing to fetch.
        pass

    elif pkg.type_name == "ebuild":
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        # Only remote binary packages need prefetching.
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)

    return prefetcher
10492 def _is_restart_scheduled(self):
10494 Check if the merge list contains a replacement
10495 for the current running instance, that will result
10496 in restart after merge.
10498 @returns: True if a restart is scheduled, False otherwise.
10500 if self._opts_no_restart.intersection(self.myopts):
10503 mergelist = self._mergelist
10505 for i, pkg in enumerate(mergelist):
10506 if self._is_restart_necessary(pkg) and \
10507 i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # Figure out if we need a restart: only a portage package merged
    # into the running root can require one.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            # Restart only when the version actually changes.
            return pkg.cpv != self._running_portage.cpv
        return True
    return False
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):
        return

    if not self._is_restart_necessary(pkg):
        return

    if pkg == self._mergelist[-1]:
        # Nothing left to merge after this one; no restart needed.
        return

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    # Drop the just-merged package from the resume list and persist it
    # before exec() replaces this process.
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    mtimedb.commit()
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            if myarg is True:
                # Flag-style option without a value.
                mynewargv.append(myopt)
            else:
                mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
# NOTE(review): the enclosing method header (presumably "def merge(self):")
# and numerous control-flow lines (try/else/break/return and some variable
# initializations) are elided from this excerpt. Statements are reproduced
# exactly as given, with each elision flagged inline; do not treat this
# fragment as runnable without restoring them from upstream.
if "--resume" in self.myopts:
    # Resuming a previously interrupted merge: tell the user.
    portage.writemsg_stdout(
        colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
    self._logger.log(" *** Resuming merge...")

# Persist the merge list early, so --resume works even if manifest
# verification below aborts the run.
self._save_resume_list()

# NOTE(review): "try:" elided before the next statement.
self._background = self._background_mode()
except self._unknown_internal_error:
    # NOTE(review): handler body elided — presumably returns an error code.

for root in self.trees:
    root_config = self.trees[root]["root_config"]

    # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
    # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
    # for ensuring sane $PWD (bug #239560) and storing elog messages.
    tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
    if not tmpdir or not os.path.isdir(tmpdir):
        msg = "The directory specified in your " + \
            "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
            "does not exist. Please create this " + \
            "directory or correct your PORTAGE_TMPDIR setting."
        msg = textwrap.wrap(msg, 70)
        out = portage.output.EOutput()
        # NOTE(review): error-output loop and early return elided.

    if self._background:
        # Mark settings so child processes know to stay quiet.
        root_config.settings.unlock()
        root_config.settings["PORTAGE_BACKGROUND"] = "1"
        root_config.settings.backup_changes("PORTAGE_BACKGROUND")
        root_config.settings.lock()

    self.pkgsettings[root] = portage.config(
        clone=root_config.settings)

rval = self._check_manifests()
if rval != os.EX_OK:
    # NOTE(review): early "return rval" elided.

keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs

# NOTE(review): retry-loop header (presumably "while True:") elided; the
# block below re-runs _merge() with a recalculated resume list while
# --keep-going is in effect.
rval = self._merge()
if rval == os.EX_OK or fetchonly or not keep_going:
    # NOTE(review): "break" elided.
if "resume" not in mtimedb:
    # NOTE(review): "break" elided.
mergelist = self._mtimedb["resume"].get("mergelist")
# NOTE(review): empty-mergelist guard elided here.
if not failed_pkgs:
    # NOTE(review): "break" elided.

# Drop failed packages from the resume list before retrying.
for failed_pkg in failed_pkgs:
    mergelist.remove(list(failed_pkg.pkg))

self._failed_pkgs_all.extend(failed_pkgs)
# NOTE(review): clearing of failed_pkgs elided here.

if not self._calc_resume_list():
    # NOTE(review): "break" elided.

clear_caches(self.trees)
if not self._mergelist:
    # NOTE(review): "break" elided.

self._save_resume_list()
self._pkg_count.curval = 0
self._pkg_count.maxval = len([x for x in self._mergelist \
    if isinstance(x, Package) and x.operation == "merge"])
self._status_display.maxval = self._pkg_count.maxval

self._logger.log(" *** Finished. Cleaning up...")

self._failed_pkgs_all.extend(failed_pkgs)

background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:
    # If only one package failed then just show it's
    # whole log for easy viewing.
    failed_pkg = self._failed_pkgs_all[-1]
    build_dir = failed_pkg.build_dir

    log_paths = [failed_pkg.build_log]

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        # NOTE(review): "try:" (and IOError handling) elided.
        log_file = open(log_path)

    if log_file is not None:
        # NOTE(review): "try:" elided.
        for line in log_file:
            writemsg_level(line, noiselevel=-1)
        # NOTE(review): "finally: log_file.close()" elided.
        failure_log_shown = True

# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()

if background and not failure_log_shown and \
    self._failed_pkgs_all and \
    self._failed_pkgs_die_msgs and \
    not mod_echo_output:

    printer = portage.output.EOutput()
    for mysettings, key, logentries in self._failed_pkgs_die_msgs:
        # NOTE(review): root_msg initialization elided here.
        if mysettings["ROOT"] != "/":
            root_msg = " merged to %s" % mysettings["ROOT"]
        printer.einfo("Error messages for package %s%s:" % \
            (colorize("INFORM", key), root_msg))

        for phase in portage.const.EBUILD_PHASES:
            if phase not in logentries:
                # NOTE(review): "continue" elided.
            for msgtype, msgcontent in logentries[phase]:
                if isinstance(msgcontent, basestring):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    printer.eerror(line.strip("\n"))

if self._post_mod_echo_msgs:
    for msg in self._post_mod_echo_msgs:
        # NOTE(review): the call "msg()" elided.

# Summarize all failures at the end of the run.
if len(self._failed_pkgs_all) > 1 or \
    (self._failed_pkgs_all and "--keep-going" in self.myopts):
    if len(self._failed_pkgs_all) > 1:
        msg = "The following %d packages have " % \
            len(self._failed_pkgs_all) + \
            "failed to build or install:"
    # NOTE(review): "else:" elided before the single-package message.
        msg = "The following package has " + \
            "failed to build or install:"
    prefix = bad(" * ")
    writemsg(prefix + "\n", noiselevel=-1)
    from textwrap import wrap
    for line in wrap(msg, 72):
        writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
    writemsg(prefix + "\n", noiselevel=-1)
    for failed_pkg in self._failed_pkgs_all:
        writemsg("%s\t%s\n" % (prefix,
            colorize("INFORM", str(failed_pkg.pkg))),
        # NOTE(review): trailing "noiselevel=-1)" argument elided.
    writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """
    elog listener that collects ERROR-level entries for the end-of-run
    failure summary. Entries with no errors are ignored (the elided
    guard in the excerpt would otherwise append empty tuples).
    """
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    if errors:
        self._failed_pkgs_die_msgs.append(
            (mysettings, key, errors))
10755 def _locate_failure_log(self, failed_pkg):
10757 build_dir = failed_pkg.build_dir
10760 log_paths = [failed_pkg.build_log]
10762 for log_path in log_paths:
10767 log_size = os.stat(log_path).st_size
def _add_packages(self):
    """
    Populate the scheduling queue from the merge list. Blockers in the
    list are already satisfied by this point and carry no work.
    """
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            # a satisfied blocker — nothing to schedule
            pass
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    """
    graph = self._digraph
    if graph is None:
        return
    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    if pkg.root != '/':
        return

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
            return False
        return True

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
            continue
        if child is pkg:
            continue
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
10827 def _merge_wait_exit_handler(self, task):
10828 self._merge_wait_scheduled.remove(task)
10829 self._merge_exit(task)
10831 def _merge_exit(self, merge):
10832 self._do_merge_exit(merge)
10833 self._deallocate_config(merge.merge.settings)
10834 if merge.returncode == os.EX_OK and \
10835 not merge.merge.pkg.installed:
10836 self._status_display.curval += 1
10837 self._status_display.merges = len(self._task_queues.merge)
10840 def _do_merge_exit(self, merge):
10841 pkg = merge.merge.pkg
10842 if merge.returncode != os.EX_OK:
10843 settings = merge.merge.settings
10844 build_dir = settings.get("PORTAGE_BUILDDIR")
10845 build_log = settings.get("PORTAGE_LOG_FILE")
10847 self._failed_pkgs.append(self._failed_pkg(
10848 build_dir=build_dir, build_log=build_log,
10850 returncode=merge.returncode))
10851 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10853 self._status_display.failed = len(self._failed_pkgs)
10856 self._task_complete(pkg)
10857 pkg_to_replace = merge.merge.pkg_to_replace
10858 if pkg_to_replace is not None:
10859 # When a package is replaced, mark it's uninstall
10860 # task complete (if any).
10861 uninst_hash_key = \
10862 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10863 self._task_complete(uninst_hash_key)
10868 self._restart_if_necessary(pkg)
10870 # Call mtimedb.commit() after each merge so that
10871 # --resume still works after being interrupted
10872 # by reboot, sigkill or similar.
10873 mtimedb = self._mtimedb
10874 mtimedb["resume"]["mergelist"].remove(list(pkg))
10875 if not mtimedb["resume"]["mergelist"]:
10876 del mtimedb["resume"]
def _build_exit(self, build):
    """
    Exit handler for build jobs. Successful builds are converted into
    PackageMerge tasks (deferred via merge_wait_queue for deep system
    deps); failed builds are recorded and reported. Always releases the
    job slot and reschedules.
    """
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        else:
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)
            self._status_display.merges = len(self._task_queues.merge)
    else:
        settings = build.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            pkg=build.pkg,
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

        self._status_display.failed = len(self._failed_pkgs)
        self._deallocate_config(build.settings)
    # NOTE(review): the job-slot release and reschedule lines were elided
    # in the excerpt and are reconstructed here — confirm against upstream.
    self._jobs -= 1
    self._status_display.running = self._jobs
    self._schedule()
10910 def _extract_exit(self, build):
10911 self._build_exit(build)
10913 def _task_complete(self, pkg):
10914 self._completed_tasks.add(pkg)
10915 self._unsatisfied_system_deps.discard(pkg)
10916 self._choose_pkg_return_early = False
# NOTE(review): the enclosing method header (presumably "def _merge(self):")
# plus its try/finally scaffolding and rval initialization are elided from
# this excerpt; statements are reproduced exactly as given.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Silence lock messages and capture elog errors while the main loop runs.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
# NOTE(review): the main-loop invocation and "finally:" elided here.
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# NOTE(review): the guard around this (only when failures exist) elided;
# as written it would raise IndexError on an empty failed_pkgs list.
rval = failed_pkgs[-1].returncode
10939 def _main_loop_cleanup(self):
10940 del self._pkg_queue[:]
10941 self._completed_tasks.clear()
10942 self._deep_system_deps.clear()
10943 self._unsatisfied_system_deps.clear()
10944 self._choose_pkg_return_early = False
10945 self._status_display.reset()
10946 self._digraph = None
10947 self._task_queues.fetch.clear()
10949 def _choose_pkg(self):
10951 Choose a task that has all it's dependencies satisfied.
10954 if self._choose_pkg_return_early:
10957 if self._digraph is None:
10958 if (self._jobs or self._task_queues.merge) and \
10959 not ("--nodeps" in self.myopts and \
10960 (self._max_jobs is True or self._max_jobs > 1)):
10961 self._choose_pkg_return_early = True
10963 return self._pkg_queue.pop(0)
10965 if not (self._jobs or self._task_queues.merge):
10966 return self._pkg_queue.pop(0)
10968 self._prune_digraph()
10971 later = set(self._pkg_queue)
10972 for pkg in self._pkg_queue:
10974 if not self._dependent_on_scheduled_merges(pkg, later):
10978 if chosen_pkg is not None:
10979 self._pkg_queue.remove(chosen_pkg)
10981 if chosen_pkg is None:
10982 # There's no point in searching for a package to
10983 # choose until at least one of the existing jobs
10985 self._choose_pkg_return_early = True
10989 def _dependent_on_scheduled_merges(self, pkg, later):
10991 Traverse the subgraph of the given packages deep dependencies
10992 to see if it contains any scheduled merges.
10993 @param pkg: a package to check dependencies for
10995 @param later: packages for which dependence should be ignored
10996 since they will be merged later than pkg anyway and therefore
10997 delaying the merge of pkg will not result in a more optimal
11001 @returns: True if the package is dependent, False otherwise.
11004 graph = self._digraph
11005 completed_tasks = self._completed_tasks
11008 traversed_nodes = set([pkg])
11009 direct_deps = graph.child_nodes(pkg)
11010 node_stack = direct_deps
11011 direct_deps = frozenset(direct_deps)
11013 node = node_stack.pop()
11014 if node in traversed_nodes:
11016 traversed_nodes.add(node)
11017 if not ((node.installed and node.operation == "nomerge") or \
11018 (node.operation == "uninstall" and \
11019 node not in direct_deps) or \
11020 node in completed_tasks or \
11024 node_stack.extend(graph.child_nodes(node))
11028 def _allocate_config(self, root):
11030 Allocate a unique config instance for a task in order
11031 to prevent interference between parallel tasks.
11033 if self._config_pool[root]:
11034 temp_settings = self._config_pool[root].pop()
11036 temp_settings = portage.config(clone=self.pkgsettings[root])
11037 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11038 # performance reasons, call it here to make sure all settings from the
11039 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11040 temp_settings.reload()
11041 temp_settings.reset()
11042 return temp_settings
11044 def _deallocate_config(self, settings):
11045 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    # Drive the scheduler until the queue drains or a failure stops
    # further scheduling, polling for I/O events between passes.

    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    merge_queue = self._task_queues.merge

    while self._schedule():
        if self._poll_event_handlers:
            # NOTE(review): loop body elided in this excerpt — presumably
            # a self._poll_loop() call; confirm against upstream.

    # NOTE(review): drain-loop scaffolding ("while True:" and the
    # _schedule()/break lines) elided around the following statements.
    if not (self._jobs or merge_queue):
        # NOTE(review): "break" elided.
    if self._poll_event_handlers:
        # NOTE(review): poll call elided.
11068 def _keep_scheduling(self):
11069 return bool(self._pkg_queue and \
11070 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]

    self._schedule_tasks_imp()
    self._status_display.display()

    # NOTE(review): the state-change accumulation loop body and the
    # re-scheduling loop scaffolding are elided in this excerpt.
    for q in self._task_queues.values():
        # NOTE(review): loop body elided.

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        # NOTE(review): follow-up lines elided here.

    self._schedule_tasks_imp()
    self._status_display.display()

    return self._keep_scheduling()
11105 def _job_delay(self):
11108 @returns: True if job scheduling should be delayed, False otherwise.
11111 if self._jobs and self._max_load is not None:
11113 current_time = time.time()
11115 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11116 if delay > self._job_delay_max:
11117 delay = self._job_delay_max
11118 if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
    """
    @returns: True if state changed, False otherwise.
    """
    # NOTE(review): the state_change initialization and scheduling-loop
    # scaffolding are elided from this excerpt; statements below are
    # reproduced exactly as given.

    if not self._keep_scheduling():
        return bool(state_change)

    # Bail out when nothing new can start: an earlier pass gave up,
    # merges are waiting, system deps are unsatisfied with jobs running,
    # the job limit is reached, or the delay throttle is active.
    if self._choose_pkg_return_early or \
        self._merge_wait_scheduled or \
        (self._jobs and self._unsatisfied_system_deps) or \
        not self._can_add_job() or \
        # NOTE(review): final condition (presumably self._job_delay()) elided.
        return bool(state_change)

    pkg = self._choose_pkg()
    # NOTE(review): the None-check on pkg elided before this return.
    return bool(state_change)

    if not pkg.installed:
        self._pkg_count.curval += 1

    task = self._task(pkg)

    # NOTE(review): the branch headers distinguishing uninstall /
    # installed / build tasks are elided around the alternatives below.
    merge = PackageMerge(merge=task)
    merge.addExitListener(self._merge_exit)
    self._task_queues.merge.add(merge)

    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._extract_exit)
    self._task_queues.jobs.add(task)

    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._build_exit)
    self._task_queues.jobs.add(task)

    return bool(state_change)
def _task(self, pkg):
    """
    Build a MergeListItem task for pkg, resolving the currently-installed
    package in the same slot (if any) as the instance being replaced.
    """
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        if previous_cpv:
            # Highest-version match in the slot is the one replaced.
            previous_cpv = previous_cpv.pop()
            pkg_to_replace = self._pkg(previous_cpv,
                "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
        self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)

    return task
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """
    Report a failed package through the status display, including the
    log file location when a usable log can be located.
    """
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg = "%s %s %s" % (msg, preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is None:
        self._status_msg(msg)
    else:
        self._status_msg(msg + ", Log file:")
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the responsibility of generating \r and \n control characters,
    to guarantee that lines are created or erased when necessary and
    appropriate.

    @param msg: a brief status message (no newlines allowed)
    """
    if not self._background:
        # In the foreground, start on a fresh line so the message is not
        # appended to partial terminal output.
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    # Only actual merges belong in the resume list.
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]

    # Persist immediately so an interruption can still resume.
    mtimedb.commit()
def _calc_resume_list(self):
    """
    Use the current resume list to calculate a new one,
    dropping any packages with unsatisfied deps.
    @returns: True if successful, False otherwise.
    """
    # NOTE(review): numerous control-flow lines (try/else/continue/return
    # and some initializations) are elided from this excerpt; statements
    # are reproduced exactly as given, with elisions flagged inline.
    print colorize("GOOD", "*** Resuming merge...")

    if self._show_list():
        if "--tree" in self.myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in reverse order:\n\n"))
        # NOTE(review): "else:" elided before the alternative message.
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in order:\n\n"))

    show_spinner = "--quiet" not in self.myopts and \
        "--nodeps" not in self.myopts
    # NOTE(review): spinner guard ("if show_spinner:") elided.
    print "Calculating dependencies  ",

    myparams = create_depgraph_params(self.myopts, None)
    # NOTE(review): "try:" elided before the resume_depgraph call.
    success, mydepgraph, dropped_tasks = resume_depgraph(
        self.settings, self.trees, self._mtimedb, self.myopts,
        myparams, self._spinner)
    except depgraph.UnsatisfiedResumeDep, exc:
        # rename variable to avoid python-3.0 error:
        # SyntaxError: can not delete variable 'e' referenced in nested
        # NOTE(review): the "e = exc" rebinding elided here.
        mydepgraph = e.depgraph
        dropped_tasks = set()

    # NOTE(review): spinner-completion guard elided.
    print "\b\b... done!"

    # NOTE(review): failure guard ("if not success:") elided around the
    # nested message function below.
    def unsatisfied_resume_dep_msg():
        # Deferred so mod_echo output doesn't bury the explanation.
        mydepgraph.display_problems()
        out = portage.output.EOutput()
        out.eerror("One or more packages are either masked or " + \
            "have missing dependencies:")
        # NOTE(review): blank eerror / indent initialization elided.
        show_parents = set()
        for dep in e.value:
            if dep.parent in show_parents:
                # NOTE(review): "continue" elided.
            show_parents.add(dep.parent)
            if dep.atom is None:
                out.eerror(indent + "Masked package:")
                out.eerror(2 * indent + str(dep.parent))
            # NOTE(review): "else:" elided before the atom message.
                out.eerror(indent + str(dep.atom) + " pulled in by:")
                out.eerror(2 * indent + str(dep.parent))
        msg = "The resume list contains packages " + \
            "that are either masked or have " + \
            "unsatisfied dependencies. " + \
            "Please restart/continue " + \
            "the operation manually, or use --skipfirst " + \
            "to skip the first package in the list and " + \
            "any other packages that may be " + \
            "masked or have missing dependencies."
        for line in textwrap.wrap(msg, 72):
            # NOTE(review): out.eerror(line) elided.
    self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
    # NOTE(review): "return False" elided after registering the message.

    if success and self._show_list():
        mylist = mydepgraph.altlist()
        # NOTE(review): non-empty guard elided.
        if "--tree" in self.myopts:
            # NOTE(review): mylist.reverse() elided.
        mydepgraph.display(mylist, favorites=self._favorites)

    # NOTE(review): failure branch header elided around the next two lines.
    self._post_mod_echo_msgs.append(mydepgraph.display_problems)
    # NOTE(review): "return False" elided.
    mydepgraph.display_problems()

    mylist = mydepgraph.altlist()
    # Break references so dropped graph objects can be garbage collected.
    mydepgraph.break_refs(mylist)
    mydepgraph.break_refs(dropped_tasks)
    self._mergelist = mylist
    self._set_digraph(mydepgraph.schedulerGraph())

    # NOTE(review): msg_width initialization elided.
    for task in dropped_tasks:
        if not (isinstance(task, Package) and task.operation == "merge"):
            # NOTE(review): "continue" and "pkg = task" elided.
    msg = "emerge --keep-going:" + \
        # NOTE(review): the " %s" % (pkg.cpv,) continuation elided.
    if pkg.root != "/":
        msg += " for %s" % (pkg.root,)
    msg += " dropped due to unsatisfied dependency."
    for line in textwrap.wrap(msg, msg_width):
        eerror(line, phase="other", key=pkg.cpv)
    settings = self.pkgsettings[pkg.root]
    # Ensure that log collection from $T is disabled inside
    # elog_process(), since any logs that might exist are
    # irrelevant to the dropped task.
    settings.pop("T", None)
    portage.elog.elog_process(pkg.cpv, settings)
    self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
    # NOTE(review): "return True" elided at the end of the method.
11364 def _show_list(self):
11365 myopts = self.myopts
11366 if "--quiet" not in myopts and \
11367 ("--ask" in myopts or "--tree" in myopts or \
11368 "--verbose" in myopts):
def _world_atom(self, pkg):
    """
    Add the package to the world file, but only if
    it's supposed to be added. Otherwise, do nothing.
    """
    if set(("--buildpkgonly", "--fetchonly",
        "--fetch-all-uri",
        "--oneshot", "--onlydeps",
        "--pretend")).intersection(self.myopts):
        return

    if pkg.root != self.target_root:
        return

    args_set = self._args_set
    if not args_set.findAtomForPackage(pkg):
        # Only packages explicitly requested on the command line
        # (or via sets) belong in world.
        return

    logger = self._logger
    pkg_count = self._pkg_count
    root_config = pkg.root_config
    world_set = root_config.sets["world"]
    world_locked = False
    if hasattr(world_set, "lock"):
        world_set.lock()
        world_locked = True

    try:
        if hasattr(world_set, "load"):
            world_set.load() # maybe it's changed on disk

        atom = create_world_atom(pkg, args_set, root_config)
        if atom:
            if hasattr(world_set, "add"):
                self._status_msg(('Recording %s in "world" ' + \
                    'favorites file...') % atom)
                logger.log(" === (%s of %s) Updating world file (%s)" % \
                    (pkg_count.curval, pkg_count.maxval, pkg.cpv))
                world_set.add(atom)
            else:
                writemsg_level('\n!!! Unable to record %s in "world"\n' % \
                    (atom,), level=logging.WARN, noiselevel=-1)
    finally:
        if world_locked:
            world_set.unlock()
def _pkg(self, cpv, type_name, root_config, installed=False):
    """
    Get a package instance from the cache, or create a new
    one if necessary. Raises KeyError from aux_get if it
    failures for some reason (package does not exist or is
    corrupt).
    """
    operation = "merge"
    if installed:
        operation = "nomerge"

    if self._digraph is not None:
        # Reuse existing instance when available.
        pkg = self._digraph.get(
            (type_name, root_config.root, cpv, operation))
        if pkg is not None:
            return pkg

    tree_type = depgraph.pkg_tree_map[type_name]
    db = root_config.trees[tree_type].dbapi
    db_keys = list(self.trees[root_config.root][
        tree_type].dbapi._aux_cache_keys)
    metadata = izip(db_keys, db.aux_get(cpv, db_keys))
    pkg = Package(cpv=cpv, metadata=metadata,
        root_config=root_config, installed=installed)
    if type_name == "ebuild":
        # USE/CHOST must reflect the per-root configuration, not the
        # raw metadata from the tree.
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(pkg)
        pkg.metadata["USE"] = settings["PORTAGE_USE"]
        pkg.metadata['CHOST'] = settings.get('CHOST', '')

    return pkg
11452 class MetadataRegen(PollScheduler):
11454 def __init__(self, portdb, max_jobs=None, max_load=None):
11455 PollScheduler.__init__(self)
11456 self._portdb = portdb
11458 if max_jobs is None:
11461 self._max_jobs = max_jobs
11462 self._max_load = max_load
11463 self._sched_iface = self._sched_iface_class(
11464 register=self._register,
11465 schedule=self._schedule_wait,
11466 unregister=self._unregister)
11468 self._valid_pkgs = set()
11469 self._process_iter = self._iter_metadata_processes()
11470 self.returncode = os.EX_OK
11471 self._error_count = 0
11473 def _iter_metadata_processes(self):
11474 portdb = self._portdb
11475 valid_pkgs = self._valid_pkgs
11476 every_cp = portdb.cp_all()
11477 every_cp.sort(reverse=True)
11480 cp = every_cp.pop()
11481 portage.writemsg_stdout("Processing %s\n" % cp)
11482 cpv_list = portdb.cp_list(cp)
11483 for cpv in cpv_list:
11484 valid_pkgs.add(cpv)
11485 ebuild_path, repo_path = portdb.findname2(cpv)
11486 metadata_process = portdb._metadata_process(
11487 cpv, ebuild_path, repo_path)
11488 if metadata_process is None:
11490 yield metadata_process
11494 portdb = self._portdb
11495 from portage.cache.cache_errors import CacheError
11498 for mytree in portdb.porttrees:
11500 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11501 except CacheError, e:
11502 portage.writemsg("Error listing cache entries for " + \
11503 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11508 while self._schedule():
11515 for y in self._valid_pkgs:
11516 for mytree in portdb.porttrees:
11517 if portdb.findname2(y, mytree=mytree)[0]:
11518 dead_nodes[mytree].discard(y)
11520 for mytree, nodes in dead_nodes.iteritems():
11521 auxdb = portdb.auxdb[mytree]
11525 except (KeyError, CacheError):
11528 def _schedule_tasks(self):
11531 @returns: True if there may be remaining tasks to schedule,
11534 while self._can_add_job():
11536 metadata_process = self._process_iter.next()
11537 except StopIteration:
11541 metadata_process.scheduler = self._sched_iface
11542 metadata_process.addExitListener(self._metadata_exit)
11543 metadata_process.start()
11546 def _metadata_exit(self, metadata_process):
11548 if metadata_process.returncode != os.EX_OK:
11549 self.returncode = 1
11550 self._error_count += 1
11551 self._valid_pkgs.discard(metadata_process.cpv)
11552 portage.writemsg("Error processing %s, continuing...\n" % \
11553 (metadata_process.cpv,))
11556 class UninstallFailure(portage.exception.PortageException):
11558 An instance of this class is raised by unmerge() when
11559 an uninstallation fails.
11562 def __init__(self, *pargs):
11563 portage.exception.PortageException.__init__(self, pargs)
11565 self.status = pargs[0]
11567 def unmerge(root_config, myopts, unmerge_action,
11568 unmerge_files, ldpath_mtimes, autoclean=0,
11569 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11570 scheduler=None, writemsg_level=portage.util.writemsg_level):
11572 quiet = "--quiet" in myopts
11573 settings = root_config.settings
11574 sets = root_config.sets
11575 vartree = root_config.trees["vartree"]
11576 candidate_catpkgs=[]
11578 xterm_titles = "notitles" not in settings.features
11579 out = portage.output.EOutput()
11581 db_keys = list(vartree.dbapi._aux_cache_keys)
11584 pkg = pkg_cache.get(cpv)
11586 pkg = Package(cpv=cpv, installed=True,
11587 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11588 root_config=root_config,
11589 type_name="installed")
11590 pkg_cache[cpv] = pkg
11593 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11595 # At least the parent needs to exist for the lock file.
11596 portage.util.ensure_dirs(vdb_path)
11597 except portage.exception.PortageException:
11601 if os.access(vdb_path, os.W_OK):
11602 vdb_lock = portage.locks.lockdir(vdb_path)
11603 realsyslist = sets["system"].getAtoms()
11605 for x in realsyslist:
11606 mycp = portage.dep_getkey(x)
11607 if mycp in settings.getvirtuals():
11609 for provider in settings.getvirtuals()[mycp]:
11610 if vartree.dbapi.match(provider):
11611 providers.append(provider)
11612 if len(providers) == 1:
11613 syslist.extend(providers)
11615 syslist.append(mycp)
11617 mysettings = portage.config(clone=settings)
11619 if not unmerge_files:
11620 if unmerge_action == "unmerge":
11622 print bold("emerge unmerge") + " can only be used with specific package names"
11628 localtree = vartree
11629 # process all arguments and add all
11630 # valid db entries to candidate_catpkgs
11632 if not unmerge_files:
11633 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11635 #we've got command-line arguments
11636 if not unmerge_files:
11637 print "\nNo packages to unmerge have been provided.\n"
11639 for x in unmerge_files:
11640 arg_parts = x.split('/')
11641 if x[0] not in [".","/"] and \
11642 arg_parts[-1][-7:] != ".ebuild":
11643 #possible cat/pkg or dep; treat as such
11644 candidate_catpkgs.append(x)
11645 elif unmerge_action in ["prune","clean"]:
11646 print "\n!!! Prune and clean do not accept individual" + \
11647 " ebuilds as arguments;\n skipping.\n"
11650 # it appears that the user is specifying an installed
11651 # ebuild and we're in "unmerge" mode, so it's ok.
11652 if not os.path.exists(x):
11653 print "\n!!! The path '"+x+"' doesn't exist.\n"
11656 absx = os.path.abspath(x)
11657 sp_absx = absx.split("/")
11658 if sp_absx[-1][-7:] == ".ebuild":
11660 absx = "/".join(sp_absx)
11662 sp_absx_len = len(sp_absx)
11664 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11665 vdb_len = len(vdb_path)
11667 sp_vdb = vdb_path.split("/")
11668 sp_vdb_len = len(sp_vdb)
11670 if not os.path.exists(absx+"/CONTENTS"):
11671 print "!!! Not a valid db dir: "+str(absx)
11674 if sp_absx_len <= sp_vdb_len:
11675 # The Path is shorter... so it can't be inside the vdb.
11678 print "\n!!!",x,"cannot be inside "+ \
11679 vdb_path+"; aborting.\n"
11682 for idx in range(0,sp_vdb_len):
11683 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11686 print "\n!!!", x, "is not inside "+\
11687 vdb_path+"; aborting.\n"
11690 print "="+"/".join(sp_absx[sp_vdb_len:])
11691 candidate_catpkgs.append(
11692 "="+"/".join(sp_absx[sp_vdb_len:]))
11695 if (not "--quiet" in myopts):
11697 if settings["ROOT"] != "/":
11698 writemsg_level(darkgreen(newline+ \
11699 ">>> Using system located in ROOT tree %s\n" % \
11702 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11703 not ("--quiet" in myopts):
11704 writemsg_level(darkgreen(newline+\
11705 ">>> These are the packages that would be unmerged:\n"))
11707 # Preservation of order is required for --depclean and --prune so
11708 # that dependencies are respected. Use all_selected to eliminate
11709 # duplicate packages since the same package may be selected by
11712 all_selected = set()
11713 for x in candidate_catpkgs:
11714 # cycle through all our candidate deps and determine
11715 # what will and will not get unmerged
11717 mymatch = vartree.dbapi.match(x)
11718 except portage.exception.AmbiguousPackageName, errpkgs:
11719 print "\n\n!!! The short ebuild name \"" + \
11720 x + "\" is ambiguous. Please specify"
11721 print "!!! one of the following fully-qualified " + \
11722 "ebuild names instead:\n"
11723 for i in errpkgs[0]:
11724 print " " + green(i)
11728 if not mymatch and x[0] not in "<>=~":
11729 mymatch = localtree.dep_match(x)
11731 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11732 (x, unmerge_action), noiselevel=-1)
11736 {"protected": set(), "selected": set(), "omitted": set()})
11737 mykey = len(pkgmap) - 1
11738 if unmerge_action=="unmerge":
11740 if y not in all_selected:
11741 pkgmap[mykey]["selected"].add(y)
11742 all_selected.add(y)
11743 elif unmerge_action == "prune":
11744 if len(mymatch) == 1:
11746 best_version = mymatch[0]
11747 best_slot = vartree.getslot(best_version)
11748 best_counter = vartree.dbapi.cpv_counter(best_version)
11749 for mypkg in mymatch[1:]:
11750 myslot = vartree.getslot(mypkg)
11751 mycounter = vartree.dbapi.cpv_counter(mypkg)
11752 if (myslot == best_slot and mycounter > best_counter) or \
11753 mypkg == portage.best([mypkg, best_version]):
11754 if myslot == best_slot:
11755 if mycounter < best_counter:
11756 # On slot collision, keep the one with the
11757 # highest counter since it is the most
11758 # recently installed.
11760 best_version = mypkg
11762 best_counter = mycounter
11763 pkgmap[mykey]["protected"].add(best_version)
11764 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11765 if mypkg != best_version and mypkg not in all_selected)
11766 all_selected.update(pkgmap[mykey]["selected"])
11768 # unmerge_action == "clean"
11770 for mypkg in mymatch:
11771 if unmerge_action == "clean":
11772 myslot = localtree.getslot(mypkg)
11774 # since we're pruning, we don't care about slots
11775 # and put all the pkgs in together
11777 if myslot not in slotmap:
11778 slotmap[myslot] = {}
11779 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11781 for mypkg in vartree.dbapi.cp_list(
11782 portage.dep_getkey(mymatch[0])):
11783 myslot = vartree.getslot(mypkg)
11784 if myslot not in slotmap:
11785 slotmap[myslot] = {}
11786 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11788 for myslot in slotmap:
11789 counterkeys = slotmap[myslot].keys()
11790 if not counterkeys:
11793 pkgmap[mykey]["protected"].add(
11794 slotmap[myslot][counterkeys[-1]])
11795 del counterkeys[-1]
11797 for counter in counterkeys[:]:
11798 mypkg = slotmap[myslot][counter]
11799 if mypkg not in mymatch:
11800 counterkeys.remove(counter)
11801 pkgmap[mykey]["protected"].add(
11802 slotmap[myslot][counter])
11804 #be pretty and get them in order of merge:
11805 for ckey in counterkeys:
11806 mypkg = slotmap[myslot][ckey]
11807 if mypkg not in all_selected:
11808 pkgmap[mykey]["selected"].add(mypkg)
11809 all_selected.add(mypkg)
11810 # ok, now the last-merged package
11811 # is protected, and the rest are selected
11812 numselected = len(all_selected)
11813 if global_unmerge and not numselected:
11814 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11817 if not numselected:
11818 portage.writemsg_stdout(
11819 "\n>>> No packages selected for removal by " + \
11820 unmerge_action + "\n")
11824 vartree.dbapi.flush_cache()
11825 portage.locks.unlockdir(vdb_lock)
11827 from portage.sets.base import EditablePackageSet
11829 # generate a list of package sets that are directly or indirectly listed in "world",
11830 # as there is no persistent list of "installed" sets
11831 installed_sets = ["world"]
11836 pos = len(installed_sets)
11837 for s in installed_sets[pos - 1:]:
11840 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11843 installed_sets += candidates
11844 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11847 # we don't want to unmerge packages that are still listed in user-editable package sets
11848 # listed in "world" as they would be remerged on the next update of "world" or the
11849 # relevant package sets.
11850 unknown_sets = set()
11851 for cp in xrange(len(pkgmap)):
11852 for cpv in pkgmap[cp]["selected"].copy():
11856 # It could have been uninstalled
11857 # by a concurrent process.
11860 if unmerge_action != "clean" and \
11861 root_config.root == "/" and \
11862 portage.match_from_list(
11863 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11864 msg = ("Not unmerging package %s since there is no valid " + \
11865 "reason for portage to unmerge itself.") % (pkg.cpv,)
11866 for line in textwrap.wrap(msg, 75):
11868 # adjust pkgmap so the display output is correct
11869 pkgmap[cp]["selected"].remove(cpv)
11870 all_selected.remove(cpv)
11871 pkgmap[cp]["protected"].add(cpv)
11875 for s in installed_sets:
11876 # skip sets that the user requested to unmerge, and skip world
11877 # unless we're unmerging a package set (as the package would be
11878 # removed from "world" later on)
11879 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11883 if s in unknown_sets:
11885 unknown_sets.add(s)
11886 out = portage.output.EOutput()
11887 out.eerror(("Unknown set '@%s' in " + \
11888 "%svar/lib/portage/world_sets") % \
11889 (s, root_config.root))
11892 # only check instances of EditablePackageSet as other classes are generally used for
11893 # special purposes and can be ignored here (and are usually generated dynamically, so the
11894 # user can't do much about them anyway)
11895 if isinstance(sets[s], EditablePackageSet):
11897 # This is derived from a snippet of code in the
11898 # depgraph._iter_atoms_for_pkg() method.
11899 for atom in sets[s].iterAtomsForPackage(pkg):
11900 inst_matches = vartree.dbapi.match(atom)
11901 inst_matches.reverse() # descending order
11903 for inst_cpv in inst_matches:
11905 inst_pkg = _pkg(inst_cpv)
11907 # It could have been uninstalled
11908 # by a concurrent process.
11911 if inst_pkg.cp != atom.cp:
11913 if pkg >= inst_pkg:
11914 # This is descending order, and we're not
11915 # interested in any versions <= pkg given.
11917 if pkg.slot_atom != inst_pkg.slot_atom:
11918 higher_slot = inst_pkg
11920 if higher_slot is None:
11924 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11925 #print colorize("WARN", "but still listed in the following package sets:")
11926 #print " %s\n" % ", ".join(parents)
11927 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11928 print colorize("WARN", "still referenced by the following package sets:")
11929 print " %s\n" % ", ".join(parents)
11930 # adjust pkgmap so the display output is correct
11931 pkgmap[cp]["selected"].remove(cpv)
11932 all_selected.remove(cpv)
11933 pkgmap[cp]["protected"].add(cpv)
11937 numselected = len(all_selected)
11938 if not numselected:
11940 "\n>>> No packages selected for removal by " + \
11941 unmerge_action + "\n")
11944 # Unmerge order only matters in some cases
11948 selected = d["selected"]
11951 cp = portage.cpv_getkey(iter(selected).next())
11952 cp_dict = unordered.get(cp)
11953 if cp_dict is None:
11955 unordered[cp] = cp_dict
11958 for k, v in d.iteritems():
11959 cp_dict[k].update(v)
11960 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11962 for x in xrange(len(pkgmap)):
11963 selected = pkgmap[x]["selected"]
11966 for mytype, mylist in pkgmap[x].iteritems():
11967 if mytype == "selected":
11969 mylist.difference_update(all_selected)
11970 cp = portage.cpv_getkey(iter(selected).next())
11971 for y in localtree.dep_match(cp):
11972 if y not in pkgmap[x]["omitted"] and \
11973 y not in pkgmap[x]["selected"] and \
11974 y not in pkgmap[x]["protected"] and \
11975 y not in all_selected:
11976 pkgmap[x]["omitted"].add(y)
11977 if global_unmerge and not pkgmap[x]["selected"]:
11978 #avoid cluttering the preview printout with stuff that isn't getting unmerged
11980 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11981 writemsg_level(colorize("BAD","\a\n\n!!! " + \
11982 "'%s' is part of your system profile.\n" % cp),
11983 level=logging.WARNING, noiselevel=-1)
11984 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11985 "be damaging to your system.\n\n"),
11986 level=logging.WARNING, noiselevel=-1)
11987 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11988 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11989 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11991 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11993 writemsg_level(bold(cp) + ": ", noiselevel=-1)
11994 for mytype in ["selected","protected","omitted"]:
11996 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11997 if pkgmap[x][mytype]:
11998 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11999 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12000 for pn, ver, rev in sorted_pkgs:
12004 myversion = ver + "-" + rev
12005 if mytype == "selected":
12007 colorize("UNMERGE_WARN", myversion + " "),
12011 colorize("GOOD", myversion + " "), noiselevel=-1)
12013 writemsg_level("none ", noiselevel=-1)
12015 writemsg_level("\n", noiselevel=-1)
12017 writemsg_level("\n", noiselevel=-1)
12019 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12020 " packages are slated for removal.\n")
12021 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12022 " and " + colorize("GOOD", "'omitted'") + \
12023 " packages will not be removed.\n\n")
12025 if "--pretend" in myopts:
12026 #we're done... return
12028 if "--ask" in myopts:
12029 if userquery("Would you like to unmerge these packages?")=="No":
12030 # enter pretend mode for correct formatting of results
12031 myopts["--pretend"] = True
12036 #the real unmerging begins, after a short delay....
12037 if clean_delay and not autoclean:
12038 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12040 for x in xrange(len(pkgmap)):
12041 for y in pkgmap[x]["selected"]:
12042 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12043 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12044 mysplit = y.split("/")
12046 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12047 mysettings, unmerge_action not in ["clean","prune"],
12048 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12049 scheduler=scheduler)
12051 if retval != os.EX_OK:
12052 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12054 raise UninstallFailure(retval)
12057 if clean_world and hasattr(sets["world"], "cleanPackage"):
12058 sets["world"].cleanPackage(vartree.dbapi, y)
12059 emergelog(xterm_titles, " >>> unmerge success: "+y)
12060 if clean_world and hasattr(sets["world"], "remove"):
12061 for s in root_config.setconfig.active:
12062 sets["world"].remove(SETPREFIX+s)
12065 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12067 if os.path.exists("/usr/bin/install-info"):
12068 out = portage.output.EOutput()
12073 inforoot=normpath(root+z)
12074 if os.path.isdir(inforoot):
12075 infomtime = long(os.stat(inforoot).st_mtime)
12076 if inforoot not in prev_mtimes or \
12077 prev_mtimes[inforoot] != infomtime:
12078 regen_infodirs.append(inforoot)
12080 if not regen_infodirs:
12081 portage.writemsg_stdout("\n")
12082 out.einfo("GNU info directory index is up-to-date.")
12084 portage.writemsg_stdout("\n")
12085 out.einfo("Regenerating GNU info directory index...")
12087 dir_extensions = ("", ".gz", ".bz2")
12091 for inforoot in regen_infodirs:
12095 if not os.path.isdir(inforoot) or \
12096 not os.access(inforoot, os.W_OK):
12099 file_list = os.listdir(inforoot)
12101 dir_file = os.path.join(inforoot, "dir")
12102 moved_old_dir = False
12103 processed_count = 0
12104 for x in file_list:
12105 if x.startswith(".") or \
12106 os.path.isdir(os.path.join(inforoot, x)):
12108 if x.startswith("dir"):
12110 for ext in dir_extensions:
12111 if x == "dir" + ext or \
12112 x == "dir" + ext + ".old":
12117 if processed_count == 0:
12118 for ext in dir_extensions:
12120 os.rename(dir_file + ext, dir_file + ext + ".old")
12121 moved_old_dir = True
12122 except EnvironmentError, e:
12123 if e.errno != errno.ENOENT:
12126 processed_count += 1
12127 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12128 existsstr="already exists, for file `"
12130 if re.search(existsstr,myso):
12131 # Already exists... Don't increment the count for this.
12133 elif myso[:44]=="install-info: warning: no info dir entry in ":
12134 # This info file doesn't contain a DIR-header: install-info produces this
12135 # (harmless) warning (the --quiet switch doesn't seem to work).
12136 # Don't increment the count for this.
12139 badcount=badcount+1
12140 errmsg += myso + "\n"
12143 if moved_old_dir and not os.path.exists(dir_file):
12144 # We didn't generate a new dir file, so put the old file
12145 # back where it was originally found.
12146 for ext in dir_extensions:
12148 os.rename(dir_file + ext + ".old", dir_file + ext)
12149 except EnvironmentError, e:
12150 if e.errno != errno.ENOENT:
12154 # Clean dir.old cruft so that they don't prevent
12155 # unmerge of otherwise empty directories.
12156 for ext in dir_extensions:
12158 os.unlink(dir_file + ext + ".old")
12159 except EnvironmentError, e:
12160 if e.errno != errno.ENOENT:
12164 #update mtime so we can potentially avoid regenerating.
12165 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12168 out.eerror("Processed %d info files; %d errors." % \
12169 (icount, badcount))
12170 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12173 out.einfo("Processed %d info files." % (icount,))
12176 def display_news_notification(root_config, myopts):
12177 target_root = root_config.root
12178 trees = root_config.trees
12179 settings = trees["vartree"].settings
12180 portdb = trees["porttree"].dbapi
12181 vardb = trees["vartree"].dbapi
12182 NEWS_PATH = os.path.join("metadata", "news")
12183 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12184 newsReaderDisplay = False
12185 update = "--pretend" not in myopts
12187 for repo in portdb.getRepositories():
12188 unreadItems = checkUpdatedNewsItems(
12189 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12191 if not newsReaderDisplay:
12192 newsReaderDisplay = True
12194 print colorize("WARN", " * IMPORTANT:"),
12195 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12198 if newsReaderDisplay:
12199 print colorize("WARN", " *"),
12200 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12203 def display_preserved_libs(vardbapi):
12206 # Ensure the registry is consistent with existing files.
12207 vardbapi.plib_registry.pruneNonExisting()
12209 if vardbapi.plib_registry.hasEntries():
12211 print colorize("WARN", "!!!") + " existing preserved libs:"
12212 plibdata = vardbapi.plib_registry.getPreservedLibs()
12213 linkmap = vardbapi.linkmap
12216 linkmap_broken = False
12220 except portage.exception.CommandNotFound, e:
12221 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12222 level=logging.ERROR, noiselevel=-1)
12224 linkmap_broken = True
12226 search_for_owners = set()
12227 for cpv in plibdata:
12228 internal_plib_keys = set(linkmap._obj_key(f) \
12229 for f in plibdata[cpv])
12230 for f in plibdata[cpv]:
12231 if f in consumer_map:
12234 for c in linkmap.findConsumers(f):
12235 # Filter out any consumers that are also preserved libs
12236 # belonging to the same package as the provider.
12237 if linkmap._obj_key(c) not in internal_plib_keys:
12238 consumers.append(c)
12240 consumer_map[f] = consumers
12241 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12243 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12245 for cpv in plibdata:
12246 print colorize("WARN", ">>>") + " package: %s" % cpv
12248 for f in plibdata[cpv]:
12249 obj_key = linkmap._obj_key(f)
12250 alt_paths = samefile_map.get(obj_key)
12251 if alt_paths is None:
12253 samefile_map[obj_key] = alt_paths
12256 for alt_paths in samefile_map.itervalues():
12257 alt_paths = sorted(alt_paths)
12258 for p in alt_paths:
12259 print colorize("WARN", " * ") + " - %s" % (p,)
12261 consumers = consumer_map.get(f, [])
12262 for c in consumers[:MAX_DISPLAY]:
12263 print colorize("WARN", " * ") + " used by %s (%s)" % \
12264 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12265 if len(consumers) == MAX_DISPLAY + 1:
12266 print colorize("WARN", " * ") + " used by %s (%s)" % \
12267 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12268 for x in owners.get(consumers[MAX_DISPLAY], [])))
12269 elif len(consumers) > MAX_DISPLAY:
12270 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12271 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12274 def _flush_elog_mod_echo():
12276 Dump the mod_echo output now so that our other
12277 notifications are shown last.
12279 @returns: True if messages were shown, False otherwise.
12281 messages_shown = False
12283 from portage.elog import mod_echo
12284 except ImportError:
12285 pass # happens during downgrade to a version without the module
12287 messages_shown = bool(mod_echo._items)
12288 mod_echo.finalize()
12289 return messages_shown
12291 def post_emerge(root_config, myopts, mtimedb, retval):
12293 Misc. things to run at the end of a merge session.
12296 Update Config Files
12299 Display preserved libs warnings
12302 @param trees: A dictionary mapping each ROOT to it's package databases
12304 @param mtimedb: The mtimeDB to store data needed across merge invocations
12305 @type mtimedb: MtimeDB class instance
12306 @param retval: Emerge's return value
12310 1. Calls sys.exit(retval)
12313 target_root = root_config.root
12314 trees = { target_root : root_config.trees }
12315 vardbapi = trees[target_root]["vartree"].dbapi
12316 settings = vardbapi.settings
12317 info_mtimes = mtimedb["info"]
12319 # Load the most current variables from ${ROOT}/etc/profile.env
12322 settings.regenerate()
12325 config_protect = settings.get("CONFIG_PROTECT","").split()
12326 infodirs = settings.get("INFOPATH","").split(":") + \
12327 settings.get("INFODIR","").split(":")
12331 if retval == os.EX_OK:
12332 exit_msg = " *** exiting successfully."
12334 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12335 emergelog("notitles" not in settings.features, exit_msg)
12337 _flush_elog_mod_echo()
12339 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12340 if "--pretend" in myopts or (counter_hash is not None and \
12341 counter_hash == vardbapi._counter_hash()):
12342 display_news_notification(root_config, myopts)
12343 # If vdb state has not changed then there's nothing else to do.
12346 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12347 portage.util.ensure_dirs(vdb_path)
12349 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12350 vdb_lock = portage.locks.lockdir(vdb_path)
12354 if "noinfo" not in settings.features:
12355 chk_updated_info_files(target_root,
12356 infodirs, info_mtimes, retval)
12360 portage.locks.unlockdir(vdb_lock)
12362 chk_updated_cfg_files(target_root, config_protect)
12364 display_news_notification(root_config, myopts)
12365 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12366 display_preserved_libs(vardbapi)
12371 def chk_updated_cfg_files(target_root, config_protect):
12373 #number of directories with some protect files in them
12375 for x in config_protect:
12376 x = os.path.join(target_root, x.lstrip(os.path.sep))
12377 if not os.access(x, os.W_OK):
12378 # Avoid Permission denied errors generated
12382 mymode = os.lstat(x).st_mode
12385 if stat.S_ISLNK(mymode):
12386 # We want to treat it like a directory if it
12387 # is a symlink to an existing directory.
12389 real_mode = os.stat(x).st_mode
12390 if stat.S_ISDIR(real_mode):
12394 if stat.S_ISDIR(mymode):
12395 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12397 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12398 os.path.split(x.rstrip(os.path.sep))
12399 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12400 a = commands.getstatusoutput(mycommand)
12402 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12404 # Show the error message alone, sending stdout to /dev/null.
12405 os.system(mycommand + " 1>/dev/null")
12407 files = a[1].split('\0')
12408 # split always produces an empty string as the last element
12409 if files and not files[-1]:
12413 print "\n"+colorize("WARN", " * IMPORTANT:"),
12414 if stat.S_ISDIR(mymode):
12415 print "%d config files in '%s' need updating." % \
12418 print "config file '%s' needs updating." % x
12421 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12422 " section of the " + bold("emerge")
12423 print " "+yellow("*")+" man page to learn how to update config files."
12425 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12428 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12429 Returns the number of unread (yet relevent) items.
12431 @param portdb: a portage tree database
12432 @type portdb: pordbapi
12433 @param vardb: an installed package database
12434 @type vardb: vardbapi
12437 @param UNREAD_PATH:
12443 1. The number of unread but relevant news items.
12446 from portage.news import NewsManager
12447 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12448 return manager.getUnreadItems( repo_id, update=update )
12450 def insert_category_into_atom(atom, category):
12451 alphanum = re.search(r'\w', atom)
12453 ret = atom[:alphanum.start()] + "%s/" % category + \
12454 atom[alphanum.start():]
12459 def is_valid_package_atom(x):
12461 alphanum = re.search(r'\w', x)
12463 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12464 return portage.isvalidatom(x)
12466 def show_blocker_docs_link():
12468 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12469 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12471 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12474 def show_mask_docs():
12475 print "For more information, see the MASKED PACKAGES section in the emerge"
12476 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
"""Synchronize the local Portage tree (PORTDIR) from the configured SYNC source.

Supports an existing git working copy, rsync:// URIs (with server-timestamp
short-circuiting and a retry loop over the mirror rotation) and cvs:// URIs.
After a successful sync the emerge configuration is reloaded, pending global
package updates are applied, the metadata cache is optionally transferred,
the user's post_sync hook is run, and portage-upgrade/news notices printed.

@param settings: portage configuration (PORTDIR, SYNC, FEATURES, ...)
@param trees: tree/dbapi mapping; reloaded from scratch after syncing
@param mtimedb: state database consulted for global package updates
@param myopts: parsed emerge command line options
@param myaction: requested action; "metadata" skips the actual transfer
"""
xterm_titles = "notitles" not in settings.features
emergelog(xterm_titles, " === sync")
myportdir = settings.get("PORTDIR", None)
out = portage.output.EOutput()
sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Normalize away a trailing slash so later path joins stay predictable.
if myportdir[-1]=="/":
myportdir=myportdir[:-1]
st = os.stat(myportdir)
print ">>>",myportdir,"not found, creating it."
os.makedirs(myportdir,0755)
st = os.stat(myportdir)
spawn_kwargs["env"] = settings.environ()
# With FEATURES=usersync and sufficient privileges, run the sync as the
# existing owner of PORTDIR so file ownership stays consistent.
if 'usersync' in settings.features and \
portage.data.secpass >= 2 and \
(st.st_uid != os.getuid() and st.st_mode & 0700 or \
st.st_gid != os.getgid() and st.st_mode & 0070):
homedir = pwd.getpwuid(st.st_uid).pw_dir
# Drop privileges when syncing, in order to match
# existing uid/gid settings.
spawn_kwargs["uid"] = st.st_uid
spawn_kwargs["gid"] = st.st_gid
spawn_kwargs["groups"] = [st.st_gid]
spawn_kwargs["env"]["HOME"] = homedir
# Mask group write from spawned processes when the tree itself is not
# group writable.
if not st.st_mode & 0020:
umask = umask | 0020
spawn_kwargs["umask"] = umask
syncuri = settings.get("SYNC", "").strip()
writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
noiselevel=-1, level=logging.ERROR)
# Detect which (if any) version control system manages PORTDIR.
vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
dosyncuri = syncuri
updatecache_flg = False
if myaction == "metadata":
print "skipping sync"
updatecache_flg = True
elif ".git" in vcs_dirs:
# Update existing git repository, and ignore the syncuri. We are
# going to trust the user and assume that the user is in the branch
# that he/she wants updated. We'll let the user manage branches with
if portage.process.find_binary("git") is None:
msg = ["Command not found: git",
"Type \"emerge dev-util/git\" to enable git support."]
writemsg_level("!!! %s\n" % l,
level=logging.ERROR, noiselevel=-1)
msg = ">>> Starting git pull in %s..." % myportdir
emergelog(xterm_titles, msg )
writemsg_level(msg + "\n")
exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
(portage._shell_quote(myportdir),), **spawn_kwargs)
if exitcode != os.EX_OK:
msg = "!!! git pull error in %s." % myportdir
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
msg = ">>> Git pull in %s successful" % myportdir
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n")
# git does not preserve mtimes, so resynchronize them with the
# metadata cache before the cache is considered usable.
exitcode = git_sync_timestamps(settings, myportdir)
if exitcode == os.EX_OK:
updatecache_flg = True
elif syncuri[:8]=="rsync://":
# Refuse to rsync over a VCS checkout: rsync --delete would
# clobber the working copy.
for vcs_dir in vcs_dirs:
writemsg_level(("!!! %s appears to be under revision " + \
"control (contains %s).\n!!! Aborting rsync sync.\n") % \
(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
if not os.path.exists("/usr/bin/rsync"):
print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
if settings["PORTAGE_RSYNC_OPTS"] == "":
portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
rsync_opts.extend([
"--recursive", # Recurse directories
"--links", # Consider symlinks
"--safe-links", # Ignore links outside of tree
"--perms", # Preserve permissions
"--times", # Preserive mod times
"--compress", # Compress the data transmitted
"--force", # Force deletion on non-empty dirs
"--whole-file", # Don't do block transfers, only entire files
"--delete", # Delete files that aren't in the master tree
"--stats", # Show final statistics about what was transfered
"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
"--exclude=/distfiles", # Exclude distfiles from consideration
"--exclude=/local", # Exclude local from consideration
"--exclude=/packages", # Exclude packages from consideration
# The below validation is not needed when using the above hardcoded
portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
# Re-add options the sync logic depends on if the user's
# PORTAGE_RSYNC_OPTS dropped them.
for opt in ("--recursive", "--times"):
if opt not in rsync_opts:
portage.writemsg(yellow("WARNING:") + " adding required option " + \
"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
rsync_opts.append(opt)
for exclude in ("distfiles", "local", "packages"):
opt = "--exclude=/%s" % exclude
if opt not in rsync_opts:
portage.writemsg(yellow("WARNING:") + \
" adding required option %s not included in " % opt + \
"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
rsync_opts.append(opt)
# Extra defaults are enforced when syncing from the official
# gentoo.org mirror rotation.
if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
def rsync_opt_startswith(opt_prefix):
# True when any collected option already starts with opt_prefix.
for x in rsync_opts:
if x.startswith(opt_prefix):
if not rsync_opt_startswith("--timeout="):
rsync_opts.append("--timeout=%d" % mytimeout)
for opt in ("--compress", "--whole-file"):
if opt not in rsync_opts:
portage.writemsg(yellow("WARNING:") + " adding required option " + \
"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
rsync_opts.append(opt)
if "--quiet" in myopts:
rsync_opts.append("--quiet") # Shut up a lot
rsync_opts.append("--verbose") # Print filelist
if "--verbose" in myopts:
rsync_opts.append("--progress") # Progress meter for each file
if "--debug" in myopts:
rsync_opts.append("--checksum") # Force checksum on all files
# Real local timestamp file.
servertimestampfile = os.path.join(
myportdir, "metadata", "timestamp.chk")
content = portage.util.grabfile(servertimestampfile)
mytimestamp = time.mktime(time.strptime(content[0],
"%a, %d %b %Y %H:%M:%S +0000"))
except (OverflowError, ValueError):
rsync_initial_timeout = \
int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
rsync_initial_timeout = 15
maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
except SystemExit, e:
raise # Needed else can't exit
maxretries=3 #default number of retries
# Split the rsync URI into optional user@, hostname and optional :port.
user_name, hostname, port = re.split(
"rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
if user_name is None:
updatecache_flg=True
all_rsync_opts = set(rsync_opts)
extra_rsync_opts = shlex.split(
settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
all_rsync_opts.update(extra_rsync_opts)
# Honor an explicit -4/-6 request when resolving the hostname.
family = socket.AF_INET
if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
family = socket.AF_INET
elif socket.has_ipv6 and \
("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
SERVER_OUT_OF_DATE = -1
EXCEEDED_MAX_RETRIES = -2
for addrinfo in socket.getaddrinfo(
hostname, None, family, socket.SOCK_STREAM):
if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
# IPv6 addresses need to be enclosed in square brackets
ips.append("[%s]" % addrinfo[4][0])
ips.append(addrinfo[4][0])
from random import shuffle
except SystemExit, e:
raise # Needed else can't exit
except Exception, e:
print "Notice:",str(e)
# Substitute a concrete (shuffled) IP for the hostname so retries can
# rotate through the mirror pool.
dosyncuri = syncuri.replace(
"//" + user_name + hostname + port + "/",
"//" + user_name + ips[0] + port + "/", 1)
except SystemExit, e:
raise # Needed else can't exit
except Exception, e:
print "Notice:",str(e)
if "--ask" in myopts:
if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
if "--quiet" not in myopts:
print ">>> Starting rsync with "+dosyncuri+"..."
emergelog(xterm_titles,
">>> Starting retry %d of %d with %s" % \
(retries,maxretries,dosyncuri))
print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
if mytimestamp != 0 and "--quiet" not in myopts:
print ">>> Checking server timestamp ..."
rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
if "--debug" in myopts:
exitcode = os.EX_OK
servertimestamp = 0
# Even if there's no timestamp available locally, fetch the
# timestamp anyway as an initial probe to verify that the server is
# responsive. This protects us from hanging indefinitely on a
# connection attempt to an unresponsive server which rsync's
# --timeout option does not prevent.
# Temporary file for remote server timestamp comparison.
from tempfile import mkstemp
fd, tmpservertimestampfile = mkstemp()
mycommand = rsynccommand[:]
mycommand.append(dosyncuri.rstrip("/") + \
"/metadata/timestamp.chk")
mycommand.append(tmpservertimestampfile)
def timeout_handler(signum, frame):
raise portage.exception.PortageException("timed out")
signal.signal(signal.SIGALRM, timeout_handler)
# Timeout here in case the server is unresponsive. The
# --timeout rsync option doesn't apply to the initial
# connection attempt.
if rsync_initial_timeout:
signal.alarm(rsync_initial_timeout)
mypids.extend(portage.process.spawn(
mycommand, env=settings.environ(), returnpid=True))
exitcode = os.waitpid(mypids[0], 0)[1]
content = portage.grabfile(tmpservertimestampfile)
if rsync_initial_timeout:
os.unlink(tmpservertimestampfile)
except portage.exception.PortageException, e:
# The probe timed out: reap the spawned rsync if still running.
if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
os.kill(mypids[0], signal.SIGTERM)
os.waitpid(mypids[0], 0)
# This is the same code rsync uses for timeout.
# Convert the waitpid() status into a shell-style exit code.
if exitcode != os.EX_OK:
if exitcode & 0xff:
exitcode = (exitcode & 0xff) << 8
exitcode = exitcode >> 8
portage.process.spawned_pids.remove(mypids[0])
servertimestamp = time.mktime(time.strptime(
content[0], "%a, %d %b %Y %H:%M:%S +0000"))
except (OverflowError, ValueError):
del mycommand, mypids, content
if exitcode == os.EX_OK:
# Identical timestamps: tree already current, skip the transfer.
if (servertimestamp != 0) and (servertimestamp == mytimestamp):
emergelog(xterm_titles,
">>> Cancelling sync -- Already current.")
print ">>> Timestamps on the server and in the local repository are the same."
print ">>> Cancelling all further sync action. You are already up to date."
print ">>> In order to force sync, remove '%s'." % servertimestampfile
elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
emergelog(xterm_titles,
">>> Server out of date: %s" % dosyncuri)
print ">>> SERVER OUT OF DATE: %s" % dosyncuri
print ">>> In order to force sync, remove '%s'." % servertimestampfile
exitcode = SERVER_OUT_OF_DATE
elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# Server is newer (or no timestamp known): do the real sync.
mycommand = rsynccommand + [dosyncuri+"/", myportdir]
exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
if exitcode in [0,1,3,4,11,14,20,21]:
elif exitcode in [1,3,4,11,14,20,21]:
# Code 2 indicates protocol incompatibility, which is expected
# for servers with protocol < 29 that don't support
# --prune-empty-directories. Retry for a server that supports
# at least rsync protocol version 29 (>=rsync-2.6.4).
if retries<=maxretries:
print ">>> Retrying..."
updatecache_flg=False
exitcode = EXCEEDED_MAX_RETRIES
emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
elif exitcode == SERVER_OUT_OF_DATE:
elif exitcode == EXCEEDED_MAX_RETRIES:
">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Map well-known rsync exit codes to human-readable advice.
msg.append("Rsync has reported that there is a syntax error. Please ensure")
msg.append("that your SYNC statement is proper.")
msg.append("SYNC=" + settings["SYNC"])
msg.append("Rsync has reported that there is a File IO error. Normally")
msg.append("this means your disk is full, but can be caused by corruption")
msg.append("on the filesystem that contains PORTDIR. Please investigate")
msg.append("and try again after the problem has been fixed.")
msg.append("PORTDIR=" + settings["PORTDIR"])
msg.append("Rsync was killed before it finished.")
msg.append("Rsync has not successfully finished. It is recommended that you keep")
msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
msg.append("to use rsync due to firewall or other restrictions. This should be a")
msg.append("temporary problem unless complications exist with your network")
msg.append("(and possibly your system's filesystem) configuration.")
elif syncuri[:6]=="cvs://":
if not os.path.exists("/usr/bin/cvs"):
print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
cvsroot=syncuri[6:]
cvsdir=os.path.dirname(myportdir)
# No checkout yet: perform the initial cvs checkout into the parent
# directory, then move it into place as PORTDIR.
if not os.path.exists(myportdir+"/CVS"):
print ">>> Starting initial cvs checkout with "+syncuri+"..."
if os.path.exists(cvsdir+"/gentoo-x86"):
print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
os.rmdir(myportdir)
if e.errno != errno.ENOENT:
"!!! existing '%s' directory; exiting.\n" % myportdir)
if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
print "!!! cvs checkout error; exiting."
os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
print ">>> Starting cvs update with "+syncuri+"..."
retval = portage.process.spawn_bash(
"cd %s; cvs -z0 -q update -dP" % \
(portage._shell_quote(myportdir),), **spawn_kwargs)
if retval != os.EX_OK:
dosyncuri = syncuri
writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
noiselevel=-1, level=logging.ERROR)
# Without FEATURES=metadata-transfer, skip the cache transfer below
# (unless the metadata action was explicitly requested).
if updatecache_flg and \
myaction != "metadata" and \
"metadata-transfer" not in settings.features:
updatecache_flg = False
# Reload the whole config from scratch.
settings, trees, mtimedb = load_emerge_config(trees=trees)
root_config = trees[settings["ROOT"]]["root_config"]
portdb = trees[settings["ROOT"]]["porttree"].dbapi
if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
action_metadata(settings, portdb, myopts)
if portage._global_updates(trees, mtimedb["updates"]):
# Reload the whole config from scratch.
settings, trees, mtimedb = load_emerge_config(trees=trees)
portdb = trees[settings["ROOT"]]["porttree"].dbapi
root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one
# so an upgrade suggestion can be printed below.
mybestpv = portdb.xmatch("bestmatch-visible",
portage.const.PORTAGE_PACKAGE_ATOM)
mypvs = portage.best(
trees[settings["ROOT"]]["vartree"].dbapi.match(
portage.const.PORTAGE_PACKAGE_ATOM))
chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
if myaction != "metadata":
# Run the user's post_sync hook if an executable one is installed.
if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
retval = portage.process.spawn(
[os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
dosyncuri], env=settings.environ())
if retval != os.EX_OK:
print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
if(mybestpv != mypvs) and not "--quiet" in myopts:
print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
print red(" * ")+"that you update portage now, before any other packages are updated."
print red(" * ")+"To update portage, run 'emerge portage' now."
display_news_notification(root_config, myopts)
def git_sync_timestamps(settings, portdir):
Since git doesn't preserve timestamps, synchronize timestamps between
entries and ebuilds/eclasses. Assume the cache has the correct timestamp
for a given file as long as the file in the working tree is not modified
(relative to HEAD).
cache_dir = os.path.join(portdir, "metadata", "cache")
if not os.path.isdir(cache_dir):
writemsg_level(">>> Synchronizing timestamps...\n")
from portage.cache.cache_errors import CacheError
# Open the tree's metadata cache read-only; its entries record the
# mtimes that the files in the working tree should carry.
cache_db = settings.load_best_module("portdbapi.metadbmodule")(
portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
except CacheError, e:
writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
level=logging.ERROR, noiselevel=-1)
ec_dir = os.path.join(portdir, "eclass")
ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
if f.endswith(".eclass"))
writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified relative to HEAD;
# their cached mtimes cannot be trusted and are skipped below.
args = [portage.const.BASH_BINARY, "-c",
"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
portage._shell_quote(portdir)]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
modified_files = set(l.rstrip("\n") for l in proc.stdout)
if rval != os.EX_OK:
modified_eclasses = set(ec for ec in ec_names \
if os.path.join("eclass", ec + ".eclass") in modified_files)
# Tracks the mtime applied to each eclass so conflicting values
# recorded by different cache entries can be detected.
updated_ec_mtimes = {}
# Walk every cache entry and restore the recorded mtimes onto the
# corresponding unmodified ebuild and its eclasses.
for cpv in cache_db:
cpv_split = portage.catpkgsplit(cpv)
if cpv_split is None:
writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
level=logging.ERROR, noiselevel=-1)
cat, pn, ver, rev = cpv_split
cat, pf = portage.catsplit(cpv)
relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
if relative_eb_path in modified_files:
cache_entry = cache_db[cpv]
eb_mtime = cache_entry.get("_mtime_")
ec_mtimes = cache_entry.get("_eclasses_")
writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
level=logging.ERROR, noiselevel=-1)
except CacheError, e:
writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
(cpv, e), level=logging.ERROR, noiselevel=-1)
if eb_mtime is None:
writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
level=logging.ERROR, noiselevel=-1)
eb_mtime = long(eb_mtime)
writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
if ec_mtimes is None:
writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
level=logging.ERROR, noiselevel=-1)
if modified_eclasses.intersection(ec_mtimes):
missing_eclasses = set(ec_mtimes).difference(ec_names)
if missing_eclasses:
writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
(cpv, sorted(missing_eclasses)), level=logging.ERROR,
eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): comparing a raw os.stat() result against a numeric
# mtime below looks suspect -- confirm this line against upstream
# (an [stat.ST_MTIME] subscript may belong here).
current_eb_mtime = os.stat(eb_path)
writemsg_level("!!! Missing ebuild: %s\n" % \
(cpv,), level=logging.ERROR, noiselevel=-1)
# Reject this entry if another entry already applied a different
# mtime for one of its eclasses.
inconsistent = False
for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
updated_mtime = updated_ec_mtimes.get(ec)
if updated_mtime is not None and updated_mtime != ec_mtime:
writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
(cpv, ec), level=logging.ERROR, noiselevel=-1)
inconsistent = True
# Restore the ebuild's cached mtime when it differs.
if current_eb_mtime != eb_mtime:
os.utime(eb_path, (eb_mtime, eb_mtime))
# Apply each eclass mtime at most once, remembering what was set.
for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
if ec in updated_ec_mtimes:
ec_path = os.path.join(ec_dir, ec + ".eclass")
current_mtime = long(os.stat(ec_path).st_mtime)
if current_mtime != ec_mtime:
os.utime(ec_path, (ec_mtime, ec_mtime))
updated_ec_mtimes[ec] = ec_mtime
def action_metadata(settings, portdb, myopts):
"""Transfer metadata from ${PORTDIR}/metadata/cache into the local
depcache (the cache-transfer step run after a sync, or via the
"metadata" action).

@param settings: portage config; supplies depcachedir and PORTDIR
@param portdb: portdbapi whose auxdb receives the transferred entries
@param myopts: emerge options; --quiet selects a silent progress mode
"""
portage.writemsg_stdout("\n>>> Updating Portage cache: ")
old_umask = os.umask(0002)
cachedir = os.path.normpath(settings.depcachedir)
# Refuse obviously-wrong cache locations: the transfer writes (and may
# remove) files under this directory.
if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
"/lib", "/opt", "/proc", "/root", "/sbin",
"/sys", "/tmp", "/usr", "/var"]:
print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
"ROOT DIRECTORY ON YOUR SYSTEM."
print >> sys.stderr, \
"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
if not os.path.exists(cachedir):
ec = portage.eclass_cache.cache(portdb.porttree_root)
myportdir = os.path.realpath(settings["PORTDIR"])
# Source cache: the flat metadata/cache shipped inside the tree.
cm = settings.load_best_module("portdbapi.metadbmodule")(
myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
from portage.cache import util
class percentage_noise_maker(util.quiet_mirroring):
# Progress reporter that doubles as the cpv source for
# mirror_cache(), printing a running percentage.
def __init__(self, dbapi):
self.cp_all = dbapi.cp_all()
l = len(self.cp_all)
self.call_update_min = 100000000
self.min_cp_all = l/100.0
def __iter__(self):
for x in self.cp_all:
if self.count > self.min_cp_all:
self.call_update_min = 0
for y in self.dbapi.cp_list(x):
self.call_update_mine = 0
def update(self, *arg):
# Overwrite the previously printed percentage in place
# using backspaces.
try: self.pstr = int(self.pstr) + 1
except ValueError: self.pstr = 1
sys.stdout.write("%s%i%%" % \
("\b" * (len(str(self.pstr))+1), self.pstr))
self.call_update_min = 10000000
def finish(self, *arg):
sys.stdout.write("\b\b\b\b100%\n")
if "--quiet" in myopts:
# Quiet mode: plain cpv generator plus a no-op noise maker.
def quicky_cpv_generator(cp_all_list):
for x in cp_all_list:
for y in portdb.cp_list(x):
source = quicky_cpv_generator(portdb.cp_all())
noise_maker = portage.cache.util.quiet_mirroring()
noise_maker = source = percentage_noise_maker(portdb)
portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
eclass_cache=ec, verbose_instance=noise_maker)
os.umask(old_umask)
def action_regen(settings, portdb, max_jobs, max_load):
"""Regenerate metadata cache entries for the tree (emerge --regen).

@param settings: portage config (features checked for xterm titles)
@param portdb: the portdbapi whose ebuilds are (re)cached
@param max_jobs: maximum number of concurrent regeneration jobs
@param max_load: load-average limit used to throttle job spawning
@return: the exit status reported by the MetadataRegen run
"""
xterm_titles = "notitles" not in settings.features
emergelog(xterm_titles, " === regen")
#regenerate cache entries
portage.writemsg_stdout("Regenerating cache entries...\n")
# stdin is not needed; close it so spawned ebuild processes cannot
# block waiting for terminal input.
os.close(sys.stdin.fileno())
except SystemExit, e:
raise # Needed else can't exit
regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
portage.writemsg_stdout("done!\n")
return regen.returncode
13196 def action_config(settings, trees, myopts, myfiles):
13197 if len(myfiles) != 1:
13198 print red("!!! config can only take a single package atom at this time\n")
13200 if not is_valid_package_atom(myfiles[0]):
13201 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13203 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13204 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13208 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13209 except portage.exception.AmbiguousPackageName, e:
13210 # Multiple matches thrown from cpv_expand
13213 print "No packages found.\n"
13215 elif len(pkgs) > 1:
13216 if "--ask" in myopts:
13218 print "Please select a package to configure:"
13222 options.append(str(idx))
13223 print options[-1]+") "+pkg
13225 options.append("X")
13226 idx = userquery("Selection?", options)
13229 pkg = pkgs[int(idx)-1]
13231 print "The following packages available:"
13234 print "\nPlease use a specific atom or the --ask option."
13240 if "--ask" in myopts:
13241 if userquery("Ready to configure "+pkg+"?") == "No":
13244 print "Configuring pkg..."
13246 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13247 mysettings = portage.config(clone=settings)
13248 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13249 debug = mysettings.get("PORTAGE_DEBUG") == "1"
13250 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13252 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13253 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13254 if retval == os.EX_OK:
13255 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13256 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13259 def action_info(settings, trees, myopts, myfiles):
13260 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13261 settings.profile_path, settings["CHOST"],
13262 trees[settings["ROOT"]]["vartree"].dbapi)
13264 header_title = "System Settings"
13266 print header_width * "="
13267 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13268 print header_width * "="
13269 print "System uname: "+platform.platform(aliased=1)
13271 lastSync = portage.grabfile(os.path.join(
13272 settings["PORTDIR"], "metadata", "timestamp.chk"))
13273 print "Timestamp of tree:",
13279 output=commands.getstatusoutput("distcc --version")
13281 print str(output[1].split("\n",1)[0]),
13282 if "distcc" in settings.features:
13287 output=commands.getstatusoutput("ccache -V")
13289 print str(output[1].split("\n",1)[0]),
13290 if "ccache" in settings.features:
13295 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13296 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13297 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13298 myvars = portage.util.unique_array(myvars)
13302 if portage.isvalidatom(x):
13303 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13304 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13305 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13307 for pn, ver, rev in pkg_matches:
13309 pkgs.append(ver + "-" + rev)
13313 pkgs = ", ".join(pkgs)
13314 print "%-20s %s" % (x+":", pkgs)
13316 print "%-20s %s" % (x+":", "[NOT VALID]")
13318 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13320 if "--verbose" in myopts:
13321 myvars=settings.keys()
13323 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13324 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13325 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13326 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13328 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13330 myvars = portage.util.unique_array(myvars)
13336 print '%s="%s"' % (x, settings[x])
13338 use = set(settings["USE"].split())
13339 use_expand = settings["USE_EXPAND"].split()
13341 for varname in use_expand:
13342 flag_prefix = varname.lower() + "_"
13343 for f in list(use):
13344 if f.startswith(flag_prefix):
13348 print 'USE="%s"' % " ".join(use),
13349 for varname in use_expand:
13350 myval = settings.get(varname)
13352 print '%s="%s"' % (varname, myval),
13355 unset_vars.append(x)
13357 print "Unset: "+", ".join(unset_vars)
13360 if "--debug" in myopts:
13361 for x in dir(portage):
13362 module = getattr(portage, x)
13363 if "cvs_id_string" in dir(module):
13364 print "%s: %s" % (str(x), str(module.cvs_id_string))
13366 # See if we can find any packages installed matching the strings
13367 # passed on the command line
13369 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13370 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13372 mypkgs.extend(vardb.match(x))
13374 # If some packages were found...
13376 # Get our global settings (we only print stuff if it varies from
13377 # the current config)
13378 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13379 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13381 pkgsettings = portage.config(clone=settings)
13383 for myvar in mydesiredvars:
13384 global_vals[myvar] = set(settings.get(myvar, "").split())
13386 # Loop through each package
13387 # Only print settings if they differ from global settings
13388 header_title = "Package Settings"
13389 print header_width * "="
13390 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13391 print header_width * "="
13392 from portage.output import EOutput
13395 # Get all package specific variables
13396 auxvalues = vardb.aux_get(pkg, auxkeys)
13398 for i in xrange(len(auxkeys)):
13399 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13401 for myvar in mydesiredvars:
13402 # If the package variable doesn't match the
13403 # current global variable, something has changed
13404 # so set diff_found so we know to print
13405 if valuesmap[myvar] != global_vals[myvar]:
13406 diff_values[myvar] = valuesmap[myvar]
13407 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13408 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13409 pkgsettings.reset()
13410 # If a matching ebuild is no longer available in the tree, maybe it
13411 # would make sense to compare against the flags for the best
13412 # available version with the same slot?
13414 if portdb.cpv_exists(pkg):
13416 pkgsettings.setcpv(pkg, mydb=mydb)
13417 if valuesmap["IUSE"].intersection(
13418 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13419 diff_values["USE"] = valuesmap["USE"]
13420 # If a difference was found, print the info for
13423 # Print package info
13424 print "%s was built with the following:" % pkg
13425 for myvar in mydesiredvars + ["USE"]:
13426 if myvar in diff_values:
13427 mylist = list(diff_values[myvar])
13429 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13431 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13432 ebuildpath = vardb.findname(pkg)
13433 if not ebuildpath or not os.path.exists(ebuildpath):
13434 out.ewarn("No ebuild found for '%s'" % pkg)
13436 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13437 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13438 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
def action_search(root_config, myopts, myfiles, spinner):
"""Search the tree for each term in myfiles and print the results.

Terms are executed one at a time through a `search` instance; regular
expression errors in a term are reported for that term.

@param root_config: root configuration the search operates on
@param myopts: emerge options (--searchdesc, --quiet, --usepkg,
	--usepkgonly alter search behavior)
@param myfiles: the search terms from the command line
@param spinner: progress spinner passed through to the search instance
"""
print "emerge: no search terms provided."
searchinstance = search(root_config,
spinner, "--searchdesc" in myopts,
"--quiet" not in myopts, "--usepkg" in myopts,
"--usepkgonly" in myopts)
for mysearch in myfiles:
searchinstance.execute(mysearch)
except re.error, comment:
print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
searchinstance.output()
13457 def action_depclean(settings, trees, ldpath_mtimes,
13458 myopts, action, myfiles, spinner):
13459 # Kill packages that aren't explicitly merged or are required as a
13460 # dependency of another package. World file is explicit.
13462 # Global depclean or prune operations are not very safe when there are
13463 # missing dependencies since it's unknown how badly incomplete
13464 # the dependency graph is, and we might accidentally remove packages
13465 # that should have been pulled into the graph. On the other hand, it's
13466 # relatively safe to ignore missing deps when only asked to remove
13467 # specific packages.
13468 allow_missing_deps = len(myfiles) > 0
13471 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13472 msg.append("mistakes. Packages that are part of the world set will always\n")
13473 msg.append("be kept. They can be manually added to this set with\n")
13474 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13475 msg.append("package.provided (see portage(5)) will be removed by\n")
13476 msg.append("depclean, even if they are part of the world set.\n")
13478 msg.append("As a safety measure, depclean will not remove any packages\n")
13479 msg.append("unless *all* required dependencies have been resolved. As a\n")
13480 msg.append("consequence, it is often necessary to run %s\n" % \
13481 good("`emerge --update"))
13482 msg.append(good("--newuse --deep @system @world`") + \
13483 " prior to depclean.\n")
13485 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13486 portage.writemsg_stdout("\n")
13488 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13490 xterm_titles = "notitles" not in settings.features
13491 myroot = settings["ROOT"]
13492 root_config = trees[myroot]["root_config"]
13493 getSetAtoms = root_config.setconfig.getSetAtoms
13494 vardb = trees[myroot]["vartree"].dbapi
13496 required_set_names = ("system", "world")
13500 for s in required_set_names:
13501 required_sets[s] = InternalPackageSet(
13502 initial_atoms=getSetAtoms(s))
13505 # When removing packages, use a temporary version of world
13506 # which excludes packages that are intended to be eligible for
13508 world_temp_set = required_sets["world"]
13509 system_set = required_sets["system"]
13511 if not system_set or not world_temp_set:
13514 writemsg_level("!!! You have no system list.\n",
13515 level=logging.ERROR, noiselevel=-1)
13517 if not world_temp_set:
13518 writemsg_level("!!! You have no world file.\n",
13519 level=logging.WARNING, noiselevel=-1)
13521 writemsg_level("!!! Proceeding is likely to " + \
13522 "break your installation.\n",
13523 level=logging.WARNING, noiselevel=-1)
13524 if "--pretend" not in myopts:
13525 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13527 if action == "depclean":
13528 emergelog(xterm_titles, " >>> depclean")
13531 args_set = InternalPackageSet()
13534 if not is_valid_package_atom(x):
13535 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13536 level=logging.ERROR, noiselevel=-1)
13537 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13540 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13541 except portage.exception.AmbiguousPackageName, e:
13542 msg = "The short ebuild name \"" + x + \
13543 "\" is ambiguous. Please specify " + \
13544 "one of the following " + \
13545 "fully-qualified ebuild names instead:"
13546 for line in textwrap.wrap(msg, 70):
13547 writemsg_level("!!! %s\n" % (line,),
13548 level=logging.ERROR, noiselevel=-1)
13550 writemsg_level(" %s\n" % colorize("INFORM", i),
13551 level=logging.ERROR, noiselevel=-1)
13552 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13555 matched_packages = False
13558 matched_packages = True
13560 if not matched_packages:
13561 writemsg_level(">>> No packages selected for removal by %s\n" % \
13565 writemsg_level("\nCalculating dependencies ")
13566 resolver_params = create_depgraph_params(myopts, "remove")
13567 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13568 vardb = resolver.trees[myroot]["vartree"].dbapi
13570 if action == "depclean":
13573 # Pull in everything that's installed but not matched
13574 # by an argument atom since we don't want to clean any
13575 # package if something depends on it.
13577 world_temp_set.clear()
13582 if args_set.findAtomForPackage(pkg) is None:
13583 world_temp_set.add("=" + pkg.cpv)
13585 except portage.exception.InvalidDependString, e:
13586 show_invalid_depstring_notice(pkg,
13587 pkg.metadata["PROVIDE"], str(e))
13589 world_temp_set.add("=" + pkg.cpv)
13592 elif action == "prune":
13594 # Pull in everything that's installed since we don't
13595 # to prune a package if something depends on it.
13596 world_temp_set.clear()
13597 world_temp_set.update(vardb.cp_all())
13601 # Try to prune everything that's slotted.
13602 for cp in vardb.cp_all():
13603 if len(vardb.cp_list(cp)) > 1:
13606 # Remove atoms from world that match installed packages
13607 # that are also matched by argument atoms, but do not remove
13608 # them if they match the highest installed version.
13611 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13612 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13613 raise AssertionError("package expected in matches: " + \
13614 "cp = %s, cpv = %s matches = %s" % \
13615 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13617 highest_version = pkgs_for_cp[-1]
13618 if pkg == highest_version:
13619 # pkg is the highest version
13620 world_temp_set.add("=" + pkg.cpv)
13623 if len(pkgs_for_cp) <= 1:
13624 raise AssertionError("more packages expected: " + \
13625 "cp = %s, cpv = %s matches = %s" % \
13626 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13629 if args_set.findAtomForPackage(pkg) is None:
13630 world_temp_set.add("=" + pkg.cpv)
13632 except portage.exception.InvalidDependString, e:
13633 show_invalid_depstring_notice(pkg,
13634 pkg.metadata["PROVIDE"], str(e))
13636 world_temp_set.add("=" + pkg.cpv)
13640 for s, package_set in required_sets.iteritems():
13641 set_atom = SETPREFIX + s
13642 set_arg = SetArg(arg=set_atom, set=package_set,
13643 root_config=resolver.roots[myroot])
13644 set_args[s] = set_arg
13645 for atom in set_arg.set:
13646 resolver._dep_stack.append(
13647 Dependency(atom=atom, root=myroot, parent=set_arg))
13648 resolver.digraph.add(set_arg, None)
13650 success = resolver._complete_graph()
13651 writemsg_level("\b\b... done!\n")
13653 resolver.display_problems()
# NOTE(review): this excerpt is a sampled listing — the embedded line numbers
# jump (13658 -> 13660, 13664 -> 13666, ...), so some statements are missing.
# Closure over `resolver`, `allow_missing_deps` and `action` from the enclosing
# depclean/prune action. Scans the resolver's initially-unsatisfied deps and,
# if any hard (stronger than SOFT) dependency of an installed Package could not
# be resolved, prints an error block and (presumably, in a missing line)
# signals the caller to abort — TODO confirm return value against full source.
13658 def unresolved_deps():
13660 unresolvable = set()
13661 for dep in resolver._initially_unsatisfied_deps:
# Only dependencies pulled in by real Package parents with priority above
# UnmergeDepPriority.SOFT count as blocking.
13662 if isinstance(dep.parent, Package) and \
13663 (dep.priority > UnmergeDepPriority.SOFT):
13664 unresolvable.add((dep.atom, dep.parent.cpv))
13666 if not unresolvable:
13669 if unresolvable and not allow_missing_deps:
13670 prefix = bad(" * ")
# NOTE(review): `msg` is appended to below but its initialization (likely
# `msg = []`) falls in a gap of this excerpt (13671) — verify in full source.
13672 msg.append("Dependencies could not be completely resolved due to")
13673 msg.append("the following required packages not being installed:")
13675 for atom, parent in unresolvable:
13676 msg.append(" %s pulled in by:" % (atom,))
13677 msg.append(" %s" % (parent,))
13679 msg.append("Have you forgotten to run " + \
13680 good("`emerge --update --newuse --deep @system @world`") + " prior")
13681 msg.append(("to %s? It may be necessary to manually " + \
13682 "uninstall packages that no longer") % action)
13683 msg.append("exist in the portage tree since " + \
13684 "it may not be possible to satisfy their")
13685 msg.append("dependencies. Also, be aware of " + \
13686 "the --with-bdeps option that is documented")
13687 msg.append("in " + good("`man emerge`") + ".")
13688 if action == "prune":
13690 msg.append("If you would like to ignore " + \
13691 "dependencies then use %s." % good("--nodeps"))
# Emit every collected line with the red " * " prefix at ERROR level.
13692 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13693 level=logging.ERROR, noiselevel=-1)
13697 if unresolved_deps():
13700 graph = resolver.digraph.copy()
13701 required_pkgs_total = 0
13703 if isinstance(node, Package):
13704 required_pkgs_total += 1
# NOTE(review): sampled listing — embedded line numbers show gaps
# (13711 -> 13714, 13715 -> 13718), so some statements are missing here.
# Closure over `graph` (resolver digraph copy): prints, in --verbose mode,
# which packages pull in `child_node` and therefore keep it installed.
13706 def show_parents(child_node):
13707 parent_nodes = graph.parent_nodes(child_node)
13708 if not parent_nodes:
13709 # With --prune, the highest version can be pulled in without any
13710 # real parent since all installed packages are pulled in. In that
13711 # case there's nothing to show here.
# Render each parent by its cpv when it is a Package; fall back to str(node)
# for non-package nodes (e.g. set arguments).
13714 for node in parent_nodes:
13715 parent_strs.append(str(getattr(node, "cpv", node)))
# NOTE(review): `parent_strs`/`msg` initialization and any sorting fall in
# gaps of this excerpt — confirm against the full source.
13718 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13719 for parent_str in parent_strs:
13720 msg.append(" %s\n" % (parent_str,))
13722 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Old-style cmp() comparator for use with cmp_sort_key(); compares Package
# instances by their cpv attribute.
# NOTE(review): the return statements (expected -1/0/1) fall in gaps of this
# sampled excerpt (13727, 13729-13732) — only the branch conditions are visible.
13724 def cmp_pkg_cpv(pkg1, pkg2):
13725 """Sort Package instances by cpv."""
13726 if pkg1.cpv > pkg2.cpv:
13728 elif pkg1.cpv == pkg2.cpv:
# NOTE(review): sampled listing — embedded line numbers show many gaps
# (13734 -> 13736, 13744 -> 13748, ...), so statements (try blocks, verbose
# branches, writemsg calls) are missing from this view.
# Closure over `action`, `vardb`, `graph`, `args_set`, `myopts`, `set_args`:
# returns the list of installed Package instances that are NOT reachable in
# the dependency graph and are therefore candidates for unmerging.
13733 def create_cleanlist():
13734 pkgs_to_remove = []
13736 if action == "depclean":
# depclean with argument atoms: only packages matched by the arguments
# are eligible; a missing branch (gap) appears to handle the no-args case.
13739 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13742 arg_atom = args_set.findAtomForPackage(pkg)
13743 except portage.exception.InvalidDependString:
13744 # this error has already been displayed by now
13748 if pkg not in graph:
13749 pkgs_to_remove.append(pkg)
13750 elif "--verbose" in myopts:
# Second loop: presumably the no-argument depclean path — every installed
# package not in the graph is removable. TODO confirm branch structure.
13754 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13755 if pkg not in graph:
13756 pkgs_to_remove.append(pkg)
13757 elif "--verbose" in myopts:
13760 elif action == "prune":
13761 # Prune really uses all installed instead of world. It's not
13762 # a real reverse dependency so don't display it as such.
13763 graph.remove(set_args["world"])
# Prune only considers packages matched by the argument atoms.
13765 for atom in args_set:
13766 for pkg in vardb.match_pkgs(atom):
13767 if pkg not in graph:
13768 pkgs_to_remove.append(pkg)
13769 elif "--verbose" in myopts:
# Nothing selected: print hints about --verbose and (for prune) --nodeps.
13772 if not pkgs_to_remove:
13774 ">>> No packages selected for removal by %s\n" % action)
13775 if "--verbose" not in myopts:
13777 ">>> To see reverse dependencies, use %s\n" % \
13779 if action == "prune":
13781 ">>> To ignore dependencies, use %s\n" % \
13784 return pkgs_to_remove
13786 cleanlist = create_cleanlist()
13789 clean_set = set(cleanlist)
13791 # Check if any of these package are the sole providers of libraries
13792 # with consumers that have not been selected for removal. If so, these
13793 # packages and any dependencies need to be added to the graph.
13794 real_vardb = trees[myroot]["vartree"].dbapi
13795 linkmap = real_vardb.linkmap
13796 liblist = linkmap.listLibraryObjects()
13797 consumer_cache = {}
13798 provider_cache = {}
13802 writemsg_level(">>> Checking for lib consumers...\n")
13804 for pkg in cleanlist:
13805 pkg_dblink = real_vardb._dblink(pkg.cpv)
13806 provided_libs = set()
13808 for lib in liblist:
13809 if pkg_dblink.isowner(lib, myroot):
13810 provided_libs.add(lib)
13812 if not provided_libs:
13816 for lib in provided_libs:
13817 lib_consumers = consumer_cache.get(lib)
13818 if lib_consumers is None:
13819 lib_consumers = linkmap.findConsumers(lib)
13820 consumer_cache[lib] = lib_consumers
13822 consumers[lib] = lib_consumers
13827 for lib, lib_consumers in consumers.items():
13828 for consumer_file in list(lib_consumers):
13829 if pkg_dblink.isowner(consumer_file, myroot):
13830 lib_consumers.remove(consumer_file)
13831 if not lib_consumers:
13837 for lib, lib_consumers in consumers.iteritems():
13839 soname = soname_cache.get(lib)
13841 soname = linkmap.getSoname(lib)
13842 soname_cache[lib] = soname
13844 consumer_providers = []
13845 for lib_consumer in lib_consumers:
13846 providers = provider_cache.get(lib)
13847 if providers is None:
13848 providers = linkmap.findProviders(lib_consumer)
13849 provider_cache[lib_consumer] = providers
13850 if soname not in providers:
13851 # Why does this happen?
13853 consumer_providers.append(
13854 (lib_consumer, providers[soname]))
13856 consumers[lib] = consumer_providers
13858 consumer_map[pkg] = consumers
13862 search_files = set()
13863 for consumers in consumer_map.itervalues():
13864 for lib, consumer_providers in consumers.iteritems():
13865 for lib_consumer, providers in consumer_providers:
13866 search_files.add(lib_consumer)
13867 search_files.update(providers)
13869 writemsg_level(">>> Assigning files to packages...\n")
13870 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13872 for pkg, consumers in consumer_map.items():
13873 for lib, consumer_providers in consumers.items():
13874 lib_consumers = set()
13876 for lib_consumer, providers in consumer_providers:
13877 owner_set = file_owners.get(lib_consumer)
13878 provider_dblinks = set()
13879 provider_pkgs = set()
13881 if len(providers) > 1:
13882 for provider in providers:
13883 provider_set = file_owners.get(provider)
13884 if provider_set is not None:
13885 provider_dblinks.update(provider_set)
13887 if len(provider_dblinks) > 1:
13888 for provider_dblink in provider_dblinks:
13889 pkg_key = ("installed", myroot,
13890 provider_dblink.mycpv, "nomerge")
13891 if pkg_key not in clean_set:
13892 provider_pkgs.add(vardb.get(pkg_key))
13897 if owner_set is not None:
13898 lib_consumers.update(owner_set)
13900 for consumer_dblink in list(lib_consumers):
13901 if ("installed", myroot, consumer_dblink.mycpv,
13902 "nomerge") in clean_set:
13903 lib_consumers.remove(consumer_dblink)
13907 consumers[lib] = lib_consumers
13911 del consumer_map[pkg]
13914 # TODO: Implement a package set for rebuilding consumer packages.
13916 msg = "In order to avoid breakage of link level " + \
13917 "dependencies, one or more packages will not be removed. " + \
13918 "This can be solved by rebuilding " + \
13919 "the packages that pulled them in."
13921 prefix = bad(" * ")
13922 from textwrap import wrap
13923 writemsg_level("".join(prefix + "%s\n" % line for \
13924 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13927 for pkg, consumers in consumer_map.iteritems():
13928 unique_consumers = set(chain(*consumers.values()))
13929 unique_consumers = sorted(consumer.mycpv \
13930 for consumer in unique_consumers)
13932 msg.append(" %s pulled in by:" % (pkg.cpv,))
13933 for consumer in unique_consumers:
13934 msg.append(" %s" % (consumer,))
13936 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13937 level=logging.WARNING, noiselevel=-1)
13939 # Add lib providers to the graph as children of lib consumers,
13940 # and also add any dependencies pulled in by the provider.
13941 writemsg_level(">>> Adding lib providers to graph...\n")
13943 for pkg, consumers in consumer_map.iteritems():
13944 for consumer_dblink in set(chain(*consumers.values())):
13945 consumer_pkg = vardb.get(("installed", myroot,
13946 consumer_dblink.mycpv, "nomerge"))
13947 if not resolver._add_pkg(pkg,
13948 Dependency(parent=consumer_pkg,
13949 priority=UnmergeDepPriority(runtime=True),
13951 resolver.display_problems()
13954 writemsg_level("\nCalculating dependencies ")
13955 success = resolver._complete_graph()
13956 writemsg_level("\b\b... done!\n")
13957 resolver.display_problems()
13960 if unresolved_deps():
13963 graph = resolver.digraph.copy()
13964 required_pkgs_total = 0
13966 if isinstance(node, Package):
13967 required_pkgs_total += 1
13968 cleanlist = create_cleanlist()
13971 clean_set = set(cleanlist)
13973 # Use a topological sort to create an unmerge order such that
13974 # each package is unmerged before it's dependencies. This is
13975 # necessary to avoid breaking things that may need to run
13976 # during pkg_prerm or pkg_postrm phases.
13978 # Create a new graph to account for dependencies between the
13979 # packages being unmerged.
13983 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13984 runtime = UnmergeDepPriority(runtime=True)
13985 runtime_post = UnmergeDepPriority(runtime_post=True)
13986 buildtime = UnmergeDepPriority(buildtime=True)
13988 "RDEPEND": runtime,
13989 "PDEPEND": runtime_post,
13990 "DEPEND": buildtime,
13993 for node in clean_set:
13994 graph.add(node, None)
13996 node_use = node.metadata["USE"].split()
13997 for dep_type in dep_keys:
13998 depstr = node.metadata[dep_type]
14002 portage.dep._dep_check_strict = False
14003 success, atoms = portage.dep_check(depstr, None, settings,
14004 myuse=node_use, trees=resolver._graph_trees,
14007 portage.dep._dep_check_strict = True
14009 # Ignore invalid deps of packages that will
14010 # be uninstalled anyway.
14013 priority = priority_map[dep_type]
14015 if not isinstance(atom, portage.dep.Atom):
14016 # Ignore invalid atoms returned from dep_check().
14020 matches = vardb.match_pkgs(atom)
14023 for child_node in matches:
14024 if child_node in clean_set:
14025 graph.add(child_node, node, priority=priority)
14028 if len(graph.order) == len(graph.root_nodes()):
14029 # If there are no dependencies between packages
14030 # let unmerge() group them by cat/pn.
14032 cleanlist = [pkg.cpv for pkg in graph.order]
14034 # Order nodes from lowest to highest overall reference count for
14035 # optimal root node selection.
14036 node_refcounts = {}
14037 for node in graph.order:
14038 node_refcounts[node] = len(graph.parent_nodes(node))
14039 def cmp_reference_count(node1, node2):
14040 return node_refcounts[node1] - node_refcounts[node2]
14041 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14043 ignore_priority_range = [None]
14044 ignore_priority_range.extend(
14045 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14046 while not graph.empty():
14047 for ignore_priority in ignore_priority_range:
14048 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14052 raise AssertionError("no root nodes")
14053 if ignore_priority is not None:
14054 # Some deps have been dropped due to circular dependencies,
14055 # so only pop one node in order do minimize the number that
14060 cleanlist.append(node.cpv)
14062 unmerge(root_config, myopts, "unmerge", cleanlist,
14063 ldpath_mtimes, ordered=ordered)
14065 if action == "prune":
14068 if not cleanlist and "--quiet" in myopts:
14071 print "Packages installed: "+str(len(vardb.cpv_all()))
14072 print "Packages in world: " + \
14073 str(len(root_config.sets["world"].getAtoms()))
14074 print "Packages in system: " + \
14075 str(len(root_config.sets["system"].getAtoms()))
14076 print "Required packages: "+str(required_pkgs_total)
14077 if "--pretend" in myopts:
14078 print "Number to remove: "+str(len(cleanlist))
14080 print "Number removed: "+str(len(cleanlist))
# NOTE(review): sampled listing — embedded line numbers show gaps (14083,
# 14086, 14088-14089, 14093, 14096, ...), so the docstring delimiters, the
# enclosing while/try structure, and several statements are missing from
# this view. Comments below describe only what the visible lines establish.
14082 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14084 Construct a depgraph for the given resume list. This will raise
14085 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14087 @returns: (success, depgraph, dropped_tasks)
14090 skip_unsatisfied = True
14091 mergelist = mtimedb["resume"]["mergelist"]
14092 dropped_tasks = set()
# Presumably retried in a loop (not visible here): build a depgraph and try
# to load the saved resume command, dropping unsatisfiable entries each pass.
14094 mydepgraph = depgraph(settings, trees,
14095 myopts, myparams, spinner)
14097 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14098 skip_masked=skip_masked)
14099 except depgraph.UnsatisfiedResumeDep, e:
14100 if not skip_unsatisfied:
14103 graph = mydepgraph.digraph
# Start from the direct parents of the unsatisfied deps; the dict doubles
# as an ordered "set" of packages that must be dropped from the mergelist.
14104 unsatisfied_parents = dict((dep.parent, dep.parent) \
14105 for dep in e.value)
14106 traversed_nodes = set()
14107 unsatisfied_stack = list(unsatisfied_parents)
# Walk ancestors: dropping a package may leave its own parents unsatisfied
# in turn, so propagate upward through the graph.
14108 while unsatisfied_stack:
14109 pkg = unsatisfied_stack.pop()
14110 if pkg in traversed_nodes:
14112 traversed_nodes.add(pkg)
14114 # If this package was pulled in by a parent
14115 # package scheduled for merge, removing this
14116 # package may cause the parent package's
14117 # dependency to become unsatisfied.
14118 for parent_node in graph.parent_nodes(pkg):
14119 if not isinstance(parent_node, Package) \
14120 or parent_node.operation not in ("merge", "nomerge"):
# NOTE(review): the assignment consuming this child_nodes() call (likely
# `unsatisfied = ...`) falls in a gap (14121-14122) of this excerpt.
14123 graph.child_nodes(parent_node,
14124 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14125 if pkg in unsatisfied:
14126 unsatisfied_parents[parent_node] = parent_node
14127 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the dropped packages (resume entries are
# 4-element lists; unsatisfied_parents is keyed by their tuple form).
14129 pruned_mergelist = []
14130 for x in mergelist:
14131 if isinstance(x, list) and \
14132 tuple(x) not in unsatisfied_parents:
14133 pruned_mergelist.append(x)
14135 # If the mergelist doesn't shrink then this loop is infinite.
14136 if len(pruned_mergelist) == len(mergelist):
14137 # This happens if a package can't be dropped because
14138 # it's already installed, but it has unsatisfied PDEPEND.
14140 mergelist[:] = pruned_mergelist
14142 # Exclude installed packages that have been removed from the graph due
14143 # to failure to build/install runtime dependencies after the dependent
14144 # package has already been installed.
14145 dropped_tasks.update(pkg for pkg in \
14146 unsatisfied_parents if pkg.operation != "nomerge")
# Break circular references so the discarded depgraph can be collected.
14147 mydepgraph.break_refs(unsatisfied_parents)
14149 del e, graph, traversed_nodes, \
14150 unsatisfied_parents, unsatisfied_stack
14154 return (success, mydepgraph, dropped_tasks)
# NOTE(review): sampled listing — the embedded line numbers jump throughout
# (14157 -> 14159, 14162 -> 14164, ...), so try/except scaffolding, `break`/
# `continue`/`return` statements, and several branches are missing from this
# excerpt. Section comments below describe only what the visible lines show.
#
# Top-level handler for the build/merge actions of emerge: validates resume
# state, builds (or resumes) a depgraph, displays/prompts as requested, then
# hands the merge list to the Scheduler and optionally auto-cleans.
14156 def action_build(settings, trees, mtimedb,
14157 myopts, myaction, myfiles, spinner):
14159 # validate the state of the resume data
14160 # so that we can make assumptions later.
14161 for k in ("resume", "resume_backup"):
14162 if k not in mtimedb:
# Each structural check below presumably deletes the stale entry and
# continues (the deletion lines fall in gaps of this excerpt).
14164 resume_data = mtimedb[k]
14165 if not isinstance(resume_data, dict):
14168 mergelist = resume_data.get("mergelist")
14169 if not isinstance(mergelist, list):
14172 for x in mergelist:
14173 if not (isinstance(x, list) and len(x) == 4):
14175 pkg_type, pkg_root, pkg_key, pkg_action = x
14176 if pkg_root not in trees:
14177 # Current $ROOT setting differs,
14178 # so the list must be stale.
14184 resume_opts = resume_data.get("myopts")
14185 if not isinstance(resume_opts, (dict, list)):
14188 favorites = resume_data.get("favorites")
14189 if not isinstance(favorites, list):
# --- Resume handling: restore options saved with the interrupted merge ---
14194 if "--resume" in myopts and \
14195 ("resume" in mtimedb or
14196 "resume_backup" in mtimedb):
14198 if "resume" not in mtimedb:
14199 mtimedb["resume"] = mtimedb["resume_backup"]
14200 del mtimedb["resume_backup"]
14202 # "myopts" is a list for backward compatibility.
14203 resume_opts = mtimedb["resume"].get("myopts", [])
14204 if isinstance(resume_opts, list):
14205 resume_opts = dict((k,True) for k in resume_opts)
# These options only make sense for the original invocation, not a resume.
14206 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14207 resume_opts.pop(opt, None)
14208 myopts.update(resume_opts)
14210 if "--debug" in myopts:
14211 writemsg_level("myopts %s\n" % (myopts,))
14213 # Adjust config according to options of the command being resumed.
14214 for myroot in trees:
14215 mysettings = trees[myroot]["vartree"].settings
14216 mysettings.unlock()
14217 adjust_config(myopts, mysettings)
14219 del myroot, mysettings
# --- Cache frequently-tested option flags as locals ---
14221 ldpath_mtimes = mtimedb["ldpath"]
14224 buildpkgonly = "--buildpkgonly" in myopts
14225 pretend = "--pretend" in myopts
14226 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14227 ask = "--ask" in myopts
14228 nodeps = "--nodeps" in myopts
14229 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14230 tree = "--tree" in myopts
14231 if nodeps and tree:
14233 del myopts["--tree"]
14234 portage.writemsg(colorize("WARN", " * ") + \
14235 "--tree is broken with --nodeps. Disabling...\n")
14236 debug = "--debug" in myopts
14237 verbose = "--verbose" in myopts
14238 quiet = "--quiet" in myopts
14239 if pretend or fetchonly:
14240 # make the mtimedb readonly
14241 mtimedb.filename = None
14242 if "--digest" in myopts:
14243 msg = "The --digest option can prevent corruption from being" + \
14244 " noticed. The `repoman manifest` command is the preferred" + \
14245 " way to generate manifests and it is capable of doing an" + \
14246 " entire repository or category at once."
14247 prefix = bad(" * ")
14248 writemsg(prefix + "\n")
14249 from textwrap import wrap
14250 for line in wrap(msg, 72):
14251 writemsg("%s%s\n" % (prefix, line))
14252 writemsg(prefix + "\n")
# --- Informational banner before displaying the merge list ---
14254 if "--quiet" not in myopts and \
14255 ("--pretend" in myopts or "--ask" in myopts or \
14256 "--tree" in myopts or "--verbose" in myopts):
14258 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14260 elif "--buildpkgonly" in myopts:
14264 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14266 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14270 print darkgreen("These are the packages that would be %s, in order:") % action
14273 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14274 if not show_spinner:
14275 spinner.update = spinner.update_quiet
# --- Build the depgraph: resume path vs. fresh select_files path ---
14278 favorites = mtimedb["resume"].get("favorites")
14279 if not isinstance(favorites, list):
14283 print "Calculating dependencies ",
14284 myparams = create_depgraph_params(myopts, myaction)
14286 resume_data = mtimedb["resume"]
14287 mergelist = resume_data["mergelist"]
# --skipfirst: drop the first "merge" task from the saved mergelist
# (the actual deletion falls in a gap of this excerpt).
14288 if mergelist and "--skipfirst" in myopts:
14289 for i, task in enumerate(mergelist):
14290 if isinstance(task, list) and \
14291 task and task[-1] == "merge":
14298 success, mydepgraph, dropped_tasks = resume_depgraph(
14299 settings, trees, mtimedb, myopts, myparams, spinner)
14300 except (portage.exception.PackageNotFound,
14301 depgraph.UnsatisfiedResumeDep), e:
14302 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14303 mydepgraph = e.depgraph
# Resume failed: report the invalid/unsatisfiable resume list via EOutput.
14306 from textwrap import wrap
14307 from portage.output import EOutput
14310 resume_data = mtimedb["resume"]
14311 mergelist = resume_data.get("mergelist")
14312 if not isinstance(mergelist, list):
14314 if mergelist and debug or (verbose and not quiet):
14315 out.eerror("Invalid resume list:")
14318 for task in mergelist:
14319 if isinstance(task, list):
14320 out.eerror(indent + str(tuple(task)))
14323 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14324 out.eerror("One or more packages are either masked or " + \
14325 "have missing dependencies:")
14328 for dep in e.value:
14329 if dep.atom is None:
14330 out.eerror(indent + "Masked package:")
14331 out.eerror(2 * indent + str(dep.parent))
14334 out.eerror(indent + str(dep.atom) + " pulled in by:")
14335 out.eerror(2 * indent + str(dep.parent))
14337 msg = "The resume list contains packages " + \
14338 "that are either masked or have " + \
14339 "unsatisfied dependencies. " + \
14340 "Please restart/continue " + \
14341 "the operation manually, or use --skipfirst " + \
14342 "to skip the first package in the list and " + \
14343 "any other packages that may be " + \
14344 "masked or have missing dependencies."
14345 for line in wrap(msg, 72):
14347 elif isinstance(e, portage.exception.PackageNotFound):
14348 out.eerror("An expected package is " + \
14349 "not available: %s" % str(e))
14351 msg = "The resume list contains one or more " + \
14352 "packages that are no longer " + \
14353 "available. Please restart/continue " + \
14354 "the operation manually."
14355 for line in wrap(msg, 72):
14359 print "\b\b... done!"
# Report tasks that were silently dropped while pruning the resume list.
14363 portage.writemsg("!!! One or more packages have been " + \
14364 "dropped due to\n" + \
14365 "!!! masking or unsatisfied dependencies:\n\n",
14367 for task in dropped_tasks:
14368 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14369 portage.writemsg("\n", noiselevel=-1)
14372 if mydepgraph is not None:
14373 mydepgraph.display_problems()
14374 if not (ask or pretend):
14375 # delete the current list and also the backup
14376 # since it's probably stale too.
14377 for k in ("resume", "resume_backup"):
14378 mtimedb.pop(k, None)
14383 if ("--resume" in myopts):
14384 print darkgreen("emerge: It seems we have nothing to resume...")
# Fresh (non-resume) path: resolve the requested files/atoms/sets.
14387 myparams = create_depgraph_params(myopts, myaction)
14388 if "--quiet" not in myopts and "--nodeps" not in myopts:
14389 print "Calculating dependencies ",
14391 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14393 retval, favorites = mydepgraph.select_files(myfiles)
14394 except portage.exception.PackageNotFound, e:
14395 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14397 except portage.exception.PackageSetNotFound, e:
14398 root_config = trees[settings["ROOT"]]["root_config"]
14399 display_missing_pkg_set(root_config, e.value)
14402 print "\b\b... done!"
14404 mydepgraph.display_problems()
# --- Interactive display + prompt (--ask / --tree / --verbose) ---
14407 if "--pretend" not in myopts and \
14408 ("--ask" in myopts or "--tree" in myopts or \
14409 "--verbose" in myopts) and \
14410 not ("--quiet" in myopts and "--ask" not in myopts):
14411 if "--resume" in myopts:
14412 mymergelist = mydepgraph.altlist()
14413 if len(mymergelist) == 0:
14414 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14416 favorites = mtimedb["resume"]["favorites"]
14417 retval = mydepgraph.display(
14418 mydepgraph.altlist(reversed=tree),
14419 favorites=favorites)
14420 mydepgraph.display_problems()
14421 if retval != os.EX_OK:
14423 prompt="Would you like to resume merging these packages?"
14425 retval = mydepgraph.display(
14426 mydepgraph.altlist(reversed=("--tree" in myopts)),
14427 favorites=favorites)
14428 mydepgraph.display_problems()
14429 if retval != os.EX_OK:
14432 for x in mydepgraph.altlist():
14433 if isinstance(x, Package) and x.operation == "merge":
# Choose the prompt text based on what would actually happen.
14437 sets = trees[settings["ROOT"]]["root_config"].sets
14438 world_candidates = None
14439 if "--noreplace" in myopts and \
14440 not oneshot and favorites:
14441 # Sets that are not world candidates are filtered
14442 # out here since the favorites list needs to be
14443 # complete for depgraph.loadResumeCommand() to
14444 # operate correctly.
14445 world_candidates = [x for x in favorites \
14446 if not (x.startswith(SETPREFIX) and \
14447 not sets[x[1:]].world_candidate)]
14448 if "--noreplace" in myopts and \
14449 not oneshot and world_candidates:
14451 for x in world_candidates:
14452 print " %s %s" % (good("*"), x)
14453 prompt="Would you like to add these packages to your world favorites?"
14454 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14455 prompt="Nothing to merge; would you like to auto-clean packages?"
14458 print "Nothing to merge; quitting."
14461 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14462 prompt="Would you like to fetch the source files for these packages?"
14464 prompt="Would you like to merge these packages?"
14466 if "--ask" in myopts and userquery(prompt) == "No":
14471 # Don't ask again (e.g. when auto-cleaning packages after merge)
14472 myopts.pop("--ask", None)
# --- Pretend-only display (no prompt, no merge) ---
14474 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14475 if ("--resume" in myopts):
14476 mymergelist = mydepgraph.altlist()
14477 if len(mymergelist) == 0:
14478 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14480 favorites = mtimedb["resume"]["favorites"]
14481 retval = mydepgraph.display(
14482 mydepgraph.altlist(reversed=tree),
14483 favorites=favorites)
14484 mydepgraph.display_problems()
14485 if retval != os.EX_OK:
14488 retval = mydepgraph.display(
14489 mydepgraph.altlist(reversed=("--tree" in myopts)),
14490 favorites=favorites)
14491 mydepgraph.display_problems()
14492 if retval != os.EX_OK:
# --buildpkgonly requires a fully-merged dependency graph; this check
# appears twice, once per outer branch (pretend vs. real merge).
14494 if "--buildpkgonly" in myopts:
14495 graph_copy = mydepgraph.digraph.clone()
14496 removed_nodes = set()
14497 for node in list(graph_copy.order):
14498 if not isinstance(node, Package) or \
14499 node.operation == "nomerge":
14500 removed_nodes.add(node)
14501 graph_copy.difference_update(removed_nodes)
14502 if not graph_copy.hasallzeros(ignore_priority = \
14503 DepPrioritySatisfiedRange.ignore_medium):
14504 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14505 print "!!! You have to merge the dependencies before you can build this package.\n"
14508 if "--buildpkgonly" in myopts:
14509 graph_copy = mydepgraph.digraph.clone()
14510 removed_nodes = set()
14511 for node in list(graph_copy.order):
14512 if not isinstance(node, Package) or \
14513 node.operation == "nomerge":
14514 removed_nodes.add(node)
14515 graph_copy.difference_update(removed_nodes)
14516 if not graph_copy.hasallzeros(ignore_priority = \
14517 DepPrioritySatisfiedRange.ignore_medium):
14518 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14519 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# --- Actually merge: resume path ---
14522 if ("--resume" in myopts):
14523 favorites=mtimedb["resume"]["favorites"]
14524 mymergelist = mydepgraph.altlist()
# break_refs + del allow the large depgraph to be garbage collected
# before the long-running merge starts.
14525 mydepgraph.break_refs(mymergelist)
14526 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14527 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14528 del mydepgraph, mymergelist
14529 clear_caches(trees)
14531 retval = mergetask.merge()
14532 merge_count = mergetask.curval
# --- Non-resume path: save resume state, optional --digest, then merge ---
14534 if "resume" in mtimedb and \
14535 "mergelist" in mtimedb["resume"] and \
14536 len(mtimedb["resume"]["mergelist"]) > 1:
14537 mtimedb["resume_backup"] = mtimedb["resume"]
14538 del mtimedb["resume"]
14540 mtimedb["resume"]={}
14541 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14542 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14543 # a list type for options.
14544 mtimedb["resume"]["myopts"] = myopts.copy()
14546 # Convert Atom instances to plain str.
14547 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14549 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14550 for pkgline in mydepgraph.altlist():
14551 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14552 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14553 tmpsettings = portage.config(clone=settings)
14555 if settings.get("PORTAGE_DEBUG", "") == "1":
14557 retval = portage.doebuild(
14558 y, "digest", settings["ROOT"], tmpsettings, edebug,
14559 ("--pretend" in myopts),
14560 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14563 pkglist = mydepgraph.altlist()
14564 mydepgraph.saveNomergeFavorites()
14565 mydepgraph.break_refs(pkglist)
14566 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14567 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14568 del mydepgraph, pkglist
14569 clear_caches(trees)
14571 retval = mergetask.merge()
14572 merge_count = mergetask.curval
# --- Post-merge: auto-clean unless this was pretend/fetch/buildpkg-only ---
14574 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14575 if "yes" == settings.get("AUTOCLEAN"):
14576 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14577 unmerge(trees[settings["ROOT"]]["root_config"],
14578 myopts, "clean", [],
14579 ldpath_mtimes, autoclean=1)
14581 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14582 + " AUTOCLEAN is disabled. This can cause serious"
14583 + " problems due to overlapping packages.\n")
# Drop preserved-libs registry entries whose files no longer exist.
14584 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that two mutually-exclusive emerge actions were requested.
# NOTE(review): the terminating statement (presumably sys.exit) falls in a
# gap (14591) of this sampled excerpt.
14588 def multiple_actions(action1, action2):
14589 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14590 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# NOTE(review): sampled listing — embedded line numbers show gaps (14594,
# 14599-14601, 14605, 14607, ...), so the docstring delimiters, `new_args`
# initialization, try/except around int() parsing, and the final return are
# missing from this view.
14593 def insert_optional_args(args):
14595 Parse optional arguments and insert a value if one has
14596 not been provided. This is done before feeding the args
14597 to the optparse parser since that parser does not support
14598 this feature natively.
14602 jobs_opts = ("-j", "--jobs")
# Process args left-to-right by popping from a reversed copy.
14603 arg_stack = args[:]
14604 arg_stack.reverse()
14606 arg = arg_stack.pop()
# A bundled short option containing 'j' (e.g. "-vj" or "-j4"), as opposed
# to a long option starting with "--".
14608 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14609 if not (short_job_opt or arg in jobs_opts):
14610 new_args.append(arg)
14613 # Insert an empty placeholder in order to
14614 # satisfy the requirements of optparse.
14616 new_args.append("--jobs")
# Extract a count fused into the short option ("-j4") or split other
# bundled short flags away from the 'j' ("-vj" -> saved "-v").
14619 if short_job_opt and len(arg) > 2:
14620 if arg[:2] == "-j":
14622 job_count = int(arg[2:])
14624 saved_opts = arg[2:]
14627 saved_opts = arg[1:].replace("j", "")
# No fused count: the next standalone argument may be the count.
14629 if job_count is None and arg_stack:
14631 job_count = int(arg_stack[-1])
14635 # Discard the job count from the stack
14636 # since we're consuming it here.
14639 if job_count is None:
14640 # unlimited number of jobs
14641 new_args.append("True")
14643 new_args.append(str(job_count))
# Re-emit any short flags that were bundled with -j.
14645 if saved_opts is not None:
14646 new_args.append("-" + saved_opts)
# NOTE(review): this listing is gappy — many original lines (several
# argument_options entries, try/except headers, accumulator inits) are
# missing between the numbered lines below; do not assume contiguity.
14650 def parse_opts(tmpcmdline, silent=False):
# Parse the emerge command line into (myaction, myopts, myfiles).
# Depends on module-level `actions`, `options` and `shortmapping` tables
# defined elsewhere in this file.
14655 global actions, options, shortmapping
14657 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument; each value holds optparse add_option kwargs.
14658 argument_options = {
14660 "help":"specify the location for portage configuration files",
14664 "help":"enable or disable color output",
14666 "choices":("y", "n")
14671 "help" : "Specifies the number of packages to build " + \
14677 "--load-average": {
14679 "help" :"Specifies that no new builds should be started " + \
14680 "if there are other builds running and the load average " + \
14681 "is at least LOAD (a floating-point number).",
14687 "help":"include unnecessary build time dependencies",
14689 "choices":("y", "n")
14692 "help":"specify conditions to trigger package reinstallation",
14694 "choices":["changed-use"]
14698 from optparse import OptionParser
14699 parser = OptionParser()
# emerge has its own --help action, so drop optparse's builtin one.
14700 if parser.has_option("--help"):
14701 parser.remove_option("--help")
# Register every action and boolean option as a store_true flag.
14703 for action_opt in actions:
14704 parser.add_option("--" + action_opt, action="store_true",
14705 dest=action_opt.replace("-", "_"), default=False)
14706 for myopt in options:
14707 parser.add_option(myopt, action="store_true",
# NOTE(review): lstrip("--") strips any leading '-' characters (a char
# set, not a literal prefix) — harmless for these option names.
14708 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14709 for shortopt, longopt in shortmapping.iteritems():
14710 parser.add_option("-" + shortopt, action="store_true",
14711 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14712 for myalias, myopt in longopt_aliases.iteritems():
14713 parser.add_option(myalias, action="store_true",
14714 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14716 for myopt, kwargs in argument_options.iteritems():
14717 parser.add_option(myopt,
14718 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Pre-process argv so optional option values work (see insert_optional_args).
14720 tmpcmdline = insert_optional_args(tmpcmdline)
14722 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# "--jobs" with no value was expanded to the placeholder string "True",
# meaning an unlimited job count.
14726 if myoptions.jobs == "True":
14730 jobs = int(myoptions.jobs)
14734 if jobs is not True and \
14738 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14739 (myoptions.jobs,), noiselevel=-1)
14741 myoptions.jobs = jobs
# Validate --load-average; non-positive values disable the limit.
14743 if myoptions.load_average:
14745 load_average = float(myoptions.load_average)
14749 if load_average <= 0.0:
14750 load_average = None
14752 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14753 (myoptions.load_average,), noiselevel=-1)
14755 myoptions.load_average = load_average
# Copy parsed flags back into the myopts dict keyed by option string.
14757 for myopt in options:
14758 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14760 myopts[myopt] = True
14762 for myopt in argument_options:
14763 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# Exactly one action may be selected; a second one is a fatal usage error.
14767 for action_opt in actions:
14768 v = getattr(myoptions, action_opt.replace("-", "_"))
14771 multiple_actions(myaction, action_opt)
14773 myaction = action_opt
14777 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config validation on every root's vartree settings."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""
	Drop the in-memory caches held by the port/bin/var db objects for
	every root, plus portage's global directory-listing cache, to free
	memory before a large merge operation.
	"""
	for root_trees in trees.values():
		porttree_db = root_trees["porttree"].dbapi
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db = root_trees["bintree"].dbapi
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""
	Create (or refresh) the portage trees, attach a RootConfig to each
	root, and open the mtime database.

	@param trees: an existing trees dict to refresh in place, or None
	@return: (settings, trees, mtimedb)
	"""
	kwargs = {}
	# Honor PORTAGE_CONFIGROOT / ROOT from the environment when non-empty.
	for opt_name, envvar in (("config_root", "PORTAGE_CONFIGROOT"),
		("target_root", "ROOT")):
		value = os.environ.get(envvar, None)
		if value and value.strip():
			kwargs[opt_name] = value
	trees = portage.create_trees(trees=trees, **kwargs)

	for root, root_trees in trees.items():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	# Default to the "/" root's settings, but prefer the settings of a
	# non-"/" target root when one exists.
	# NOTE(review): the non-"/" preference loop is reconstructed from
	# missing lines in this listing — confirm against upstream.
	settings = trees["/"]["vartree"].settings
	for myroot in trees:
		if myroot != "/":
			settings = trees[myroot]["vartree"].settings
			break

	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	return settings, trees, mtimedb
# NOTE(review): this listing is gappy — the `try:` headers and some section
# defaults (e.g. the initial CLEAN_DELAY / PORTAGE_DEBUG values) are missing
# between the numbered lines below; do not assume contiguity.
14819 def adjust_config(myopts, settings):
14820 """Make emerge specific adjustments to the config."""
# Normalize case-insensitive yes/no style variables.
14822 # To enhance usability, make some vars case insensitive by forcing them to
14824 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14825 if myvar in settings:
14826 settings[myvar] = settings[myvar].lower()
14827 settings.backup_changes(myvar)
14830 # Kill noauto as it will break merges otherwise.
14831 if "noauto" in settings.features:
14832 while "noauto" in settings.features:
14833 settings.features.remove("noauto")
14834 settings["FEATURES"] = " ".join(settings.features)
14835 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY, falling back to the built-in default on bad input.
14839 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14840 except ValueError, e:
14841 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14842 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14843 settings["CLEAN_DELAY"], noiselevel=-1)
14844 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14845 settings.backup_changes("CLEAN_DELAY")
# Same pattern for EMERGE_WARNING_DELAY (default 10 seconds).
14847 EMERGE_WARNING_DELAY = 10
14849 EMERGE_WARNING_DELAY = int(settings.get(
14850 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14851 except ValueError, e:
14852 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14853 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14854 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14855 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14856 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line verbosity flags into the config environment.
14858 if "--quiet" in myopts:
14859 settings["PORTAGE_QUIET"]="1"
14860 settings.backup_changes("PORTAGE_QUIET")
14862 if "--verbose" in myopts:
14863 settings["PORTAGE_VERBOSE"] = "1"
14864 settings.backup_changes("PORTAGE_VERBOSE")
14866 # Set so that configs will be merged regardless of remembered status
14867 if ("--noconfmem" in myopts):
14868 settings["NOCONFMEM"]="1"
14869 settings.backup_changes("NOCONFMEM")
14871 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must parse as 0 or 1; anything else is reported and ignored.
14874 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14875 if PORTAGE_DEBUG not in (0, 1):
14876 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14877 PORTAGE_DEBUG, noiselevel=-1)
14878 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14881 except ValueError, e:
14882 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14883 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14884 settings["PORTAGE_DEBUG"], noiselevel=-1)
# --debug on the command line forces debug mode on.
14886 if "--debug" in myopts:
14888 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14889 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR setting first, then the explicit --color
# option, then a non-tty stdout auto-disables color.
14891 if settings.get("NOCOLOR") not in ("yes","true"):
14892 portage.output.havecolor = 1
14894 """The explicit --color < y | n > option overrides the NOCOLOR environment
14895 variable and stdout auto-detection."""
14896 if "--color" in myopts:
14897 if "y" == myopts["--color"]:
14898 portage.output.havecolor = 1
14899 settings["NOCOLOR"] = "false"
14901 portage.output.havecolor = 0
14902 settings["NOCOLOR"] = "true"
14903 settings.backup_changes("NOCOLOR")
14904 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14905 portage.output.havecolor = 0
14906 settings["NOCOLOR"] = "true"
14907 settings.backup_changes("NOCOLOR")
# Apply process scheduling priorities from the config.
# NOTE(review): the body is not visible in this listing; it presumably
# delegates to the nice() and ionice() helpers defined below — confirm.
14909 def apply_priorities(settings):
def nice(settings):
	"""Renice the current process according to PORTAGE_NICENESS (default 0)."""
	try:
		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError) as e:
		# Report but otherwise ignore failures (e.g. insufficient
		# privileges or a non-numeric setting).
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""
	Run PORTAGE_IONICE_COMMAND (with ${PID} substituted by the current
	process id) to adjust io priority. A missing/empty setting or an
	unsupported kernel is silently ignored; a nonzero exit status is
	reported via eerror.
	"""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	if ionice_cmd:
		ionice_cmd = shlex.split(ionice_cmd)
	if not ionice_cmd:
		# Nothing configured; leave io priority untouched.
		return

	from portage.util import varexpand
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(token, mydict=variables) for token in ionice_cmd]

	try:
		rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.
		return

	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	"""Log an error saying *set_name* does not exist and list known sets."""
	lines = []
	lines.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	lines.append("")

	for s in sorted(root_config.sets):
		# NOTE(review): the indent inside this listing is collapsed; the
		# four-space prefix is reconstructed — confirm against upstream.
		lines.append("    %s" % s)
	lines.append("")

	writemsg_level("".join("%s\n" % l for l in lines),
		level=logging.ERROR, noiselevel=-1)
# NOTE(review): this listing is gappy — loop headers, accumulator inits
# (newargs, missing_sets, expr_sets/expr_ops, retval) and several branch
# lines are missing between the numbered lines below; do not assume
# contiguity.
14961 def expand_set_arguments(myfiles, myaction, root_config):
# Expand @set arguments (including set operator expressions and per-set
# options) into concrete atoms, returning (newargs, retval).
14963 setconfig = root_config.setconfig
14965 sets = setconfig.getSets()
14967 # In order to know exactly which atoms/sets should be added to the
14968 # world file, the depgraph performs set expansion later. It will get
14969 # confused about where the atoms came from if it's not allowed to
14970 # expand them itself.
14971 do_not_expand = (None, )
# Legacy bare "system"/"world" arguments are promoted to set syntax.
14974 if a in ("system", "world"):
14975 newargs.append(SETPREFIX+a)
14982 # separators for set arguments
14986 # WARNING: all operators must be of equal length
14988 DIFF_OPERATOR = "-@"
14989 UNION_OPERATOR = "+@"
# First pass: pull "@set(opt=val,...)" style per-set options out of the
# argument and push them into the set config.
14991 for i in range(0, len(myfiles)):
14992 if myfiles[i].startswith(SETPREFIX):
14995 x = myfiles[i][len(SETPREFIX):]
14998 start = x.find(ARG_START)
14999 end = x.find(ARG_END)
15000 if start > 0 and start < end:
15001 namepart = x[:start]
15002 argpart = x[start+1:end]
15004 # TODO: implement proper quoting
15005 args = argpart.split(",")
15009 k, v = a.split("=", 1)
# A bare option name (no "=") is treated as a boolean flag.
15012 options[a] = "True"
15013 setconfig.update(namepart, options)
15014 newset += (x[:start-len(namepart)]+namepart)
15015 x = x[end+len(ARG_END):]
15019 myfiles[i] = SETPREFIX+newset
# Reload sets since the option updates above may have changed them.
15021 sets = setconfig.getSets()
15023 # display errors that occured while loading the SetConfig instance
15024 for e in setconfig.errors:
15025 print colorize("BAD", "Error during set creation: %s" % e)
15027 # emerge relies on the existance of sets with names "world" and "system"
15028 required_sets = ("world", "system")
15031 for s in required_sets:
15033 missing_sets.append(s)
# Build a human-readable list ('"a"', '"a" and "b"', '"a", ..., and "z"').
15035 if len(missing_sets) > 2:
15036 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15037 missing_sets_str += ', and "%s"' % missing_sets[-1]
15038 elif len(missing_sets) == 2:
15039 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15041 missing_sets_str = '"%s"' % missing_sets[-1]
15042 msg = ["emerge: incomplete set configuration, " + \
15043 "missing set(s): %s" % missing_sets_str]
15045 msg.append(" sets defined: %s" % ", ".join(sets))
15046 msg.append(" This usually means that '%s'" % \
15047 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15048 msg.append(" is missing or corrupt.")
15050 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
# Only these actions require the named sets to support "unmerge".
15052 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15055 if a.startswith(SETPREFIX):
15056 # support simple set operations (intersection, difference and union)
15057 # on the commandline. Expressions are evaluated strictly left-to-right
15058 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15059 expression = a[len(SETPREFIX):]
# Peel operators off right-to-left so evaluation order ends up
# left-to-right once the collected lists are walked below.
15062 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15063 is_pos = expression.rfind(IS_OPERATOR)
15064 diff_pos = expression.rfind(DIFF_OPERATOR)
15065 union_pos = expression.rfind(UNION_OPERATOR)
15066 op_pos = max(is_pos, diff_pos, union_pos)
15067 s1 = expression[:op_pos]
15068 s2 = expression[op_pos+len(IS_OPERATOR):]
# All operators are the same length, so IS_OPERATOR's length works here.
15069 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15071 display_missing_pkg_set(root_config, s2)
15073 expr_sets.insert(0, s2)
15074 expr_ops.insert(0, op)
15076 if not expression in sets:
15077 display_missing_pkg_set(root_config, expression)
15079 expr_sets.insert(0, expression)
15080 result = set(setconfig.getSetAtoms(expression))
15081 for i in range(0, len(expr_ops)):
15082 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15083 if expr_ops[i] == IS_OPERATOR:
15084 result.intersection_update(s2)
15085 elif expr_ops[i] == DIFF_OPERATOR:
15086 result.difference_update(s2)
15087 elif expr_ops[i] == UNION_OPERATOR:
15090 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15091 newargs.extend(result)
# Plain "@set" argument (no operators).
15093 s = a[len(SETPREFIX):]
15095 display_missing_pkg_set(root_config, s)
15097 setconfig.active.append(s)
15099 set_atoms = setconfig.getSetAtoms(s)
15100 except portage.exception.PackageSetNotFound, e:
15101 writemsg_level(("emerge: the given set '%s' " + \
15102 "contains a non-existent set named '%s'.\n") % \
15103 (s, e), level=logging.ERROR, noiselevel=-1)
15105 if myaction in unmerge_actions and \
15106 not sets[s].supportsOperation("unmerge"):
15107 sys.stderr.write("emerge: the given set '%s' does " % s + \
15108 "not support unmerge operations\n")
15110 elif not set_atoms:
15111 print "emerge: '%s' is an empty set" % s
# For actions in do_not_expand, keep the @set token so the depgraph can
# expand it itself (see comment near the top of this function).
15112 elif myaction not in do_not_expand:
15113 newargs.extend(set_atoms)
15115 newargs.append(SETPREFIX+s)
15116 for e in sets[s].errors:
15120 return (newargs, retval)
def repo_name_check(trees):
	"""
	Warn about overlays that lack a profiles/repo_name entry.

	@return: True when at least one repository is missing a repo_name
	"""
	missing_repo_names = set()
	for root, root_trees in trees.items():
		if "porttree" not in root_trees:
			continue
		portdb = root_trees["porttree"].dbapi
		# Start from all trees, then discard those that have a name.
		missing_repo_names.update(portdb.porttrees)
		for repo in portdb.getRepositories():
			missing_repo_names.discard(portdb.getRepositoryPath(repo))
		if portdb.porttree_root in missing_repo_names and \
			not os.path.exists(os.path.join(
			portdb.porttree_root, "profiles")):
			# This is normal if $PORTDIR happens to be empty,
			# so don't warn about it.
			missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		msg = []
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		msg.append("")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.append("")
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	"""Warn, per root, when CONFIG_PROTECT is unset or empty."""
	for root, root_trees in trees.items():
		if root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			continue
		msg = "!!! CONFIG_PROTECT is empty"
		# NOTE(review): the root != "/" qualifier is reconstructed from a
		# missing line in this listing — confirm against upstream.
		if root != "/":
			msg += " for '%s'" % root
		writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""
	Tell the user that the short name *arg* matched several packages and
	list the fully-qualified alternatives (with search-style detail
	unless --quiet is in effect).
	"""
	if "--quiet" in myopts:
		print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
		print("!!! one of the following fully-qualified ebuild names instead:\n")
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print("    " + colorize("INFORM", cp))
		return

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	# NOTE(review): the argument to insert_category_into_atom and the
	# s.addCP/s.output calls below are reconstructed from missing lines
	# in this listing — confirm against upstream.
	null_cp = portage.dep_getkey(insert_category_into_atom(
		arg, "null"))
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
		s.addCP(cp)
	s.output()
	print("!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg)
	print("!!! one of the above fully-qualified ebuild names instead.\n")
def profile_check(trees, myaction, myopts):
	"""
	Ensure every root has a valid profile before proceeding. Actions
	that must keep working with a broken profile (info, sync, --help,
	--version) are exempt.

	@return: os.EX_OK when emerge may proceed, 1 otherwise
	"""
	if myaction in ("info", "sync"):
		return os.EX_OK
	if "--version" in myopts or "--help" in myopts:
		return os.EX_OK
	for root, root_trees in trees.items():
		if root_trees["root_config"].settings.profiles:
			continue
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
		return 1
	return os.EX_OK
15204 global portage # NFC why this is necessary now - genone
15205 portage._disable_legacy_globals()
15206 # Disable color until we're sure that it should be enabled (after
15207 # EMERGE_DEFAULT_OPTS has been parsed).
15208 portage.output.havecolor = 0
15209 # This first pass is just for options that need to be known as early as
15210 # possible, such as --config-root. They will be parsed again later,
15211 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15212 # the value of --config-root).
15213 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15214 if "--debug" in myopts:
15215 os.environ["PORTAGE_DEBUG"] = "1"
15216 if "--config-root" in myopts:
15217 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15219 # Portage needs to ensure a sane umask for the files it creates.
15221 settings, trees, mtimedb = load_emerge_config()
15222 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15223 rval = profile_check(trees, myaction, myopts)
15224 if rval != os.EX_OK:
15227 if portage._global_updates(trees, mtimedb["updates"]):
15229 # Reload the whole config from scratch.
15230 settings, trees, mtimedb = load_emerge_config(trees=trees)
15231 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15233 xterm_titles = "notitles" not in settings.features
15236 if "--ignore-default-opts" not in myopts:
15237 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15238 tmpcmdline.extend(sys.argv[1:])
15239 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15241 if "--digest" in myopts:
15242 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15243 # Reload the whole config from scratch so that the portdbapi internal
15244 # config is updated with new FEATURES.
15245 settings, trees, mtimedb = load_emerge_config(trees=trees)
15246 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15248 for myroot in trees:
15249 mysettings = trees[myroot]["vartree"].settings
15250 mysettings.unlock()
15251 adjust_config(myopts, mysettings)
15252 if "--pretend" not in myopts:
15253 mysettings["PORTAGE_COUNTER_HASH"] = \
15254 trees[myroot]["vartree"].dbapi._counter_hash()
15255 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15257 del myroot, mysettings
15259 apply_priorities(settings)
15261 spinner = stdout_spinner()
15262 if "candy" in settings.features:
15263 spinner.update = spinner.update_scroll
15265 if "--quiet" not in myopts:
15266 portage.deprecated_profile_check(settings=settings)
15267 repo_name_check(trees)
15268 config_protect_check(trees)
15270 eclasses_overridden = {}
15271 for mytrees in trees.itervalues():
15272 mydb = mytrees["porttree"].dbapi
15273 # Freeze the portdbapi for performance (memoize all xmatch results).
15275 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15278 if eclasses_overridden and \
15279 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15280 prefix = bad(" * ")
15281 if len(eclasses_overridden) == 1:
15282 writemsg(prefix + "Overlay eclass overrides " + \
15283 "eclass from PORTDIR:\n", noiselevel=-1)
15285 writemsg(prefix + "Overlay eclasses override " + \
15286 "eclasses from PORTDIR:\n", noiselevel=-1)
15287 writemsg(prefix + "\n", noiselevel=-1)
15288 for eclass_name in sorted(eclasses_overridden):
15289 writemsg(prefix + " '%s/%s.eclass'\n" % \
15290 (eclasses_overridden[eclass_name], eclass_name),
15292 writemsg(prefix + "\n", noiselevel=-1)
15293 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15294 "because it will trigger invalidation of cached ebuild metadata " + \
15295 "that is distributed with the portage tree. If you must " + \
15296 "override eclasses from PORTDIR then you are advised to add " + \
15297 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15298 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15299 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15300 "you would like to disable this warning."
15301 from textwrap import wrap
15302 for line in wrap(msg, 72):
15303 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15305 if "moo" in myfiles:
15308 Larry loves Gentoo (""" + platform.system() + """)
15310 _______________________
15311 < Have you mooed today? >
15312 -----------------------
15322 ext = os.path.splitext(x)[1]
15323 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15324 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15327 root_config = trees[settings["ROOT"]]["root_config"]
15328 if myaction == "list-sets":
15329 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15333 # only expand sets for actions taking package arguments
15334 oldargs = myfiles[:]
15335 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15336 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15337 if retval != os.EX_OK:
15340 # Need to handle empty sets specially, otherwise emerge will react
15341 # with the help message for empty argument lists
15342 if oldargs and not myfiles:
15343 print "emerge: no targets left after set expansion"
15346 if ("--tree" in myopts) and ("--columns" in myopts):
15347 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15350 if ("--quiet" in myopts):
15351 spinner.update = spinner.update_quiet
15352 portage.util.noiselimit = -1
15354 # Always create packages if FEATURES=buildpkg
15355 # Imply --buildpkg if --buildpkgonly
15356 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15357 if "--buildpkg" not in myopts:
15358 myopts["--buildpkg"] = True
15360 # Also allow -S to invoke search action (-sS)
15361 if ("--searchdesc" in myopts):
15362 if myaction and myaction != "search":
15363 myfiles.append(myaction)
15364 if "--search" not in myopts:
15365 myopts["--search"] = True
15366 myaction = "search"
15368 # Always try and fetch binary packages if FEATURES=getbinpkg
15369 if ("getbinpkg" in settings.features):
15370 myopts["--getbinpkg"] = True
15372 if "--buildpkgonly" in myopts:
15373 # --buildpkgonly will not merge anything, so
15374 # it cancels all binary package options.
15375 for opt in ("--getbinpkg", "--getbinpkgonly",
15376 "--usepkg", "--usepkgonly"):
15377 myopts.pop(opt, None)
15379 if "--fetch-all-uri" in myopts:
15380 myopts["--fetchonly"] = True
15382 if "--skipfirst" in myopts and "--resume" not in myopts:
15383 myopts["--resume"] = True
15385 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15386 myopts["--usepkgonly"] = True
15388 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15389 myopts["--getbinpkg"] = True
15391 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15392 myopts["--usepkg"] = True
15394 # Also allow -K to apply --usepkg/-k
15395 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15396 myopts["--usepkg"] = True
15398 # Allow -p to remove --ask
15399 if ("--pretend" in myopts) and ("--ask" in myopts):
15400 print ">>> --pretend disables --ask... removing --ask from options."
15401 del myopts["--ask"]
15403 # forbid --ask when not in a terminal
15404 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15405 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15406 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15410 if settings.get("PORTAGE_DEBUG", "") == "1":
15411 spinner.update = spinner.update_quiet
15413 if "python-trace" in settings.features:
15414 import portage.debug
15415 portage.debug.set_trace(True)
15417 if not ("--quiet" in myopts):
15418 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15419 spinner.update = spinner.update_basic
15421 if "--version" in myopts:
15422 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15423 settings.profile_path, settings["CHOST"],
15424 trees[settings["ROOT"]]["vartree"].dbapi)
15426 elif "--help" in myopts:
15427 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15430 if "--debug" in myopts:
15431 print "myaction", myaction
15432 print "myopts", myopts
15434 if not myaction and not myfiles and "--resume" not in myopts:
15435 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15438 pretend = "--pretend" in myopts
15439 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15440 buildpkgonly = "--buildpkgonly" in myopts
15442 # check if root user is the current user for the actions where emerge needs this
15443 if portage.secpass < 2:
15444 # We've already allowed "--version" and "--help" above.
15445 if "--pretend" not in myopts and myaction not in ("search","info"):
15446 need_superuser = not \
15448 (buildpkgonly and secpass >= 1) or \
15449 myaction in ("metadata", "regen") or \
15450 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15451 if portage.secpass < 1 or \
15454 access_desc = "superuser"
15456 access_desc = "portage group"
15457 # Always show portage_group_warning() when only portage group
15458 # access is required but the user is not in the portage group.
15459 from portage.data import portage_group_warning
15460 if "--ask" in myopts:
15461 myopts["--pretend"] = True
15462 del myopts["--ask"]
15463 print ("%s access is required... " + \
15464 "adding --pretend to options.\n") % access_desc
15465 if portage.secpass < 1 and not need_superuser:
15466 portage_group_warning()
15468 sys.stderr.write(("emerge: %s access is " + \
15469 "required.\n\n") % access_desc)
15470 if portage.secpass < 1 and not need_superuser:
15471 portage_group_warning()
15474 disable_emergelog = False
15475 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15477 disable_emergelog = True
15479 if myaction in ("search", "info"):
15480 disable_emergelog = True
15481 if disable_emergelog:
15482 """ Disable emergelog for everything except build or unmerge
15483 operations. This helps minimize parallel emerge.log entries that can
15484 confuse log parsers. We especially want it disabled during
15485 parallel-fetch, which uses --resume --fetchonly."""
15487 def emergelog(*pargs, **kargs):
15490 if not "--pretend" in myopts:
15491 emergelog(xterm_titles, "Started emerge on: "+\
15492 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15495 myelogstr=" ".join(myopts)
15497 myelogstr+=" "+myaction
15499 myelogstr += " " + " ".join(oldargs)
15500 emergelog(xterm_titles, " *** emerge " + myelogstr)
15503 def emergeexitsig(signum, frame):
15504 signal.signal(signal.SIGINT, signal.SIG_IGN)
15505 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15506 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15507 sys.exit(100+signum)
15508 signal.signal(signal.SIGINT, emergeexitsig)
15509 signal.signal(signal.SIGTERM, emergeexitsig)
15512 """This gets out final log message in before we quit."""
15513 if "--pretend" not in myopts:
15514 emergelog(xterm_titles, " *** terminating.")
15515 if "notitles" not in settings.features:
15517 portage.atexit_register(emergeexit)
15519 if myaction in ("config", "metadata", "regen", "sync"):
15520 if "--pretend" in myopts:
15521 sys.stderr.write(("emerge: The '%s' action does " + \
15522 "not support '--pretend'.\n") % myaction)
15525 if "sync" == myaction:
15526 return action_sync(settings, trees, mtimedb, myopts, myaction)
15527 elif "metadata" == myaction:
15528 action_metadata(settings, portdb, myopts)
15529 elif myaction=="regen":
15530 validate_ebuild_environment(trees)
15531 return action_regen(settings, portdb, myopts.get("--jobs"),
15532 myopts.get("--load-average"))
15534 elif "config"==myaction:
15535 validate_ebuild_environment(trees)
15536 action_config(settings, trees, myopts, myfiles)
15539 elif "search"==myaction:
15540 validate_ebuild_environment(trees)
15541 action_search(trees[settings["ROOT"]]["root_config"],
15542 myopts, myfiles, spinner)
15543 elif myaction in ("clean", "unmerge") or \
15544 (myaction == "prune" and "--nodeps" in myopts):
15545 validate_ebuild_environment(trees)
15547 # Ensure atoms are valid before calling unmerge().
15548 # For backward compat, leading '=' is not required.
15550 if is_valid_package_atom(x) or \
15551 is_valid_package_atom("=" + x):
15554 msg.append("'%s' is not a valid package atom." % (x,))
15555 msg.append("Please check ebuild(5) for full details.")
15556 writemsg_level("".join("!!! %s\n" % line for line in msg),
15557 level=logging.ERROR, noiselevel=-1)
15560 # When given a list of atoms, unmerge
15561 # them in the order given.
15562 ordered = myaction == "unmerge"
15563 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15564 mtimedb["ldpath"], ordered=ordered):
15565 if not (buildpkgonly or fetchonly or pretend):
15566 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15568 elif myaction in ("depclean", "info", "prune"):
15570 # Ensure atoms are valid before calling unmerge().
15571 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15574 if is_valid_package_atom(x):
15576 valid_atoms.append(
15577 portage.dep_expand(x, mydb=vardb, settings=settings))
15578 except portage.exception.AmbiguousPackageName, e:
15579 msg = "The short ebuild name \"" + x + \
15580 "\" is ambiguous. Please specify " + \
15581 "one of the following " + \
15582 "fully-qualified ebuild names instead:"
15583 for line in textwrap.wrap(msg, 70):
15584 writemsg_level("!!! %s\n" % (line,),
15585 level=logging.ERROR, noiselevel=-1)
15587 writemsg_level(" %s\n" % colorize("INFORM", i),
15588 level=logging.ERROR, noiselevel=-1)
15589 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15593 msg.append("'%s' is not a valid package atom." % (x,))
15594 msg.append("Please check ebuild(5) for full details.")
15595 writemsg_level("".join("!!! %s\n" % line for line in msg),
15596 level=logging.ERROR, noiselevel=-1)
15599 if myaction == "info":
15600 return action_info(settings, trees, myopts, valid_atoms)
15602 validate_ebuild_environment(trees)
15603 action_depclean(settings, trees, mtimedb["ldpath"],
15604 myopts, myaction, valid_atoms, spinner)
15605 if not (buildpkgonly or fetchonly or pretend):
15606 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15607 # "update", "system", or just process files:
15609 validate_ebuild_environment(trees)
15610 if "--pretend" not in myopts:
15611 display_news_notification(root_config, myopts)
15612 retval = action_build(settings, trees, mtimedb,
15613 myopts, myaction, myfiles, spinner)
15614 root_config = trees[settings["ROOT"]]["root_config"]
15615 post_emerge(root_config, myopts, mtimedb, retval)