2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time, types
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
59 from UserDict import DictMixin
62 import cPickle as pickle
67 import cStringIO as StringIO
# NOTE(review): this listing is a sampled dump — each line keeps its original
# file line number and gaps in that numbering mark elided lines. Code is left
# byte-identical; only comments/docstring text are touched.
# Terminal progress spinner with three display modes (basic dots, scrolling
# message, twirling cursor) plus a quiet no-op mode. A scroll message is
# picked pseudo-randomly from the current time at construction.
71 class stdout_spinner(object):
73 "Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# __init__ (header elided): default update mode is the twirl spinner; the
# scroll message is selected from the table above via the current time.
96 self.update = self.update_twirl
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (rate-limits tty writes).
100 self.min_display_latency = 0.05
102 def _return_early(self):
# Docstring continues below (opening quotes elided in this dump).
104 Flushing output to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
108 cur_time = time.time()
109 if cur_time - self.last_update < self.min_display_latency:
# (elided: return True here when throttled — TODO confirm against original)
111 self.last_update = cur_time
114 def update_basic(self):
# Emit a dot every 100th call, restarting the dot group at wrap-around.
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
125 def update_scroll(self):
# Scrolls the chosen message left then right using backspace control chars.
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
136 def update_twirl(self):
# Classic /-\| twirl; position advances even when output is throttled.
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
143 def update_quiet(self):
# Interactive yes/no style prompt. Loops (loop header elided) reading a line
# and matching it as a case-insensitive prefix of one of the responses.
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
# Default colour functions come from the user's color configuration.
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
# Cycle the colour list so it is exactly as long as the response list.
172 colours=(colours*len(responses))[:len(responses)]
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
# Ctrl-C / EOF is converted to a clean exit (handler body elided).
182 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary tables for emerge:
#   actions      - the non-merge action verbs (emerge <action>)
#   options      - (name elided) the long --flags accepted
#   shortmapping - (name elided) single-letter flag -> long flag aliases
# Entries between the visible lines were elided in this dump.
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
205 "--newuse", "--nocolor",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--searchdesc", "--selective",
214 "--usepkg", "--usepkgonly",
215 "--verbose", "--version"
221 "b":"--buildpkg", "B":"--buildpkgonly",
222 "c":"--clean", "C":"--unmerge",
223 "d":"--debug", "D":"--deep",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "k":"--usepkg", "K":"--usepkgonly",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps", "O":"--nodeps",
232 "p":"--pretend", "P":"--prune",
234 "s":"--search", "S":"--searchdesc",
237 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log, optionally mirroring a
# short form of the message to the xterm title bar. The log file is locked
# while writing; any IOError/OSError/PortageException is reported to stderr
# rather than raised (best-effort logging).
240 def emergelog(xterm_titles, mystr, short_msg=None):
241 if xterm_titles and short_msg:
242 if "HOSTNAME" in os.environ:
243 short_msg = os.environ["HOSTNAME"]+": "+short_msg
244 xtermTitle(short_msg)
246 file_path = "/var/log/emerge.log"
247 mylogfile = open(file_path, "a")
# Keep the log owned by the portage user/group regardless of who runs emerge.
248 portage.util.apply_secpass_permissions(file_path,
249 uid=portage.portage_uid, gid=portage.portage_gid,
253 mylock = portage.locks.lockfile(mylogfile)
254 # seek because we may have gotten held up by the lock.
255 # if so, we may not be positioned at the end of the file.
# Timestamp truncated to whole seconds ([:10]).
257 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
261 portage.locks.unlockfile(mylock)
263 except (IOError,OSError,portage.exception.PortageException), e:
265 print >> sys.stderr, "emergelog():",e
# Print a visible countdown of `secs` seconds before `doing` (e.g. a merge)
# starts, giving the user a window to Ctrl-C. Loop body elided in this dump.
267 def countdown(secs=5, doing="Starting"):
269 print ">>> Waiting",secs,"seconds before starting..."
270 print ">>> (Control-C to abort)...\n"+doing+" in: ",
274 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
279 # formats a size given in bytes nicely
# Returns a human-readable kB string; non-integer input handling and the
# thousands-separator loop are partially elided in this dump.
280 def format_size(mysize):
281 if type(mysize) not in [types.IntType,types.LongType]:
283 if 0 != mysize % 1024:
284 # Always round up to the next kB so that it doesn't show 0 kB when
285 # some small file still needs to be fetched.
286 mysize += 1024 - mysize % 1024
287 mystr=str(mysize/1024)
# Insert a thousands separator (surrounding loop elided).
291 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying progressively less specific
# sources: gcc-config, then ${CHOST}-gcc -dumpversion, then plain gcc.
# Returns a "gcc-X.Y.Z" string, or "[unavailable]" after warning the user.
295 def getgccversion(chost):
298 return: the current in-use gcc version
301 gcc_ver_command = 'gcc -dumpversion'
302 gcc_ver_prefix = 'gcc-'
304 gcc_not_found_error = red(
305 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306 "!!! to update the environment of this terminal and possibly\n" +
307 "!!! other terminals also.\n"
# 1) Preferred: ask gcc-config for the current profile (chost-version).
310 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# 2) Fall back to the CHOST-prefixed compiler.
314 mystatus, myoutput = commands.getstatusoutput(
315 chost + "-" + gcc_ver_command)
316 if mystatus == os.EX_OK:
317 return gcc_ver_prefix + myoutput
# 3) Last resort: whatever `gcc` is on PATH.
319 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320 if mystatus == os.EX_OK:
321 return gcc_ver_prefix + myoutput
323 portage.writemsg(gcc_not_found_error, noiselevel=-1)
324 return "[unavailable]"
# Build the one-line banner string shown by `emerge --version`:
# "Portage <ver> (<profile>, <gcc>, <libc>, <kernel> <arch>)".
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327 profilever = "unavailable"
# Report the profile as a path relative to ${PORTDIR}/profiles when possible,
# otherwise as the raw symlink target prefixed with "!".
329 realpath = os.path.realpath(profile)
330 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
331 if realpath.startswith(basepath):
332 profilever = realpath[1 + len(basepath):]
335 profilever = "!" + os.readlink(profile)
338 del realpath, basepath
# Collect installed libc versions from both virtual/libc and virtual/glibc.
341 libclist = vardb.match("virtual/libc")
342 libclist += vardb.match("virtual/glibc")
343 libclist = portage.util.unique_array(libclist)
345 xs=portage.catpkgsplit(x)
347 libcver+=","+"-".join(xs[1:])
349 libcver="-".join(xs[1:])
351 libcver="unavailable"
353 gccver = getgccversion(chost)
354 unameout=platform.release()+" "+platform.machine()
356 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate command-line options/action into the depgraph parameter set.
# Returns a set of flag strings consumed by the depgraph engine.
358 def create_depgraph_params(myopts, myaction):
359 #configure emerge engine parameters
361 # self: include _this_ package regardless of if it is merged.
362 # selective: exclude the package if it is merged
363 # recurse: go into the dependencies
364 # deep: go into the dependencies of already merged packages
365 # empty: pretend nothing is merged
366 # complete: completely account for all known dependencies
367 # remove: build graph for use in removing packages
368 myparams = set(["recurse"])
370 if myaction == "remove":
371 myparams.add("remove")
372 myparams.add("complete")
# (early return for the remove action elided)
375 if "--update" in myopts or \
376 "--newuse" in myopts or \
377 "--reinstall" in myopts or \
378 "--noreplace" in myopts:
379 myparams.add("selective")
# --emptytree overrides selective: everything is rebuilt.
380 if "--emptytree" in myopts:
381 myparams.add("empty")
382 myparams.discard("selective")
383 if "--nodeps" in myopts:
384 myparams.discard("recurse")
385 if "--deep" in myopts:
387 if "--complete-graph" in myopts:
388 myparams.add("complete")
391 # search functionality
# emerge --search implementation. Wraps the portage/binary/installed package
# databases behind a fake portdb facade so one search path can query all of
# them, then matches package names (and optionally DESCRIPTIONs and package
# sets) against a search key, and pretty-prints the results.
392 class search(object):
403 def __init__(self, root_config, spinner, searchdesc,
404 verbose, usepkg, usepkgonly):
405 """Searches the available and installed packages for the supplied search key.
406 The list of available and installed packages is created at object instantiation.
407 This makes successive searches faster."""
408 self.settings = root_config.settings
409 self.vartree = root_config.trees["vartree"]
410 self.spinner = spinner
411 self.verbose = verbose
412 self.searchdesc = searchdesc
413 self.root_config = root_config
414 self.setconfig = root_config.setconfig
415 self.matches = {"pkg" : []}
# The fake portdb delegates the dbapi methods below to our _-prefixed
# multiplexing implementations.
420 self.portdb = fake_portdb
421 for attrib in ("aux_get", "cp_all",
422 "xmatch", "findname", "getFetchMap"):
423 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
# Build the ordered list of databases to consult (self._dbs, init elided):
# portdb unless --usepkgonly, bindb for --usepkg/--usepkgonly, vardb always.
427 portdb = root_config.trees["porttree"].dbapi
428 bindb = root_config.trees["bintree"].dbapi
429 vardb = root_config.trees["vartree"].dbapi
431 if not usepkgonly and portdb._have_root_eclass_dir:
432 self._dbs.append(portdb)
434 if (usepkg or usepkgonly) and bindb.cp_all():
435 self._dbs.append(bindb)
437 self._dbs.append(vardb)
438 self._portdb = portdb
# _cp_all (header elided): union of cp_all() over all dbs, sorted.
443 cp_all.update(db.cp_all())
444 return list(sorted(cp_all))
446 def _aux_get(self, *args, **kwargs):
# Try each db in order; first successful aux_get wins (loop/except elided).
449 return db.aux_get(*args, **kwargs)
454 def _findname(self, *args, **kwargs):
456 if db is not self._portdb:
457 # We don't want findname to return anything
458 # unless it's an ebuild in a portage tree.
459 # Otherwise, it's already built and we don't
462 func = getattr(db, "findname", None)
464 value = func(*args, **kwargs)
469 def _getFetchMap(self, *args, **kwargs):
471 func = getattr(db, "getFetchMap", None)
473 value = func(*args, **kwargs)
478 def _visible(self, db, cpv, metadata):
# Visibility check via the shared visible() helper; pkg_type is derived
# from which database the cpv came from.
479 installed = db is self.vartree.dbapi
480 built = installed or db is not self._portdb
483 pkg_type = "installed"
486 return visible(self.settings,
487 Package(type_name=pkg_type, root_config=self.root_config,
488 cpv=cpv, built=built, installed=installed, metadata=metadata))
490 def _xmatch(self, level, atom):
# Docstring (opening quotes elided):
492 This method does not expand old-style virtuals because it
493 is restricted to returning matches for a single ${CATEGORY}/${PN}
494 and old-style virtual matches are unreliable for that when querying
495 multiple package databases. If necessary, old-style virtuals
496 can be performed on atoms prior to calling this method.
498 cp = portage.dep_getkey(atom)
499 if level == "match-all":
# Union of matches across all dbs, filtered to the atom's cat/pkg.
502 if hasattr(db, "xmatch"):
503 matches.update(db.xmatch(level, atom))
505 matches.update(db.match(atom))
506 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507 db._cpv_sort_ascending(result)
508 elif level == "match-visible":
# Like match-all, but dbs without xmatch get a per-cpv visibility check.
511 if hasattr(db, "xmatch"):
512 matches.update(db.xmatch(level, atom))
514 db_keys = list(db._aux_cache_keys)
515 for cpv in db.match(atom):
516 metadata = izip(db_keys,
517 db.aux_get(cpv, db_keys))
518 if not self._visible(db, cpv, metadata):
521 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522 db._cpv_sort_ascending(result)
523 elif level == "bestmatch-visible":
# Keep the overall best visible match across all dbs.
526 if hasattr(db, "xmatch"):
527 cpv = db.xmatch("bestmatch-visible", atom)
528 if not cpv or portage.cpv_getkey(cpv) != cp:
530 if not result or cpv == portage.best([cpv, result]):
533 db_keys = Package.metadata_keys
534 # break out of this loop with highest visible
535 # match, checked in descending order
536 for cpv in reversed(db.match(atom)):
537 if portage.cpv_getkey(cpv) != cp:
539 metadata = izip(db_keys,
540 db.aux_get(cpv, db_keys))
541 if not self._visible(db, cpv, metadata):
543 if not result or cpv == portage.best([cpv, result]):
547 raise NotImplementedError(level)
550 def execute(self,searchkey):
551 """Performs the search for the supplied search key"""
553 self.searchkey=searchkey
554 self.packagematches = []
557 self.matches = {"pkg":[], "desc":[], "set":[]}
560 self.matches = {"pkg":[], "set":[]}
561 print "Searching... ",
# Leading '%' forces regex interpretation; leading '@' restricts to sets
# (flag assignments elided); otherwise the key is escaped literally.
564 if self.searchkey.startswith('%'):
566 self.searchkey = self.searchkey[1:]
567 if self.searchkey.startswith('@'):
569 self.searchkey = self.searchkey[1:]
571 self.searchre=re.compile(self.searchkey,re.I)
573 self.searchre=re.compile(re.escape(self.searchkey), re.I)
574 for package in self.portdb.cp_all():
575 self.spinner.update()
# Match against the full cat/pkg or just the pkg name depending on
# whether the key contains a category (condition elided).
578 match_string = package[:]
580 match_string = package.split("/")[-1]
583 if self.searchre.search(match_string):
584 if not self.portdb.xmatch("match-visible", package):
586 self.matches["pkg"].append([package,masked])
587 elif self.searchdesc: # DESCRIPTION searching
588 full_package = self.portdb.xmatch("bestmatch-visible", package)
590 #no match found; we don't want to query description
591 full_package = portage.best(
592 self.portdb.xmatch("match-all", package))
598 full_desc = self.portdb.aux_get(
599 full_package, ["DESCRIPTION"])[0]
601 print "emerge: search: aux_get() failed, skipping"
603 if self.searchre.search(full_desc):
604 self.matches["desc"].append([full_package,masked])
# Package sets are searched by name and, with --searchdesc, by their
# DESCRIPTION metadata.
606 self.sdict = self.setconfig.getSets()
607 for setname in self.sdict:
608 self.spinner.update()
610 match_string = setname
612 match_string = setname.split("/")[-1]
614 if self.searchre.search(match_string):
615 self.matches["set"].append([setname, False])
616 elif self.searchdesc:
617 if self.searchre.search(
618 self.sdict[setname].getMetadata("DESCRIPTION")):
619 self.matches["set"].append([setname, False])
622 for mtype in self.matches:
623 self.matches[mtype].sort()
624 self.mlen += len(self.matches[mtype])
# (helper elided): classify a single cp as visible/masked and record it.
627 if not self.portdb.xmatch("match-all", cp):
630 if not self.portdb.xmatch("bestmatch-visible", cp):
632 self.matches["pkg"].append([cp, masked])
# output() — pretty-print every recorded match, including (with --verbose)
# latest version, install status, download size, homepage and license.
636 """Outputs the results of the search."""
637 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
638 print "[ Applications found : "+white(str(self.mlen))+" ]"
640 vardb = self.vartree.dbapi
641 for mtype in self.matches:
642 for match,masked in self.matches[mtype]:
646 full_package = self.portdb.xmatch(
647 "bestmatch-visible", match)
649 #no match found; we don't want to query description
651 full_package = portage.best(
652 self.portdb.xmatch("match-all",match))
653 elif mtype == "desc":
655 match = portage.cpv_getkey(match)
657 print green("*")+" "+white(match)
658 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
662 desc, homepage, license = self.portdb.aux_get(
663 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665 print "emerge: search: aux_get() failed, skipping"
668 print green("*")+" "+white(match)+" "+red("[ Masked ]")
670 print green("*")+" "+white(match)
671 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
675 mycat = match.split("/")[0]
676 mypkg = match.split("/")[1]
677 mycpv = match + "-" + myversion
678 myebuild = self.portdb.findname(mycpv)
# Size of distfiles comes from the Manifest when an ebuild exists ...
680 pkgdir = os.path.dirname(myebuild)
681 from portage import manifest
682 mf = manifest.Manifest(
683 pkgdir, self.settings["DISTDIR"])
685 uri_map = self.portdb.getFetchMap(mycpv)
686 except portage.exception.InvalidDependString, e:
687 file_size_str = "Unknown (%s)" % (e,)
691 mysum[0] = mf.getDistfilesSize(uri_map)
693 file_size_str = "Unknown (missing " + \
694 "digest for %s)" % (e,)
# ... otherwise from the binary package file size.
699 if db is not vardb and \
700 db.cpv_exists(mycpv):
702 if not myebuild and hasattr(db, "bintree"):
703 myebuild = db.bintree.getname(mycpv)
705 mysum[0] = os.stat(myebuild).st_size
710 if myebuild and file_size_str is None:
# Manual thousands-separator formatting of the kB count.
711 mystr = str(mysum[0] / 1024)
715 mystr = mystr[:mycount] + "," + mystr[mycount:]
716 file_size_str = mystr + " kB"
720 print " ", darkgreen("Latest version available:"),myversion
721 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
724 (darkgreen("Size of files:"), file_size_str)
725 print " ", darkgreen("Homepage:")+" ",homepage
726 print " ", darkgreen("Description:")+" ",desc
727 print " ", darkgreen("License:")+" ",license
732 def getInstallationStatus(self,package):
# Report the best installed version of `package`, or "[ Not Installed ]".
733 installed_package = self.vartree.dep_bestmatch(package)
735 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737 result = darkgreen("Latest version installed:")+" "+version
739 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
742 def getVersion(self,full_package,detail):
# Extract "version[-rN]" from a full cpv; r0 revisions are suppressed.
743 if len(full_package) > 1:
744 package_parts = portage.catpkgsplit(full_package)
745 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746 result = package_parts[2]+ "-" + package_parts[3]
748 result = package_parts[2]
# Per-ROOT bundle of configuration state (settings, trees, package sets)
# used by depgraph; maps package type names to their backing tree.
753 class RootConfig(object):
754 """This is used internally by depgraph to track information about a
758 "ebuild" : "porttree",
759 "binary" : "bintree",
760 "installed" : "vartree"
# Build the reverse map (tree name -> package type) from pkg_tree_map.
764 for k, v in pkg_tree_map.iteritems():
767 def __init__(self, settings, trees, setconfig):
769 self.settings = settings
770 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771 self.root = self.settings["ROOT"]
772 self.setconfig = setconfig
773 self.sets = self.setconfig.getSets()
774 self.visible_pkgs = PackageVirtualDbapi(self.settings)
776 def create_world_atom(pkg, args_set, root_config):
777 """Create a new atom for the world file if one does not exist. If the
778 argument atom is precise enough to identify a specific slot then a slot
779 atom will be returned. Atoms that are in the system set may also be stored
780 in world since system atoms can only match one slot while world atoms can
781 be greedy with respect to slots. Unslotted system packages will not be
# Find the argument atom that selected this package; bail out (elided) if
# the package was not requested on the command line.
784 arg_atom = args_set.findAtomForPackage(pkg)
787 cp = portage.dep_getkey(arg_atom)
789 sets = root_config.sets
790 portdb = root_config.trees["porttree"].dbapi
791 vardb = root_config.trees["vartree"].dbapi
# A package counts as "slotted" if more than one SLOT exists for its cp,
# or its single SLOT is not the default "0".
792 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793 for cpv in portdb.match(cp))
794 slotted = len(available_slots) > 1 or \
795 (len(available_slots) == 1 and "0" not in available_slots)
797 # check the vdb in case this is multislot
798 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799 for cpv in vardb.match(cp))
800 slotted = len(available_slots) > 1 or \
801 (len(available_slots) == 1 and "0" not in available_slots)
802 if slotted and arg_atom != cp:
803 # If the user gave a specific atom, store it as a
804 # slot atom in the world file.
805 slot_atom = pkg.slot_atom
807 # For USE=multislot, there are a couple of cases to
810 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811 # unknown value, so just record an unslotted atom.
813 # 2) SLOT comes from an installed package and there is no
814 # matching SLOT in the portage tree.
816 # Make sure that the slot atom is available in either the
817 # portdb or the vardb, since otherwise the user certainly
818 # doesn't want the SLOT atom recorded in the world file
819 # (case 1 above). If it's only available in the vardb,
820 # the user may be trying to prevent a USE=multislot
821 # package from being removed by --depclean (case 2 above).
824 if not portdb.match(slot_atom):
825 # SLOT seems to come from an installed multislot package
827 # If there is no installed package matching the SLOT atom,
828 # it probably changed SLOT spontaneously due to USE=multislot,
829 # so just record an unslotted atom.
830 if vardb.match(slot_atom):
831 # Now verify that the argument is precise
832 # enough to identify a specific slot.
833 matches = mydb.match(arg_atom)
834 matched_slots = set()
836 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837 if len(matched_slots) == 1:
838 new_world_atom = slot_atom
840 if new_world_atom == sets["world"].findAtomForPackage(pkg):
841 # Both atoms would be identical, so there's nothing to add.
844 # Unlike world atoms, system atoms are not greedy for slots, so they
845 # can't be safely excluded from world if they are slotted.
846 system_atom = sets["system"].findAtomForPackage(pkg)
848 if not portage.dep_getkey(system_atom).startswith("virtual/"):
850 # System virtuals aren't safe to exclude from world since they can
851 # match multiple old-style virtuals but only one of them will be
852 # pulled in by update or depclean.
853 providers = portdb.mysettings.getvirtuals().get(
854 portage.dep_getkey(system_atom))
855 if providers and len(providers) == 1 and providers[0] == cp:
857 return new_world_atom
# Generator that strips the leading +/- default markers from IUSE flags
# (yield statements elided in this dump).
859 def filter_iuse_defaults(iuse):
861 if flag.startswith("+") or flag.startswith("-"):
# Base class for lightweight value objects: walks the full class hierarchy's
# __slots__ and initializes every slot from keyword arguments (missing ones
# default to None). copy() replicates all slot attributes onto a new instance.
866 class SlotObject(object):
867 __slots__ = ("__weakref__",)
869 def __init__(self, **kwargs):
# Breadth-first walk over this class and all its bases to collect slots.
870 classes = [self.__class__]
875 classes.extend(c.__bases__)
876 slots = getattr(c, "__slots__", None)
880 myvalue = kwargs.get(myattr, None)
881 setattr(self, myattr, myvalue)
# copy() (header elided):
885 Create a new instance and copy all attributes
886 defined from __slots__ (including those from
889 obj = self.__class__()
891 classes = [self.__class__]
896 classes.extend(c.__bases__)
897 slots = getattr(c, "__slots__", None)
901 setattr(obj, myattr, getattr(self, myattr))
# Base for dependency priorities: rich comparisons delegate to __int__(),
# which subclasses compute from the boolean slot attributes below.
905 class AbstractDepPriority(SlotObject):
906 __slots__ = ("buildtime", "runtime", "runtime_post")
908 def __lt__(self, other):
909 return self.__int__() < other
911 def __le__(self, other):
912 return self.__int__() <= other
914 def __eq__(self, other):
915 return self.__int__() == other
917 def __ne__(self, other):
918 return self.__int__() != other
920 def __gt__(self, other):
921 return self.__int__() > other
923 def __ge__(self, other):
924 return self.__int__() >= other
# copy() (header elided): shallow copy preserves all slot values.
928 return copy.copy(self)
930 class DepPriority(AbstractDepPriority):
932 This class generates an integer priority level based of various
933 attributes of the dependency relationship. Attributes can be assigned
934 at any time and the new integer value will be generated on calls to the
935 __int__() method. Rich comparison operators are supported.
937 The boolean attributes that affect the integer value are "satisfied",
938 "buildtime", "runtime", and "system". Various combinations of
939 attributes lead to the following priority levels:
941 Combination of properties Priority Category
943 not satisfied and buildtime 0 HARD
944 not satisfied and runtime -1 MEDIUM
945 not satisfied and runtime_post -2 MEDIUM_SOFT
946 satisfied and buildtime and rebuild -3 SOFT
947 satisfied and buildtime -4 SOFT
948 satisfied and runtime -5 SOFT
949 satisfied and runtime_post -6 SOFT
950 (none of the above) -6 SOFT
952 Several integer constants are defined for categorization of priority
955 MEDIUM The upper boundary for medium dependencies.
956 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
957 SOFT The upper boundary for soft dependencies.
958 MIN The lower boundary for soft dependencies.
960 __slots__ = ("satisfied", "rebuild")
# __int__ (header elided): maps the attribute combinations to the integer
# levels in the table above.
967 if not self.satisfied:
972 if self.runtime_post:
980 if self.runtime_post:
# __str__ (header elided): renders the category name for the current value.
985 myvalue = self.__int__()
986 if myvalue > self.MEDIUM:
988 if myvalue > self.MEDIUM_SOFT:
990 if myvalue > self.SOFT:
# Priority for blocker dependencies; a shared singleton instance is exported
# as BlockerDepPriority.instance (class body elided).
994 class BlockerDepPriority(DepPriority):
999 BlockerDepPriority.instance = BlockerDepPriority()
# Priority scheme used when ordering unmerge operations; simpler table than
# DepPriority (values per the embedded docstring fragment below).
1001 class UnmergeDepPriority(AbstractDepPriority):
1002 __slots__ = ("satisfied",)
1004 Combination of properties Priority Category
1007 runtime_post -1 HARD
1009 (none of the above) -2 SOFT
# __int__ (header elided):
1019 if self.runtime_post:
# __str__ (header elided): category name from the integer value.
1026 myvalue = self.__int__()
1027 if myvalue > self.SOFT:
1031 class FakeVartree(portage.vartree):
1032 """This implements an in-memory copy of a vartree instance that provides
1033 all the interfaces required for use by the depgraph. The vardb is locked
1034 during the constructor call just long enough to read a copy of the
1035 installed package information. This allows the depgraph to do its
1036 dependency calculations without holding a lock on the vardb. It also
1037 allows things like vardb global updates to be done in memory so that the
1038 user doesn't necessarily need write access to the vardb in cases where
1039 global updates are necessary (updates are performed when necessary if there
1040 is not a matching ebuild in the tree)."""
1041 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1042 self._root_config = root_config
1043 if pkg_cache is None:
1045 real_vartree = root_config.trees["vartree"]
1046 portdb = root_config.trees["porttree"].dbapi
1047 self.root = real_vartree.root
1048 self.settings = real_vartree.settings
# Always cache _mtime_ so sync() can later detect on-disk changes.
1049 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1050 if "_mtime_" not in mykeys:
1051 mykeys.append("_mtime_")
1052 self._db_keys = mykeys
1053 self._pkg_cache = pkg_cache
1054 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1055 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1057 # At least the parent needs to exist for the lock file.
1058 portage.util.ensure_dirs(vdb_path)
1059 except portage.exception.PortageException:
# Lock the real vdb only while we snapshot it, and only if writable.
1063 if acquire_lock and os.access(vdb_path, os.W_OK):
1064 vdb_lock = portage.locks.lockdir(vdb_path)
1065 real_dbapi = real_vartree.dbapi
1067 for cpv in real_dbapi.cpv_all():
1068 cache_key = ("installed", self.root, cpv, "nomerge")
1069 pkg = self._pkg_cache.get(cache_key)
1071 metadata = pkg.metadata
1073 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1074 myslot = metadata["SLOT"]
1075 mycp = portage.dep_getkey(cpv)
1076 myslot_atom = "%s:%s" % (mycp, myslot)
1078 mycounter = long(metadata["COUNTER"])
1081 metadata["COUNTER"] = str(mycounter)
# Per slot-atom, keep only the package with the highest COUNTER.
1082 other_counter = slot_counters.get(myslot_atom, None)
1083 if other_counter is not None:
1084 if other_counter > mycounter:
1086 slot_counters[myslot_atom] = mycounter
1088 pkg = Package(built=True, cpv=cpv,
1089 installed=True, metadata=metadata,
1090 root_config=root_config, type_name="installed")
1091 self._pkg_cache[pkg] = pkg
1092 self.dbapi.cpv_inject(pkg)
1093 real_dbapi.flush_cache()
1096 portage.locks.unlockdir(vdb_lock)
1097 # Populate the old-style virtuals using the cached values.
1098 if not self.settings.treeVirtuals:
1099 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1100 portage.getCPFromCPV, self.get_all_provides())
1102 # Initialize variables needed for lazy cache pulls of the live ebuild
1103 # metadata. This ensures that the vardb lock is released ASAP, without
1104 # being delayed in case cache generation is triggered.
1105 self._aux_get = self.dbapi.aux_get
1106 self.dbapi.aux_get = self._aux_get_wrapper
1107 self._match = self.dbapi.match
1108 self.dbapi.match = self._match_wrapper
1109 self._aux_get_history = set()
1110 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1111 self._portdb = portdb
1112 self._global_updates = None
1114 def _match_wrapper(self, cpv, use_cache=1):
# Docstring continues below (opening quotes elided):
1116 Make sure the metadata in Package instances gets updated for any
1117 cpv that is returned from a match() call, since the metadata can
1118 be accessed directly from the Package instance instead of via
1121 matches = self._match(cpv, use_cache=use_cache)
1123 if cpv in self._aux_get_history:
1125 self._aux_get_wrapper(cpv, [])
1128 def _aux_get_wrapper(self, pkg, wants):
# First call per pkg: overlay live ebuild metadata (or apply global
# updates when no matching/supported ebuild exists), then delegate.
1129 if pkg in self._aux_get_history:
1130 return self._aux_get(pkg, wants)
1131 self._aux_get_history.add(pkg)
1133 # Use the live ebuild metadata if possible.
1134 live_metadata = dict(izip(self._portdb_keys,
1135 self._portdb.aux_get(pkg, self._portdb_keys)))
1136 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1138 self.dbapi.aux_update(pkg, live_metadata)
1139 except (KeyError, portage.exception.PortageException):
1140 if self._global_updates is None:
1141 self._global_updates = \
1142 grab_global_updates(self._portdb.porttree_root)
1143 perform_global_updates(
1144 pkg, self.dbapi, self._global_updates)
1145 return self._aux_get(pkg, wants)
1147 def sync(self, acquire_lock=1):
# Docstring continues below (opening quotes elided):
1149 Call this method to synchronize state with the real vardb
1150 after one or more packages may have been installed or
1153 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1155 # At least the parent needs to exist for the lock file.
1156 portage.util.ensure_dirs(vdb_path)
1157 except portage.exception.PortageException:
1161 if acquire_lock and os.access(vdb_path, os.W_OK):
1162 vdb_lock = portage.locks.lockdir(vdb_path)
1166 portage.locks.unlockdir(vdb_lock)
# _sync body (method split elided in this dump):
1170 real_vardb = self._root_config.trees["vartree"].dbapi
1171 current_cpv_set = frozenset(real_vardb.cpv_all())
1172 pkg_vardb = self.dbapi
1173 aux_get_history = self._aux_get_history
1175 # Remove any packages that have been uninstalled.
1176 for pkg in list(pkg_vardb):
1177 if pkg.cpv not in current_cpv_set:
1178 pkg_vardb.cpv_remove(pkg)
1179 aux_get_history.discard(pkg.cpv)
1181 # Validate counters and timestamps.
1184 validation_keys = ["COUNTER", "_mtime_"]
1185 for cpv in current_cpv_set:
1187 pkg_hash_key = ("installed", root, cpv, "nomerge")
1188 pkg = pkg_vardb.get(pkg_hash_key)
1190 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1192 counter = long(counter)
# Drop stale cache entries whose counter or mtime changed on disk.
1196 if counter != pkg.counter or \
1198 pkg_vardb.cpv_remove(pkg)
1199 aux_get_history.discard(pkg.cpv)
1203 pkg = self._pkg(cpv)
1205 other_counter = slot_counters.get(pkg.slot_atom)
1206 if other_counter is not None:
1207 if other_counter > pkg.counter:
1210 slot_counters[pkg.slot_atom] = pkg.counter
1211 pkg_vardb.cpv_inject(pkg)
1213 real_vardb.flush_cache()
1215 def _pkg(self, cpv):
# Build a fresh installed-Package instance for cpv from the real vardb.
1216 root_config = self._root_config
1217 real_vardb = root_config.trees["vartree"].dbapi
1218 pkg = Package(cpv=cpv, installed=True,
1219 metadata=izip(self._db_keys,
1220 real_vardb.aux_get(cpv, self._db_keys)),
1221 root_config=root_config,
1222 type_name="installed")
# Normalize COUNTER to a canonical long-derived string (try/except elided).
1225 mycounter = long(pkg.metadata["COUNTER"])
1228 pkg.metadata["COUNTER"] = str(mycounter)
# Parse ${PORTDIR}/profiles/updates into a list of update commands; a missing
# updates directory is tolerated (handler body elided).
1232 def grab_global_updates(portdir):
1233 from portage.update import grab_updates, parse_updates
1234 updpath = os.path.join(portdir, "profiles", "updates")
1236 rawupdates = grab_updates(updpath)
1237 except portage.exception.DirectoryNotFound:
1240 for mykey, mystat, mycontent in rawupdates:
1241 commands, errors = parse_updates(mycontent)
1242 upd_commands.extend(commands)
# Apply global update commands (pkgmoves etc.) to one package's dependency
# metadata (DEPEND/RDEPEND/PDEPEND) in the given dbapi.
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246 from portage.update import update_dbentries
1247 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249 updates = update_dbentries(mycommands, aux_dict)
1251 mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
    """
    Check if a package is visible. This can raise an InvalidDependString
    exception if LICENSE is invalid.
    TODO: optionally generate a list of masking reasons
    @returns: True if the package is visible, False otherwise.
    """
    # NOTE(review): the early-return lines of the checks below are
    # elided in this excerpt, leaving several branches without bodies.
    if not pkg.metadata["SLOT"]:
    # Built (but not installed) packages must match the accepted CHOST.
    if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
        if not pkgsettings._accept_chost(pkg):
    eapi = pkg.metadata["EAPI"]
    if not portage.eapi_is_supported(eapi):
    if not pkg.installed:
        if portage._eapi_is_deprecated(eapi):
        if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
    if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
    if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
    # License check may raise InvalidDependString (see docstring).
    if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
    except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
    # Collect human-readable reasons why a package is masked, starting
    # from portage's generic masking status.
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)
    # Binary packages built for an unacceptable CHOST get an extra reason.
    if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
        if not pkgsettings._accept_chost(pkg):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])
    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")
    # NOTE(review): the return statement is elided in this excerpt.
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    # Fetch metadata for cpv and compute its masking reasons; a None
    # metadata value is treated as corruption below.
    # NOTE(review): this excerpt elides lines around aux_get (likely
    # an error guard that sets metadata = None) and the else: opener
    # before the Package construction -- confirm upstream.
    metadata = dict(izip(db_keys,
        db.aux_get(cpv, db_keys)))
    if metadata and not built:
        # USE is computed per-config for ebuilds, not stored metadata.
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
    if metadata is None:
        mreasons = ["corruption"]
    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)
    mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    # Print each masked package once, with its masking reasons, any
    # package.mask comment, and the location of missing license files.
    # Returns True if any package was masked due to an unsupported EAPI.
    # NOTE(review): this excerpt elides several lines (the shown_cpvs
    # initialization, `try:` openers, and some print/continue lines).
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
        comment, filename = None, None
        if "package.mask" in mreasons:
            # Retrieve the comment block and file that mask this cpv.
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
        missing_licenses = \
            pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
        if comment and comment not in shown_comments:
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.") % (l, l_path)
            shown_licenses.add(l)
    return have_eapi_mask
class Task(SlotObject):
    # A hashable task identified by a tuple hash key. Subclasses must
    # populate self._hash_key; equality, hashing, iteration, and
    # containment all delegate to that key tuple.
    # NOTE(review): several method `def` headers (__hash__, __len__,
    # __iter__, __str__) and some return lines are elided in this
    # excerpt; orphaned statements below are their remaining bodies.
    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # Subclass failed to provide an identity tuple.
            raise NotImplementedError(self)

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

        hash_value = getattr(self, "_hash_value", None)
        if hash_value is None:
            # Cache the hash since the key tuple is immutable.
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

        return str(self._get_hash_key())
class Blocker(Task):
    # Represents a package blocker atom rooted at a particular ROOT.
    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Cache the category/package portion of the blocker atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        # Lazily build the identity tuple used for hashing/equality.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            # NOTE(review): the assignment to self._hash_key is elided
            # in this excerpt; only the tuple expression remains.
            ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
class Package(Task):
    # A single package instance (ebuild, binary, or installed) as seen
    # by the dependency resolver.  Identity (hashing/equality) comes
    # from the (type_name, root, cpv, operation) tuple via Task.
    # NOTE(review): this excerpt elides a number of lines (the
    # metadata_keys assignment header, the nested `class _use` header,
    # try/except openers, loop openers and return statements);
    # orphaned fragments below reflect that.

    __hash__ = Task.__hash__
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root_config", "type_name",
        "category", "counter", "cp", "cpv_split",
        "inherited", "iuse", "mtime",
        "pf", "pv_split", "root", "slot", "slot_atom", "use")

        "CHOST", "COUNTER", "DEPEND", "EAPI",
        "INHERITED", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.root = self.root_config.root
        # Wrap metadata so key updates synchronize derived attributes
        # (see _PackageMetadataWrapper).
        self.metadata = _PackageMetadataWrapper(self, self.metadata)
        self.cp = portage.cpv_getkey(self.cpv)
        self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]

        __slots__ = ("__weakref__", "enabled")

        def __init__(self, use):
            # Enabled USE flags as an immutable set.
            self.enabled = frozenset(use)

    class _iuse(object):

        __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

        def __init__(self, tokens, iuse_implicit):
            self.tokens = tuple(tokens)
            self.iuse_implicit = iuse_implicit
            # Classify IUSE tokens by +/- prefix; the loop and its
            # condition lines are elided in this excerpt.
                    enabled.append(x[1:])
                    disabled.append(x[1:])
            self.enabled = frozenset(enabled)
            self.disabled = frozenset(disabled)
            self.all = frozenset(chain(enabled, disabled, other))

        def __getattribute__(self, name):
            # Lazily build and cache the flag-validation regex the
            # first time self.regex is accessed.
                return object.__getattribute__(self, "regex")
            except AttributeError:
                all = object.__getattribute__(self, "all")
                iuse_implicit = object.__getattribute__(self, "iuse_implicit")
                # Escape anything except ".*" which is supposed
                # to pass through from _get_implicit_iuse()
                regex = (re.escape(x) for x in chain(all, iuse_implicit))
                regex = "^(%s)$" % "|".join(regex)
                regex = regex.replace("\\.\\*", ".*")
                self.regex = re.compile(regex)
            return object.__getattribute__(self, name)

    def _get_hash_key(self):
        # Lazily derive the identity tuple; operation defaults to
        # "merge" unless this is an onlydeps/installed package.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            # NOTE(review): the assignment to self._hash_key is elided.
            (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    # Rich comparisons order packages of the same cp by version via
    # portage.pkgcmp; the return lines are elided in this excerpt.
    def __lt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

    def __le__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

    def __gt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

    def __ge__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Full set of metadata keys tracked for Package instances: every
# auxdb key except the UNUSED_ placeholders and CDEPEND, plus
# whatever Package itself declares in metadata_keys.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
    if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict class keeps per-instance memory low for the many
# metadata mappings created during dependency calculation.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
    """
    Detect metadata updates and synchronize Package attributes.
    """

    __slots__ = ("_pkg",)
    # Keys whose assignment triggers a _set_<key>() hook below.
    _wrapped_keys = frozenset(
        ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

    def __init__(self, pkg, metadata):
        _PackageMetadataWrapperBase.__init__(self)
        # NOTE(review): the line storing pkg on self appears elided
        # in this excerpt (self._pkg is read by the hooks below).
        self.update(metadata)

    def __setitem__(self, k, v):
        _PackageMetadataWrapperBase.__setitem__(self, k, v)
        if k in self._wrapped_keys:
            # Dispatch to the matching _set_* synchronization hook.
            getattr(self, "_set_" + k.lower())(k, v)

    def _set_inherited(self, k, v):
        # Normalize a whitespace-separated string to a frozenset.
        if isinstance(v, basestring):
            v = frozenset(v.split())
        self._pkg.inherited = v

    def _set_iuse(self, k, v):
        self._pkg.iuse = self._pkg._iuse(
            v.split(), self._pkg.root_config.iuse_implicit)

    def _set_slot(self, k, v):
        # NOTE(review): body elided in this excerpt.

    def _set_use(self, k, v):
        self._pkg.use = self._pkg._use(v.split())

    def _set_counter(self, k, v):
        # NOTE(review): the string-to-integer conversion lines are
        # elided in this excerpt.
        if isinstance(v, basestring):
        self._pkg.counter = v

    def _set__mtime_(self, k, v):
        # NOTE(review): conversion and assignment lines elided.
        if isinstance(v, basestring):
class EbuildFetchonly(SlotObject):
    # Performs the "fetch" phase for a single ebuild in
    # fetchonly/pretend mode, outside the normal build task sequence.
    # NOTE(review): the `def execute(self):` and `def _execute(self):`
    # headers, plus try/except openers and return lines, are elided in
    # this excerpt; orphaned statements below are their bodies.
    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        if e.errno != portage.exception.PermissionDenied.errno:
        raise portage.exception.PermissionDenied(global_tmpdir)
        # Redirect PORTAGE_TMPDIR to the private directory for the
        # duration of the fetch, then restore it and clean up.
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")

        retval = self._execute()

        settings["PORTAGE_TMPDIR"] = global_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        shutil.rmtree(private_tmpdir)

        settings = self.settings
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        settings.setcpv(pkg)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        use_cache = 1 # always true
        portage.doebuild_environment(ebuild_path, "fetch",
            root_config.root, settings, debug, use_cache, portdb)
        portage.prepare_build_dirs(self.pkg.root, self.settings, 0)

        retval = portage.doebuild(ebuild_path, "fetch",
            self.settings["ROOT"], self.settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")

        if retval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

        portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """

    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    # Bind each constant from the select module, falling back to a
    # default value when the platform does not define it.
    # NOTE(review): the enclosing loop lines (defining k and v) are
    # elided in this excerpt.
    locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Sublasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

    # NOTE(review): several method `def` headers (start, _start,
    # isAlive, poll, _poll, wait, _wait, cancel) are elided in this
    # excerpt; the orphaned statements below are their bodies.

        Start an asynchronous task and then return as soon as possible.

        raise NotImplementedError(self)

        return self.returncode is None

        return self.returncode

        if self.returncode is None:
        return self.returncode

        return self.returncode

        self.cancelled = True

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        # NOTE(review): the early-return body is elided in this excerpt.
        if self._start_listeners is None:
        self._start_listeners.remove(f)

    def _start_hook(self):
        # Fire and clear start listeners exactly once.
        if self._start_listeners is not None:
            start_listeners = self._start_listeners
            self._start_listeners = None

            # NOTE(review): the call inside this loop is elided.
            for f in start_listeners:

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        # A listener may have been moved onto the stack by _wait_hook;
        # consume it from there when the main list is gone.
        if self._exit_listeners is None:
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None

            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
    # Base class for tasks driven by the scheduler's poll() loop.
    # NOTE(review): the __slots__ continuation and the tail of the
    # _registered_events expression are elided in this excerpt, as are
    # the bodies of the branches in _unregister_if_appropriate.

    __slots__ = ("scheduler",) + \

    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \

    def _unregister(self):
        # Subclasses must implement scheduler unregistration.
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        # Tear down registration on error/invalid or hangup events.
        if self._registered:
            if event & self._exceptional_events:
            elif event & PollConstants.POLLHUP:
class PipeReader(AbstractPollTask):

    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    """
    # NOTE(review): the docstring's final line, several method `def`
    # headers (_start, isAlive, cancel/_cancel, _wait, getvalue,
    # close, try/except openers) and some branch bodies are elided
    # in this excerpt.

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

        self._reg_ids = set()
        self._read_data = []
        # Switch every input file to non-blocking mode and register it
        # with the scheduler so _output_handler gets called on data.
        for k, f in self.input_files.iteritems():
            fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
            self._reg_ids.add(self.scheduler.register(f.fileno(),
                self._registered_events, self._output_handler))
        self._registered = True

        return self._registered

        if self.returncode is None:
        self.cancelled = True

        if self.returncode is not None:
            return self.returncode

        if self._registered:
            # Block in the scheduler until all registrations are done.
            self.scheduler.schedule(self._reg_ids)

        self.returncode = os.EX_OK
        return self.returncode

        """Retrieve the entire contents"""
        return "".join(self._read_data)

        """Free the memory buffer."""
        self._read_data = None

    def _output_handler(self, fd, event):
        # Drain readable data from the matching input file into the
        # in-memory buffer.
        if event & PollConstants.POLLIN:

            for f in self.input_files.itervalues():
                if fd == f.fileno():

            buf = array.array('B')
            buf.fromfile(f, self._bufsize)

            self._read_data.append(buf.tostring())

        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None

        if self.input_files is not None:
            # NOTE(review): the close() call in this loop is elided.
            for f in self.input_files.itervalues():
            self.input_files = None
class CompositeTask(AsynchronousTask):
    # A task implemented as a chain of sub-tasks; the currently running
    # sub-task is tracked in _current_task.
    # NOTE(review): several method `def` headers (isAlive, cancel,
    # _poll, _wait) and some surrounding lines are elided in this
    # excerpt; orphaned statements below are their bodies.

    __slots__ = ("scheduler",) + ("_current_task",)

        return self._current_task is not None

        self.cancelled = True
        if self._current_task is not None:
            self._current_task.cancel()

        """
        This does a loop calling self._current_task.poll()
        repeatedly as long as the value of self._current_task
        keeps changing. It calls poll() a maximum of one time
        for a given self._current_task instance. This is useful
        since calling poll() on a task can trigger advance to
        the next task could eventually lead to the returncode
        being set in cases when polling only a single task would
        not have the same effect.
        """

        task = self._current_task
        if task is None or task is prev:
            # don't poll the same task more than once
        return self.returncode

        task = self._current_task
        # don't wait for the same task more than once
        # Before the task.wait() method returned, an exit
        # listener should have set self._current_task to either
        # a different task or None. Something is wrong.
        raise AssertionError("self._current_task has not " + \
            "changed since calling wait", self, task)

        return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs in subclass exit handlers.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.
        """
        self._final_exit(task)
        # NOTE(review): per the docstring, a wait() call follows here;
        # the line is elided in this excerpt.

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a sub-task.
        """
        task.addExitListener(exit_handler)
        self._current_task = task
        # NOTE(review): per the docstring, a task.start() call follows
        # here; the line is elided in this excerpt.
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """
    # NOTE(review): the `def` headers for start and cancel, and the
    # branch bodies in _task_exit_handler, are elided in this excerpt.

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        # FIFO queue of pending sub-tasks.
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

        self._start_next_task()

        self._task_queue.clear()
        CompositeTask.cancel(self)

    def _start_next_task(self):
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        # Advance to the next queued task, or finish when the queue
        # is empty or the previous task failed.
        if self._default_exit(task) != os.EX_OK:
        elif self._task_queue:
            self._start_next_task()
        self._final_exit(task)
class SubProcess(AbstractPollTask):
    # A poll-driven task wrapping a forked child process identified by
    # self.pid.
    # NOTE(review): several method `def` headers (poll, cancel,
    # isAlive, _wait) plus try/except openers and branch bodies are
    # elided in this excerpt; orphaned statements below are their
    # bodies.

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.

        if self.returncode is not None:
            return self.returncode
        if self.pid is None:
            return self.returncode
        if self._registered:
            return self.returncode

        # Non-blocking check for child exit.
        retval = os.waitpid(self.pid, os.WNOHANG)
        if e.errno != errno.ECHILD:
        retval = (self.pid, 1)

        if retval == (0, 0):
        self._set_returncode(retval)
        return self.returncode

        os.kill(self.pid, signal.SIGTERM)
        if e.errno != errno.ESRCH:

        self.cancelled = True
        if self.pid is not None:
        return self.returncode

        return self.pid is not None and \
            self.returncode is None

        if self.returncode is not None:
            return self.returncode

        if self._registered:
            # Drive the scheduler until our registration is satisfied.
            self.scheduler.schedule(self._reg_id)

        if self.returncode is not None:
            return self.returncode

        # Blocking wait for the child to exit.
        wait_retval = os.waitpid(self.pid, 0)
        if e.errno != errno.ECHILD:
        self._set_returncode((self.pid, 1))
        self._set_returncode(wait_retval)

        return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)

        if self._files is not None:
            # NOTE(review): the close() call in this loop is elided.
            for f in self._files.itervalues():

    def _set_returncode(self, wait_retval):
        # Convert an os.waitpid() status word into self.returncode.
        # NOTE(review): the branch structure distinguishing signal
        # death from normal exit is elided in this excerpt.
        retval = wait_retval[1]

        if retval != os.EX_OK:
            retval = (retval & 0xff) << 8
            retval = retval >> 8

        self.returncode = retval
class SpawnProcess(SubProcess):

    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    argument of the spawn call.
    """
    # NOTE(review): this excerpt elides a number of lines, including
    # the __slots__ continuation, the `def _start(self):` header,
    # conditional openers, and try/except wrappers; orphaned
    # statements below reflect that.

    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
        "uid", "gid", "groups", "umask", "logfile",
        "path_lookup", "pre_exec")

    __slots__ = ("args",) + \

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

        # Fill in default standard fd mappings when not provided.
        if self.fd_pipes is None:
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
            if fd == sys.stderr.fileno():

        logfile = self.logfile
        self._files = self._files_dict()

        master_fd, slave_fd = self._pipe(fd_pipes)
        # Non-blocking master side so the poll loop never stalls.
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes_orig = fd_pipes.copy()

        # TODO: Use job control functions like tcsetpgrp() to control
        # access to stdin. Until then, use /dev/null so that any
        # attempts to read from stdin will immediately return EOF
        # instead of blocking indefinitely.
        null_input = open('/dev/null', 'rb')
        fd_pipes[0] = null_input.fileno()
        fd_pipes[0] = fd_pipes_orig[0]

        files.process = os.fdopen(master_fd, 'r')
        if logfile is not None:

            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

            files.log = open(logfile, "a")
            portage.util.apply_secpass_permissions(logfile,
                uid=portage.portage_uid, gid=portage.portage_gid,

            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')

            output_handler = self._output_handler

            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        # Collect spawn() keyword arguments from our own attributes.
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
            self._registered_events, output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)

        if null_input is not None:

        # NOTE(review): an int retval here appears to indicate spawn
        # failure -- confirm against portage.process.spawn.
        if isinstance(retval, int):
            self.returncode = retval

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        # NOTE(review): the pipe-creation/return line is elided in
        # this excerpt.

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):
        # Copy available child output to stdout (unless backgrounded)
        # and to the log file.
        # NOTE(review): the line binding `files` locally is elided.
        if event & PollConstants.POLLIN:

            buf = array.array('B')
            buf.fromfile(files.process, self._bufsize)

            if not self.background:
                buf.tofile(files.stdout)
                files.stdout.flush()
            buf.tofile(files.log)

        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """

        if event & PollConstants.POLLIN:

            buf = array.array('B')
            buf.fromfile(self._files.process, self._bufsize)

        self._unregister_if_appropriate(event)
        return self._registered
class MiscFunctionsProcess(SpawnProcess):
    """
    Spawns misc-functions.sh with an existing ebuild environment.
    """
    # NOTE(review): the `def _start(self):` header is elided in this
    # excerpt; the orphaned statements below are its body.

    __slots__ = ("commands", "phase", "pkg", "settings")

        settings = self.settings
        settings.pop("EBUILD_PHASE", None)
        portage_bin_path = settings["PORTAGE_BIN_PATH"]
        misc_sh_binary = os.path.join(portage_bin_path,
            os.path.basename(portage.const.MISC_SH_BINARY))

        self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
        self.logfile = settings.get("PORTAGE_LOG_FILE")

        # Remove any stale exit-status file before spawning.
        portage._doebuild_exit_status_unlink(
            settings.get("EBUILD_EXIT_STATUS_FILE"))

        SpawnProcess._start(self)

    def _spawn(self, args, **kwargs):
        # Run the joined command string through portage.spawn,
        # honoring PORTAGE_DEBUG.
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        return portage.spawn(" ".join(args), settings,
            debug=debug, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Let the exit-status file check override the raw return code.
        self.returncode = portage._doebuild_exit_status_check_and_log(
            self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    # Runs the `ebuild <file> fetch` phase in a subprocess, with its
    # own build directory lock and log handling.
    # NOTE(review): this excerpt elides several lines, including the
    # __slots__ continuation, the `def _start(self):` header, the
    # phase selection, and various conditional openers; orphaned
    # statements below reflect that.

    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
        root_config = self.pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.config_pool.allocate()
        self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
        self._build_dir.lock()
        self._build_dir.clean()
        portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
        if self.logfile is None:
            self.logfile = settings.get("PORTAGE_LOG_FILE")

        # If any incremental variables have been overridden
        # via the environment, those values need to be passed
        # along here so that they are correctly considered by
        # the config instance in the subproccess.
        fetch_env = os.environ.copy()

        fetch_env["PORTAGE_NICENESS"] = "0"
        fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

        ebuild_binary = os.path.join(
            settings["PORTAGE_BIN_PATH"], "ebuild")

        fetch_args = [ebuild_binary, ebuild_path, phase]
        debug = settings.get("PORTAGE_DEBUG") == "1"
        fetch_args.append("--debug")

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                if self.logfile is not None:
                    elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                # NOTE(review): the close() call body is elided here.
                if elog_out is not None:
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
            features = self._build_dir.settings.features
            # Clean up the build dir on success, then release it.
            if self.returncode == os.EX_OK:
                self._build_dir.clean()
            self._build_dir.unlock()
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
class EbuildBuildDir(SlotObject):
    # Manages locking and cleaning of PORTAGE_BUILDDIR for one package.
    # NOTE(review): this excerpt elides several lines, including the
    # `def lock/clean/unlock` headers, docstring delimiters,
    # try/except openers, and some ensure_dirs() keyword lines;
    # orphaned fragments below reflect that.

    __slots__ = ("dir_path", "pkg", "settings",
        "locked", "_catdir", "_lock_obj")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        # NOTE(review): the initialization line(s) are elided here.

        This raises an AlreadyLocked exception if lock() is called
        while a lock is already held. In order to avoid this, call
        unlock() or check whether the "locked" attribute is True
        or False before calling lock().

        if self._lock_obj is not None:
            raise self.AlreadyLocked((self._lock_obj,))

        dir_path = self.dir_path
        if dir_path is None:
            # Derive PORTAGE_BUILDDIR by setting up the doebuild
            # environment for this package.
            root_config = self.pkg.root_config
            portdb = root_config.trees["porttree"].dbapi
            ebuild_path = portdb.findname(self.pkg.cpv)
            settings = self.settings
            settings.setcpv(self.pkg)
            debug = settings.get("PORTAGE_DEBUG") == "1"
            use_cache = 1 # always true
            portage.doebuild_environment(ebuild_path, "setup", root_config.root,
                self.settings, debug, use_cache, portdb)
            dir_path = self.settings["PORTAGE_BUILDDIR"]

        catdir = os.path.dirname(dir_path)
        self._catdir = catdir

        # Hold the category-dir lock while creating/locking the
        # package build dir.
        portage.util.ensure_dirs(os.path.dirname(catdir),
            gid=portage.portage_gid,
        catdir_lock = portage.locks.lockdir(catdir)
        portage.util.ensure_dirs(catdir,
            gid=portage.portage_gid,
        self._lock_obj = portage.locks.lockdir(dir_path)

        self.locked = self._lock_obj is not None
        if catdir_lock is not None:
            portage.locks.unlockdir(catdir_lock)

        """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
        by keepwork or keeptemp in FEATURES."""
        settings = self.settings
        features = settings.features
        if not ("keepwork" in features or "keeptemp" in features):
            shutil.rmtree(settings["PORTAGE_BUILDDIR"])
            except EnvironmentError, e:
                # A missing build dir is fine; anything else propagates.
                if e.errno != errno.ENOENT:

        if self._lock_obj is None:

        portage.locks.unlockdir(self._lock_obj)
        self._lock_obj = None

        # Opportunistically remove the (possibly empty) category dir
        # while holding its lock.
        catdir = self._catdir
        catdir_lock = portage.locks.lockdir(catdir)
        if e.errno not in (errno.ENOENT,
            errno.ENOTEMPTY, errno.EEXIST):
        portage.locks.unlockdir(catdir_lock)

    class AlreadyLocked(portage.exception.PortageException):
        # Raised by lock() when this build dir's lock is already held.
2506 class EbuildBuild(CompositeTask):
2508 __slots__ = ("args_set", "config_pool", "find_blockers",
2509 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2510 "prefetcher", "settings", "world_atom") + \
2511 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2515 logger = self.logger
2518 settings = self.settings
2519 world_atom = self.world_atom
2520 root_config = pkg.root_config
2523 portdb = root_config.trees[tree].dbapi
2524 settings.setcpv(pkg)
2525 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2526 ebuild_path = portdb.findname(self.pkg.cpv)
2527 self._ebuild_path = ebuild_path
2529 prefetcher = self.prefetcher
2530 if prefetcher is None:
2532 elif not prefetcher.isAlive():
2534 elif prefetcher.poll() is None:
2536 waiting_msg = "Fetching files " + \
2537 "in the background. " + \
2538 "To view fetch progress, run `tail -f " + \
2539 "/var/log/emerge-fetch.log` in another " + \
2541 msg_prefix = colorize("GOOD", " * ")
2542 from textwrap import wrap
2543 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2544 for line in wrap(waiting_msg, 65))
2545 if not self.background:
2546 writemsg(waiting_msg, noiselevel=-1)
2548 self._current_task = prefetcher
2549 prefetcher.addExitListener(self._prefetch_exit)
2552 self._prefetch_exit(prefetcher)
2554 def _prefetch_exit(self, prefetcher):
2558 settings = self.settings
2561 fetcher = EbuildFetchonly(
2562 fetch_all=opts.fetch_all_uri,
2563 pkg=pkg, pretend=opts.pretend,
2565 retval = fetcher.execute()
2566 self.returncode = retval
2570 fetcher = EbuildFetcher(config_pool=self.config_pool,
2571 fetchall=opts.fetch_all_uri,
2572 fetchonly=opts.fetchonly,
2573 background=self.background,
2574 pkg=pkg, scheduler=self.scheduler)
2576 self._start_task(fetcher, self._fetch_exit)
2578 def _fetch_exit(self, fetcher):
2582 fetch_failed = False
2584 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2586 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2588 if fetch_failed and fetcher.logfile is not None and \
2589 os.path.exists(fetcher.logfile):
2590 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2592 if not fetch_failed and fetcher.logfile is not None:
2593 # Fetch was successful, so remove the fetch log.
2595 os.unlink(fetcher.logfile)
2599 if fetch_failed or opts.fetchonly:
2603 logger = self.logger
2605 pkg_count = self.pkg_count
2606 scheduler = self.scheduler
2607 settings = self.settings
2608 features = settings.features
2609 ebuild_path = self._ebuild_path
2610 system_set = pkg.root_config.sets["system"]
2612 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2613 self._build_dir.lock()
2615 # Cleaning is triggered before the setup
2616 # phase, in portage.doebuild().
2617 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2618 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2619 short_msg = "emerge: (%s of %s) %s Clean" % \
2620 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2621 logger.log(msg, short_msg=short_msg)
2623 #buildsyspkg: Check if we need to _force_ binary package creation
2624 self._issyspkg = "buildsyspkg" in features and \
2625 system_set.findAtomForPackage(pkg) and \
2628 if opts.buildpkg or self._issyspkg:
2630 self._buildpkg = True
2632 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2633 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2634 short_msg = "emerge: (%s of %s) %s Compile" % \
2635 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2636 logger.log(msg, short_msg=short_msg)
2639 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2640 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2641 short_msg = "emerge: (%s of %s) %s Compile" % \
2642 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2643 logger.log(msg, short_msg=short_msg)
2645 build = EbuildExecuter(background=self.background, pkg=pkg,
2646 scheduler=scheduler, settings=settings)
2647 self._start_task(build, self._build_exit)
2649 def _unlock_builddir(self):
2650 portage.elog.elog_process(self.pkg.cpv, self.settings)
2651 self._build_dir.unlock()
2653 def _build_exit(self, build):
2654 if self._default_exit(build) != os.EX_OK:
2655 self._unlock_builddir()
2660 buildpkg = self._buildpkg
2663 self._final_exit(build)
2668 msg = ">>> This is a system package, " + \
2669 "let's pack a rescue tarball.\n"
2671 log_path = self.settings.get("PORTAGE_LOG_FILE")
2672 if log_path is not None:
2673 log_file = open(log_path, 'a')
2679 if not self.background:
2680 portage.writemsg_stdout(msg, noiselevel=-1)
2682 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2683 scheduler=self.scheduler, settings=self.settings)
2685 self._start_task(packager, self._buildpkg_exit)
2687 def _buildpkg_exit(self, packager):
2689 Released build dir lock when there is a failure or
2690 when in buildpkgonly mode. Otherwise, the lock will
2691 be released when merge() is called.
2694 if self._default_exit(packager) != os.EX_OK:
2695 self._unlock_builddir()
2699 if self.opts.buildpkgonly:
2700 # Need to call "clean" phase for buildpkgonly mode
2701 portage.elog.elog_process(self.pkg.cpv, self.settings)
2703 clean_phase = EbuildPhase(background=self.background,
2704 pkg=self.pkg, phase=phase,
2705 scheduler=self.scheduler, settings=self.settings,
2707 self._start_task(clean_phase, self._clean_exit)
2710 # Continue holding the builddir lock until
2711 # after the package has been installed.
2712 self._current_task = None
2713 self.returncode = packager.returncode
2716 def _clean_exit(self, clean_phase):
2717 if self._final_exit(clean_phase) != os.EX_OK or \
2718 self.opts.buildpkgonly:
2719 self._unlock_builddir()
2724 Install the package and then clean up and release locks.
2725 Only call this after the build has completed successfully
2726 and neither fetchonly nor buildpkgonly mode are enabled.
2729 find_blockers = self.find_blockers
2730 ldpath_mtimes = self.ldpath_mtimes
2731 logger = self.logger
2733 pkg_count = self.pkg_count
2734 settings = self.settings
2735 world_atom = self.world_atom
2736 ebuild_path = self._ebuild_path
2739 merge = EbuildMerge(find_blockers=self.find_blockers,
2740 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2741 pkg_count=pkg_count, pkg_path=ebuild_path,
2742 scheduler=self.scheduler,
2743 settings=settings, tree=tree, world_atom=world_atom)
2745 msg = " === (%s of %s) Merging (%s::%s)" % \
2746 (pkg_count.curval, pkg_count.maxval,
2747 pkg.cpv, ebuild_path)
2748 short_msg = "emerge: (%s of %s) %s Merge" % \
2749 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2750 logger.log(msg, short_msg=short_msg)
2753 rval = merge.execute()
2755 self._unlock_builddir()
2759 class EbuildExecuter(CompositeTask):
2761 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2763 _phases = ("prepare", "configure", "compile", "test", "install")
2765 _live_eclasses = frozenset([
2775 self._tree = "porttree"
2778 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2779 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2780 self._start_task(clean_phase, self._clean_phase_exit)
2782 def _clean_phase_exit(self, clean_phase):
2784 if self._default_exit(clean_phase) != os.EX_OK:
2789 scheduler = self.scheduler
2790 settings = self.settings
2793 # This initializes PORTAGE_LOG_FILE.
2794 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2796 setup_phase = EbuildPhase(background=self.background,
2797 pkg=pkg, phase="setup", scheduler=scheduler,
2798 settings=settings, tree=self._tree)
2800 setup_phase.addExitListener(self._setup_exit)
2801 self._current_task = setup_phase
2802 self.scheduler.scheduleSetup(setup_phase)
2804 def _setup_exit(self, setup_phase):
2806 if self._default_exit(setup_phase) != os.EX_OK:
2810 unpack_phase = EbuildPhase(background=self.background,
2811 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2812 settings=self.settings, tree=self._tree)
2814 if self._live_eclasses.intersection(self.pkg.inherited):
2815 # Serialize $DISTDIR access for live ebuilds since
2816 # otherwise they can interfere with eachother.
2818 unpack_phase.addExitListener(self._unpack_exit)
2819 self._current_task = unpack_phase
2820 self.scheduler.scheduleUnpack(unpack_phase)
2823 self._start_task(unpack_phase, self._unpack_exit)
2825 def _unpack_exit(self, unpack_phase):
2827 if self._default_exit(unpack_phase) != os.EX_OK:
2831 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2834 phases = self._phases
2835 eapi = pkg.metadata["EAPI"]
2836 if eapi in ("0", "1", "2_pre1"):
2837 # skip src_prepare and src_configure
2839 elif eapi in ("2_pre2",):
2843 for phase in phases:
2844 ebuild_phases.add(EbuildPhase(background=self.background,
2845 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2846 settings=self.settings, tree=self._tree))
2848 self._start_task(ebuild_phases, self._default_final_exit)
2850 class EbuildMetadataPhase(SubProcess):
2853 Asynchronous interface for the ebuild "depend" phase which is
2854 used to extract metadata from the ebuild.
2857 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2858 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2861 _file_names = ("ebuild",)
2862 _files_dict = slot_dict_class(_file_names, prefix="")
2866 settings = self.settings
2868 ebuild_path = self.ebuild_path
2869 debug = settings.get("PORTAGE_DEBUG") == "1"
2873 if self.fd_pipes is not None:
2874 fd_pipes = self.fd_pipes.copy()
2878 fd_pipes.setdefault(0, sys.stdin.fileno())
2879 fd_pipes.setdefault(1, sys.stdout.fileno())
2880 fd_pipes.setdefault(2, sys.stderr.fileno())
2882 # flush any pending output
2883 for fd in fd_pipes.itervalues():
2884 if fd == sys.stdout.fileno():
2886 if fd == sys.stderr.fileno():
2889 fd_pipes_orig = fd_pipes.copy()
2890 self._files = self._files_dict()
2893 master_fd, slave_fd = os.pipe()
2894 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2895 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2897 fd_pipes[self._metadata_fd] = slave_fd
2899 self._raw_metadata = []
2900 files.ebuild = os.fdopen(master_fd, 'r')
2901 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2902 self._registered_events, self._output_handler)
2903 self._registered = True
2905 retval = portage.doebuild(ebuild_path, "depend",
2906 settings["ROOT"], settings, debug,
2907 mydbapi=self.portdb, tree="porttree",
2908 fd_pipes=fd_pipes, returnpid=True)
2912 if isinstance(retval, int):
2913 # doebuild failed before spawning
2915 self.returncode = retval
2919 self.pid = retval[0]
2920 portage.process.spawned_pids.remove(self.pid)
2922 def _output_handler(self, fd, event):
2924 if event & PollConstants.POLLIN:
2925 self._raw_metadata.append(self._files.ebuild.read())
2926 if not self._raw_metadata[-1]:
2930 self._unregister_if_appropriate(event)
2931 return self._registered
2933 def _set_returncode(self, wait_retval):
2934 SubProcess._set_returncode(self, wait_retval)
2935 if self.returncode == os.EX_OK:
2936 metadata_lines = "".join(self._raw_metadata).splitlines()
2937 if len(portage.auxdbkeys) != len(metadata_lines):
2938 # Don't trust bash's returncode if the
2939 # number of lines is incorrect.
2942 metadata = izip(portage.auxdbkeys, metadata_lines)
2943 self.metadata_callback(self.cpv, self.ebuild_path,
2944 self.repo_path, metadata, self.ebuild_mtime)
2946 class EbuildProcess(SpawnProcess):
2948 __slots__ = ("phase", "pkg", "settings", "tree")
2951 # Don't open the log file during the clean phase since the
2952 # open file can result in an nfs lock on $T/build.log which
2953 # prevents the clean phase from removing $T.
2954 if self.phase not in ("clean", "cleanrm"):
2955 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2956 SpawnProcess._start(self)
2958 def _pipe(self, fd_pipes):
2959 stdout_pipe = fd_pipes.get(1)
2960 got_pty, master_fd, slave_fd = \
2961 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2962 return (master_fd, slave_fd)
2964 def _spawn(self, args, **kwargs):
2966 root_config = self.pkg.root_config
2968 mydbapi = root_config.trees[tree].dbapi
2969 settings = self.settings
2970 ebuild_path = settings["EBUILD"]
2971 debug = settings.get("PORTAGE_DEBUG") == "1"
2973 rval = portage.doebuild(ebuild_path, self.phase,
2974 root_config.root, settings, debug,
2975 mydbapi=mydbapi, tree=tree, **kwargs)
2979 def _set_returncode(self, wait_retval):
2980 SpawnProcess._set_returncode(self, wait_retval)
2982 if self.phase not in ("clean", "cleanrm"):
2983 self.returncode = portage._doebuild_exit_status_check_and_log(
2984 self.settings, self.phase, self.returncode)
2986 if self.phase == "test" and self.returncode != os.EX_OK and \
2987 "test-fail-continue" in self.settings.features:
2988 self.returncode = os.EX_OK
2990 portage._post_phase_userpriv_perms(self.settings)
2992 class EbuildPhase(CompositeTask):
2994 __slots__ = ("background", "pkg", "phase",
2995 "scheduler", "settings", "tree")
2997 _post_phase_cmds = portage._post_phase_cmds
3001 ebuild_process = EbuildProcess(background=self.background,
3002 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3003 settings=self.settings, tree=self.tree)
3005 self._start_task(ebuild_process, self._ebuild_exit)
3007 def _ebuild_exit(self, ebuild_process):
3009 if self.phase == "install":
3011 log_path = self.settings.get("PORTAGE_LOG_FILE")
3013 if self.background and log_path is not None:
3014 log_file = open(log_path, 'a')
3017 portage._check_build_log(self.settings, out=out)
3019 if log_file is not None:
3022 if self._default_exit(ebuild_process) != os.EX_OK:
3026 settings = self.settings
3028 if self.phase == "install":
3029 portage._post_src_install_uid_fix(settings)
3031 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3032 if post_phase_cmds is not None:
3033 post_phase = MiscFunctionsProcess(background=self.background,
3034 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3035 scheduler=self.scheduler, settings=settings)
3036 self._start_task(post_phase, self._post_phase_exit)
3039 self.returncode = ebuild_process.returncode
3040 self._current_task = None
3043 def _post_phase_exit(self, post_phase):
3044 if self._final_exit(post_phase) != os.EX_OK:
3045 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3047 self._current_task = None
3051 class EbuildBinpkg(EbuildProcess):
3053 This assumes that src_install() has successfully completed.
3055 __slots__ = ("_binpkg_tmpfile",)
3058 self.phase = "package"
3059 self.tree = "porttree"
3061 root_config = pkg.root_config
3062 portdb = root_config.trees["porttree"].dbapi
3063 bintree = root_config.trees["bintree"]
3064 ebuild_path = portdb.findname(self.pkg.cpv)
3065 settings = self.settings
3066 debug = settings.get("PORTAGE_DEBUG") == "1"
3068 bintree.prevent_collision(pkg.cpv)
3069 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3070 pkg.cpv + ".tbz2." + str(os.getpid()))
3071 self._binpkg_tmpfile = binpkg_tmpfile
3072 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3073 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3076 EbuildProcess._start(self)
3078 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3080 def _set_returncode(self, wait_retval):
3081 EbuildProcess._set_returncode(self, wait_retval)
3084 bintree = pkg.root_config.trees["bintree"]
3085 binpkg_tmpfile = self._binpkg_tmpfile
3086 if self.returncode == os.EX_OK:
3087 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3089 class EbuildMerge(SlotObject):
3091 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3092 "pkg", "pkg_count", "pkg_path", "pretend",
3093 "scheduler", "settings", "tree", "world_atom")
3096 root_config = self.pkg.root_config
3097 settings = self.settings
3098 retval = portage.merge(settings["CATEGORY"],
3099 settings["PF"], settings["D"],
3100 os.path.join(settings["PORTAGE_BUILDDIR"],
3101 "build-info"), root_config.root, settings,
3102 myebuild=settings["EBUILD"],
3103 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3104 vartree=root_config.trees["vartree"],
3105 prev_mtimes=self.ldpath_mtimes,
3106 scheduler=self.scheduler,
3107 blockers=self.find_blockers)
3109 if retval == os.EX_OK:
3110 self.world_atom(self.pkg)
3115 def _log_success(self):
3117 pkg_count = self.pkg_count
3118 pkg_path = self.pkg_path
3119 logger = self.logger
3120 if "noclean" not in self.settings.features:
3121 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3122 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3123 logger.log((" === (%s of %s) " + \
3124 "Post-Build Cleaning (%s::%s)") % \
3125 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3126 short_msg=short_msg)
3127 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3128 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3130 class PackageUninstall(AsynchronousTask):
3132 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3136 unmerge(self.pkg.root_config, self.opts, "unmerge",
3137 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3138 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3139 writemsg_level=self._writemsg_level)
3140 except UninstallFailure, e:
3141 self.returncode = e.status
3143 self.returncode = os.EX_OK
3146 def _writemsg_level(self, msg, level=0, noiselevel=0):
3148 log_path = self.settings.get("PORTAGE_LOG_FILE")
3149 background = self.background
3151 if log_path is None:
3152 if not (background and level < logging.WARNING):
3153 portage.util.writemsg_level(msg,
3154 level=level, noiselevel=noiselevel)
3157 portage.util.writemsg_level(msg,
3158 level=level, noiselevel=noiselevel)
3160 f = open(log_path, 'a')
3166 class Binpkg(CompositeTask):
3168 __slots__ = ("find_blockers",
3169 "ldpath_mtimes", "logger", "opts",
3170 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3171 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3172 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3174 def _writemsg_level(self, msg, level=0, noiselevel=0):
3176 if not self.background:
3177 portage.util.writemsg_level(msg,
3178 level=level, noiselevel=noiselevel)
3180 log_path = self.settings.get("PORTAGE_LOG_FILE")
3181 if log_path is not None:
3182 f = open(log_path, 'a')
3191 settings = self.settings
3192 settings.setcpv(pkg)
3193 self._tree = "bintree"
3194 self._bintree = self.pkg.root_config.trees[self._tree]
3195 self._verify = not self.opts.pretend
3197 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3198 "portage", pkg.category, pkg.pf)
3199 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3200 pkg=pkg, settings=settings)
3201 self._image_dir = os.path.join(dir_path, "image")
3202 self._infloc = os.path.join(dir_path, "build-info")
3203 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3204 settings["EBUILD"] = self._ebuild_path
3205 debug = settings.get("PORTAGE_DEBUG") == "1"
3206 portage.doebuild_environment(self._ebuild_path, "setup",
3207 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3208 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3210 # The prefetcher has already completed or it
3211 # could be running now. If it's running now,
3212 # wait for it to complete since it holds
3213 # a lock on the file being fetched. The
3214 # portage.locks functions are only designed
3215 # to work between separate processes. Since
3216 # the lock is held by the current process,
3217 # use the scheduler and fetcher methods to
3218 # synchronize with the fetcher.
3219 prefetcher = self.prefetcher
3220 if prefetcher is None:
3222 elif not prefetcher.isAlive():
3224 elif prefetcher.poll() is None:
3226 waiting_msg = ("Fetching '%s' " + \
3227 "in the background. " + \
3228 "To view fetch progress, run `tail -f " + \
3229 "/var/log/emerge-fetch.log` in another " + \
3230 "terminal.") % prefetcher.pkg_path
3231 msg_prefix = colorize("GOOD", " * ")
3232 from textwrap import wrap
3233 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3234 for line in wrap(waiting_msg, 65))
3235 if not self.background:
3236 writemsg(waiting_msg, noiselevel=-1)
3238 self._current_task = prefetcher
3239 prefetcher.addExitListener(self._prefetch_exit)
3242 self._prefetch_exit(prefetcher)
3244 def _prefetch_exit(self, prefetcher):
3247 pkg_count = self.pkg_count
3248 if not (self.opts.pretend or self.opts.fetchonly):
3249 self._build_dir.lock()
3251 shutil.rmtree(self._build_dir.dir_path)
3252 except EnvironmentError, e:
3253 if e.errno != errno.ENOENT:
3256 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3257 fetcher = BinpkgFetcher(background=self.background,
3258 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3259 pretend=self.opts.pretend, scheduler=self.scheduler)
3260 pkg_path = fetcher.pkg_path
3261 self._pkg_path = pkg_path
3263 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3265 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3266 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3267 short_msg = "emerge: (%s of %s) %s Fetch" % \
3268 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3269 self.logger.log(msg, short_msg=short_msg)
3270 self._start_task(fetcher, self._fetcher_exit)
3273 self._fetcher_exit(fetcher)
3275 def _fetcher_exit(self, fetcher):
3277 # The fetcher only has a returncode when
3278 # --getbinpkg is enabled.
3279 if fetcher.returncode is not None:
3280 self._fetched_pkg = True
3281 if self._default_exit(fetcher) != os.EX_OK:
3282 self._unlock_builddir()
3286 if self.opts.pretend:
3287 self._current_task = None
3288 self.returncode = os.EX_OK
3296 logfile = self.settings.get("PORTAGE_LOG_FILE")
3297 verifier = BinpkgVerifier(background=self.background,
3298 logfile=logfile, pkg=self.pkg)
3299 self._start_task(verifier, self._verifier_exit)
3302 self._verifier_exit(verifier)
3304 def _verifier_exit(self, verifier):
3305 if verifier is not None and \
3306 self._default_exit(verifier) != os.EX_OK:
3307 self._unlock_builddir()
3311 logger = self.logger
3313 pkg_count = self.pkg_count
3314 pkg_path = self._pkg_path
3316 if self._fetched_pkg:
3317 self._bintree.inject(pkg.cpv, filename=pkg_path)
3319 if self.opts.fetchonly:
3320 self._current_task = None
3321 self.returncode = os.EX_OK
3325 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3326 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3327 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3328 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3329 logger.log(msg, short_msg=short_msg)
3332 settings = self.settings
3333 ebuild_phase = EbuildPhase(background=self.background,
3334 pkg=pkg, phase=phase, scheduler=self.scheduler,
3335 settings=settings, tree=self._tree)
3337 self._start_task(ebuild_phase, self._clean_exit)
3339 def _clean_exit(self, clean_phase):
3340 if self._default_exit(clean_phase) != os.EX_OK:
3341 self._unlock_builddir()
3345 dir_path = self._build_dir.dir_path
3348 shutil.rmtree(dir_path)
3349 except (IOError, OSError), e:
3350 if e.errno != errno.ENOENT:
3354 infloc = self._infloc
3356 pkg_path = self._pkg_path
3359 for mydir in (dir_path, self._image_dir, infloc):
3360 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3361 gid=portage.data.portage_gid, mode=dir_mode)
3363 # This initializes PORTAGE_LOG_FILE.
3364 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3365 self._writemsg_level(">>> Extracting info\n")
3367 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3368 check_missing_metadata = ("CATEGORY", "PF")
3369 missing_metadata = set()
3370 for k in check_missing_metadata:
3371 v = pkg_xpak.getfile(k)
3373 missing_metadata.add(k)
3375 pkg_xpak.unpackinfo(infloc)
3376 for k in missing_metadata:
3384 f = open(os.path.join(infloc, k), 'wb')
3390 # Store the md5sum in the vdb.
3391 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3393 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3397 # This gives bashrc users an opportunity to do various things
3398 # such as remove binary packages after they're installed.
3399 settings = self.settings
3400 settings.setcpv(self.pkg)
3401 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3402 settings.backup_changes("PORTAGE_BINPKG_FILE")
3405 setup_phase = EbuildPhase(background=self.background,
3406 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3407 settings=settings, tree=self._tree)
3409 setup_phase.addExitListener(self._setup_exit)
3410 self._current_task = setup_phase
3411 self.scheduler.scheduleSetup(setup_phase)
3413 def _setup_exit(self, setup_phase):
3414 if self._default_exit(setup_phase) != os.EX_OK:
3415 self._unlock_builddir()
3419 extractor = BinpkgExtractorAsync(background=self.background,
3420 image_dir=self._image_dir,
3421 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3422 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3423 self._start_task(extractor, self._extractor_exit)
3425 def _extractor_exit(self, extractor):
3426 if self._final_exit(extractor) != os.EX_OK:
3427 self._unlock_builddir()
3428 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3432 def _unlock_builddir(self):
3433 if self.opts.pretend or self.opts.fetchonly:
3435 portage.elog.elog_process(self.pkg.cpv, self.settings)
3436 self._build_dir.unlock()
3440 # This gives bashrc users an opportunity to do various things
3441 # such as remove binary packages after they're installed.
3442 settings = self.settings
3443 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3444 settings.backup_changes("PORTAGE_BINPKG_FILE")
3446 merge = EbuildMerge(find_blockers=self.find_blockers,
3447 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3448 pkg=self.pkg, pkg_count=self.pkg_count,
3449 pkg_path=self._pkg_path, scheduler=self.scheduler,
3450 settings=settings, tree=self._tree, world_atom=self.world_atom)
3453 retval = merge.execute()
3455 settings.pop("PORTAGE_BINPKG_FILE", None)
3456 self._unlock_builddir()
3459 class BinpkgFetcher(SpawnProcess):
3461 __slots__ = ("pkg", "pretend",
3462 "locked", "pkg_path", "_lock_obj")
3464 def __init__(self, **kwargs):
3465 SpawnProcess.__init__(self, **kwargs)
3467 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3475 pretend = self.pretend
3476 bintree = pkg.root_config.trees["bintree"]
3477 settings = bintree.settings
3478 use_locks = "distlocks" in settings.features
3479 pkg_path = self.pkg_path
3482 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3485 exists = os.path.exists(pkg_path)
3486 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3487 if not (pretend or resume):
3488 # Remove existing file or broken symlink.
3494 # urljoin doesn't work correctly with
3495 # unrecognized protocols like sftp
3496 if bintree._remote_has_index:
3497 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3499 rel_uri = pkg.cpv + ".tbz2"
3500 uri = bintree._remote_base_uri.rstrip("/") + \
3501 "/" + rel_uri.lstrip("/")
3503 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3504 "/" + pkg.pf + ".tbz2"
3507 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3508 self.returncode = os.EX_OK
3512 protocol = urlparse.urlparse(uri)[0]
3513 fcmd_prefix = "FETCHCOMMAND"
3515 fcmd_prefix = "RESUMECOMMAND"
3516 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3518 fcmd = settings.get(fcmd_prefix)
3521 "DISTDIR" : os.path.dirname(pkg_path),
3523 "FILE" : os.path.basename(pkg_path)
3526 fetch_env = dict(settings.iteritems())
3527 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3528 for x in shlex.split(fcmd)]
3530 if self.fd_pipes is None:
3532 fd_pipes = self.fd_pipes
3534 # Redirect all output to stdout since some fetchers like
3535 # wget pollute stderr (if portage detects a problem then it
3536 # can send it's own message to stderr).
3537 fd_pipes.setdefault(0, sys.stdin.fileno())
3538 fd_pipes.setdefault(1, sys.stdout.fileno())
3539 fd_pipes.setdefault(2, sys.stdout.fileno())
3541 self.args = fetch_args
3542 self.env = fetch_env
3543 SpawnProcess._start(self)
3545 def _set_returncode(self, wait_retval):
3546 SpawnProcess._set_returncode(self, wait_retval)
3547 if self.returncode == os.EX_OK:
3548 # If possible, update the mtime to match the remote package if
3549 # the fetcher didn't already do it automatically.
3550 bintree = self.pkg.root_config.trees["bintree"]
3551 if bintree._remote_has_index:
3552 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3553 if remote_mtime is not None:
3555 remote_mtime = long(remote_mtime)
3560 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3564 if remote_mtime != local_mtime:
3566 os.utime(self.pkg_path,
3567 (remote_mtime, remote_mtime))
3576 This raises an AlreadyLocked exception if lock() is called
3577 while a lock is already held. In order to avoid this, call
3578 unlock() or check whether the "locked" attribute is True
3579 or False before calling lock().
3581 if self._lock_obj is not None:
3582 raise self.AlreadyLocked((self._lock_obj,))
3584 self._lock_obj = portage.locks.lockfile(
3585 self.pkg_path, wantnewlockfile=1)
3588 class AlreadyLocked(portage.exception.PortageException):
3592 if self._lock_obj is None:
3594 portage.locks.unlockfile(self._lock_obj)
3595 self._lock_obj = None
3598 class BinpkgVerifier(AsynchronousTask):
3599 __slots__ = ("logfile", "pkg",)
3603 Note: Unlike a normal AsynchronousTask.start() method,
3604 this one does all work is synchronously. The returncode
3605 attribute will be set before it returns.
3609 root_config = pkg.root_config
3610 bintree = root_config.trees["bintree"]
3612 stdout_orig = sys.stdout
3613 stderr_orig = sys.stderr
3615 if self.background and self.logfile is not None:
3616 log_file = open(self.logfile, 'a')
3618 if log_file is not None:
3619 sys.stdout = log_file
3620 sys.stderr = log_file
3622 bintree.digestCheck(pkg)
3623 except portage.exception.FileNotFound:
3624 writemsg("!!! Fetching Binary failed " + \
3625 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3627 except portage.exception.DigestException, e:
3628 writemsg("\n!!! Digest verification failed:\n",
3630 writemsg("!!! %s\n" % e.value[0],
3632 writemsg("!!! Reason: %s\n" % e.value[1],
3634 writemsg("!!! Got: %s\n" % e.value[2],
3636 writemsg("!!! Expected: %s\n" % e.value[3],
3639 if rval != os.EX_OK:
3640 pkg_path = bintree.getname(pkg.cpv)
3641 head, tail = os.path.split(pkg_path)
3642 temp_filename = portage._checksum_failure_temp_file(head, tail)
3643 writemsg("File renamed to '%s'\n" % (temp_filename,),
3646 sys.stdout = stdout_orig
3647 sys.stderr = stderr_orig
3648 if log_file is not None:
3651 self.returncode = rval
3654 class BinpkgPrefetcher(CompositeTask):
3656 __slots__ = ("pkg",) + \
3657 ("pkg_path", "_bintree",)
3660 self._bintree = self.pkg.root_config.trees["bintree"]
3661 fetcher = BinpkgFetcher(background=self.background,
3662 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3663 scheduler=self.scheduler)
3664 self.pkg_path = fetcher.pkg_path
3665 self._start_task(fetcher, self._fetcher_exit)
3667 def _fetcher_exit(self, fetcher):
3669 if self._default_exit(fetcher) != os.EX_OK:
3673 verifier = BinpkgVerifier(background=self.background,
3674 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3675 self._start_task(verifier, self._verifier_exit)
3677 def _verifier_exit(self, verifier):
3678 if self._default_exit(verifier) != os.EX_OK:
3682 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3684 self._current_task = None
3685 self.returncode = os.EX_OK
3688 class BinpkgExtractorAsync(SpawnProcess):
3690 __slots__ = ("image_dir", "pkg", "pkg_path")
3692 _shell_binary = portage.const.BASH_BINARY
3695 self.args = [self._shell_binary, "-c",
3696 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3697 (portage._shell_quote(self.pkg_path),
3698 portage._shell_quote(self.image_dir))]
3700 self.env = self.pkg.root_config.settings.environ()
3701 SpawnProcess._start(self)
3703 class MergeListItem(CompositeTask):
3706 TODO: For parallel scheduling, everything here needs asynchronous
3707 execution support (start, poll, and wait methods).
3710 __slots__ = ("args_set",
3711 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3712 "find_blockers", "logger", "mtimedb", "pkg",
3713 "pkg_count", "pkg_to_replace", "prefetcher",
3714 "settings", "statusMessage", "world_atom") + \
3720 build_opts = self.build_opts
3723 # uninstall, executed by self.merge()
3724 self.returncode = os.EX_OK
3728 args_set = self.args_set
3729 find_blockers = self.find_blockers
3730 logger = self.logger
3731 mtimedb = self.mtimedb
3732 pkg_count = self.pkg_count
3733 scheduler = self.scheduler
3734 settings = self.settings
3735 world_atom = self.world_atom
3736 ldpath_mtimes = mtimedb["ldpath"]
3738 action_desc = "Emerging"
3740 if pkg.type_name == "binary":
3741 action_desc += " binary"
3743 if build_opts.fetchonly:
3744 action_desc = "Fetching"
3746 msg = "%s (%s of %s) %s" % \
3748 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3749 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3750 colorize("GOOD", pkg.cpv))
3753 msg += " %s %s" % (preposition, pkg.root)
3755 if not build_opts.pretend:
3756 self.statusMessage(msg)
3757 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3758 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3760 if pkg.type_name == "ebuild":
3762 build = EbuildBuild(args_set=args_set,
3763 background=self.background,
3764 config_pool=self.config_pool,
3765 find_blockers=find_blockers,
3766 ldpath_mtimes=ldpath_mtimes, logger=logger,
3767 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3768 prefetcher=self.prefetcher, scheduler=scheduler,
3769 settings=settings, world_atom=world_atom)
3771 self._install_task = build
3772 self._start_task(build, self._default_final_exit)
3775 elif pkg.type_name == "binary":
3777 binpkg = Binpkg(background=self.background,
3778 find_blockers=find_blockers,
3779 ldpath_mtimes=ldpath_mtimes, logger=logger,
3780 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3781 prefetcher=self.prefetcher, settings=settings,
3782 scheduler=scheduler, world_atom=world_atom)
3784 self._install_task = binpkg
3785 self._start_task(binpkg, self._default_final_exit)
3789 self._install_task.poll()
3790 return self.returncode
3793 self._install_task.wait()
3794 return self.returncode
3799 build_opts = self.build_opts
3800 find_blockers = self.find_blockers
3801 logger = self.logger
3802 mtimedb = self.mtimedb
3803 pkg_count = self.pkg_count
3804 prefetcher = self.prefetcher
3805 scheduler = self.scheduler
3806 settings = self.settings
3807 world_atom = self.world_atom
3808 ldpath_mtimes = mtimedb["ldpath"]
3811 if not (build_opts.buildpkgonly or \
3812 build_opts.fetchonly or build_opts.pretend):
3814 uninstall = PackageUninstall(background=self.background,
3815 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3816 pkg=pkg, scheduler=scheduler, settings=settings)
3819 retval = uninstall.wait()
3820 if retval != os.EX_OK:
3824 if build_opts.fetchonly or \
3825 build_opts.buildpkgonly:
3826 return self.returncode
3828 retval = self._install_task.install()
3831 class PackageMerge(AsynchronousTask):
3833 TODO: Implement asynchronous merge so that the scheduler can
3834 run while a merge is executing.
3837 __slots__ = ("merge",)
3841 pkg = self.merge.pkg
3842 pkg_count = self.merge.pkg_count
3845 action_desc = "Uninstalling"
3846 preposition = "from"
3848 action_desc = "Installing"
3851 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3854 msg += " %s %s" % (preposition, pkg.root)
3856 if not self.merge.build_opts.fetchonly and \
3857 not self.merge.build_opts.pretend and \
3858 not self.merge.build_opts.buildpkgonly:
3859 self.merge.statusMessage(msg)
3861 self.returncode = self.merge.merge()
3864 class DependencyArg(object):
3865 def __init__(self, arg=None, root_config=None):
3867 self.root_config = root_config
3870 return str(self.arg)
3872 class AtomArg(DependencyArg):
3873 def __init__(self, atom=None, **kwargs):
3874 DependencyArg.__init__(self, **kwargs)
3876 if not isinstance(self.atom, portage.dep.Atom):
3877 self.atom = portage.dep.Atom(self.atom)
3878 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument that wraps one specific Package instance.

	The generated "=" atom matches exactly that package's cpv, and the
	single-atom tuple in self.set mirrors the sibling argument classes.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom,)
3887 class SetArg(DependencyArg):
3888 def __init__(self, set=None, **kwargs):
3889 DependencyArg.__init__(self, **kwargs)
3891 self.name = self.arg[len(SETPREFIX):]
3893 class Dependency(SlotObject):
3894 __slots__ = ("atom", "blocker", "depth",
3895 "parent", "onlydeps", "priority", "root")
3896 def __init__(self, **kwargs):
3897 SlotObject.__init__(self, **kwargs)
3898 if self.priority is None:
3899 self.priority = DepPriority()
3900 if self.depth is None:
3903 class BlockerCache(DictMixin):
3904 """This caches blockers of installed packages so that dep_check does not
3905 have to be done for every single installed package on every invocation of
3906 emerge. The cache is invalidated whenever it is detected that something
3907 has changed that might alter the results of dep_check() calls:
3908 1) the set of installed packages (including COUNTER) has changed
3909 2) the old-style virtuals have changed
3912 # Number of uncached packages to trigger cache update, since
3913 # it's wasteful to update it for every vdb change.
3914 _cache_threshold = 5
3916 class BlockerData(object):
3918 __slots__ = ("__weakref__", "atoms", "counter")
3920 def __init__(self, counter, atoms):
3921 self.counter = counter
3924 def __init__(self, myroot, vardb):
3926 self._virtuals = vardb.settings.getvirtuals()
3927 self._cache_filename = os.path.join(myroot,
3928 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3929 self._cache_version = "1"
3930 self._cache_data = None
3931 self._modified = set()
3936 f = open(self._cache_filename)
3937 mypickle = pickle.Unpickler(f)
3938 mypickle.find_global = None
3939 self._cache_data = mypickle.load()
3942 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3943 if isinstance(e, pickle.UnpicklingError):
3944 writemsg("!!! Error loading '%s': %s\n" % \
3945 (self._cache_filename, str(e)), noiselevel=-1)
3948 cache_valid = self._cache_data and \
3949 isinstance(self._cache_data, dict) and \
3950 self._cache_data.get("version") == self._cache_version and \
3951 isinstance(self._cache_data.get("blockers"), dict)
3953 # Validate all the atoms and counters so that
3954 # corruption is detected as soon as possible.
3955 invalid_items = set()
3956 for k, v in self._cache_data["blockers"].iteritems():
3957 if not isinstance(k, basestring):
3958 invalid_items.add(k)
3961 if portage.catpkgsplit(k) is None:
3962 invalid_items.add(k)
3964 except portage.exception.InvalidData:
3965 invalid_items.add(k)
3967 if not isinstance(v, tuple) or \
3969 invalid_items.add(k)
3972 if not isinstance(counter, (int, long)):
3973 invalid_items.add(k)
3975 if not isinstance(atoms, (list, tuple)):
3976 invalid_items.add(k)
3978 invalid_atom = False
3980 if not isinstance(atom, basestring):
3983 if atom[:1] != "!" or \
3984 not portage.isvalidatom(
3985 atom, allow_blockers=True):
3989 invalid_items.add(k)
3992 for k in invalid_items:
3993 del self._cache_data["blockers"][k]
3994 if not self._cache_data["blockers"]:
3998 self._cache_data = {"version":self._cache_version}
3999 self._cache_data["blockers"] = {}
4000 self._cache_data["virtuals"] = self._virtuals
4001 self._modified.clear()
4004 """If the current user has permission and the internal blocker cache
has been updated, save it to disk and mark it unmodified. This is called
by emerge after it has processed blockers for all installed packages.
4007 Currently, the cache is only written if the user has superuser
4008 privileges (since that's required to obtain a lock), but all users
4009 have read access and benefit from faster blocker lookups (as long as
4010 the entire cache is still valid). The cache is stored as a pickled
4011 dict object with the following format:
4015 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4016 "virtuals" : vardb.settings.getvirtuals()
4019 if len(self._modified) >= self._cache_threshold and \
4022 f = portage.util.atomic_ofstream(self._cache_filename)
4023 pickle.dump(self._cache_data, f, -1)
4025 portage.util.apply_secpass_permissions(
4026 self._cache_filename, gid=portage.portage_gid, mode=0644)
4027 except (IOError, OSError), e:
4029 self._modified.clear()
4031 def __setitem__(self, cpv, blocker_data):
4033 Update the cache and mark it as modified for a future call to
4036 @param cpv: Package for which to cache blockers.
4038 @param blocker_data: An object with counter and atoms attributes.
4039 @type blocker_data: BlockerData
4041 self._cache_data["blockers"][cpv] = \
4042 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4043 self._modified.add(cpv)
4046 if self._cache_data is None:
4047 # triggered by python-trace
4049 return iter(self._cache_data["blockers"])
4051 def __delitem__(self, cpv):
4052 del self._cache_data["blockers"][cpv]
4054 def __getitem__(self, cpv):
4057 @returns: An object with counter and atoms attributes.
4059 return self.BlockerData(*self._cache_data["blockers"][cpv])
4062 """This needs to be implemented so that self.__repr__() doesn't raise
4063 an AttributeError."""
4066 class BlockerDB(object):
4068 def __init__(self, root_config):
4069 self._root_config = root_config
4070 self._vartree = root_config.trees["vartree"]
4071 self._portdb = root_config.trees["porttree"].dbapi
4073 self._dep_check_trees = None
4074 self._fake_vartree = None
4076 def _get_fake_vartree(self, acquire_lock=0):
4077 fake_vartree = self._fake_vartree
4078 if fake_vartree is None:
4079 fake_vartree = FakeVartree(self._root_config,
4080 acquire_lock=acquire_lock)
4081 self._fake_vartree = fake_vartree
4082 self._dep_check_trees = { self._vartree.root : {
4083 "porttree" : fake_vartree,
4084 "vartree" : fake_vartree,
4087 fake_vartree.sync(acquire_lock=acquire_lock)
4090 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4091 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4092 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4093 settings = self._vartree.settings
4094 stale_cache = set(blocker_cache)
4095 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4096 dep_check_trees = self._dep_check_trees
4097 vardb = fake_vartree.dbapi
4098 installed_pkgs = list(vardb)
4100 for inst_pkg in installed_pkgs:
4101 stale_cache.discard(inst_pkg.cpv)
4102 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4103 if cached_blockers is not None and \
4104 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4105 cached_blockers = None
4106 if cached_blockers is not None:
4107 blocker_atoms = cached_blockers.atoms
4109 # Use aux_get() to trigger FakeVartree global
4110 # updates on *DEPEND when appropriate.
4111 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4113 portage.dep._dep_check_strict = False
4114 success, atoms = portage.dep_check(depstr,
4115 vardb, settings, myuse=inst_pkg.use.enabled,
4116 trees=dep_check_trees, myroot=inst_pkg.root)
4118 portage.dep._dep_check_strict = True
4120 pkg_location = os.path.join(inst_pkg.root,
4121 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4122 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4123 (pkg_location, atoms), noiselevel=-1)
4126 blocker_atoms = [atom for atom in atoms \
4127 if atom.startswith("!")]
4128 blocker_atoms.sort()
4129 counter = long(inst_pkg.metadata["COUNTER"])
4130 blocker_cache[inst_pkg.cpv] = \
4131 blocker_cache.BlockerData(counter, blocker_atoms)
4132 for cpv in stale_cache:
4133 del blocker_cache[cpv]
4134 blocker_cache.flush()
4136 blocker_parents = digraph()
4138 for pkg in installed_pkgs:
4139 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4140 blocker_atom = blocker_atom.lstrip("!")
4141 blocker_atoms.append(blocker_atom)
4142 blocker_parents.add(blocker_atom, pkg)
4144 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4145 blocking_pkgs = set()
4146 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4147 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4149 # Check for blockers in the other direction.
4150 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4152 portage.dep._dep_check_strict = False
4153 success, atoms = portage.dep_check(depstr,
4154 vardb, settings, myuse=new_pkg.use.enabled,
4155 trees=dep_check_trees, myroot=new_pkg.root)
4157 portage.dep._dep_check_strict = True
4159 # We should never get this far with invalid deps.
4160 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4163 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4166 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4167 for inst_pkg in installed_pkgs:
4169 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4170 except (portage.exception.InvalidDependString, StopIteration):
4172 blocking_pkgs.add(inst_pkg)
4174 return blocking_pkgs
4176 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4178 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4179 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4180 p_type, p_root, p_key, p_status = parent_node
4182 if p_status == "nomerge":
4183 category, pf = portage.catsplit(p_key)
4184 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4185 msg.append("Portage is unable to process the dependencies of the ")
4186 msg.append("'%s' package. " % p_key)
4187 msg.append("In order to correct this problem, the package ")
4188 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4189 msg.append("As a temporary workaround, the --nodeps option can ")
4190 msg.append("be used to ignore all dependencies. For reference, ")
4191 msg.append("the problematic dependencies can be found in the ")
4192 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4194 msg.append("This package can not be installed. ")
4195 msg.append("Please notify the '%s' package maintainer " % p_key)
4196 msg.append("about this problem.")
4198 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4199 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4201 class PackageVirtualDbapi(portage.dbapi):
4203 A dbapi-like interface class that represents the state of the installed
4204 package database as new packages are installed, replacing any packages
4205 that previously existed in the same slot. The main difference between
4206 this class and fakedbapi is that this one uses Package instances
4207 internally (passed in via cpv_inject() and cpv_remove() calls).
4209 def __init__(self, settings):
4210 portage.dbapi.__init__(self)
4211 self.settings = settings
4212 self._match_cache = {}
4218 Remove all packages.
4222 self._cp_map.clear()
4223 self._cpv_map.clear()
4226 obj = PackageVirtualDbapi(self.settings)
4227 obj._match_cache = self._match_cache.copy()
4228 obj._cp_map = self._cp_map.copy()
4229 for k, v in obj._cp_map.iteritems():
4230 obj._cp_map[k] = v[:]
4231 obj._cpv_map = self._cpv_map.copy()
4235 return self._cpv_map.itervalues()
4237 def __contains__(self, item):
4238 existing = self._cpv_map.get(item.cpv)
4239 if existing is not None and \
4244 def get(self, item, default=None):
4245 cpv = getattr(item, "cpv", None)
4249 type_name, root, cpv, operation = item
4251 existing = self._cpv_map.get(cpv)
4252 if existing is not None and \
4257 def match_pkgs(self, atom):
4258 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4260 def _clear_cache(self):
4261 if self._categories is not None:
4262 self._categories = None
4263 if self._match_cache:
4264 self._match_cache = {}
4266 def match(self, origdep, use_cache=1):
4267 result = self._match_cache.get(origdep)
4268 if result is not None:
4270 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4271 self._match_cache[origdep] = result
4274 def cpv_exists(self, cpv):
4275 return cpv in self._cpv_map
4277 def cp_list(self, mycp, use_cache=1):
4278 cachelist = self._match_cache.get(mycp)
4279 # cp_list() doesn't expand old-style virtuals
4280 if cachelist and cachelist[0].startswith(mycp):
4282 cpv_list = self._cp_map.get(mycp)
4283 if cpv_list is None:
4286 cpv_list = [pkg.cpv for pkg in cpv_list]
4287 self._cpv_sort_ascending(cpv_list)
4288 if not (not cpv_list and mycp.startswith("virtual/")):
4289 self._match_cache[mycp] = cpv_list
4293 return list(self._cp_map)
4296 return list(self._cpv_map)
4298 def cpv_inject(self, pkg):
4299 cp_list = self._cp_map.get(pkg.cp)
4302 self._cp_map[pkg.cp] = cp_list
4303 e_pkg = self._cpv_map.get(pkg.cpv)
4304 if e_pkg is not None:
4307 self.cpv_remove(e_pkg)
4308 for e_pkg in cp_list:
4309 if e_pkg.slot_atom == pkg.slot_atom:
4312 self.cpv_remove(e_pkg)
4315 self._cpv_map[pkg.cpv] = pkg
4318 def cpv_remove(self, pkg):
4319 old_pkg = self._cpv_map.get(pkg.cpv)
4322 self._cp_map[pkg.cp].remove(pkg)
4323 del self._cpv_map[pkg.cpv]
4326 def aux_get(self, cpv, wants):
4327 metadata = self._cpv_map[cpv].metadata
4328 return [metadata.get(x, "") for x in wants]
4330 def aux_update(self, cpv, values):
4331 self._cpv_map[cpv].metadata.update(values)
4334 class depgraph(object):
4336 pkg_tree_map = RootConfig.pkg_tree_map
4338 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4340 def __init__(self, settings, trees, myopts, myparams, spinner):
4341 self.settings = settings
4342 self.target_root = settings["ROOT"]
4343 self.myopts = myopts
4344 self.myparams = myparams
4346 if settings.get("PORTAGE_DEBUG", "") == "1":
4348 self.spinner = spinner
4349 self._running_root = trees["/"]["root_config"]
4350 self._opts_no_restart = Scheduler._opts_no_restart
4351 self.pkgsettings = {}
4352 # Maps slot atom to package for each Package added to the graph.
4353 self._slot_pkg_map = {}
4354 # Maps nodes to the reasons they were selected for reinstallation.
4355 self._reinstall_nodes = {}
4358 self._trees_orig = trees
4360 # Contains a filtered view of preferred packages that are selected
4361 # from available repositories.
4362 self._filtered_trees = {}
4363 # Contains installed packages and new packages that have been added
4365 self._graph_trees = {}
4366 # All Package instances
4367 self._pkg_cache = {}
4368 for myroot in trees:
4369 self.trees[myroot] = {}
4370 # Create a RootConfig instance that references
4371 # the FakeVartree instead of the real one.
4372 self.roots[myroot] = RootConfig(
4373 trees[myroot]["vartree"].settings,
4375 trees[myroot]["root_config"].setconfig)
4376 for tree in ("porttree", "bintree"):
4377 self.trees[myroot][tree] = trees[myroot][tree]
4378 self.trees[myroot]["vartree"] = \
4379 FakeVartree(trees[myroot]["root_config"],
4380 pkg_cache=self._pkg_cache)
4381 self.pkgsettings[myroot] = portage.config(
4382 clone=self.trees[myroot]["vartree"].settings)
4383 self._slot_pkg_map[myroot] = {}
4384 vardb = self.trees[myroot]["vartree"].dbapi
4385 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4386 "--buildpkgonly" not in self.myopts
4387 # This fakedbapi instance will model the state that the vdb will
4388 # have after new packages have been installed.
4389 fakedb = PackageVirtualDbapi(vardb.settings)
4390 if preload_installed_pkgs:
4392 self.spinner.update()
4393 # This triggers metadata updates via FakeVartree.
4394 vardb.aux_get(pkg.cpv, [])
4395 fakedb.cpv_inject(pkg)
4397 # Now that the vardb state is cached in our FakeVartree,
4398 # we won't be needing the real vartree cache for awhile.
4399 # To make some room on the heap, clear the vardbapi
4401 trees[myroot]["vartree"].dbapi._clear_cache()
4404 self.mydbapi[myroot] = fakedb
4407 graph_tree.dbapi = fakedb
4408 self._graph_trees[myroot] = {}
4409 self._filtered_trees[myroot] = {}
4410 # Substitute the graph tree for the vartree in dep_check() since we
4411 # want atom selections to be consistent with package selections
# that have already been made.
4413 self._graph_trees[myroot]["porttree"] = graph_tree
4414 self._graph_trees[myroot]["vartree"] = graph_tree
4415 def filtered_tree():
4417 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4418 self._filtered_trees[myroot]["porttree"] = filtered_tree
4420 # Passing in graph_tree as the vartree here could lead to better
4421 # atom selections in some cases by causing atoms for packages that
4422 # have been added to the graph to be preferred over other choices.
4423 # However, it can trigger atom selections that result in
4424 # unresolvable direct circular dependencies. For example, this
4425 # happens with gwydion-dylan which depends on either itself or
4426 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
# gwydion-dylan-bin needs to be selected in order to avoid
# an unresolvable direct circular dependency.
4430 # To solve the problem described above, pass in "graph_db" so that
4431 # packages that have been added to the graph are distinguishable
4432 # from other available packages and installed packages. Also, pass
4433 # the parent package into self._select_atoms() calls so that
4434 # unresolvable direct circular dependencies can be detected and
4435 # avoided when possible.
4436 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4437 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4440 portdb = self.trees[myroot]["porttree"].dbapi
4441 bindb = self.trees[myroot]["bintree"].dbapi
4442 vardb = self.trees[myroot]["vartree"].dbapi
4443 # (db, pkg_type, built, installed, db_keys)
4444 if "--usepkgonly" not in self.myopts:
4445 db_keys = list(portdb._aux_cache_keys)
4446 dbs.append((portdb, "ebuild", False, False, db_keys))
4447 if "--usepkg" in self.myopts:
4448 db_keys = list(bindb._aux_cache_keys)
4449 dbs.append((bindb, "binary", True, False, db_keys))
4450 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4451 dbs.append((vardb, "installed", True, True, db_keys))
4452 self._filtered_trees[myroot]["dbs"] = dbs
4453 if "--usepkg" in self.myopts:
4454 self.trees[myroot]["bintree"].populate(
4455 "--getbinpkg" in self.myopts,
4456 "--getbinpkgonly" in self.myopts)
4459 self.digraph=portage.digraph()
4460 # contains all sets added to the graph
4462 # contains atoms given as arguments
4463 self._sets["args"] = InternalPackageSet()
4464 # contains all atoms from all sets added to the graph, including
4465 # atoms given as arguments
4466 self._set_atoms = InternalPackageSet()
4467 self._atom_arg_map = {}
4468 # contains all nodes pulled in by self._set_atoms
4469 self._set_nodes = set()
4470 # Contains only Blocker -> Uninstall edges
4471 self._blocker_uninstalls = digraph()
4472 # Contains only Package -> Blocker edges
4473 self._blocker_parents = digraph()
4474 # Contains only irrelevant Package -> Blocker edges
4475 self._irrelevant_blockers = digraph()
4476 # Contains only unsolvable Package -> Blocker edges
4477 self._unsolvable_blockers = digraph()
4478 self._slot_collision_info = {}
4479 # Slot collision nodes are not allowed to block other packages since
4480 # blocker validation is only able to account for one package per slot.
4481 self._slot_collision_nodes = set()
4482 self._parent_atoms = {}
4483 self._slot_conflict_parent_atoms = set()
4484 self._serialized_tasks_cache = None
4485 self._scheduler_graph = None
4486 self._displayed_list = None
4487 self._pprovided_args = []
4488 self._missing_args = []
4489 self._masked_installed = set()
4490 self._unsatisfied_deps_for_display = []
4491 self._unsatisfied_blockers_for_display = None
4492 self._circular_deps_for_display = None
4493 self._dep_stack = []
4494 self._unsatisfied_deps = []
4495 self._initially_unsatisfied_deps = []
4496 self._ignored_deps = []
4497 self._required_set_names = set(["system", "world"])
4498 self._select_atoms = self._select_atoms_highest_available
4499 self._select_package = self._select_pkg_highest_available
4500 self._highest_pkg_cache = {}
4502 def _show_slot_collision_notice(self):
4503 """Show an informational message advising the user to mask one of the
packages. In some cases it may be possible to resolve this
automatically, but support for backtracking (removal of nodes that have
4506 already been selected) will be required in order to handle all possible
4510 if not self._slot_collision_info:
4513 self._show_merge_list()
4516 msg.append("\n!!! Multiple package instances within a single " + \
4517 "package slot have been pulled\n")
4518 msg.append("!!! into the dependency graph, resulting" + \
4519 " in a slot conflict:\n\n")
4521 # Max number of parents shown, to avoid flooding the display.
4523 explanation_columns = 70
4525 for (slot_atom, root), slot_nodes \
4526 in self._slot_collision_info.iteritems():
4527 msg.append(str(slot_atom))
4530 for node in slot_nodes:
4532 msg.append(str(node))
4533 parent_atoms = self._parent_atoms.get(node)
4536 # Prefer conflict atoms over others.
4537 for parent_atom in parent_atoms:
4538 if len(pruned_list) >= max_parents:
4540 if parent_atom in self._slot_conflict_parent_atoms:
4541 pruned_list.add(parent_atom)
4543 # If this package was pulled in by conflict atoms then
4544 # show those alone since those are the most interesting.
4546 # When generating the pruned list, prefer instances
4547 # of DependencyArg over instances of Package.
4548 for parent_atom in parent_atoms:
4549 if len(pruned_list) >= max_parents:
4551 parent, atom = parent_atom
4552 if isinstance(parent, DependencyArg):
4553 pruned_list.add(parent_atom)
4554 # Prefer Packages instances that themselves have been
4555 # pulled into collision slots.
4556 for parent_atom in parent_atoms:
4557 if len(pruned_list) >= max_parents:
4559 parent, atom = parent_atom
4560 if isinstance(parent, Package) and \
4561 (parent.slot_atom, parent.root) \
4562 in self._slot_collision_info:
4563 pruned_list.add(parent_atom)
4564 for parent_atom in parent_atoms:
4565 if len(pruned_list) >= max_parents:
4567 pruned_list.add(parent_atom)
4568 omitted_parents = len(parent_atoms) - len(pruned_list)
4569 parent_atoms = pruned_list
4570 msg.append(" pulled in by\n")
4571 for parent_atom in parent_atoms:
4572 parent, atom = parent_atom
4573 msg.append(2*indent)
4574 if isinstance(parent,
4575 (PackageArg, AtomArg)):
4576 # For PackageArg and AtomArg types, it's
4577 # redundant to display the atom attribute.
4578 msg.append(str(parent))
4580 # Display the specific atom from SetArg or
4582 msg.append("%s required by %s" % (atom, parent))
4585 msg.append(2*indent)
4586 msg.append("(and %d more)\n" % omitted_parents)
4588 msg.append(" (no parents)\n")
4590 explanation = self._slot_conflict_explanation(slot_nodes)
4593 msg.append(indent + "Explanation:\n\n")
4594 for line in textwrap.wrap(explanation, explanation_columns):
4595 msg.append(2*indent + line + "\n")
4598 sys.stderr.write("".join(msg))
4601 explanations_for_all = explanations == len(self._slot_collision_info)
4603 if explanations_for_all or "--quiet" in self.myopts:
4607 msg.append("It may be possible to solve this problem ")
4608 msg.append("by using package.mask to prevent one of ")
4609 msg.append("those packages from being selected. ")
4610 msg.append("However, it is also possible that conflicting ")
4611 msg.append("dependencies exist such that they are impossible to ")
4612 msg.append("satisfy simultaneously. If such a conflict exists in ")
4613 msg.append("the dependencies of two different packages, then those ")
4614 msg.append("packages can not be installed simultaneously.")
4616 from formatter import AbstractFormatter, DumbWriter
4617 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4619 f.add_flowing_data(x)
4623 msg.append("For more information, see MASKED PACKAGES ")
4624 msg.append("section in the emerge man page or refer ")
4625 msg.append("to the Gentoo Handbook.")
4627 f.add_flowing_data(x)
4631 def _slot_conflict_explanation(self, slot_nodes):
4633 When a slot conflict occurs due to USE deps, there are a few
4634 different cases to consider:
4636 1) New USE are correctly set but --newuse wasn't requested so an
4637 installed package with incorrect USE happened to get pulled
4638 into graph before the new one.
4640 2) New USE are incorrectly set but an installed package has correct
4641 USE so it got pulled into the graph, and a new instance also got
4642 pulled in due to --newuse or an upgrade.
4644 3) Multiple USE deps exist that can't be satisfied simultaneously,
4645 and multiple package instances got pulled into the same slot to
4646 satisfy the conflicting deps.
4648 Currently, explanations and suggested courses of action are generated
4649 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4652 if len(slot_nodes) != 2:
4653 # Suggestions are only implemented for
4654 # conflicts between two packages.
4657 all_conflict_atoms = self._slot_conflict_parent_atoms
4659 matched_atoms = None
4660 unmatched_node = None
4661 for node in slot_nodes:
4662 parent_atoms = self._parent_atoms.get(node)
4663 if not parent_atoms:
4664 # Normally, there are always parent atoms. If there are
4665 # none then something unexpected is happening and there's
4666 # currently no suggestion for this case.
4668 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4669 for parent_atom in conflict_atoms:
4670 parent, atom = parent_atom
4672 # Suggestions are currently only implemented for cases
4673 # in which all conflict atoms have USE deps.
4676 if matched_node is not None:
4677 # If conflict atoms match multiple nodes
4678 # then there's no suggestion.
4681 matched_atoms = conflict_atoms
4683 if unmatched_node is not None:
4684 # Neither node is matched by conflict atoms, and
4685 # there is no suggestion for this case.
4687 unmatched_node = node
4689 if matched_node is None or unmatched_node is None:
4690 # This shouldn't happen.
4693 if unmatched_node.installed and not matched_node.installed:
4694 return "New USE are correctly set, but --newuse wasn't" + \
4695 " requested, so an installed package with incorrect USE " + \
4696 "happened to get pulled into the dependency graph. " + \
4697 "In order to solve " + \
4698 "this, either specify the --newuse option or explicitly " + \
4699 " reinstall '%s'." % matched_node.slot_atom
4701 if matched_node.installed and not unmatched_node.installed:
4702 atoms = sorted(set(atom for parent, atom in matched_atoms))
4703 explanation = ("New USE for '%s' are incorrectly set. " + \
4704 "In order to solve this, adjust USE to satisfy '%s'") % \
4705 (matched_node.slot_atom, atoms[0])
4707 for atom in atoms[1:-1]:
4708 explanation += ", '%s'" % (atom,)
4711 explanation += " and '%s'" % (atoms[-1],)
4717 def _process_slot_conflicts(self):
4719 Process slot conflict data to identify specific atoms which
4720 lead to conflict. These atoms only match a subset of the
4721 packages that have been pulled into a given slot.
4723 for (slot_atom, root), slot_nodes \
4724 in self._slot_collision_info.iteritems():
4726 all_parent_atoms = set()
4727 for pkg in slot_nodes:
4728 parent_atoms = self._parent_atoms.get(pkg)
4729 if not parent_atoms:
4731 all_parent_atoms.update(parent_atoms)
4733 for pkg in slot_nodes:
4734 parent_atoms = self._parent_atoms.get(pkg)
4735 if parent_atoms is None:
4736 parent_atoms = set()
4737 self._parent_atoms[pkg] = parent_atoms
4738 for parent_atom in all_parent_atoms:
4739 if parent_atom in parent_atoms:
4741 # Use package set for matching since it will match via
4742 # PROVIDE when necessary, while match_from_list does not.
4743 parent, atom = parent_atom
4744 atom_set = InternalPackageSet(
4745 initial_atoms=(atom,))
4746 if atom_set.findAtomForPackage(pkg):
4747 parent_atoms.add(parent_atom)
4749 self._slot_conflict_parent_atoms.add(parent_atom)
4751 def _reinstall_for_flags(self, forced_flags,
4752 orig_use, orig_iuse, cur_use, cur_iuse):
4753 """Return a set of flags that trigger reinstallation, or None if there
4754 are no such flags."""
4755 if "--newuse" in self.myopts:
4756 flags = set(orig_iuse.symmetric_difference(
4757 cur_iuse).difference(forced_flags))
4758 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4759 cur_iuse.intersection(cur_use)))
4762 elif "changed-use" == self.myopts.get("--reinstall"):
4763 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4764 cur_iuse.intersection(cur_use))
4769 def _create_graph(self, allow_unsatisfied=False):
4770 dep_stack = self._dep_stack
4772 self.spinner.update()
4773 dep = dep_stack.pop()
4774 if isinstance(dep, Package):
4775 if not self._add_pkg_deps(dep,
4776 allow_unsatisfied=allow_unsatisfied):
4779 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
    def _add_dep(self, dep, allow_unsatisfied=False):
        """Resolve a single Dependency: either record it as a blocker or
        select a matching package and add it to the graph.

        NOTE(review): several control-flow lines (the dep.blocker guard,
        try/except headers and early returns) are elided from this excerpt.
        """
        debug = "--debug" in self.myopts
        buildpkgonly = "--buildpkgonly" in self.myopts
        nodeps = "--nodeps" in self.myopts
        empty = "empty" in self.myparams
        deep = "deep" in self.myparams
        # --update only applies to direct deps (depth <= 1).
        update = "--update" in self.myopts and dep.depth <= 1
        if not buildpkgonly and \
            dep.parent not in self._slot_collision_nodes:
            if dep.parent.onlydeps:
                # It's safe to ignore blockers if the
                # parent is an --onlydeps node.
            # The blocker applies to the root where
            # the parent is or will be installed.
            blocker = Blocker(atom=dep.atom,
                eapi=dep.parent.metadata["EAPI"],
                root=dep.parent.root)
            self._blocker_parents.add(blocker, dep.parent)
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
        # No package satisfies the atom (guard elided in excerpt):
        if allow_unsatisfied:
            # Caller will retry later (e.g. after backtracking).
            self._unsatisfied_deps.append(dep)
        self._unsatisfied_deps_for_display.append(
            ((dep.root, dep.atom), {"myparent":dep.parent}))
        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if dep.priority.satisfied and \
            not (existing_node or empty or deep or update):
            if dep.root == self.target_root:
                # Only keep the dep if an argument atom pulls it in.
                myarg = self._iter_atoms_for_pkg(dep_pkg).next()
                except StopIteration:
                except portage.exception.InvalidDependString:
                    if not dep_pkg.installed:
                        # This shouldn't happen since the package
                        # should have been masked.
                # Remember it so merge-order optimization can still
                # consult it even though it isn't expanded further.
                self._ignored_deps.append(dep)
        if not self._add_pkg(dep_pkg, dep):
    def _add_pkg(self, pkg, dep):
        """Add a selected package (and the edge from its parent) to the
        dependency graph, handling slot collisions and re-use of existing
        slot occupants.

        NOTE(review): try/except headers, return statements and several
        guards are elided from this excerpt.
        """
        myparent = dep.parent
        priority = dep.priority
        if priority is None:
            priority = DepPriority()
        """
        Fills the digraph with nodes comprised of packages to merge.
        mybigkey is the package spec of the package to merge.
        myparent is the package depending on mybigkey ( or None )
        addme = Should we add this package to the digraph or are we just looking at it's deps?
        Think --onlydeps, we need to ignore packages in that case.
        """
        #IUSE-aware emerge -> USE DEP aware depgraph
        #"no downgrade" emerge
        # Ensure that the dependencies of the same package
        # are never processed more than once.
        previously_added = pkg in self.digraph
        # select the correct /var database that we'll be checking against
        vardbapi = self.trees[pkg.root]["vartree"].dbapi
        pkgsettings = self.pkgsettings[pkg.root]
        # (try: elided) find which argument atoms, if any, match this pkg.
        arg_atoms = list(self._iter_atoms_for_pkg(pkg))
        except portage.exception.InvalidDependString, e:
            if not pkg.installed:
                show_invalid_depstring_notice(
                    pkg, pkg.metadata["PROVIDE"], str(e))
        if not pkg.onlydeps:
            if not pkg.installed and \
                "empty" not in self.myparams and \
                vardbapi.match(pkg.slot_atom):
                # Increase the priority of dependencies on packages that
                # are being rebuilt. This optimizes merge order so that
                # dependencies are rebuilt/updated as soon as possible,
                # which is needed especially when emerge is called by
                # revdep-rebuild since dependencies may be affected by ABI
                # breakage that has rendered them useless. Don't adjust
                # priority here when in "empty" mode since all packages
                # are being merged in that case.
                priority.rebuild = True
            existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
            slot_collision = False
            # (if existing_node: elided)
            existing_node_matches = pkg.cpv == existing_node.cpv
            if existing_node_matches and \
                pkg != existing_node and \
                dep.atom is not None:
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                atom_set = InternalPackageSet(initial_atoms=[dep.atom])
                if not atom_set.findAtomForPackage(existing_node):
                    existing_node_matches = False
            if existing_node_matches:
                # The existing node can be reused.
                for parent_atom in arg_atoms:
                    parent, atom = parent_atom
                    self.digraph.add(existing_node, parent,
                    self._add_parent_atom(existing_node, parent_atom)
                # If a direct circular dependency is not an unsatisfied
                # buildtime dependency then drop it here since otherwise
                # it can skew the merge order calculation in an unwanted
                if existing_node != myparent or \
                    (priority.buildtime and not priority.satisfied):
                    self.digraph.addnode(existing_node, myparent,
                    if dep.atom is not None and dep.parent is not None:
                        self._add_parent_atom(existing_node,
                            (dep.parent, dep.atom))
            # A slot collision has occurred. Sometimes this coincides
            # with unresolvable blockers, so the slot collision will be
            # shown later if there are no unresolvable blockers.
            self._add_slot_conflict(pkg)
            slot_collision = True
        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output. This node is
        # only being partially added to the graph. It must not be
        # allowed to interfere with the other nodes that have been
        # added. Do not overwrite data for existing nodes in
        # self.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
        # Register this package as the occupant of its slot.
        self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
        self.mydbapi[pkg.root].cpv_inject(pkg)
        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self.roots[pkg.root].settings
            settings.setinst(pkg.cpv, pkg.metadata)
            except portage.exception.InvalidDependString, e:
                show_invalid_depstring_notice(
                    pkg, pkg.metadata["PROVIDE"], str(e))
        # Track nodes pulled in by argument/set atoms.
        self._set_nodes.add(pkg)
        # Do this even when addme is False (--onlydeps) so that the
        # parent/child relationship is always known in case
        # self._show_slot_collision_notice() needs to be called later.
        self.digraph.add(pkg, myparent, priority=priority)
        if dep.atom is not None and dep.parent is not None:
            self._add_parent_atom(pkg, (dep.parent, dep.atom))
        for parent_atom in arg_atoms:
            parent, atom = parent_atom
            self.digraph.add(pkg, parent, priority=priority)
            self._add_parent_atom(pkg, parent_atom)
        """ This section determines whether we go deeper into dependencies or not.
        We want to go deeper on a few occasions:
        Installing package A, we need to make sure package A's deps are met.
        emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
        If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
        """
        dep_stack = self._dep_stack
        if "recurse" not in self.myparams:
        elif pkg.installed and \
            "deep" not in self.myparams:
            # Installed packages' deps are only traversed with --deep;
            # otherwise park them on the ignored-deps list.
            dep_stack = self._ignored_deps
        self.spinner.update()
        if not previously_added:
            dep_stack.append(pkg)
5007 def _add_parent_atom(self, pkg, parent_atom):
5008 parent_atoms = self._parent_atoms.get(pkg)
5009 if parent_atoms is None:
5010 parent_atoms = set()
5011 self._parent_atoms[pkg] = parent_atoms
5012 parent_atoms.add(parent_atom)
    def _add_slot_conflict(self, pkg):
        """Record `pkg` as a slot-conflict participant alongside whatever
        package already occupies its (slot_atom, root) slot in the graph."""
        self._slot_collision_nodes.add(pkg)
        slot_key = (pkg.slot_atom, pkg.root)
        slot_nodes = self._slot_collision_info.get(slot_key)
        if slot_nodes is None:
            # First conflict for this slot: seed the set with the current
            # slot occupant.  NOTE(review): the "slot_nodes = set()" line
            # is elided from this excerpt.
            slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
            self._slot_collision_info[slot_key] = slot_nodes
    def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
        """Parse pkg's DEPEND/RDEPEND/PDEPEND strings and queue each
        selected atom as a Dependency via self._add_dep().

        NOTE(review): the "edepend = {}" / "for k in depkeys:" header, the
        "deps = (" tuple header, and try/except scaffolding are elided
        from this excerpt.
        """
        mytype = pkg.type_name
        metadata = pkg.metadata
        myuse = pkg.use.enabled
        depth = pkg.depth + 1
        removal_action = "remove" in self.myparams
        depkeys = ["DEPEND","RDEPEND","PDEPEND"]
        edepend[k] = metadata[k]
        if not pkg.built and \
            "--buildpkgonly" in self.myopts and \
            "deep" not in self.myparams and \
            "empty" not in self.myparams:
            # --buildpkgonly without deep/empty: runtime deps are not
            # needed since nothing gets merged to the live filesystem.
            edepend["RDEPEND"] = ""
            edepend["PDEPEND"] = ""
        bdeps_satisfied = False
        if pkg.built and not removal_action:
            if self.myopts.get("--with-bdeps", "n") == "y":
                # Pull in build time deps as requested, but marked them as
                # "satisfied" since they are not strictly required. This allows
                # more freedom in the merge order calculation for solving
                # circular dependencies. Don't convert to PDEPEND since that
                # could make --with-bdeps=y less effective if it is used to
                # adjust merge order to prevent built_with_use() calls from
                bdeps_satisfied = True
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""
        if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
            edepend["DEPEND"] = ""
        # (deps = ( elided) — build deps resolve against "/", runtime
        # deps against the target root.
            ("/", edepend["DEPEND"],
                self._priority(buildtime=True, satisfied=bdeps_satisfied)),
            (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
            (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
        debug = "--debug" in self.myopts
        strict = mytype != "installed"
        for dep_root, dep_string, dep_priority in deps:
            # Decrease priority so that --buildpkgonly
            # hasallzeros() works correctly.
            dep_priority = DepPriority()
            print "Parent:    ", jbigkey
            print "Depstring:", dep_string
            print "Priority:", dep_priority
            vardb = self.roots[dep_root].trees["vartree"].dbapi
            # (try: elided)
            selected_atoms = self._select_atoms(dep_root,
                dep_string, myuse=myuse, parent=pkg, strict=strict)
            except portage.exception.InvalidDependString, e:
                show_invalid_depstring_notice(jbigkey, dep_string, str(e))
            print "Candidates:", selected_atoms
            for atom in selected_atoms:
                atom = portage.dep.Atom(atom)
                mypriority = dep_priority.copy()
                if not atom.blocker and vardb.match(atom):
                    # Already installed in the target root, so the dep is
                    # satisfied even if it still gets updated.
                    mypriority.satisfied = True
                if not self._add_dep(Dependency(atom=atom,
                    blocker=atom.blocker, depth=depth, parent=pkg,
                    priority=mypriority, root=dep_root),
                    allow_unsatisfied=allow_unsatisfied):
                except portage.exception.InvalidAtom, e:
                    show_invalid_depstring_notice(
                        pkg, dep_string, str(e))
                    if not pkg.installed:
                print "Exiting...", jbigkey
        except portage.exception.AmbiguousPackageName, e:
            portage.writemsg("\n\n!!! An atom in the dependencies " + \
                "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
            portage.writemsg("    %s\n" % cpv, noiselevel=-1)
            portage.writemsg("\n", noiselevel=-1)
            if mytype == "binary":
                "!!! This binary package cannot be installed: '%s'\n" % \
                mykey, noiselevel=-1)
            elif mytype == "ebuild":
                portdb = self.roots[myroot].trees["porttree"].dbapi
                myebuild, mylocation = portdb.findname2(mykey)
                portage.writemsg("!!! This ebuild cannot be installed: " + \
                    "'%s'\n" % myebuild, noiselevel=-1)
            portage.writemsg("!!! Please notify the package maintainer " + \
                "that atoms must be fully-qualified.\n", noiselevel=-1)
5142 def _priority(self, **kwargs):
5143 if "remove" in self.myparams:
5144 priority_constructor = UnmergeDepPriority
5146 priority_constructor = DepPriority
5147 return priority_constructor(**kwargs)
    def _dep_expand(self, root_config, atom_without_category):
        """
        Expand a category-less atom into candidate category/pn atoms.

        @param root_config: a root config instance
        @type root_config: RootConfig
        @param atom_without_category: an atom without a category component
        @type atom_without_category: String
        @returns: a list of atoms containing categories (possibly empty)

        NOTE(review): the "cp_set = set()" init, the candidate-filtering
        loop body and the final return are elided from this excerpt.
        """
        null_cp = portage.dep_getkey(insert_category_into_atom(
            atom_without_category, "null"))
        cat, atom_pn = portage.catsplit(null_cp)
        # Collect all known category/pn pairs from every configured db.
        for db, pkg_type, built, installed, db_keys in \
            self._filtered_trees[root_config.root]["dbs"]:
            cp_set.update(db.cp_all())
        for cp in list(cp_set):
            cat, pn = portage.catsplit(cp)
        # Build one category-qualified atom per surviving cp.
        cat, pn = portage.catsplit(cp)
        deps.append(insert_category_into_atom(
            atom_without_category, cat))
    def _have_new_virt(self, root, atom_cp):
        """Return whether any configured db for `root` carries a package
        under `atom_cp` (i.e. a new-style virtual package exists for it).

        NOTE(review): the result-flag init and return statements are
        elided from this excerpt.
        """
        for db, pkg_type, built, installed, db_keys in \
            self._filtered_trees[root]["dbs"]:
            if db.cp_list(atom_cp):
    def _iter_atoms_for_pkg(self, pkg):
        """Yield the argument (atom, arg) pairs that match `pkg`,
        skipping old-style virtual matches superseded by new-style
        virtuals or higher-slot visible packages.

        NOTE(review): yield statements, "higher_slot = None" init and
        break/continue lines are elided from this excerpt.
        """
        # TODO: add multiple $ROOT support
        if pkg.root != self.target_root:
        atom_arg_map = self._atom_arg_map
        root_config = self.roots[pkg.root]
        for atom in self._set_atoms.iterAtomsForPackage(pkg):
            atom_cp = portage.dep_getkey(atom)
            if atom_cp != pkg.cp and \
                self._have_new_virt(pkg.root, atom_cp):
            # Ignore pkg if a visible package in a higher slot also
            # satisfies the atom.
            visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
            visible_pkgs.reverse() # descending order
            for visible_pkg in visible_pkgs:
                if visible_pkg.cp != atom_cp:
                if pkg >= visible_pkg:
                    # This is descending order, and we're not
                    # interested in any versions <= pkg given.
                if pkg.slot_atom != visible_pkg.slot_atom:
                    higher_slot = visible_pkg
            if higher_slot is not None:
            for arg in atom_arg_map[(atom, pkg.root)]:
                if isinstance(arg, PackageArg) and \
    def select_files(self, myfiles):
        """Given a list of .tbz2s, .ebuilds sets, and deps, create the
        appropriate depgraph and return a favorite list.

        NOTE(review): this excerpt elides many scaffolding lines
        ("args = []", "myfavorites = []", loop/try headers, else branches,
        continue/return statements); gap comments below mark the major
        elisions.
        """
        debug = "--debug" in self.myopts
        root_config = self.roots[self.target_root]
        sets = root_config.sets
        getSetAtoms = root_config.setconfig.getSetAtoms
        myroot = self.target_root
        dbs = self._filtered_trees[myroot]["dbs"]
        vardb = self.trees[myroot]["vartree"].dbapi
        real_vardb = self._trees_orig[myroot]["vartree"].dbapi
        portdb = self.trees[myroot]["porttree"].dbapi
        bindb = self.trees[myroot]["bintree"].dbapi
        pkgsettings = self.pkgsettings[myroot]
        onlydeps = "--onlydeps" in self.myopts
        # (for x in myfiles: loop header elided) — classify each argument
        # by its file extension / shape.
            ext = os.path.splitext(x)[1]
            # (.tbz2 branch header elided) binary package file argument:
            if not os.path.exists(x):
                # Try the conventional PKGDIR locations before giving up.
                    os.path.join(pkgsettings["PKGDIR"], "All", x)):
                    x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                elif os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], x)):
                    x = os.path.join(pkgsettings["PKGDIR"], x)
                    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
                    print "!!! Please ensure the tbz2 exists as specified.\n"
                    return 0, myfavorites
            mytbz2=portage.xpak.tbz2(x)
            mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
            # The tbz2 must live at the path bintree expects for mykey.
            if os.path.realpath(x) != \
                os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
                print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
                return 0, myfavorites
            db_keys = list(bindb._aux_cache_keys)
            metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
            pkg = Package(type_name="binary", root_config=root_config,
                cpv=mykey, built=True, metadata=metadata,
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif ext==".ebuild":
            # Raw ebuild file argument: it must live inside a valid tree.
            ebuild_path = portage.util.normalize_path(os.path.abspath(x))
            pkgdir = os.path.dirname(ebuild_path)
            tree_root = os.path.dirname(os.path.dirname(pkgdir))
            cp = pkgdir[len(tree_root)+1:]
            e = portage.exception.PackageNotFound(
                ("%s is not in a valid portage tree " + \
                "hierarchy or does not exist") % x)
            if not portage.isvalidatom(cp):
            cat = portage.catsplit(cp)[0]
            mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
            if not portage.isvalidatom("="+mykey):
            ebuild_path = portdb.findname(mykey)
                # The ebuild found via portdb must be the one given.
                if ebuild_path != os.path.join(os.path.realpath(tree_root),
                    cp, os.path.basename(ebuild_path)):
                    print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
                    return 0, myfavorites
                if mykey not in portdb.xmatch(
                    "match-visible", portage.dep_getkey(mykey)):
                    print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
                    print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
                    print colorize("BAD", "*** page for details.")
                    countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
                raise portage.exception.PackageNotFound(
                    "%s is not in a valid portage tree hierarchy or does not exist" % x)
            db_keys = list(portdb._aux_cache_keys)
            metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
            pkg = Package(type_name="ebuild", root_config=root_config,
                cpv=mykey, metadata=metadata, onlydeps=onlydeps)
            pkgsettings.setcpv(pkg)
            pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif x.startswith(os.path.sep):
            # Absolute path argument: resolve it to the owning package.
            if not x.startswith(myroot):
                portage.writemsg(("\n\n!!! '%s' does not start with" + \
                    " $ROOT.\n") % x, noiselevel=-1)
            # Queue these up since it's most efficient to handle
            # multiple files in a single iter_owners() call.
            lookup_owners.append(x)
            # (else branch elided) set / atom argument handling:
            if x in ("system", "world"):
            if x.startswith(SETPREFIX):
                s = x[len(SETPREFIX):]
                    raise portage.exception.PackageSetNotFound(s)
                # Recursively expand sets so that containment tests in
                # self._get_parent_sets() properly match atoms in nested
                # sets (like if world contains system).
                expanded_set = InternalPackageSet(
                    initial_atoms=getSetAtoms(s))
                self._sets[s] = expanded_set
                args.append(SetArg(arg=x, set=expanded_set,
                    root_config=root_config))
                myfavorites.append(x)
            if not is_valid_package_atom(x):
                portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
            # Don't expand categories or old-style virtuals here unless
            # necessary. Expansion of old-style virtuals here causes at
            # least the following problems:
            #   1) It's more difficult to determine which set(s) an atom
            #      came from, if any.
            #   2) It takes away freedom from the resolver to choose other
            #      possible expansions when necessary.
                args.append(AtomArg(arg=x, atom=x,
                    root_config=root_config))
                expanded_atoms = self._dep_expand(root_config, x)
                installed_cp_set = set()
                for atom in expanded_atoms:
                    atom_cp = portage.dep_getkey(atom)
                    if vardb.cp_list(atom_cp):
                        installed_cp_set.add(atom_cp)
                # Prefer the single already-installed expansion when the
                # name is otherwise ambiguous.
                if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                    installed_cp = iter(installed_cp_set).next()
                    expanded_atoms = [atom for atom in expanded_atoms \
                        if portage.dep_getkey(atom) == installed_cp]
                if len(expanded_atoms) > 1:
                    ambiguous_package_name(x, expanded_atoms, root_config,
                        self.spinner, self.myopts)
                    return False, myfavorites
                    atom = expanded_atoms[0]
                    # No match at all: pick a virtual/ or null/ category.
                    null_atom = insert_category_into_atom(x, "null")
                    null_cp = portage.dep_getkey(null_atom)
                    cat, atom_pn = portage.catsplit(null_cp)
                    virts_p = root_config.settings.get_virts_p().get(atom_pn)
                        # Allow the depgraph to choose which virtual.
                        atom = insert_category_into_atom(x, "virtual")
                        atom = insert_category_into_atom(x, "null")
                args.append(AtomArg(arg=x, atom=atom,
                    root_config=root_config))
        # (if lookup_owners: elided) map queued file paths to owners.
        search_for_multiple = False
        if len(lookup_owners) > 1:
            search_for_multiple = True
        for x in lookup_owners:
            if not search_for_multiple and os.path.isdir(x):
                search_for_multiple = True
            relative_paths.append(x[len(myroot):])
        for pkg, relative_path in \
            real_vardb._owners.iter_owners(relative_paths):
            owners.add(pkg.mycpv)
            if not search_for_multiple:
            portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                "by any package.\n") % lookup_owners[0], noiselevel=-1)
        # (for cpv in owners: elided) turn each owner into a slot atom.
            slot = vardb.aux_get(cpv, ["SLOT"])[0]
                # portage now masks packages with missing slot, but it's
                # possible that one was installed by an older version
                atom = portage.cpv_getkey(cpv)
                atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
            args.append(AtomArg(arg=atom, atom=atom,
                root_config=root_config))
        if "--update" in self.myopts:
            # Enable greedy SLOT atoms for atoms given as arguments.
            # This is currently disabled for sets since greedy SLOT
            # atoms could be a property of the set itself.
                # In addition to any installed slots, also try to pull
                # in the latest new slot that may be available.
                greedy_atoms.append(arg)
                if not isinstance(arg, (AtomArg, PackageArg)):
                atom_cp = portage.dep_getkey(arg.atom)
                for cpv in vardb.match(arg.atom):
                    slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
                    greedy_atoms.append(
                        AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
                            root_config=root_config))
        # Create the "args" package set from atoms and
        # packages given as arguments.
        args_set = self._sets["args"]
            if not isinstance(arg, (AtomArg, PackageArg)):
            if myatom in args_set:
            args_set.add(myatom)
            myfavorites.append(myatom)
        self._set_atoms.update(chain(*self._sets.itervalues()))
        atom_arg_map = self._atom_arg_map
            for atom in arg.set:
                atom_key = (atom, myroot)
                refs = atom_arg_map.get(atom_key)
                    atom_arg_map[atom_key] = refs
        pprovideddict = pkgsettings.pprovideddict
            portage.writemsg("\n", noiselevel=-1)
        # Order needs to be preserved since a feature of --nodeps
        # is to allow the user to force a specific merge order.
            for atom in arg.set:
                self.spinner.update()
                dep = Dependency(atom=atom, onlydeps=onlydeps,
                    root=myroot, parent=arg)
                atom_cp = portage.dep_getkey(atom)
                pprovided = pprovideddict.get(portage.dep_getkey(atom))
                if pprovided and portage.match_from_list(atom, pprovided):
                    # A provided package has been specified on the command line.
                    self._pprovided_args.append((arg, atom))
                if isinstance(arg, PackageArg):
                    if not self._add_pkg(arg.package, dep) or \
                        not self._create_graph():
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % arg.arg)
                        return 0, myfavorites
                    portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
                        (arg, atom), noiselevel=-1)
                pkg, existing_node = self._select_package(
                    myroot, atom, onlydeps=onlydeps)
                    # Bail only for non-system/world sets.
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        self._unsatisfied_deps_for_display.append(
                            ((myroot, atom), {}))
                        return 0, myfavorites
                    self._missing_args.append((arg, atom))
                if atom_cp != pkg.cp:
                    # For old-style virtuals, we need to repeat the
                    # package.provided check against the selected package.
                    expanded_atom = atom.replace(atom_cp, pkg.cp)
                    pprovided = pprovideddict.get(pkg.cp)
                        portage.match_from_list(expanded_atom, pprovided):
                        # A provided package has been
                        # specified on the command line.
                        self._pprovided_args.append((arg, atom))
                if pkg.installed and "selective" not in self.myparams:
                    self._unsatisfied_deps_for_display.append(
                        ((myroot, atom), {}))
                    # Previous behavior was to bail out in this case, but
                    # since the dep is satisfied by the installed package,
                    # it's more friendly to continue building the graph
                    # and just show a warning message. Therefore, only bail
                    # out here if the atom is not from either the system or
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        return 0, myfavorites
                # Add the selected package to the graph as soon as possible
                # so that later dep_check() calls can use it as feedback
                # for making more consistent atom selections.
                if not self._add_pkg(pkg, dep):
                    if isinstance(arg, SetArg):
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s from %s\n") % \
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % atom)
                    return 0, myfavorites
        except portage.exception.MissingSignature, e:
            portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
            portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
            portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
            portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
            portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
            return 0, myfavorites
        except portage.exception.InvalidSignature, e:
            portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
            portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
            portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
            portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
            portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
            return 0, myfavorites
        except SystemExit, e:
            raise # Needed else can't exit
        except Exception, e:
            print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
            print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
        # Now that the root packages have been added to the graph,
        # process the dependencies.
        if not self._create_graph():
            return 0, myfavorites
        if "--usepkgonly" in self.myopts:
            for xs in self.digraph.all_nodes():
                if not isinstance(xs, Package):
                if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                    print "Missing binary for:",xs[2]
        except self._unknown_internal_error:
            return False, myfavorites
        # We're true here unless we are missing binaries.
        return (not missing,myfavorites)
    def _select_atoms_from_graph(self, *pargs, **kwargs):
        """
        Prefer atoms matching packages that have already been
        added to the graph or those that are installed and have
        not been scheduled for replacement.
        """
        # Substituting the graph trees makes dep_check resolve against
        # graph/installed packages instead of the full filtered trees.
        kwargs["trees"] = self._graph_trees
        return self._select_atoms_highest_available(*pargs, **kwargs)
    def _select_atoms_highest_available(self, root, depstring,
        myuse=None, parent=None, strict=True, trees=None):
        """This will raise InvalidDependString if necessary. If trees is
        None then self._filtered_trees is used.

        NOTE(review): the "if trees is None:" guard and the try/finally
        around dep_check (which restores the parent/strict globals) are
        elided from this excerpt.
        """
        pkgsettings = self.pkgsettings[root]
        trees = self._filtered_trees
        if parent is not None:
            # Expose the requesting package to dep_check via the trees
            # mapping; removed again after the call below.
            trees[root]["parent"] = parent
        portage.dep._dep_check_strict = False
        mycheck = portage.dep_check(depstring, None,
            pkgsettings, myuse=myuse,
            myroot=root, trees=trees)
        if parent is not None:
            trees[root].pop("parent")
        # Restore the module-level strict flag (set True at import time).
        portage.dep._dep_check_strict = True
        raise portage.exception.InvalidDependString(mycheck[1])
        selected_atoms = mycheck[1]
        return selected_atoms
    def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
        """Print a human-readable explanation of why `atom` could not be
        satisfied: missing USE/IUSE flags, masked candidates, or no
        ebuilds at all, followed by the parent chain that required it.

        NOTE(review): several accumulator initializations ("missing_use",
        "mreasons", "msg", "node") and else-branch headers are elided
        from this excerpt.
        """
        atom = portage.dep.Atom(atom)
        atom_set = InternalPackageSet(initial_atoms=(atom,))
        atom_without_use = atom
        # Strip USE deps (and re-append slot) so masked/visible matching
        # ignores USE conditionals.
        atom_without_use = portage.dep.remove_slot(atom)
        atom_without_use += ":" + atom.slot
        atom_without_use = portage.dep.Atom(atom_without_use)
        xinfo = '"%s"' % atom
        # Discard null/ from failed cpv_expand category expansion.
        xinfo = xinfo.replace("null/", "")
        masked_packages = []
        missing_licenses = []
        have_eapi_mask = False
        pkgsettings = self.pkgsettings[root]
        implicit_iuse = pkgsettings._get_implicit_iuse()
        root_config = self.roots[root]
        portdb = self.roots[root].trees["porttree"].dbapi
        dbs = self._filtered_trees[root]["dbs"]
        for db, pkg_type, built, installed, db_keys in dbs:
            if hasattr(db, "xmatch"):
                cpv_list = db.xmatch("match-all", atom_without_use)
                cpv_list = db.match(atom_without_use)
            for cpv in cpv_list:
                metadata, mreasons = get_mask_info(root_config, cpv,
                    pkgsettings, db, pkg_type, built, installed, db_keys)
                if metadata is not None:
                    pkg = Package(built=built, cpv=cpv,
                        installed=installed, metadata=metadata,
                        root_config=root_config)
                    if pkg.cp != atom.cp:
                        # A cpv can be returned from dbapi.match() as an
                        # old-style virtual match even in cases when the
                        # package does not actually PROVIDE the virtual.
                        # Filter out any such false matches here.
                        if not atom_set.findAtomForPackage(pkg):
                    if atom.use and not mreasons:
                        missing_use.append(pkg)
                        masked_packages.append(
                            (root_config, pkgsettings, cpv, metadata, mreasons))
        missing_use_reasons = []
        missing_iuse_reasons = []
        for pkg in missing_use:
            use = pkg.use.enabled
            iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
            iuse_re = re.compile("^(%s)$" % "|".join(iuse))
            # Flags required by the atom but absent from pkg's IUSE.
            for x in atom.use.required:
                if iuse_re.match(x) is None:
                    missing_iuse.append(x)
                mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
                missing_iuse_reasons.append((pkg, mreasons))
                # IUSE is fine; compute which flags would need toggling.
                need_enable = sorted(atom.use.enabled.difference(use))
                need_disable = sorted(atom.use.disabled.intersection(use))
                if need_enable or need_disable:
                    changes.extend(colorize("red", "+" + x) \
                        for x in need_enable)
                    changes.extend(colorize("blue", "-" + x) \
                        for x in need_disable)
                    mreasons.append("Change USE: %s" % " ".join(changes))
                    missing_use_reasons.append((pkg, mreasons))
        if missing_iuse_reasons and not missing_use_reasons:
            missing_use_reasons = missing_iuse_reasons
        elif missing_use_reasons:
            # Only show the latest version.
            del missing_use_reasons[1:]
        if missing_use_reasons:
            print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
            print "!!! One of the following packages is required to complete your request:"
            for pkg, mreasons in missing_use_reasons:
                print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
        elif masked_packages:
            colorize("BAD", "All ebuilds that could satisfy ") + \
            colorize("INFORM", xinfo) + \
            colorize("BAD", " have been masked.")
            print "!!! One of the following masked packages is required to complete your request:"
            have_eapi_mask = show_masked_packages(masked_packages)
            msg = ("The current version of portage supports " + \
                "EAPI '%s'. You must upgrade to a newer version" + \
                " of portage before EAPI masked packages can" + \
                " be installed.") % portage.const.EAPI
            from textwrap import wrap
            for line in wrap(msg, 75):
            print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
        # Show parent nodes and the argument that pulled them in.
        traversed_nodes = set()
        while node is not None:
            traversed_nodes.add(node)
            msg.append('(dependency required by "%s" [%s])' % \
                (colorize('INFORM', str(node.cpv)), node.type_name))
            # When traversing to parents, prefer arguments over packages
            # since arguments are root nodes. Never traverse the same
            # package twice, in order to prevent an infinite loop.
            selected_parent = None
            for parent in self.digraph.parent_nodes(node):
                if isinstance(parent, DependencyArg):
                    msg.append('(dependency required by "%s" [argument])' % \
                        (colorize('INFORM', str(parent))))
                    selected_parent = None
                if parent not in traversed_nodes:
                    selected_parent = parent
            node = selected_parent
    def _select_pkg_highest_available(self, root, atom, onlydeps=False):
        """Memoizing wrapper around _select_pkg_highest_available_imp:
        results are cached per (root, atom, onlydeps) key, and cache hits
        are refreshed once the selected package lands in the graph.

        NOTE(review): the cache-hit unpacking ("if ret is not None:
        pkg, existing = ret") and return statements are elided from
        this excerpt.
        """
        cache_key = (root, atom, onlydeps)
        ret = self._highest_pkg_cache.get(cache_key)
        if pkg and not existing:
            existing = self._slot_pkg_map[root].get(pkg.slot_atom)
            if existing and existing == pkg:
                # Update the cache to reflect that the
                # package has been added to the graph.
                self._highest_pkg_cache[cache_key] = ret
        # Cache miss: compute and store.
        ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
        self._highest_pkg_cache[cache_key] = ret
5768 settings = pkg.root_config.settings
5769 if visible(settings, pkg) and not (pkg.installed and \
5770 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
5771 pkg.root_config.visible_pkgs.cpv_inject(pkg)
5774 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Core package-selection routine: scan the configured dbs (ebuild, binary,
# installed trees) for packages matching `atom`, apply visibility, USE-dep,
# --newuse/--reinstall and virtual filtering, and return a 2-tuple of
# (selected Package or None, existing graph node or None).
5775 root_config = self.roots[root]
5776 pkgsettings = self.pkgsettings[root]
5777 dbs = self._filtered_trees[root]["dbs"]
5778 vardb = self.roots[root].trees["vartree"].dbapi
5779 portdb = self.roots[root].trees["porttree"].dbapi
5780 # List of acceptable packages, ordered by type preference.
5781 matched_packages = []
5782 highest_version = None
# Normalize plain-string atoms into Atom instances.
5783 if not isinstance(atom, portage.dep.Atom):
5784 atom = portage.dep.Atom(atom)
5786 atom_set = InternalPackageSet(initial_atoms=(atom,))
5787 existing_node = None
5789 usepkgonly = "--usepkgonly" in self.myopts
5790 empty = "empty" in self.myparams
5791 selective = "selective" in self.myparams
5793 noreplace = "--noreplace" in self.myopts
5794 # Behavior of the "selective" parameter depends on
5795 # whether or not a package matches an argument atom.
5796 # If an installed package provides an old-style
5797 # virtual that is no longer provided by an available
5798 # package, the installed package may match an argument
5799 # atom even though none of the available packages do.
5800 # Therefore, "selective" logic does not consider
5801 # whether or not an installed package matches an
5802 # argument atom. It only considers whether or not
5803 # available packages match argument atoms, which is
5804 # represented by the found_available_arg flag.
5805 found_available_arg = False
# First pass prefers nodes already in the graph (find_existing_node=True);
# the second pass falls back to a fresh selection.
5806 for find_existing_node in True, False:
5809 for db, pkg_type, built, installed, db_keys in dbs:
5812 if installed and not find_existing_node:
5813 want_reinstall = reinstall or empty or \
5814 (found_available_arg and not selective)
5815 if want_reinstall and matched_packages:
# xmatch("match-all", ...) is the porttree fast path; other dbapis
# only provide plain match().
5817 if hasattr(db, "xmatch"):
5818 cpv_list = db.xmatch("match-all", atom)
5820 cpv_list = db.match(atom)
5822 # USE=multislot can make an installed package appear as if
5823 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5824 # won't do any good as long as USE=multislot is enabled since
5825 # the newly built package still won't have the expected slot.
5826 # Therefore, assume that such SLOT dependencies are already
5827 # satisfied rather than forcing a rebuild.
5828 if installed and not cpv_list and atom.slot:
5829 for cpv in db.match(atom.cp):
5830 slot_available = False
5831 for other_db, other_type, other_built, \
5832 other_installed, other_keys in dbs:
5835 other_db.aux_get(cpv, ["SLOT"])[0]:
5836 slot_available = True
5840 if not slot_available:
5842 inst_pkg = self._pkg(cpv, "installed",
5843 root_config, installed=installed)
5844 # Remove the slot from the atom and verify that
5845 # the package matches the resulting atom.
5846 atom_without_slot = portage.dep.remove_slot(atom)
5848 atom_without_slot += str(atom.use)
5849 atom_without_slot = portage.dep.Atom(atom_without_slot)
5850 if portage.match_from_list(
5851 atom_without_slot, [inst_pkg]):
5852 cpv_list = [inst_pkg.cpv]
# Installed packages and pure-dependency lookups never merge.
5857 pkg_status = "merge"
5858 if installed or onlydeps:
5859 pkg_status = "nomerge"
5862 for cpv in cpv_list:
5863 # Make --noreplace take precedence over --newuse.
5864 if not installed and noreplace and \
5865 cpv in vardb.match(atom):
5866 # If the installed version is masked, it may
5867 # be necessary to look at lower versions,
5868 # in case there is a visible downgrade.
5870 reinstall_for_flags = None
5871 cache_key = (pkg_type, root, cpv, pkg_status)
5872 calculated_use = True
5873 pkg = self._pkg_cache.get(cache_key)
# Cache miss: build a fresh Package from the db metadata.
5875 calculated_use = False
5877 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5880 pkg = Package(built=built, cpv=cpv,
5881 installed=installed, metadata=metadata,
5882 onlydeps=onlydeps, root_config=root_config,
5884 metadata = pkg.metadata
5885 if not built and ("?" in metadata["LICENSE"] or \
5886 "?" in metadata["PROVIDE"]):
5887 # This is avoided whenever possible because
5888 # it's expensive. It only needs to be done here
5889 # if it has an effect on visibility.
5890 pkgsettings.setcpv(pkg)
5891 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5892 calculated_use = True
5893 self._pkg_cache[pkg] = pkg
5895 if not installed or (built and matched_packages):
5896 # Only enforce visibility on installed packages
5897 # if there is at least one other visible package
5898 # available. By filtering installed masked packages
5899 # here, packages that have been masked since they
5900 # were installed can be automatically downgraded
5901 # to an unmasked version.
5903 if not visible(pkgsettings, pkg):
5905 except portage.exception.InvalidDependString:
5909 # Enable upgrade or downgrade to a version
5910 # with visible KEYWORDS when the installed
5911 # version is masked by KEYWORDS, but never
5912 # reinstall the same exact version only due
5913 # to a KEYWORDS mask.
5914 if built and matched_packages:
5916 different_version = None
5917 for avail_pkg in matched_packages:
5918 if not portage.dep.cpvequal(
5919 pkg.cpv, avail_pkg.cpv):
5920 different_version = avail_pkg
5922 if different_version is not None:
5925 pkgsettings._getMissingKeywords(
5926 pkg.cpv, pkg.metadata):
5929 # If the ebuild no longer exists or its
5930 # keywords have been dropped, reject built
5931 # instances (installed or binary).
5932 # If --usepkgonly is enabled, assume that
5933 # the ebuild status should be ignored.
5937 pkg.cpv, "ebuild", root_config)
5938 except portage.exception.PackageNotFound:
5941 if not visible(pkgsettings, pkg_eb):
# Late USE calculation for ebuilds that skipped the early path above.
5944 if not pkg.built and not calculated_use:
5945 # This is avoided whenever possible because
5947 pkgsettings.setcpv(pkg)
5948 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5950 if pkg.cp != atom.cp:
5951 # A cpv can be returned from dbapi.match() as an
5952 # old-style virtual match even in cases when the
5953 # package does not actually PROVIDE the virtual.
5954 # Filter out any such false matches here.
5955 if not atom_set.findAtomForPackage(pkg):
5959 if root == self.target_root:
5961 # Ebuild USE must have been calculated prior
5962 # to this point, in case atoms have USE deps.
5963 myarg = self._iter_atoms_for_pkg(pkg).next()
5964 except StopIteration:
5966 except portage.exception.InvalidDependString:
5968 # masked by corruption
5970 if not installed and myarg:
5971 found_available_arg = True
# Enforce the atom's USE dependencies against the ebuild's USE.
5973 if atom.use and not pkg.built:
5974 use = pkg.use.enabled
5975 if atom.use.enabled.difference(use):
5977 if atom.use.disabled.intersection(use):
5979 if pkg.cp == atom_cp:
5980 if highest_version is None:
5981 highest_version = pkg
5982 elif pkg > highest_version:
5983 highest_version = pkg
5984 # At this point, we've found the highest visible
5985 # match from the current repo. Any lower versions
5986 # from this repo are ignored, so the loop
5987 # will always end with a break statement below
5989 if find_existing_node:
5990 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5993 if portage.dep.match_from_list(atom, [e_pkg]):
5994 if highest_version and \
5995 e_pkg.cp == atom_cp and \
5996 e_pkg < highest_version and \
5997 e_pkg.slot_atom != highest_version.slot_atom:
5998 # There is a higher version available in a
5999 # different slot, so this existing node is
6003 matched_packages.append(e_pkg)
6004 existing_node = e_pkg
6006 # Compare built package to current config and
6007 # reject the built package if necessary.
6008 if built and not installed and \
6009 ("--newuse" in self.myopts or \
6010 "--reinstall" in self.myopts):
6011 iuses = pkg.iuse.all
6012 old_use = pkg.use.enabled
6014 pkgsettings.setcpv(myeb)
6016 pkgsettings.setcpv(pkg)
6017 now_use = pkgsettings["PORTAGE_USE"].split()
6018 forced_flags = set()
6019 forced_flags.update(pkgsettings.useforce)
6020 forced_flags.update(pkgsettings.usemask)
6022 if myeb and not usepkgonly:
6023 cur_iuse = myeb.iuse.all
6024 if self._reinstall_for_flags(forced_flags,
6028 # Compare current config to installed package
6029 # and do not reinstall if possible.
6030 if not installed and \
6031 ("--newuse" in self.myopts or \
6032 "--reinstall" in self.myopts) and \
6033 cpv in vardb.match(atom):
6034 pkgsettings.setcpv(pkg)
6035 forced_flags = set()
6036 forced_flags.update(pkgsettings.useforce)
6037 forced_flags.update(pkgsettings.usemask)
6038 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6039 old_iuse = set(filter_iuse_defaults(
6040 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6041 cur_use = pkgsettings["PORTAGE_USE"].split()
6042 cur_iuse = pkg.iuse.all
6043 reinstall_for_flags = \
6044 self._reinstall_for_flags(
6045 forced_flags, old_use, old_iuse,
6047 if reinstall_for_flags:
6051 matched_packages.append(pkg)
6052 if reinstall_for_flags:
6053 self._reinstall_nodes[pkg] = \
# No candidate at all: caller receives (None, None) -- presumably via an
# early return here; TODO confirm against full source.
6057 if not matched_packages:
6060 if "--debug" in self.myopts:
6061 for pkg in matched_packages:
6062 portage.writemsg("%s %s\n" % \
6063 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6065 # Filter out any old-style virtual matches if they are
6066 # mixed with new-style virtual matches.
6067 cp = portage.dep_getkey(atom)
6068 if len(matched_packages) > 1 and \
6069 "virtual" == portage.catsplit(cp)[0]:
6070 for pkg in matched_packages:
6073 # Got a new-style virtual, so filter
6074 # out any old-style virtuals.
6075 matched_packages = [pkg for pkg in matched_packages \
# Multiple candidates remain: keep only those carrying the best version.
6079 if len(matched_packages) > 1:
6080 bestmatch = portage.best(
6081 [pkg.cpv for pkg in matched_packages])
6082 matched_packages = [pkg for pkg in matched_packages \
6083 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6085 # ordered by type preference ("ebuild" type is the last resort)
6086 return matched_packages[-1], existing_node
6088 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6090 Select packages that have already been added to the graph or
6091 those that are installed and have not been scheduled for
# Uses the graph-backed dbapi so only already-graphed / installed
# packages can match. Returns a (Package, existing_node) style tuple
# like the other _select_pkg_* methods.
6094 graph_db = self._graph_trees[root]["porttree"].dbapi
6095 matches = graph_db.match(atom)
6098 cpv = matches[-1] # highest match
# Rebuild the slot atom "cat/pkg:SLOT" for the chosen cpv so the
# corresponding graph node can be looked up by slot.
6099 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6100 graph_db.aux_get(cpv, ["SLOT"])[0])
6101 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6104 # Since this cpv exists in the graph_db,
6105 # we must have a cached Package instance.
6106 cache_key = ("installed", root, cpv, "nomerge")
6107 return (self._pkg_cache[cache_key], None)
6109 def _complete_graph(self):
6111 Add any deep dependencies of required sets (args, system, world) that
6112 have not been pulled into the graph yet. This ensures that the graph
6113 is consistent such that initially satisfied deep dependencies are not
6114 broken in the new graph. Initially unsatisfied dependencies are
6115 irrelevant since we only want to avoid breaking dependencies that are
6118 Since this method can consume enough time to disturb users, it is
6119 currently only enabled by the --complete-graph option.
# Fast exits: nothing to complete for --buildpkgonly / --nodeps runs,
# or when "complete" mode was not requested.
6121 if "--buildpkgonly" in self.myopts or \
6122 "recurse" not in self.myparams:
6125 if "complete" not in self.myparams:
6126 # Skip this to avoid consuming enough time to disturb users.
6129 # Put the depgraph into a mode that causes it to only
6130 # select packages that have already been added to the
6131 # graph or those that are installed and have not been
6132 # scheduled for replacement. Also, toggle the "deep"
6133 # parameter so that all dependencies are traversed and
6135 self._select_atoms = self._select_atoms_from_graph
6136 self._select_package = self._select_pkg_from_graph
6137 already_deep = "deep" in self.myparams
6138 if not already_deep:
6139 self.myparams.add("deep")
6141 for root in self.roots:
6142 required_set_names = self._required_set_names.copy()
6143 if root == self.target_root and \
6144 (already_deep or "empty" in self.myparams):
6145 required_set_names.difference_update(self._sets)
6146 if not required_set_names and not self._ignored_deps:
6148 root_config = self.roots[root]
6149 setconfig = root_config.setconfig
6151 # Reuse existing SetArg instances when available.
6152 for arg in self.digraph.root_nodes():
6153 if not isinstance(arg, SetArg):
6155 if arg.root_config != root_config:
6157 if arg.name in required_set_names:
6159 required_set_names.remove(arg.name)
6160 # Create new SetArg instances only when necessary.
6161 for s in required_set_names:
6162 expanded_set = InternalPackageSet(
6163 initial_atoms=setconfig.getSetAtoms(s))
6164 atom = SETPREFIX + s
6165 args.append(SetArg(arg=atom, set=expanded_set,
6166 root_config=root_config))
6167 vardb = root_config.trees["vartree"].dbapi
# Push every atom of every required set onto the dep stack so the
# graph-completion pass traverses them.
6169 for atom in arg.set:
6170 self._dep_stack.append(
6171 Dependency(atom=atom, root=root, parent=arg))
6172 if self._ignored_deps:
6173 self._dep_stack.extend(self._ignored_deps)
6174 self._ignored_deps = []
6175 if not self._create_graph(allow_unsatisfied=True):
6177 # Check the unsatisfied deps to see if any initially satisfied deps
6178 # will become unsatisfied due to an upgrade. Initially unsatisfied
6179 # deps are irrelevant since we only want to avoid breaking deps
6180 # that are initially satisfied.
6181 while self._unsatisfied_deps:
6182 dep = self._unsatisfied_deps.pop()
6183 matches = vardb.match_pkgs(dep.atom)
6185 self._initially_unsatisfied_deps.append(dep)
6187 # A scheduled installation broke a deep dependency.
6188 # Add the installed package to the graph so that it
6189 # will be appropriately reported as a slot collision
6190 # (possibly solvable via backtracking).
6191 pkg = matches[-1] # highest match
6192 if not self._add_pkg(pkg, dep):
6194 if not self._create_graph(allow_unsatisfied=True):
6198 def _pkg(self, cpv, type_name, root_config, installed=False):
6200 Get a package instance from the cache, or create a new
6201 one if necessary. Raises KeyError from aux_get if it
6202 fails for some reason (package does not exist or is
# Cached lookups always use the "nomerge" operation key.
6207 operation = "nomerge"
6208 pkg = self._pkg_cache.get(
6209 (type_name, root_config.root, cpv, operation))
# Cache miss: resolve the dbapi for this package type and fetch the
# metadata keys that dbapi caches.
6211 tree_type = self.pkg_tree_map[type_name]
6212 db = root_config.trees[tree_type].dbapi
6213 db_keys = list(self._trees_orig[root_config.root][
6214 tree_type].dbapi._aux_cache_keys)
6216 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
# aux_get failure (presumably a KeyError caught just above) is
# translated into PackageNotFound for callers.
6218 raise portage.exception.PackageNotFound(cpv)
6219 pkg = Package(cpv=cpv, metadata=metadata,
6220 root_config=root_config, installed=installed)
6221 if type_name == "ebuild":
# Ebuilds need USE computed from current settings; other types carry
# USE in their stored metadata.
6222 settings = self.pkgsettings[root_config.root]
6223 settings.setcpv(pkg)
6224 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6225 self._pkg_cache[pkg] = pkg
6228 def validate_blockers(self):
6229 """Remove any blockers from the digraph that do not match any of the
6230 packages within the graph. If necessary, create hard deps to ensure
6231 correct merge order such that mutually blocking packages are never
6232 installed simultaneously."""
# Nothing to validate when dependencies are not being processed at all.
6234 if "--buildpkgonly" in self.myopts or \
6235 "--nodeps" in self.myopts:
6238 #if "deep" in self.myparams:
6240 # Pull in blockers from all installed packages that haven't already
6241 # been pulled into the depgraph. This is not enabled by default
6242 # due to the performance penalty that is incurred by all the
6243 # additional dep_check calls that are required.
6245 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
# Phase 1: collect blocker atoms for each package, using the on-disk
# BlockerCache where its COUNTER still matches.
6246 for myroot in self.trees:
6247 vardb = self.trees[myroot]["vartree"].dbapi
6248 portdb = self.trees[myroot]["porttree"].dbapi
6249 pkgsettings = self.pkgsettings[myroot]
6250 final_db = self.mydbapi[myroot]
6252 blocker_cache = BlockerCache(myroot, vardb)
# Any cpv still in stale_cache after the scan is no longer installed
# and its cache entry is purged below.
6253 stale_cache = set(blocker_cache)
6256 stale_cache.discard(cpv)
6257 pkg_in_graph = self.digraph.contains(pkg)
6259 # Check for masked installed packages. Only warn about
6260 # packages that are in the graph in order to avoid warning
6261 # about those that will be automatically uninstalled during
6262 # the merge process or by --depclean.
6264 if pkg_in_graph and not visible(pkgsettings, pkg):
6265 self._masked_installed.add(pkg)
6267 blocker_atoms = None
6273 self._blocker_parents.child_nodes(pkg))
6278 self._irrelevant_blockers.child_nodes(pkg))
6281 if blockers is not None:
6282 blockers = set(str(blocker.atom) \
6283 for blocker in blockers)
6285 # If this node has any blockers, create a "nomerge"
6286 # node for it so that they can be enforced.
6287 self.spinner.update()
6288 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the installed instance changed since the
# cache entry was written, so the entry is unusable.
6289 if blocker_data is not None and \
6290 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6293 # If blocker data from the graph is available, use
6294 # it to validate the cache and update the cache if
6296 if blocker_data is not None and \
6297 blockers is not None:
6298 if not blockers.symmetric_difference(
6299 blocker_data.atoms):
6303 if blocker_data is None and \
6304 blockers is not None:
6305 # Re-use the blockers from the graph.
6306 blocker_atoms = sorted(blockers)
6307 counter = long(pkg.metadata["COUNTER"])
6309 blocker_cache.BlockerData(counter, blocker_atoms)
6310 blocker_cache[pkg.cpv] = blocker_data
6314 blocker_atoms = blocker_data.atoms
# Cache miss: recompute blockers via a full dep_check of *DEPEND.
6316 # Use aux_get() to trigger FakeVartree global
6317 # updates on *DEPEND when appropriate.
6318 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6319 # It is crucial to pass in final_db here in order to
6320 # optimize dep_check calls by eliminating atoms via
6321 # dep_wordreduce and dep_eval calls.
6323 portage.dep._dep_check_strict = False
6325 success, atoms = portage.dep_check(depstr,
6326 final_db, pkgsettings, myuse=pkg.use.enabled,
6327 trees=self._graph_trees, myroot=myroot)
6328 except Exception, e:
# SystemExit must propagate; anything else becomes a user-visible
# invalid-depstring notice.
6329 if isinstance(e, SystemExit):
6331 # This is helpful, for example, if a ValueError
6332 # is thrown from cpv_expand due to multiple
6333 # matches (this can happen if an atom lacks a
6335 show_invalid_depstring_notice(
6336 pkg, depstr, str(e))
# Always restore strict mode, even on failure.
6340 portage.dep._dep_check_strict = True
6342 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6343 if replacement_pkg and \
6344 replacement_pkg[0].operation == "merge":
6345 # This package is being replaced anyway, so
6346 # ignore invalid dependencies so as not to
6347 # annoy the user too much (otherwise they'd be
6348 # forced to manually unmerge it first).
6350 show_invalid_depstring_notice(pkg, depstr, atoms)
# Blockers are exactly the "!"-prefixed atoms from dep_check.
6352 blocker_atoms = [myatom for myatom in atoms \
6353 if myatom.startswith("!")]
6354 blocker_atoms.sort()
6355 counter = long(pkg.metadata["COUNTER"])
6356 blocker_cache[cpv] = \
6357 blocker_cache.BlockerData(counter, blocker_atoms)
6360 for atom in blocker_atoms:
6361 blocker = Blocker(atom=portage.dep.Atom(atom),
6362 eapi=pkg.metadata["EAPI"], root=myroot)
6363 self._blocker_parents.add(blocker, pkg)
6364 except portage.exception.InvalidAtom, e:
6365 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6366 show_invalid_depstring_notice(
6367 pkg, depstr, "Invalid Atom: %s" % (e,))
6369 for cpv in stale_cache:
6370 del blocker_cache[cpv]
6371 blocker_cache.flush()
6374 # Discard any "uninstall" tasks scheduled by previous calls
6375 # to this method, since those tasks may not make sense given
6376 # the current graph state.
6377 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6378 if previous_uninstall_tasks:
6379 self._blocker_uninstalls = digraph()
6380 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each blocker against the initial (installed) and
# final (post-merge) package sets.
6382 for blocker in self._blocker_parents.leaf_nodes():
6383 self.spinner.update()
6384 root_config = self.roots[blocker.root]
6385 virtuals = root_config.settings.getvirtuals()
6386 myroot = blocker.root
6387 initial_db = self.trees[myroot]["vartree"].dbapi
6388 final_db = self.mydbapi[myroot]
6390 provider_virtual = False
6391 if blocker.cp in virtuals and \
6392 not self._have_new_virt(blocker.root, blocker.cp):
6393 provider_virtual = True
# Old-style virtual blockers are expanded to one atom per provider.
6395 if provider_virtual:
6397 for provider_entry in virtuals[blocker.cp]:
6399 portage.dep_getkey(provider_entry)
6400 atoms.append(blocker.atom.replace(
6401 blocker.cp, provider_cp))
6403 atoms = [blocker.atom]
6405 blocked_initial = []
6407 blocked_initial.extend(initial_db.match_pkgs(atom))
6411 blocked_final.extend(final_db.match_pkgs(atom))
6413 if not blocked_initial and not blocked_final:
6414 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6415 self._blocker_parents.remove(blocker)
6416 # Discard any parents that don't have any more blockers.
6417 for pkg in parent_pkgs:
6418 self._irrelevant_blockers.add(blocker, pkg)
6419 if not self._blocker_parents.child_nodes(pkg):
6420 self._blocker_parents.remove(pkg)
6422 for parent in self._blocker_parents.parent_nodes(blocker):
6423 unresolved_blocks = False
6424 depends_on_order = set()
6425 for pkg in blocked_initial:
6426 if pkg.slot_atom == parent.slot_atom:
6427 # TODO: Support blocks within slots in cases where it
6428 # might make sense. For example, a new version might
6429 # require that the old version be uninstalled at build
6432 if parent.installed:
6433 # Two currently installed packages conflict with
6434 # each other. Ignore this case since the damage
6435 # is already done and this would be likely to
6436 # confuse users if displayed like a normal blocker.
6438 if parent.operation == "merge":
6439 # Maybe the blocked package can be replaced or simply
6440 # unmerged to resolve this block.
6441 depends_on_order.add((pkg, parent))
6443 # None of the above blocker resolution techniques apply,
6444 # so apparently this one is unresolvable.
6445 unresolved_blocks = True
6446 for pkg in blocked_final:
6447 if pkg.slot_atom == parent.slot_atom:
6448 # TODO: Support blocks within slots.
6450 if parent.operation == "nomerge" and \
6451 pkg.operation == "nomerge":
6452 # This blocker will be handled the next time that a
6453 # merge of either package is triggered.
6456 # Maybe the blocking package can be
6457 # unmerged to resolve this block.
6458 if parent.operation == "merge" and pkg.installed:
6459 depends_on_order.add((pkg, parent))
6461 elif parent.operation == "nomerge":
6462 depends_on_order.add((parent, pkg))
6464 # None of the above blocker resolution techniques apply,
6465 # so apparently this one is unresolvable.
6466 unresolved_blocks = True
6468 # Make sure we don't unmerge any packages that have been pulled
6470 if not unresolved_blocks and depends_on_order:
6471 for inst_pkg, inst_task in depends_on_order:
6472 if self.digraph.contains(inst_pkg) and \
6473 self.digraph.parent_nodes(inst_pkg):
6474 unresolved_blocks = True
# Schedule explicit uninstall tasks with hard ordering deps so the
# blocked package is removed before/after the appropriate merge.
6477 if not unresolved_blocks and depends_on_order:
6478 for inst_pkg, inst_task in depends_on_order:
6479 uninst_task = Package(built=inst_pkg.built,
6480 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6481 metadata=inst_pkg.metadata,
6482 operation="uninstall",
6483 root_config=inst_pkg.root_config,
6484 type_name=inst_pkg.type_name)
6485 self._pkg_cache[uninst_task] = uninst_task
6486 # Enforce correct merge order with a hard dep.
6487 self.digraph.addnode(uninst_task, inst_task,
6488 priority=BlockerDepPriority.instance)
6489 # Count references to this blocker so that it can be
6490 # invalidated after nodes referencing it have been
6492 self._blocker_uninstalls.addnode(uninst_task, blocker)
6493 if not unresolved_blocks and not depends_on_order:
6494 self._irrelevant_blockers.add(blocker, parent)
6495 self._blocker_parents.remove_edge(blocker, parent)
6496 if not self._blocker_parents.parent_nodes(blocker):
6497 self._blocker_parents.remove(blocker)
6498 if not self._blocker_parents.child_nodes(parent):
6499 self._blocker_parents.remove(parent)
6500 if unresolved_blocks:
6501 self._unsolvable_blockers.add(blocker, parent)
6505 def _accept_blocker_conflicts(self):
# Blocker conflicts are tolerable when nothing will actually be merged:
# any of these options means the run is a dry-run / fetch / build-only.
6507 for x in ("--buildpkgonly", "--fetchonly",
6508 "--fetch-all-uri", "--nodeps", "--pretend"):
6509 if x in self.myopts:
6514 def _merge_order_bias(self, mygraph):
6515 """Order nodes from highest to lowest overall reference count for
6516 optimal leaf node selection."""
# node_info maps node -> parent count (its initialization to {} is
# presumably just above this loop -- TODO confirm against full source).
6518 for node in mygraph.order:
6519 node_info[node] = len(mygraph.parent_nodes(node))
# Python 2 cmp function: positive when node2 has more parents, i.e.
# sorts descending by reference count.
6520 def cmp_merge_preference(node1, node2):
6521 return node_info[node2] - node_info[node1]
6522 mygraph.order.sort(cmp_merge_preference)
# NOTE(review): the `reversed` parameter shadows the builtin of the same
# name; kept for interface compatibility.
6524 def altlist(self, reversed=False):
# Loop until _serialize_tasks() succeeds; _serialize_tasks_retry signals
# that conflict resolution changed the graph and we must try again.
6526 while self._serialized_tasks_cache is None:
6527 self._resolve_conflicts()
6529 self._serialized_tasks_cache, self._scheduler_graph = \
6530 self._serialize_tasks()
6531 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cached task list.
6534 retlist = self._serialized_tasks_cache[:]
6539 def schedulerGraph(self):
6541 The scheduler graph is identical to the normal one except that
6542 uninstall edges are reversed in specific cases that require
6543 conflicting packages to be temporarily installed simultaneously.
6544 This is intended for use by the Scheduler in its parallelization
6545 logic. It ensures that temporary simultaneous installation of
6546 conflicting packages is avoided when appropriate (especially for
6547 !!atom blockers), but allowed in specific cases that require it.
6549 Note that this method calls break_refs() which alters the state of
6550 internal Package instances such that this depgraph instance should
6551 not be used to perform any more calculations.
# Lazily built; break_refs() detaches the nodes from this depgraph.
6553 if self._scheduler_graph is None:
6555 self.break_refs(self._scheduler_graph.order)
6556 return self._scheduler_graph
6558 def break_refs(self, nodes):
6560 Take a mergelist like that returned from self.altlist() and
6561 break any references that lead back to the depgraph. This is
6562 useful if you want to hold references to packages without
6563 also holding the depgraph on the heap.
# Only Package-like nodes carry a root_config attribute; other node
# types are left untouched.
6566 if hasattr(node, "root_config"):
6567 # The FakeVartree references the _package_cache which
6568 # references the depgraph. So that Package instances don't
6569 # hold the depgraph and FakeVartree on the heap, replace
6570 # the RootConfig that references the FakeVartree with the
6571 # original RootConfig instance which references the actual
6573 node.root_config = \
6574 self._trees_orig[node.root_config.root]["root_config"]
6576 def _resolve_conflicts(self):
# Complete the graph and validate blockers before attempting slot
# conflict resolution; failures here indicate an internal error since
# these steps are expected to succeed at this stage.
6577 if not self._complete_graph():
6578 raise self._unknown_internal_error()
6580 if not self.validate_blockers():
6581 raise self._unknown_internal_error()
# Only process slot conflicts when collisions were actually recorded.
6583 if self._slot_collision_info:
6584 self._process_slot_conflicts()
6586 def _serialize_tasks(self):
6588 if "--debug" in self.myopts:
6589 writemsg("\ndigraph:\n\n", noiselevel=-1)
6590 self.digraph.debug_print()
6591 writemsg("\n", noiselevel=-1)
6593 scheduler_graph = self.digraph.copy()
6594 mygraph=self.digraph.copy()
6595 # Prune "nomerge" root nodes if nothing depends on them, since
6596 # otherwise they slow down merge order calculation. Don't remove
6597 # non-root nodes since they help optimize merge order in some cases
6598 # such as revdep-rebuild.
6599 removed_nodes = set()
6601 for node in mygraph.root_nodes():
6602 if not isinstance(node, Package) or \
6603 node.installed or node.onlydeps:
6604 removed_nodes.add(node)
6606 self.spinner.update()
6607 mygraph.difference_update(removed_nodes)
6608 if not removed_nodes:
6610 removed_nodes.clear()
6611 self._merge_order_bias(mygraph)
6612 def cmp_circular_bias(n1, n2):
6614 RDEPEND is stronger than PDEPEND and this function
6615 measures such a strength bias within a circular
6616 dependency relationship.
6618 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6619 ignore_priority=DepPriority.MEDIUM_SOFT)
6620 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6621 ignore_priority=DepPriority.MEDIUM_SOFT)
6622 if n1_n2_medium == n2_n1_medium:
6627 myblocker_uninstalls = self._blocker_uninstalls.copy()
6629 # Contains uninstall tasks that have been scheduled to
6630 # occur after overlapping blockers have been installed.
6631 scheduled_uninstalls = set()
6632 # Contains any Uninstall tasks that have been ignored
6633 # in order to avoid the circular deps code path. These
6634 # correspond to blocker conflicts that could not be
6636 ignored_uninstall_tasks = set()
6637 have_uninstall_task = False
6638 complete = "complete" in self.myparams
6639 myblocker_parents = self._blocker_parents.copy()
6642 def get_nodes(**kwargs):
6644 Returns leaf nodes excluding Uninstall instances
6645 since those should be executed as late as possible.
6647 return [node for node in mygraph.leaf_nodes(**kwargs) \
6648 if isinstance(node, Package) and \
6649 (node.operation != "uninstall" or \
6650 node in scheduled_uninstalls)]
6652 # sys-apps/portage needs special treatment if ROOT="/"
6653 running_root = self._running_root.root
6654 from portage.const import PORTAGE_PACKAGE_ATOM
6655 runtime_deps = InternalPackageSet(
6656 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6657 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6658 PORTAGE_PACKAGE_ATOM)
6659 replacement_portage = self.mydbapi[running_root].match_pkgs(
6660 PORTAGE_PACKAGE_ATOM)
6663 running_portage = running_portage[0]
6665 running_portage = None
6667 if replacement_portage:
6668 replacement_portage = replacement_portage[0]
6670 replacement_portage = None
6672 if replacement_portage == running_portage:
6673 replacement_portage = None
6675 if replacement_portage is not None:
6676 # update from running_portage to replacement_portage asap
6677 asap_nodes.append(replacement_portage)
6679 if running_portage is not None:
6681 portage_rdepend = self._select_atoms_highest_available(
6682 running_root, running_portage.metadata["RDEPEND"],
6683 myuse=running_portage.use.enabled,
6684 parent=running_portage, strict=False)
6685 except portage.exception.InvalidDependString, e:
6686 portage.writemsg("!!! Invalid RDEPEND in " + \
6687 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6688 (running_root, running_portage.cpv, e), noiselevel=-1)
6690 portage_rdepend = []
6691 runtime_deps.update(atom for atom in portage_rdepend \
6692 if not atom.startswith("!"))
6694 ignore_priority_soft_range = [None]
6695 ignore_priority_soft_range.extend(
6696 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6697 tree_mode = "--tree" in self.myopts
6698 # Tracks whether or not the current iteration should prefer asap_nodes
6699 # if available. This is set to False when the previous iteration
6700 # failed to select any nodes. It is reset whenever nodes are
6701 # successfully selected.
6704 # By default, try to avoid selecting root nodes whenever possible. This
6705 # helps ensure that the maximimum possible number of soft dependencies
6706 # have been removed from the graph before their parent nodes have
6707 # selected. This is especially important when those dependencies are
6708 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6709 # CHOST has been changed (like when building a stage3 from a stage2).
6710 accept_root_node = False
6712 # State of prefer_asap and accept_root_node flags for successive
6713 # iterations that loosen the criteria for node selection.
6715 # iteration prefer_asap accept_root_node
6720 # If no nodes are selected on the 3rd iteration, it is due to
6721 # unresolved blockers or circular dependencies.
6723 while not mygraph.empty():
6724 self.spinner.update()
6725 selected_nodes = None
6726 ignore_priority = None
6727 if prefer_asap and asap_nodes:
6728 """ASAP nodes are merged before their soft deps."""
6729 asap_nodes = [node for node in asap_nodes \
6730 if mygraph.contains(node)]
6731 for node in asap_nodes:
6732 if not mygraph.child_nodes(node,
6733 ignore_priority=DepPriority.SOFT):
6734 selected_nodes = [node]
6735 asap_nodes.remove(node)
6737 if not selected_nodes and \
6738 not (prefer_asap and asap_nodes):
6739 for ignore_priority in ignore_priority_soft_range:
6740 nodes = get_nodes(ignore_priority=ignore_priority)
6744 if ignore_priority is None and not tree_mode:
6745 # Greedily pop all of these nodes since no relationship
6746 # has been ignored. This optimization destroys --tree
6747 # output, so it's disabled in reversed mode. If there
6748 # is a mix of merge and uninstall nodes, save the
6749 # uninstall nodes from later since sometimes a merge
6750 # node will render an install node unnecessary, and
6751 # we want to avoid doing a separate uninstall task in
6753 merge_nodes = [node for node in nodes \
6754 if node.operation == "merge"]
6756 selected_nodes = merge_nodes
6758 selected_nodes = nodes
6760 # For optimal merge order:
6761 # * Only pop one node.
6762 # * Removing a root node (node without a parent)
6763 # will not produce a leaf node, so avoid it.
6765 if mygraph.parent_nodes(node):
6766 # found a non-root node
6767 selected_nodes = [node]
6769 if not selected_nodes and \
6770 (accept_root_node or ignore_priority is None):
6771 # settle for a root node
6772 selected_nodes = [nodes[0]]
6774 if not selected_nodes:
6775 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6777 """Recursively gather a group of nodes that RDEPEND on
6778 eachother. This ensures that they are merged as a group
6779 and get their RDEPENDs satisfied as soon as possible."""
6780 def gather_deps(ignore_priority,
6781 mergeable_nodes, selected_nodes, node):
6782 if node in selected_nodes:
6784 if node not in mergeable_nodes:
6786 if node == replacement_portage and \
6787 mygraph.child_nodes(node,
6788 ignore_priority=DepPriority.MEDIUM_SOFT):
6789 # Make sure that portage always has all of it's
6790 # RDEPENDs installed first.
6792 selected_nodes.add(node)
6793 for child in mygraph.child_nodes(node,
6794 ignore_priority=ignore_priority):
6795 if not gather_deps(ignore_priority,
6796 mergeable_nodes, selected_nodes, child):
6799 mergeable_nodes = set(nodes)
6800 if prefer_asap and asap_nodes:
6802 for ignore_priority in xrange(DepPriority.SOFT,
6803 DepPriority.MEDIUM_SOFT + 1):
6805 if nodes is not asap_nodes and \
6806 not accept_root_node and \
6807 not mygraph.parent_nodes(node):
6809 selected_nodes = set()
6810 if gather_deps(ignore_priority,
6811 mergeable_nodes, selected_nodes, node):
6814 selected_nodes = None
6818 # If any nodes have been selected here, it's always
6819 # possible that anything up to a MEDIUM_SOFT priority
6820 # relationship has been ignored. This state is recorded
6821 # in ignore_priority so that relevant nodes will be
6822 # added to asap_nodes when appropriate.
6824 ignore_priority = DepPriority.MEDIUM_SOFT
6826 if prefer_asap and asap_nodes and not selected_nodes:
6827 # We failed to find any asap nodes to merge, so ignore
6828 # them for the next iteration.
6832 if not selected_nodes and not accept_root_node:
6833 # Maybe there are only root nodes left, so accept them
6834 # for the next iteration.
6835 accept_root_node = True
6838 if selected_nodes and ignore_priority > DepPriority.SOFT:
6839 # Try to merge ignored medium deps as soon as possible.
6840 for node in selected_nodes:
6841 children = set(mygraph.child_nodes(node))
6842 soft = children.difference(
6843 mygraph.child_nodes(node,
6844 ignore_priority=DepPriority.SOFT))
6845 medium_soft = children.difference(
6846 mygraph.child_nodes(node,
6847 ignore_priority=DepPriority.MEDIUM_SOFT))
6848 medium_soft.difference_update(soft)
6849 for child in medium_soft:
6850 if child in selected_nodes:
6852 if child in asap_nodes:
6854 asap_nodes.append(child)
6856 if selected_nodes and len(selected_nodes) > 1:
6857 if not isinstance(selected_nodes, list):
6858 selected_nodes = list(selected_nodes)
6859 selected_nodes.sort(cmp_circular_bias)
6861 if not selected_nodes and not myblocker_uninstalls.is_empty():
6862 # An Uninstall task needs to be executed in order to
6863 # avoid conflict if possible.
6864 min_parent_deps = None
6866 for task in myblocker_uninstalls.leaf_nodes():
6867 # Do some sanity checks so that system or world packages
6868 # don't get uninstalled inappropriately here (only really
6869 # necessary when --complete-graph has not been enabled).
6871 if task in ignored_uninstall_tasks:
6874 if task in scheduled_uninstalls:
6875 # It's been scheduled but it hasn't
6876 # been executed yet due to dependence
6877 # on installation of blocking packages.
6880 root_config = self.roots[task.root]
6881 inst_pkg = self._pkg_cache[
6882 ("installed", task.root, task.cpv, "nomerge")]
6884 if self.digraph.contains(inst_pkg):
6887 forbid_overlap = False
6888 heuristic_overlap = False
6889 for blocker in myblocker_uninstalls.parent_nodes(task):
6890 if blocker.eapi in ("0", "1"):
6891 heuristic_overlap = True
6892 elif blocker.atom.blocker.overlap.forbid:
6893 forbid_overlap = True
6895 if forbid_overlap and running_root == task.root:
6898 if heuristic_overlap and running_root == task.root:
6899 # Never uninstall sys-apps/portage or it's essential
6900 # dependencies, except through replacement.
6902 runtime_dep_atoms = \
6903 list(runtime_deps.iterAtomsForPackage(task))
6904 except portage.exception.InvalidDependString, e:
6905 portage.writemsg("!!! Invalid PROVIDE in " + \
6906 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6907 (task.root, task.cpv, e), noiselevel=-1)
6911 # Don't uninstall a runtime dep if it appears
6912 # to be the only suitable one installed.
6914 vardb = root_config.trees["vartree"].dbapi
6915 for atom in runtime_dep_atoms:
6916 other_version = None
6917 for pkg in vardb.match_pkgs(atom):
6918 if pkg.cpv == task.cpv and \
6919 pkg.metadata["COUNTER"] == \
6920 task.metadata["COUNTER"]:
6924 if other_version is None:
6930 # For packages in the system set, don't take
6931 # any chances. If the conflict can't be resolved
6932 # by a normal replacement operation then abort.
6935 for atom in root_config.sets[
6936 "system"].iterAtomsForPackage(task):
6939 except portage.exception.InvalidDependString, e:
6940 portage.writemsg("!!! Invalid PROVIDE in " + \
6941 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6942 (task.root, task.cpv, e), noiselevel=-1)
6948 # Note that the world check isn't always
6949 # necessary since self._complete_graph() will
6950 # add all packages from the system and world sets to the
6951 # graph. This just allows unresolved conflicts to be
6952 # detected as early as possible, which makes it possible
6953 # to avoid calling self._complete_graph() when it is
6954 # unnecessary due to blockers triggering an abortion.
6956 # For packages in the world set, go ahead an uninstall
6957 # when necessary, as long as the atom will be satisfied
6958 # in the final state.
6959 graph_db = self.mydbapi[task.root]
6962 for atom in root_config.sets[
6963 "world"].iterAtomsForPackage(task):
6965 for pkg in graph_db.match_pkgs(atom):
6973 except portage.exception.InvalidDependString, e:
6974 portage.writemsg("!!! Invalid PROVIDE in " + \
6975 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6976 (task.root, task.cpv, e), noiselevel=-1)
6982 # Check the deps of parent nodes to ensure that
6983 # the chosen task produces a leaf node. Maybe
6984 # this can be optimized some more to make the
6985 # best possible choice, but the current algorithm
6986 # is simple and should be near optimal for most
6989 for parent in mygraph.parent_nodes(task):
6990 parent_deps.update(mygraph.child_nodes(parent,
6991 ignore_priority=DepPriority.MEDIUM_SOFT))
6992 parent_deps.remove(task)
6993 if min_parent_deps is None or \
6994 len(parent_deps) < min_parent_deps:
6995 min_parent_deps = len(parent_deps)
6998 if uninst_task is not None:
6999 # The uninstall is performed only after blocking
7000 # packages have been merged on top of it. File
7001 # collisions between blocking packages are detected
7002 # and removed from the list of files to be uninstalled.
7003 scheduled_uninstalls.add(uninst_task)
7004 parent_nodes = mygraph.parent_nodes(uninst_task)
7006 # Reverse the parent -> uninstall edges since we want
7007 # to do the uninstall after blocking packages have
7008 # been merged on top of it.
7009 mygraph.remove(uninst_task)
7010 for blocked_pkg in parent_nodes:
7011 mygraph.add(blocked_pkg, uninst_task,
7012 priority=BlockerDepPriority.instance)
7013 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7014 scheduler_graph.add(blocked_pkg, uninst_task,
7015 priority=BlockerDepPriority.instance)
7018 # None of the Uninstall tasks are acceptable, so
7019 # the corresponding blockers are unresolvable.
7020 # We need to drop an Uninstall task here in order
7021 # to avoid the circular deps code path, but the
7022 # blocker will still be counted as an unresolved
7024 for node in myblocker_uninstalls.leaf_nodes():
7026 mygraph.remove(node)
7031 ignored_uninstall_tasks.add(node)
7034 if uninst_task is not None:
7035 # After dropping an Uninstall task, reset
7036 # the state variables for leaf node selection and
7037 # continue trying to select leaf nodes.
7039 accept_root_node = False
7042 if not selected_nodes:
7043 self._circular_deps_for_display = mygraph
7044 raise self._unknown_internal_error()
7046 # At this point, we've succeeded in selecting one or more nodes, so
7047 # it's now safe to reset the prefer_asap and accept_root_node flags
7048 # to their default states.
7050 accept_root_node = False
7052 mygraph.difference_update(selected_nodes)
7054 for node in selected_nodes:
7055 if isinstance(node, Package) and \
7056 node.operation == "nomerge":
7059 # Handle interactions between blockers
7060 # and uninstallation tasks.
7061 solved_blockers = set()
7063 if isinstance(node, Package) and \
7064 "uninstall" == node.operation:
7065 have_uninstall_task = True
7068 vardb = self.trees[node.root]["vartree"].dbapi
7069 previous_cpv = vardb.match(node.slot_atom)
7071 # The package will be replaced by this one, so remove
7072 # the corresponding Uninstall task if necessary.
7073 previous_cpv = previous_cpv[0]
7075 ("installed", node.root, previous_cpv, "uninstall")
7077 mygraph.remove(uninst_task)
7081 if uninst_task is not None and \
7082 uninst_task not in ignored_uninstall_tasks and \
7083 myblocker_uninstalls.contains(uninst_task):
7084 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7085 myblocker_uninstalls.remove(uninst_task)
7086 # Discard any blockers that this Uninstall solves.
7087 for blocker in blocker_nodes:
7088 if not myblocker_uninstalls.child_nodes(blocker):
7089 myblocker_uninstalls.remove(blocker)
7090 solved_blockers.add(blocker)
7092 retlist.append(node)
7094 if (isinstance(node, Package) and \
7095 "uninstall" == node.operation) or \
7096 (uninst_task is not None and \
7097 uninst_task in scheduled_uninstalls):
7098 # Include satisfied blockers in the merge list
7099 # since the user might be interested and also
7100 # it serves as an indicator that blocking packages
7101 # will be temporarily installed simultaneously.
7102 for blocker in solved_blockers:
7103 retlist.append(Blocker(atom=blocker.atom,
7104 root=blocker.root, eapi=blocker.eapi,
7107 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7108 for node in myblocker_uninstalls.root_nodes():
7109 unsolvable_blockers.add(node)
7111 for blocker in unsolvable_blockers:
7112 retlist.append(blocker)
7114 # If any Uninstall tasks need to be executed in order
7115 # to avoid a conflict, complete the graph with any
7116 # dependencies that may have been initially
7117 # neglected (to ensure that unsafe Uninstall tasks
7118 # are properly identified and blocked from execution).
7119 if have_uninstall_task and \
7121 not unsolvable_blockers:
7122 self.myparams.add("complete")
7123 raise self._serialize_tasks_retry("")
7125 if unsolvable_blockers and \
7126 not self._accept_blocker_conflicts():
7127 self._unsatisfied_blockers_for_display = unsolvable_blockers
7128 self._serialized_tasks_cache = retlist[:]
7129 self._scheduler_graph = scheduler_graph
7130 raise self._unknown_internal_error()
7132 if self._slot_collision_info and \
7133 not self._accept_blocker_conflicts():
7134 self._serialized_tasks_cache = retlist[:]
7135 self._scheduler_graph = scheduler_graph
7136 raise self._unknown_internal_error()
7138 return retlist, scheduler_graph
	def _show_circular_deps(self, mygraph):
		"""Display diagnostics for an unresolvable circular-dependency state.

		Strips root nodes out of mygraph (they have no parents, so they
		cannot be part of a cycle), forces --tree output, re-displays the
		remaining nodes together with their USE flags, dumps the graph,
		and prints advice about disabling optional USE flags.
		NOTE(review): this mutates self.myopts (--quiet/--verbose are
		dropped, --tree is forced) and consumes nodes from mygraph.
		"""
		# No leaf nodes are available, so we have a circular
		# dependency panic situation. Reduce the noise level to a
		# minimum via repeated elimination of root nodes since they
		# have no parents and thus can not be part of a cycle.
		root_nodes = mygraph.root_nodes(
			ignore_priority=DepPriority.MEDIUM_SOFT)
		mygraph.difference_update(root_nodes)
		# Display the USE flags that are enabled on nodes that are part
		# of dependency cycles in case that helps the user decide to
		# disable some of them.
		tempgraph = mygraph.copy()
		while not tempgraph.empty():
			nodes = tempgraph.leaf_nodes()
			node = tempgraph.order[0]
			display_order.append(node)
			tempgraph.remove(node)
		display_order.reverse()
		# Force a full tree display for the cycle dump.
		self.myopts.pop("--quiet", None)
		self.myopts.pop("--verbose", None)
		self.myopts["--tree"] = True
		portage.writemsg("\n\n", noiselevel=-1)
		self.display(display_order)
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Error: circular dependencies:\n",
		portage.writemsg("\n", noiselevel=-1)
		mygraph.debug_print()
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		portage.writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
7182 def _show_merge_list(self):
7183 if self._serialized_tasks_cache is not None and \
7184 not (self._displayed_list and \
7185 (self._displayed_list == self._serialized_tasks_cache or \
7186 self._displayed_list == \
7187 list(reversed(self._serialized_tasks_cache)))):
7188 display_list = self._serialized_tasks_cache[:]
7189 if "--tree" in self.myopts:
7190 display_list.reverse()
7191 self.display(display_list)
7193 def _show_unsatisfied_blockers(self, blockers):
7194 self._show_merge_list()
7195 msg = "Error: The above package list contains " + \
7196 "packages which cannot be installed " + \
7197 "at the same time on the same system."
7198 prefix = colorize("BAD", " * ")
7199 from textwrap import wrap
7200 portage.writemsg("\n", noiselevel=-1)
7201 for line in wrap(msg, 70):
7202 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7203 if "--quiet" not in self.myopts:
7204 show_blocker_docs_link()
	def display(self, mylist, favorites=[], verbosity=None):
		"""Render the merge list (Package and Blocker entries) for the user.

		In --tree mode each entry of mylist is first rewritten into an
		(entry, depth, ordered) tuple before the final formatting pass.
		verbosity defaults from the command line: 1 for --quiet, 3 for
		--verbose, otherwise 2.  As a side effect the displayed list is
		recorded in self._displayed_list so that display_problems() can
		avoid re-showing the same list via _show_merge_list().
		NOTE(review): favorites has a mutable default list; it appears
		to be only read here (fed to InternalPackageSet) -- confirm.
		"""
		# This is used to prevent display_problems() from
		# redundantly displaying this exact same merge list
		# again via _show_merge_list().
		self._displayed_list = mylist
		if verbosity is None:
			verbosity = ("--quiet" in self.myopts and 1 or \
				"--verbose" in self.myopts and 3 or 2)
		favorites_set = InternalPackageSet(favorites)
		oneshot = "--oneshot" in self.myopts or \
			"--onlydeps" in self.myopts
		columns = "--columns" in self.myopts
		counters = PackageCounters()
		# Select the USE-flag formatter: a stub when verbosity is 1,
		# otherwise the full colorizing builder defined below.
		if verbosity == 1 and "--verbose" not in self.myopts:
			def create_use_string(*args):
			def create_use_string(name, cur_iuse, iuse_forced, cur_use,
				is_new, reinst_flags,
				all_flags=(verbosity == 3 or "--quiet" in self.myopts),
				alphabetical=("--alphabetical" in self.myopts)):
				cur_iuse = set(cur_iuse)
				enabled_flags = cur_iuse.intersection(cur_use)
				removed_iuse = set(old_iuse).difference(cur_iuse)
				any_iuse = cur_iuse.union(old_iuse)
				any_iuse = list(any_iuse)
				# Color-code each flag: red/green/blue mark state changes,
				# yellow marks IUSE additions/removals; "*" and "%" are
				# change markers appended to the flag name.
				for flag in any_iuse:
					reinst_flag = reinst_flags and flag in reinst_flags
					if flag in enabled_flags:
						if is_new or flag in old_use and \
							(all_flags or reinst_flag):
							flag_str = red(flag)
						elif flag not in old_iuse:
							flag_str = yellow(flag) + "%*"
						elif flag not in old_use:
							flag_str = green(flag) + "*"
					elif flag in removed_iuse:
						if all_flags or reinst_flag:
							flag_str = yellow("-" + flag) + "%"
							flag_str = "(" + flag_str + ")"
							removed.append(flag_str)
						if is_new or flag in old_iuse and \
							flag not in old_use and \
							(all_flags or reinst_flag):
							flag_str = blue("-" + flag)
						elif flag not in old_iuse:
							flag_str = yellow("-" + flag)
							if flag not in iuse_forced:
						elif flag in old_use:
							flag_str = green("-" + flag) + "*"
					# Forced (use.force/use.mask) flags are parenthesized.
					if flag in iuse_forced:
						flag_str = "(" + flag_str + ")"
						enabled.append(flag_str)
						disabled.append(flag_str)
				ret = " ".join(enabled)
				ret = " ".join(enabled + disabled + removed)
				ret = '%s="%s" ' % (name, ret)
		repo_display = RepoDisplay(self.roots)
		mygraph = self.digraph.copy()
		# If there are any Uninstall instances, add the corresponding
		# blockers to the digraph (useful for --tree display).
		executed_uninstalls = set(node for node in mylist \
			if isinstance(node, Package) and node.operation == "unmerge")
		for uninstall in self._blocker_uninstalls.leaf_nodes():
			uninstall_parents = \
				self._blocker_uninstalls.parent_nodes(uninstall)
			if not uninstall_parents:
			# Remove the corresponding "nomerge" node and substitute
			# the Uninstall node.
			inst_pkg = self._pkg_cache[
				("installed", uninstall.root, uninstall.cpv, "nomerge")]
			mygraph.remove(inst_pkg)
			inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
			inst_pkg_blockers = []
			# Break the Package -> Uninstall edges.
			mygraph.remove(uninstall)
			# Resolution of a package's blockers
			# depends on its own uninstallation.
			for blocker in inst_pkg_blockers:
				mygraph.add(uninstall, blocker)
			# Expand Package -> Uninstall edges into
			# Package -> Blocker -> Uninstall edges.
			for blocker in uninstall_parents:
				mygraph.add(uninstall, blocker)
				for parent in self._blocker_parents.parent_nodes(blocker):
					if parent != inst_pkg:
						mygraph.add(blocker, parent)
			# If the uninstall task did not need to be executed because
			# of an upgrade, display Blocker -> Upgrade edges since the
			# corresponding Blocker -> Uninstall edges will not be shown.
				self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
			if upgrade_node is not None and \
				uninstall not in executed_uninstalls:
				for blocker in uninstall_parents:
					mygraph.add(upgrade_node, blocker)
		unsatisfied_blockers = []
			if isinstance(x, Blocker) and not x.satisfied:
				unsatisfied_blockers.append(x)
			# --tree mode: compute this entry's depth relative to the
			# previous entries and pull in its parents so the list can
			# be printed as an inverted dependency tree.
			if "--tree" in self.myopts:
				depth = len(tree_nodes)
				while depth and graph_key not in \
					mygraph.child_nodes(tree_nodes[depth-1]):
					tree_nodes = tree_nodes[:depth]
					tree_nodes.append(graph_key)
					display_list.append((x, depth, True))
					shown_edges.add((graph_key, tree_nodes[depth-1]))
					traversed_nodes = set() # prevent endless circles
					traversed_nodes.add(graph_key)
					def add_parents(current_node, ordered):
						# Do not traverse to parents if this node is an
						# an argument or a direct member of a set that has
						# been specified as an argument (system or world).
						if current_node not in self._set_nodes:
							parent_nodes = mygraph.parent_nodes(current_node)
						child_nodes = set(mygraph.child_nodes(current_node))
						selected_parent = None
						# First, try to avoid a direct cycle.
						for node in parent_nodes:
							if not isinstance(node, (Blocker, Package)):
							if node not in traversed_nodes and \
								node not in child_nodes:
								edge = (current_node, node)
								if edge in shown_edges:
								selected_parent = node
						if not selected_parent:
							# A direct cycle is unavoidable.
							for node in parent_nodes:
								if not isinstance(node, (Blocker, Package)):
								if node not in traversed_nodes:
									edge = (current_node, node)
									if edge in shown_edges:
									selected_parent = node
						shown_edges.add((current_node, selected_parent))
						traversed_nodes.add(selected_parent)
						add_parents(selected_parent, False)
						display_list.append((current_node,
							len(tree_nodes), ordered))
						tree_nodes.append(current_node)
					add_parents(graph_key, True)
				display_list.append((x, depth, True))
		mylist = display_list
		# Unsatisfied blockers are appended at depth 0 so they always show.
		for x in unsatisfied_blockers:
			mylist.append((x, 0, True))
		last_merge_depth = 0
		# Sweep backwards to drop consecutive duplicates produced by the
		# tree traversal above.
		for i in xrange(len(mylist)-1,-1,-1):
			graph_key, depth, ordered = mylist[i]
			if not ordered and depth == 0 and i > 0 \
				and graph_key == mylist[i-1][0] and \
				mylist[i-1][1] == 0:
				# An ordered node got a consecutive duplicate when the tree was
			if ordered and graph_key[-1] != "nomerge":
				last_merge_depth = depth
			if depth >= last_merge_depth or \
				i < len(mylist) - 1 and \
				depth >= mylist[i+1][1]:
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		# files to fetch list - avoids counting a same file twice
		# in size display (verbose mode)
		# Use this set to detect when all the "repoadd" strings are "[0]"
		# and disable the entire repo display in this case.
		for mylist_index in xrange(len(mylist)):
			x, depth, ordered = mylist[mylist_index]
			portdb = self.trees[myroot]["porttree"].dbapi
			bindb = self.trees[myroot]["bintree"].dbapi
			vardb = self.trees[myroot]["vartree"].dbapi
			vartree = self.trees[myroot]["vartree"]
			pkgsettings = self.pkgsettings[myroot]
			indent = " " * depth
			# Blocker entries get a one-line "[blocks ...]" rendering;
			# the rest of the loop body handles Package entries.
			if isinstance(x, Blocker):
					blocker_style = "PKG_BLOCKER_SATISFIED"
					addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
					blocker_style = "PKG_BLOCKER"
					addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
					counters.blocks += 1
						counters.blocks_satisfied += 1
				resolved = portage.key_expand(
					str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
				if "--columns" in self.myopts and "--quiet" in self.myopts:
					addl += " " + colorize(blocker_style, resolved)
					addl = "[%s %s] %s%s" % \
						(colorize(blocker_style, "blocks"),
						addl, indent, colorize(blocker_style, resolved))
				block_parents = self._blocker_parents.parent_nodes(x)
				block_parents = set([pnode[2] for pnode in block_parents])
				block_parents = ", ".join(block_parents)
					addl += colorize(blocker_style,
						" (\"%s\" is blocking %s)") % \
						(str(x.atom).lstrip("!"), block_parents)
					addl += colorize(blocker_style,
						" (is blocking %s)") % block_parents
				if isinstance(x, Blocker) and x.satisfied:
					blockers.append(addl)
				pkg_merge = ordered and pkg_status == "merge"
				if not pkg_merge and pkg_status == "merge":
					pkg_status = "nomerge"
				built = pkg_type != "ebuild"
				installed = pkg_type == "installed"
				metadata = pkg.metadata
				repo_name = metadata["repository"]
				if pkg_type == "ebuild":
					ebuild_path = portdb.findname(pkg_key)
					if not ebuild_path: # shouldn't happen
						raise portage.exception.PackageNotFound(pkg_key)
					repo_path_real = os.path.dirname(os.path.dirname(
						os.path.dirname(ebuild_path)))
					repo_path_real = portdb.getRepositoryPath(repo_name)
				pkg_use = list(pkg.use.enabled)
					restrict = flatten(use_reduce(paren_reduce(
						pkg.metadata["RESTRICT"]), uselist=pkg_use))
				except portage.exception.InvalidDependString, e:
					if not pkg.installed:
						show_invalid_depstring_notice(x,
							pkg.metadata["RESTRICT"], str(e))
				# RESTRICT=fetch packages are flagged so the user knows
				# manual fetching may be required.
				if "ebuild" == pkg_type and x[3] != "nomerge" and \
					"fetch" in restrict:
						counters.restrict_fetch += 1
					if portdb.fetch_check(pkg_key, pkg_use):
							counters.restrict_fetch_satisfied += 1
				#we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
				#param is used for -u, where you still *do* want to see when something is being upgraded.
				installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
				# Same exact version already installed: a reinstall ("R").
				if vardb.cpv_exists(pkg_key):
					addl=" "+yellow("R")+fetch+" "
						counters.reinst += 1
					elif pkg_status == "uninstall":
						counters.uninst += 1
				# filter out old-style virtual matches
				elif installed_versions and \
					portage.cpv_getkey(installed_versions[0]) == \
					portage.cpv_getkey(pkg_key):
					myinslotlist = vardb.match(pkg.slot_atom)
					# If this is the first install of a new-style virtual, we
					# need to filter out old-style virtual matches.
					if myinslotlist and \
						portage.cpv_getkey(myinslotlist[0]) != \
						portage.cpv_getkey(pkg_key):
					myoldbest = myinslotlist[:]
						# Downgrade ("UD") vs. upgrade ("U") within the slot.
						if not portage.dep.cpvequal(pkg_key,
							portage.best([pkg_key] + myoldbest)):
							addl += turquoise("U")+blue("D")
								counters.downgrades += 1
							addl += turquoise("U") + " "
								counters.upgrades += 1
						# New slot, mark it new.
						addl = " " + green("NS") + fetch + " "
						myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
							counters.newslot += 1
					if "--changelog" in self.myopts:
						inst_matches = vardb.match(pkg.slot_atom)
							changelogs.extend(self.calc_changelog(
								portdb.findname(pkg_key),
								inst_matches[0], pkg_key))
					addl = " " + green("N") + " " + fetch + " "
				# Collect forced flags so they can be parenthesized in the
				# USE display.
				forced_flags = set()
				pkgsettings.setcpv(pkg) # for package.use.{mask,force}
				forced_flags.update(pkgsettings.useforce)
				forced_flags.update(pkgsettings.usemask)
				cur_use = [flag for flag in pkg.use.enabled \
					if flag in pkg.iuse.all]
				cur_iuse = sorted(pkg.iuse.all)
				if myoldbest and myinslotlist:
					previous_cpv = myoldbest[0]
					previous_cpv = pkg.cpv
				if vardb.cpv_exists(previous_cpv):
					old_iuse, old_use = vardb.aux_get(
						previous_cpv, ["IUSE", "USE"])
					old_iuse = list(set(
						filter_iuse_defaults(old_iuse.split())))
					old_use = old_use.split()
				old_use = [flag for flag in old_use if flag in old_iuse]
				use_expand = pkgsettings["USE_EXPAND"].lower().split()
				use_expand.reverse()
				use_expand_hidden = \
					pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
				# Bucket flat flag lists by USE_EXPAND variable, with the
				# plain "USE" bucket holding whatever is left over.
				def map_to_use_expand(myvals, forcedFlags=False,
					for exp in use_expand:
						for val in myvals[:]:
							if val.startswith(exp.lower()+"_"):
								if val in forced_flags:
									forced[exp].add(val[len(exp)+1:])
								ret[exp].append(val[len(exp)+1:])
					forced["USE"] = [val for val in myvals \
						if val in forced_flags]
						for exp in use_expand_hidden:
				# Prevent USE_EXPAND_HIDDEN flags from being hidden if they
				# are the only thing that triggered reinstallation.
				reinst_flags_map = {}
				reinstall_for_flags = self._reinstall_nodes.get(pkg)
				reinst_expand_map = None
				if reinstall_for_flags:
					reinst_flags_map = map_to_use_expand(
						list(reinstall_for_flags), removeHidden=False)
					for k in list(reinst_flags_map):
						if not reinst_flags_map[k]:
							del reinst_flags_map[k]
					if not reinst_flags_map.get("USE"):
						reinst_expand_map = reinst_flags_map.copy()
						reinst_expand_map.pop("USE", None)
				if reinst_expand_map and \
					not set(reinst_expand_map).difference(
					use_expand_hidden = \
						set(use_expand_hidden).difference(
				cur_iuse_map, iuse_forced = \
					map_to_use_expand(cur_iuse, forcedFlags=True)
				cur_use_map = map_to_use_expand(cur_use)
				old_iuse_map = map_to_use_expand(old_iuse)
				old_use_map = map_to_use_expand(old_use)
				use_expand.insert(0, "USE")
				for key in use_expand:
					if key in use_expand_hidden:
					verboseadd += create_use_string(key.upper(),
						cur_iuse_map[key], iuse_forced[key],
						cur_use_map[key], old_iuse_map[key],
						old_use_map[key], is_new,
						reinst_flags_map.get(key))
				# Verbose mode: sum download sizes, counting each distinct
				# fetch file only once across the whole list.
				if pkg_type == "ebuild" and pkg_merge:
						myfilesdict = portdb.getfetchsizes(pkg_key,
							useflags=pkg_use, debug=self.edebug)
					except portage.exception.InvalidDependString, e:
						src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
						show_invalid_depstring_notice(x, src_uri, str(e))
					if myfilesdict is None:
						myfilesdict="[empty/missing/bad digest]"
						for myfetchfile in myfilesdict:
							if myfetchfile not in myfetchlist:
								mysize+=myfilesdict[myfetchfile]
								myfetchlist.append(myfetchfile)
							counters.totalsize += mysize
					verboseadd += format_size(mysize)
				# assign index for a previous version in the same slot
				has_previous = False
				repo_name_prev = None
				slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
				slot_matches = vardb.match(slot_atom)
					repo_name_prev = vardb.aux_get(slot_matches[0],
				# now use the data to generate output
				if pkg.installed or not has_previous:
					repoadd = repo_display.repoStr(repo_path_real)
					repo_path_prev = None
						repo_path_prev = portdb.getRepositoryPath(
					if repo_path_prev == repo_path_real:
						repoadd = repo_display.repoStr(repo_path_real)
						repoadd = "%s=>%s" % (
							repo_display.repoStr(repo_path_prev),
							repo_display.repoStr(repo_path_real))
					repoadd_set.add(repoadd)
				xs = [portage.cpv_getkey(pkg_key)] + \
					list(portage.catpkgsplit(pkg_key)[2:])
				if "COLUMNWIDTH" in self.settings:
						mywidth = int(self.settings["COLUMNWIDTH"])
					except ValueError, e:
						portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
							"!!! Unable to parse COLUMNWIDTH='%s'\n" % \
							self.settings["COLUMNWIDTH"], noiselevel=-1)
				oldlp = mywidth - 30
				# Convert myoldbest from a list to a string.
					for pos, key in enumerate(myoldbest):
						key = portage.catpkgsplit(key)[2] + \
							"-" + portage.catpkgsplit(key)[3]
						if key[-3:] == "-r0":
						myoldbest[pos] = key
					myoldbest = blue("["+", ".join(myoldbest)+"]")
				root_config = self.roots[myroot]
				system_set = root_config.sets["system"]
				world_set = root_config.sets["world"]
				# Determine system/world membership (and maybe add the
				# package to world) for the colorization below.
					pkg_system = system_set.findAtomForPackage(pkg)
					pkg_world = world_set.findAtomForPackage(pkg)
					if not (oneshot or pkg_world) and \
						myroot == self.target_root and \
						favorites_set.findAtomForPackage(pkg):
						# Maybe it will be added to world now.
						if create_world_atom(pkg, favorites_set, root_config):
				except portage.exception.InvalidDependString:
					# This is reported elsewhere if relevant.
				# Color the package name by merge status and by
				# system/world membership.
				def pkgprint(pkg_str):
							return colorize("PKG_MERGE_SYSTEM", pkg_str)
							return colorize("PKG_MERGE_WORLD", pkg_str)
							return colorize("PKG_MERGE", pkg_str)
					elif pkg_status == "uninstall":
						return colorize("PKG_UNINSTALL", pkg_str)
							return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
							return colorize("PKG_NOMERGE_WORLD", pkg_str)
							return colorize("PKG_NOMERGE", pkg_str)
					properties = flatten(use_reduce(paren_reduce(
						pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
				except portage.exception.InvalidDependString, e:
					if not pkg.installed:
						show_invalid_depstring_notice(pkg,
							pkg.metadata["PROPERTIES"], str(e))
				# PROPERTIES=interactive packages are marked "I".
				interactive = "interactive" in properties
				if interactive and pkg.operation == "merge":
					addl = colorize("WARN", "I") + addl[1:]
						counters.interactive += 1
				# Line layout differs for --columns/--quiet and for
				# packages destined for a non-default ROOT ("to <root>").
				if "--columns" in self.myopts:
					if "--quiet" in self.myopts:
						myprint=addl+" "+indent+pkgprint(pkg_cp)
						myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
						myprint=myprint+myoldbest
						myprint=myprint+darkgreen("to "+x[1])
							myprint = "[%s] %s%s" % \
								(pkgprint(pkg_status.ljust(13)),
								indent, pkgprint(pkg.cp))
							myprint = "[%s %s] %s%s" % \
								(pkgprint(pkg.type_name), addl,
								indent, pkgprint(pkg.cp))
						if (newlp-nc_len(myprint)) > 0:
							myprint=myprint+(" "*(newlp-nc_len(myprint)))
						myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
						if (oldlp-nc_len(myprint)) > 0:
							myprint=myprint+" "*(oldlp-nc_len(myprint))
						myprint=myprint+myoldbest
						myprint += darkgreen("to " + pkg.root)
						myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
						myprint = "[" + pkg_type + " " + addl + "] "
					myprint += indent + pkgprint(pkg_key) + " " + \
						myoldbest + darkgreen("to " + myroot)
				if "--columns" in self.myopts:
					if "--quiet" in self.myopts:
						myprint=addl+" "+indent+pkgprint(pkg_cp)
						myprint=myprint+" "+green(xs[1]+xs[2])+" "
						myprint=myprint+myoldbest
							myprint = "[%s] %s%s" % \
								(pkgprint(pkg_status.ljust(13)),
								indent, pkgprint(pkg.cp))
							myprint = "[%s %s] %s%s" % \
								(pkgprint(pkg.type_name), addl,
								indent, pkgprint(pkg.cp))
						if (newlp-nc_len(myprint)) > 0:
							myprint=myprint+(" "*(newlp-nc_len(myprint)))
						myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
						if (oldlp-nc_len(myprint)) > 0:
							myprint=myprint+(" "*(oldlp-nc_len(myprint)))
						myprint += myoldbest
						myprint = "[%s] %s%s %s" % \
							(pkgprint(pkg_status.ljust(13)),
							indent, pkgprint(pkg.cpv),
						myprint = "[%s %s] %s%s %s" % \
							(pkgprint(pkg_type), addl, indent,
							pkgprint(pkg.cpv), myoldbest)
				if columns and pkg.operation == "uninstall":
				p.append((myprint, verboseadd, repoadd))
				# Warn that merging a new portage will make emerge stop
				# at that point, re-exec itself, and then resume.
				if "--tree" not in self.myopts and \
					"--quiet" not in self.myopts and \
					not self._opts_no_restart.intersection(self.myopts) and \
					pkg.root == self._running_root.root and \
					portage.match_from_list(
					portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
					not vardb.cpv_exists(pkg.cpv) and \
					"--quiet" not in self.myopts:
					if mylist_index < len(mylist) - 1:
						p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
						p.append(colorize("WARN", " then resume the merge."))
		# Suppress the repo column when every entry is from repo [0].
		show_repos = repoadd_set and repoadd_set != set(["0"])
			if isinstance(x, basestring):
				out.write("%s\n" % (x,))
			myprint, verboseadd, repoadd = x
				myprint += " " + verboseadd
			if show_repos and repoadd:
				myprint += " " + teal("[%s]" % repoadd)
			out.write("%s\n" % (myprint,))
		sys.stdout.write(str(repo_display))
		if "--changelog" in self.myopts:
			for revision,text in changelogs:
				print bold('*'+revision)
				sys.stdout.write(text)
7952 def display_problems(self):
7954 Display problems with the dependency graph such as slot collisions.
7955 This is called internally by display() to show the problems _after_
7956 the merge list where it is most likely to be seen, but if display()
7957 is not going to be called then this method should be called explicitly
7958 to ensure that the user is notified of problems with the graph.
7960 All output goes to stderr, except for unsatisfied dependencies which
7961 go to stdout for parsing by programs such as autounmask.
7964 # Note that show_masked_packages() sends it's output to
7965 # stdout, and some programs such as autounmask parse the
7966 # output in cases when emerge bails out. However, when
7967 # show_masked_packages() is called for installed packages
7968 # here, the message is a warning that is more appropriate
7969 # to send to stderr, so temporarily redirect stdout to
7970 # stderr. TODO: Fix output code so there's a cleaner way
7971 # to redirect everything to stderr.
7976 sys.stdout = sys.stderr
7977 self._display_problems()
7983 # This goes to stdout for parsing by programs like autounmask.
7984 for pargs, kwargs in self._unsatisfied_deps_for_display:
7985 self._show_unsatisfied_dep(*pargs, **kwargs)
7987 def _display_problems(self):
7988 if self._circular_deps_for_display is not None:
7989 self._show_circular_deps(
7990 self._circular_deps_for_display)
7992 # The user is only notified of a slot conflict if
7993 # there are no unresolvable blocker conflicts.
7994 if self._unsatisfied_blockers_for_display is not None:
7995 self._show_unsatisfied_blockers(
7996 self._unsatisfied_blockers_for_display)
7998 self._show_slot_collision_notice()
8000 # TODO: Add generic support for "set problem" handlers so that
8001 # the below warnings aren't special cases for world only.
8003 if self._missing_args:
8004 world_problems = False
8005 if "world" in self._sets:
8006 # Filter out indirect members of world (from nested sets)
8007 # since only direct members of world are desired here.
8008 world_set = self.roots[self.target_root].sets["world"]
8009 for arg, atom in self._missing_args:
8010 if arg.name == "world" and atom in world_set:
8011 world_problems = True
8015 sys.stderr.write("\n!!! Problems have been " + \
8016 "detected with your world file\n")
8017 sys.stderr.write("!!! Please run " + \
8018 green("emaint --check world")+"\n\n")
8020 if self._missing_args:
8021 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8022 " Ebuilds for the following packages are either all\n")
8023 sys.stderr.write(colorize("BAD", "!!!") + \
8024 " masked or don't exist:\n")
8025 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8026 self._missing_args) + "\n")
8028 if self._pprovided_args:
8030 for arg, atom in self._pprovided_args:
8031 if isinstance(arg, SetArg):
8033 arg_atom = (atom, atom)
8036 arg_atom = (arg.arg, atom)
8037 refs = arg_refs.setdefault(arg_atom, [])
8038 if parent not in refs:
8041 msg.append(bad("\nWARNING: "))
8042 if len(self._pprovided_args) > 1:
8043 msg.append("Requested packages will not be " + \
8044 "merged because they are listed in\n")
8046 msg.append("A requested package will not be " + \
8047 "merged because it is listed in\n")
8048 msg.append("package.provided:\n\n")
8049 problems_sets = set()
8050 for (arg, atom), refs in arg_refs.iteritems():
8053 problems_sets.update(refs)
8055 ref_string = ", ".join(["'%s'" % name for name in refs])
8056 ref_string = " pulled in by " + ref_string
8057 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8059 if "world" in problems_sets:
8060 msg.append("This problem can be solved in one of the following ways:\n\n")
8061 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8062 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8063 msg.append(" C) Remove offending entries from package.provided.\n\n")
8064 msg.append("The best course of action depends on the reason that an offending\n")
8065 msg.append("package.provided entry exists.\n\n")
8066 sys.stderr.write("".join(msg))
8068 masked_packages = []
8069 for pkg in self._masked_installed:
8070 root_config = pkg.root_config
8071 pkgsettings = self.pkgsettings[pkg.root]
8072 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8073 masked_packages.append((root_config, pkgsettings,
8074 pkg.cpv, pkg.metadata, mreasons))
8076 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8077 " The following installed packages are masked:\n")
8078 show_masked_packages(masked_packages)
8082 def calc_changelog(self,ebuildpath,current,next):
8083 if ebuildpath == None or not os.path.exists(ebuildpath):
8085 current = '-'.join(portage.catpkgsplit(current)[1:])
8086 if current.endswith('-r0'):
8087 current = current[:-3]
8088 next = '-'.join(portage.catpkgsplit(next)[1:])
8089 if next.endswith('-r0'):
8091 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8093 changelog = open(changelogpath).read()
8094 except SystemExit, e:
8095 raise # Needed else can't exit
8098 divisions = self.find_changelog_tags(changelog)
8099 #print 'XX from',current,'to',next
8100 #for div,text in divisions: print 'XX',div
8101 # skip entries for all revisions above the one we are about to emerge
8102 for i in range(len(divisions)):
8103 if divisions[i][0]==next:
8104 divisions = divisions[i:]
8106 # find out how many entries we are going to display
8107 for i in range(len(divisions)):
8108 if divisions[i][0]==current:
8109 divisions = divisions[:i]
8112 # couldnt find the current revision in the list. display nothing
8116 def find_changelog_tags(self,changelog):
8120 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8122 if release is not None:
8123 divs.append((release,changelog))
8125 if release is not None:
8126 divs.append((release,changelog[:match.start()]))
8127 changelog = changelog[match.end():]
8128 release = match.group(1)
8129 if release.endswith('.ebuild'):
8130 release = release[:-7]
8131 if release.endswith('-r0'):
8132 release = release[:-3]
8134 def saveNomergeFavorites(self):
8135 """Find atoms in favorites that are not in the mergelist and add them
8136 to the world file if necessary."""
8137 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8138 "--oneshot", "--onlydeps", "--pretend"):
8139 if x in self.myopts:
8141 root_config = self.roots[self.target_root]
8142 world_set = root_config.sets["world"]
8144 world_locked = False
8145 if hasattr(world_set, "lock"):
8149 if hasattr(world_set, "load"):
8150 world_set.load() # maybe it's changed on disk
8152 args_set = self._sets["args"]
8153 portdb = self.trees[self.target_root]["porttree"].dbapi
8154 added_favorites = set()
8155 for x in self._set_nodes:
8156 pkg_type, root, pkg_key, pkg_status = x
8157 if pkg_status != "nomerge":
8161 myfavkey = create_world_atom(x, args_set, root_config)
8163 if myfavkey in added_favorites:
8165 added_favorites.add(myfavkey)
8166 except portage.exception.InvalidDependString, e:
8167 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8168 (pkg_key, str(e)), noiselevel=-1)
8169 writemsg("!!! see '%s'\n\n" % os.path.join(
8170 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8173 for k in self._sets:
8174 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8179 all_added.append(SETPREFIX + k)
8180 all_added.extend(added_favorites)
8183 print ">>> Recording %s in \"world\" favorites file..." % \
8184 colorize("INFORM", str(a))
8186 world_set.update(all_added)
8191 def loadResumeCommand(self, resume_data, skip_masked=False):
8193 Add a resume command to the graph and validate it in the process. This
8194 will raise a PackageNotFound exception if a package is not available.
8197 if not isinstance(resume_data, dict):
8200 mergelist = resume_data.get("mergelist")
8201 if not isinstance(mergelist, list):
8204 fakedb = self.mydbapi
8206 serialized_tasks = []
8209 if not (isinstance(x, list) and len(x) == 4):
8211 pkg_type, myroot, pkg_key, action = x
8212 if pkg_type not in self.pkg_tree_map:
8214 if action != "merge":
8216 tree_type = self.pkg_tree_map[pkg_type]
8217 mydb = trees[myroot][tree_type].dbapi
8218 db_keys = list(self._trees_orig[myroot][
8219 tree_type].dbapi._aux_cache_keys)
8221 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8223 # It does no exist or it is corrupt.
8224 if action == "uninstall":
8226 raise portage.exception.PackageNotFound(pkg_key)
8227 installed = action == "uninstall"
8228 built = pkg_type != "ebuild"
8229 root_config = self.roots[myroot]
8230 pkg = Package(built=built, cpv=pkg_key,
8231 installed=installed, metadata=metadata,
8232 operation=action, root_config=root_config,
8234 if pkg_type == "ebuild":
8235 pkgsettings = self.pkgsettings[myroot]
8236 pkgsettings.setcpv(pkg)
8237 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8238 self._pkg_cache[pkg] = pkg
8240 root_config = self.roots[pkg.root]
8241 if "merge" == pkg.operation and \
8242 not visible(root_config.settings, pkg):
8244 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8246 self._unsatisfied_deps_for_display.append(
8247 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8249 fakedb[myroot].cpv_inject(pkg)
8250 serialized_tasks.append(pkg)
8251 self.spinner.update()
8253 if self._unsatisfied_deps_for_display:
8256 if not serialized_tasks or "--nodeps" in self.myopts:
8257 self._serialized_tasks_cache = serialized_tasks
8258 self._scheduler_graph = self.digraph
8260 self._select_package = self._select_pkg_from_graph
8261 self.myparams.add("selective")
8263 favorites = resume_data.get("favorites")
8264 args_set = self._sets["args"]
8265 if isinstance(favorites, list):
8266 args = self._load_favorites(favorites)
8270 for task in serialized_tasks:
8271 if isinstance(task, Package) and \
8272 task.operation == "merge":
8273 if not self._add_pkg(task, None):
8276 # Packages for argument atoms need to be explicitly
8277 # added via _add_pkg() so that they are included in the
8278 # digraph (needed at least for --tree display).
8280 for atom in arg.set:
8281 pkg, existing_node = self._select_package(
8282 arg.root_config.root, atom)
8283 if existing_node is None and \
8285 if not self._add_pkg(pkg, Dependency(atom=atom,
8286 root=pkg.root, parent=arg)):
8289 # Allow unsatisfied deps here to avoid showing a masking
8290 # message for an unsatisfied dep that isn't necessarily
8292 if not self._create_graph(allow_unsatisfied=True):
8294 if masked_tasks or self._unsatisfied_deps:
8295 # This probably means that a required package
8296 # was dropped via --skipfirst. It makes the
8297 # resume list invalid, so convert it to a
8298 # UnsatisfiedResumeDep exception.
8299 raise self.UnsatisfiedResumeDep(self,
8300 masked_tasks + self._unsatisfied_deps)
8301 self._serialized_tasks_cache = None
8304 except self._unknown_internal_error:
8309 def _load_favorites(self, favorites):
8311 Use a list of favorites to resume state from a
8312 previous select_files() call. This creates similar
8313 DependencyArg instances to those that would have
8314 been created by the original select_files() call.
8315 This allows Package instances to be matched with
8316 DependencyArg instances during graph creation.
8318 root_config = self.roots[self.target_root]
8319 getSetAtoms = root_config.setconfig.getSetAtoms
8320 sets = root_config.sets
8323 if not isinstance(x, basestring):
8325 if x in ("system", "world"):
8327 if x.startswith(SETPREFIX):
8328 s = x[len(SETPREFIX):]
8333 # Recursively expand sets so that containment tests in
8334 # self._get_parent_sets() properly match atoms in nested
8335 # sets (like if world contains system).
8336 expanded_set = InternalPackageSet(
8337 initial_atoms=getSetAtoms(s))
8338 self._sets[s] = expanded_set
8339 args.append(SetArg(arg=x, set=expanded_set,
8340 root_config=root_config))
8342 if not portage.isvalidatom(x):
8344 args.append(AtomArg(arg=x, atom=x,
8345 root_config=root_config))
8347 # Create the "args" package set from atoms and
8348 # packages given as arguments.
8349 args_set = self._sets["args"]
8351 if not isinstance(arg, (AtomArg, PackageArg)):
8354 if myatom in args_set:
8356 args_set.add(myatom)
8357 self._set_atoms.update(chain(*self._sets.itervalues()))
8358 atom_arg_map = self._atom_arg_map
8360 for atom in arg.set:
8361 atom_key = (atom, arg.root_config.root)
8362 refs = atom_arg_map.get(atom_key)
8365 atom_arg_map[atom_key] = refs
8370 class UnsatisfiedResumeDep(portage.exception.PortageException):
8372 A dependency of a resume list is not installed. This
8373 can occur when a required package is dropped from the
8374 merge list via --skipfirst.
8376 def __init__(self, depgraph, value):
8377 portage.exception.PortageException.__init__(self, value)
8378 self.depgraph = depgraph
    class _internal_exception(portage.exception.PortageException):
        # Base class for exceptions used internally by the depgraph for
        # control flow; subclasses such as _unknown_internal_error are
        # caught within this module (see loadResumeCommand) rather than
        # propagated to the user.
        def __init__(self, value=""):
            # value becomes the standard PortageException payload;
            # defaults to an empty message.
            portage.exception.PortageException.__init__(self, value)
8384 class _unknown_internal_error(_internal_exception):
8386 Used by the depgraph internally to terminate graph creation.
8387 The specific reason for the failure should have been dumped
8388 to stderr, unfortunately, the exact reason for the failure
8392 class _serialize_tasks_retry(_internal_exception):
8394 This is raised by the _serialize_tasks() method when it needs to
8395 be called again for some reason. The only case that it's currently
8396 used for is when neglected dependencies need to be added to the
8397 graph in order to avoid making a potentially unsafe decision.
8400 class _dep_check_composite_db(portage.dbapi):
8402 A dbapi-like interface that is optimized for use in dep_check() calls.
8403 This is built on top of the existing depgraph package selection logic.
8404 Some packages that have been added to the graph may be masked from this
8405 view in order to influence the atom preference selection that occurs
8408 def __init__(self, depgraph, root):
8409 portage.dbapi.__init__(self)
8410 self._depgraph = depgraph
8412 self._match_cache = {}
8413 self._cpv_pkg_map = {}
8415 def match(self, atom):
8416 ret = self._match_cache.get(atom)
8421 atom = self._dep_expand(atom)
8422 pkg, existing = self._depgraph._select_package(self._root, atom)
8426 # Return the highest available from select_package() as well as
8427 # any matching slots in the graph db.
8429 slots.add(pkg.metadata["SLOT"])
8430 atom_cp = portage.dep_getkey(atom)
8431 if pkg.cp.startswith("virtual/"):
8432 # For new-style virtual lookahead that occurs inside
8433 # dep_check(), examine all slots. This is needed
8434 # so that newer slots will not unnecessarily be pulled in
8435 # when a satisfying lower slot is already installed. For
8436 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8437 # there's no need to pull in a newer slot to satisfy a
8438 # virtual/jdk dependency.
8439 for db, pkg_type, built, installed, db_keys in \
8440 self._depgraph._filtered_trees[self._root]["dbs"]:
8441 for cpv in db.match(atom):
8442 if portage.cpv_getkey(cpv) != pkg.cp:
8444 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8446 if self._visible(pkg):
8447 self._cpv_pkg_map[pkg.cpv] = pkg
8449 slots.remove(pkg.metadata["SLOT"])
8451 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8452 pkg, existing = self._depgraph._select_package(
8453 self._root, slot_atom)
8456 if not self._visible(pkg):
8458 self._cpv_pkg_map[pkg.cpv] = pkg
8461 self._cpv_sort_ascending(ret)
8462 self._match_cache[orig_atom] = ret
8465 def _visible(self, pkg):
8466 if pkg.installed and "selective" not in self._depgraph.myparams:
8468 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8469 except (StopIteration, portage.exception.InvalidDependString):
8476 self._depgraph.pkgsettings[pkg.root], pkg):
8478 except portage.exception.InvalidDependString:
8482 def _dep_expand(self, atom):
8484 This is only needed for old installed packages that may
8485 contain atoms that are not fully qualified with a specific
8486 category. Emulate the cpv_expand() function that's used by
8487 dbapi.match() in cases like this. If there are multiple
8488 matches, it's often due to a new-style virtual that has
8489 been added, so try to filter those out to avoid raising
8492 root_config = self._depgraph.roots[self._root]
8494 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8495 if len(expanded_atoms) > 1:
8496 non_virtual_atoms = []
8497 for x in expanded_atoms:
8498 if not portage.dep_getkey(x).startswith("virtual/"):
8499 non_virtual_atoms.append(x)
8500 if len(non_virtual_atoms) == 1:
8501 expanded_atoms = non_virtual_atoms
8502 if len(expanded_atoms) > 1:
8503 # compatible with portage.cpv_expand()
8504 raise portage.exception.AmbiguousPackageName(
8505 [portage.dep_getkey(x) for x in expanded_atoms])
8507 atom = expanded_atoms[0]
8509 null_atom = insert_category_into_atom(atom, "null")
8510 null_cp = portage.dep_getkey(null_atom)
8511 cat, atom_pn = portage.catsplit(null_cp)
8512 virts_p = root_config.settings.get_virts_p().get(atom_pn)
8514 # Allow the resolver to choose which virtual.
8515 atom = insert_category_into_atom(atom, "virtual")
8517 atom = insert_category_into_atom(atom, "null")
8520 def aux_get(self, cpv, wants):
8521 metadata = self._cpv_pkg_map[cpv].metadata
8522 return [metadata.get(x, "") for x in wants]
8524 class RepoDisplay(object):
8525 def __init__(self, roots):
8526 self._shown_repos = {}
8527 self._unknown_repo = False
8529 for root_config in roots.itervalues():
8530 portdir = root_config.settings.get("PORTDIR")
8532 repo_paths.add(portdir)
8533 overlays = root_config.settings.get("PORTDIR_OVERLAY")
8535 repo_paths.update(overlays.split())
8536 repo_paths = list(repo_paths)
8537 self._repo_paths = repo_paths
8538 self._repo_paths_real = [ os.path.realpath(repo_path) \
8539 for repo_path in repo_paths ]
8541 # pre-allocate index for PORTDIR so that it always has index 0.
8542 for root_config in roots.itervalues():
8543 portdb = root_config.trees["porttree"].dbapi
8544 portdir = portdb.porttree_root
8546 self.repoStr(portdir)
8548 def repoStr(self, repo_path_real):
8551 real_index = self._repo_paths_real.index(repo_path_real)
8552 if real_index == -1:
8554 self._unknown_repo = True
8556 shown_repos = self._shown_repos
8557 repo_paths = self._repo_paths
8558 repo_path = repo_paths[real_index]
8559 index = shown_repos.get(repo_path)
8561 index = len(shown_repos)
8562 shown_repos[repo_path] = index
8568 shown_repos = self._shown_repos
8569 unknown_repo = self._unknown_repo
8570 if shown_repos or self._unknown_repo:
8571 output.append("Portage tree and overlays:\n")
8572 show_repo_paths = list(shown_repos)
8573 for repo_path, repo_index in shown_repos.iteritems():
8574 show_repo_paths[repo_index] = repo_path
8576 for index, repo_path in enumerate(show_repo_paths):
8577 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
8579 output.append(" "+teal("[?]") + \
8580 " indicates that the source repository could not be determined\n")
8581 return "".join(output)
8583 class PackageCounters(object):
8593 self.blocks_satisfied = 0
8595 self.restrict_fetch = 0
8596 self.restrict_fetch_satisfied = 0
8597 self.interactive = 0
8600 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
8603 myoutput.append("Total: %s package" % total_installs)
8604 if total_installs != 1:
8605 myoutput.append("s")
8606 if total_installs != 0:
8607 myoutput.append(" (")
8608 if self.upgrades > 0:
8609 details.append("%s upgrade" % self.upgrades)
8610 if self.upgrades > 1:
8612 if self.downgrades > 0:
8613 details.append("%s downgrade" % self.downgrades)
8614 if self.downgrades > 1:
8617 details.append("%s new" % self.new)
8618 if self.newslot > 0:
8619 details.append("%s in new slot" % self.newslot)
8620 if self.newslot > 1:
8623 details.append("%s reinstall" % self.reinst)
8627 details.append("%s uninstall" % self.uninst)
8630 if self.interactive > 0:
8631 details.append("%s %s" % (self.interactive,
8632 colorize("WARN", "interactive")))
8633 myoutput.append(", ".join(details))
8634 if total_installs != 0:
8635 myoutput.append(")")
8636 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8637 if self.restrict_fetch:
8638 myoutput.append("\nFetch Restriction: %s package" % \
8639 self.restrict_fetch)
8640 if self.restrict_fetch > 1:
8641 myoutput.append("s")
8642 if self.restrict_fetch_satisfied < self.restrict_fetch:
8643 myoutput.append(bad(" (%s unsatisfied)") % \
8644 (self.restrict_fetch - self.restrict_fetch_satisfied))
8646 myoutput.append("\nConflict: %s block" % \
8649 myoutput.append("s")
8650 if self.blocks_satisfied < self.blocks:
8651 myoutput.append(bad(" (%s unsatisfied)") % \
8652 (self.blocks - self.blocks_satisfied))
8653 return "".join(myoutput)
8655 class PollSelectAdapter(PollConstants):
8658 Use select to emulate a poll object, for
8659 systems that don't support poll().
8663 self._registered = {}
8664 self._select_args = [[], [], []]
8666 def register(self, fd, *args):
8668 Only POLLIN is currently supported!
8672 "register expected at most 2 arguments, got " + \
8673 repr(1 + len(args)))
8675 eventmask = PollConstants.POLLIN | \
8676 PollConstants.POLLPRI | PollConstants.POLLOUT
8680 self._registered[fd] = eventmask
8681 self._select_args = None
8683 def unregister(self, fd):
8684 self._select_args = None
8685 del self._registered[fd]
8687 def poll(self, *args):
8690 "poll expected at most 2 arguments, got " + \
8691 repr(1 + len(args)))
8697 select_args = self._select_args
8698 if select_args is None:
8699 select_args = [self._registered.keys(), [], []]
8701 if timeout is not None:
8702 select_args = select_args[:]
8703 # Translate poll() timeout args to select() timeout args:
8705 # | units | value(s) for indefinite block
8706 # ---------|--------------|------------------------------
8707 # poll | milliseconds | omitted, negative, or None
8708 # ---------|--------------|------------------------------
8709 # select | seconds | omitted
8710 # ---------|--------------|------------------------------
8712 if timeout is not None and timeout < 0:
8714 if timeout is not None:
8715 select_args.append(timeout / 1000)
8717 select_events = select.select(*select_args)
8719 for fd in select_events[0]:
8720 poll_events.append((fd, PollConstants.POLLIN))
8723 class SequentialTaskQueue(SlotObject):
8725 __slots__ = ("max_jobs", "running_tasks") + \
8726 ("_dirty", "_scheduling", "_task_queue")
8728 def __init__(self, **kwargs):
8729 SlotObject.__init__(self, **kwargs)
8730 self._task_queue = deque()
8731 self.running_tasks = set()
8732 if self.max_jobs is None:
8736 def add(self, task):
8737 self._task_queue.append(task)
8740 def addFront(self, task):
8741 self._task_queue.appendleft(task)
8752 if self._scheduling:
8753 # Ignore any recursive schedule() calls triggered via
8754 # self._task_exit().
8757 self._scheduling = True
8759 task_queue = self._task_queue
8760 running_tasks = self.running_tasks
8761 max_jobs = self.max_jobs
8762 state_changed = False
8764 while task_queue and \
8765 (max_jobs is True or len(running_tasks) < max_jobs):
8766 task = task_queue.popleft()
8767 cancelled = getattr(task, "cancelled", None)
8769 running_tasks.add(task)
8770 task.addExitListener(self._task_exit)
8772 state_changed = True
8775 self._scheduling = False
8777 return state_changed
8779 def _task_exit(self, task):
8781 Since we can always rely on exit listeners being called, the set of
8782 running tasks is always pruned automatically and there is never any need
8783 to actively prune it.
8785 self.running_tasks.remove(task)
8786 if self._task_queue:
8790 self._task_queue.clear()
8791 running_tasks = self.running_tasks
8792 while running_tasks:
8793 task = running_tasks.pop()
8794 task.removeExitListener(self._task_exit)
8798 def __nonzero__(self):
8799 return bool(self._task_queue or self.running_tasks)
8802 return len(self._task_queue) + len(self.running_tasks)
8804 _can_poll_device = None
8806 def can_poll_device():
8808 Test if it's possible to use poll() on a device such as a pty. This
8809 is known to fail on Darwin.
8811 @returns: True if poll() on a device succeeds, False otherwise.
8814 global _can_poll_device
8815 if _can_poll_device is not None:
8816 return _can_poll_device
8818 if not hasattr(select, "poll"):
8819 _can_poll_device = False
8820 return _can_poll_device
8823 dev_null = open('/dev/null', 'rb')
8825 _can_poll_device = False
8826 return _can_poll_device
8829 p.register(dev_null.fileno(), PollConstants.POLLIN)
8831 invalid_request = False
8832 for f, event in p.poll():
8833 if event & PollConstants.POLLNVAL:
8834 invalid_request = True
8838 _can_poll_device = not invalid_request
8839 return _can_poll_device
8841 def create_poll_instance():
8843 Create an instance of select.poll, or an instance of
8844 PollSelectAdapter there is no poll() implementation or
8845 it is broken somehow.
8847 if can_poll_device():
8848 return select.poll()
8849 return PollSelectAdapter()
8851 class PollScheduler(object):
8853 class _sched_iface_class(SlotObject):
8854 __slots__ = ("register", "schedule", "unregister")
8858 self._max_load = None
8860 self._poll_event_queue = []
8861 self._poll_event_handlers = {}
8862 self._poll_event_handler_ids = {}
8863 # Increment id for each new handler.
8864 self._event_handler_id = 0
8865 self._poll_obj = create_poll_instance()
8866 self._scheduling = False
8868 def _schedule(self):
8870 Calls _schedule_tasks() and automatically returns early from
8871 any recursive calls to this method that the _schedule_tasks()
8872 call might trigger. This makes _schedule() safe to call from
8873 inside exit listeners.
8875 if self._scheduling:
8877 self._scheduling = True
8879 return self._schedule_tasks()
8881 self._scheduling = False
8883 def _running_job_count(self):
8886 def _can_add_job(self):
8887 max_jobs = self._max_jobs
8888 max_load = self._max_load
8890 if self._max_jobs is not True and \
8891 self._running_job_count() >= self._max_jobs:
8894 if max_load is not None and \
8895 (max_jobs is True or max_jobs > 1) and \
8896 self._running_job_count() >= 1:
8898 avg1, avg5, avg15 = os.getloadavg()
8899 except (AttributeError, OSError), e:
8900 writemsg("!!! getloadavg() failed: %s\n" % (e,),
8905 if avg1 >= max_load:
8910 def _poll(self, timeout=None):
8912 All poll() calls pass through here. The poll events
8913 are added directly to self._poll_event_queue.
8914 In order to avoid endless blocking, this raises
8915 StopIteration if timeout is None and there are
8916 no file descriptors to poll.
8918 if not self._poll_event_handlers:
8920 if timeout is None and \
8921 not self._poll_event_handlers:
8922 raise StopIteration(
8923 "timeout is None and there are no poll() event handlers")
8925 # The following error is known to occur with Linux kernel versions
8928 # select.error: (4, 'Interrupted system call')
8930 # This error has been observed after a SIGSTOP, followed by SIGCONT.
8931 # Treat it similar to EAGAIN if timeout is None, otherwise just return
8932 # without any events.
8935 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
8937 except select.error, e:
8938 writemsg_level("\n!!! select error: %s\n" % (e,),
8939 level=logging.ERROR, noiselevel=-1)
8941 if timeout is not None:
8944 def _next_poll_event(self, timeout=None):
8946 Since the _schedule_wait() loop is called by event
8947 handlers from _poll_loop(), maintain a central event
8948 queue for both of them to share events from a single
8949 poll() call. In order to avoid endless blocking, this
8950 raises StopIteration if timeout is None and there are
8951 no file descriptors to poll.
8953 if not self._poll_event_queue:
8955 return self._poll_event_queue.pop()
8957 def _poll_loop(self):
8959 event_handlers = self._poll_event_handlers
8960 event_handled = False
8963 while event_handlers:
8964 f, event = self._next_poll_event()
8965 handler, reg_id = event_handlers[f]
8967 event_handled = True
8968 except StopIteration:
8969 event_handled = True
8971 if not event_handled:
8972 raise AssertionError("tight loop")
8974 def _schedule_yield(self):
8976 Schedule for a short period of time chosen by the scheduler based
8977 on internal state. Synchronous tasks should call this periodically
8978 in order to allow the scheduler to service pending poll events. The
8979 scheduler will call poll() exactly once, without blocking, and any
8980 resulting poll events will be serviced.
8982 event_handlers = self._poll_event_handlers
8985 if not event_handlers:
8986 return bool(events_handled)
8988 if not self._poll_event_queue:
8992 while event_handlers and self._poll_event_queue:
8993 f, event = self._next_poll_event()
8994 handler, reg_id = event_handlers[f]
8997 except StopIteration:
9000 return bool(events_handled)
9002 def _register(self, f, eventmask, handler):
9005 @return: A unique registration id, for use in schedule() or
9008 if f in self._poll_event_handlers:
9009 raise AssertionError("fd %d is already registered" % f)
9010 self._event_handler_id += 1
9011 reg_id = self._event_handler_id
9012 self._poll_event_handler_ids[reg_id] = f
9013 self._poll_event_handlers[f] = (handler, reg_id)
9014 self._poll_obj.register(f, eventmask)
9017 def _unregister(self, reg_id):
9018 f = self._poll_event_handler_ids[reg_id]
9019 self._poll_obj.unregister(f)
9020 del self._poll_event_handlers[f]
9021 del self._poll_event_handler_ids[reg_id]
9023 def _schedule_wait(self, wait_ids):
9025 Schedule until wait_id is not longer registered
9028 @param wait_id: a task id to wait for
9030 event_handlers = self._poll_event_handlers
9031 handler_ids = self._poll_event_handler_ids
9032 event_handled = False
9034 if isinstance(wait_ids, int):
9035 wait_ids = frozenset([wait_ids])
9038 while wait_ids.intersection(handler_ids):
9039 f, event = self._next_poll_event()
9040 handler, reg_id = event_handlers[f]
9042 event_handled = True
9043 except StopIteration:
9044 event_handled = True
9046 return event_handled
9048 class QueueScheduler(PollScheduler):
9051 Add instances of SequentialTaskQueue and then call run(). The
9052 run() method returns when no tasks remain.
9055 def __init__(self, max_jobs=None, max_load=None):
9056 PollScheduler.__init__(self)
9058 if max_jobs is None:
9061 self._max_jobs = max_jobs
9062 self._max_load = max_load
9063 self.sched_iface = self._sched_iface_class(
9064 register=self._register,
9065 schedule=self._schedule_wait,
9066 unregister=self._unregister)
9069 self._schedule_listeners = []
9072 self._queues.append(q)
    def remove(self, q):
        """Detach a task queue that was previously registered via add().

        Raises ValueError if q is not currently registered (list.remove
        semantics)."""
        self._queues.remove(q)
9079 while self._schedule():
9082 while self._running_job_count():
9085 def _schedule_tasks(self):
9088 @returns: True if there may be remaining tasks to schedule,
9091 while self._can_add_job():
9092 n = self._max_jobs - self._running_job_count()
9096 if not self._start_next_job(n):
9099 for q in self._queues:
9104 def _running_job_count(self):
9106 for q in self._queues:
9107 job_count += len(q.running_tasks)
9108 self._jobs = job_count
9111 def _start_next_job(self, n=1):
9113 for q in self._queues:
9114 initial_job_count = len(q.running_tasks)
9116 final_job_count = len(q.running_tasks)
9117 if final_job_count > initial_job_count:
9118 started_count += (final_job_count - initial_job_count)
9119 if started_count >= n:
9121 return started_count
# Convenience facade: a single SequentialTaskQueue wrapped in a
# QueueScheduler. Callers just add() tasks and call run() (which is
# bound directly to the inner scheduler's run method).
9123 class TaskScheduler(object):
9126 A simple way to handle scheduling of AsynchrousTask instances. Simply
9127 add tasks and call run(). The run() method returns when no tasks remain.
9130 def __init__(self, max_jobs=None, max_load=None):
9131 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9132 self._scheduler = QueueScheduler(
9133 max_jobs=max_jobs, max_load=max_load)
9134 self.sched_iface = self._scheduler.sched_iface
9135 self.run = self._scheduler.run
9136 self._scheduler.add(self._queue)
# add(): enqueue one task on the single internal queue.
9138 def add(self, task):
9139 self._queue.add(task)
# One-line terminal status display for parallel merges ("Jobs: X of Y
# complete, N running ... Load avg: ...").  Attributes listed in
# _bound_properties trigger a redraw when assigned (see __setattr__).
# All attribute writes in __init__ go through object.__setattr__ to
# bypass the change-tracking __setattr__ defined below.
9141 class JobStatusDisplay(object):
9143 _bound_properties = ("curval", "failed", "running")
9144 _jobs_column_width = 48
9146 # Don't update the display unless at least this much
9147 # time has passed, in units of seconds.
9148 _min_display_latency = 2
# Fallback terminal control codes used when termcap lookup fails,
# keyed by termcap capability name (dict bodies omitted in this listing).
9150 _default_term_codes = {
9156 _termcap_name_map = {
9157 'carriage_return' : 'cr',
# __init__: capture the output stream, detect tty-ness, and resolve
# terminal control codes (falling back to defaults on a non-tty or
# when _init_term() fails).
9162 def __init__(self, out=sys.stdout, quiet=False):
9163 object.__setattr__(self, "out", out)
9164 object.__setattr__(self, "quiet", quiet)
9165 object.__setattr__(self, "maxval", 0)
9166 object.__setattr__(self, "merges", 0)
9167 object.__setattr__(self, "_changed", False)
9168 object.__setattr__(self, "_displayed", False)
9169 object.__setattr__(self, "_last_display_time", 0)
9170 object.__setattr__(self, "width", 80)
9173 isatty = hasattr(out, "isatty") and out.isatty()
9174 object.__setattr__(self, "_isatty", isatty)
9175 if not isatty or not self._init_term():
9177 for k, capname in self._termcap_name_map.iteritems():
9178 term_codes[k] = self._default_term_codes[capname]
9179 object.__setattr__(self, "_term_codes", term_codes)
# _init_term: query curses/termcap for the control codes named in
# _termcap_name_map; per-capability fallback to the defaults when
# tigetstr returns nothing.
9181 def _init_term(self):
9183 Initialize term control codes.
9185 @returns: True if term codes were successfully initialized,
9189 term_type = os.environ.get("TERM", "vt100")
9195 curses.setupterm(term_type, self.out.fileno())
9196 tigetstr = curses.tigetstr
9197 except curses.error:
9202 if tigetstr is None:
9206 for k, capname in self._termcap_name_map.iteritems():
9207 code = tigetstr(capname)
9209 code = self._default_term_codes[capname]
9210 term_codes[k] = code
9211 object.__setattr__(self, "_term_codes", term_codes)
# _format_msg: uniform ">>> " prefix for all display lines.
9214 def _format_msg(self, msg):
9215 return ">>> %s" % msg
# (erase helper) return to start of line and clear it, then mark the
# display as empty.
9219 self._term_codes['carriage_return'] + \
9220 self._term_codes['clr_eol'])
9222 self._displayed = False
# _display: raw write of an already-formatted line.
9224 def _display(self, line):
9225 self.out.write(line)
9227 self._displayed = True
# _update: non-tty output gets a plain newline-terminated line;
# tty output goes through _display for in-place updating.
9229 def _update(self, msg):
9232 if not self._isatty:
9233 out.write(self._format_msg(msg) + self._term_codes['newline'])
9235 self._displayed = True
9241 self._display(self._format_msg(msg))
# displayMessage: print a one-off message, erasing the in-place status
# line first if one is currently shown.
9243 def displayMessage(self, msg):
9245 was_displayed = self._displayed
9247 if self._isatty and self._displayed:
9250 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9252 self._displayed = False
9255 self._changed = True
# (reset helper) zero the bound counters and terminate any displayed
# line.
9261 for name in self._bound_properties:
9262 object.__setattr__(self, name, 0)
9265 self.out.write(self._term_codes['newline'])
9267 self._displayed = False
# __setattr__: change-tracking setter — no-op on equal value, and
# bound properties additionally notify _property_change.
9269 def __setattr__(self, name, value):
9270 old_value = getattr(self, name)
9271 if value == old_value:
9273 object.__setattr__(self, name, value)
9274 if name in self._bound_properties:
9275 self._property_change(name, old_value, value)
9277 def _property_change(self, name, old_value, new_value):
9278 self._changed = True
# _load_avg_str: format os.getloadavg() (unavailable on some
# platforms, hence the AttributeError handling).
9281 def _load_avg_str(self):
9283 avg = os.getloadavg()
9284 except (AttributeError, OSError), e:
9296 return ", ".join(("%%.%df" % digits ) % x for x in avg)
# (display entry point) rate-limited redraw: skip when unchanged, not
# a tty, or within _min_display_latency seconds of the last draw.
9300 Display status on stdout, but only if something has
9301 changed since the last call.
9307 current_time = time.time()
9308 time_delta = current_time - self._last_display_time
9309 if self._displayed and \
9311 if not self._isatty:
9313 if time_delta < self._min_display_latency:
9316 self._last_display_time = current_time
9317 self._changed = False
9318 self._display_status()
# _display_status: render the status line twice in parallel — a color
# version and a plain version (via ConsoleStyleFile write_listener) —
# so padding/truncation can be computed on the plain text.
9320 def _display_status(self):
9321 # Don't use len(self._completed_tasks) here since that also
9322 # can include uninstall tasks.
9323 curval_str = str(self.curval)
9324 maxval_str = str(self.maxval)
9325 running_str = str(self.running)
9326 failed_str = str(self.failed)
9327 load_avg_str = self._load_avg_str()
9329 color_output = StringIO.StringIO()
9330 plain_output = StringIO.StringIO()
9331 style_file = portage.output.ConsoleStyleFile(color_output)
9332 style_file.write_listener = plain_output
9333 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9334 style_writer.style_listener = style_file.new_styles
9335 f = formatter.AbstractFormatter(style_writer)
9337 number_style = "INFORM"
9338 f.add_literal_data("Jobs: ")
9339 f.push_style(number_style)
9340 f.add_literal_data(curval_str)
9342 f.add_literal_data(" of ")
9343 f.push_style(number_style)
9344 f.add_literal_data(maxval_str)
9346 f.add_literal_data(" complete")
9349 f.add_literal_data(", ")
9350 f.push_style(number_style)
9351 f.add_literal_data(running_str)
9353 f.add_literal_data(" running")
9356 f.add_literal_data(", ")
9357 f.push_style(number_style)
9358 f.add_literal_data(failed_str)
9360 f.add_literal_data(" failed")
# Pad the jobs section to a fixed column before the load average.
9362 padding = self._jobs_column_width - len(plain_output.getvalue())
9364 f.add_literal_data(padding * " ")
9366 f.add_literal_data("Load avg: ")
9367 f.add_literal_data(load_avg_str)
9369 # Truncate to fit width, to avoid making the terminal scroll if the
9370 # line overflows (happens when the load average is large).
9371 plain_output = plain_output.getvalue()
9372 if self._isatty and len(plain_output) > self.width:
9373 # Use plain_output here since it's easier to truncate
9374 # properly than the color output which contains console
9376 self._update(plain_output[:self.width])
9378 self._update(color_output.getvalue())
# Mirror the (whitespace-collapsed) plain line into the xterm title.
9380 xtermTitle(" ".join(plain_output.split()))
# Main merge scheduler: drives building/merging of the merge list with
# parallel jobs on top of PollScheduler.
9382 class Scheduler(PollScheduler):
# Options under which blocker detection is skipped entirely.
9384 _opts_ignore_blockers = \
9385 frozenset(["--buildpkgonly",
9386 "--fetchonly", "--fetch-all-uri",
9387 "--nodeps", "--pretend"])
# Options that force foreground (non-background) output mode.
9389 _opts_no_background = \
9390 frozenset(["--pretend",
9391 "--fetchonly", "--fetch-all-uri"])
# Options under which emerge never re-execs itself after a portage
# upgrade.
9393 _opts_no_restart = frozenset(["--buildpkgonly",
9394 "--fetchonly", "--fetch-all-uri", "--pretend"])
# Options that must not leak into an automatic "--resume" invocation.
9396 _bad_resume_opts = set(["--ask", "--changelog",
9397 "--resume", "--skipfirst"])
# Shared log file for all parallel-fetch output.
9399 _fetch_log = "/var/log/emerge-fetch.log"
# Bundle of scheduler callbacks handed to tasks (slot list continues on
# an original line omitted from this listing).
9401 class _iface_class(SlotObject):
9402 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9403 "dblinkElog", "fetch", "register", "schedule",
9404 "scheduleSetup", "scheduleUnpack", "scheduleYield",
# Minimal interface handed to fetch tasks: the shared log file plus a
# schedule callback.
9407 class _fetch_iface_class(SlotObject):
9408 __slots__ = ("log_file", "schedule")
# Slot-dict holding the four task queues (merge/jobs/fetch/unpack).
9410 _task_queues_class = slot_dict_class(
9411 ("merge", "jobs", "fetch", "unpack"), prefix="")
# Boolean build-related options; each slot name maps to the
# corresponding "--" command-line option (see __init__).
9413 class _build_opts_class(SlotObject):
9414 __slots__ = ("buildpkg", "buildpkgonly",
9415 "fetch_all_uri", "fetchonly", "pretend")
# Boolean binary-package options, populated from myopts in __init__.
9417 class _binpkg_opts_class(SlotObject):
9418 __slots__ = ("fetchonly", "getbinpkg", "pretend")
# Progress counter pair: current merge number and total merges.
9420 class _pkg_count_class(SlotObject):
9421 __slots__ = ("curval", "maxval")
# Thin wrapper around emergelog() that suppresses short xterm-title
# messages when titles are disabled.
9423 class _emerge_log_class(SlotObject):
9424 __slots__ = ("xterm_titles",)
9426 def log(self, *pargs, **kwargs):
9427 if not self.xterm_titles:
9428 # Avoid interference with the scheduler's status display.
9429 kwargs.pop("short_msg", None)
9430 emergelog(self.xterm_titles, *pargs, **kwargs)
# Record of one failed build/merge, kept for end-of-run reporting.
9432 class _failed_pkg(SlotObject):
9433 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9435 class _ConfigPool(object):
9436 """Interface for a task to temporarily allocate a config
9437 instance from a pool. This allows a task to be constructed
9438 long before the config instance actually becomes needed, like
9439 when prefetchers are constructed for the whole merge list."""
9440 __slots__ = ("_root", "_allocate", "_deallocate")
# Store the root plus the scheduler's allocate/deallocate callbacks.
9441 def __init__(self, root, allocate, deallocate):
9443 self._allocate = allocate
9444 self._deallocate = deallocate
# (allocate) borrow a config instance for self._root from the pool.
9446 return self._allocate(self._root)
# Return a previously allocated config instance to the pool.
9447 def deallocate(self, settings):
9448 self._deallocate(settings)
# Sentinel exception used to abort scheduling after the real error has
# already been reported on stderr.
9450 class _unknown_internal_error(portage.exception.PortageException):
9452 Used internally to terminate scheduling. The specific reason for
9453 the failure should have been dumped to stderr.
9455 def __init__(self, value=""):
9456 portage.exception.PortageException.__init__(self, value)
# Construct the scheduler: capture settings/trees/options, build the
# option objects, task queues, status display, per-root config pools and
# blocker databases, the callback interface handed to tasks, and decide
# whether parallel-fetch is enabled. Several original lines are omitted
# from this listing (verify against full source before editing).
9458 def __init__(self, settings, trees, mtimedb, myopts,
9459 spinner, mergelist, favorites, digraph):
9460 PollScheduler.__init__(self)
9461 self.settings = settings
9462 self.target_root = settings["ROOT"]
9464 self.myopts = myopts
9465 self._spinner = spinner
9466 self._mtimedb = mtimedb
9467 self._mergelist = mergelist
9468 self._favorites = favorites
9469 self._args_set = InternalPackageSet(favorites)
# Each slot name corresponds to a "--" option (underscores become
# dashes); set True when present in myopts.
9470 self._build_opts = self._build_opts_class()
9471 for k in self._build_opts.__slots__:
9472 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9473 self._binpkg_opts = self._binpkg_opts_class()
9474 for k in self._binpkg_opts.__slots__:
9475 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9478 self._logger = self._emerge_log_class()
# One SequentialTaskQueue per queue name (merge/jobs/fetch/unpack).
9479 self._task_queues = self._task_queues_class()
9480 for k in self._task_queues.allowed_keys:
9481 setattr(self._task_queues, k,
9482 SequentialTaskQueue())
9483 self._status_display = JobStatusDisplay()
9484 self._max_load = myopts.get("--load-average")
9485 max_jobs = myopts.get("--jobs")
9486 if max_jobs is None:
9488 self._set_max_jobs(max_jobs)
9490 # The root where the currently running
9491 # portage instance is installed.
9492 self._running_root = trees["/"]["root_config"]
9494 if settings.get("PORTAGE_DEBUG", "") == "1":
# Per-root config pools and blocker databases (loop header for the
# roots is omitted from this listing).
9496 self.pkgsettings = {}
9497 self._config_pool = {}
9498 self._blocker_db = {}
9500 self._config_pool[root] = []
9501 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
# Build the callback interfaces that tasks use to talk back to the
# scheduler without holding a reference to it directly.
9503 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9504 schedule=self._schedule_fetch)
9505 self._sched_iface = self._iface_class(
9506 dblinkEbuildPhase=self._dblink_ebuild_phase,
9507 dblinkDisplayMerge=self._dblink_display_merge,
9508 dblinkElog=self._dblink_elog,
9509 fetch=fetch_iface, register=self._register,
9510 schedule=self._schedule_wait,
9511 scheduleSetup=self._schedule_setup,
9512 scheduleUnpack=self._schedule_unpack,
9513 scheduleYield=self._schedule_yield,
9514 unregister=self._unregister)
# Weak values so finished prefetchers can be garbage collected.
9516 self._prefetchers = weakref.WeakValueDictionary()
9517 self._pkg_queue = []
9518 self._completed_tasks = set()
9520 self._failed_pkgs = []
9521 self._failed_pkgs_all = []
9522 self._failed_pkgs_die_msgs = []
9523 self._post_mod_echo_msgs = []
9524 self._parallel_fetch = False
# maxval counts only real merges, not uninstalls/nomerge entries.
9525 merge_count = len([x for x in mergelist \
9526 if isinstance(x, Package) and x.operation == "merge"])
9527 self._pkg_count = self._pkg_count_class(
9528 curval=0, maxval=merge_count)
9529 self._status_display.maxval = self._pkg_count.maxval
9531 # The load average takes some time to respond when new
9532 # jobs are added, so we need to limit the rate of adding
9534 self._job_delay_max = 10
9535 self._job_delay_factor = 1.0
9536 self._job_delay_exp = 1.5
9537 self._previous_job_start_time = None
9539 self._set_digraph(digraph)
9541 # This is used to memoize the _choose_pkg() result when
9542 # no packages can be chosen until one of the existing
9544 self._choose_pkg_return_early = False
# parallel-fetch requires distlocks and is pointless under
# pretend/fetch-only modes or single-package merges.
9546 features = self.settings.features
9547 if "parallel-fetch" in features and \
9548 not ("--pretend" in self.myopts or \
9549 "--fetch-all-uri" in self.myopts or \
9550 "--fetchonly" in self.myopts):
9551 if "distlocks" not in features:
9552 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9553 portage.writemsg(red("!!!")+" parallel-fetching " + \
9554 "requires the distlocks feature enabled"+"\n",
9556 portage.writemsg(red("!!!")+" you have it disabled, " + \
9557 "thus parallel-fetching is being disabled"+"\n",
9559 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9560 elif len(mergelist) > 1:
9561 self._parallel_fetch = True
9563 if self._parallel_fetch:
9564 # clear out existing fetch log if it exists
9566 open(self._fetch_log, 'w')
9567 except EnvironmentError:
# Identify the installed portage package on the running root so a
# self-upgrade can trigger a restart later.
9570 self._running_portage = None
9571 portage_match = self._running_root.trees["vartree"].dbapi.match(
9572 portage.const.PORTAGE_PACKAGE_ATOM)
9574 cpv = portage_match.pop()
9575 self._running_portage = self._pkg(cpv, "installed",
9576 self._running_root, installed=True)
# Delegate one poll iteration to the PollScheduler base class.
9578 def _poll(self, timeout=None):
9580 PollScheduler._poll(self, timeout=timeout)
# Apply the job limit both to this scheduler and to the jobs queue.
9582 def _set_max_jobs(self, max_jobs):
9583 self._max_jobs = max_jobs
9584 self._task_queues.jobs.max_jobs = max_jobs
# Decide background mode: enabled with parallel jobs or --quiet, unless
# one of _opts_no_background is set. Interactive packages force
# foreground output and --jobs=1. Also derives the status display's
# quiet flag and xterm-title behavior.
9586 def _background_mode(self):
9588 Check if background mode is enabled and adjust states as necessary.
9591 @returns: True if background mode is enabled, False otherwise.
9593 background = (self._max_jobs is True or \
9594 self._max_jobs > 1 or "--quiet" in self.myopts) and \
9595 not bool(self._opts_no_background.intersection(self.myopts))
9598 interactive_tasks = self._get_interactive_tasks()
9599 if interactive_tasks:
9601 writemsg_level(">>> Sending package output to stdio due " + \
9602 "to interactive package(s):\n",
9603 level=logging.INFO, noiselevel=-1)
9605 for pkg in interactive_tasks:
9606 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
9608 pkg_str += " for " + pkg.root
9611 writemsg_level("".join("%s\n" % (l,) for l in msg),
9612 level=logging.INFO, noiselevel=-1)
# Interactive packages need the terminal, so cap parallelism at 1.
9613 if self._max_jobs is True or self._max_jobs > 1:
9614 self._set_max_jobs(1)
9615 writemsg_level(">>> Setting --jobs=1 due " + \
9616 "to the above interactive package(s)\n",
9617 level=logging.INFO, noiselevel=-1)
9619 self._status_display.quiet = \
9621 ("--quiet" in self.myopts and \
9622 "--verbose" not in self.myopts)
9624 self._logger.xterm_titles = \
9625 "notitles" not in self.settings.features and \
9626 self._status_display.quiet
# Collect merge-list packages whose PROPERTIES contain "interactive".
# An invalid PROPERTIES dependency string aborts scheduling via
# _unknown_internal_error after showing a notice.
9630 def _get_interactive_tasks(self):
9631 from portage import flatten
9632 from portage.dep import use_reduce, paren_reduce
9633 interactive_tasks = []
9634 for task in self._mergelist:
9635 if not (isinstance(task, Package) and \
9636 task.operation == "merge"):
# Evaluate PROPERTIES with the package's enabled USE flags.
9639 properties = flatten(use_reduce(paren_reduce(
9640 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9641 except portage.exception.InvalidDependString, e:
9642 show_invalid_depstring_notice(task,
9643 task.metadata["PROPERTIES"], str(e))
9644 raise self._unknown_internal_error()
9645 if "interactive" in properties:
9646 interactive_tasks.append(task)
9647 return interactive_tasks
# Keep the dependency graph only when it is useful: with --nodeps or
# fewer than 2 jobs there is nothing to parallelize, so drop it.
9649 def _set_digraph(self, digraph):
9650 if "--nodeps" in self.myopts or \
9651 (self._max_jobs is not True and self._max_jobs < 2):
9653 self._digraph = None
9656 self._digraph = digraph
9657 self._prune_digraph()
# Repeatedly strip irrelevant root nodes (non-packages, installed
# nomerge entries, completed tasks) from the graph until a pass removes
# nothing. The enclosing loop construct is omitted from this listing.
9659 def _prune_digraph(self):
9661 Prune any root nodes that are irrelevant.
9664 graph = self._digraph
9665 completed_tasks = self._completed_tasks
9666 removed_nodes = set()
9668 for node in graph.root_nodes():
9669 if not isinstance(node, Package) or \
9670 (node.installed and node.operation == "nomerge") or \
9672 node in completed_tasks:
9673 removed_nodes.add(node)
9675 graph.difference_update(removed_nodes)
9676 if not removed_nodes:
9678 removed_nodes.clear()
# Exception carrying an uninstall failure; the first positional arg is
# kept as the exit status.
9680 class _pkg_failure(portage.exception.PortageException):
9682 An instance of this class is raised by unmerge() when
9683 an uninstallation fails.
9686 def __init__(self, *pargs):
9687 portage.exception.PortageException.__init__(self, pargs)
9689 self.status = pargs[0]
9691 def _schedule_fetch(self, fetcher):
9693 Schedule a fetcher on the fetch queue, in order to
9694 serialize access to the fetch log.
# addFront gives on-demand fetches priority over queued prefetchers.
9696 self._task_queues.fetch.addFront(fetcher)
9698 def _schedule_setup(self, setup_phase):
9700 Schedule a setup phase on the merge queue, in order to
9701 serialize unsandboxed access to the live filesystem.
9703 self._task_queues.merge.addFront(setup_phase)
9706 def _schedule_unpack(self, unpack_phase):
9708 Schedule an unpack phase on the unpack queue, in order
9709 to serialize $DISTDIR access for live ebuilds.
9711 self._task_queues.unpack.add(unpack_phase)
# Blocker lookup entry point; delegates with acquire_lock=0 since the
# caller is expected to hold the vdb lock already.
9713 def _find_blockers(self, new_pkg):
9715 Returns a callable which should be called only when
9716 the vdb lock has been acquired.
9719 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
# Build dblink objects for installed packages that block new_pkg,
# skipping same-slot and same-cpv matches (those are replacements, not
# real blockers). Returns early when blockers are ignored by options.
9722 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9723 if self._opts_ignore_blockers.intersection(self.myopts):
9726 # Call gc.collect() here to avoid heap overflow that
9727 # triggers 'Cannot allocate memory' errors (reported
9732 blocker_db = self._blocker_db[new_pkg.root]
9734 blocker_dblinks = []
9735 for blocking_pkg in blocker_db.findInstalledBlockers(
9736 new_pkg, acquire_lock=acquire_lock):
9737 if new_pkg.slot_atom == blocking_pkg.slot_atom:
9739 if new_pkg.cpv == blocking_pkg.cpv:
9741 blocker_dblinks.append(portage.dblink(
9742 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9743 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9744 vartree=self.trees[blocking_pkg.root]["vartree"]))
9748 return blocker_dblinks
# Translate a dblink instance into the scheduler's Package
# representation via the per-tree type mapping.
9750 def _dblink_pkg(self, pkg_dblink):
9751 cpv = pkg_dblink.mycpv
9752 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
9753 root_config = self.trees[pkg_dblink.myroot]["root_config"]
9754 installed = type_name == "installed"
9755 return self._pkg(cpv, type_name, root_config, installed=installed)
# Append msg to the log at log_path (the write/close lines are omitted
# from this listing — verify against full source).
9757 def _append_to_log_path(self, log_path, msg):
9758 f = open(log_path, 'a')
# elog callback for dblink: in background mode with a log file, route
# elog output to the log instead of the terminal; the log file is
# closed afterwards (close line omitted from this listing).
9764 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9766 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9769 background = self._background
9771 if background and log_path is not None:
9772 log_file = open(log_path, 'a')
9777 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9779 if log_file is not None:
# Display callback for dblink merge output: without a log file, print
# unless backgrounded below WARN level; with a log file, print (when
# appropriate) and also append the message to the log.
9782 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9783 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9784 background = self._background
9786 if log_path is None:
9787 if not (background and level < logging.WARN):
9788 portage.util.writemsg_level(msg,
9789 level=level, noiselevel=noiselevel)
9792 portage.util.writemsg_level(msg,
9793 level=level, noiselevel=noiselevel)
9794 self._append_to_log_path(log_path, msg)
# Run one ebuild phase for a dblink merge as an async EbuildPhase task
# so the scheduler keeps servicing other jobs; returns the phase's
# exit code (the wait call between start and return is omitted from
# this listing).
9796 def _dblink_ebuild_phase(self,
9797 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9799 Using this callback for merge phases allows the scheduler
9800 to run while these phases execute asynchronously, and allows
9801 the scheduler control output handling.
9804 scheduler = self._sched_iface
9805 settings = pkg_dblink.settings
9806 pkg = self._dblink_pkg(pkg_dblink)
9807 background = self._background
9808 log_path = settings.get("PORTAGE_LOG_FILE")
9810 ebuild_phase = EbuildPhase(background=background,
9811 pkg=pkg, phase=phase, scheduler=scheduler,
9812 settings=settings, tree=pkg_dblink.treetype)
9813 ebuild_phase.start()
9816 return ebuild_phase.returncode
# Pre-verify manifests for every ebuild on the merge list (only with
# FEATURES=strict and not in fetch-only modes), using quiet config
# clones so digestcheck output stays terse.
9818 def _check_manifests(self):
9819 # Verify all the manifests now so that the user is notified of failure
9820 # as soon as possible.
9821 if "strict" not in self.settings.features or \
9822 "--fetchonly" in self.myopts or \
9823 "--fetch-all-uri" in self.myopts:
9826 shown_verifying_msg = False
# Build one quiet config clone per root.
9828 for myroot, pkgsettings in self.pkgsettings.iteritems():
9829 quiet_config = portage.config(clone=pkgsettings)
9830 quiet_config["PORTAGE_QUIET"] = "1"
9831 quiet_config.backup_changes("PORTAGE_QUIET")
9832 quiet_settings[myroot] = quiet_config
9835 for x in self._mergelist:
9836 if not isinstance(x, Package) or \
9837 x.type_name != "ebuild":
9840 if not shown_verifying_msg:
9841 shown_verifying_msg = True
9842 self._status_msg("Verifying ebuild manifests")
9844 root_config = x.root_config
9845 portdb = root_config.trees["porttree"].dbapi
9846 quiet_config = quiet_settings[root_config.root]
# "O" points digestcheck at the ebuild's directory.
9847 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9848 if not portage.digestcheck([], quiet_config, strict=True):
# Queue background prefetchers for every merge-list package except the
# first (to avoid "waiting for lock" noise at startup); no-op unless
# parallel-fetch is enabled.
9853 def _add_prefetchers(self):
9855 if not self._parallel_fetch:
9858 if self._parallel_fetch:
9859 self._status_msg("Starting parallel fetch")
9861 prefetchers = self._prefetchers
9862 getbinpkg = "--getbinpkg" in self.myopts
9864 # In order to avoid "waiting for lock" messages
9865 # at the beginning, which annoy users, never
9866 # spawn a prefetcher for the first package.
9867 for pkg in self._mergelist[1:]:
9868 prefetcher = self._create_prefetcher(pkg)
9869 if prefetcher is not None:
9870 self._task_queues.fetch.add(prefetcher)
9871 prefetchers[pkg] = prefetcher
# Build the right prefetcher for a package: EbuildFetcher for ebuilds,
# BinpkgPrefetcher for remote binary packages under --getbinpkg;
# None for anything else.
9873 def _create_prefetcher(self, pkg):
9875 @return: a prefetcher, or None if not applicable
9879 if not isinstance(pkg, Package):
9882 elif pkg.type_name == "ebuild":
9884 prefetcher = EbuildFetcher(background=True,
9885 config_pool=self._ConfigPool(pkg.root,
9886 self._allocate_config, self._deallocate_config),
9887 fetchonly=1, logfile=self._fetch_log,
9888 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9890 elif pkg.type_name == "binary" and \
9891 "--getbinpkg" in self.myopts and \
9892 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9894 prefetcher = BinpkgPrefetcher(background=True,
9895 pkg=pkg, scheduler=self._sched_iface)
# A restart is scheduled when a portage replacement appears anywhere
# before the last position in the merge list (last position needs no
# restart since nothing follows it).
9899 def _is_restart_scheduled(self):
9901 Check if the merge list contains a replacement
9902 for the current running instance, that will result
9903 in restart after merge.
9905 @returns: True if a restart is scheduled, False otherwise.
9907 if self._opts_no_restart.intersection(self.myopts):
9910 mergelist = self._mergelist
9912 for i, pkg in enumerate(mergelist):
9913 if self._is_restart_necessary(pkg) and \
9914 i != len(mergelist) - 1:
# Restart is needed when pkg is the portage package on the running
# root and its version differs from the currently running one.
9919 def _is_restart_necessary(self, pkg):
9921 @return: True if merging the given package
9922 requires restart, False otherwise.
9925 # Figure out if we need a restart.
9926 if pkg.root == self._running_root.root and \
9927 portage.match_from_list(
9928 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
9929 if self._running_portage:
9930 return pkg.cpv != self._running_portage.cpv
# After merging a new portage, re-exec emerge with --resume so the
# remaining packages are handled by the upgraded code. Skipped when
# restart is disabled by options, unnecessary for this pkg, or pkg was
# the last list entry. Does not return if os.execv succeeds.
9934 def _restart_if_necessary(self, pkg):
9936 Use execv() to restart emerge. This happens
9937 if portage upgrades itself and there are
9938 remaining packages in the list.
9941 if self._opts_no_restart.intersection(self.myopts):
9944 if not self._is_restart_necessary(pkg):
9947 if pkg == self._mergelist[-1]:
9950 self._main_loop_cleanup()
9952 logger = self._logger
9953 pkg_count = self._pkg_count
9954 mtimedb = self._mtimedb
9955 bad_resume_opts = self._bad_resume_opts
9957 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
9958 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
9960 logger.log(" *** RESTARTING " + \
9961 "emerge via exec() after change of " + \
# Remove the just-merged pkg from the resume list before re-exec.
9964 mtimedb["resume"]["mergelist"].remove(list(pkg))
9966 portage.run_exitfuncs()
9967 mynewargv = [sys.argv[0], "--resume"]
9968 resume_opts = self.myopts.copy()
9969 # For automatic resume, we need to prevent
9970 # any of bad_resume_opts from leaking in
9971 # via EMERGE_DEFAULT_OPTS.
9972 resume_opts["--ignore-default-opts"] = True
9973 for myopt, myarg in resume_opts.iteritems():
9974 if myopt not in bad_resume_opts:
9976 mynewargv.append(myopt)
9978 mynewargv.append(myopt +"="+ str(myarg))
9979 # priority only needs to be adjusted on the first run
9980 os.environ["PORTAGE_NICENESS"] = "0"
9981 os.execv(mynewargv[0], mynewargv)
# NOTE(review): the lines below are the body of an enclosing method
# whose "def" line is not present in this listing (presumably the
# top-level merge() entry point — confirm against full source). It
# announces --resume, validates PORTAGE_TMPDIR per root, propagates
# background mode into each root's settings, verifies manifests, runs
# the merge loop with --keep-going retry handling, and finally reports
# failures.
9985 if "--resume" in self.myopts:
9987 portage.writemsg_stdout(
9988 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9989 self._logger.log(" *** Resuming merge...")
9991 self._save_resume_list()
9994 self._background = self._background_mode()
9995 except self._unknown_internal_error:
9998 for root in self.trees:
9999 root_config = self.trees[root]["root_config"]
10001 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10002 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10003 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10004 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10005 if not tmpdir or not os.path.isdir(tmpdir):
10006 msg = "The directory specified in your " + \
10007 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10008 "does not exist. Please create this " + \
10009 "directory or correct your PORTAGE_TMPDIR setting."
10010 msg = textwrap.wrap(msg, 70)
10011 out = portage.output.EOutput()
# Background mode is passed to child processes through the settings.
10016 if self._background:
10017 root_config.settings.unlock()
10018 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10019 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10020 root_config.settings.lock()
10022 self.pkgsettings[root] = portage.config(
10023 clone=root_config.settings)
10025 rval = self._check_manifests()
10026 if rval != os.EX_OK:
# Merge loop: retry with a recalculated resume list under
# --keep-going when packages fail.
10029 keep_going = "--keep-going" in self.myopts
10030 fetchonly = self._build_opts.fetchonly
10031 mtimedb = self._mtimedb
10032 failed_pkgs = self._failed_pkgs
10035 rval = self._merge()
10036 if rval == os.EX_OK or fetchonly or not keep_going:
10038 if "resume" not in mtimedb:
10040 mergelist = self._mtimedb["resume"].get("mergelist")
10044 if not failed_pkgs:
# Drop failed packages from the resume list before retrying.
10047 for failed_pkg in failed_pkgs:
10048 mergelist.remove(list(failed_pkg.pkg))
10050 self._failed_pkgs_all.extend(failed_pkgs)
10056 if not self._calc_resume_list():
10059 clear_caches(self.trees)
10060 if not self._mergelist:
10063 self._save_resume_list()
10064 self._pkg_count.curval = 0
10065 self._pkg_count.maxval = len([x for x in self._mergelist \
10066 if isinstance(x, Package) and x.operation == "merge"])
10067 self._status_display.maxval = self._pkg_count.maxval
10069 self._logger.log(" *** Finished. Cleaning up...")
10072 self._failed_pkgs_all.extend(failed_pkgs)
# Failure reporting: with exactly one failure in background mode,
# dump that package's whole build log.
10075 background = self._background
10076 failure_log_shown = False
10077 if background and len(self._failed_pkgs_all) == 1:
10078 # If only one package failed then just show it's
10079 # whole log for easy viewing.
10080 failed_pkg = self._failed_pkgs_all[-1]
10081 build_dir = failed_pkg.build_dir
10084 log_paths = [failed_pkg.build_log]
10086 log_path = self._locate_failure_log(failed_pkg)
10087 if log_path is not None:
10089 log_file = open(log_path, 'rb')
10093 if log_file is not None:
10095 for line in log_file:
10096 writemsg_level(line, noiselevel=-1)
10099 failure_log_shown = True
10101 # Dump mod_echo output now since it tends to flood the terminal.
10102 # This allows us to avoid having more important output, generated
10103 # later, from being swept away by the mod_echo output.
10104 mod_echo_output = _flush_elog_mod_echo()
# Otherwise replay the collected die messages per package and phase.
10106 if background and not failure_log_shown and \
10107 self._failed_pkgs_all and \
10108 self._failed_pkgs_die_msgs and \
10109 not mod_echo_output:
10111 printer = portage.output.EOutput()
10112 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10114 if mysettings["ROOT"] != "/":
10115 root_msg = " merged to %s" % mysettings["ROOT"]
10117 printer.einfo("Error messages for package %s%s:" % \
10118 (colorize("INFORM", key), root_msg))
10120 for phase in portage.const.EBUILD_PHASES:
10121 if phase not in logentries:
10123 for msgtype, msgcontent in logentries[phase]:
10124 if isinstance(msgcontent, basestring):
10125 msgcontent = [msgcontent]
10126 for line in msgcontent:
10127 printer.eerror(line.strip("\n"))
10129 if self._post_mod_echo_msgs:
10130 for msg in self._post_mod_echo_msgs:
# With multiple failures, print a summary list of the failed packages.
10133 if len(self._failed_pkgs_all) > 1:
10134 msg = "The following packages have " + \
10135 "failed to build or install:"
10136 prefix = bad(" * ")
10137 writemsg(prefix + "\n", noiselevel=-1)
10138 from textwrap import wrap
10139 for line in wrap(msg, 72):
10140 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10141 writemsg(prefix + "\n", noiselevel=-1)
10142 for failed_pkg in self._failed_pkgs_all:
10143 writemsg("%s\t%s\n" % (prefix,
10144 colorize("INFORM", str(failed_pkg.pkg))),
10146 writemsg(prefix + "\n", noiselevel=-1)
# elog hook: capture ERROR-level entries so die messages can be
# replayed in the end-of-run failure report.
10150 def _elog_listener(self, mysettings, key, logentries, fulltext):
10151 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10153 self._failed_pkgs_die_msgs.append(
10154 (mysettings, key, errors))
# Find a usable (existing, non-empty) build log for a failed package;
# several lines of this method are omitted from this listing.
10156 def _locate_failure_log(self, failed_pkg):
10158 build_dir = failed_pkg.build_dir
10161 log_paths = [failed_pkg.build_log]
10163 for log_path in log_paths:
10168 log_size = os.stat(log_path).st_size
# Seed the package queue from the merge list; Blocker entries are
# handled separately (branch body omitted from this listing).
10179 def _add_packages(self):
10180 pkg_queue = self._pkg_queue
10181 for pkg in self._mergelist:
10182 if isinstance(pkg, Package):
10183 pkg_queue.append(pkg)
10184 elif isinstance(pkg, Blocker):
# Exit listener for merge tasks: do bookkeeping, return the task's
# config to the pool, and bump the progress counter only for successful
# non-reinstall merges.
10187 def _merge_exit(self, merge):
10188 self._do_merge_exit(merge)
10189 self._deallocate_config(merge.merge.settings)
10190 if merge.returncode == os.EX_OK and \
10191 not merge.merge.pkg.installed:
10192 self._status_display.curval += 1
10193 self._status_display.merges = len(self._task_queues.merge)
# Core merge-exit handling: on failure record a _failed_pkg and update
# the display; on success mark the task (and any replaced package's
# uninstall task) complete, consider restarting emerge, and keep the
# resume list in sync.
10196 def _do_merge_exit(self, merge):
10197 pkg = merge.merge.pkg
10198 if merge.returncode != os.EX_OK:
10199 settings = merge.merge.settings
10200 build_dir = settings.get("PORTAGE_BUILDDIR")
10201 build_log = settings.get("PORTAGE_LOG_FILE")
10203 self._failed_pkgs.append(self._failed_pkg(
10204 build_dir=build_dir, build_log=build_log,
10206 returncode=merge.returncode))
10207 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10209 self._status_display.failed = len(self._failed_pkgs)
10212 self._task_complete(pkg)
10213 pkg_to_replace = merge.merge.pkg_to_replace
10214 if pkg_to_replace is not None:
10215 # When a package is replaced, mark it's uninstall
10216 # task complete (if any).
10217 uninst_hash_key = \
10218 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10219 self._task_complete(uninst_hash_key)
10224 self._restart_if_necessary(pkg)
10226 # Call mtimedb.commit() after each merge so that
10227 # --resume still works after being interrupted
10228 # by reboot, sigkill or similar.
10229 mtimedb = self._mtimedb
10230 mtimedb["resume"]["mergelist"].remove(list(pkg))
10231 if not mtimedb["resume"]["mergelist"]:
10232 del mtimedb["resume"]
# Exit listener for build tasks: on success chain a PackageMerge onto
# the merge queue; on failure record a _failed_pkg and release the
# task's config back to the pool.
10235 def _build_exit(self, build):
10236 if build.returncode == os.EX_OK:
10238 merge = PackageMerge(merge=build)
10239 merge.addExitListener(self._merge_exit)
10240 self._task_queues.merge.add(merge)
10241 self._status_display.merges = len(self._task_queues.merge)
10243 settings = build.settings
10244 build_dir = settings.get("PORTAGE_BUILDDIR")
10245 build_log = settings.get("PORTAGE_LOG_FILE")
10247 self._failed_pkgs.append(self._failed_pkg(
10248 build_dir=build_dir, build_log=build_log,
10250 returncode=build.returncode))
10251 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10253 self._status_display.failed = len(self._failed_pkgs)
10254 self._deallocate_config(build.settings)
10256 self._status_display.running = self._jobs
# Binpkg-extract completion is handled identically to a build exit.
10259 def _extract_exit(self, build):
10260 self._build_exit(build)
# Mark a task done and invalidate the memoized "nothing choosable"
# state so _choose_pkg searches again.
10262 def _task_complete(self, pkg):
10263 self._completed_tasks.add(pkg)
10264 self._choose_pkg_return_early = False
# NOTE(review): body of an enclosing method whose "def" line is absent
# from this listing (presumably _merge(self) — confirm against full
# source). It installs the global quiet/elog hooks around the main
# loop, restores them afterwards, and derives the return value from the
# last failure.
10268 self._add_prefetchers()
10269 self._add_packages()
10270 pkg_queue = self._pkg_queue
10271 failed_pkgs = self._failed_pkgs
10272 portage.locks._quiet = self._background
10273 portage.elog._emerge_elog_listener = self._elog_listener
10279 self._main_loop_cleanup()
10280 portage.locks._quiet = False
10281 portage.elog._emerge_elog_listener = None
10283 rval = failed_pkgs[-1].returncode
# Reset per-run state after the main loop: empty the package queue,
# forget completed tasks and the memoized choose-early flag, reset the
# display, drop the graph, and clear pending fetches.
10287 def _main_loop_cleanup(self):
10288 del self._pkg_queue[:]
10289 self._completed_tasks.clear()
10290 self._choose_pkg_return_early = False
10291 self._status_display.reset()
10292 self._digraph = None
10293 self._task_queues.fetch.clear()
# Pick the next package whose dependencies are all satisfied. Without
# a graph, fall back to FIFO order (waiting for running jobs first
# unless --nodeps with parallel jobs). With a graph, select the first
# queued package not dependent on any scheduled merge; when none
# qualifies, memoize that fact until a job completes.
10295 def _choose_pkg(self):
10297 Choose a task that has all it's dependencies satisfied.
10300 if self._choose_pkg_return_early:
10303 if self._digraph is None:
10304 if (self._jobs or self._task_queues.merge) and \
10305 not ("--nodeps" in self.myopts and \
10306 (self._max_jobs is True or self._max_jobs > 1)):
10307 self._choose_pkg_return_early = True
10309 return self._pkg_queue.pop(0)
10311 if not (self._jobs or self._task_queues.merge):
10312 return self._pkg_queue.pop(0)
10314 self._prune_digraph()
# "later" = everything still queued; dependence on these is ignored.
10317 later = set(self._pkg_queue)
10318 for pkg in self._pkg_queue:
10320 if not self._dependent_on_scheduled_merges(pkg, later):
10324 if chosen_pkg is not None:
10325 self._pkg_queue.remove(chosen_pkg)
10327 if chosen_pkg is None:
10328 # There's no point in searching for a package to
10329 # choose until at least one of the existing jobs
10331 self._choose_pkg_return_early = True
10335 def _dependent_on_scheduled_merges(self, pkg, later):
10337 Traverse the subgraph of the given packages deep dependencies
10338 to see if it contains any scheduled merges.
10339 @param pkg: a package to check dependencies for
10341 @param later: packages for which dependence should be ignored
10342 since they will be merged later than pkg anyway and therefore
10343 delaying the merge of pkg will not result in a more optimal
10347 @returns: True if the package is dependent, False otherwise.
10350 graph = self._digraph
10351 completed_tasks = self._completed_tasks
# Iterative depth-first walk over pkg's children in the scheduler graph.
10354 traversed_nodes = set([pkg])
10355 direct_deps = graph.child_nodes(pkg)
10356 node_stack = direct_deps
10357 direct_deps = frozenset(direct_deps)
10359 node = node_stack.pop()
10360 if node in traversed_nodes:
10362 traversed_nodes.add(node)
# A child only blocks pkg when it is still a pending merge: installed
# "nomerge" nodes, indirect uninstalls and completed tasks are skipped
# (the full condition continues on lines sampled out of this listing).
10363 if not ((node.installed and node.operation == "nomerge") or \
10364 (node.operation == "uninstall" and \
10365 node not in direct_deps) or \
10366 node in completed_tasks or \
10370 node_stack.extend(graph.child_nodes(node))
10374 def _allocate_config(self, root):
10376 Allocate a unique config instance for a task in order
10377 to prevent interference between parallel tasks.
# Prefer a pooled instance; otherwise clone the per-ROOT pkgsettings.
10379 if self._config_pool[root]:
10380 temp_settings = self._config_pool[root].pop()
10382 temp_settings = portage.config(clone=self.pkgsettings[root])
10383 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10384 # performance reasons, call it here to make sure all settings from the
10385 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10386 temp_settings.reload()
10387 temp_settings.reset()
10388 return temp_settings
# Return a config instance to the per-ROOT pool for reuse by a later task.
10390 def _deallocate_config(self, settings):
10391 self._config_pool[settings["ROOT"]].append(settings)
# Main scheduling loop: repeatedly run _schedule() and service poll events
# while jobs or merges remain active.
10393 def _main_loop(self):
10395 # Only allow 1 job max if a restart is scheduled
10396 # due to portage update.
10397 if self._is_restart_scheduled() or \
10398 self._opts_no_background.intersection(self.myopts):
10399 self._set_max_jobs(1)
10401 merge_queue = self._task_queues.merge
10403 while self._schedule():
10404 if self._poll_event_handlers:
# Drain remaining events once no jobs/merges are running.
10409 if not (self._jobs or merge_queue):
10411 if self._poll_event_handlers:
# Keep scheduling while the queue is non-empty, unless a package has
# failed and we are not in fetchonly mode (fetchonly continues past
# failures so all distfiles still get downloaded).
10414 def _keep_scheduling(self):
10415 return bool(self._pkg_queue and \
10416 not (self._failed_pkgs and not self._build_opts.fetchonly))
# Run one scheduling pass, refresh the status display, and report whether
# the main loop should keep scheduling.
10418 def _schedule_tasks(self):
10419 self._schedule_tasks_imp()
10420 self._status_display.display()
10423 for q in self._task_queues.values():
10427 # Cancel prefetchers if they're the only reason
10428 # the main poll loop is still running.
10429 if self._failed_pkgs and not self._build_opts.fetchonly and \
10430 not (self._jobs or self._task_queues.merge) and \
10431 self._task_queues.fetch:
10432 self._task_queues.fetch.clear()
# Second pass: cancelling fetches may have freed scheduling capacity.
10436 self._schedule_tasks_imp()
10437 self._status_display.display()
10439 return self._keep_scheduling()
10441 def _job_delay(self):
10444 @returns: True if job scheduling should be delayed, False otherwise.
# When a max-load limit is configured, back off between job starts: the
# delay grows with the number of running jobs (factor * jobs ** exp),
# capped at _job_delay_max.
10447 if self._jobs and self._max_load is not None:
10449 current_time = time.time()
10451 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10452 if delay > self._job_delay_max:
10453 delay = self._job_delay_max
10454 if (current_time - self._previous_job_start_time) < delay:
# One attempt to start new tasks from the queue, subject to job limits.
10459 def _schedule_tasks_imp(self):
10462 @returns: True if state changed, False otherwise.
10469 if not self._keep_scheduling():
10470 return bool(state_change)
# Respect the early-return hint, job-count limit and load-based delay.
10472 if self._choose_pkg_return_early or \
10473 not self._can_add_job() or \
10475 return bool(state_change)
10477 pkg = self._choose_pkg()
10479 return bool(state_change)
# Only real merges advance the "(N of M)" counter, not installed pkgs.
10483 if not pkg.installed:
10484 self._pkg_count.curval += 1
10486 task = self._task(pkg)
# First branch: task goes straight onto the merge queue (the selecting
# condition is among the lines sampled out of this listing)...
10489 merge = PackageMerge(merge=task)
10490 merge.addExitListener(self._merge_exit)
10491 self._task_queues.merge.add(merge)
# ...this branch runs the task as a job that finishes via _extract_exit
# (presumably binary-package extraction -- condition sampled out)...
10495 self._previous_job_start_time = time.time()
10496 self._status_display.running = self._jobs
10497 task.addExitListener(self._extract_exit)
10498 self._task_queues.jobs.add(task)
# ...and this one as a job that finishes via _build_exit.
10502 self._previous_job_start_time = time.time()
10503 self._status_display.running = self._jobs
10504 task.addExitListener(self._build_exit)
10505 self._task_queues.jobs.add(task)
10507 return bool(state_change)
# Build the MergeListItem task for pkg.  For anything but an uninstall,
# look up the currently installed package in the same slot so the task
# knows what it replaces.
10509 def _task(self, pkg):
10511 pkg_to_replace = None
10512 if pkg.operation != "uninstall":
10513 vardb = pkg.root_config.trees["vartree"].dbapi
10514 previous_cpv = vardb.match(pkg.slot_atom)
10516 previous_cpv = previous_cpv.pop()
10517 pkg_to_replace = self._pkg(previous_cpv,
10518 "installed", pkg.root_config, installed=True)
# Each task receives its own settings instance from the config pool to
# avoid interference between parallel tasks.
10520 task = MergeListItem(args_set=self._args_set,
10521 background=self._background, binpkg_opts=self._binpkg_opts,
10522 build_opts=self._build_opts,
10523 config_pool=self._ConfigPool(pkg.root,
10524 self._allocate_config, self._deallocate_config),
10525 emerge_opts=self.myopts,
10526 find_blockers=self._find_blockers(pkg), logger=self._logger,
10527 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10528 pkg_to_replace=pkg_to_replace,
10529 prefetcher=self._prefetchers.get(pkg),
10530 scheduler=self._sched_iface,
10531 settings=self._allocate_config(pkg.root),
10532 statusMessage=self._status_msg,
10533 world_atom=self._world_atom)
# Report a failed package in the status display, e.g.
# "Failed to emerge cat/pkg-1.0 for /root, Log file: '...'".
10537 def _failed_pkg_msg(self, failed_pkg, action, preposition):
10538 pkg = failed_pkg.pkg
10539 msg = "%s to %s %s" % \
10540 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
# Mention the ROOT only when it is not the default "/".
10541 if pkg.root != "/":
10542 msg += " %s %s" % (preposition, pkg.root)
10544 log_path = self._locate_failure_log(failed_pkg)
10545 if log_path is not None:
10546 msg += ", Log file:"
10547 self._status_msg(msg)
# The log path goes on its own line, after the main message.
10549 if log_path is not None:
10550 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10552 def _status_msg(self, msg):
10554 Display a brief status message (no newlines) in the status display.
10555 This is called by tasks to provide feedback to the user. This
10556 delegates the responsibility of generating \r and \n control characters,
10557 to guarantee that lines are created or erased when necessary and
10561 @param msg: a brief status message (no newlines allowed)
# In the foreground, separate the message from any in-progress status line.
10563 if not self._background:
10564 writemsg_level("\n")
10565 self._status_display.displayMessage(msg)
# Persist the merge list into mtimedb["resume"] for --resume support.
10567 def _save_resume_list(self):
10569 Do this before verifying the ebuild Manifests since it might
10570 be possible for the user to use --resume --skipfirst get past
10571 a non-essential package with a broken digest.
10573 mtimedb = self._mtimedb
# Only real Package merge operations are resumable; other mergelist
# entries (e.g. uninstalls) are filtered out.
10574 mtimedb["resume"]["mergelist"] = [list(x) \
10575 for x in self._mergelist \
10576 if isinstance(x, Package) and x.operation == "merge"]
10580 def _calc_resume_list(self):
10582 Use the current resume list to calculate a new one,
10583 dropping any packages with unsatisfied deps.
10585 @returns: True if successful, False otherwise.
10587 print colorize("GOOD", "*** Resuming merge...")
10589 if self._show_list():
10590 if "--tree" in self.myopts:
10591 portage.writemsg_stdout("\n" + \
10592 darkgreen("These are the packages that " + \
10593 "would be merged, in reverse order:\n\n"))
10596 portage.writemsg_stdout("\n" + \
10597 darkgreen("These are the packages that " + \
10598 "would be merged, in order:\n\n"))
10600 show_spinner = "--quiet" not in self.myopts and \
10601 "--nodeps" not in self.myopts
10604 print "Calculating dependencies ",
# Rebuild the depgraph from the saved resume list, skipping entries whose
# dependencies can no longer be satisfied.
10606 myparams = create_depgraph_params(self.myopts, None)
10610 success, mydepgraph, dropped_tasks = resume_depgraph(
10611 self.settings, self.trees, self._mtimedb, self.myopts,
10612 myparams, self._spinner, skip_unsatisfied=True)
10613 except depgraph.UnsatisfiedResumeDep, e:
10614 mydepgraph = e.depgraph
10615 dropped_tasks = set()
10618 print "\b\b... done!"
# Deferred error report, shown after elog output (hence the closure that
# is appended to _post_mod_echo_msgs instead of printing directly).
10621 def unsatisfied_resume_dep_msg():
10622 mydepgraph.display_problems()
10623 out = portage.output.EOutput()
10624 out.eerror("One or more packages are either masked or " + \
10625 "have missing dependencies:")
10628 show_parents = set()
10629 for dep in e.value:
10630 if dep.parent in show_parents:
10632 show_parents.add(dep.parent)
10633 if dep.atom is None:
10634 out.eerror(indent + "Masked package:")
10635 out.eerror(2 * indent + str(dep.parent))
10638 out.eerror(indent + str(dep.atom) + " pulled in by:")
10639 out.eerror(2 * indent + str(dep.parent))
10641 msg = "The resume list contains packages " + \
10642 "that are either masked or have " + \
10643 "unsatisfied dependencies. " + \
10644 "Please restart/continue " + \
10645 "the operation manually, or use --skipfirst " + \
10646 "to skip the first package in the list and " + \
10647 "any other packages that may be " + \
10648 "masked or have missing dependencies."
10649 for line in textwrap.wrap(msg, 72):
10651 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10654 if success and self._show_list():
10655 mylist = mydepgraph.altlist()
10657 if "--tree" in self.myopts:
10659 mydepgraph.display(mylist, favorites=self._favorites)
10662 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10664 mydepgraph.display_problems()
# Adopt the recalculated list; break_refs drops references that would
# otherwise keep the old depgraph alive.
10666 mylist = mydepgraph.altlist()
10667 mydepgraph.break_refs(mylist)
10668 mydepgraph.break_refs(dropped_tasks)
10669 self._mergelist = mylist
10670 self._set_digraph(mydepgraph.schedulerGraph())
# Under --keep-going, record each dropped merge as a failed package so it
# is reported at the end of the run.
10673 for task in dropped_tasks:
10674 if not (isinstance(task, Package) and task.operation == "merge"):
10677 msg = "emerge --keep-going:" + \
10679 if pkg.root != "/":
10680 msg += " for %s" % (pkg.root,)
10681 msg += " dropped due to unsatisfied dependency."
10682 for line in textwrap.wrap(msg, msg_width):
10683 eerror(line, phase="other", key=pkg.cpv)
10684 settings = self.pkgsettings[pkg.root]
10685 # Ensure that log collection from $T is disabled inside
10686 # elog_process(), since any logs that might exist are
10688 settings.pop("T", None)
10689 portage.elog.elog_process(pkg.cpv, settings)
10690 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# True when the merge list should be displayed: --ask, --tree or
# --verbose was given and --quiet was not.
10694 def _show_list(self):
10695 myopts = self.myopts
10696 if "--quiet" not in myopts and \
10697 ("--ask" in myopts or "--tree" in myopts or \
10698 "--verbose" in myopts):
10702 def _world_atom(self, pkg):
10704 Add the package to the world file, but only if
10705 it's supposed to be added. Otherwise, do nothing.
# World is never updated under pretend/fetch-only style options.
10708 if set(("--buildpkgonly", "--fetchonly",
10710 "--oneshot", "--onlydeps",
10711 "--pretend")).intersection(self.myopts):
# Only packages merged into the target ROOT belong in its world file.
10714 if pkg.root != self.target_root:
# Only packages the user explicitly asked for are recorded.
10717 args_set = self._args_set
10718 if not args_set.findAtomForPackage(pkg):
10721 logger = self._logger
10722 pkg_count = self._pkg_count
10723 root_config = pkg.root_config
10724 world_set = root_config.sets["world"]
# Lock/load/add are feature-tested because some world-set implementations
# (hasattr checks) do not support them.
10725 world_locked = False
10726 if hasattr(world_set, "lock"):
10728 world_locked = True
10731 if hasattr(world_set, "load"):
10732 world_set.load() # maybe it's changed on disk
10734 atom = create_world_atom(pkg, args_set, root_config)
10736 if hasattr(world_set, "add"):
10737 self._status_msg(('Recording %s in "world" ' + \
10738 'favorites file...') % atom)
10739 logger.log(" === (%s of %s) Updating world file (%s)" % \
10740 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10741 world_set.add(atom)
10743 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10744 (atom,), level=logging.WARN, noiselevel=-1)
10749 def _pkg(self, cpv, type_name, root_config, installed=False):
10751 Get a package instance from the cache, or create a new
10752 one if necessary. Raises KeyError from aux_get if it
10753 fails for some reason (package does not exist or is
# operation selection: "merge" vs "nomerge" (the condition line choosing
# between them was sampled out of this listing).
10756 operation = "merge"
10758 operation = "nomerge"
10760 if self._digraph is not None:
10761 # Reuse existing instance when available.
10762 pkg = self._digraph.get(
10763 (type_name, root_config.root, cpv, operation))
10764 if pkg is not None:
# Cache miss: construct a fresh Package from the appropriate db's
# auxiliary metadata cache.
10767 tree_type = depgraph.pkg_tree_map[type_name]
10768 db = root_config.trees[tree_type].dbapi
10769 db_keys = list(self.trees[root_config.root][
10770 tree_type].dbapi._aux_cache_keys)
10771 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10772 pkg = Package(cpv=cpv, metadata=metadata,
10773 root_config=root_config, installed=installed)
# For ebuilds, compute the effective USE flags from the current settings.
10774 if type_name == "ebuild":
10775 settings = self.pkgsettings[root_config.root]
10776 settings.setcpv(pkg)
10777 pkg.metadata["USE"] = settings["PORTAGE_USE"]
# Poll-scheduler subclass that regenerates the ebuild metadata cache
# (used by "emerge --regen"), running one EbuildMetadataPhase-style
# process per ebuild up to the configured job/load limits.
10781 class MetadataRegen(PollScheduler):
10783 def __init__(self, portdb, max_jobs=None, max_load=None):
10784 PollScheduler.__init__(self)
10785 self._portdb = portdb
10786 # Fall back to a default job count when none was given (the fallback
10786 # assignment is among the lines sampled out of this listing).
10787 if max_jobs is None:
10790 self._max_jobs = max_jobs
10791 self._max_load = max_load
10792 self._sched_iface = self._sched_iface_class(
10793 register=self._register,
10794 schedule=self._schedule_wait,
10795 unregister=self._unregister)
10797 self._valid_pkgs = set()
10798 self._process_iter = self._iter_metadata_processes()
# Generator yielding one metadata process per ebuild, walking all
# categories/packages of the portdb in sorted order.
10800 def _iter_metadata_processes(self):
10801 portdb = self._portdb
10802 valid_pkgs = self._valid_pkgs
10803 every_cp = portdb.cp_all()
# Reverse sort + pop() from the end yields cps in ascending order while
# letting the list shrink as it is consumed.
10804 every_cp.sort(reverse=True)
10807 cp = every_cp.pop()
10808 portage.writemsg_stdout("Processing %s\n" % cp)
10809 cpv_list = portdb.cp_list(cp)
10810 for cpv in cpv_list:
10811 valid_pkgs.add(cpv)
10812 ebuild_path, repo_path = portdb.findname2(cpv)
10813 metadata_process = portdb._metadata_process(
10814 cpv, ebuild_path, repo_path)
10815 if metadata_process is None:
10817 yield metadata_process
# --- run() body (its "def run(self):" line was sampled out) ---
# Snapshot all existing cache keys as "dead", run the scheduler, then
# discard every key that still has a corresponding ebuild; what remains
# is stale and gets deleted from the cache.
10821 portdb = self._portdb
10822 from portage.cache.cache_errors import CacheError
10825 for mytree in portdb.porttrees:
10827 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10828 except CacheError, e:
10829 portage.writemsg("Error listing cache entries for " + \
10830 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10835 while self._schedule():
10842 for y in self._valid_pkgs:
10843 for mytree in portdb.porttrees:
10844 if portdb.findname2(y, mytree=mytree)[0]:
10845 dead_nodes[mytree].discard(y)
10847 for mytree, nodes in dead_nodes.iteritems():
10848 auxdb = portdb.auxdb[mytree]
10852 except (KeyError, CacheError):
# Start as many pending metadata processes as the job limit allows.
10855 def _schedule_tasks(self):
10858 @returns: True if there may be remaining tasks to schedule,
10861 while self._can_add_job():
10863 metadata_process = self._process_iter.next()
10864 except StopIteration:
10868 metadata_process.scheduler = self._sched_iface
10869 metadata_process.addExitListener(self._metadata_exit)
10870 metadata_process.start()
# On failure, drop the cpv from _valid_pkgs so run() treats its cache
# entry as dead, and warn the user.
10873 def _metadata_exit(self, metadata_process):
10875 if metadata_process.returncode != os.EX_OK:
10876 self._valid_pkgs.discard(metadata_process.cpv)
10877 portage.writemsg("Error processing %s, continuing...\n" % \
10878 (metadata_process.cpv,))
10881 class UninstallFailure(portage.exception.PortageException):
10883 An instance of this class is raised by unmerge() when
10884 an uninstallation fails.
10887 def __init__(self, *pargs):
10888 portage.exception.PortageException.__init__(self, pargs)
# status holds the non-zero return code from portage.unmerge(), when
# one was passed (a default assignment was sampled out of this listing).
10890 self.status = pargs[0]
# Select and unmerge installed packages for the "unmerge"/"prune"/"clean"
# actions.  Builds pkgmap (per-argument {"protected","selected","omitted"}
# sets), filters out packages still wanted (portage itself, members of
# world/package sets), displays a preview, and finally calls
# portage.unmerge() for each selected cpv.  Raises UninstallFailure when
# raise_on_error is set and an unmerge fails.  NOTE(review): this listing
# is line-sampled; try/else/return lines are missing throughout.
10892 def unmerge(root_config, myopts, unmerge_action,
10893 unmerge_files, ldpath_mtimes, autoclean=0,
10894 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10895 scheduler=None, writemsg_level=portage.util.writemsg_level):
10897 quiet = "--quiet" in myopts
10898 settings = root_config.settings
10899 sets = root_config.sets
10900 vartree = root_config.trees["vartree"]
10901 candidate_catpkgs=[]
10903 xterm_titles = "notitles" not in settings.features
10904 out = portage.output.EOutput()
# Local helper (its "def _pkg(cpv):" line was sampled out): memoized
# construction of installed Package instances from the vartree.
10906 db_keys = list(vartree.dbapi._aux_cache_keys)
10909 pkg = pkg_cache.get(cpv)
10911 pkg = Package(cpv=cpv, installed=True,
10912 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
10913 root_config=root_config,
10914 type_name="installed")
10915 pkg_cache[cpv] = pkg
# Lock the installed-package database (VDB) while selecting.
10918 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10920 # At least the parent needs to exist for the lock file.
10921 portage.util.ensure_dirs(vdb_path)
10922 except portage.exception.PortageException:
10926 if os.access(vdb_path, os.W_OK):
10927 vdb_lock = portage.locks.lockdir(vdb_path)
# Resolve the system set into concrete package keys (expanding virtuals
# with exactly one installed provider) so system packages can be warned
# about before unmerging.
10928 realsyslist = sets["system"].getAtoms()
10930 for x in realsyslist:
10931 mycp = portage.dep_getkey(x)
10932 if mycp in settings.getvirtuals():
10934 for provider in settings.getvirtuals()[mycp]:
10935 if vartree.dbapi.match(provider):
10936 providers.append(provider)
10937 if len(providers) == 1:
10938 syslist.extend(providers)
10940 syslist.append(mycp)
10942 mysettings = portage.config(clone=settings)
10944 if not unmerge_files:
10945 if unmerge_action == "unmerge":
10947 print bold("emerge unmerge") + " can only be used with specific package names"
10953 localtree = vartree
10954 # process all arguments and add all
10955 # valid db entries to candidate_catpkgs
10957 if not unmerge_files:
10958 candidate_catpkgs.extend(vartree.dbapi.cp_all())
10960 #we've got command-line arguments
10961 if not unmerge_files:
10962 print "\nNo packages to unmerge have been provided.\n"
10964 for x in unmerge_files:
10965 arg_parts = x.split('/')
10966 if x[0] not in [".","/"] and \
10967 arg_parts[-1][-7:] != ".ebuild":
10968 #possible cat/pkg or dep; treat as such
10969 candidate_catpkgs.append(x)
10970 elif unmerge_action in ["prune","clean"]:
10971 print "\n!!! Prune and clean do not accept individual" + \
10972 " ebuilds as arguments;\n skipping.\n"
10975 # it appears that the user is specifying an installed
10976 # ebuild and we're in "unmerge" mode, so it's ok.
10977 if not os.path.exists(x):
10978 print "\n!!! The path '"+x+"' doesn't exist.\n"
10981 absx = os.path.abspath(x)
10982 sp_absx = absx.split("/")
10983 if sp_absx[-1][-7:] == ".ebuild":
10985 absx = "/".join(sp_absx)
10987 sp_absx_len = len(sp_absx)
10989 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10990 vdb_len = len(vdb_path)
10992 sp_vdb = vdb_path.split("/")
10993 sp_vdb_len = len(sp_vdb)
# An ebuild path given on the command line must point inside the VDB.
10995 if not os.path.exists(absx+"/CONTENTS"):
10996 print "!!! Not a valid db dir: "+str(absx)
10999 if sp_absx_len <= sp_vdb_len:
11000 # The Path is shorter... so it can't be inside the vdb.
11003 print "\n!!!",x,"cannot be inside "+ \
11004 vdb_path+"; aborting.\n"
11007 for idx in range(0,sp_vdb_len):
11008 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11011 print "\n!!!", x, "is not inside "+\
11012 vdb_path+"; aborting.\n"
11015 print "="+"/".join(sp_absx[sp_vdb_len:])
11016 candidate_catpkgs.append(
11017 "="+"/".join(sp_absx[sp_vdb_len:]))
11020 if (not "--quiet" in myopts):
11022 if settings["ROOT"] != "/":
11023 writemsg_level(darkgreen(newline+ \
11024 ">>> Using system located in ROOT tree %s\n" % \
11027 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11028 not ("--quiet" in myopts):
11029 writemsg_level(darkgreen(newline+\
11030 ">>> These are the packages that would be unmerged:\n"))
11032 # Preservation of order is required for --depclean and --prune so
11033 # that dependencies are respected. Use all_selected to eliminate
11034 # duplicate packages since the same package may be selected by
11037 all_selected = set()
11038 for x in candidate_catpkgs:
11039 # cycle through all our candidate deps and determine
11040 # what will and will not get unmerged
11042 mymatch = vartree.dbapi.match(x)
11043 except portage.exception.AmbiguousPackageName, errpkgs:
11044 print "\n\n!!! The short ebuild name \"" + \
11045 x + "\" is ambiguous. Please specify"
11046 print "!!! one of the following fully-qualified " + \
11047 "ebuild names instead:\n"
11048 for i in errpkgs[0]:
11049 print " " + green(i)
11053 if not mymatch and x[0] not in "<>=~":
11054 mymatch = localtree.dep_match(x)
11056 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11057 (x, unmerge_action), noiselevel=-1)
# One pkgmap entry per argument, splitting matches into protected /
# selected / omitted groups.
11061 {"protected": set(), "selected": set(), "omitted": set()})
11062 mykey = len(pkgmap) - 1
11063 if unmerge_action=="unmerge":
11065 if y not in all_selected:
11066 pkgmap[mykey]["selected"].add(y)
11067 all_selected.add(y)
11068 elif unmerge_action == "prune":
11069 if len(mymatch) == 1:
# prune: keep only the "best" installed version; same-slot ties are
# broken by the install counter (higher == more recently installed).
11071 best_version = mymatch[0]
11072 best_slot = vartree.getslot(best_version)
11073 best_counter = vartree.dbapi.cpv_counter(best_version)
11074 for mypkg in mymatch[1:]:
11075 myslot = vartree.getslot(mypkg)
11076 mycounter = vartree.dbapi.cpv_counter(mypkg)
11077 if (myslot == best_slot and mycounter > best_counter) or \
11078 mypkg == portage.best([mypkg, best_version]):
11079 if myslot == best_slot:
11080 if mycounter < best_counter:
11081 # On slot collision, keep the one with the
11082 # highest counter since it is the most
11083 # recently installed.
11085 best_version = mypkg
11087 best_counter = mycounter
11088 pkgmap[mykey]["protected"].add(best_version)
11089 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11090 if mypkg != best_version and mypkg not in all_selected)
11091 all_selected.update(pkgmap[mykey]["selected"])
11093 # unmerge_action == "clean"
# clean: group installed versions by slot, then protect the
# highest-counter (most recently installed) member of each slot.
11095 for mypkg in mymatch:
11096 if unmerge_action == "clean":
11097 myslot = localtree.getslot(mypkg)
11099 # since we're pruning, we don't care about slots
11100 # and put all the pkgs in together
11102 if myslot not in slotmap:
11103 slotmap[myslot] = {}
11104 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11106 for mypkg in vartree.dbapi.cp_list(
11107 portage.dep_getkey(mymatch[0])):
11108 myslot = vartree.getslot(mypkg)
11109 if myslot not in slotmap:
11110 slotmap[myslot] = {}
11111 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11113 for myslot in slotmap:
11114 counterkeys = slotmap[myslot].keys()
11115 if not counterkeys:
11118 pkgmap[mykey]["protected"].add(
11119 slotmap[myslot][counterkeys[-1]])
11120 del counterkeys[-1]
11122 for counter in counterkeys[:]:
11123 mypkg = slotmap[myslot][counter]
11124 if mypkg not in mymatch:
11125 counterkeys.remove(counter)
11126 pkgmap[mykey]["protected"].add(
11127 slotmap[myslot][counter])
11129 #be pretty and get them in order of merge:
11130 for ckey in counterkeys:
11131 mypkg = slotmap[myslot][ckey]
11132 if mypkg not in all_selected:
11133 pkgmap[mykey]["selected"].add(mypkg)
11134 all_selected.add(mypkg)
11135 # ok, now the last-merged package
11136 # is protected, and the rest are selected
11137 numselected = len(all_selected)
11138 if global_unmerge and not numselected:
11139 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11142 if not numselected:
11143 portage.writemsg_stdout(
11144 "\n>>> No packages selected for removal by " + \
11145 unmerge_action + "\n")
11149 vartree.dbapi.flush_cache()
11150 portage.locks.unlockdir(vdb_lock)
11152 from portage.sets.base import EditablePackageSet
11154 # generate a list of package sets that are directly or indirectly listed in "world",
11155 # as there is no persistent list of "installed" sets
11156 installed_sets = ["world"]
# Transitive closure: keep appending nested set names until none are new.
11161 pos = len(installed_sets)
11162 for s in installed_sets[pos - 1:]:
11165 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11168 installed_sets += candidates
11169 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11172 # we don't want to unmerge packages that are still listed in user-editable package sets
11173 # listed in "world" as they would be remerged on the next update of "world" or the
11174 # relevant package sets.
11175 unknown_sets = set()
11176 for cp in xrange(len(pkgmap)):
11177 for cpv in pkgmap[cp]["selected"].copy():
11181 # It could have been uninstalled
11182 # by a concurrent process.
# Never let a plain unmerge on the main ROOT remove portage itself.
11185 if unmerge_action != "clean" and \
11186 root_config.root == "/" and \
11187 portage.match_from_list(
11188 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11189 msg = ("Not unmerging package %s since there is no valid " + \
11190 "reason for portage to unmerge itself.") % (pkg.cpv,)
11191 for line in textwrap.wrap(msg, 75):
11193 # adjust pkgmap so the display output is correct
11194 pkgmap[cp]["selected"].remove(cpv)
11195 all_selected.remove(cpv)
11196 pkgmap[cp]["protected"].add(cpv)
11200 for s in installed_sets:
11201 # skip sets that the user requested to unmerge, and skip world
11202 # unless we're unmerging a package set (as the package would be
11203 # removed from "world" later on)
11204 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11208 if s in unknown_sets:
11210 unknown_sets.add(s)
11211 out = portage.output.EOutput()
11212 out.eerror(("Unknown set '@%s' in " + \
11213 "%svar/lib/portage/world_sets") % \
11214 (s, root_config.root))
11217 # only check instances of EditablePackageSet as other classes are generally used for
11218 # special purposes and can be ignored here (and are usually generated dynamically, so the
11219 # user can't do much about them anyway)
11220 if isinstance(sets[s], EditablePackageSet):
11222 # This is derived from a snippet of code in the
11223 # depgraph._iter_atoms_for_pkg() method.
11224 for atom in sets[s].iterAtomsForPackage(pkg):
11225 inst_matches = vartree.dbapi.match(atom)
11226 inst_matches.reverse() # descending order
11228 for inst_cpv in inst_matches:
11230 inst_pkg = _pkg(inst_cpv)
11232 # It could have been uninstalled
11233 # by a concurrent process.
11236 if inst_pkg.cp != atom.cp:
11238 if pkg >= inst_pkg:
11239 # This is descending order, and we're not
11240 # interested in any versions <= pkg given.
11242 if pkg.slot_atom != inst_pkg.slot_atom:
11243 higher_slot = inst_pkg
11245 if higher_slot is None:
11249 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11250 #print colorize("WARN", "but still listed in the following package sets:")
11251 #print " %s\n" % ", ".join(parents)
11252 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11253 print colorize("WARN", "still referenced by the following package sets:")
11254 print " %s\n" % ", ".join(parents)
11255 # adjust pkgmap so the display output is correct
11256 pkgmap[cp]["selected"].remove(cpv)
11257 all_selected.remove(cpv)
11258 pkgmap[cp]["protected"].add(cpv)
11262 numselected = len(all_selected)
11263 if not numselected:
11265 "\n>>> No packages selected for removal by " + \
11266 unmerge_action + "\n")
11269 # Unmerge order only matters in some cases
# When order doesn't matter, regroup pkgmap entries by category/package
# key so the preview is sorted and de-duplicated.
11273 selected = d["selected"]
11276 cp = portage.cpv_getkey(iter(selected).next())
11277 cp_dict = unordered.get(cp)
11278 if cp_dict is None:
11280 unordered[cp] = cp_dict
11283 for k, v in d.iteritems():
11284 cp_dict[k].update(v)
11285 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview display: list selected/protected/omitted versions per cp, with
# a loud warning (and optional countdown) for system-profile packages.
11287 for x in xrange(len(pkgmap)):
11288 selected = pkgmap[x]["selected"]
11291 for mytype, mylist in pkgmap[x].iteritems():
11292 if mytype == "selected":
11294 mylist.difference_update(all_selected)
11295 cp = portage.cpv_getkey(iter(selected).next())
11296 for y in localtree.dep_match(cp):
11297 if y not in pkgmap[x]["omitted"] and \
11298 y not in pkgmap[x]["selected"] and \
11299 y not in pkgmap[x]["protected"] and \
11300 y not in all_selected:
11301 pkgmap[x]["omitted"].add(y)
11302 if global_unmerge and not pkgmap[x]["selected"]:
11303 #avoid cluttering the preview printout with stuff that isn't getting unmerged
11305 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11306 writemsg_level(colorize("BAD","\a\n\n!!! " + \
11307 "'%s' is part of your system profile.\n" % cp),
11308 level=logging.WARNING, noiselevel=-1)
11309 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11310 "be damaging to your system.\n\n"),
11311 level=logging.WARNING, noiselevel=-1)
11312 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11313 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11314 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11316 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11318 writemsg_level(bold(cp) + ": ", noiselevel=-1)
11319 for mytype in ["selected","protected","omitted"]:
11321 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11322 if pkgmap[x][mytype]:
11323 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11324 sorted_pkgs.sort(portage.pkgcmp)
11325 for pn, ver, rev in sorted_pkgs:
11329 myversion = ver + "-" + rev
11330 if mytype == "selected":
11332 colorize("UNMERGE_WARN", myversion + " "),
11336 colorize("GOOD", myversion + " "), noiselevel=-1)
11338 writemsg_level("none ", noiselevel=-1)
11340 writemsg_level("\n", noiselevel=-1)
11342 writemsg_level("\n", noiselevel=-1)
11344 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11345 " packages are slated for removal.\n")
11346 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11347 " and " + colorize("GOOD", "'omitted'") + \
11348 " packages will not be removed.\n\n")
11350 if "--pretend" in myopts:
11351 #we're done... return
11353 if "--ask" in myopts:
11354 if userquery("Would you like to unmerge these packages?")=="No":
11355 # enter pretend mode for correct formatting of results
11356 myopts["--pretend"] = True
11361 #the real unmerging begins, after a short delay....
11362 if clean_delay and not autoclean:
11363 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11365 for x in xrange(len(pkgmap)):
11366 for y in pkgmap[x]["selected"]:
11367 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11368 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11369 mysplit = y.split("/")
11371 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11372 mysettings, unmerge_action not in ["clean","prune"],
11373 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11374 scheduler=scheduler)
11376 if retval != os.EX_OK:
11377 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11379 raise UninstallFailure(retval)
# Keep the world file in sync with what was actually removed.
11382 if clean_world and hasattr(sets["world"], "cleanPackage"):
11383 sets["world"].cleanPackage(vartree.dbapi, y)
11384 emergelog(xterm_titles, " >>> unmerge success: "+y)
11385 if clean_world and hasattr(sets["world"], "remove"):
11386 for s in root_config.setconfig.active:
11387 sets["world"].remove(SETPREFIX+s)
# Regenerate the GNU info "dir" index for any info directory whose mtime
# changed since the last run (tracked in prev_mtimes), by invoking
# /usr/bin/install-info on each info file.  Reports a per-file error
# count at the end.
11390 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
11392 if os.path.exists("/usr/bin/install-info"):
11393 out = portage.output.EOutput()
# Collect info directories whose mtime differs from the saved one.
11398 inforoot=normpath(root+z)
11399 if os.path.isdir(inforoot):
11400 infomtime = long(os.stat(inforoot).st_mtime)
11401 if inforoot not in prev_mtimes or \
11402 prev_mtimes[inforoot] != infomtime:
11403 regen_infodirs.append(inforoot)
11405 if not regen_infodirs:
11406 portage.writemsg_stdout("\n")
11407 out.einfo("GNU info directory index is up-to-date.")
11409 portage.writemsg_stdout("\n")
11410 out.einfo("Regenerating GNU info directory index...")
11412 dir_extensions = ("", ".gz", ".bz2")
11416 for inforoot in regen_infodirs:
11420 if not os.path.isdir(inforoot) or \
11421 not os.access(inforoot, os.W_OK):
11424 file_list = os.listdir(inforoot)
11426 dir_file = os.path.join(inforoot, "dir")
11427 moved_old_dir = False
11428 processed_count = 0
11429 for x in file_list:
# Skip hidden entries, subdirectories and the index files themselves.
11430 if x.startswith(".") or \
11431 os.path.isdir(os.path.join(inforoot, x)):
11433 if x.startswith("dir"):
11435 for ext in dir_extensions:
11436 if x == "dir" + ext or \
11437 x == "dir" + ext + ".old":
# Before processing the first file, move the old index aside so
# install-info builds a fresh one.
11442 if processed_count == 0:
11443 for ext in dir_extensions:
11445 os.rename(dir_file + ext, dir_file + ext + ".old")
11446 moved_old_dir = True
11447 except EnvironmentError, e:
11448 if e.errno != errno.ENOENT:
11451 processed_count += 1
11452 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11453 existsstr="already exists, for file `"
# Filter out install-info's harmless diagnostics before counting errors.
11455 if re.search(existsstr,myso):
11456 # Already exists... Don't increment the count for this.
11458 elif myso[:44]=="install-info: warning: no info dir entry in ":
11459 # This info file doesn't contain a DIR-header: install-info produces this
11460 # (harmless) warning (the --quiet switch doesn't seem to work).
11461 # Don't increment the count for this.
11464 badcount=badcount+1
11465 errmsg += myso + "\n"
11468 if moved_old_dir and not os.path.exists(dir_file):
11469 # We didn't generate a new dir file, so put the old file
11470 # back where it was originally found.
11471 for ext in dir_extensions:
11473 os.rename(dir_file + ext + ".old", dir_file + ext)
11474 except EnvironmentError, e:
11475 if e.errno != errno.ENOENT:
11479 # Clean dir.old cruft so that they don't prevent
11480 # unmerge of otherwise empty directories.
11481 for ext in dir_extensions:
11483 os.unlink(dir_file + ext + ".old")
11484 except EnvironmentError, e:
11485 if e.errno != errno.ENOENT:
11489 #update mtime so we can potentially avoid regenerating.
11490 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
11493 out.eerror("Processed %d info files; %d errors." % \
11494 (icount, badcount))
11495 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11498 out.einfo("Processed %d info files." % (icount,))
# display_news_notification(root_config, myopts):
# Print a warning for every repository that has unread GNU/Gentoo news
# items, plus a pointer to `eselect news`.  When --pretend is in
# myopts the unread state is queried but not updated.
# NOTE(review): a few original lines are elided in this extract
# (presumably the `if unreadItems:` guard and blank-line prints).
11501 def display_news_notification(root_config, myopts):
11502 target_root = root_config.root
11503 trees = root_config.trees
11504 settings = trees["vartree"].settings
11505 portdb = trees["porttree"].dbapi
11506 vardb = trees["vartree"].dbapi
11507 NEWS_PATH = os.path.join("metadata", "news")
11508 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
# Tracks whether any warning has been printed yet, so the footer is
# only shown when needed.
11509 newsReaderDisplay = False
# Only mark items as counted/updated when this is a real run.
11510 update = "--pretend" not in myopts
11512 for repo in portdb.getRepositories():
11513 unreadItems = checkUpdatedNewsItems(
11514 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11516 if not newsReaderDisplay:
11517 newsReaderDisplay = True
11519 print colorize("WARN", " * IMPORTANT:"),
11520 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11523 if newsReaderDisplay:
11524 print colorize("WARN", " *"),
11525 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# display_preserved_libs(vardbapi):
# Warn about preserved libraries that still exist on disk: for each
# owning package list the preserved files (grouped by identical inode
# via samefile_map), up to MAX_DISPLAY of their consumers, and the
# packages owning those consumers.  Ends with a pointer to
# `emerge @preserved-rebuild`.
# NOTE(review): several original lines are elided here (MAX_DISPLAY
# and consumer_map/samefile_map initialization, the linkmap.rebuild()
# try block, and some guards) — confirm against the full file.
11528 def display_preserved_libs(vardbapi):
11531 # Ensure the registry is consistent with existing files.
11532 vardbapi.plib_registry.pruneNonExisting()
11534 if vardbapi.plib_registry.hasEntries():
11536 print colorize("WARN", "!!!") + " existing preserved libs:"
11537 plibdata = vardbapi.plib_registry.getPreservedLibs()
11538 linkmap = vardbapi.linkmap
11541 linkmap_broken = False
# If the linkmap cannot be rebuilt (e.g. scanelf missing), consumer
# information is unavailable; remember that and degrade gracefully.
11545 except portage.exception.CommandNotFound, e:
11546 writemsg_level("!!! Command Not Found: %s\n" % (e,),
11547 level=logging.ERROR, noiselevel=-1)
11549 linkmap_broken = True
# First pass: map every preserved file to its external consumers and
# collect the consumers whose owners we will need to look up.
11551 search_for_owners = set()
11552 for cpv in plibdata:
11553 internal_plib_keys = set(linkmap._obj_key(f) \
11554 for f in plibdata[cpv])
11555 for f in plibdata[cpv]:
11556 if f in consumer_map:
11559 for c in linkmap.findConsumers(f):
11560 # Filter out any consumers that are also preserved libs
11561 # belonging to the same package as the provider.
11562 if linkmap._obj_key(c) not in internal_plib_keys:
11563 consumers.append(c)
11565 consumer_map[f] = consumers
# Only MAX_DISPLAY+1 consumers are ever shown per file, so owner
# lookup is capped accordingly.
11566 search_for_owners.update(consumers[:MAX_DISPLAY+1])
11568 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
# Second pass: print per-package reports.
11570 for cpv in plibdata:
11571 print colorize("WARN", ">>>") + " package: %s" % cpv
11573 for f in plibdata[cpv]:
# Group paths that refer to the same underlying object so hardlinked /
# symlinked duplicates are reported together.
11574 obj_key = linkmap._obj_key(f)
11575 alt_paths = samefile_map.get(obj_key)
11576 if alt_paths is None:
11578 samefile_map[obj_key] = alt_paths
11581 for alt_paths in samefile_map.itervalues():
11582 alt_paths = sorted(alt_paths)
11583 for p in alt_paths:
11584 print colorize("WARN", " * ") + " - %s" % (p,)
11586 consumers = consumer_map.get(f, [])
11587 for c in consumers[:MAX_DISPLAY]:
11588 print colorize("WARN", " * ") + " used by %s (%s)" % \
11589 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
# Exactly one extra consumer: show it rather than a "1 other" line.
11590 if len(consumers) == MAX_DISPLAY + 1:
11591 print colorize("WARN", " * ") + " used by %s (%s)" % \
11592 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
11593 for x in owners.get(consumers[MAX_DISPLAY], [])))
11594 elif len(consumers) > MAX_DISPLAY:
11595 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
11596 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.
	@returns: True if messages were shown, False otherwise.
	"""
	shown = False
	try:
		from portage.elog import mod_echo
	except ImportError:
		pass # happens during downgrade to a version without the module
	else:
		# Flush any queued elog "echo" messages before our own output.
		shown = bool(mod_echo._items)
		mod_echo.finalize()
	return shown
# post_emerge(root_config, myopts, mtimedb, retval):
# End-of-merge-session housekeeping: regenerate settings, update the
# GNU info index, report CONFIG_PROTECT updates, show news and
# preserved-libs warnings, then exit with `retval`.
# NOTE(review): docstring delimiters and several lines (mtimedb.commit,
# sys.exit calls, try/finally around the vdb lock) are elided in this
# extract — confirm against the full file.
11616 def post_emerge(root_config, myopts, mtimedb, retval):
11618 Misc. things to run at the end of a merge session.
11621 Update Config Files
11624 Display preserved libs warnings
11627 @param trees: A dictionary mapping each ROOT to it's package databases
11629 @param mtimedb: The mtimeDB to store data needed across merge invocations
11630 @type mtimedb: MtimeDB class instance
11631 @param retval: Emerge's return value
11635 1. Calls sys.exit(retval)
11638 target_root = root_config.root
11639 trees = { target_root : root_config.trees }
11640 vardbapi = trees[target_root]["vartree"].dbapi
11641 settings = vardbapi.settings
11642 info_mtimes = mtimedb["info"]
11644 # Load the most current variables from ${ROOT}/etc/profile.env
11647 settings.regenerate()
11650 config_protect = settings.get("CONFIG_PROTECT","").split()
# INFOPATH/INFODIR are colon-separated lists of info directories.
11651 infodirs = settings.get("INFOPATH","").split(":") + \
11652 settings.get("INFODIR","").split(":")
11656 if retval == os.EX_OK:
11657 exit_msg = " *** exiting successfully."
11659 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11660 emergelog("notitles" not in settings.features, exit_msg)
# Flush queued elog echo output first so the notices below come last.
11662 _flush_elog_mod_echo()
# If the vdb counter hash is unchanged, nothing was merged/unmerged:
# show news and bail out early.
11664 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11665 if counter_hash is not None and \
11666 counter_hash == vardbapi._counter_hash()
11667 display_news_notification(root_config, myopts)
11668 # If vdb state has not changed then there's nothing else to do.
11671 vdb_path = os.path.join(target_root, portage.VDB_PATH)
11672 portage.util.ensure_dirs(vdb_path)
# Lock the vdb while updating the info index, but only on a real run
# with write access.
11674 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11675 vdb_lock = portage.locks.lockdir(vdb_path)
11679 if "noinfo" not in settings.features:
11680 chk_updated_info_files(target_root,
11681 infodirs, info_mtimes, retval)
11685 portage.locks.unlockdir(vdb_lock)
11687 chk_updated_cfg_files(target_root, config_protect)
11689 display_news_notification(root_config, myopts)
11690 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11691 display_preserved_libs(vardbapi)
# chk_updated_cfg_files(target_root, config_protect):
# Scan each CONFIG_PROTECT location under target_root for pending
# `._cfg????_*` update files (via find) and tell the user how many
# config files need updating.
# NOTE(review): a handful of original lines (try/except around lstat,
# continue statements, the per-directory count print argument) are
# elided in this extract.
11696 def chk_updated_cfg_files(target_root, config_protect):
11698 #number of directories with some protect files in them
11700 for x in config_protect:
11701 x = os.path.join(target_root, x.lstrip(os.path.sep))
# Skip locations we cannot even read/traverse.
11702 if not os.access(x, os.W_OK):
11703 # Avoid Permission denied errors generated
11707 mymode = os.lstat(x).st_mode
11710 if stat.S_ISLNK(mymode):
11711 # We want to treat it like a directory if it
11712 # is a symlink to an existing directory.
11714 real_mode = os.stat(x).st_mode
11715 if stat.S_ISDIR(real_mode):
# Directories get a recursive find (pruning hidden dirs); single files
# get a maxdepth-1 find in their parent directory.
11719 if stat.S_ISDIR(mymode):
11720 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11722 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11723 os.path.split(x.rstrip(os.path.sep))
# Exclude editor backups, and NUL-terminate so filenames with spaces
# split correctly below.
11724 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11725 a = commands.getstatusoutput(mycommand)
11727 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11729 # Show the error message alone, sending stdout to /dev/null.
11730 os.system(mycommand + " 1>/dev/null")
11732 files = a[1].split('\0')
11733 # split always produces an empty string as the last element
11734 if files and not files[-1]:
11738 print "\n"+colorize("WARN", " * IMPORTANT:"),
11739 if stat.S_ISDIR(mymode):
11740 print "%d config files in '%s' need updating." % \
11743 print "config file '%s' needs updating." % x
11746 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11747 " section of the " + bold("emerge")
11748 print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	update=False):
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: path (relative to the repo) of the news items
	@param UNREAD_PATH: path where unread-news state is tracked
	@param repo_id: repository whose news items should be examined
	@param update: when True, refresh the unread-item state on disk
	@rtype: Integer
	@returns:
	1.  The number of unread but relevant news items.
	"""
	# Imported lazily so loading this module does not pull in
	# portage.news; NewsManager does all of the real work.
	from portage.news import NewsManager
	news_manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return news_manager.getUnreadItems(repo_id, update=update)
def insert_category_into_atom(atom, category):
	"""Insert ``category`` plus "/" into ``atom`` just before the first
	alphanumeric character (i.e. after any leading operator such as
	">=").  Returns None when ``atom`` contains no alphanumeric
	character at all."""
	match = re.search(r'\w', atom)
	if match is None:
		return None
	pos = match.start()
	return "%s%s/%s" % (atom[:pos], category, atom[pos:])
# is_valid_package_atom(x): return the result of portage.isvalidatom()
# for x, after grafting a placeholder "cat/" category in front of the
# package name so that category-less input can still be validated.
# NOTE(review): the guarding conditionals from the original (the
# category-missing check and the leading-operator check before the
# rewrite) are elided in this extract — confirm against the full file.
11784 def is_valid_package_atom(x):
11786 alphanum = re.search(r'\w', x)
# Insert "cat/" at the first alphanumeric character, i.e. just past
# any leading version operator such as ">=".
11788 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11789 return portage.isvalidatom(x)
# Print a pointer to the Gentoo Handbook section that explains blocked
# packages.  (A blank-line print from the original appears to be
# elided between these lines.)
11791 def show_blocker_docs_link():
11793 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11794 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11796 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Print a pointer to the documentation about masked packages.
11799 def show_mask_docs():
11800 print "For more information, see the MASKED PACKAGES section in the emerge"
11801 print "man page or refer to the Gentoo Handbook."
# action_sync(settings, trees, mtimedb, myopts, myaction):
# Implements `emerge --sync` / `emerge --metadata`: validate PORTDIR
# and SYNC, pick a transport (git / rsync / cvs) based on the tree's
# VCS dirs and the SYNC URI, perform the sync (with retries and
# server-timestamp checks for rsync), then reload the config, refresh
# the metadata cache, run global updates, and nag about pending
# portage upgrades and unread news.
# NOTE(review): a large number of original lines are elided in this
# extract (sys.exit calls, try/except scaffolding, `else:` branches,
# the retry-loop while condition, umask initialization, etc.) —
# comments below describe only the visible logic.
11803 def action_sync(settings, trees, mtimedb, myopts, myaction):
11804 xterm_titles = "notitles" not in settings.features
11805 emergelog(xterm_titles, " === sync")
11806 myportdir = settings.get("PORTDIR", None)
11807 out = portage.output.EOutput()
11809 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
# Normalize away a trailing slash and make sure PORTDIR exists.
11811 if myportdir[-1]=="/":
11812 myportdir=myportdir[:-1]
11814 st = os.stat(myportdir)
11818 print ">>>",myportdir,"not found, creating it."
11819 os.makedirs(myportdir,0755)
11820 st = os.stat(myportdir)
11823 spawn_kwargs["env"] = settings.environ()
# When running as root but PORTDIR is owned by someone else (and not
# group/world-writable for us), drop privileges for the sync so the
# tree keeps its existing uid/gid.
11824 if portage.data.secpass >= 2 and \
11825 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
11826 st.st_gid != os.getgid() and st.st_mode & 0070):
11828 homedir = pwd.getpwuid(st.st_uid).pw_dir
11832 # Drop privileges when syncing, in order to match
11833 # existing uid/gid settings.
11834 spawn_kwargs["uid"] = st.st_uid
11835 spawn_kwargs["gid"] = st.st_gid
11836 spawn_kwargs["groups"] = [st.st_gid]
11837 spawn_kwargs["env"]["HOME"] = homedir
# Keep group-write masked out when the tree itself is not group
# writable.
11839 if not st.st_mode & 0020:
11840 umask = umask | 0020
11841 spawn_kwargs["umask"] = umask
11843 syncuri = settings.get("SYNC", "").strip()
11845 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11846 noiselevel=-1, level=logging.ERROR)
# Detect whether the tree is a VCS checkout; that decides the
# transport (and blocks rsync over a checkout).
11849 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
11850 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
11853 dosyncuri = syncuri
11854 updatecache_flg = False
# --metadata: no network sync, just regenerate the cache later.
11855 if myaction == "metadata":
11856 print "skipping sync"
11857 updatecache_flg = True
11858 elif ".git" in vcs_dirs:
11859 # Update existing git repository, and ignore the syncuri. We are
11860 # going to trust the user and assume that the user is in the branch
11861 # that he/she wants updated. We'll let the user manage branches with
11863 msg = ">>> Starting git pull in %s..." % myportdir
11864 emergelog(xterm_titles, msg )
11865 writemsg_level(msg + "\n")
11866 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
11867 (portage._shell_quote(myportdir),), **spawn_kwargs)
11868 if exitcode != os.EX_OK:
11869 msg = "!!! git pull error in %s." % myportdir
11870 emergelog(xterm_titles, msg)
11871 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
11873 msg = ">>> Git pull in %s successful" % myportdir
11874 emergelog(xterm_titles, msg)
11875 writemsg_level(msg + "\n")
# git does not preserve mtimes; resync them from the metadata cache.
11876 exitcode = git_sync_timestamps(settings, myportdir)
11877 if exitcode == os.EX_OK:
11878 updatecache_flg = True
11879 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a VCS checkout — it would clobber it.
11880 for vcs_dir in vcs_dirs:
11881 writemsg_level(("!!! %s appears to be under revision " + \
11882 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
11883 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
11885 if not os.path.exists("/usr/bin/rsync"):
11886 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11887 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11892 import shlex, StringIO
# Build the rsync option list: hardcoded defaults unless the user set
# PORTAGE_RSYNC_OPTS.
11893 if settings["PORTAGE_RSYNC_OPTS"] == "":
11894 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11895 rsync_opts.extend([
11896 "--recursive", # Recurse directories
11897 "--links", # Consider symlinks
11898 "--safe-links", # Ignore links outside of tree
11899 "--perms", # Preserve permissions
11900 "--times", # Preserive mod times
11901 "--compress", # Compress the data transmitted
11902 "--force", # Force deletion on non-empty dirs
11903 "--whole-file", # Don't do block transfers, only entire files
11904 "--delete", # Delete files that aren't in the master tree
11905 "--stats", # Show final statistics about what was transfered
11906 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11907 "--exclude=/distfiles", # Exclude distfiles from consideration
11908 "--exclude=/local", # Exclude local from consideration
11909 "--exclude=/packages", # Exclude packages from consideration
11913 # The below validation is not needed when using the above hardcoded
# User-supplied options are parsed shell-style and then checked for
# the options the sync logic relies on.
11916 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11917 lexer = shlex.shlex(StringIO.StringIO(
11918 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11919 lexer.whitespace_split = True
11920 rsync_opts.extend(lexer)
# These options are mandatory for a correct sync; add them back if the
# user omitted them.
11923 for opt in ("--recursive", "--times"):
11924 if opt not in rsync_opts:
11925 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11926 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11927 rsync_opts.append(opt)
11929 for exclude in ("distfiles", "local", "packages"):
11930 opt = "--exclude=/%s" % exclude
11931 if opt not in rsync_opts:
11932 portage.writemsg(yellow("WARNING:") + \
11933 " adding required option %s not included in " % opt + \
11934 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11935 rsync_opts.append(opt)
# Official gentoo.org mirrors additionally get a timeout plus
# compress/whole-file enforced.
11937 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11938 def rsync_opt_startswith(opt_prefix):
11939 for x in rsync_opts:
11940 if x.startswith(opt_prefix):
11944 if not rsync_opt_startswith("--timeout="):
11945 rsync_opts.append("--timeout=%d" % mytimeout)
11947 for opt in ("--compress", "--whole-file"):
11948 if opt not in rsync_opts:
11949 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11950 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11951 rsync_opts.append(opt)
# Verbosity mapping from emerge options to rsync options.
11953 if "--quiet" in myopts:
11954 rsync_opts.append("--quiet") # Shut up a lot
11956 rsync_opts.append("--verbose") # Print filelist
11958 if "--verbose" in myopts:
11959 rsync_opts.append("--progress") # Progress meter for each file
11961 if "--debug" in myopts:
11962 rsync_opts.append("--checksum") # Force checksum on all files
11964 # Real local timestamp file.
11965 servertimestampfile = os.path.join(
11966 myportdir, "metadata", "timestamp.chk")
11968 content = portage.util.grabfile(servertimestampfile)
# Parse the local tree's timestamp; 0 / parse failure means unknown.
11972 mytimestamp = time.mktime(time.strptime(content[0],
11973 "%a, %d %b %Y %H:%M:%S +0000"))
11974 except (OverflowError, ValueError):
11979 rsync_initial_timeout = \
11980 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11982 rsync_initial_timeout = 15
11985 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11986 except SystemExit, e:
11987 raise # Needed else can't exit
11989 maxretries=3 #default number of retries
# Split the URI into user@ / host / :port pieces for mirror rotation.
11992 user_name, hostname, port = re.split(
11993 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11996 if user_name is None:
11998 updatecache_flg=True
11999 all_rsync_opts = set(rsync_opts)
12000 lexer = shlex.shlex(StringIO.StringIO(
12001 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
12002 lexer.whitespace_split = True
12003 extra_rsync_opts = list(lexer)
12005 all_rsync_opts.update(extra_rsync_opts)
# Honor -4/-6 (or --ipv4/--ipv6) when resolving the mirror hostname.
12006 family = socket.AF_INET
12007 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12008 family = socket.AF_INET
12009 elif socket.has_ipv6 and \
12010 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12011 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
12013 SERVER_OUT_OF_DATE = -1
12014 EXCEEDED_MAX_RETRIES = -2
# Resolve all mirror addresses and randomize the order so load is
# spread across mirrors.
12020 for addrinfo in socket.getaddrinfo(
12021 hostname, None, family, socket.SOCK_STREAM):
12022 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12023 # IPv6 addresses need to be enclosed in square brackets
12024 ips.append("[%s]" % addrinfo[4][0])
12026 ips.append(addrinfo[4][0])
12027 from random import shuffle
12029 except SystemExit, e:
12030 raise # Needed else can't exit
12031 except Exception, e:
12032 print "Notice:",str(e)
# Swap the literal IP of the chosen mirror into the sync URI.
12037 dosyncuri = syncuri.replace(
12038 "//" + user_name + hostname + port + "/",
12039 "//" + user_name + ips[0] + port + "/", 1)
12040 except SystemExit, e:
12041 raise # Needed else can't exit
12042 except Exception, e:
12043 print "Notice:",str(e)
12047 if "--ask" in myopts:
12048 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12053 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12054 if "--quiet" not in myopts:
12055 print ">>> Starting rsync with "+dosyncuri+"..."
12057 emergelog(xterm_titles,
12058 ">>> Starting retry %d of %d with %s" % \
12059 (retries,maxretries,dosyncuri))
12060 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12062 if mytimestamp != 0 and "--quiet" not in myopts:
12063 print ">>> Checking server timestamp ..."
12065 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12067 if "--debug" in myopts:
12070 exitcode = os.EX_OK
12071 servertimestamp = 0
12072 # Even if there's no timestamp available locally, fetch the
12073 # timestamp anyway as an initial probe to verify that the server is
12074 # responsive. This protects us from hanging indefinitely on a
12075 # connection attempt to an unresponsive server which rsync's
12076 # --timeout option does not prevent.
12078 # Temporary file for remote server timestamp comparison.
12079 from tempfile import mkstemp
12080 fd, tmpservertimestampfile = mkstemp()
12082 mycommand = rsynccommand[:]
12083 mycommand.append(dosyncuri.rstrip("/") + \
12084 "/metadata/timestamp.chk")
12085 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the initial probe, since rsync's own --timeout does
# not cover the connection attempt.
12089 def timeout_handler(signum, frame):
12090 raise portage.exception.PortageException("timed out")
12091 signal.signal(signal.SIGALRM, timeout_handler)
12092 # Timeout here in case the server is unresponsive. The
12093 # --timeout rsync option doesn't apply to the initial
12094 # connection attempt.
12095 if rsync_initial_timeout:
12096 signal.alarm(rsync_initial_timeout)
12098 mypids.extend(portage.process.spawn(
12099 mycommand, env=settings.environ(), returnpid=True))
12100 exitcode = os.waitpid(mypids[0], 0)[1]
12101 content = portage.grabfile(tmpservertimestampfile)
12103 if rsync_initial_timeout:
12106 os.unlink(tmpservertimestampfile)
12109 except portage.exception.PortageException, e:
# Probe timed out: kill the rsync child if it is still running.
12113 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12114 os.kill(mypids[0], signal.SIGTERM)
12115 os.waitpid(mypids[0], 0)
12116 # This is the same code rsync uses for timeout.
# Convert the waitpid status into a shell-style exit code.
12119 if exitcode != os.EX_OK:
12120 if exitcode & 0xff:
12121 exitcode = (exitcode & 0xff) << 8
12123 exitcode = exitcode >> 8
12125 portage.process.spawned_pids.remove(mypids[0])
12128 servertimestamp = time.mktime(time.strptime(
12129 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12130 except (OverflowError, ValueError):
12132 del mycommand, mypids, content
# Compare server vs. local timestamps to decide whether a full rsync
# is actually needed.
12133 if exitcode == os.EX_OK:
12134 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12135 emergelog(xterm_titles,
12136 ">>> Cancelling sync -- Already current.")
12139 print ">>> Timestamps on the server and in the local repository are the same."
12140 print ">>> Cancelling all further sync action. You are already up to date."
12142 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12146 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12147 emergelog(xterm_titles,
12148 ">>> Server out of date: %s" % dosyncuri)
12151 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12153 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12156 exitcode = SERVER_OUT_OF_DATE
12157 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# Server is newer (or timestamp unknown): do the real tree transfer.
12159 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12160 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12161 if exitcode in [0,1,3,4,11,14,20,21]:
12163 elif exitcode in [1,3,4,11,14,20,21]:
12166 # Code 2 indicates protocol incompatibility, which is expected
12167 # for servers with protocol < 29 that don't support
12168 # --prune-empty-directories. Retry for a server that supports
12169 # at least rsync protocol version 29 (>=rsync-2.6.4).
# Retry with the next mirror until maxretries is exhausted.
12174 if retries<=maxretries:
12175 print ">>> Retrying..."
12180 updatecache_flg=False
12181 exitcode = EXCEEDED_MAX_RETRIES
# Post-loop reporting, keyed on the (possibly sentinel) exit code.
12185 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12186 elif exitcode == SERVER_OUT_OF_DATE:
12188 elif exitcode == EXCEEDED_MAX_RETRIES:
12190 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12195 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12196 msg.append("that your SYNC statement is proper.")
12197 msg.append("SYNC=" + settings["SYNC"])
12199 msg.append("Rsync has reported that there is a File IO error. Normally")
12200 msg.append("this means your disk is full, but can be caused by corruption")
12201 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12202 msg.append("and try again after the problem has been fixed.")
12203 msg.append("PORTDIR=" + settings["PORTDIR"])
12205 msg.append("Rsync was killed before it finished.")
12207 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12208 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12209 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12210 msg.append("temporary problem unless complications exist with your network")
12211 msg.append("(and possibly your system's filesystem) configuration.")
# CVS transport: initial checkout or update of the gentoo-x86 module.
12215 elif syncuri[:6]=="cvs://":
12216 if not os.path.exists("/usr/bin/cvs"):
12217 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12218 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12220 cvsroot=syncuri[6:]
12221 cvsdir=os.path.dirname(myportdir)
12222 if not os.path.exists(myportdir+"/CVS"):
12224 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12225 if os.path.exists(cvsdir+"/gentoo-x86"):
12226 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
# The checkout target must not already exist (rmdir only succeeds on
# an empty directory).
12229 os.rmdir(myportdir)
12231 if e.errno != errno.ENOENT:
12233 "!!! existing '%s' directory; exiting.\n" % myportdir)
12236 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12237 print "!!! cvs checkout error; exiting."
12239 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12242 print ">>> Starting cvs update with "+syncuri+"..."
12243 retval = portage.process.spawn_bash(
12244 "cd %s; cvs -z0 -q update -dP" % \
12245 (portage._shell_quote(myportdir),), **spawn_kwargs)
12246 if retval != os.EX_OK:
12248 dosyncuri = syncuri
# Unknown scheme in SYNC.
12250 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12251 noiselevel=-1, level=logging.ERROR)
# Post-sync: only regenerate the cache when metadata-transfer is
# enabled (unless this was an explicit --metadata run).
12254 if updatecache_flg and \
12255 myaction != "metadata" and \
12256 "metadata-transfer" not in settings.features:
12257 updatecache_flg = False
12259 # Reload the whole config from scratch.
12260 settings, trees, mtimedb = load_emerge_config(trees=trees)
12261 root_config = trees[settings["ROOT"]]["root_config"]
12262 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12264 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12265 action_metadata(settings, portdb, myopts)
12267 if portage._global_updates(trees, mtimedb["updates"]):
12269 # Reload the whole config from scratch.
12270 settings, trees, mtimedb = load_emerge_config(trees=trees)
12271 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12272 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one
# so we can nag the user to upgrade portage first.
12274 mybestpv = portdb.xmatch("bestmatch-visible",
12275 portage.const.PORTAGE_PACKAGE_ATOM)
12276 mypvs = portage.best(
12277 trees[settings["ROOT"]]["vartree"].dbapi.match(
12278 portage.const.PORTAGE_PACKAGE_ATOM))
12280 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's post_sync hook if one is installed.
12282 if myaction != "metadata":
12283 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12284 retval = portage.process.spawn(
12285 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12286 dosyncuri], env=settings.environ())
12287 if retval != os.EX_OK:
12288 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12290 if(mybestpv != mypvs) and not "--quiet" in myopts:
12292 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12293 print red(" * ")+"that you update portage now, before any other packages are updated."
12295 print red(" * ")+"To update portage, run 'emerge portage' now."
12298 display_news_notification(root_config, myopts)
# git_sync_timestamps(settings, portdir):
# After a git sync, restore ebuild and eclass mtimes from the metadata
# cache (git does not preserve timestamps), skipping any file that git
# reports as locally modified, so that the cache stays valid.
# NOTE(review): docstring delimiters, several return statements,
# try:/continue scaffolding and the proc.wait() assignment to `rval`
# are elided in this extract.
12301 def git_sync_timestamps(settings, portdir):
12303 Since git doesn't preserve timestamps, synchronize timestamps between
12304 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12305 for a given file as long as the file in the working tree is not modified
12306 (relative to HEAD).
12308 cache_dir = os.path.join(portdir, "metadata", "cache")
12309 if not os.path.isdir(cache_dir):
12311 writemsg_level(">>> Synchronizing timestamps...\n")
12313 from portage.cache.cache_errors import CacheError
12315 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12316 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12317 except CacheError, e:
12318 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12319 level=logging.ERROR, noiselevel=-1)
12322 ec_dir = os.path.join(portdir, "eclass")
# Eclass names are the *.eclass filenames without the extension.
12324 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12325 if f.endswith(".eclass"))
12327 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12328 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are modified relative to HEAD; their
# cached mtimes cannot be trusted.
12331 args = [portage.const.BASH_BINARY, "-c",
12332 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12333 portage._shell_quote(portdir)]
12335 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12336 modified_files = set(l.rstrip("\n") for l in proc.stdout)
12338 if rval != os.EX_OK:
12341 modified_eclasses = set(ec for ec in ec_names \
12342 if os.path.join("eclass", ec + ".eclass") in modified_files)
# Remembers the mtime applied per eclass, to detect cache entries that
# disagree with each other.
12344 updated_ec_mtimes = {}
12346 for cpv in cache_db:
12347 cpv_split = portage.catpkgsplit(cpv)
12348 if cpv_split is None:
12349 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12350 level=logging.ERROR, noiselevel=-1)
12353 cat, pn, ver, rev = cpv_split
12354 cat, pf = portage.catsplit(cpv)
12355 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Locally modified ebuilds are skipped entirely.
12356 if relative_eb_path in modified_files:
12360 cache_entry = cache_db[cpv]
12361 eb_mtime = cache_entry.get("_mtime_")
12362 ec_mtimes = cache_entry.get("_eclasses_")
12364 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12365 level=logging.ERROR, noiselevel=-1)
12367 except CacheError, e:
12368 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12369 (cpv, e), level=logging.ERROR, noiselevel=-1)
# Validate the cached mtime data before applying it.
12372 if eb_mtime is None:
12373 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12374 level=logging.ERROR, noiselevel=-1)
12378 eb_mtime = long(eb_mtime)
12380 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12381 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12384 if ec_mtimes is None:
12385 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
12386 level=logging.ERROR, noiselevel=-1)
# Entries depending on a locally modified eclass are skipped too.
12389 if modified_eclasses.intersection(ec_mtimes):
12392 missing_eclasses = set(ec_mtimes).difference(ec_names)
12393 if missing_eclasses:
12394 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
12395 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
12399 eb_path = os.path.join(portdir, relative_eb_path)
12401 current_eb_mtime = os.stat(eb_path)
12403 writemsg_level("!!! Missing ebuild: %s\n" % \
12404 (cpv,), level=logging.ERROR, noiselevel=-1)
# Two cache entries claiming different mtimes for the same eclass is
# an inconsistency we refuse to propagate.
12407 inconsistent = False
12408 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12409 updated_mtime = updated_ec_mtimes.get(ec)
12410 if updated_mtime is not None and updated_mtime != ec_mtime:
12411 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
12412 (cpv, ec), level=logging.ERROR, noiselevel=-1)
12413 inconsistent = True
# Apply the cached mtimes to the ebuild and any not-yet-updated
# eclasses.
12419 if current_eb_mtime != eb_mtime:
12420 os.utime(eb_path, (eb_mtime, eb_mtime))
12422 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12423 if ec in updated_ec_mtimes:
12425 ec_path = os.path.join(ec_dir, ec + ".eclass")
12426 current_mtime = long(os.stat(ec_path).st_mtime)
12427 if current_mtime != ec_mtime:
12428 os.utime(ec_path, (ec_mtime, ec_mtime))
12429 updated_ec_mtimes[ec] = ec_mtime
# action_metadata(settings, portdb, myopts):
# Transfer the tree's pregenerated metadata ("metadata/cache") into
# the local depcache via portage.cache.util.mirror_cache, with a
# percentage progress display (silenced under --quiet).  Refuses to
# run when PORTAGE_DEPCACHEDIR points at a primary root directory.
# NOTE(review): sys.exit calls, ensure_dirs scaffolding and parts of
# the nested class are elided in this extract.
12433 def action_metadata(settings, portdb, myopts):
12434 portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
12435 old_umask = os.umask(0002)
12436 cachedir = os.path.normpath(settings.depcachedir)
# Safety net: a mirror into one of these paths could destroy the
# system when the cache module prunes stale entries.
12437 if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
12438 "/lib", "/opt", "/proc", "/root", "/sbin",
12439 "/sys", "/tmp", "/usr",  "/var"]:
12440 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12441 "ROOT DIRECTORY ON YOUR SYSTEM."
12442 print >> sys.stderr, \
12443 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12445 if not os.path.exists(cachedir):
# Source cache: the tree's own metadata/cache, opened read-only.
12448 ec = portage.eclass_cache.cache(portdb.porttree_root)
12449 myportdir = os.path.realpath(settings["PORTDIR"])
12450 cm = settings.load_best_module("portdbapi.metadbmodule")(
12451 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12453 from portage.cache import util
# Progress reporter that prints a percentage while iterating all cpvs.
12455 class percentage_noise_maker(util.quiet_mirroring):
12456 def __init__(self, dbapi):
12458 self.cp_all = dbapi.cp_all()
12459 l = len(self.cp_all)
12460 self.call_update_min = 100000000
# One percent of the category/package list, used to throttle updates.
12461 self.min_cp_all = l/100.0
12465 def __iter__(self):
12466 for x in self.cp_all:
12468 if self.count > self.min_cp_all:
12469 self.call_update_min = 0
12471 for y in self.dbapi.cp_list(x):
12473 self.call_update_mine = 0
12475 def update(self, *arg):
# Overwrite the previously printed percentage in place using
# backspaces.
12476 try: self.pstr = int(self.pstr) + 1
12477 except ValueError: self.pstr = 1
12478 sys.stdout.write("%s%i%%" % \
12479 ("\b" * (len(str(self.pstr))+1), self.pstr))
12481 self.call_update_min = 10000000
12483 def finish(self, *arg):
12484 sys.stdout.write("\b\b\b\b100%\n")
# --quiet: iterate cpvs silently; otherwise use the percentage
# reporter above as both source iterator and progress sink.
12487 if "--quiet" in myopts:
12488 def quicky_cpv_generator(cp_all_list):
12489 for x in cp_all_list:
12490 for y in portdb.cp_list(x):
12492 source = quicky_cpv_generator(portdb.cp_all())
12493 noise_maker = portage.cache.util.quiet_mirroring()
12495 noise_maker = source = percentage_noise_maker(portdb)
12496 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12497 eclass_cache=ec, verbose_instance=noise_maker)
12500 os.umask(old_umask)
12502 def action_regen(settings, portdb, max_jobs, max_load):
# Regenerate ebuild metadata cache entries for the whole tree via
# MetadataRegen, parallelized and bounded by max_jobs / max_load.
12503 xterm_titles = "notitles" not in settings.features
12504 emergelog(xterm_titles, " === regen")
12505 #regenerate cache entries
12506 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin since regeneration is non-interactive; failures are tolerated
# (surrounding try on a missing line — TODO confirm) but SystemExit must
# always propagate so the process can exit.
12508 os.close(sys.stdin.fileno())
12509 except SystemExit, e:
12510 raise # Needed else can't exit
12515 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12518 portage.writemsg_stdout("done!\n")
12520 def action_config(settings, trees, myopts, myfiles):
# Run the pkg_config phase ("config") of exactly one installed package atom,
# prompting for a selection (with --ask) when the atom matches several
# installed versions.
# NOTE(review): excerpt is non-contiguous; interior lines are missing.
12521 if len(myfiles) != 1:
12522 print red("!!! config can only take a single package atom at this time\n")
12524 if not is_valid_package_atom(myfiles[0]):
12525 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12527 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12528 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
12532 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12533 except portage.exception.AmbiguousPackageName, e:
12534 # Multiple matches thrown from cpv_expand
12537 print "No packages found.\n"
12539 elif len(pkgs) > 1:
12540 if "--ask" in myopts:
# Interactive selection among the multiple matches; "X" aborts.
12542 print "Please select a package to configure:"
12546 options.append(str(idx))
12547 print options[-1]+") "+pkg
12549 options.append("X")
12550 idx = userquery("Selection?", options)
12553 pkg = pkgs[int(idx)-1]
12555 print "The following packages available:"
12558 print "\nPlease use a specific atom or the --ask option."
12564 if "--ask" in myopts:
12565 if userquery("Ready to configure "+pkg+"?") == "No":
12568 print "Configuring pkg..."
12570 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12571 mysettings = portage.config(clone=settings)
12572 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12573 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# Run the "config" phase, then "clean" on success.
# NOTE(review): the debug kwarg below compares the PORTAGE_DEBUG string to
# the int 1 (== 1, always False), unlike the == "1" test above — looks like
# a latent bug; confirm against upstream before changing.
12574 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12576 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12577 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12578 if retval == os.EX_OK:
12579 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12580 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12583 def action_info(settings, trees, myopts, myfiles):
# Implement `emerge --info`: print portage/toolchain versions, system
# settings, key configuration variables, USE flags, and — when package
# atoms are given — per-package build-time settings that differ from the
# current configuration.
# NOTE(review): excerpt is non-contiguous; interior lines are missing.
12584 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12585 settings.profile_path, settings["CHOST"],
12586 trees[settings["ROOT"]]["vartree"].dbapi)
# Centered "System Settings" banner.
12588 header_title = "System Settings"
12590 print header_width * "="
12591 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12592 print header_width * "="
12593 print "System uname: "+platform.platform(aliased=1)
# Tree sync timestamp read from metadata/timestamp.chk.
12595 lastSync = portage.grabfile(os.path.join(
12596 settings["PORTDIR"], "metadata", "timestamp.chk"))
12597 print "Timestamp of tree:",
# Report distcc/ccache versions and whether those features are enabled.
12603 output=commands.getstatusoutput("distcc --version")
12605 print str(output[1].split("\n",1)[0]),
12606 if "distcc" in settings.features:
12611 output=commands.getstatusoutput("ccache -V")
12613 print str(output[1].split("\n",1)[0]),
12614 if "ccache" in settings.features:
# Versions of key toolchain packages, plus anything listed in
# profiles/info_pkgs, deduplicated.
12619 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12620 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
12621 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12622 myvars = portage.util.unique_array(myvars)
12626 if portage.isvalidatom(x):
12627 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
# Keep (pn, ver, rev) triples and sort by version for display.
12628 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12629 pkg_matches.sort(portage.pkgcmp)
12631 for pn, ver, rev in pkg_matches:
12633 pkgs.append(ver + "-" + rev)
12637 pkgs = ", ".join(pkgs)
12638 print "%-20s %s" % (x+":", pkgs)
12640 print "%-20s %s" % (x+":", "[NOT VALID]")
12642 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# With --verbose dump every variable; otherwise a curated list plus any
# additions from profiles/info_vars.
12644 if "--verbose" in myopts:
12645 myvars=settings.keys()
12647 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12648 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12649 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12650 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12652 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12654 myvars = portage.util.unique_array(myvars)
12660 print '%s="%s"' % (x, settings[x])
# Print USE with USE_EXPAND-derived flags split out into their own
# variables (e.g. video_cards_* reported under VIDEO_CARDS).
12662 use = set(settings["USE"].split())
12663 use_expand = settings["USE_EXPAND"].split()
12665 for varname in use_expand:
12666 flag_prefix = varname.lower() + "_"
12667 for f in list(use):
12668 if f.startswith(flag_prefix):
12672 print 'USE="%s"' % " ".join(use),
12673 for varname in use_expand:
12674 myval = settings.get(varname)
12676 print '%s="%s"' % (varname, myval),
12679 unset_vars.append(x)
12681 print "Unset: "+", ".join(unset_vars)
# With --debug, show cvs id strings of loaded portage submodules.
12684 if "--debug" in myopts:
12685 for x in dir(portage):
12686 module = getattr(portage, x)
12687 if "cvs_id_string" in dir(module):
12688 print "%s: %s" % (str(x), str(module.cvs_id_string))
12690 # See if we can find any packages installed matching the strings
12691 # passed on the command line
12693 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12694 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12696 mypkgs.extend(vardb.match(x))
12698 # If some packages were found...
12700 # Get our global settings (we only print stuff if it varies from
12701 # the current config)
12702 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12703 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12705 pkgsettings = portage.config(clone=settings)
12707 for myvar in mydesiredvars:
12708 global_vals[myvar] = set(settings.get(myvar, "").split())
12710 # Loop through each package
12711 # Only print settings if they differ from global settings
12712 header_title = "Package Settings"
12713 print header_width * "="
12714 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12715 print header_width * "="
12716 from portage.output import EOutput
12719 # Get all package specific variables
12720 auxvalues = vardb.aux_get(pkg, auxkeys)
12722 for i in xrange(len(auxkeys)):
12723 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12725 for myvar in mydesiredvars:
12726 # If the package variable doesn't match the
12727 # current global variable, something has changed
12728 # so set diff_found so we know to print
12729 if valuesmap[myvar] != global_vals[myvar]:
12730 diff_values[myvar] = valuesmap[myvar]
# Only compare USE flags that appear in IUSE (after stripping defaults).
12731 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12732 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12733 pkgsettings.reset()
12734 # If a matching ebuild is no longer available in the tree, maybe it
12735 # would make sense to compare against the flags for the best
12736 # available version with the same slot?
12738 if portdb.cpv_exists(pkg):
12740 pkgsettings.setcpv(pkg, mydb=mydb)
12741 if valuesmap["IUSE"].intersection(
12742 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12743 diff_values["USE"] = valuesmap["USE"]
12744 # If a difference was found, print the info for
12747 # Print package info
12748 print "%s was built with the following:" % pkg
12749 for myvar in mydesiredvars + ["USE"]:
12750 if myvar in diff_values:
12751 mylist = list(diff_values[myvar])
12753 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Also run the ebuild's pkg_info phase for each matched package.
12755 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12756 ebuildpath = vardb.findname(pkg)
12757 if not ebuildpath or not os.path.exists(ebuildpath):
12758 out.ewarn("No ebuild found for '%s'" % pkg)
12760 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12761 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12762 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12765 def action_search(root_config, myopts, myfiles, spinner):
# Implement `emerge --search`: run each search term through a shared search
# instance, honoring --searchdesc/--quiet/--usepkg/--usepkgonly.
12767 print "emerge: no search terms provided."
12769 searchinstance = search(root_config,
12770 spinner, "--searchdesc" in myopts,
12771 "--quiet" not in myopts, "--usepkg" in myopts,
12772 "--usepkgonly" in myopts)
12773 for mysearch in myfiles:
# A bad regex in one term is reported; the try wrapping this call is on a
# missing line — TODO confirm remaining terms still run.
12775 searchinstance.execute(mysearch)
12776 except re.error, comment:
12777 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12779 searchinstance.output()
12781 def action_depclean(settings, trees, ldpath_mtimes,
12782 myopts, action, myfiles, spinner):
# Implements `emerge --depclean` and `emerge --prune`: build a dependency
# graph rooted at the system/world sets (plus, for prune, all installed
# packages), keep packages the graph requires, and unmerge the rest in a
# topologically safe order.
# NOTE(review): this excerpt is non-contiguous; interior lines are missing,
# so comments describe only the visible code.
12783 # Kill packages that aren't explicitly merged or are required as a
12784 # dependency of another package. World file is explicit.
12786 # Global depclean or prune operations are not very safe when there are
12787 # missing dependencies since it's unknown how badly incomplete
12788 # the dependency graph is, and we might accidentally remove packages
12789 # that should have been pulled into the graph. On the other hand, it's
12790 # relatively safe to ignore missing deps when only asked to remove
12791 # specific packages.
12792 allow_missing_deps = len(myfiles) > 0
# Safety warning shown before a full (argument-less) depclean.
12795 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12796 msg.append("mistakes. Packages that are part of the world set will always\n")
12797 msg.append("be kept. They can be manually added to this set with\n")
12798 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12799 msg.append("package.provided (see portage(5)) will be removed by\n")
12800 msg.append("depclean, even if they are part of the world set.\n")
12802 msg.append("As a safety measure, depclean will not remove any packages\n")
12803 msg.append("unless *all* required dependencies have been resolved. As a\n")
12804 msg.append("consequence, it is often necessary to run %s\n" % \
12805 good("`emerge --update"))
12806 msg.append(good("--newuse --deep @system @world`") + \
12807 " prior to depclean.\n")
12809 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12810 portage.writemsg_stdout("\n")
12812 portage.writemsg_stdout(colorize("WARN", " * ") + x)
12814 xterm_titles = "notitles" not in settings.features
12815 myroot = settings["ROOT"]
12816 root_config = trees[myroot]["root_config"]
12817 getSetAtoms = root_config.setconfig.getSetAtoms
12818 vardb = trees[myroot]["vartree"].dbapi
# Everything reachable from system and world must always be kept.
12820 required_set_names = ("system", "world")
12824 for s in required_set_names:
12825 required_sets[s] = InternalPackageSet(
12826 initial_atoms=getSetAtoms(s))
12829 # When removing packages, use a temporary version of world
12830 # which excludes packages that are intended to be eligible for
12832 world_temp_set = required_sets["world"]
12833 system_set = required_sets["system"]
# Warn loudly (and count down) when system/world are empty — proceeding
# could remove essential packages.
12835 if not system_set or not world_temp_set:
12838 writemsg_level("!!! You have no system list.\n",
12839 level=logging.ERROR, noiselevel=-1)
12841 if not world_temp_set:
12842 writemsg_level("!!! You have no world file.\n",
12843 level=logging.WARNING, noiselevel=-1)
12845 writemsg_level("!!! Proceeding is likely to " + \
12846 "break your installation.\n",
12847 level=logging.WARNING, noiselevel=-1)
12848 if "--pretend" not in myopts:
12849 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12851 if action == "depclean":
12852 emergelog(xterm_titles, " >>> depclean")
# Validate and expand any package-atom arguments into args_set.
12855 args_set = InternalPackageSet()
12858 if not is_valid_package_atom(x):
12859 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12860 level=logging.ERROR, noiselevel=-1)
12861 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12864 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12865 except portage.exception.AmbiguousPackageName, e:
12866 msg = "The short ebuild name \"" + x + \
12867 "\" is ambiguous. Please specify " + \
12868 "one of the following " + \
12869 "fully-qualified ebuild names instead:"
12870 for line in textwrap.wrap(msg, 70):
12871 writemsg_level("!!! %s\n" % (line,),
12872 level=logging.ERROR, noiselevel=-1)
12874 writemsg_level(" %s\n" % colorize("INFORM", i),
12875 level=logging.ERROR, noiselevel=-1)
12876 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12879 matched_packages = False
12882 matched_packages = True
12884 if not matched_packages:
12885 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the "remove"-mode resolver used to compute the required graph.
12889 writemsg_level("\nCalculating dependencies ")
12890 resolver_params = create_depgraph_params(myopts, "remove")
12891 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12892 vardb = resolver.trees[myroot]["vartree"].dbapi
12894 if action == "depclean":
12897 # Pull in everything that's installed but not matched
12898 # by an argument atom since we don't want to clean any
12899 # package if something depends on it.
12901 world_temp_set.clear()
12906 if args_set.findAtomForPackage(pkg) is None:
12907 world_temp_set.add("=" + pkg.cpv)
12909 except portage.exception.InvalidDependString, e:
12910 show_invalid_depstring_notice(pkg,
12911 pkg.metadata["PROVIDE"], str(e))
# On an invalid PROVIDE, keep the package rather than risk removing it.
12913 world_temp_set.add("=" + pkg.cpv)
12916 elif action == "prune":
12918 # Pull in everything that's installed since we don't
12919 # want to prune a package if something depends on it.
12920 world_temp_set.clear()
12921 world_temp_set.update(vardb.cp_all())
12925 # Try to prune everything that's slotted.
12926 for cp in vardb.cp_all():
12927 if len(vardb.cp_list(cp)) > 1:
12930 # Remove atoms from world that match installed packages
12931 # that are also matched by argument atoms, but do not remove
12932 # them if they match the highest installed version.
12935 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12936 if not pkgs_for_cp or pkg not in pkgs_for_cp:
12937 raise AssertionError("package expected in matches: " + \
12938 "cp = %s, cpv = %s matches = %s" % \
12939 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12941 highest_version = pkgs_for_cp[-1]
12942 if pkg == highest_version:
12943 # pkg is the highest version
12944 world_temp_set.add("=" + pkg.cpv)
12947 if len(pkgs_for_cp) <= 1:
12948 raise AssertionError("more packages expected: " + \
12949 "cp = %s, cpv = %s matches = %s" % \
12950 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12953 if args_set.findAtomForPackage(pkg) is None:
12954 world_temp_set.add("=" + pkg.cpv)
12956 except portage.exception.InvalidDependString, e:
12957 show_invalid_depstring_notice(pkg,
12958 pkg.metadata["PROVIDE"], str(e))
12960 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the required sets, then complete the graph.
12964 for s, package_set in required_sets.iteritems():
12965 set_atom = SETPREFIX + s
12966 set_arg = SetArg(arg=set_atom, set=package_set,
12967 root_config=resolver.roots[myroot])
12968 set_args[s] = set_arg
12969 for atom in set_arg.set:
12970 resolver._dep_stack.append(
12971 Dependency(atom=atom, root=myroot, parent=set_arg))
12972 resolver.digraph.add(set_arg, None)
12974 success = resolver._complete_graph()
12975 writemsg_level("\b\b... done!\n")
12977 resolver.display_problems()
# Report required deps that could not be resolved; truthiness of the
# return aborts the clean when missing deps are not allowed.
12982 def unresolved_deps():
12984 unresolvable = set()
12985 for dep in resolver._initially_unsatisfied_deps:
12986 if isinstance(dep.parent, Package) and \
12987 (dep.priority > UnmergeDepPriority.SOFT):
12988 unresolvable.add((dep.atom, dep.parent.cpv))
12990 if not unresolvable:
12993 if unresolvable and not allow_missing_deps:
12994 prefix = bad(" * ")
12996 msg.append("Dependencies could not be completely resolved due to")
12997 msg.append("the following required packages not being installed:")
12999 for atom, parent in unresolvable:
13000 msg.append(" %s pulled in by:" % (atom,))
13001 msg.append(" %s" % (parent,))
13003 msg.append("Have you forgotten to run " + \
13004 good("`emerge --update --newuse --deep @system @world`") + " prior")
13005 msg.append(("to %s? It may be necessary to manually " + \
13006 "uninstall packages that no longer") % action)
13007 msg.append("exist in the portage tree since " + \
13008 "it may not be possible to satisfy their")
13009 msg.append("dependencies. Also, be aware of " + \
13010 "the --with-bdeps option that is documented")
13011 msg.append("in " + good("`man emerge`") + ".")
13012 if action == "prune":
13014 msg.append("If you would like to ignore " + \
13015 "dependencies then use %s." % good("--nodeps"))
13016 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13017 level=logging.ERROR, noiselevel=-1)
13021 if unresolved_deps():
# Snapshot the computed graph; count how many installed packages it keeps.
13024 graph = resolver.digraph.copy()
13025 required_pkgs_total = 0
13027 if isinstance(node, Package):
13028 required_pkgs_total += 1
# With --verbose, explain why a kept package is being kept.
13030 def show_parents(child_node):
13031 parent_nodes = graph.parent_nodes(child_node)
13032 if not parent_nodes:
13033 # With --prune, the highest version can be pulled in without any
13034 # real parent since all installed packages are pulled in. In that
13035 # case there's nothing to show here.
13038 for node in parent_nodes:
13039 parent_strs.append(str(getattr(node, "cpv", node)))
13042 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13043 for parent_str in parent_strs:
13044 msg.append(" %s\n" % (parent_str,))
13046 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Select installed packages that are absent from the required graph.
13048 def create_cleanlist():
13049 pkgs_to_remove = []
13051 if action == "depclean":
13057 arg_atom = args_set.findAtomForPackage(pkg)
13058 except portage.exception.InvalidDependString:
13059 # this error has already been displayed by now
13063 if pkg not in graph:
13064 pkgs_to_remove.append(pkg)
13065 elif "--verbose" in myopts:
13070 if pkg not in graph:
13071 pkgs_to_remove.append(pkg)
13072 elif "--verbose" in myopts:
13075 elif action == "prune":
13076 # Prune really uses all installed instead of world. It's not
13077 # a real reverse dependency so don't display it as such.
13078 graph.remove(set_args["world"])
13080 for atom in args_set:
13081 for pkg in vardb.match_pkgs(atom):
13082 if pkg not in graph:
13083 pkgs_to_remove.append(pkg)
13084 elif "--verbose" in myopts:
13087 if not pkgs_to_remove:
13089 ">>> No packages selected for removal by %s\n" % action)
13090 if "--verbose" not in myopts:
13092 ">>> To see reverse dependencies, use %s\n" % \
13094 if action == "prune":
13096 ">>> To ignore dependencies, use %s\n" % \
13099 return pkgs_to_remove
13101 cleanlist = create_cleanlist()
13104 clean_set = set(cleanlist)
13106 # Check if any of these package are the sole providers of libraries
13107 # with consumers that have not been selected for removal. If so, these
13108 # packages and any dependencies need to be added to the graph.
13109 real_vardb = trees[myroot]["vartree"].dbapi
13110 linkmap = real_vardb.linkmap
13111 liblist = linkmap.listLibraryObjects()
13112 consumer_cache = {}
13113 provider_cache = {}
13117 writemsg_level(">>> Checking for lib consumers...\n")
13119 for pkg in cleanlist:
13120 pkg_dblink = real_vardb._dblink(pkg.cpv)
13121 provided_libs = set()
# Library files owned (installed) by this removal candidate.
13123 for lib in liblist:
13124 if pkg_dblink.isowner(lib, myroot):
13125 provided_libs.add(lib)
13127 if not provided_libs:
# Map each provided lib to its consumers (cached per lib path).
13131 for lib in provided_libs:
13132 lib_consumers = consumer_cache.get(lib)
13133 if lib_consumers is None:
13134 lib_consumers = linkmap.findConsumers(lib)
13135 consumer_cache[lib] = lib_consumers
13137 consumers[lib] = lib_consumers
# Ignore consumers that belong to the candidate package itself.
13142 for lib, lib_consumers in consumers.items():
13143 for consumer_file in list(lib_consumers):
13144 if pkg_dblink.isowner(consumer_file, myroot):
13145 lib_consumers.remove(consumer_file)
13146 if not lib_consumers:
# For each remaining consumer, resolve the providers of the lib's soname.
13152 for lib, lib_consumers in consumers.iteritems():
13154 soname = soname_cache.get(lib)
13156 soname = linkmap.getSoname(lib)
13157 soname_cache[lib] = soname
13159 consumer_providers = []
13160 for lib_consumer in lib_consumers:
13161 providers = provider_cache.get(lib)
13162 if providers is None:
13163 providers = linkmap.findProviders(lib_consumer)
13164 provider_cache[lib_consumer] = providers
13165 if soname not in providers:
13166 # Why does this happen?
13168 consumer_providers.append(
13169 (lib_consumer, providers[soname]))
13171 consumers[lib] = consumer_providers
13173 consumer_map[pkg] = consumers
# Attribute consumer/provider files to owning packages in one batch pass.
13177 search_files = set()
13178 for consumers in consumer_map.itervalues():
13179 for lib, consumer_providers in consumers.iteritems():
13180 for lib_consumer, providers in consumer_providers:
13181 search_files.add(lib_consumer)
13182 search_files.update(providers)
13184 writemsg_level(">>> Assigning files to packages...\n")
13185 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13187 for pkg, consumers in consumer_map.items():
13188 for lib, consumer_providers in consumers.items():
13189 lib_consumers = set()
13191 for lib_consumer, providers in consumer_providers:
13192 owner_set = file_owners.get(lib_consumer)
13193 provider_dblinks = set()
13194 provider_pkgs = set()
13196 if len(providers) > 1:
13197 for provider in providers:
13198 provider_set = file_owners.get(provider)
13199 if provider_set is not None:
13200 provider_dblinks.update(provider_set)
# If an alternative provider package is NOT being removed, this consumer
# remains satisfied without keeping the candidate.
13202 if len(provider_dblinks) > 1:
13203 for provider_dblink in provider_dblinks:
13204 pkg_key = ("installed", myroot,
13205 provider_dblink.mycpv, "nomerge")
13206 if pkg_key not in clean_set:
13207 provider_pkgs.add(vardb.get(pkg_key))
13212 if owner_set is not None:
13213 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't count.
13215 for consumer_dblink in list(lib_consumers):
13216 if ("installed", myroot, consumer_dblink.mycpv,
13217 "nomerge") in clean_set:
13218 lib_consumers.remove(consumer_dblink)
13222 consumers[lib] = lib_consumers
13226 del consumer_map[pkg]
13229 # TODO: Implement a package set for rebuilding consumer packages.
# Warn about packages kept back to avoid breaking link-level dependencies.
13231 msg = "In order to avoid breakage of link level " + \
13232 "dependencies, one or more packages will not be removed. " + \
13233 "This can be solved by rebuilding " + \
13234 "the packages that pulled them in."
13236 prefix = bad(" * ")
13237 from textwrap import wrap
13238 writemsg_level("".join(prefix + "%s\n" % line for \
13239 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13242 for pkg, consumers in consumer_map.iteritems():
13243 unique_consumers = set(chain(*consumers.values()))
13244 unique_consumers = sorted(consumer.mycpv \
13245 for consumer in unique_consumers)
13247 msg.append(" %s pulled in by:" % (pkg.cpv,))
13248 for consumer in unique_consumers:
13249 msg.append(" %s" % (consumer,))
13251 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13252 level=logging.WARNING, noiselevel=-1)
13254 # Add lib providers to the graph as children of lib consumers,
13255 # and also add any dependencies pulled in by the provider.
13256 writemsg_level(">>> Adding lib providers to graph...\n")
13258 for pkg, consumers in consumer_map.iteritems():
13259 for consumer_dblink in set(chain(*consumers.values())):
13260 consumer_pkg = vardb.get(("installed", myroot,
13261 consumer_dblink.mycpv, "nomerge"))
13262 if not resolver._add_pkg(pkg,
13263 Dependency(parent=consumer_pkg,
13264 priority=UnmergeDepPriority(runtime=True),
13266 resolver.display_problems()
# Re-complete the graph after adding providers, then recompute the clean
# list against the enlarged graph.
13269 writemsg_level("\nCalculating dependencies ")
13270 success = resolver._complete_graph()
13271 writemsg_level("\b\b... done!\n")
13272 resolver.display_problems()
13275 if unresolved_deps():
13278 graph = resolver.digraph.copy()
13279 required_pkgs_total = 0
13281 if isinstance(node, Package):
13282 required_pkgs_total += 1
13283 cleanlist = create_cleanlist()
13286 clean_set = set(cleanlist)
13288 # Use a topological sort to create an unmerge order such that
13289 # each package is unmerged before its dependencies. This is
13290 # necessary to avoid breaking things that may need to run
13291 # during pkg_prerm or pkg_postrm phases.
13293 # Create a new graph to account for dependencies between the
13294 # packages being unmerged.
13298 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13299 runtime = UnmergeDepPriority(runtime=True)
13300 runtime_post = UnmergeDepPriority(runtime_post=True)
13301 buildtime = UnmergeDepPriority(buildtime=True)
13303 "RDEPEND": runtime,
13304 "PDEPEND": runtime_post,
13305 "DEPEND": buildtime,
# Build an unmerge-order graph among only the packages being removed.
13308 for node in clean_set:
13309 graph.add(node, None)
13311 node_use = node.metadata["USE"].split()
13312 for dep_type in dep_keys:
13313 depstr = node.metadata[dep_type]
# Relax strict dep checking while evaluating installed packages' deps;
# restored immediately after dep_check.
13317 portage.dep._dep_check_strict = False
13318 success, atoms = portage.dep_check(depstr, None, settings,
13319 myuse=node_use, trees=resolver._graph_trees,
13322 portage.dep._dep_check_strict = True
13324 # Ignore invalid deps of packages that will
13325 # be uninstalled anyway.
13328 priority = priority_map[dep_type]
13330 if not isinstance(atom, portage.dep.Atom):
13331 # Ignore invalid atoms returned from dep_check().
13335 matches = vardb.match_pkgs(atom)
13338 for child_node in matches:
13339 if child_node in clean_set:
13340 graph.add(child_node, node, priority=priority)
13343 if len(graph.order) == len(graph.root_nodes()):
13344 # If there are no dependencies between packages
13345 # let unmerge() group them by cat/pn.
13347 cleanlist = [pkg.cpv for pkg in graph.order]
13349 # Order nodes from lowest to highest overall reference count for
13350 # optimal root node selection.
13351 node_refcounts = {}
13352 for node in graph.order:
13353 node_refcounts[node] = len(graph.parent_nodes(node))
13354 def cmp_reference_count(node1, node2):
13355 return node_refcounts[node1] - node_refcounts[node2]
13356 graph.order.sort(cmp_reference_count)
# Repeatedly pop root nodes, relaxing dep priorities step by step to
# break circular dependencies when no true roots remain.
13358 ignore_priority_range = [None]
13359 ignore_priority_range.extend(
13360 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13361 while not graph.empty():
13362 for ignore_priority in ignore_priority_range:
13363 nodes = graph.root_nodes(ignore_priority=ignore_priority)
13367 raise AssertionError("no root nodes")
13368 if ignore_priority is not None:
13369 # Some deps have been dropped due to circular dependencies,
13370 # so only pop one node in order to minimize the number that
13375 cleanlist.append(node.cpv)
13377 unmerge(root_config, myopts, "unmerge", cleanlist,
13378 ldpath_mtimes, ordered=ordered)
13380 if action == "prune":
# Final summary of package counts (and removal count).
13383 if not cleanlist and "--quiet" in myopts:
13386 print "Packages installed: "+str(len(vardb.cpv_all()))
13387 print "Packages in world: " + \
13388 str(len(root_config.sets["world"].getAtoms()))
13389 print "Packages in system: " + \
13390 str(len(root_config.sets["system"].getAtoms()))
13391 print "Required packages: "+str(required_pkgs_total)
13392 if "--pretend" in myopts:
13393 print "Number to remove: "+str(len(cleanlist))
13395 print "Number removed: "+str(len(cleanlist))
13397 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13398 skip_masked=False, skip_unsatisfied=False):
13400 Construct a depgraph for the given resume list. This will raise
13401 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13403 @returns: (success, depgraph, dropped_tasks)
13405 mergelist = mtimedb["resume"]["mergelist"]
13406 dropped_tasks = set()
13408 mydepgraph = depgraph(settings, trees,
13409 myopts, myparams, spinner)
13411 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13412 skip_masked=skip_masked)
13413 except depgraph.UnsatisfiedResumeDep, e:
13414 if not skip_unsatisfied:
# With skip_unsatisfied, prune the resume list: drop the unsatisfied
# parents and, transitively, any merge parents whose own dependencies
# those dropped packages satisfied.
13417 graph = mydepgraph.digraph
13418 unsatisfied_parents = dict((dep.parent, dep.parent) \
13419 for dep in e.value)
13420 traversed_nodes = set()
13421 unsatisfied_stack = list(unsatisfied_parents)
13422 while unsatisfied_stack:
13423 pkg = unsatisfied_stack.pop()
13424 if pkg in traversed_nodes:
13426 traversed_nodes.add(pkg)
13428 # If this package was pulled in by a parent
13429 # package scheduled for merge, removing this
13430 # package may cause the parent package's
13431 # dependency to become unsatisfied.
13432 for parent_node in graph.parent_nodes(pkg):
13433 if not isinstance(parent_node, Package) \
13434 or parent_node.operation not in ("merge", "nomerge"):
13437 graph.child_nodes(parent_node,
13438 ignore_priority=DepPriority.SOFT)
13439 if pkg in unsatisfied:
13440 unsatisfied_parents[parent_node] = parent_node
13441 unsatisfied_stack.append(parent_node)
13443 pruned_mergelist = [x for x in mergelist \
13444 if isinstance(x, list) and \
13445 tuple(x) not in unsatisfied_parents]
13447 # If the mergelist doesn't shrink then this loop is infinite.
13448 if len(pruned_mergelist) == len(mergelist):
13449 # This happens if a package can't be dropped because
13450 # it's already installed, but it has unsatisfied PDEPEND.
13452 mergelist[:] = pruned_mergelist
13454 # Exclude installed packages that have been removed from the graph due
13455 # to failure to build/install runtime dependencies after the dependent
13456 # package has already been installed.
13457 dropped_tasks.update(pkg for pkg in \
13458 unsatisfied_parents if pkg.operation != "nomerge")
# Break internal references so the pruned nodes can be garbage collected.
13459 mydepgraph.break_refs(unsatisfied_parents)
13461 del e, graph, traversed_nodes, \
13462 unsatisfied_parents, unsatisfied_stack
13466 return (success, mydepgraph, dropped_tasks)
13468 def action_build(settings, trees, mtimedb,
13469 myopts, myaction, myfiles, spinner):
13471 # validate the state of the resume data
13472 # so that we can make assumptions later.
13473 for k in ("resume", "resume_backup"):
13474 if k not in mtimedb:
13476 resume_data = mtimedb[k]
13477 if not isinstance(resume_data, dict):
13480 mergelist = resume_data.get("mergelist")
13481 if not isinstance(mergelist, list):
13484 for x in mergelist:
13485 if not (isinstance(x, list) and len(x) == 4):
13487 pkg_type, pkg_root, pkg_key, pkg_action = x
13488 if pkg_root not in trees:
13489 # Current $ROOT setting differs,
13490 # so the list must be stale.
13496 resume_opts = resume_data.get("myopts")
13497 if not isinstance(resume_opts, (dict, list)):
13500 favorites = resume_data.get("favorites")
13501 if not isinstance(favorites, list):
13506 if "--resume" in myopts and \
13507 ("resume" in mtimedb or
13508 "resume_backup" in mtimedb):
13510 if "resume" not in mtimedb:
13511 mtimedb["resume"] = mtimedb["resume_backup"]
13512 del mtimedb["resume_backup"]
13514 # "myopts" is a list for backward compatibility.
13515 resume_opts = mtimedb["resume"].get("myopts", [])
13516 if isinstance(resume_opts, list):
13517 resume_opts = dict((k,True) for k in resume_opts)
13518 for opt in ("--skipfirst", "--ask", "--tree"):
13519 resume_opts.pop(opt, None)
13520 myopts.update(resume_opts)
13522 if "--debug" in myopts:
13523 writemsg_level("myopts %s\n" % (myopts,))
13525 # Adjust config according to options of the command being resumed.
13526 for myroot in trees:
13527 mysettings = trees[myroot]["vartree"].settings
13528 mysettings.unlock()
13529 adjust_config(myopts, mysettings)
13531 del myroot, mysettings
13533 ldpath_mtimes = mtimedb["ldpath"]
13536 buildpkgonly = "--buildpkgonly" in myopts
13537 pretend = "--pretend" in myopts
13538 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13539 ask = "--ask" in myopts
13540 nodeps = "--nodeps" in myopts
13541 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13542 tree = "--tree" in myopts
13543 if nodeps and tree:
13545 del myopts["--tree"]
13546 portage.writemsg(colorize("WARN", " * ") + \
13547 "--tree is broken with --nodeps. Disabling...\n")
13548 debug = "--debug" in myopts
13549 verbose = "--verbose" in myopts
13550 quiet = "--quiet" in myopts
13551 if pretend or fetchonly:
13552 # make the mtimedb readonly
13553 mtimedb.filename = None
13554 if "--digest" in myopts:
13555 msg = "The --digest option can prevent corruption from being" + \
13556 " noticed. The `repoman manifest` command is the preferred" + \
13557 " way to generate manifests and it is capable of doing an" + \
13558 " entire repository or category at once."
13559 prefix = bad(" * ")
13560 writemsg(prefix + "\n")
13561 from textwrap import wrap
13562 for line in wrap(msg, 72):
13563 writemsg("%s%s\n" % (prefix, line))
13564 writemsg(prefix + "\n")
13566 if "--quiet" not in myopts and \
13567 ("--pretend" in myopts or "--ask" in myopts or \
13568 "--tree" in myopts or "--verbose" in myopts):
13570 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13572 elif "--buildpkgonly" in myopts:
13576 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13578 print darkgreen("These are the packages that would be %s, in reverse order:") % action
13582 print darkgreen("These are the packages that would be %s, in order:") % action
13585 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13586 if not show_spinner:
13587 spinner.update = spinner.update_quiet
13590 favorites = mtimedb["resume"].get("favorites")
13591 if not isinstance(favorites, list):
13595 print "Calculating dependencies ",
13596 myparams = create_depgraph_params(myopts, myaction)
13598 resume_data = mtimedb["resume"]
13599 mergelist = resume_data["mergelist"]
13600 if mergelist and "--skipfirst" in myopts:
13601 for i, task in enumerate(mergelist):
13602 if isinstance(task, list) and \
13603 task and task[-1] == "merge":
13607 skip_masked = "--skipfirst" in myopts
13608 skip_unsatisfied = "--skipfirst" in myopts
13612 success, mydepgraph, dropped_tasks = resume_depgraph(
13613 settings, trees, mtimedb, myopts, myparams, spinner,
13614 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13615 except (portage.exception.PackageNotFound,
13616 depgraph.UnsatisfiedResumeDep), e:
13617 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13618 mydepgraph = e.depgraph
13621 from textwrap import wrap
13622 from portage.output import EOutput
13625 resume_data = mtimedb["resume"]
13626 mergelist = resume_data.get("mergelist")
13627 if not isinstance(mergelist, list):
13629 if mergelist and debug or (verbose and not quiet):
13630 out.eerror("Invalid resume list:")
13633 for task in mergelist:
13634 if isinstance(task, list):
13635 out.eerror(indent + str(tuple(task)))
13638 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13639 out.eerror("One or more packages are either masked or " + \
13640 "have missing dependencies:")
13643 for dep in e.value:
13644 if dep.atom is None:
13645 out.eerror(indent + "Masked package:")
13646 out.eerror(2 * indent + str(dep.parent))
13649 out.eerror(indent + str(dep.atom) + " pulled in by:")
13650 out.eerror(2 * indent + str(dep.parent))
13652 msg = "The resume list contains packages " + \
13653 "that are either masked or have " + \
13654 "unsatisfied dependencies. " + \
13655 "Please restart/continue " + \
13656 "the operation manually, or use --skipfirst " + \
13657 "to skip the first package in the list and " + \
13658 "any other packages that may be " + \
13659 "masked or have missing dependencies."
13660 for line in wrap(msg, 72):
13662 elif isinstance(e, portage.exception.PackageNotFound):
13663 out.eerror("An expected package is " + \
13664 "not available: %s" % str(e))
13666 msg = "The resume list contains one or more " + \
13667 "packages that are no longer " + \
13668 "available. Please restart/continue " + \
13669 "the operation manually."
13670 for line in wrap(msg, 72):
13674 print "\b\b... done!"
13678 portage.writemsg("!!! One or more packages have been " + \
13679 "dropped due to\n" + \
13680 "!!! masking or unsatisfied dependencies:\n\n",
13682 for task in dropped_tasks:
13683 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
13684 portage.writemsg("\n", noiselevel=-1)
13687 if mydepgraph is not None:
13688 mydepgraph.display_problems()
13689 if not (ask or pretend):
13690 # delete the current list and also the backup
13691 # since it's probably stale too.
13692 for k in ("resume", "resume_backup"):
13693 mtimedb.pop(k, None)
13698 if ("--resume" in myopts):
13699 print darkgreen("emerge: It seems we have nothing to resume...")
13702 myparams = create_depgraph_params(myopts, myaction)
13703 if "--quiet" not in myopts and "--nodeps" not in myopts:
13704 print "Calculating dependencies ",
13706 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13708 retval, favorites = mydepgraph.select_files(myfiles)
13709 except portage.exception.PackageNotFound, e:
13710 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13712 except portage.exception.PackageSetNotFound, e:
13713 root_config = trees[settings["ROOT"]]["root_config"]
13714 display_missing_pkg_set(root_config, e.value)
13717 print "\b\b... done!"
13719 mydepgraph.display_problems()
13722 if "--pretend" not in myopts and \
13723 ("--ask" in myopts or "--tree" in myopts or \
13724 "--verbose" in myopts) and \
13725 not ("--quiet" in myopts and "--ask" not in myopts):
13726 if "--resume" in myopts:
13727 mymergelist = mydepgraph.altlist()
13728 if len(mymergelist) == 0:
13729 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13731 favorites = mtimedb["resume"]["favorites"]
13732 retval = mydepgraph.display(
13733 mydepgraph.altlist(reversed=tree),
13734 favorites=favorites)
13735 mydepgraph.display_problems()
13736 if retval != os.EX_OK:
13738 prompt="Would you like to resume merging these packages?"
13740 retval = mydepgraph.display(
13741 mydepgraph.altlist(reversed=("--tree" in myopts)),
13742 favorites=favorites)
13743 mydepgraph.display_problems()
13744 if retval != os.EX_OK:
13747 for x in mydepgraph.altlist():
13748 if isinstance(x, Package) and x.operation == "merge":
13752 sets = trees[settings["ROOT"]]["root_config"].sets
13753 world_candidates = None
13754 if "--noreplace" in myopts and \
13755 not oneshot and favorites:
13756 # Sets that are not world candidates are filtered
13757 # out here since the favorites list needs to be
13758 # complete for depgraph.loadResumeCommand() to
13759 # operate correctly.
13760 world_candidates = [x for x in favorites \
13761 if not (x.startswith(SETPREFIX) and \
13762 not sets[x[1:]].world_candidate)]
13763 if "--noreplace" in myopts and \
13764 not oneshot and world_candidates:
13766 for x in world_candidates:
13767 print " %s %s" % (good("*"), x)
13768 prompt="Would you like to add these packages to your world favorites?"
13769 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13770 prompt="Nothing to merge; would you like to auto-clean packages?"
13773 print "Nothing to merge; quitting."
13776 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13777 prompt="Would you like to fetch the source files for these packages?"
13779 prompt="Would you like to merge these packages?"
13781 if "--ask" in myopts and userquery(prompt) == "No":
13786 # Don't ask again (e.g. when auto-cleaning packages after merge)
13787 myopts.pop("--ask", None)
13789 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13790 if ("--resume" in myopts):
13791 mymergelist = mydepgraph.altlist()
13792 if len(mymergelist) == 0:
13793 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13795 favorites = mtimedb["resume"]["favorites"]
13796 retval = mydepgraph.display(
13797 mydepgraph.altlist(reversed=tree),
13798 favorites=favorites)
13799 mydepgraph.display_problems()
13800 if retval != os.EX_OK:
13803 retval = mydepgraph.display(
13804 mydepgraph.altlist(reversed=("--tree" in myopts)),
13805 favorites=favorites)
13806 mydepgraph.display_problems()
13807 if retval != os.EX_OK:
13809 if "--buildpkgonly" in myopts:
13810 graph_copy = mydepgraph.digraph.clone()
13811 for node in list(graph_copy.order):
13812 if not isinstance(node, Package):
13813 graph_copy.remove(node)
13814 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13815 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13816 print "!!! You have to merge the dependencies before you can build this package.\n"
13819 if "--buildpkgonly" in myopts:
13820 graph_copy = mydepgraph.digraph.clone()
13821 for node in list(graph_copy.order):
13822 if not isinstance(node, Package):
13823 graph_copy.remove(node)
13824 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13825 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13826 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
13829 if ("--resume" in myopts):
13830 favorites=mtimedb["resume"]["favorites"]
13831 mymergelist = mydepgraph.altlist()
13832 mydepgraph.break_refs(mymergelist)
13833 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13834 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13835 del mydepgraph, mymergelist
13836 clear_caches(trees)
13838 retval = mergetask.merge()
13839 merge_count = mergetask.curval
13841 if "resume" in mtimedb and \
13842 "mergelist" in mtimedb["resume"] and \
13843 len(mtimedb["resume"]["mergelist"]) > 1:
13844 mtimedb["resume_backup"] = mtimedb["resume"]
13845 del mtimedb["resume"]
13847 mtimedb["resume"]={}
13848 # Stored as a dict starting with portage-2.1.6_rc1, and supported
13849 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13850 # a list type for options.
13851 mtimedb["resume"]["myopts"] = myopts.copy()
13853 # Convert Atom instances to plain str since the mtimedb loader
13854 # sets unpickler.find_global = None which causes unpickler.load()
13855 # to raise the following exception:
13857 # cPickle.UnpicklingError: Global and instance pickles are not supported.
13859 # TODO: Maybe stop setting find_global = None, or find some other
13860 # way to avoid accidental triggering of the above UnpicklingError.
13861 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
13863 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13864 for pkgline in mydepgraph.altlist():
13865 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13866 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13867 tmpsettings = portage.config(clone=settings)
13869 if settings.get("PORTAGE_DEBUG", "") == "1":
13871 retval = portage.doebuild(
13872 y, "digest", settings["ROOT"], tmpsettings, edebug,
13873 ("--pretend" in myopts),
13874 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13877 pkglist = mydepgraph.altlist()
13878 mydepgraph.saveNomergeFavorites()
13879 mydepgraph.break_refs(pkglist)
13880 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13881 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13882 del mydepgraph, pkglist
13883 clear_caches(trees)
13885 retval = mergetask.merge()
13886 merge_count = mergetask.curval
13888 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13889 if "yes" == settings.get("AUTOCLEAN"):
13890 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
13891 unmerge(trees[settings["ROOT"]]["root_config"],
13892 myopts, "clean", [],
13893 ldpath_mtimes, autoclean=1)
13895 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13896 + " AUTOCLEAN is disabled. This can cause serious"
13897 + " problems due to overlapping packages.\n")
13898 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that the user requested two mutually-exclusive emerge actions
# on one command line.
# NOTE(review): this excerpt is a numbered listing with gaps; original
# line 13905 (presumably an exit/abort after the message) is not visible
# here -- confirm against the full source.
13902 def multiple_actions(action1, action2):
13903 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
13904 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# Pre-process argv so flags with optional values (-j/--jobs) always get an
# explicit value before optparse sees them, since optparse cannot handle
# optional option-arguments natively.
# NOTE(review): numbered listing with gaps (13908, 13913-13915, 13919,
# 13921, 13925-13932, ... missing) -- docstring delimiters, the result-list
# initialization and the loop header are not visible in this excerpt.
13907 def insert_optional_args(args):
13909 Parse optional arguments and insert a value if one has
13910 not been provided. This is done before feeding the args
13911 to the optparse parser since that parser does not support
13912 this feature natively.
13916 jobs_opts = ("-j", "--jobs")
13917 arg_stack = args[:]
13918 arg_stack.reverse()
# pop() on the reversed copy consumes the arguments left-to-right
13920 arg = arg_stack.pop()
# a clustered short option containing "j", e.g. "-pj" or "-j4"
13922 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
13923 if not (short_job_opt or arg in jobs_opts):
13924 new_args.append(arg)
13927 # Insert an empty placeholder in order to
13928 # satisfy the requirements of optparse.
13930 new_args.append("--jobs")
# a job count may be fused into the short option, e.g. "-j4"
13933 if short_job_opt and len(arg) > 2:
13934 if arg[:2] == "-j":
13936 job_count = int(arg[2:])
13938 saved_opts = arg[2:]
# strip the "j" out of the cluster; the rest is re-emitted below
13941 saved_opts = arg[1:].replace("j", "")
# the count may also follow as a separate argument ("-j 4")
13943 if job_count is None and arg_stack:
13945 job_count = int(arg_stack[-1])
13949 # Discard the job count from the stack
13950 # since we're consuming it here.
13953 if job_count is None:
13954 # unlimited number of jobs
13955 new_args.append("True")
13957 new_args.append(str(job_count))
13959 if saved_opts is not None:
# re-emit any short options that were clustered with -j
13960 new_args.append("-" + saved_opts)
# Parse the emerge command line into (myaction, myopts, myfiles):
# the single requested action, a dict of enabled options, and the
# remaining file/atom/set arguments.
# NOTE(review): numbered listing with many gaps -- several dict entries of
# argument_options, try/except framing around the int()/float() parses,
# and the myaction/myopts/myfiles initializations are not visible here.
13964 def parse_opts(tmpcmdline, silent=False):
# actions/options/shortmapping are module-level tables defined elsewhere
13969 global actions, options, shortmapping
13971 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# options that take an argument, keyed by long option name
13972 argument_options = {
13974 "help":"specify the location for portage configuration files",
13978 "help":"enable or disable color output",
13980 "choices":("y", "n")
13985 "help" : "Specifies the number of packages to build " + \
13991 "--load-average": {
13993 "help" :"Specifies that no new builds should be started " + \
13994 "if there are other builds running and the load average " + \
13995 "is at least LOAD (a floating-point number).",
14001 "help":"include unnecessary build time dependencies",
14003 "choices":("y", "n")
14006 "help":"specify conditions to trigger package reinstallation",
14008 "choices":["changed-use"]
14012 from optparse import OptionParser
14013 parser = OptionParser()
# emerge implements its own --help action, so drop optparse's builtin
14014 if parser.has_option("--help"):
14015 parser.remove_option("--help")
14017 for action_opt in actions:
14018 parser.add_option("--" + action_opt, action="store_true",
14019 dest=action_opt.replace("-", "_"), default=False)
14020 for myopt in options:
14021 parser.add_option(myopt, action="store_true",
14022 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14023 for shortopt, longopt in shortmapping.iteritems():
14024 parser.add_option("-" + shortopt, action="store_true",
14025 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14026 for myalias, myopt in longopt_aliases.iteritems():
14027 parser.add_option(myalias, action="store_true",
14028 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14030 for myopt, kwargs in argument_options.iteritems():
14031 parser.add_option(myopt,
14032 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# give -j/--jobs an explicit value before optparse sees the args
14034 tmpcmdline = insert_optional_args(tmpcmdline)
14036 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# "True" is the placeholder for an unlimited job count
14040 if myoptions.jobs == "True":
14044 jobs = int(myoptions.jobs)
14048 if jobs is not True and \
14052 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14053 (myoptions.jobs,), noiselevel=-1)
14055 myoptions.jobs = jobs
14057 if myoptions.load_average:
14059 load_average = float(myoptions.load_average)
# non-positive load averages are rejected
14063 if load_average <= 0.0:
14064 load_average = None
14066 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14067 (myoptions.load_average,), noiselevel=-1)
14069 myoptions.load_average = load_average
# copy every enabled boolean option into the myopts dict
14071 for myopt in options:
14072 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14074 myopts[myopt] = True
14076 for myopt in argument_options:
14077 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# exactly one action is allowed; a second one aborts via multiple_actions
14081 for action_opt in actions:
14082 v = getattr(myoptions, action_opt.replace("-", "_"))
14085 multiple_actions(myaction, action_opt)
14087 myaction = action_opt
14091 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config.validate() on the vartree settings of every root in trees."""
	for root in trees:
		trees[root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Drop memoized dbapi state for every root, then flush portage's
	global directory cache.  Call order per root is preserved:
	porttree melt + aux cache, bintree aux cache + cache, vartree linkmap.
	"""
	for root_trees in trees.itervalues():
		portdbapi = root_trees["porttree"].dbapi
		bindbapi = root_trees["bintree"].dbapi
		# undo a previous freeze() so xmatch results are no longer memoized
		portdbapi.melt()
		portdbapi._aux_cache.clear()
		bindbapi._aux_cache.clear()
		bindbapi._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Build the (settings, trees, mtimedb) triple that drives an emerge run.
# NOTE(review): numbered listing with gaps (14109, 14113, 14115, 14120,
# 14122, 14124, 14126-14127, 14130 missing) -- the kwargs initialization
# and the loop body that selects a non-"/" root are not visible here.
14108 def load_emerge_config(trees=None):
# collect overrides for create_trees() from the environment
14110 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14111 v = os.environ.get(envvar, None)
14112 if v and v.strip():
14114 trees = portage.create_trees(trees=trees, **kwargs)
# attach a RootConfig (with set configuration) to every root
14116 for root, root_trees in trees.iteritems():
14117 settings = root_trees["vartree"].settings
14118 setconfig = load_default_config(settings, root_trees)
14119 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14121 settings = trees["/"]["vartree"].settings
14123 for myroot in trees:
14125 settings = trees[myroot]["vartree"].settings
14128 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14129 mtimedb = portage.MtimeDB(mtimedbfile)
14131 return settings, trees, mtimedb
# NOTE(review): numbered listing with gaps -- the try: lines framing the
# int() parses (e.g. 14152, 14162, 14187) and some else: lines are not
# visible in this excerpt.
14133 def adjust_config(myopts, settings):
14134 """Make emerge specific adjustments to the config."""
14136 # To enhance usability, make some vars case insensitive by forcing them to
14138 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14139 if myvar in settings:
14140 settings[myvar] = settings[myvar].lower()
14141 settings.backup_changes(myvar)
14144 # Kill noauto as it will break merges otherwise.
14145 if "noauto" in settings.features:
14146 while "noauto" in settings.features:
14147 settings.features.remove("noauto")
14148 settings["FEATURES"] = " ".join(settings.features)
14149 settings.backup_changes("FEATURES")
# normalize CLEAN_DELAY to a valid integer, warning on parse failure
14153 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14154 except ValueError, e:
14155 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14156 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14157 settings["CLEAN_DELAY"], noiselevel=-1)
14158 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14159 settings.backup_changes("CLEAN_DELAY")
# same normalization for EMERGE_WARNING_DELAY (default 10 seconds)
14161 EMERGE_WARNING_DELAY = 10
14163 EMERGE_WARNING_DELAY = int(settings.get(
14164 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14165 except ValueError, e:
14166 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14167 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14168 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14169 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14170 settings.backup_changes("EMERGE_WARNING_DELAY")
# propagate command-line verbosity flags into the config
14172 if "--quiet" in myopts:
14173 settings["PORTAGE_QUIET"]="1"
14174 settings.backup_changes("PORTAGE_QUIET")
14176 if "--verbose" in myopts:
14177 settings["PORTAGE_VERBOSE"] = "1"
14178 settings.backup_changes("PORTAGE_VERBOSE")
14180 # Set so that configs will be merged regardless of remembered status
14181 if ("--noconfmem" in myopts):
14182 settings["NOCONFMEM"]="1"
14183 settings.backup_changes("NOCONFMEM")
14185 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; anything else is reported and rejected
14188 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14189 if PORTAGE_DEBUG not in (0, 1):
14190 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14191 PORTAGE_DEBUG, noiselevel=-1)
14192 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14195 except ValueError, e:
14196 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14197 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14198 settings["PORTAGE_DEBUG"], noiselevel=-1)
# --debug on the command line forces debug mode on
14200 if "--debug" in myopts:
14202 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14203 settings.backup_changes("PORTAGE_DEBUG")
14205 if settings.get("NOCOLOR") not in ("yes","true"):
14206 portage.output.havecolor = 1
14208 """The explicit --color < y | n > option overrides the NOCOLOR environment
14209 variable and stdout auto-detection."""
14210 if "--color" in myopts:
14211 if "y" == myopts["--color"]:
14212 portage.output.havecolor = 1
14213 settings["NOCOLOR"] = "false"
14215 portage.output.havecolor = 0
14216 settings["NOCOLOR"] = "true"
14217 settings.backup_changes("NOCOLOR")
# without an explicit --color, disable color when stdout is not a tty
14218 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14219 portage.output.havecolor = 0
14220 settings["NOCOLOR"] = "true"
14221 settings.backup_changes("NOCOLOR")
# Apply process priorities for the emerge run.
# NOTE(review): only the def line is visible in this excerpt; the body
# (original lines 14224-14226) is missing -- presumably it calls the
# nice()/ionice() helpers below. Confirm against the full source.
14223 def apply_priorities(settings):
# Renice the current process to PORTAGE_NICENESS, reporting (not raising)
# on failure.
# NOTE(review): the try: line (original 14228) framing the os.nice() call
# is missing from this excerpt.
14227 def nice(settings):
14229 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# OSError: os.nice rejected the value; ValueError: non-integer setting
14230 except (OSError, ValueError), e:
14231 out = portage.output.EOutput()
14232 out.eerror("Failed to change nice value to '%s'" % \
14233 settings["PORTAGE_NICENESS"])
14234 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND against the current PID.
# NOTE(review): numbered listing with gaps (14237, 14239, 14241-14243,
# 14247-14248, 14253-14254 missing) -- the early-return for an unset
# command and the try: framing the spawn are not visible here.
14236 def ionice(settings):
14238 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14240 ionice_cmd = shlex.split(ionice_cmd)
14244 from portage.util import varexpand
# substitute ${PID} in the configured command with our own pid
14245 variables = {"PID" : str(os.getpid())}
14246 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14249 rval = portage.process.spawn(cmd, env=os.environ)
14250 except portage.exception.CommandNotFound:
14251 # The OS kernel probably doesn't support ionice,
14252 # so return silently.
# a non-zero exit status from the command is reported to the user
14255 if rval != os.EX_OK:
14256 out = portage.output.EOutput()
14257 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14258 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Report that a named package set does not exist, listing the sets that do.
# NOTE(review): numbered listing with gaps (14261-14262, 14266-14267,
# 14270-14271 missing) -- the msg list initialization is not visible here.
14260 def display_missing_pkg_set(root_config, set_name):
14263 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14264 "The following sets exist:") % \
14265 colorize("INFORM", set_name))
14268 for s in sorted(root_config.sets):
14269 msg.append(" %s" % s)
# emit the whole message at ERROR level in one write
14272 writemsg_level("".join("%s\n" % l for l in msg),
14273 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments in myfiles into atoms (or keep them as set
# references, depending on myaction), evaluate set expressions
# (intersection/difference/union), and validate the set configuration.
# Returns (newargs, retval).
# NOTE(review): numbered listing with many gaps -- initializations of
# newargs/retval/options/missing_sets, ARG_START/ARG_END/IS_OPERATOR
# definitions, several else:/continue lines, and try: framings are not
# visible in this excerpt.
14275 def expand_set_arguments(myfiles, myaction, root_config):
14277 setconfig = root_config.setconfig
14279 sets = setconfig.getSets()
14281 # In order to know exactly which atoms/sets should be added to the
14282 # world file, the depgraph performs set expansion later. It will get
14283 # confused about where the atoms came from if it's not allowed to
14284 # expand them itself.
14285 do_not_expand = (None, )
# allow bare "system"/"world" as aliases for the prefixed set names
14288 if a in ("system", "world"):
14289 newargs.append(SETPREFIX+a)
14296 # separators for set arguments
14300 # WARNING: all operators must be of equal length
14302 DIFF_OPERATOR = "-@"
14303 UNION_OPERATOR = "+@"
# first pass: parse per-set option arguments of the form @set[k=v,...]
14305 for i in range(0, len(myfiles)):
14306 if myfiles[i].startswith(SETPREFIX):
14309 x = myfiles[i][len(SETPREFIX):]
14312 start = x.find(ARG_START)
14313 end = x.find(ARG_END)
14314 if start > 0 and start < end:
14315 namepart = x[:start]
14316 argpart = x[start+1:end]
14318 # TODO: implement proper quoting
14319 args = argpart.split(",")
14323 k, v = a.split("=", 1)
# bare argument names are treated as boolean flags
14326 options[a] = "True"
14327 setconfig.update(namepart, options)
14328 newset += (x[:start-len(namepart)]+namepart)
14329 x = x[end+len(ARG_END):]
14333 myfiles[i] = SETPREFIX+newset
# re-read sets, since setconfig.update() may have changed them
14335 sets = setconfig.getSets()
14337 # display errors that occured while loading the SetConfig instance
14338 for e in setconfig.errors:
14339 print colorize("BAD", "Error during set creation: %s" % e)
14341 # emerge relies on the existance of sets with names "world" and "system"
14342 required_sets = ("world", "system")
14345 for s in required_sets:
14347 missing_sets.append(s)
# build an English list ("a", "a and b", "a, b, and c") of missing sets
14349 if len(missing_sets) > 2:
14350 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
14351 missing_sets_str += ', and "%s"' % missing_sets[-1]
14352 elif len(missing_sets) == 2:
14353 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
14355 missing_sets_str = '"%s"' % missing_sets[-1]
14356 msg = ["emerge: incomplete set configuration, " + \
14357 "missing set(s): %s" % missing_sets_str]
14359 msg.append(" sets defined: %s" % ", ".join(sets))
14360 msg.append(" This usually means that '%s'" % \
14361 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
14362 msg.append(" is missing or corrupt.")
14364 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
14366 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# second pass: expand each @set argument into atoms / set references
14369 if a.startswith(SETPREFIX):
14370 # support simple set operations (intersection, difference and union)
14371 # on the commandline. Expressions are evaluated strictly left-to-right
14372 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
14373 expression = a[len(SETPREFIX):]
# peel operators off the right end until only the leftmost set remains
14376 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
14377 is_pos = expression.rfind(IS_OPERATOR)
14378 diff_pos = expression.rfind(DIFF_OPERATOR)
14379 union_pos = expression.rfind(UNION_OPERATOR)
14380 op_pos = max(is_pos, diff_pos, union_pos)
14381 s1 = expression[:op_pos]
# operators are equal length, so len(IS_OPERATOR) works for all three
14382 s2 = expression[op_pos+len(IS_OPERATOR):]
14383 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
14385 display_missing_pkg_set(root_config, s2)
14387 expr_sets.insert(0, s2)
14388 expr_ops.insert(0, op)
14390 if not expression in sets:
14391 display_missing_pkg_set(root_config, expression)
14393 expr_sets.insert(0, expression)
# evaluate the collected operands/operators left-to-right
14394 result = set(setconfig.getSetAtoms(expression))
14395 for i in range(0, len(expr_ops)):
14396 s2 = setconfig.getSetAtoms(expr_sets[i+1])
14397 if expr_ops[i] == IS_OPERATOR:
14398 result.intersection_update(s2)
14399 elif expr_ops[i] == DIFF_OPERATOR:
14400 result.difference_update(s2)
14401 elif expr_ops[i] == UNION_OPERATOR:
14404 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
14405 newargs.extend(result)
14407 s = a[len(SETPREFIX):]
14409 display_missing_pkg_set(root_config, s)
14411 setconfig.active.append(s)
14413 set_atoms = setconfig.getSetAtoms(s)
14414 except portage.exception.PackageSetNotFound, e:
14415 writemsg_level(("emerge: the given set '%s' " + \
14416 "contains a non-existent set named '%s'.\n") % \
14417 (s, e), level=logging.ERROR, noiselevel=-1)
# unmerge-style actions require sets that support unmerging
14419 if myaction in unmerge_actions and \
14420 not sets[s].supportsOperation("unmerge"):
14421 sys.stderr.write("emerge: the given set '%s' does " % s + \
14422 "not support unmerge operations\n")
14424 elif not set_atoms:
14425 print "emerge: '%s' is an empty set" % s
# for actions in do_not_expand the depgraph expands the set itself
14426 elif myaction not in do_not_expand:
14427 newargs.extend(set_atoms)
14429 newargs.append(SETPREFIX+s)
14430 for e in sets[s].errors:
14434 return (newargs, retval)
# Warn about portage tree overlays that lack a profiles/repo_name entry.
# Returns True when at least one repository is missing its repo_name.
# NOTE(review): numbered listing with gaps (14443, 14451, 14453, 14456,
# 14459, 14465 missing) -- the loop over repos and the msg list
# initialization are not visible in this excerpt.
14436 def repo_name_check(trees):
14437 missing_repo_names = set()
14438 for root, root_trees in trees.iteritems():
14439 if "porttree" in root_trees:
14440 portdb = root_trees["porttree"].dbapi
# start by assuming every tree is missing, then discard the named ones
14441 missing_repo_names.update(portdb.porttrees)
14442 repos = portdb.getRepositories()
14444 missing_repo_names.discard(portdb.getRepositoryPath(r))
14445 if portdb.porttree_root in missing_repo_names and \
14446 not os.path.exists(os.path.join(
14447 portdb.porttree_root, "profiles")):
14448 # This is normal if $PORTDIR happens to be empty,
14449 # so don't warn about it.
14450 missing_repo_names.remove(portdb.porttree_root)
14452 if missing_repo_names:
14454 msg.append("WARNING: One or more repositories " + \
14455 "have missing repo_name entries:")
14457 for p in missing_repo_names:
14458 msg.append("\t%s/profiles/repo_name" % (p,))
14460 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
14461 "should be a plain text file containing a unique " + \
14462 "name for the repository on the first line.", 70))
14463 writemsg_level("".join("%s\n" % l for l in msg),
14464 level=logging.WARNING, noiselevel=-1)
14466 return bool(missing_repo_names)
# Warn when CONFIG_PROTECT is empty for any root.
# NOTE(review): original line 14472 (presumably the condition that appends
# the root name only for non-default roots) is missing from this excerpt.
14468 def config_protect_check(trees):
14469 for root, root_trees in trees.iteritems():
14470 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
14471 msg = "!!! CONFIG_PROTECT is empty"
14473 msg += " for '%s'" % root
14474 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report an ambiguous short ebuild name: in quiet mode just list the
# matching fully-qualified names; otherwise run a search and show its
# output for each candidate.
# NOTE(review): numbered listing with gaps (14477, 14483-14484, 14489,
# 14493-14494 missing) -- the return after the quiet branch and the
# search-output lines inside the final loop are not visible here.
14476 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
14478 if "--quiet" in myopts:
14479 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14480 print "!!! one of the following fully-qualified ebuild names instead:\n"
14481 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14482 print " " + colorize("INFORM", cp)
14485 s = search(root_config, spinner, "--searchdesc" in myopts,
14486 "--quiet" not in myopts, "--usepkg" in myopts,
14487 "--usepkgonly" in myopts)
14488 null_cp = portage.dep_getkey(insert_category_into_atom(
14490 cat, atom_pn = portage.catsplit(null_cp)
14491 s.searchkey = atom_pn
14492 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14495 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14496 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that every root has a valid profile before allowing actions other
# than --help/--info/--sync/--version; warn and reject otherwise.
# NOTE(review): numbered listing with gaps (14500, 14502, 14505,
# 14514-14515 missing) -- the return statements (presumably os.EX_OK for
# the allowed actions / valid profiles, and an error code at the end) are
# not visible in this excerpt.
14498 def profile_check(trees, myaction, myopts):
14499 if myaction in ("info", "sync"):
14501 elif "--version" in myopts or "--help" in myopts:
14503 for root, root_trees in trees.iteritems():
14504 if root_trees["root_config"].settings.profiles:
14506 # generate some profile related warning messages
14507 validate_ebuild_environment(trees)
14508 msg = "If you have just changed your profile configuration, you " + \
14509 "should revert back to the previous configuration. Due to " + \
14510 "your current profile being invalid, allowed actions are " + \
14511 "limited to --help, --info, --sync, and --version."
14512 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
14513 level=logging.ERROR, noiselevel=-1)
14518 global portage # NFC why this is necessary now - genone
14519 portage._disable_legacy_globals()
14520 # Disable color until we're sure that it should be enabled (after
14521 # EMERGE_DEFAULT_OPTS has been parsed).
14522 portage.output.havecolor = 0
14523 # This first pass is just for options that need to be known as early as
14524 # possible, such as --config-root. They will be parsed again later,
14525 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14526 # the value of --config-root).
14527 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14528 if "--debug" in myopts:
14529 os.environ["PORTAGE_DEBUG"] = "1"
14530 if "--config-root" in myopts:
14531 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14533 # Portage needs to ensure a sane umask for the files it creates.
14535 settings, trees, mtimedb = load_emerge_config()
14536 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14537 rval = profile_check(trees, myaction, myopts)
14538 if rval != os.EX_OK:
14541 if portage._global_updates(trees, mtimedb["updates"]):
14543 # Reload the whole config from scratch.
14544 settings, trees, mtimedb = load_emerge_config(trees=trees)
14545 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14547 xterm_titles = "notitles" not in settings.features
14550 if "--ignore-default-opts" not in myopts:
14551 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14552 tmpcmdline.extend(sys.argv[1:])
14553 myaction, myopts, myfiles = parse_opts(tmpcmdline)
14555 if "--digest" in myopts:
14556 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14557 # Reload the whole config from scratch so that the portdbapi internal
14558 # config is updated with new FEATURES.
14559 settings, trees, mtimedb = load_emerge_config(trees=trees)
14560 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14562 for myroot in trees:
14563 mysettings = trees[myroot]["vartree"].settings
14564 mysettings.unlock()
14565 adjust_config(myopts, mysettings)
14566 mysettings["PORTAGE_COUNTER_HASH"] = \
14567 trees[myroot]["vartree"].dbapi._counter_hash()
14568 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14570 del myroot, mysettings
14572 apply_priorities(settings)
14574 spinner = stdout_spinner()
14575 if "candy" in settings.features:
14576 spinner.update = spinner.update_scroll
14578 if "--quiet" not in myopts:
14579 portage.deprecated_profile_check(settings=settings)
14580 repo_name_check(trees)
14581 config_protect_check(trees)
# Collect eclasses that overlays override from PORTDIR so we can warn
# about the metadata-cache invalidation cost below.
eclasses_overridden = {}
for mytrees in trees.itervalues():
mydb = mytrees["porttree"].dbapi
# Freeze the portdbapi for performance (memoize all xmatch results).
eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
# Warn unless the user opted out via PORTAGE_ECLASS_WARNING_ENABLE=0.
# NOTE(review): the else: line between the singular/plural writemsg calls
# is missing from this chunk.
if eclasses_overridden and \
settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
prefix = bad(" * ")
if len(eclasses_overridden) == 1:
writemsg(prefix + "Overlay eclass overrides " + \
"eclass from PORTDIR:\n", noiselevel=-1)
writemsg(prefix + "Overlay eclasses override " + \
"eclasses from PORTDIR:\n", noiselevel=-1)
writemsg(prefix + "\n", noiselevel=-1)
for eclass_name in sorted(eclasses_overridden):
writemsg(prefix + " '%s/%s.eclass'\n" % \
(eclasses_overridden[eclass_name], eclass_name),
writemsg(prefix + "\n", noiselevel=-1)
msg = "It is best to avoid overriding eclasses from PORTDIR " + \
"because it will trigger invalidation of cached ebuild metadata " + \
"that is distributed with the portage tree. If you must " + \
"override eclasses from PORTDIR then you are advised to add " + \
"FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
"`emerge --regen` after each time that you run `emerge --sync`. " + \
"Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
"you would like to disable this warning."
from textwrap import wrap
# Wrap to 72 columns so the " * " prefix keeps lines within 80.
for line in wrap(msg, 72):
writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
# Easter egg, followed by a warning when a raw .ebuild/.tbz2 path is
# passed on the command line.
# NOTE(review): several lines of this span are missing from the chunk —
# the cowsay print statement that opens the string literal below, and
# the header of the loop that binds x.
if "moo" in myfiles:
Larry loves Gentoo (""" + platform.system() + """)
_______________________
< Have you mooed today? >
-----------------------
ext = os.path.splitext(x)[1]
if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
root_config = trees[settings["ROOT"]]["root_config"]
# --list-sets: print the available package sets, one per line.
if myaction == "list-sets":
sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
# only expand sets for actions taking package arguments
oldargs = myfiles[:]
if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
if retval != os.EX_OK:
# Need to handle empty sets specially, otherwise emerge will react
# with the help message for empty argument lists
if oldargs and not myfiles:
print "emerge: no targets left after set expansion"
# --tree and --columns are mutually exclusive display formats.
if ("--tree" in myopts) and ("--columns" in myopts):
print "emerge: can't specify both of \"--tree\" and \"--columns\"."
# --quiet silences both the spinner and portage's own messages.
if ("--quiet" in myopts):
spinner.update = spinner.update_quiet
portage.util.noiselimit = -1
# Always create packages if FEATURES=buildpkg
# Imply --buildpkg if --buildpkgonly
if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
if "--buildpkg" not in myopts:
myopts["--buildpkg"] = True
# Also allow -S to invoke search action (-sS)
if ("--searchdesc" in myopts):
if myaction and myaction != "search":
myfiles.append(myaction)
if "--search" not in myopts:
myopts["--search"] = True
myaction = "search"
# Always try and fetch binary packages if FEATURES=getbinpkg
if ("getbinpkg" in settings.features):
myopts["--getbinpkg"] = True
if "--buildpkgonly" in myopts:
# --buildpkgonly will not merge anything, so
# it cancels all binary package options.
for opt in ("--getbinpkg", "--getbinpkgonly",
"--usepkg", "--usepkgonly"):
myopts.pop(opt, None)
# --fetch-all-uri implies --fetchonly.
if "--fetch-all-uri" in myopts:
myopts["--fetchonly"] = True
# --skipfirst only makes sense when resuming.
if "--skipfirst" in myopts and "--resume" not in myopts:
myopts["--resume"] = True
# Cascade of implications:
# --getbinpkgonly => --usepkgonly and --getbinpkg,
# --getbinpkg / --usepkgonly => --usepkg.
if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
myopts["--usepkgonly"] = True
if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
myopts["--getbinpkg"] = True
if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
myopts["--usepkg"] = True
# Also allow -K to apply --usepkg/-k
if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
myopts["--usepkg"] = True
# Allow -p to remove --ask
if ("--pretend" in myopts) and ("--ask" in myopts):
print ">>> --pretend disables --ask... removing --ask from options."
del myopts["--ask"]
# forbid --ask when not in a terminal
# note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
if ("--ask" in myopts) and (not sys.stdin.isatty()):
portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
# Quiet the spinner so it cannot interleave with PORTAGE_DEBUG output.
if settings.get("PORTAGE_DEBUG", "") == "1":
spinner.update = spinner.update_quiet
if "python-trace" in settings.features:
import portage.debug
portage.debug.set_trace(True)
# Fall back to the non-animated spinner when stdout is not a tty.
if not ("--quiet" in myopts):
if not sys.stdout.isatty() or ("--nospinner" in myopts):
spinner.update = spinner.update_basic
if "--version" in myopts:
print getportageversion(settings["PORTDIR"], settings["ROOT"],
settings.profile_path, settings["CHOST"],
trees[settings["ROOT"]]["vartree"].dbapi)
elif "--help" in myopts:
_emerge.help.help(myaction, myopts, portage.output.havecolor)
if "--debug" in myopts:
print "myaction", myaction
print "myopts", myopts
# No action, no targets and not resuming: show the help text.
if not myaction and not myfiles and "--resume" not in myopts:
_emerge.help.help(myaction, myopts, portage.output.havecolor)
# Cache the frequently-tested option flags as plain booleans.
pretend = "--pretend" in myopts
fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
buildpkgonly = "--buildpkgonly" in myopts
# NOTE(review): lines are missing from this span (a term of the
# need_superuser expression, the else: branches pairing with the
# access_desc assignments, and the non---ask error path's exit).
# Code left untouched.
# check if root user is the current user for the actions where emerge needs this
if portage.secpass < 2:
# We've already allowed "--version" and "--help" above.
if "--pretend" not in myopts and myaction not in ("search","info"):
need_superuser = not \
(buildpkgonly and secpass >= 1) or \
myaction in ("metadata", "regen") or \
(myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
if portage.secpass < 1 or \
access_desc = "superuser"
access_desc = "portage group"
# Always show portage_group_warning() when only portage group
# access is required but the user is not in the portage group.
from portage.data import portage_group_warning
if "--ask" in myopts:
myopts["--pretend"] = True
del myopts["--ask"]
print ("%s access is required... " + \
"adding --pretend to options.\n") % access_desc
if portage.secpass < 1 and not need_superuser:
portage_group_warning()
sys.stderr.write(("emerge: %s access is " + \
"required.\n\n") % access_desc)
if portage.secpass < 1 and not need_superuser:
portage_group_warning()
# Decide whether to write to emerge.log: disabled for dry-run/fetch-only
# modes and for the read-only search/info actions.
disable_emergelog = False
for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
disable_emergelog = True
if myaction in ("search", "info"):
disable_emergelog = True
if disable_emergelog:
""" Disable emergelog for everything except build or unmerge
operations. This helps minimize parallel emerge.log entries that can
confuse log parsers. We especially want it disabled during
parallel-fetch, which uses --resume --fetchonly."""
def emergelog(*pargs, **kargs):
# Log the start time and the reconstructed command line.
if not "--pretend" in myopts:
emergelog(xterm_titles, "Started emerge on: "+\
time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
myelogstr=" ".join(myopts)
myelogstr+=" "+myaction
myelogstr += " " + " ".join(oldargs)
emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
	"""SIGINT/SIGTERM handler: announce the signal and exit with 100+signum."""
	# Ignore any further INT/TERM delivered while we are shutting down.
	for _sig in (signal.SIGINT, signal.SIGTERM):
		signal.signal(_sig, signal.SIG_IGN)
	portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
	sys.exit(100+signum)
# Route fatal signals through emergeexitsig so interrupted runs exit
# with status 100+signum.
signal.signal(signal.SIGINT, emergeexitsig)
signal.signal(signal.SIGTERM, emergeexitsig)
# NOTE(review): the "def emergeexit():" header is missing from this
# chunk; the docstring and body below belong to that atexit hook.
"""This gets out final log message in before we quit."""
if "--pretend" not in myopts:
emergelog(xterm_titles, " *** terminating.")
if "notitles" not in settings.features:
portage.atexit_register(emergeexit)
# Actions that do not support --pretend at all.
if myaction in ("config", "metadata", "regen", "sync"):
if "--pretend" in myopts:
sys.stderr.write(("emerge: The '%s' action does " + \
"not support '--pretend'.\n") % myaction)
# Dispatch the simple (non-build) actions.
if "sync" == myaction:
return action_sync(settings, trees, mtimedb, myopts, myaction)
elif "metadata" == myaction:
action_metadata(settings, portdb, myopts)
elif myaction=="regen":
validate_ebuild_environment(trees)
action_regen(settings, portdb, myopts.get("--jobs"),
myopts.get("--load-average"))
elif "config"==myaction:
validate_ebuild_environment(trees)
action_config(settings, trees, myopts, myfiles)
elif "search"==myaction:
validate_ebuild_environment(trees)
action_search(trees[settings["ROOT"]]["root_config"],
myopts, myfiles, spinner)
# NOTE(review): the loop header binding x and the branch taken for
# invalid atoms are missing from this span; code left untouched.
elif myaction in ("clean", "unmerge") or \
(myaction == "prune" and "--nodeps" in myopts):
validate_ebuild_environment(trees)
# Ensure atoms are valid before calling unmerge().
# For backward compat, leading '=' is not required.
if is_valid_package_atom(x) or \
is_valid_package_atom("=" + x):
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
# When given a list of atoms, unmerge
# them in the order given.
ordered = myaction == "unmerge"
if 1 == unmerge(root_config, myopts, myaction, myfiles,
mtimedb["ldpath"], ordered=ordered):
if not (buildpkgonly or fetchonly or pretend):
post_emerge(root_config, myopts, mtimedb, os.EX_OK)
# NOTE(review): lines are missing in this span (the loop header binding
# x, the try: opening the dep_expand call, and the loop printing the
# ambiguous candidates i); code left untouched.
elif myaction in ("depclean", "info", "prune"):
# Ensure atoms are valid before calling unmerge().
vardb = trees[settings["ROOT"]]["vartree"].dbapi
if is_valid_package_atom(x):
valid_atoms.append(
portage.dep_expand(x, mydb=vardb, settings=settings))
except portage.exception.AmbiguousPackageName, e:
msg = "The short ebuild name \"" + x + \
"\" is ambiguous. Please specify " + \
"one of the following " + \
"fully-qualified ebuild names instead:"
for line in textwrap.wrap(msg, 70):
writemsg_level("!!! %s\n" % (line,),
level=logging.ERROR, noiselevel=-1)
writemsg_level(" %s\n" % colorize("INFORM", i),
level=logging.ERROR, noiselevel=-1)
writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
# info returns directly; depclean/prune validate and clean up after.
if myaction == "info":
return action_info(settings, trees, myopts, valid_atoms)
validate_ebuild_environment(trees)
action_depclean(settings, trees, mtimedb["ldpath"],
myopts, myaction, valid_atoms, spinner)
if not (buildpkgonly or fetchonly or pretend):
post_emerge(root_config, myopts, mtimedb, os.EX_OK)
# "update", "system", or just process files:
validate_ebuild_environment(trees)
if "--pretend" not in myopts:
display_news_notification(root_config, myopts)
retval = action_build(settings, trees, mtimedb,
myopts, myaction, myfiles, spinner)
# Re-fetch root_config — presumably because action_build can reload the
# config/trees; TODO confirm.
root_config = trees[settings["ROOT"]]["root_config"]
post_emerge(root_config, myopts, mtimedb, retval)