2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time, types
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
59 from UserDict import DictMixin
62 import cPickle as pickle
67 import cStringIO as StringIO
# Terminal progress spinner with three display modes: basic dots, a
# scrolling message, and a twirling cursor.  NOTE(review): this is a
# sampled excerpt — original line numbers are fused into each line and
# many lines (e.g. __init__ header, return statements) are missing, so
# the code is left byte-identical and only comments are added.
71 class stdout_spinner(object):
	# Messages cycled through by update_scroll(); one is chosen per run.
73 	"Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
	# Characters cycled through by update_twirl().
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
	# Pick a scroll message pseudo-randomly based on the current time.
96 self.update = self.update_twirl
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
	# Minimum seconds between terminal writes (rate limiting).
100 self.min_display_latency = 0.05
102 def _return_early(self):
	# Rate limiter shared by all update_* methods.
104 Flushing output to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
108 cur_time = time.time()
109 if cur_time - self.last_update < self.min_display_latency:
111 self.last_update = cur_time
114 def update_basic(self):
	# Emit a dot every 100 ticks; an extra space every 500 ticks.
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
125 def update_scroll(self):
	# Scroll the chosen message forward, then backward (2x length cycle).
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
136 def update_twirl(self):
	# Classic /-\| twirl, one character per tick.
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
	# No-op spinner used with --quiet / --nospinner.
143 def update_quiet(self):
# Interactive yes/no (or custom) prompt.  NOTE(review): sampled excerpt;
# several lines (try:, returns, default colours assignment) are missing —
# code left byte-identical, comments only.
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
	# Default colour functions come from the PROMPT_CHOICE_* colour classes.
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
	# Repeat the colour list so it is at least as long as responses.
172 colours=(colours*len(responses))[:len(responses)]
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
	# NOTE(review): per the docstring, EOF/Ctrl-C presumably exits via
	# SystemExit — the handler body is not visible here; confirm upstream.
182 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary tables.  NOTE(review): sampled excerpt — some
# action names, long options and short-option mappings are missing from
# view; code left byte-identical, comments only.
# Set of non-merge actions accepted on the command line (emerge <action>).
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
# Long options recognized by the argument parser.
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
205 "--newuse", "--nocolor",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--searchdesc", "--selective",
214 "--usepkg", "--usepkgonly",
215 "--verbose", "--version"
# Map of single-letter short options to their long equivalents.
221 "b":"--buildpkg", "B":"--buildpkgonly",
222 "c":"--clean", "C":"--unmerge",
223 "d":"--debug", "D":"--deep",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "k":"--usepkg", "K":"--usepkgonly",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps", "O":"--nodeps",
232 "p":"--pretend", "P":"--prune",
234 "s":"--search", "S":"--searchdesc",
237 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log and optionally set the
# xterm title.  NOTE(review): sampled excerpt — the try:, seek, close and
# quiet-mode guard lines are not visible; code left byte-identical.
240 def emergelog(xterm_titles, mystr, short_msg=None):
241 if xterm_titles and short_msg:
242 if "HOSTNAME" in os.environ:
	# Prefix the short message with the host name for remote sessions.
243 short_msg = os.environ["HOSTNAME"]+": "+short_msg
244 xtermTitle(short_msg)
246 file_path = "/var/log/emerge.log"
247 mylogfile = open(file_path, "a")
	# Ensure the log is owned by the portage user/group.
248 portage.util.apply_secpass_permissions(file_path,
249 uid=portage.portage_uid, gid=portage.portage_gid,
	# Serialize concurrent emerge instances writing to the same log.
253 mylock = portage.locks.lockfile(mylogfile)
254 # seek because we may have gotten held up by the lock.
255 # if so, we may not be positioned at the end of the file.
257 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
261 portage.locks.unlockfile(mylock)
	# Best effort: logging failures are reported but never fatal.
263 except (IOError,OSError,portage.exception.PortageException), e:
265 print >> sys.stderr, "emergelog():",e
# Print a visible countdown before a destructive action (e.g. unmerge),
# giving the user a chance to Ctrl-C.  NOTE(review): sampled excerpt —
# the loop header and sleep are not visible; code left byte-identical.
267 def countdown(secs=5, doing="Starting"):
269 print ">>> Waiting",secs,"seconds before starting..."
270 print ">>> (Control-C to abort)...\n"+doing+" in: ",
274 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
279 # formats a size given in bytes nicely
# Returns a human-readable kB string with thousands separators.
# NOTE(review): sampled excerpt — the early-return for non-int input and
# the comma-insertion loop header are not visible; code left byte-identical.
280 def format_size(mysize):
	# Python 2 int/long check; non-numeric input is presumably passed
	# through unchanged (the branch body is not visible — confirm).
281 if type(mysize) not in [types.IntType,types.LongType]:
283 if 0 != mysize % 1024:
284 # Always round up to the next kB so that it doesn't show 0 kB when
285 # some small file still needs to be fetched.
286 mysize += 1024 - mysize % 1024
287 mystr=str(mysize/1024)
	# Insert a comma every three digits, right to left.
291 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying increasingly generic probes:
# gcc-config, then ${CHOST}-gcc, then plain gcc.  NOTE(review): sampled
# excerpt; code left byte-identical, comments only.
295 def getgccversion(chost):
298 return: the current in-use gcc version
301 gcc_ver_command = 'gcc -dumpversion'
302 gcc_ver_prefix = 'gcc-'
304 gcc_not_found_error = red(
305 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306 "!!! to update the environment of this terminal and possibly\n" +
307 "!!! other terminals also.\n"
	# 1) Preferred: ask gcc-config for the current profile (CHOST-version).
310 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
	# 2) Fall back to the CHOST-prefixed compiler binary.
314 mystatus, myoutput = commands.getstatusoutput(
315 chost + "-" + gcc_ver_command)
316 if mystatus == os.EX_OK:
317 return gcc_ver_prefix + myoutput
	# 3) Last resort: whatever "gcc" is on PATH.
319 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320 if mystatus == os.EX_OK:
321 return gcc_ver_prefix + myoutput
	# Nothing worked: warn the user and return a placeholder.
323 portage.writemsg(gcc_not_found_error, noiselevel=-1)
324 return "[unavailable]"
# Build the "Portage X.Y (profile, gcc, libc, kernel arch)" banner string
# shown by emerge --info/--version.  NOTE(review): sampled excerpt — the
# try/except scaffolding and loop headers are not visible; code left
# byte-identical, comments only.
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327 profilever = "unavailable"
	# Derive the profile name relative to ${PORTDIR}/profiles.
329 realpath = os.path.realpath(profile)
330 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
331 if realpath.startswith(basepath):
332 profilever = realpath[1 + len(basepath):]
	# Profile symlink points outside the tree: show the raw link target.
335 profilever = "!" + os.readlink(profile)
338 del realpath, basepath
	# Collect installed libc providers (virtual/libc, legacy virtual/glibc).
341 libclist = vardb.match("virtual/libc")
342 libclist += vardb.match("virtual/glibc")
343 libclist = portage.util.unique_array(libclist)
345 xs=portage.catpkgsplit(x)
	# Multiple providers are comma-joined; each as "pkg-ver(-rev)".
347 libcver+=","+"-".join(xs[1:])
349 libcver="-".join(xs[1:])
351 libcver="unavailable"
353 gccver = getgccversion(chost)
354 unameout=platform.release()+" "+platform.machine()
356 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate command-line options into the depgraph parameter set.
# NOTE(review): sampled excerpt — the early return for "remove" and the
# final "return myparams" are not visible; code left byte-identical.
358 def create_depgraph_params(myopts, myaction):
359 #configure emerge engine parameters
361 # self: include _this_ package regardless of if it is merged.
362 # selective: exclude the package if it is merged
363 # recurse: go into the dependencies
364 # deep: go into the dependencies of already merged packages
365 # empty: pretend nothing is merged
366 # complete: completely account for all known dependencies
367 # remove: build graph for use in removing packages
368 myparams = set(["recurse"])
370 if myaction == "remove":
371 myparams.add("remove")
372 myparams.add("complete")
	# Options that imply selective (skip already-merged) behavior.
375 if "--update" in myopts or \
376 "--newuse" in myopts or \
377 "--reinstall" in myopts or \
378 "--noreplace" in myopts:
379 myparams.add("selective")
380 if "--emptytree" in myopts:
	# --emptytree overrides selective: everything is treated as unmerged.
381 myparams.add("empty")
382 myparams.discard("selective")
383 if "--nodeps" in myopts:
384 myparams.discard("recurse")
385 if "--deep" in myopts:
387 if "--complete-graph" in myopts:
388 myparams.add("complete")
391 # search functionality
# Implements `emerge --search` / `--searchdesc` over the combined
# portage tree, binary packages, installed packages and package sets.
# NOTE(review): sampled excerpt — many lines (class constants such as
# VERSION_RELEASE, loop headers, returns, try/except scaffolding) are
# missing from view; code is left byte-identical, comments only.
392 class search(object):
	# Build the list of searchable databases once so repeated searches
	# are fast.  A fake portdb facade delegates to _aux_get/_xmatch/etc.
	# so the rest of the class can treat multiple dbs as one.
403 def __init__(self, root_config, spinner, searchdesc,
404 verbose, usepkg, usepkgonly):
405 """Searches the available and installed packages for the supplied search key.
406 The list of available and installed packages is created at object instantiation.
407 This makes successive searches faster."""
408 self.settings = root_config.settings
409 self.vartree = root_config.trees["vartree"]
410 self.spinner = spinner
411 self.verbose = verbose
412 self.searchdesc = searchdesc
413 self.root_config = root_config
414 self.setconfig = root_config.setconfig
415 self.matches = {"pkg" : []}
	# Route portdb-style calls through this object's _-prefixed wrappers.
420 self.portdb = fake_portdb
421 for attrib in ("aux_get", "cp_all",
422 "xmatch", "findname", "getFetchMap"):
423 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
427 portdb = root_config.trees["porttree"].dbapi
428 bindb = root_config.trees["bintree"].dbapi
429 vardb = root_config.trees["vartree"].dbapi
	# Select which dbs participate, based on --usepkg/--usepkgonly.
431 if not usepkgonly and portdb._have_root_eclass_dir:
432 self._dbs.append(portdb)
434 if (usepkg or usepkgonly) and bindb.cp_all():
435 self._dbs.append(bindb)
437 self._dbs.append(vardb)
438 self._portdb = portdb
	# Union of category/package names across all selected dbs.
443 cp_all.update(db.cp_all())
444 return list(sorted(cp_all))
446 def _aux_get(self, *args, **kwargs):
	# Try each db in order; first one that answers wins (loop header
	# and exception handling not visible here).
449 return db.aux_get(*args, **kwargs)
454 def _findname(self, *args, **kwargs):
456 if db is not self._portdb:
457 # We don't want findname to return anything
458 # unless it's an ebuild in a portage tree.
459 # Otherwise, it's already built and we don't
462 func = getattr(db, "findname", None)
464 value = func(*args, **kwargs)
469 def _getFetchMap(self, *args, **kwargs):
471 func = getattr(db, "getFetchMap", None)
473 value = func(*args, **kwargs)
478 def _visible(self, db, cpv, metadata):
	# Wrap the cpv in a Package and delegate to the module-level
	# visible() masking check.
479 installed = db is self.vartree.dbapi
480 built = installed or db is not self._portdb
483 pkg_type = "installed"
486 return visible(self.settings,
487 Package(type_name=pkg_type, root_config=self.root_config,
488 cpv=cpv, built=built, installed=installed, metadata=metadata))
	# Multi-db emulation of portdbapi.xmatch for the three levels used
	# by execute()/output().
490 def _xmatch(self, level, atom):
492 This method does not expand old-style virtuals because it
493 is restricted to returning matches for a single ${CATEGORY}/${PN}
494 and old-style virual matches unreliable for that when querying
495 multiple package databases. If necessary, old-style virtuals
496 can be performed on atoms prior to calling this method.
498 cp = portage.dep_getkey(atom)
499 if level == "match-all":
502 if hasattr(db, "xmatch"):
503 matches.update(db.xmatch(level, atom))
505 matches.update(db.match(atom))
	# Keep only matches for this exact cat/pkg and sort ascending.
506 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507 db._cpv_sort_ascending(result)
508 elif level == "match-visible":
511 if hasattr(db, "xmatch"):
512 matches.update(db.xmatch(level, atom))
	# dbs without xmatch: filter match() results through _visible().
514 db_keys = list(db._aux_cache_keys)
515 for cpv in db.match(atom):
516 metadata = izip(db_keys,
517 db.aux_get(cpv, db_keys))
518 if not self._visible(db, cpv, metadata):
521 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522 db._cpv_sort_ascending(result)
523 elif level == "bestmatch-visible":
526 if hasattr(db, "xmatch"):
527 cpv = db.xmatch("bestmatch-visible", atom)
528 if not cpv or portage.cpv_getkey(cpv) != cp:
	# Keep the highest version seen across all dbs.
530 if not result or cpv == portage.best([cpv, result]):
533 db_keys = Package.metadata_keys
534 # break out of this loop with highest visible
535 # match, checked in descending order
536 for cpv in reversed(db.match(atom)):
537 if portage.cpv_getkey(cpv) != cp:
539 metadata = izip(db_keys,
540 db.aux_get(cpv, db_keys))
541 if not self._visible(db, cpv, metadata):
543 if not result or cpv == portage.best([cpv, result]):
547 raise NotImplementedError(level)
550 def execute(self,searchkey):
551 """Performs the search for the supplied search key"""
553 self.searchkey=searchkey
554 self.packagematches = []
	# Result buckets: package-name matches, description matches
	# (only when --searchdesc), and package-set matches.
557 self.matches = {"pkg":[], "desc":[], "set":[]}
560 self.matches = {"pkg":[], "set":[]}
561 print "Searching... ",
	# Leading '%' means the key is a regular expression; leading '@'
	# restricts the match to the name part after the category.
564 if self.searchkey.startswith('%'):
566 self.searchkey = self.searchkey[1:]
567 if self.searchkey.startswith('@'):
569 self.searchkey = self.searchkey[1:]
571 self.searchre=re.compile(self.searchkey,re.I)
573 self.searchre=re.compile(re.escape(self.searchkey), re.I)
574 for package in self.portdb.cp_all():
575 self.spinner.update()
578 match_string = package[:]
580 match_string = package.split("/")[-1]
583 if self.searchre.search(match_string):
	# A name match with no visible version is reported as masked.
584 if not self.portdb.xmatch("match-visible", package):
586 self.matches["pkg"].append([package,masked])
587 elif self.searchdesc: # DESCRIPTION searching
588 full_package = self.portdb.xmatch("bestmatch-visible", package)
590 #no match found; we don't want to query description
591 full_package = portage.best(
592 self.portdb.xmatch("match-all", package))
598 full_desc = self.portdb.aux_get(
599 full_package, ["DESCRIPTION"])[0]
601 print "emerge: search: aux_get() failed, skipping"
603 if self.searchre.search(full_desc):
604 self.matches["desc"].append([full_package,masked])
	# Also search configured package sets by name and description.
606 self.sdict = self.setconfig.getSets()
607 for setname in self.sdict:
608 self.spinner.update()
610 match_string = setname
612 match_string = setname.split("/")[-1]
614 if self.searchre.search(match_string):
615 self.matches["set"].append([setname, False])
616 elif self.searchdesc:
617 if self.searchre.search(
618 self.sdict[setname].getMetadata("DESCRIPTION")):
619 self.matches["set"].append([setname, False])
622 for mtype in self.matches:
623 self.matches[mtype].sort()
624 self.mlen += len(self.matches[mtype])
	# Helper used elsewhere to classify a single cp (presumably
	# addCP — the def line is not visible here; confirm upstream).
627 if not self.portdb.xmatch("match-all", cp):
630 if not self.portdb.xmatch("bestmatch-visible", cp):
632 self.matches["pkg"].append([cp, masked])
	# Pretty-print the accumulated matches (presumably output() —
	# the def line is not visible here).
636 """Outputs the results of the search."""
637 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
638 print "[ Applications found : "+white(str(self.mlen))+" ]"
640 vardb = self.vartree.dbapi
641 for mtype in self.matches:
642 for match,masked in self.matches[mtype]:
646 full_package = self.portdb.xmatch(
647 "bestmatch-visible", match)
649 #no match found; we don't want to query description
651 full_package = portage.best(
652 self.portdb.xmatch("match-all",match))
653 elif mtype == "desc":
655 match = portage.cpv_getkey(match)
	# Package-set result: name plus the set's DESCRIPTION metadata.
657 print green("*")+" "+white(match)
658 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
662 desc, homepage, license = self.portdb.aux_get(
663 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665 print "emerge: search: aux_get() failed, skipping"
668 print green("*")+" "+white(match)+" "+red("[ Masked ]")
670 print green("*")+" "+white(match)
671 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
675 mycat = match.split("/")[0]
676 mypkg = match.split("/")[1]
677 mycpv = match + "-" + myversion
	# Compute total distfile size from the Manifest when an ebuild
	# exists; otherwise fall back to the binary package file size.
678 myebuild = self.portdb.findname(mycpv)
680 pkgdir = os.path.dirname(myebuild)
681 from portage import manifest
682 mf = manifest.Manifest(
683 pkgdir, self.settings["DISTDIR"])
685 uri_map = self.portdb.getFetchMap(mycpv)
686 except portage.exception.InvalidDependString, e:
687 file_size_str = "Unknown (%s)" % (e,)
691 mysum[0] = mf.getDistfilesSize(uri_map)
693 file_size_str = "Unknown (missing " + \
694 "digest for %s)" % (e,)
699 if db is not vardb and \
700 db.cpv_exists(mycpv):
702 if not myebuild and hasattr(db, "bintree"):
703 myebuild = db.bintree.getname(mycpv)
705 mysum[0] = os.stat(myebuild).st_size
	# Format the byte count as "N,NNN kB".
710 if myebuild and file_size_str is None:
711 mystr = str(mysum[0] / 1024)
715 mystr = mystr[:mycount] + "," + mystr[mycount:]
716 file_size_str = mystr + " kB"
720 print " ", darkgreen("Latest version available:"),myversion
721 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
724 (darkgreen("Size of files:"), file_size_str)
725 print " ", darkgreen("Homepage:")+" ",homepage
726 print " ", darkgreen("Description:")+" ",desc
727 print " ", darkgreen("License:")+" ",license
	# Report the best installed version of a cat/pkg, or a placeholder.
732 def getInstallationStatus(self,package):
733 installed_package = self.vartree.dep_bestmatch(package)
735 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737 result = darkgreen("Latest version installed:")+" "+version
739 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
	# Extract the version (optionally with -rN revision) from a full cpv.
742 def getVersion(self,full_package,detail):
743 if len(full_package) > 1:
744 package_parts = portage.catpkgsplit(full_package)
745 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746 result = package_parts[2]+ "-" + package_parts[3]
748 result = package_parts[2]
# Per-ROOT bundle of settings, trees and set configuration, used by the
# depgraph.  NOTE(review): sampled excerpt — the pkg_tree_map literal
# header and the reverse-map target are not fully visible; code left
# byte-identical, comments only.
753 class RootConfig(object):
754 """This is used internally by depgraph to track information about a
	# Maps Package.type_name values to the tree key that stores them.
758 "ebuild" : "porttree",
759 "binary" : "bintree",
760 "installed" : "vartree"
	# Build the inverse mapping (tree name -> package type).
764 for k, v in pkg_tree_map.iteritems():
767 def __init__(self, settings, trees, setconfig):
769 self.settings = settings
	# Implicit IUSE flags, sorted for deterministic comparisons.
770 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771 self.root = self.settings["ROOT"]
772 self.setconfig = setconfig
773 self.sets = self.setconfig.getSets()
	# In-memory dbapi tracking packages known to be visible.
774 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decide which atom (plain cp or slot atom) should be recorded in the
# world file for a package selected by a command-line argument set.
# NOTE(review): sampled excerpt — several guard lines (early returns,
# try/except, loop headers, the mydb selection) are not visible; code
# left byte-identical, comments only.
776 def create_world_atom(pkg, args_set, root_config):
777 """Create a new atom for the world file if one does not exist. If the
778 argument atom is precise enough to identify a specific slot then a slot
779 atom will be returned. Atoms that are in the system set may also be stored
780 in world since system atoms can only match one slot while world atoms can
781 be greedy with respect to slots. Unslotted system packages will not be
784 arg_atom = args_set.findAtomForPackage(pkg)
787 cp = portage.dep_getkey(arg_atom)
789 sets = root_config.sets
790 portdb = root_config.trees["porttree"].dbapi
791 vardb = root_config.trees["vartree"].dbapi
	# A package is "slotted" when more than one SLOT exists for its cp,
	# or its single SLOT is not the default "0".
792 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793 for cpv in portdb.match(cp))
794 slotted = len(available_slots) > 1 or \
795 (len(available_slots) == 1 and "0" not in available_slots)
797 # check the vdb in case this is multislot
798 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799 for cpv in vardb.match(cp))
800 slotted = len(available_slots) > 1 or \
801 (len(available_slots) == 1 and "0" not in available_slots)
802 if slotted and arg_atom != cp:
803 # If the user gave a specific atom, store it as a
804 # slot atom in the world file.
805 slot_atom = pkg.slot_atom
807 # For USE=multislot, there are a couple of cases to
810 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811 # unknown value, so just record an unslotted atom.
813 # 2) SLOT comes from an installed package and there is no
814 # matching SLOT in the portage tree.
816 # Make sure that the slot atom is available in either the
817 # portdb or the vardb, since otherwise the user certainly
818 # doesn't want the SLOT atom recorded in the world file
819 # (case 1 above). If it's only available in the vardb,
820 # the user may be trying to prevent a USE=multislot
821 # package from being removed by --depclean (case 2 above).
824 if not portdb.match(slot_atom):
825 # SLOT seems to come from an installed multislot package
827 # If there is no installed package matching the SLOT atom,
828 # it probably changed SLOT spontaneously due to USE=multislot,
829 # so just record an unslotted atom.
830 if vardb.match(slot_atom):
831 # Now verify that the argument is precise
832 # enough to identify a specific slot.
833 matches = mydb.match(arg_atom)
834 matched_slots = set()
836 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837 if len(matched_slots) == 1:
838 new_world_atom = slot_atom
840 if new_world_atom == sets["world"].findAtomForPackage(pkg):
841 # Both atoms would be identical, so there's nothing to add.
844 # Unlike world atoms, system atoms are not greedy for slots, so they
845 # can't be safely excluded from world if they are slotted.
846 system_atom = sets["system"].findAtomForPackage(pkg)
848 if not portage.dep_getkey(system_atom).startswith("virtual/"):
850 # System virtuals aren't safe to exclude from world since they can
851 # match multiple old-style virtuals but only one of them will be
852 # pulled in by update or depclean.
853 providers = portdb.mysettings.getvirtuals().get(
854 portage.dep_getkey(system_atom))
855 if providers and len(providers) == 1 and providers[0] == cp:
857 return new_world_atom
# Generator that strips EAPI-1 style +/- default markers from IUSE flags.
# NOTE(review): sampled excerpt — the loop header and yield lines are not
# visible; code left byte-identical, comments only.
859 def filter_iuse_defaults(iuse):
861 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: attributes are declared via __slots__ in each
# subclass and initialized from keyword arguments by walking the MRO.
# NOTE(review): sampled excerpt — the while-loop headers and the copy()
# def line are not visible; code left byte-identical, comments only.
866 class SlotObject(object):
867 __slots__ = ("__weakref__",)
869 def __init__(self, **kwargs):
	# Walk this class and all its bases, collecting every __slots__
	# entry and initializing it from kwargs (default None).
870 classes = [self.__class__]
875 classes.extend(c.__bases__)
876 slots = getattr(c, "__slots__", None)
880 myvalue = kwargs.get(myattr, None)
881 setattr(self, myattr, myvalue)
885 Create a new instance and copy all attributes
886 defined from __slots__ (including those from
889 obj = self.__class__()
	# Same MRO walk as __init__, but copying values from self.
891 classes = [self.__class__]
896 classes.extend(c.__bases__)
897 slots = getattr(c, "__slots__", None)
901 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities.  Rich comparisons delegate to
# __int__(), which subclasses define; this lets priorities compare
# directly against plain integers.  NOTE(review): sampled excerpt — the
# copy() def line is not visible; code left byte-identical.
905 class AbstractDepPriority(SlotObject):
906 __slots__ = ("buildtime", "runtime", "runtime_post")
908 def __lt__(self, other):
909 return self.__int__() < other
911 def __le__(self, other):
912 return self.__int__() <= other
914 def __eq__(self, other):
915 return self.__int__() == other
917 def __ne__(self, other):
918 return self.__int__() != other
920 def __gt__(self, other):
921 return self.__int__() > other
923 def __ge__(self, other):
924 return self.__int__() >= other
	# Shallow copy is sufficient: all slot values are immutable flags.
928 return copy.copy(self)
# Concrete priority for normal dependency edges; see the table in the
# docstring for the flag-to-integer mapping.  NOTE(review): sampled
# excerpt — the __int__ def line, the MEDIUM/MEDIUM_SOFT/SOFT/MIN class
# constants and several return statements are not visible; code left
# byte-identical, comments only.
930 class DepPriority(AbstractDepPriority):
932 This class generates an integer priority level based of various
933 attributes of the dependency relationship. Attributes can be assigned
934 at any time and the new integer value will be generated on calls to the
935 __int__() method. Rich comparison operators are supported.
937 The boolean attributes that affect the integer value are "satisfied",
938 "buildtime", "runtime", and "system". Various combinations of
939 attributes lead to the following priority levels:
941 Combination of properties Priority Category
943 not satisfied and buildtime 0 HARD
944 not satisfied and runtime -1 MEDIUM
945 not satisfied and runtime_post -2 MEDIUM_SOFT
946 satisfied and buildtime and rebuild -3 SOFT
947 satisfied and buildtime -4 SOFT
948 satisfied and runtime -5 SOFT
949 satisfied and runtime_post -6 SOFT
950 (none of the above) -6 SOFT
952 Several integer constants are defined for categorization of priority
955 MEDIUM The upper boundary for medium dependencies.
956 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
957 SOFT The upper boundary for soft dependencies.
958 MIN The lower boundary for soft dependencies.
960 __slots__ = ("satisfied", "rebuild")
	# __int__: unsatisfied deps rank highest (0/-1/-2), satisfied ones
	# fall into the SOFT band (-3 .. -6) per the table above.
967 if not self.satisfied:
972 if self.runtime_post:
980 if self.runtime_post:
	# __str__-style categorization based on the integer value.
985 myvalue = self.__int__()
986 if myvalue > self.MEDIUM:
988 if myvalue > self.MEDIUM_SOFT:
990 if myvalue > self.SOFT:
# Priority subclass for blocker edges; a shared singleton is exposed as
# BlockerDepPriority.instance.  NOTE(review): sampled excerpt — the class
# body is not visible; code left byte-identical.
994 class BlockerDepPriority(DepPriority):
999 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when ordering unmerge operations; runtime deps are HARD
# (0/-1), everything else SOFT (-2).  NOTE(review): sampled excerpt —
# __int__ def line, returns and the SOFT constant are not visible; code
# left byte-identical.
1001 class UnmergeDepPriority(AbstractDepPriority):
1002 __slots__ = ("satisfied",)
1004 Combination of properties Priority Category
1007 runtime_post -1 HARD
1009 (none of the above) -2 SOFT
1019 if self.runtime_post:
	# Categorize the integer value as HARD or SOFT.
1026 myvalue = self.__int__()
1027 if myvalue > self.SOFT:
# In-memory snapshot of the installed-package tree (vardb) so the
# depgraph can compute without holding the vardb lock, and so global
# updates can be applied in memory for unprivileged users.
# NOTE(review): sampled excerpt — try/finally scaffolding, several loop
# headers, returns and else branches are not visible; code is left
# byte-identical, comments only.
1031 class FakeVartree(portage.vartree):
1032 """This is implements an in-memory copy of a vartree instance that provides
1033 all the interfaces required for use by the depgraph. The vardb is locked
1034 during the constructor call just long enough to read a copy of the
1035 installed package information. This allows the depgraph to do it's
1036 dependency calculations without holding a lock on the vardb. It also
1037 allows things like vardb global updates to be done in memory so that the
1038 user doesn't necessarily need write access to the vardb in cases where
1039 global updates are necessary (updates are performed when necessary if there
1040 is not a matching ebuild in the tree)."""
1041 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1042 self._root_config = root_config
1043 if pkg_cache is None:
1045 real_vartree = root_config.trees["vartree"]
1046 portdb = root_config.trees["porttree"].dbapi
1047 self.root = real_vartree.root
1048 self.settings = real_vartree.settings
	# Always fetch _mtime_ along with the cached aux keys so stale
	# entries can be detected later in sync().
1049 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1050 if "_mtime_" not in mykeys:
1051 mykeys.append("_mtime_")
1052 self._db_keys = mykeys
1053 self._pkg_cache = pkg_cache
1054 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1055 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1057 # At least the parent needs to exist for the lock file.
1058 portage.util.ensure_dirs(vdb_path)
1059 except portage.exception.PortageException:
	# Only lock when we can write; read-only users skip locking.
1063 if acquire_lock and os.access(vdb_path, os.W_OK):
1064 vdb_lock = portage.locks.lockdir(vdb_path)
1065 real_dbapi = real_vartree.dbapi
	# Copy every installed package's metadata into the in-memory db.
1067 for cpv in real_dbapi.cpv_all():
1068 cache_key = ("installed", self.root, cpv, "nomerge")
1069 pkg = self._pkg_cache.get(cache_key)
1071 metadata = pkg.metadata
1073 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1074 myslot = metadata["SLOT"]
1075 mycp = portage.dep_getkey(cpv)
1076 myslot_atom = "%s:%s" % (mycp, myslot)
	# Non-numeric COUNTER values are presumably normalized here
	# (the except branch is not visible — confirm upstream).
1078 mycounter = long(metadata["COUNTER"])
1081 metadata["COUNTER"] = str(mycounter)
	# Track the highest COUNTER per slot atom so only the newest
	# package per slot is considered current.
1082 other_counter = slot_counters.get(myslot_atom, None)
1083 if other_counter is not None:
1084 if other_counter > mycounter:
1086 slot_counters[myslot_atom] = mycounter
1088 pkg = Package(built=True, cpv=cpv,
1089 installed=True, metadata=metadata,
1090 root_config=root_config, type_name="installed")
1091 self._pkg_cache[pkg] = pkg
1092 self.dbapi.cpv_inject(pkg)
1093 real_dbapi.flush_cache()
1096 portage.locks.unlockdir(vdb_lock)
1097 # Populate the old-style virtuals using the cached values.
1098 if not self.settings.treeVirtuals:
1099 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1100 portage.getCPFromCPV, self.get_all_provides())
1102 # Intialize variables needed for lazy cache pulls of the live ebuild
1103 # metadata. This ensures that the vardb lock is released ASAP, without
1104 # being delayed in case cache generation is triggered.
1105 self._aux_get = self.dbapi.aux_get
1106 self.dbapi.aux_get = self._aux_get_wrapper
1107 self._match = self.dbapi.match
1108 self.dbapi.match = self._match_wrapper
1109 self._aux_get_history = set()
1110 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1111 self._portdb = portdb
1112 self._global_updates = None
1114 def _match_wrapper(self, cpv, use_cache=1):
1116 Make sure the metadata in Package instances gets updated for any
1117 cpv that is returned from a match() call, since the metadata can
1118 be accessed directly from the Package instance instead of via
1121 matches = self._match(cpv, use_cache=use_cache)
1123 if cpv in self._aux_get_history:
	# Trigger a lazy metadata refresh for cpvs not yet seen.
1125 self._aux_get_wrapper(cpv, [])
1128 def _aux_get_wrapper(self, pkg, wants):
	# First request for this pkg: prefer live ebuild metadata from the
	# portage tree; on failure fall back to applying global updates.
1129 if pkg in self._aux_get_history:
1130 return self._aux_get(pkg, wants)
1131 self._aux_get_history.add(pkg)
1133 # Use the live ebuild metadata if possible.
1134 live_metadata = dict(izip(self._portdb_keys,
1135 self._portdb.aux_get(pkg, self._portdb_keys)))
1136 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1138 self.dbapi.aux_update(pkg, live_metadata)
1139 except (KeyError, portage.exception.PortageException):
	# No matching ebuild: lazily load profiles/updates and apply
	# any package moves/slotmoves to the cached metadata.
1140 if self._global_updates is None:
1141 self._global_updates = \
1142 grab_global_updates(self._portdb.porttree_root)
1143 perform_global_updates(
1144 pkg, self.dbapi, self._global_updates)
1145 return self._aux_get(pkg, wants)
1147 def sync(self, acquire_lock=1):
1149 Call this method to synchronize state with the real vardb
1150 after one or more packages may have been installed or
1153 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1155 # At least the parent needs to exist for the lock file.
1156 portage.util.ensure_dirs(vdb_path)
1157 except portage.exception.PortageException:
1161 if acquire_lock and os.access(vdb_path, os.W_OK):
1162 vdb_lock = portage.locks.lockdir(vdb_path)
1166 portage.locks.unlockdir(vdb_lock)
	# The actual re-scan (presumably a _sync helper — its def line is
	# not visible here; confirm upstream).
1170 real_vardb = self._root_config.trees["vartree"].dbapi
1171 current_cpv_set = frozenset(real_vardb.cpv_all())
1172 pkg_vardb = self.dbapi
1173 aux_get_history = self._aux_get_history
1175 # Remove any packages that have been uninstalled.
1176 for pkg in list(pkg_vardb):
1177 if pkg.cpv not in current_cpv_set:
1178 pkg_vardb.cpv_remove(pkg)
1179 aux_get_history.discard(pkg.cpv)
1181 # Validate counters and timestamps.
1184 validation_keys = ["COUNTER", "_mtime_"]
1185 for cpv in current_cpv_set:
1187 pkg_hash_key = ("installed", root, cpv, "nomerge")
1188 pkg = pkg_vardb.get(pkg_hash_key)
1190 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1192 counter = long(counter)
	# Drop stale cache entries whose counter or mtime changed.
1196 if counter != pkg.counter or \
1198 pkg_vardb.cpv_remove(pkg)
1199 aux_get_history.discard(pkg.cpv)
1203 pkg = self._pkg(cpv)
	# Keep only the highest-counter package per slot atom.
1205 other_counter = slot_counters.get(pkg.slot_atom)
1206 if other_counter is not None:
1207 if other_counter > pkg.counter:
1210 slot_counters[pkg.slot_atom] = pkg.counter
1211 pkg_vardb.cpv_inject(pkg)
1213 real_vardb.flush_cache()
1215 def _pkg(self, cpv):
	# Build a fresh installed-Package instance from the real vardb.
1216 root_config = self._root_config
1217 real_vardb = root_config.trees["vartree"].dbapi
1218 pkg = Package(cpv=cpv, installed=True,
1219 metadata=izip(self._db_keys,
1220 real_vardb.aux_get(cpv, self._db_keys)),
1221 root_config=root_config,
1222 type_name="installed")
	# Normalize COUNTER to a numeric string (exception branch not
	# visible here).
1225 mycounter = long(pkg.metadata["COUNTER"])
1228 pkg.metadata["COUNTER"] = str(mycounter)
# Read and parse ${PORTDIR}/profiles/updates into a list of update
# commands (package moves/slotmoves).  NOTE(review): sampled excerpt —
# the try:, the empty-result branch and the return are not visible; code
# left byte-identical, comments only.
1232 def grab_global_updates(portdir):
1233 from portage.update import grab_updates, parse_updates
1234 updpath = os.path.join(portdir, "profiles", "updates")
1236 rawupdates = grab_updates(updpath)
	# A missing updates directory is not an error: no updates to apply.
1237 except portage.exception.DirectoryNotFound:
1240 for mykey, mystat, mycontent in rawupdates:
1241 commands, errors = parse_updates(mycontent)
1242 upd_commands.extend(commands)
# Apply parsed update commands to one package's DEPEND/RDEPEND/PDEPEND
# entries in the given dbapi.  NOTE(review): sampled excerpt — the
# guard between lines 1249 and 1251 is not visible; code left
# byte-identical, comments only.
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246 from portage.update import update_dbentries
1247 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249 updates = update_dbentries(mycommands, aux_dict)
1251 mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
	"""
	Check if a package is visible. This can raise an InvalidDependString
	exception if LICENSE is invalid.
	TODO: optionally generate a list of masking reasons
	@rtype: Boolean
	@returns: True if the package is visible, False otherwise.
	"""
	if not pkg.metadata["SLOT"]:
		# A missing SLOT makes the metadata unusable.
		return False
	if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
		if not pkgsettings._accept_chost(pkg):
			return False
	eapi = pkg.metadata["EAPI"]
	if not portage.eapi_is_supported(eapi):
		return False
	if not pkg.installed:
		# Installed packages stay visible regardless of keywords/EAPI
		# deprecation, since they are already on the system.
		if portage._eapi_is_deprecated(eapi):
			return False
		if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
			return False
	if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
		return False
	if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
		return False
	try:
		if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
			return False
	except portage.exception.InvalidDependString:
		# Treat an unparseable LICENSE as masked.
		return False
	return True
def get_masking_status(pkg, pkgsettings, root_config):
	"""
	Return the list of mask reasons for pkg, augmenting
	portage.getmaskingstatus() with CHOST and SLOT sanity checks
	that only apply to our Package wrapper.
	"""
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)

	if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
		if not pkgsettings._accept_chost(pkg):
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])

	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")

	return mreasons
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""
	Fetch metadata for cpv from db and compute its mask reasons.

	@return: (metadata, mreasons) — metadata is None and mreasons is
		["corruption"] when aux_get fails for the given cpv.
	"""
	try:
		metadata = dict(izip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		# aux_get raises KeyError for unknown/corrupt entries.
		metadata = None

	if metadata and not built:
		# For unbuilt ebuilds, compute USE from the current config.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]

	if metadata is None:
		mreasons = ["corruption"]
	else:
		pkg = Package(type_name=pkg_type, root_config=root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata)
		mreasons = get_masking_status(pkg, pkgsettings, root_config)

	return metadata, mreasons
1320 def show_masked_packages(masked_packages):
1321 shown_licenses = set()
1322 shown_comments = set()
1323 # Maybe there is both an ebuild and a binary. Only
1324 # show one of them to avoid redundant appearance.
1326 have_eapi_mask = False
1327 for (root_config, pkgsettings, cpv,
1328 metadata, mreasons) in masked_packages:
1329 if cpv in shown_cpvs:
1332 comment, filename = None, None
1333 if "package.mask" in mreasons:
1334 comment, filename = \
1335 portage.getmaskingreason(
1336 cpv, metadata=metadata,
1337 settings=pkgsettings,
1338 portdb=root_config.trees["porttree"].dbapi,
1339 return_location=True)
1340 missing_licenses = []
1342 if not portage.eapi_is_supported(metadata["EAPI"]):
1343 have_eapi_mask = True
1345 missing_licenses = \
1346 pkgsettings._getMissingLicenses(
1348 except portage.exception.InvalidDependString:
1349 # This will have already been reported
1350 # above via mreasons.
1353 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354 if comment and comment not in shown_comments:
1357 shown_comments.add(comment)
1358 portdb = root_config.trees["porttree"].dbapi
1359 for l in missing_licenses:
1360 l_path = portdb.findLicensePath(l)
1361 if l in shown_licenses:
1363 msg = ("A copy of the '%s' license" + \
1364 " is located at '%s'.") % (l, l_path)
1367 shown_licenses.add(l)
1368 return have_eapi_mask
class Task(SlotObject):
	"""
	Base class for scheduler tasks.  A Task is identified by a hash key
	tuple supplied by subclasses via _get_hash_key(); equality, hashing,
	iteration and str() all delegate to that key, so a Task compares
	equal to its plain tuple representation.
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses must populate self._hash_key lazily.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			raise NotImplementedError(self)
		return hash_key

	def __eq__(self, other):
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

	def __hash__(self):
		# Cache the hash since the key tuple is immutable once set.
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value

	def __len__(self):
		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

	def __iter__(self):
		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

	def __str__(self):
		return str(self._get_hash_key())
class Blocker(Task):
	"""
	Represents a blocking atom ("!cat/pkg") for a given root.
	The hash key is ("blocks", root, atom, eapi).
	"""

	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# Cache the category/package portion of the atom.
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			self._hash_key = \
				("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
class Package(Task):
	"""
	A Task subclass representing one ebuild/binary/installed package.
	Derived attributes (cp, slot_atom, cpv_split, ...) are computed in
	__init__; metadata is wrapped so that updates to wrapped keys are
	mirrored onto Package attributes (see _PackageMetadataWrapper).
	"""

	__hash__ = Task.__hash__
	__slots__ = ("built", "cpv", "depth",
		"installed", "metadata", "onlydeps", "operation",
		"root_config", "type_name",
		"category", "counter", "cp", "cpv_split",
		"inherited", "iuse", "mtime",
		"pf", "pv_split", "root", "slot", "slot_atom", "use")

	metadata_keys = [
		"CHOST", "COUNTER", "DEPEND", "EAPI",
		"INHERITED", "IUSE", "KEYWORDS",
		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.root = self.root_config.root
		# Wrap metadata so wrapped-key assignments sync our attributes.
		self.metadata = _PackageMetadataWrapper(self, self.metadata)
		self.cp = portage.cpv_getkey(self.cpv)
		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]

	class _use(object):
		# Lightweight holder for the enabled USE flag set.

		__slots__ = ("__weakref__", "enabled")

		def __init__(self, use):
			self.enabled = frozenset(use)

	class _iuse(object):
		# Parses IUSE tokens into enabled (+flag), disabled (-flag) and
		# neutral flags, and lazily builds a validity regex.

		__slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

		def __init__(self, tokens, iuse_implicit):
			self.tokens = tuple(tokens)
			self.iuse_implicit = iuse_implicit
			enabled = []
			disabled = []
			other = []
			for x in tokens:
				prefix = x[:1]
				if prefix == "+":
					enabled.append(x[1:])
				elif prefix == "-":
					disabled.append(x[1:])
				else:
					other.append(x)
			self.enabled = frozenset(enabled)
			self.disabled = frozenset(disabled)
			self.all = frozenset(chain(enabled, disabled, other))

		def __getattribute__(self, name):
			if name == "regex":
				try:
					return object.__getattribute__(self, "regex")
				except AttributeError:
					all = object.__getattribute__(self, "all")
					iuse_implicit = object.__getattribute__(self, "iuse_implicit")
					# Escape anything except ".*" which is supposed
					# to pass through from _get_implicit_iuse()
					regex = (re.escape(x) for x in chain(all, iuse_implicit))
					regex = "^(%s)$" % "|".join(regex)
					regex = regex.replace("\\.\\*", ".*")
					self.regex = re.compile(regex)
			return object.__getattribute__(self, name)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# Default the operation lazily: "merge" unless this package
			# is deps-only or already installed.
			if self.operation is None:
				self.operation = "merge"
				if self.onlydeps or self.installed:
					self.operation = "nomerge"
			self._hash_key = \
				(self.type_name, self.root, self.cpv, self.operation)
		return self._hash_key

	# Ordering compares versions only within the same category/package;
	# packages with different cp never compare as ordered.

	def __lt__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
			return True
		return False

	def __le__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
			return True
		return False

	def __gt__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
			return True
		return False

	def __ge__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
			return True
		return False
# Complete set of metadata keys the Package metadata wrapper may carry:
# every non-UNUSED auxdb key plus Package.metadata_keys.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
	if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict class restricted to the keys above (memory-efficient).
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
	"""
	Detect metadata updates and synchronize Package attributes.
	"""

	__slots__ = ("_pkg",)
	# Keys whose assignment triggers a _set_<key>() hook on this wrapper.
	_wrapped_keys = frozenset(
		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

	def __init__(self, pkg, metadata):
		_PackageMetadataWrapperBase.__init__(self)
		self._pkg = pkg
		self.update(metadata)

	def __setitem__(self, k, v):
		_PackageMetadataWrapperBase.__setitem__(self, k, v)
		if k in self._wrapped_keys:
			# Dispatch to _set_counter, _set_iuse, etc.
			getattr(self, "_set_" + k.lower())(k, v)

	def _set_inherited(self, k, v):
		if isinstance(v, basestring):
			v = frozenset(v.split())
		self._pkg.inherited = v

	def _set_iuse(self, k, v):
		self._pkg.iuse = self._pkg._iuse(
			v.split(), self._pkg.root_config.iuse_implicit)

	def _set_slot(self, k, v):
		self._pkg.slot = v

	def _set_use(self, k, v):
		self._pkg.use = self._pkg._use(v.split())

	def _set_counter(self, k, v):
		if isinstance(v, basestring):
			try:
				v = long(v.strip())
			except ValueError:
				# Corrupt COUNTER: fall back to 0.
				v = 0
		self._pkg.counter = v

	def _set__mtime_(self, k, v):
		if isinstance(v, basestring):
			try:
				v = long(v.strip())
			except ValueError:
				# Corrupt mtime: fall back to 0.
				v = 0
		self._pkg.mtime = v
1589 class EbuildFetchonly(SlotObject):
1591 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1594 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1595 # ensuring sane $PWD (bug #239560) and storing elog
1596 # messages. Use a private temp directory, in order
1597 # to avoid locking the main one.
1598 settings = self.settings
1599 global_tmpdir = settings["PORTAGE_TMPDIR"]
1600 from tempfile import mkdtemp
1602 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1604 if e.errno != portage.exception.PermissionDenied.errno:
1606 raise portage.exception.PermissionDenied(global_tmpdir)
1607 settings["PORTAGE_TMPDIR"] = private_tmpdir
1608 settings.backup_changes("PORTAGE_TMPDIR")
1610 retval = self._execute()
1612 settings["PORTAGE_TMPDIR"] = global_tmpdir
1613 settings.backup_changes("PORTAGE_TMPDIR")
1614 shutil.rmtree(private_tmpdir)
1618 settings = self.settings
1620 root_config = pkg.root_config
1621 portdb = root_config.trees["porttree"].dbapi
1622 ebuild_path = portdb.findname(pkg.cpv)
1623 settings.setcpv(pkg)
1624 debug = settings.get("PORTAGE_DEBUG") == "1"
1625 use_cache = 1 # always true
1626 portage.doebuild_environment(ebuild_path, "fetch",
1627 root_config.root, settings, debug, use_cache, portdb)
1628 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1630 retval = portage.doebuild(ebuild_path, "fetch",
1631 self.settings["ROOT"], self.settings, debug=debug,
1632 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1633 mydbapi=portdb, tree="porttree")
1635 if retval != os.EX_OK:
1636 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1637 eerror(msg, phase="unpack", key=pkg.cpv)
1639 portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):

	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""

	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# Use the platform's select-module values when available; otherwise
	# fall back to distinct power-of-two bit flags.
	v = 1
	for k in names:
		locals()[k] = getattr(select, k, v)
		v *= 2
	del k, v
class AsynchronousTask(SlotObject):
	"""
	Subclasses override _wait() and _poll() so that calls
	to public methods can be wrapped for implementing
	hooks such as exit listener notification.

	Sublasses should call self.wait() to notify exit listeners after
	the task is complete and self.returncode has been set.
	"""

	__slots__ = ("background", "cancelled", "returncode") + \
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")

	def start(self):
		"""
		Start an asynchronous task and then return as soon as possible.
		"""
		self._start_hook()
		self._start()

	def _start(self):
		raise NotImplementedError(self)

	def isAlive(self):
		return self.returncode is None

	def poll(self):
		self._wait_hook()
		return self.returncode

	def wait(self):
		if self.returncode is None:
			self._wait()
		self._wait_hook()
		return self.returncode

	def _wait(self):
		return self.returncode

	def cancel(self):
		self.cancelled = True
		self.wait()

	def addStartListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._start_listeners is None:
			self._start_listeners = []
		self._start_listeners.append(f)

	def removeStartListener(self, f):
		if self._start_listeners is None:
			return
		self._start_listeners.remove(f)

	def _start_hook(self):
		if self._start_listeners is not None:
			# Consume the listener list before calling, so a listener
			# that re-registers does not get called in this round.
			start_listeners = self._start_listeners
			self._start_listeners = None

			for f in start_listeners:
				f(self)

	def addExitListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._exit_listeners is None:
			self._exit_listeners = []
		self._exit_listeners.append(f)

	def removeExitListener(self, f):
		if self._exit_listeners is None:
			if self._exit_listener_stack is not None:
				self._exit_listener_stack.remove(f)
			return
		self._exit_listeners.remove(f)

	def _wait_hook(self):
		"""
		Call this method after the task completes, just before returning
		the returncode from wait() or poll(). This hook is
		used to trigger exit listeners when the returncode first
		becomes available.
		"""
		if self.returncode is not None and \
			self._exit_listeners is not None:

			# This prevents recursion, in case one of the
			# exit handlers triggers this method again by
			# calling wait(). Use a stack that gives
			# removeExitListener() an opportunity to consume
			# listeners from the stack, before they can get
			# called below. This is necessary because a call
			# to one exit listener may result in a call to
			# removeExitListener() for another listener on
			# the stack. That listener needs to be removed
			# from the stack since it would be inconsistent
			# to call it after it has been been passed into
			# removeExitListener().
			self._exit_listener_stack = self._exit_listeners
			self._exit_listeners = None

			self._exit_listener_stack.reverse()
			while self._exit_listener_stack:
				self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
	"""
	Base class for tasks driven by the scheduler's poll() loop.
	Subclasses register file descriptors and implement _unregister().
	"""

	__slots__ = ("scheduler",) + \
		("_registered",)

	# Read chunk size for poll-driven output handlers.
	_bufsize = 4096
	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
		_exceptional_events

	def _unregister(self):
		raise NotImplementedError(self)

	def _unregister_if_appropriate(self, event):
		if self._registered:
			if event & self._exceptional_events:
				# POLLERR/POLLNVAL: the fd is unusable — abort the task.
				self._unregister()
				self.cancel()
			elif event & PollConstants.POLLHUP:
				# Hangup: the writer closed — finish normally.
				self._unregister()
				self.wait()
class PipeReader(AbstractPollTask):

	"""
	Reads output from one or more files and saves it in memory,
	for retrieval via the getvalue() method. This is driven by
	the scheduler's poll() loop, so it runs entirely within the
	main process.
	"""

	__slots__ = ("input_files",) + \
		("_read_data", "_reg_ids")

	def _start(self):
		self._reg_ids = set()
		self._read_data = []
		for k, f in self.input_files.iteritems():
			# Non-blocking reads so the poll loop never stalls.
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
			self._reg_ids.add(self.scheduler.register(f.fileno(),
				self._registered_events, self._output_handler))
		self._registered = True

	def isAlive(self):
		return self._registered

	def cancel(self):
		if self.returncode is None:
			self.returncode = 1
		self.cancelled = True
		self.wait()

	def _wait(self):
		if self.returncode is not None:
			return self.returncode

		if self._registered:
			# Drive the scheduler until our fds are done.
			self.scheduler.schedule(self._reg_ids)
			self._unregister()

		self.returncode = os.EX_OK
		return self.returncode

	def getvalue(self):
		"""Retrieve the entire contents"""
		return "".join(self._read_data)

	def close(self):
		"""Free the memory buffer."""
		self._read_data = None

	def _output_handler(self, fd, event):

		if event & PollConstants.POLLIN:

			# Find the file object that corresponds to this fd.
			for f in self.input_files.itervalues():
				if fd == f.fileno():
					break

			buf = array.array('B')
			try:
				buf.fromfile(f, self._bufsize)
			except EOFError:
				pass

			if buf:
				self._read_data.append(buf.tostring())
			else:
				# Empty read means EOF: finish up.
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_ids is not None:
			for reg_id in self._reg_ids:
				self.scheduler.unregister(reg_id)
			self._reg_ids = None

		if self.input_files is not None:
			for f in self.input_files.itervalues():
				f.close()
			self.input_files = None
class CompositeTask(AsynchronousTask):
	"""
	A task that is implemented as a sequence of sub-tasks, tracking the
	currently-running one in self._current_task.  Exit listeners on each
	sub-task are responsible for advancing to the next one.
	"""

	__slots__ = ("scheduler",) + ("_current_task",)

	def isAlive(self):
		return self._current_task is not None

	def cancel(self):
		self.cancelled = True
		if self._current_task is not None:
			self._current_task.cancel()
		AsynchronousTask.cancel(self)

	def _poll(self):
		"""
		This does a loop calling self._current_task.poll()
		repeatedly as long as the value of self._current_task
		keeps changing. It calls poll() a maximum of one time
		for a given self._current_task instance. This is useful
		since calling poll() on a task can trigger advance to
		the next task could eventually lead to the returncode
		being set in cases when polling only a single task would
		not have the same effect.
		"""

		prev = None
		while True:
			task = self._current_task
			if task is None or task is prev:
				# don't poll the same task more than once
				break
			task.poll()
			prev = task

		return self.returncode

	def _wait(self):

		prev = None
		while True:
			task = self._current_task
			if task is None:
				# don't wait for the same task more than once
				break
			if task is prev:
				# Before the task.wait() method returned, an exit
				# listener should have set self._current_task to either
				# a different task or None. Something is wrong.
				raise AssertionError("self._current_task has not " + \
					"changed since calling wait", self, task)
			task.wait()
			prev = task

		return self.returncode

	def _assert_current(self, task):
		"""
		Raises an AssertionError if the given task is not the
		same one as self._current_task. This can be useful
		for task exit listeners.
		"""
		if task is not self._current_task:
			raise AssertionError("Unrecognized task: %s" % (task,))

	def _default_exit(self, task):
		"""
		Calls _assert_current() on the given task and then sets the
		composite returncode attribute if task.returncode != os.EX_OK.
		If the task failed then self._current_task will be set to None.
		Subclasses can use this as a generic task exit callback.

		@rtype: int
		@returns: The task.returncode attribute.
		"""
		self._assert_current(task)
		if task.returncode != os.EX_OK:
			self.returncode = task.returncode
			self._current_task = None
		return task.returncode

	def _final_exit(self, task):
		"""
		Assumes that task is the final task of this composite task.
		Calls _default_exit() and sets self.returncode to the task's
		returncode and sets self._current_task to None.
		"""
		self._default_exit(task)
		self._current_task = None
		self.returncode = task.returncode
		return self.returncode

	def _default_final_exit(self, task):
		"""
		This calls _final_exit() and then wait().

		Subclasses can use this as a generic final task exit callback.

		"""
		self._final_exit(task)
		return self.wait()

	def _start_task(self, task, exit_handler):
		"""
		Register exit handler for the given task, set it
		as self._current_task, and call task.start().

		Subclasses can use this as a generic way to start
		a task.

		"""
		task.addExitListener(exit_handler)
		self._current_task = task
		task.start()
class TaskSequence(CompositeTask):
	"""
	A collection of tasks that executes sequentially. Each task
	must have a addExitListener() method that can be used as
	a means to trigger movement from one task to the next.
	"""

	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		self._task_queue = deque()

	def add(self, task):
		self._task_queue.append(task)

	def _start(self):
		self._start_next_task()

	def cancel(self):
		# Drop queued tasks so nothing else starts after cancellation.
		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		self._start_task(self._task_queue.popleft(),
			self._task_exit_handler)

	def _task_exit_handler(self, task):
		if self._default_exit(task) != os.EX_OK:
			# A failed task aborts the whole sequence.
			self.wait()
		elif self._task_queue:
			self._start_next_task()
		else:
			self._final_exit(task)
			self.wait()
2028 class SubProcess(AbstractPollTask):
2030 __slots__ = ("pid",) + \
2031 ("_files", "_reg_id")
2033 # A file descriptor is required for the scheduler to monitor changes from
2034 # inside a poll() loop. When logging is not enabled, create a pipe just to
2035 # serve this purpose alone.
2039 if self.returncode is not None:
2040 return self.returncode
2041 if self.pid is None:
2042 return self.returncode
2043 if self._registered:
2044 return self.returncode
2047 retval = os.waitpid(self.pid, os.WNOHANG)
2049 if e.errno != errno.ECHILD:
2052 retval = (self.pid, 1)
2054 if retval == (0, 0):
2056 self._set_returncode(retval)
2057 return self.returncode
2062 os.kill(self.pid, signal.SIGTERM)
2064 if e.errno != errno.ESRCH:
2068 self.cancelled = True
2069 if self.pid is not None:
2071 return self.returncode
2074 return self.pid is not None and \
2075 self.returncode is None
2079 if self.returncode is not None:
2080 return self.returncode
2082 if self._registered:
2083 self.scheduler.schedule(self._reg_id)
2085 if self.returncode is not None:
2086 return self.returncode
2089 wait_retval = os.waitpid(self.pid, 0)
2091 if e.errno != errno.ECHILD:
2094 self._set_returncode((self.pid, 1))
2096 self._set_returncode(wait_retval)
2098 return self.returncode
2100 def _unregister(self):
2102 Unregister from the scheduler and close open files.
2105 self._registered = False
2107 if self._reg_id is not None:
2108 self.scheduler.unregister(self._reg_id)
2111 if self._files is not None:
2112 for f in self._files.itervalues():
2116 def _set_returncode(self, wait_retval):
2118 retval = wait_retval[1]
2120 if retval != os.EX_OK:
2122 retval = (retval & 0xff) << 8
2124 retval = retval >> 8
2126 self.returncode = retval
2128 class SpawnProcess(SubProcess):
2131 Constructor keyword args are passed into portage.process.spawn().
2132 The required "args" keyword argument will be passed as the first
2136 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2137 "uid", "gid", "groups", "umask", "logfile",
2138 "path_lookup", "pre_exec")
2140 __slots__ = ("args",) + \
2143 _file_names = ("log", "process", "stdout")
2144 _files_dict = slot_dict_class(_file_names, prefix="")
2151 if self.fd_pipes is None:
2153 fd_pipes = self.fd_pipes
2154 fd_pipes.setdefault(0, sys.stdin.fileno())
2155 fd_pipes.setdefault(1, sys.stdout.fileno())
2156 fd_pipes.setdefault(2, sys.stderr.fileno())
2158 # flush any pending output
2159 for fd in fd_pipes.itervalues():
2160 if fd == sys.stdout.fileno():
2162 if fd == sys.stderr.fileno():
2165 logfile = self.logfile
2166 self._files = self._files_dict()
2169 master_fd, slave_fd = self._pipe(fd_pipes)
2170 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2171 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2174 fd_pipes_orig = fd_pipes.copy()
2176 # TODO: Use job control functions like tcsetpgrp() to control
2177 # access to stdin. Until then, use /dev/null so that any
2178 # attempts to read from stdin will immediately return EOF
2179 # instead of blocking indefinitely.
2180 null_input = open('/dev/null', 'rb')
2181 fd_pipes[0] = null_input.fileno()
2183 fd_pipes[0] = fd_pipes_orig[0]
2185 files.process = os.fdopen(master_fd, 'r')
2186 if logfile is not None:
2188 fd_pipes[1] = slave_fd
2189 fd_pipes[2] = slave_fd
2191 files.log = open(logfile, "a")
2192 portage.util.apply_secpass_permissions(logfile,
2193 uid=portage.portage_uid, gid=portage.portage_gid,
2196 if not self.background:
2197 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2199 output_handler = self._output_handler
2203 # Create a dummy pipe so the scheduler can monitor
2204 # the process from inside a poll() loop.
2205 fd_pipes[self._dummy_pipe_fd] = slave_fd
2207 fd_pipes[1] = slave_fd
2208 fd_pipes[2] = slave_fd
2209 output_handler = self._dummy_handler
2212 for k in self._spawn_kwarg_names:
2213 v = getattr(self, k)
2217 kwargs["fd_pipes"] = fd_pipes
2218 kwargs["returnpid"] = True
2219 kwargs.pop("logfile", None)
2221 self._reg_id = self.scheduler.register(files.process.fileno(),
2222 self._registered_events, output_handler)
2223 self._registered = True
2225 retval = self._spawn(self.args, **kwargs)
2228 if null_input is not None:
2231 if isinstance(retval, int):
2234 self.returncode = retval
2238 self.pid = retval[0]
2239 portage.process.spawned_pids.remove(self.pid)
2241 def _pipe(self, fd_pipes):
2243 @type fd_pipes: dict
2244 @param fd_pipes: pipes from which to copy terminal size if desired.
2248 def _spawn(self, args, **kwargs):
2249 return portage.process.spawn(args, **kwargs)
2251 def _output_handler(self, fd, event):
2253 if event & PollConstants.POLLIN:
2256 buf = array.array('B')
2258 buf.fromfile(files.process, self._bufsize)
2263 if not self.background:
2264 buf.tofile(files.stdout)
2265 files.stdout.flush()
2266 buf.tofile(files.log)
2272 self._unregister_if_appropriate(event)
2273 return self._registered
2275 def _dummy_handler(self, fd, event):
2277 This method is mainly interested in detecting EOF, since
2278 the only purpose of the pipe is to allow the scheduler to
2279 monitor the process from inside a poll() loop.
2282 if event & PollConstants.POLLIN:
2284 buf = array.array('B')
2286 buf.fromfile(self._files.process, self._bufsize)
2296 self._unregister_if_appropriate(event)
2297 return self._registered
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	"""

	__slots__ = ("commands", "phase", "pkg", "settings")

	def _start(self):
		settings = self.settings
		# misc-functions.sh must not inherit a stale EBUILD_PHASE.
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Fold in the exit-status file check, which catches failures the
		# shell exit code alone would miss.
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
	"""
	Fetches an ebuild's SRC_URI files by spawning the 'ebuild' helper
	in a subprocess, using a private build dir for locking and elog.
	"""

	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
		("_build_dir",)

	def _start(self):

		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
		self._build_dir.lock()
		self._build_dir.clean()
		portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
		if self.logfile is None:
			self.logfile = settings.get("PORTAGE_LOG_FILE")

		phase = "fetch"
		if self.fetchall:
			phase = "fetchall"

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		fetch_env["PORTAGE_NICENESS"] = "0"
		if self.prefetch:
			fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
		if debug:
			fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			return os.pipe()
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				elog_out = None
				if self.logfile is not None:
					if self.background:
						elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
					elog_out.close()
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
			features = self._build_dir.settings.features
			if self.returncode == os.EX_OK:
				self._build_dir.clean()
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
2417 class EbuildBuildDir(SlotObject):
2419 __slots__ = ("dir_path", "pkg", "settings",
2420 "locked", "_catdir", "_lock_obj")
2422 def __init__(self, **kwargs):
2423 SlotObject.__init__(self, **kwargs)
2428 This raises an AlreadyLocked exception if lock() is called
2429 while a lock is already held. In order to avoid this, call
2430 unlock() or check whether the "locked" attribute is True
2431 or False before calling lock().
2433 if self._lock_obj is not None:
2434 raise self.AlreadyLocked((self._lock_obj,))
2436 dir_path = self.dir_path
2437 if dir_path is None:
2438 root_config = self.pkg.root_config
2439 portdb = root_config.trees["porttree"].dbapi
2440 ebuild_path = portdb.findname(self.pkg.cpv)
2441 settings = self.settings
2442 settings.setcpv(self.pkg)
2443 debug = settings.get("PORTAGE_DEBUG") == "1"
2444 use_cache = 1 # always true
2445 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2446 self.settings, debug, use_cache, portdb)
2447 dir_path = self.settings["PORTAGE_BUILDDIR"]
2449 catdir = os.path.dirname(dir_path)
2450 self._catdir = catdir
2452 portage.util.ensure_dirs(os.path.dirname(catdir),
2453 gid=portage.portage_gid,
2457 catdir_lock = portage.locks.lockdir(catdir)
2458 portage.util.ensure_dirs(catdir,
2459 gid=portage.portage_gid,
2461 self._lock_obj = portage.locks.lockdir(dir_path)
2463 self.locked = self._lock_obj is not None
2464 if catdir_lock is not None:
2465 portage.locks.unlockdir(catdir_lock)
2468 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2469 by keepwork or keeptemp in FEATURES."""
2470 settings = self.settings
2471 features = settings.features
2472 if not ("keepwork" in features or "keeptemp" in features):
2474 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2475 except EnvironmentError, e:
2476 if e.errno != errno.ENOENT:
2481 if self._lock_obj is None:
2484 portage.locks.unlockdir(self._lock_obj)
2485 self._lock_obj = None
2488 catdir = self._catdir
2491 catdir_lock = portage.locks.lockdir(catdir)
2497 if e.errno not in (errno.ENOENT,
2498 errno.ENOTEMPTY, errno.EEXIST):
2501 portage.locks.unlockdir(catdir_lock)
2503 class AlreadyLocked(portage.exception.PortageException):
2506 class EbuildBuild(CompositeTask):
# Composite task that drives a from-source ebuild build: wait on an
# optional background prefetcher, fetch distfiles, run the build via
# EbuildExecuter, optionally package via EbuildBinpkg, then merge.
# NOTE(review): the left-hand numbers in this listing show gaps -- some
# original lines are missing from this view; code is reproduced as-is.
2508 __slots__ = ("args_set", "config_pool", "find_blockers",
2509 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2510 "prefetcher", "settings", "world_atom") + \
2511 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# _start (def header not in this view): cache per-task attributes,
# resolve the ebuild path through portdb.findname(), then either hook
# onto a still-running prefetcher or call _prefetch_exit() directly.
2515 logger = self.logger
2518 settings = self.settings
2519 world_atom = self.world_atom
2520 root_config = pkg.root_config
2523 portdb = root_config.trees[tree].dbapi
2524 settings.setcpv(pkg)
2525 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2526 ebuild_path = portdb.findname(self.pkg.cpv)
2527 self._ebuild_path = ebuild_path
2529 prefetcher = self.prefetcher
2530 if prefetcher is None:
2532 elif not prefetcher.isAlive():
2534 elif prefetcher.poll() is None:
# Prefetcher still running: print a wrapped notice (unless running in
# background mode) and register _prefetch_exit as its exit listener.
2536 waiting_msg = "Fetching files " + \
2537 "in the background. " + \
2538 "To view fetch progress, run `tail -f " + \
2539 "/var/log/emerge-fetch.log` in another " + \
2541 msg_prefix = colorize("GOOD", " * ")
2542 from textwrap import wrap
2543 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2544 for line in wrap(waiting_msg, 65))
2545 if not self.background:
2546 writemsg(waiting_msg, noiselevel=-1)
2548 self._current_task = prefetcher
2549 prefetcher.addExitListener(self._prefetch_exit)
2552 self._prefetch_exit(prefetcher)
2554 def _prefetch_exit(self, prefetcher):
# After prefetch: one branch runs a synchronous EbuildFetchonly and
# stores its return code; the other starts an async EbuildFetcher.
# The guarding conditions are in lines missing from this view.
2558 settings = self.settings
2561 fetcher = EbuildFetchonly(
2562 fetch_all=opts.fetch_all_uri,
2563 pkg=pkg, pretend=opts.pretend,
2565 retval = fetcher.execute()
2566 self.returncode = retval
2570 fetcher = EbuildFetcher(config_pool=self.config_pool,
2571 fetchall=opts.fetch_all_uri,
2572 fetchonly=opts.fetchonly,
2573 background=self.background,
2574 pkg=pkg, scheduler=self.scheduler)
2576 self._start_task(fetcher, self._fetch_exit)
2578 def _fetch_exit(self, fetcher):
# Record fetch failure; keep the fetch log only on failure (it is
# unlinked on success), then proceed to the build when appropriate.
2582 fetch_failed = False
2584 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2586 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2588 if fetch_failed and fetcher.logfile is not None and \
2589 os.path.exists(fetcher.logfile):
2590 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2592 if not fetch_failed and fetcher.logfile is not None:
2593 # Fetch was successful, so remove the fetch log.
2595 os.unlink(fetcher.logfile)
2599 if fetch_failed or opts.fetchonly:
2603 logger = self.logger
2605 pkg_count = self.pkg_count
2606 scheduler = self.scheduler
2607 settings = self.settings
2608 features = settings.features
2609 ebuild_path = self._ebuild_path
2610 system_set = pkg.root_config.sets["system"]
# Acquire the build-dir lock before any phase runs; it is released via
# _unlock_builddir() on failure paths or after install().
2612 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2613 self._build_dir.lock()
2615 # Cleaning is triggered before the setup
2616 # phase, in portage.doebuild().
2617 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2618 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2619 short_msg = "emerge: (%s of %s) %s Clean" % \
2620 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2621 logger.log(msg, short_msg=short_msg)
2623 #buildsyspkg: Check if we need to _force_ binary package creation
2624 self._issyspkg = "buildsyspkg" in features and \
2625 system_set.findAtomForPackage(pkg) and \
2628 if opts.buildpkg or self._issyspkg:
2630 self._buildpkg = True
2632 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2633 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2634 short_msg = "emerge: (%s of %s) %s Compile" % \
2635 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2636 logger.log(msg, short_msg=short_msg)
2639 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2640 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2641 short_msg = "emerge: (%s of %s) %s Compile" % \
2642 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2643 logger.log(msg, short_msg=short_msg)
2645 build = EbuildExecuter(background=self.background, pkg=pkg,
2646 scheduler=scheduler, settings=settings)
2647 self._start_task(build, self._build_exit)
2649 def _unlock_builddir(self):
# Flush pending elog messages before releasing the build-dir lock.
2650 portage.elog.elog_process(self.pkg.cpv, self.settings)
2651 self._build_dir.unlock()
2653 def _build_exit(self, build):
2654 if self._default_exit(build) != os.EX_OK:
2655 self._unlock_builddir()
2660 buildpkg = self._buildpkg
2663 self._final_exit(build)
# _issyspkg path: announce the forced rescue tarball, mirroring the
# message to PORTAGE_LOG_FILE when one is configured.
2668 msg = ">>> This is a system package, " + \
2669 "let's pack a rescue tarball.\n"
2671 log_path = self.settings.get("PORTAGE_LOG_FILE")
2672 if log_path is not None:
2673 log_file = open(log_path, 'a')
2679 if not self.background:
2680 portage.writemsg_stdout(msg, noiselevel=-1)
2682 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2683 scheduler=self.scheduler, settings=self.settings)
2685 self._start_task(packager, self._buildpkg_exit)
2687 def _buildpkg_exit(self, packager):
2689 Released build dir lock when there is a failure or
2690 when in buildpkgonly mode. Otherwise, the lock will
2691 be released when merge() is called.
2694 if self._default_exit(packager) != os.EX_OK:
2695 self._unlock_builddir()
2699 if self.opts.buildpkgonly:
2700 # Need to call "clean" phase for buildpkgonly mode
2701 portage.elog.elog_process(self.pkg.cpv, self.settings)
2703 clean_phase = EbuildPhase(background=self.background,
2704 pkg=self.pkg, phase=phase,
2705 scheduler=self.scheduler, settings=self.settings,
2707 self._start_task(clean_phase, self._clean_exit)
2710 # Continue holding the builddir lock until
2711 # after the package has been installed.
2712 self._current_task = None
2713 self.returncode = packager.returncode
2716 def _clean_exit(self, clean_phase):
2717 if self._final_exit(clean_phase) != os.EX_OK or \
2718 self.opts.buildpkgonly:
2719 self._unlock_builddir()
# install() (def header not in this view):
2724 Install the package and then clean up and release locks.
2725 Only call this after the build has completed successfully
2726 and neither fetchonly nor buildpkgonly mode are enabled.
2729 find_blockers = self.find_blockers
2730 ldpath_mtimes = self.ldpath_mtimes
2731 logger = self.logger
2733 pkg_count = self.pkg_count
2734 settings = self.settings
2735 world_atom = self.world_atom
2736 ebuild_path = self._ebuild_path
# NOTE(review): the local `find_blockers` bound above is unused here --
# the keyword argument below re-reads self.find_blockers directly.
2739 merge = EbuildMerge(find_blockers=self.find_blockers,
2740 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2741 pkg_count=pkg_count, pkg_path=ebuild_path,
2742 scheduler=self.scheduler,
2743 settings=settings, tree=tree, world_atom=world_atom)
2745 msg = " === (%s of %s) Merging (%s::%s)" % \
2746 (pkg_count.curval, pkg_count.maxval,
2747 pkg.cpv, ebuild_path)
2748 short_msg = "emerge: (%s of %s) %s Merge" % \
2749 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2750 logger.log(msg, short_msg=short_msg)
2753 rval = merge.execute()
2755 self._unlock_builddir()
2759 class EbuildExecuter(CompositeTask):
# Runs the ordered ebuild phases (clean, setup, unpack, then the
# _phases sequence) as a chain of EbuildPhase tasks.  NOTE(review):
# left-hand numbers show gaps -- some original lines are not in view.
2761 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2763 _phases = ("prepare", "configure", "compile", "test", "install")
2765 _live_eclasses = frozenset([
# _start (def header not in view): kick off with a clean phase.
2775 self._tree = "porttree"
2778 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2779 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2780 self._start_task(clean_phase, self._clean_phase_exit)
2782 def _clean_phase_exit(self, clean_phase):
# After clean: prepare build dirs and hand the setup phase to the
# scheduler's dedicated setup queue rather than _start_task.
2784 if self._default_exit(clean_phase) != os.EX_OK:
2789 scheduler = self.scheduler
2790 settings = self.settings
2793 # This initializes PORTAGE_LOG_FILE.
2794 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2796 setup_phase = EbuildPhase(background=self.background,
2797 pkg=pkg, phase="setup", scheduler=scheduler,
2798 settings=settings, tree=self._tree)
2800 setup_phase.addExitListener(self._setup_exit)
2801 self._current_task = setup_phase
2802 self.scheduler.scheduleSetup(setup_phase)
2804 def _setup_exit(self, setup_phase):
2806 if self._default_exit(setup_phase) != os.EX_OK:
2810 unpack_phase = EbuildPhase(background=self.background,
2811 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2812 settings=self.settings, tree=self._tree)
2814 if self._live_eclasses.intersection(self.pkg.inherited):
2815 # Serialize $DISTDIR access for live ebuilds since
2816 # otherwise they can interfere with eachother.
2818 unpack_phase.addExitListener(self._unpack_exit)
2819 self._current_task = unpack_phase
2820 self.scheduler.scheduleUnpack(unpack_phase)
2823 self._start_task(unpack_phase, self._unpack_exit)
2825 def _unpack_exit(self, unpack_phase):
# Queue the remaining phases as a TaskSequence; older EAPIs skip
# src_prepare/src_configure (the slicing lines are not in view).
2827 if self._default_exit(unpack_phase) != os.EX_OK:
2831 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2834 phases = self._phases
2835 eapi = pkg.metadata["EAPI"]
2836 if eapi in ("0", "1", "2_pre1"):
2837 # skip src_prepare and src_configure
2839 elif eapi in ("2_pre2",):
2843 for phase in phases:
2844 ebuild_phases.add(EbuildPhase(background=self.background,
2845 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2846 settings=self.settings, tree=self._tree))
2848 self._start_task(ebuild_phases, self._default_final_exit)
2850 class EbuildMetadataPhase(SubProcess):
2853 Asynchronous interface for the ebuild "depend" phase which is
2854 used to extract metadata from the ebuild.
2857 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2858 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2861 _file_names = ("ebuild",)
2862 _files_dict = slot_dict_class(_file_names, prefix="")
# _start (def header not in view): set up a non-blocking pipe that the
# spawned "depend" phase writes metadata into, register the read end
# with the scheduler, then spawn via portage.doebuild(returnpid=True).
# NOTE(review): left-hand numbers show gaps in this listing.
2866 settings = self.settings
2868 ebuild_path = self.ebuild_path
2869 debug = settings.get("PORTAGE_DEBUG") == "1"
2873 if self.fd_pipes is not None:
2874 fd_pipes = self.fd_pipes.copy()
2878 fd_pipes.setdefault(0, sys.stdin.fileno())
2879 fd_pipes.setdefault(1, sys.stdout.fileno())
2880 fd_pipes.setdefault(2, sys.stderr.fileno())
2882 # flush any pending output
2883 for fd in fd_pipes.itervalues():
2884 if fd == sys.stdout.fileno():
2886 if fd == sys.stderr.fileno():
2889 fd_pipes_orig = fd_pipes.copy()
2890 self._files = self._files_dict()
2893 master_fd, slave_fd = os.pipe()
# Make the master end non-blocking so _output_handler can drain it
# from the event loop without stalling.
2894 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2895 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2897 fd_pipes[self._metadata_fd] = slave_fd
2899 self._raw_metadata = []
2900 files.ebuild = os.fdopen(master_fd, 'r')
2901 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2902 self._registered_events, self._output_handler)
2903 self._registered = True
2905 retval = portage.doebuild(ebuild_path, "depend",
2906 settings["ROOT"], settings, debug,
2907 mydbapi=self.portdb, tree="porttree",
2908 fd_pipes=fd_pipes, returnpid=True)
2912 if isinstance(retval, int):
2913 # doebuild failed before spawning
2915 self.returncode = retval
2919 self.pid = retval[0]
# This task reaps the child itself, so remove it from the global list.
2920 portage.process.spawned_pids.remove(self.pid)
2922 def _output_handler(self, fd, event):
# Scheduler callback: accumulate raw metadata until EOF (empty read).
2924 if event & PollConstants.POLLIN:
2925 self._raw_metadata.append(self._files.ebuild.read())
2926 if not self._raw_metadata[-1]:
2930 self._unregister_if_appropriate(event)
2931 return self._registered
2933 def _set_returncode(self, wait_retval):
# On success, split captured output into one line per auxdb key and
# hand the zipped pairs to metadata_callback.
2934 SubProcess._set_returncode(self, wait_retval)
2935 if self.returncode == os.EX_OK:
2936 metadata_lines = "".join(self._raw_metadata).splitlines()
2937 if len(portage.auxdbkeys) != len(metadata_lines):
2938 # Don't trust bash's returncode if the
2939 # number of lines is incorrect.
2942 metadata = izip(portage.auxdbkeys, metadata_lines)
2943 self.metadata_callback(self.cpv, self.ebuild_path,
2944 self.repo_path, metadata, self.ebuild_mtime)
2946 class EbuildProcess(SpawnProcess):
# SpawnProcess subclass that runs a single ebuild phase through
# portage.doebuild().  NOTE(review): left-hand numbers show gaps --
# some original lines are not in this view.
2948 __slots__ = ("phase", "pkg", "settings", "tree")
# _start (def header not in view):
2951 # Don't open the log file during the clean phase since the
2952 # open file can result in an nfs lock on $T/build.log which
2953 # prevents the clean phase from removing $T.
2954 if self.phase not in ("clean", "cleanrm"):
2955 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2956 SpawnProcess._start(self)
2958 def _pipe(self, fd_pipes):
# Prefer a pty (with terminal size copied from the stdout fd) so
# build output behaves like a terminal; falls back to a plain pipe.
2959 stdout_pipe = fd_pipes.get(1)
2960 got_pty, master_fd, slave_fd = \
2961 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2962 return (master_fd, slave_fd)
2964 def _spawn(self, args, **kwargs):
# Delegate the actual phase execution to portage.doebuild().
2966 root_config = self.pkg.root_config
2968 mydbapi = root_config.trees[tree].dbapi
2969 settings = self.settings
2970 ebuild_path = settings["EBUILD"]
2971 debug = settings.get("PORTAGE_DEBUG") == "1"
2973 rval = portage.doebuild(ebuild_path, self.phase,
2974 root_config.root, settings, debug,
2975 mydbapi=mydbapi, tree=tree, **kwargs)
2979 def _set_returncode(self, wait_retval):
# Post-process the exit status: validate the phase's exit-status
# file, and honor FEATURES=test-fail-continue for the test phase.
2980 SpawnProcess._set_returncode(self, wait_retval)
2982 if self.phase not in ("clean", "cleanrm"):
2983 self.returncode = portage._doebuild_exit_status_check_and_log(
2984 self.settings, self.phase, self.returncode)
2986 if self.phase == "test" and self.returncode != os.EX_OK and \
2987 "test-fail-continue" in self.settings.features:
2988 self.returncode = os.EX_OK
2990 portage._post_phase_userpriv_perms(self.settings)
2992 class EbuildPhase(CompositeTask):
# Wraps one EbuildProcess phase and, when configured, a follow-up
# MiscFunctionsProcess of post-phase commands.  NOTE(review): the
# left-hand numbers show gaps -- some original lines are not in view.
2994 __slots__ = ("background", "pkg", "phase",
2995 "scheduler", "settings", "tree")
2997 _post_phase_cmds = portage._post_phase_cmds
# _start (def header not in view): launch the phase subprocess.
3001 ebuild_process = EbuildProcess(background=self.background,
3002 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3003 settings=self.settings, tree=self.tree)
3005 self._start_task(ebuild_process, self._ebuild_exit)
3007 def _ebuild_exit(self, ebuild_process):
# For the install phase, scan the build log for QA problems
# (portage._check_build_log), writing into the log when backgrounded.
3009 if self.phase == "install":
3011 log_path = self.settings.get("PORTAGE_LOG_FILE")
3013 if self.background and log_path is not None:
3014 log_file = open(log_path, 'a')
3017 portage._check_build_log(self.settings, out=out)
3019 if log_file is not None:
3022 if self._default_exit(ebuild_process) != os.EX_OK:
3026 settings = self.settings
3028 if self.phase == "install":
3029 portage._post_src_install_uid_fix(settings)
3031 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3032 if post_phase_cmds is not None:
3033 post_phase = MiscFunctionsProcess(background=self.background,
3034 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3035 scheduler=self.scheduler, settings=settings)
3036 self._start_task(post_phase, self._post_phase_exit)
3039 self.returncode = ebuild_process.returncode
3040 self._current_task = None
3043 def _post_phase_exit(self, post_phase):
3044 if self._final_exit(post_phase) != os.EX_OK:
3045 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3047 self._current_task = None
3051 class EbuildBinpkg(EbuildProcess):
# Runs the "package" phase to build a .tbz2 into a pid-suffixed temp
# file, then injects it into the bintree on success.
3053 This assumes that src_install() has successfully completed.
3055 __slots__ = ("_binpkg_tmpfile",)
# _start (def header not in view): point PORTAGE_BINPKG_TMPFILE at a
# temp path under bintree.pkgdir before spawning, and pop it after.
# NOTE(review): left-hand numbers show gaps in this listing.
3058 self.phase = "package"
3059 self.tree = "porttree"
3061 root_config = pkg.root_config
3062 portdb = root_config.trees["porttree"].dbapi
3063 bintree = root_config.trees["bintree"]
3064 ebuild_path = portdb.findname(self.pkg.cpv)
3065 settings = self.settings
3066 debug = settings.get("PORTAGE_DEBUG") == "1"
3068 bintree.prevent_collision(pkg.cpv)
# pid suffix keeps parallel builds from clobbering each other's file.
3069 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3070 pkg.cpv + ".tbz2." + str(os.getpid()))
3071 self._binpkg_tmpfile = binpkg_tmpfile
3072 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3073 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3076 EbuildProcess._start(self)
3078 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3080 def _set_returncode(self, wait_retval):
3081 EbuildProcess._set_returncode(self, wait_retval)
3084 bintree = pkg.root_config.trees["bintree"]
3085 binpkg_tmpfile = self._binpkg_tmpfile
3086 if self.returncode == os.EX_OK:
3087 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3089 class EbuildMerge(SlotObject):
# Synchronous wrapper around portage.merge() that also updates the
# world file (via world_atom) and logs completion on success.
# NOTE(review): left-hand numbers show gaps in this listing.
3091 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3092 "pkg", "pkg_count", "pkg_path", "pretend",
3093 "scheduler", "settings", "tree", "world_atom")
# execute() (def header not in view): merge the image in
# $PORTAGE_BUILDDIR/build-info into root_config.root.
3096 root_config = self.pkg.root_config
3097 settings = self.settings
3098 retval = portage.merge(settings["CATEGORY"],
3099 settings["PF"], settings["D"],
3100 os.path.join(settings["PORTAGE_BUILDDIR"],
3101 "build-info"), root_config.root, settings,
3102 myebuild=settings["EBUILD"],
3103 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3104 vartree=root_config.trees["vartree"],
3105 prev_mtimes=self.ldpath_mtimes,
3106 scheduler=self.scheduler,
3107 blockers=self.find_blockers)
3109 if retval == os.EX_OK:
3110 self.world_atom(self.pkg)
3115 def _log_success(self):
# NOTE(review): `pkg` is used below but not bound in the lines shown;
# presumably `pkg = self.pkg` is on a line missing from this view.
3117 pkg_count = self.pkg_count
3118 pkg_path = self.pkg_path
3119 logger = self.logger
3120 if "noclean" not in self.settings.features:
3121 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3122 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3123 logger.log((" === (%s of %s) " + \
3124 "Post-Build Cleaning (%s::%s)") % \
3125 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3126 short_msg=short_msg)
3127 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3128 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3130 class PackageUninstall(AsynchronousTask):
# Runs unmerge() for a single installed cpv; converts an
# UninstallFailure into this task's returncode.  NOTE(review):
# left-hand numbers show gaps -- some original lines are not in view.
3132 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# _start (def header not in view):
3136 unmerge(self.pkg.root_config, self.opts, "unmerge",
3137 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3138 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3139 writemsg_level=self._writemsg_level)
3140 except UninstallFailure, e:
3141 self.returncode = e.status
3143 self.returncode = os.EX_OK
3146 def _writemsg_level(self, msg, level=0, noiselevel=0):
# Route unmerge output: straight to the console when there is no log
# file (suppressing low-severity messages in background mode),
# otherwise also append to PORTAGE_LOG_FILE.
3148 log_path = self.settings.get("PORTAGE_LOG_FILE")
3149 background = self.background
3151 if log_path is None:
3152 if not (background and level < logging.WARNING):
3153 portage.util.writemsg_level(msg,
3154 level=level, noiselevel=noiselevel)
3157 portage.util.writemsg_level(msg,
3158 level=level, noiselevel=noiselevel)
3160 f = open(log_path, 'a')
3166 class Binpkg(CompositeTask):
# Composite task for installing from a binary package: optional
# prefetch wait, fetch, verify, clean, setup, extract, then merge.
# NOTE(review): the left-hand numbers in this listing show gaps --
# some original lines are missing from this view; code kept as-is.
3168 __slots__ = ("find_blockers",
3169 "ldpath_mtimes", "logger", "opts",
3170 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3171 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3172 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3174 def _writemsg_level(self, msg, level=0, noiselevel=0):
# Write to the console (foreground only) and mirror to the log file.
3176 if not self.background:
3177 portage.util.writemsg_level(msg,
3178 level=level, noiselevel=noiselevel)
3180 log_path = self.settings.get("PORTAGE_LOG_FILE")
3181 if log_path is not None:
3182 f = open(log_path, 'a')
# _start (def header not in view): derive paths under PORTAGE_TMPDIR,
# prime the environment via doebuild_environment(), then wait on or
# bypass the prefetcher exactly as EbuildBuild._start does.
3191 settings = self.settings
3192 settings.setcpv(pkg)
3193 self._tree = "bintree"
3194 self._bintree = self.pkg.root_config.trees[self._tree]
3195 self._verify = not self.opts.pretend
3197 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3198 "portage", pkg.category, pkg.pf)
3199 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3200 pkg=pkg, settings=settings)
3201 self._image_dir = os.path.join(dir_path, "image")
3202 self._infloc = os.path.join(dir_path, "build-info")
3203 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3204 settings["EBUILD"] = self._ebuild_path
3205 debug = settings.get("PORTAGE_DEBUG") == "1"
3206 portage.doebuild_environment(self._ebuild_path, "setup",
3207 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3208 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3210 # The prefetcher has already completed or it
3211 # could be running now. If it's running now,
3212 # wait for it to complete since it holds
3213 # a lock on the file being fetched. The
3214 # portage.locks functions are only designed
3215 # to work between separate processes. Since
3216 # the lock is held by the current process,
3217 # use the scheduler and fetcher methods to
3218 # synchronize with the fetcher.
3219 prefetcher = self.prefetcher
3220 if prefetcher is None:
3222 elif not prefetcher.isAlive():
3224 elif prefetcher.poll() is None:
3226 waiting_msg = ("Fetching '%s' " + \
3227 "in the background. " + \
3228 "To view fetch progress, run `tail -f " + \
3229 "/var/log/emerge-fetch.log` in another " + \
3230 "terminal.") % prefetcher.pkg_path
3231 msg_prefix = colorize("GOOD", " * ")
3232 from textwrap import wrap
3233 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3234 for line in wrap(waiting_msg, 65))
3235 if not self.background:
3236 writemsg(waiting_msg, noiselevel=-1)
3238 self._current_task = prefetcher
3239 prefetcher.addExitListener(self._prefetch_exit)
3242 self._prefetch_exit(prefetcher)
3244 def _prefetch_exit(self, prefetcher):
# Lock and wipe the build dir (unless pretend/fetchonly), then start
# a BinpkgFetcher; skip logging when the package is already local.
3247 pkg_count = self.pkg_count
3248 if not (self.opts.pretend or self.opts.fetchonly):
3249 self._build_dir.lock()
3251 shutil.rmtree(self._build_dir.dir_path)
3252 except EnvironmentError, e:
3253 if e.errno != errno.ENOENT:
3256 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3257 fetcher = BinpkgFetcher(background=self.background,
3258 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3259 pretend=self.opts.pretend, scheduler=self.scheduler)
3260 pkg_path = fetcher.pkg_path
3261 self._pkg_path = pkg_path
3263 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3265 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3266 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3267 short_msg = "emerge: (%s of %s) %s Fetch" % \
3268 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3269 self.logger.log(msg, short_msg=short_msg)
3270 self._start_task(fetcher, self._fetcher_exit)
3273 self._fetcher_exit(fetcher)
3275 def _fetcher_exit(self, fetcher):
3277 # The fetcher only has a returncode when
3278 # --getbinpkg is enabled.
3279 if fetcher.returncode is not None:
3280 self._fetched_pkg = True
3281 if self._default_exit(fetcher) != os.EX_OK:
3282 self._unlock_builddir()
3286 if self.opts.pretend:
3287 self._current_task = None
3288 self.returncode = os.EX_OK
# Verification step (the _verify guard lines are not in this view).
3296 logfile = self.settings.get("PORTAGE_LOG_FILE")
3297 verifier = BinpkgVerifier(background=self.background,
3298 logfile=logfile, pkg=self.pkg)
3299 self._start_task(verifier, self._verifier_exit)
3302 self._verifier_exit(verifier)
3304 def _verifier_exit(self, verifier):
# Inject a freshly fetched package into the bintree, log the merge,
# then run a clean phase before extraction.
3305 if verifier is not None and \
3306 self._default_exit(verifier) != os.EX_OK:
3307 self._unlock_builddir()
3311 logger = self.logger
3313 pkg_count = self.pkg_count
3314 pkg_path = self._pkg_path
3316 if self._fetched_pkg:
3317 self._bintree.inject(pkg.cpv, filename=pkg_path)
3319 if self.opts.fetchonly:
3320 self._current_task = None
3321 self.returncode = os.EX_OK
3325 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3326 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3327 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3328 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3329 logger.log(msg, short_msg=short_msg)
3332 settings = self.settings
3333 ebuild_phase = EbuildPhase(background=self.background,
3334 pkg=pkg, phase=phase, scheduler=self.scheduler,
3335 settings=settings, tree=self._tree)
3337 self._start_task(ebuild_phase, self._clean_exit)
3339 def _clean_exit(self, clean_phase):
3340 if self._default_exit(clean_phase) != os.EX_OK:
3341 self._unlock_builddir()
# Unpack metadata (def header for the next step not in view):
# recreate dir/image/build-info dirs, extract xpak info, record the
# md5 in BINPKGMD5, then schedule the setup phase.
3345 dir_path = self._build_dir.dir_path
3348 shutil.rmtree(dir_path)
3349 except (IOError, OSError), e:
3350 if e.errno != errno.ENOENT:
3354 infloc = self._infloc
3356 pkg_path = self._pkg_path
3359 for mydir in (dir_path, self._image_dir, infloc):
3360 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3361 gid=portage.data.portage_gid, mode=dir_mode)
3363 # This initializes PORTAGE_LOG_FILE.
3364 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3365 self._writemsg_level(">>> Extracting info\n")
3367 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3368 check_missing_metadata = ("CATEGORY", "PF")
3369 missing_metadata = set()
3370 for k in check_missing_metadata:
3371 v = pkg_xpak.getfile(k)
3373 missing_metadata.add(k)
3375 pkg_xpak.unpackinfo(infloc)
3376 for k in missing_metadata:
3384 f = open(os.path.join(infloc, k), 'wb')
3390 # Store the md5sum in the vdb.
3391 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3393 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3397 # This gives bashrc users an opportunity to do various things
3398 # such as remove binary packages after they're installed.
3399 settings = self.settings
3400 settings.setcpv(self.pkg)
3401 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3402 settings.backup_changes("PORTAGE_BINPKG_FILE")
3405 setup_phase = EbuildPhase(background=self.background,
3406 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3407 settings=settings, tree=self._tree)
3409 setup_phase.addExitListener(self._setup_exit)
3410 self._current_task = setup_phase
3411 self.scheduler.scheduleSetup(setup_phase)
3413 def _setup_exit(self, setup_phase):
3414 if self._default_exit(setup_phase) != os.EX_OK:
3415 self._unlock_builddir()
3419 extractor = BinpkgExtractorAsync(background=self.background,
3420 image_dir=self._image_dir,
3421 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3422 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3423 self._start_task(extractor, self._extractor_exit)
3425 def _extractor_exit(self, extractor):
3426 if self._final_exit(extractor) != os.EX_OK:
3427 self._unlock_builddir()
3428 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3432 def _unlock_builddir(self):
# No lock is held in pretend/fetchonly mode (see _prefetch_exit).
3433 if self.opts.pretend or self.opts.fetchonly:
3435 portage.elog.elog_process(self.pkg.cpv, self.settings)
3436 self._build_dir.unlock()
# install() (def header not in view): merge the extracted image,
# exposing PORTAGE_BINPKG_FILE to bashrc hooks for the duration.
3440 # This gives bashrc users an opportunity to do various things
3441 # such as remove binary packages after they're installed.
3442 settings = self.settings
3443 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3444 settings.backup_changes("PORTAGE_BINPKG_FILE")
3446 merge = EbuildMerge(find_blockers=self.find_blockers,
3447 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3448 pkg=self.pkg, pkg_count=self.pkg_count,
3449 pkg_path=self._pkg_path, scheduler=self.scheduler,
3450 settings=settings, tree=self._tree, world_atom=self.world_atom)
3453 retval = merge.execute()
3455 settings.pop("PORTAGE_BINPKG_FILE", None)
3456 self._unlock_builddir()
3459 class BinpkgFetcher(SpawnProcess):
# Fetches a binary package via the configured FETCHCOMMAND /
# RESUMECOMMAND, with optional distlocks-based file locking.
# NOTE(review): left-hand numbers show gaps -- some original lines
# (including several def headers) are not in this view.
3461 __slots__ = ("pkg", "pretend",
3462 "locked", "pkg_path", "_lock_obj")
3464 def __init__(self, **kwargs):
3465 SpawnProcess.__init__(self, **kwargs)
3467 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start (def header not in view): compute the fetch URI and command,
# then spawn it with output redirected to stdout.
3475 pretend = self.pretend
3476 bintree = pkg.root_config.trees["bintree"]
3477 settings = bintree.settings
3478 use_locks = "distlocks" in settings.features
3479 pkg_path = self.pkg_path
3482 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# A partial download listed in bintree.invalids is resumed in place.
3485 exists = os.path.exists(pkg_path)
3486 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3487 if not (pretend or resume):
3488 # Remove existing file or broken symlink.
3494 # urljoin doesn't work correctly with
3495 # unrecognized protocols like sftp
3496 if bintree._remote_has_index:
3497 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3499 rel_uri = pkg.cpv + ".tbz2"
3500 uri = bintree._remote_base_uri.rstrip("/") + \
3501 "/" + rel_uri.lstrip("/")
3503 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3504 "/" + pkg.pf + ".tbz2"
3507 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3508 self.returncode = os.EX_OK
# Protocol-specific command (e.g. FETCHCOMMAND_HTTP) wins over the
# generic FETCHCOMMAND / RESUMECOMMAND setting.
3512 protocol = urlparse.urlparse(uri)[0]
3513 fcmd_prefix = "FETCHCOMMAND"
3515 fcmd_prefix = "RESUMECOMMAND"
3516 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3518 fcmd = settings.get(fcmd_prefix)
3521 "DISTDIR" : os.path.dirname(pkg_path),
3523 "FILE" : os.path.basename(pkg_path)
3526 fetch_env = dict(settings.iteritems())
3527 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3528 for x in shlex.split(fcmd)]
3530 if self.fd_pipes is None:
3532 fd_pipes = self.fd_pipes
3534 # Redirect all output to stdout since some fetchers like
3535 # wget pollute stderr (if portage detects a problem then it
3536 # can send it's own message to stderr).
3537 fd_pipes.setdefault(0, sys.stdin.fileno())
3538 fd_pipes.setdefault(1, sys.stdout.fileno())
3539 fd_pipes.setdefault(2, sys.stdout.fileno())
3541 self.args = fetch_args
3542 self.env = fetch_env
3543 SpawnProcess._start(self)
3545 def _set_returncode(self, wait_retval):
3546 SpawnProcess._set_returncode(self, wait_retval)
3547 if self.returncode == os.EX_OK:
3548 # If possible, update the mtime to match the remote package if
3549 # the fetcher didn't already do it automatically.
3550 bintree = self.pkg.root_config.trees["bintree"]
3551 if bintree._remote_has_index:
3552 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3553 if remote_mtime is not None:
3555 remote_mtime = long(remote_mtime)
3560 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3564 if remote_mtime != local_mtime:
3566 os.utime(self.pkg_path,
3567 (remote_mtime, remote_mtime))
# lock() (def header not in view):
3576 This raises an AlreadyLocked exception if lock() is called
3577 while a lock is already held. In order to avoid this, call
3578 unlock() or check whether the "locked" attribute is True
3579 or False before calling lock().
3581 if self._lock_obj is not None:
3582 raise self.AlreadyLocked((self._lock_obj,))
3584 self._lock_obj = portage.locks.lockfile(
3585 self.pkg_path, wantnewlockfile=1)
3588 class AlreadyLocked(portage.exception.PortageException):
# unlock() (def header not in view): idempotent release.
3592 if self._lock_obj is None:
3594 portage.locks.unlockfile(self._lock_obj)
3595 self._lock_obj = None
3598 class BinpkgVerifier(AsynchronousTask):
3599 __slots__ = ("logfile", "pkg",)
# _start (def header not in view):
3603 Note: Unlike a normal AsynchronousTask.start() method,
3604 this one does all work synchronously. The returncode
3605 attribute will be set before it returns.
# Temporarily redirect stdout/stderr to the log file (background
# mode only) while running bintree.digestCheck().
3609 root_config = pkg.root_config
3610 bintree = root_config.trees["bintree"]
3612 stdout_orig = sys.stdout
3613 stderr_orig = sys.stderr
3615 if self.background and self.logfile is not None:
3616 log_file = open(self.logfile, 'a')
3618 if log_file is not None:
3619 sys.stdout = log_file
3620 sys.stderr = log_file
3622 bintree.digestCheck(pkg)
3623 except portage.exception.FileNotFound:
3624 writemsg("!!! Fetching Binary failed " + \
3625 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3627 except portage.exception.DigestException, e:
# e.value layout: (filename, reason, got, expected) per the
# writemsg calls below.
3628 writemsg("\n!!! Digest verification failed:\n",
3630 writemsg("!!! %s\n" % e.value[0],
3632 writemsg("!!! Reason: %s\n" % e.value[1],
3634 writemsg("!!! Got: %s\n" % e.value[2],
3636 writemsg("!!! Expected: %s\n" % e.value[3],
# On failure, move the bad file aside so a re-fetch can replace it.
3639 if rval != os.EX_OK:
3640 pkg_path = bintree.getname(pkg.cpv)
3641 head, tail = os.path.split(pkg_path)
3642 temp_filename = portage._checksum_failure_temp_file(head, tail)
3643 writemsg("File renamed to '%s'\n" % (temp_filename,),
3646 sys.stdout = stdout_orig
3647 sys.stderr = stderr_orig
3648 if log_file is not None:
3651 self.returncode = rval
3654 class BinpkgPrefetcher(CompositeTask):
# Background fetch + verify for a binary package; injects the file
# into the bintree on success.  NOTE(review): left-hand numbers show
# gaps -- some original lines (def headers) are not in this view.
3656 __slots__ = ("pkg",) + \
3657 ("pkg_path", "_bintree",)
# _start (def header not in view): begin with the fetch.
3660 self._bintree = self.pkg.root_config.trees["bintree"]
3661 fetcher = BinpkgFetcher(background=self.background,
3662 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3663 scheduler=self.scheduler)
3664 self.pkg_path = fetcher.pkg_path
3665 self._start_task(fetcher, self._fetcher_exit)
3667 def _fetcher_exit(self, fetcher):
3669 if self._default_exit(fetcher) != os.EX_OK:
3673 verifier = BinpkgVerifier(background=self.background,
3674 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3675 self._start_task(verifier, self._verifier_exit)
3677 def _verifier_exit(self, verifier):
3678 if self._default_exit(verifier) != os.EX_OK:
3682 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3684 self._current_task = None
3685 self.returncode = os.EX_OK
3688 class BinpkgExtractorAsync(SpawnProcess):
# Extracts a .tbz2 image by spawning `bzip2 -dqc | tar -xp` under
# bash; both paths are shell-quoted via portage._shell_quote.
3690 __slots__ = ("image_dir", "pkg", "pkg_path")
3692 _shell_binary = portage.const.BASH_BINARY
# _start (def header not in view):
3695 self.args = [self._shell_binary, "-c",
3696 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3697 (portage._shell_quote(self.pkg_path),
3698 portage._shell_quote(self.image_dir))]
3700 self.env = self.pkg.root_config.settings.environ()
3701 SpawnProcess._start(self)
3703 class MergeListItem(CompositeTask):
# One entry of the merge list: dispatches to EbuildBuild or Binpkg
# (or an immediate uninstall) and exposes install()/merge() steps.
# NOTE(review): left-hand numbers show gaps -- some original lines
# are missing from this view; code is reproduced as-is.
3706 TODO: For parallel scheduling, everything here needs asynchronous
3707 execution support (start, poll, and wait methods).
3710 __slots__ = ("args_set",
3711 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3712 "find_blockers", "logger", "mtimedb", "pkg",
3713 "pkg_count", "pkg_to_replace", "prefetcher",
3714 "settings", "statusMessage", "world_atom") + \
# _start (def header not in view): uninstall operations short-circuit
# here; otherwise build a status message and start the install task.
3720 build_opts = self.build_opts
3723 # uninstall, executed by self.merge()
3724 self.returncode = os.EX_OK
3728 args_set = self.args_set
3729 find_blockers = self.find_blockers
3730 logger = self.logger
3731 mtimedb = self.mtimedb
3732 pkg_count = self.pkg_count
3733 scheduler = self.scheduler
3734 settings = self.settings
3735 world_atom = self.world_atom
3736 ldpath_mtimes = mtimedb["ldpath"]
3738 action_desc = "Emerging"
3740 if pkg.type_name == "binary":
3741 action_desc += " binary"
3743 if build_opts.fetchonly:
3744 action_desc = "Fetching"
3746 msg = "%s (%s of %s) %s" % \
3748 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3749 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3750 colorize("GOOD", pkg.cpv))
# Append the overlay repo name when it differs from PORTDIR's.
3752 portdb = pkg.root_config.trees["porttree"].dbapi
3753 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3754 if portdir_repo_name:
3755 pkg_repo_name = pkg.metadata.get("repository")
3756 if pkg_repo_name != portdir_repo_name:
3757 if not pkg_repo_name:
3758 pkg_repo_name = "unknown repo"
3759 msg += " from %s" % pkg_repo_name
3762 msg += " %s %s" % (preposition, pkg.root)
3764 if not build_opts.pretend:
3765 self.statusMessage(msg)
3766 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3767 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3769 if pkg.type_name == "ebuild":
3771 build = EbuildBuild(args_set=args_set,
3772 background=self.background,
3773 config_pool=self.config_pool,
3774 find_blockers=find_blockers,
3775 ldpath_mtimes=ldpath_mtimes, logger=logger,
3776 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3777 prefetcher=self.prefetcher, scheduler=scheduler,
3778 settings=settings, world_atom=world_atom)
3780 self._install_task = build
3781 self._start_task(build, self._default_final_exit)
3784 elif pkg.type_name == "binary":
3786 binpkg = Binpkg(background=self.background,
3787 find_blockers=find_blockers,
3788 ldpath_mtimes=ldpath_mtimes, logger=logger,
3789 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3790 prefetcher=self.prefetcher, settings=settings,
3791 scheduler=scheduler, world_atom=world_atom)
3793 self._install_task = binpkg
3794 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the wrapped install task (def headers not
# in this view).
3798 self._install_task.poll()
3799 return self.returncode
3802 self._install_task.wait()
3803 return self.returncode
# merge() (def header not in view): handles the uninstall case via a
# synchronous PackageUninstall, then defers to the install task.
3808 build_opts = self.build_opts
3809 find_blockers = self.find_blockers
3810 logger = self.logger
3811 mtimedb = self.mtimedb
3812 pkg_count = self.pkg_count
3813 prefetcher = self.prefetcher
3814 scheduler = self.scheduler
3815 settings = self.settings
3816 world_atom = self.world_atom
3817 ldpath_mtimes = mtimedb["ldpath"]
3820 if not (build_opts.buildpkgonly or \
3821 build_opts.fetchonly or build_opts.pretend):
3823 uninstall = PackageUninstall(background=self.background,
3824 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3825 pkg=pkg, scheduler=scheduler, settings=settings)
3828 retval = uninstall.wait()
3829 if retval != os.EX_OK:
3833 if build_opts.fetchonly or \
3834 build_opts.buildpkgonly:
3835 return self.returncode
3837 retval = self._install_task.install()
class PackageMerge(AsynchronousTask):
	"""Perform the (currently synchronous) merge step for one package.

	TODO: Implement asynchronous merge so that the scheduler can
	run while a merge is executing.
	"""

	# merge: the MergeListItem whose install/uninstall is executed here
	__slots__ = ("merge",)

	# NOTE(review): the "_start" method header is missing from this
	# excerpt; the statements below belong to it.
		pkg = self.merge.pkg
		pkg_count = self.merge.pkg_count

		# NOTE(review): the branch headers selecting between the
		# uninstall and install wording are missing from this excerpt.
		action_desc = "Uninstalling"
		preposition = "from"
		action_desc = "Installing"

		msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))

		msg += " %s %s" % (preposition, pkg.root)

		# Status output is suppressed for modes that do not actually merge.
		if not self.merge.build_opts.fetchonly and \
			not self.merge.build_opts.pretend and \
			not self.merge.build_opts.buildpkgonly:
			self.merge.statusMessage(msg)

		# Delegate the real work to MergeListItem.merge().
		self.returncode = self.merge.merge()
class DependencyArg(object):
	"""Base class for dependency arguments given to emerge (atoms,
	package instances, or @set names)."""

	def __init__(self, arg=None, root_config=None):
		# NOTE(review): the "self.arg = arg" assignment is not visible in
		# this excerpt, but the string conversion below relies on self.arg.
		self.root_config = root_config

	# NOTE(review): the "__str__" method header is missing from this
	# excerpt; the statement below belongs to it.
		return str(self.arg)
class AtomArg(DependencyArg):
	"""Dependency argument wrapping a single dependency atom; exposes a
	one-atom tuple via the "set" attribute."""

	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# NOTE(review): the "self.atom = atom" assignment is not visible
		# in this excerpt.
		# Normalize plain strings into portage.dep.Atom instances.
		if not isinstance(self.atom, portage.dep.Atom):
			self.atom = portage.dep.Atom(self.atom)
		self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument that wraps a specific Package instance.

	The package's exact version is pinned with an "=" atom, and a
	single-atom tuple is exposed through the "set" attribute.
	"""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Pin the exact cpv so only this very version can satisfy the arg.
		pinned = portage.dep.Atom("=" + package.cpv)
		self.atom = pinned
		self.set = (pinned, )
class SetArg(DependencyArg):
	"""Dependency argument naming a package set (e.g. "@world")."""

	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# NOTE(review): the assignment storing the given set object is
		# not visible in this excerpt.
		# Strip the set prefix ("@") to obtain the bare set name.
		self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
	"""Value object describing one edge of the dependency graph:
	an atom required by a parent package at a given depth/priority."""

	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# Fill in defaults for attributes the caller did not supply.
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
			# NOTE(review): the default-depth assignment is not visible
			# in this excerpt.
class BlockerCache(DictMixin):
	"""This caches blockers of installed packages so that dep_check does not
	have to be done for every single installed package on every invocation of
	emerge. The cache is invalidated whenever it is detected that something
	has changed that might alter the results of dep_check() calls:
	1) the set of installed packages (including COUNTER) has changed
	2) the old-style virtuals have changed
	"""

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_cache_threshold = 5

	class BlockerData(object):
		# Value object pairing a vdb COUNTER with the blocker atoms
		# computed for that counter value.

		__slots__ = ("__weakref__", "atoms", "counter")

		def __init__(self, counter, atoms):
			self.counter = counter
			# NOTE(review): the "self.atoms = atoms" assignment is not
			# visible in this excerpt.

	def __init__(self, myroot, vardb):
		# NOTE(review): a line storing vardb itself appears to be missing
		# from this excerpt.
		self._virtuals = vardb.settings.getvirtuals()
		self._cache_filename = os.path.join(myroot,
			portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
		self._cache_version = "1"
		self._cache_data = None
		self._modified = set()

	# NOTE(review): the cache-loading method header and its "try:" are
	# missing from this excerpt; the statements below belong to it.
		f = open(self._cache_filename)
		mypickle = pickle.Unpickler(f)
		# Disallow resolution of arbitrary globals while unpickling
		# (hardening against malicious pickle payloads).
		mypickle.find_global = None
		self._cache_data = mypickle.load()
	except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
		# Only report real corruption; a missing file is normal.
		if isinstance(e, pickle.UnpicklingError):
			writemsg("!!! Error loading '%s': %s\n" % \
				(self._cache_filename, str(e)), noiselevel=-1)

		# A cache is usable only if it is a dict of the expected version
		# containing a dict of blockers.
		cache_valid = self._cache_data and \
			isinstance(self._cache_data, dict) and \
			self._cache_data.get("version") == self._cache_version and \
			isinstance(self._cache_data.get("blockers"), dict)

			# Validate all the atoms and counters so that
			# corruption is detected as soon as possible.
			# NOTE(review): several "continue"/"break" and unpacking lines
			# of this validation loop are missing from this excerpt.
			invalid_items = set()
			for k, v in self._cache_data["blockers"].iteritems():
				if not isinstance(k, basestring):
					invalid_items.add(k)
				if portage.catpkgsplit(k) is None:
					invalid_items.add(k)
				except portage.exception.InvalidData:
					invalid_items.add(k)
				if not isinstance(v, tuple) or \
					invalid_items.add(k)
				if not isinstance(counter, (int, long)):
					invalid_items.add(k)
				if not isinstance(atoms, (list, tuple)):
					invalid_items.add(k)
				invalid_atom = False
					if not isinstance(atom, basestring):
					# Every cached atom must be a blocker ("!"-prefixed)
					# and syntactically valid.
					if atom[:1] != "!" or \
						not portage.isvalidatom(
						atom, allow_blockers=True):
					invalid_items.add(k)

			# Purge entries that failed validation.
			for k in invalid_items:
				del self._cache_data["blockers"][k]
			if not self._cache_data["blockers"]:

			# Start over with an empty cache when nothing usable survived.
			self._cache_data = {"version":self._cache_version}
			self._cache_data["blockers"] = {}
			self._cache_data["virtuals"] = self._virtuals
		self._modified.clear()

	# NOTE(review): the "flush" method header is missing from this
	# excerpt; the docstring and statements below belong to it.
		"""If the current user has permission and the internal blocker cache
		been updated, save it to disk and mark it unmodified. This is called
		by emerge after it has proccessed blockers for all installed packages.
		Currently, the cache is only written if the user has superuser
		privileges (since that's required to obtain a lock), but all users
		have read access and benefit from faster blocker lookups (as long as
		the entire cache is still valid). The cache is stored as a pickled
		dict object with the following format:

			"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
			"virtuals" : vardb.settings.getvirtuals()
		"""
		# Only rewrite the on-disk cache once enough entries have changed
		# to justify the I/O (threshold check below).
		if len(self._modified) >= self._cache_threshold and \
			f = portage.util.atomic_ofstream(self._cache_filename)
			pickle.dump(self._cache_data, f, -1)
			portage.util.apply_secpass_permissions(
				self._cache_filename, gid=portage.portage_gid, mode=0644)
		except (IOError, OSError), e:
		self._modified.clear()

	def __setitem__(self, cpv, blocker_data):
		"""
		Update the cache and mark it as modified for a future call to
		flush().

		@param cpv: Package for which to cache blockers.
		@param blocker_data: An object with counter and atoms attributes.
		@type blocker_data: BlockerData
		"""
		# Store atoms as plain strings so the pickle stays portable.
		self._cache_data["blockers"][cpv] = \
			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
		self._modified.add(cpv)

	# NOTE(review): the "__iter__" method header is missing from this
	# excerpt; the statements below belong to it.
		if self._cache_data is None:
			# triggered by python-trace
		return iter(self._cache_data["blockers"])

	def __delitem__(self, cpv):
		# Deletion affects only the in-memory dict; flush() persists it.
		del self._cache_data["blockers"][cpv]

	def __getitem__(self, cpv):
		"""
		@returns: An object with counter and atoms attributes.
		"""
		return self.BlockerData(*self._cache_data["blockers"][cpv])

	# NOTE(review): a method header (presumably a DictMixin hook) is
	# missing from this excerpt; the docstring below belongs to it.
		"""This needs to be implemented so that self.__repr__() doesn't raise
		an AttributeError."""
class BlockerDB(object):
	"""Computes which installed packages block a new package (and vice
	versa), caching per-package blocker atoms via BlockerCache."""

	def __init__(self, root_config):
		self._root_config = root_config
		self._vartree = root_config.trees["vartree"]
		self._portdb = root_config.trees["porttree"].dbapi

		# Both are created lazily by _get_fake_vartree().
		self._dep_check_trees = None
		self._fake_vartree = None

	def _get_fake_vartree(self, acquire_lock=0):
		# Lazily build a FakeVartree plus the dep_check() tree mapping
		# that points at it; on later calls the vartree is re-synced.
		fake_vartree = self._fake_vartree
		if fake_vartree is None:
			fake_vartree = FakeVartree(self._root_config,
				acquire_lock=acquire_lock)
			self._fake_vartree = fake_vartree
			# Both "porttree" and "vartree" point at the fake vartree so
			# dep_check resolves everything against installed packages.
			self._dep_check_trees = { self._vartree.root : {
				"porttree" : fake_vartree,
				"vartree" : fake_vartree,
			fake_vartree.sync(acquire_lock=acquire_lock)
		# NOTE(review): the closing of the trees dict, the "else:" branch,
		# and the "return fake_vartree" are missing from this excerpt.

	def findInstalledBlockers(self, new_pkg, acquire_lock=0):
		# Return the set of installed packages involved in a blocker
		# relationship with new_pkg, in either direction.
		# NOTE(review): several structural lines (try/else headers and
		# list initializations) are missing from this excerpt.
		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
		settings = self._vartree.settings
		stale_cache = set(blocker_cache)
		fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
		dep_check_trees = self._dep_check_trees
		vardb = fake_vartree.dbapi
		installed_pkgs = list(vardb)

		for inst_pkg in installed_pkgs:
			stale_cache.discard(inst_pkg.cpv)
			cached_blockers = blocker_cache.get(inst_pkg.cpv)
			# A changed COUNTER invalidates the cached entry.
			if cached_blockers is not None and \
				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
				cached_blockers = None
			if cached_blockers is not None:
				blocker_atoms = cached_blockers.atoms
				# Use aux_get() to trigger FakeVartree global
				# updates on *DEPEND when appropriate.
				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
				# Installed packages may predate current dep syntax, so
				# dep_check runs in non-strict mode here.
				portage.dep._dep_check_strict = False
				success, atoms = portage.dep_check(depstr,
					vardb, settings, myuse=inst_pkg.use.enabled,
					trees=dep_check_trees, myroot=inst_pkg.root)
				portage.dep._dep_check_strict = True
				pkg_location = os.path.join(inst_pkg.root,
					portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
				portage.writemsg("!!! %s/*DEPEND: %s\n" % \
					(pkg_location, atoms), noiselevel=-1)

				# Keep only blocker atoms ("!"-prefixed) and cache them
				# keyed by the package's COUNTER.
				blocker_atoms = [atom for atom in atoms \
					if atom.startswith("!")]
				blocker_atoms.sort()
				counter = long(inst_pkg.metadata["COUNTER"])
				blocker_cache[inst_pkg.cpv] = \
					blocker_cache.BlockerData(counter, blocker_atoms)
		# Drop cache entries for packages no longer installed.
		for cpv in stale_cache:
			del blocker_cache[cpv]
		blocker_cache.flush()

		# Map each blocker atom back to the installed package(s) that
		# declare it.
		blocker_parents = digraph()
		for pkg in installed_pkgs:
			for blocker_atom in blocker_cache[pkg.cpv].atoms:
				blocker_atom = blocker_atom.lstrip("!")
				blocker_atoms.append(blocker_atom)
				blocker_parents.add(blocker_atom, pkg)

		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		blocking_pkgs = set()
		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
			blocking_pkgs.update(blocker_parents.parent_nodes(atom))

		# Check for blockers in the other direction.
		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
		portage.dep._dep_check_strict = False
		success, atoms = portage.dep_check(depstr,
			vardb, settings, myuse=new_pkg.use.enabled,
			trees=dep_check_trees, myroot=new_pkg.root)
		portage.dep._dep_check_strict = True
			# We should never get this far with invalid deps.
			show_invalid_depstring_notice(new_pkg, depstr, atoms)

		blocker_atoms = [atom.lstrip("!") for atom in atoms \
		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		for inst_pkg in installed_pkgs:
				# EAFP: probe for at least one matching atom.
				blocker_atoms.iterAtomsForPackage(inst_pkg).next()
			except (portage.exception.InvalidDependString, StopIteration):
			blocking_pkgs.add(inst_pkg)

		return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
	"""Print a formatted error explaining that a package's dependency
	string could not be parsed, with remediation advice that depends on
	whether the package is already installed."""

	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
		"\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
	p_type, p_root, p_key, p_status = parent_node
	# NOTE(review): the "msg = []" initialization and the "else:" branch
	# header are missing from this excerpt.
	if p_status == "nomerge":
		# Already-installed package: corrupt vdb entry, advise reinstall.
		category, pf = portage.catsplit(p_key)
		pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
		msg.append("Portage is unable to process the dependencies of the ")
		msg.append("'%s' package. " % p_key)
		msg.append("In order to correct this problem, the package ")
		msg.append("should be uninstalled, reinstalled, or upgraded. ")
		msg.append("As a temporary workaround, the --nodeps option can ")
		msg.append("be used to ignore all dependencies. For reference, ")
		msg.append("the problematic dependencies can be found in the ")
		msg.append("*DEPEND files located in '%s/'." % pkg_location)
		msg.append("This package can not be installed. ")
		msg.append("Please notify the '%s' package maintainer " % p_key)
		msg.append("about this problem.")

	# Wrap the advice to 72 columns and emit at ERROR level.
	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
class PackageVirtualDbapi(portage.dbapi):
	"""
	A dbapi-like interface class that represents the state of the installed
	package database as new packages are installed, replacing any packages
	that previously existed in the same slot. The main difference between
	this class and fakedbapi is that this one uses Package instances
	internally (passed in via cpv_inject() and cpv_remove() calls).
	"""

	def __init__(self, settings):
		portage.dbapi.__init__(self)
		self.settings = settings
		# Memoizes match() results; invalidated on inject/remove.
		self._match_cache = {}

	# NOTE(review): the "clear" method header is missing from this
	# excerpt; the docstring text and statements below belong to it.
		Remove all packages.
		self._cp_map.clear()
		self._cpv_map.clear()

	# NOTE(review): the "copy" method header is missing from this
	# excerpt; it deep-copies the cp map lists so the copy is
	# independently mutable.
		obj = PackageVirtualDbapi(self.settings)
		obj._match_cache = self._match_cache.copy()
		obj._cp_map = self._cp_map.copy()
		for k, v in obj._cp_map.iteritems():
			obj._cp_map[k] = v[:]
		obj._cpv_map = self._cpv_map.copy()

	# NOTE(review): the "__iter__" method header is missing from this
	# excerpt.
		return self._cpv_map.itervalues()

	def __contains__(self, item):
		# Membership is by cpv AND identity of the stored Package.
		existing = self._cpv_map.get(item.cpv)
		if existing is not None and \

	def get(self, item, default=None):
		# Accepts either a Package-like object or a
		# (type_name, root, cpv, operation) tuple.
		cpv = getattr(item, "cpv", None)
			type_name, root, cpv, operation = item

		existing = self._cpv_map.get(cpv)
		if existing is not None and \

	def match_pkgs(self, atom):
		# Like match(), but returns Package instances instead of cpvs.
		return [self._cpv_map[cpv] for cpv in self.match(atom)]

	def _clear_cache(self):
		# Invalidate cached categories and match results after mutation.
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		# Memoized wrapper around dbapi.match().
		result = self._match_cache.get(origdep)
		if result is not None:
		result = portage.dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = result

	def cpv_exists(self, cpv):
		return cpv in self._cpv_map

	def cp_list(self, mycp, use_cache=1):
		cachelist = self._match_cache.get(mycp)
		# cp_list() doesn't expand old-style virtuals
		if cachelist and cachelist[0].startswith(mycp):
		cpv_list = self._cp_map.get(mycp)
		if cpv_list is None:
			cpv_list = [pkg.cpv for pkg in cpv_list]
		self._cpv_sort_ascending(cpv_list)
		# Don't cache an empty result for a virtual, since it may be
		# expandable later.
		if not (not cpv_list and mycp.startswith("virtual/")):
			self._match_cache[mycp] = cpv_list

	# NOTE(review): the "cp_all" method header is missing from this
	# excerpt.
		return list(self._cp_map)

	# NOTE(review): the "cpv_all" method header is missing from this
	# excerpt.
		return list(self._cpv_map)

	def cpv_inject(self, pkg):
		# Add pkg, displacing any existing package in the same slot.
		cp_list = self._cp_map.get(pkg.cp)
			self._cp_map[pkg.cp] = cp_list
		e_pkg = self._cpv_map.get(pkg.cpv)
		if e_pkg is not None:
			self.cpv_remove(e_pkg)
		for e_pkg in cp_list:
			if e_pkg.slot_atom == pkg.slot_atom:
				self.cpv_remove(e_pkg)
		self._cpv_map[pkg.cpv] = pkg

	def cpv_remove(self, pkg):
		# Remove pkg; only valid if this exact instance is stored.
		old_pkg = self._cpv_map.get(pkg.cpv)
		self._cp_map[pkg.cp].remove(pkg)
		del self._cpv_map[pkg.cpv]

	def aux_get(self, cpv, wants):
		# Serve metadata straight from the stored Package instance,
		# defaulting missing keys to "".
		metadata = self._cpv_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self._cpv_map[cpv].metadata.update(values)
class depgraph(object):
	"""Builds and resolves the dependency graph for a set of requested
	packages/atoms across one or more ROOTs."""

	# Maps package type names to the tree each type lives in.
	pkg_tree_map = RootConfig.pkg_tree_map

	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

	def __init__(self, settings, trees, myopts, myparams, spinner):
		# NOTE(review): a number of initialization lines are missing from
		# this excerpt (several dict/set initializations and structural
		# headers); the visible statements are documented below.
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = Scheduler._opts_no_restart
		self.pkgsettings = {}
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		self._trees_orig = trees
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# All Package instances
		self._pkg_cache = {}
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			self._slot_pkg_map[myroot] = {}
			vardb = self.trees[myroot]["vartree"].dbapi
			preload_installed_pkgs = "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			if preload_installed_pkgs:
				self.spinner.update()
				# This triggers metadata updates via FakeVartree.
				vardb.aux_get(pkg.cpv, [])
				fakedb.cpv_inject(pkg)

			# Now that the vardb state is cached in our FakeVartree,
			# we won't be needing the real vartree cache for awhile.
			# To make some room on the heap, clear the vardbapi
			trees[myroot]["vartree"].dbapi._clear_cache()

			self.mydbapi[myroot] = fakedb
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			def filtered_tree():
			filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]

			portdb = self.trees[myroot]["porttree"].dbapi
			bindb = self.trees[myroot]["bintree"].dbapi
			vardb = self.trees[myroot]["vartree"].dbapi
			# (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in self.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in self.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			if "--usepkg" in self.myopts:
				self.trees[myroot]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)

		self.digraph=portage.digraph()
		# contains all sets added to the graph
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._required_set_names = set(["system", "world"])
		# Strategy hooks: highest-available selection by default.
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		self._highest_pkg_cache = {}
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		cases."""

		if not self._slot_collision_info:

		self._show_merge_list()

		# NOTE(review): the "msg = []" initialization is missing from
		# this excerpt.
		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")

		# Max number of parents shown, to avoid flooding the display.
		# NOTE(review): the max_parents assignment is missing from this
		# excerpt; explanation text is wrapped to this width:
		explanation_columns = 70
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():
			msg.append(str(slot_atom))
			for node in slot_nodes:
				msg.append(str(node))
				parent_atoms = self._parent_atoms.get(node)
					# Prefer conflict atoms over others.
					for parent_atom in parent_atoms:
						if len(pruned_list) >= max_parents:
						if parent_atom in self._slot_conflict_parent_atoms:
							pruned_list.add(parent_atom)

					# If this package was pulled in by conflict atoms then
					# show those alone since those are the most interesting.
						# When generating the pruned list, prefer instances
						# of DependencyArg over instances of Package.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							parent, atom = parent_atom
							if isinstance(parent, DependencyArg):
								pruned_list.add(parent_atom)
						# Prefer Packages instances that themselves have been
						# pulled into collision slots.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							parent, atom = parent_atom
							if isinstance(parent, Package) and \
								(parent.slot_atom, parent.root) \
								in self._slot_collision_info:
								pruned_list.add(parent_atom)
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
							pruned_list.add(parent_atom)
					omitted_parents = len(parent_atoms) - len(pruned_list)
					parent_atoms = pruned_list
					msg.append(" pulled in by\n")
					for parent_atom in parent_atoms:
						parent, atom = parent_atom
						msg.append(2*indent)
						if isinstance(parent,
							(PackageArg, AtomArg)):
							# For PackageArg and AtomArg types, it's
							# redundant to display the atom attribute.
							msg.append(str(parent))
							# Display the specific atom from SetArg or
							msg.append("%s required by %s" % (atom, parent))
						msg.append(2*indent)
						msg.append("(and %d more)\n" % omitted_parents)
					msg.append(" (no parents)\n")

				explanation = self._slot_conflict_explanation(slot_nodes)
					msg.append(indent + "Explanation:\n\n")
					for line in textwrap.wrap(explanation, explanation_columns):
						msg.append(2*indent + line + "\n")

		sys.stderr.write("".join(msg))
		explanations_for_all = explanations == len(self._slot_collision_info)

		# When every conflict was explained (or in quiet mode), skip the
		# generic package.mask advice below.
		if explanations_for_all or "--quiet" in self.myopts:

		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")

		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
			f.add_flowing_data(x)

		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
			f.add_flowing_data(x)
	def _slot_conflict_explanation(self, slot_nodes):
		"""
		When a slot conflict occurs due to USE deps, there are a few
		different cases to consider:

		1) New USE are correctly set but --newuse wasn't requested so an
		   installed package with incorrect USE happened to get pulled
		   into graph before the new one.

		2) New USE are incorrectly set but an installed package has correct
		   USE so it got pulled into the graph, and a new instance also got
		   pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied simultaneously,
		   and multiple package instances got pulled into the same slot to
		   satisfy the conflicting deps.

		Currently, explanations and suggested courses of action are generated
		for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
		"""

		if len(slot_nodes) != 2:
			# Suggestions are only implemented for
			# conflicts between two packages.

		all_conflict_atoms = self._slot_conflict_parent_atoms
		matched_atoms = None
		unmatched_node = None
		for node in slot_nodes:
			parent_atoms = self._parent_atoms.get(node)
			if not parent_atoms:
				# Normally, there are always parent atoms. If there are
				# none then something unexpected is happening and there's
				# currently no suggestion for this case.
			conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
			for parent_atom in conflict_atoms:
				parent, atom = parent_atom
					# Suggestions are currently only implemented for cases
					# in which all conflict atoms have USE deps.
				if matched_node is not None:
					# If conflict atoms match multiple nodes
					# then there's no suggestion.
				matched_atoms = conflict_atoms
				if unmatched_node is not None:
					# Neither node is matched by conflict atoms, and
					# there is no suggestion for this case.
				unmatched_node = node

		if matched_node is None or unmatched_node is None:
			# This shouldn't happen.

		# Case 1: the conflict atoms match only the new (not-installed)
		# instance, so the installed one is the stray.
		if unmatched_node.installed and not matched_node.installed:
			return "New USE are correctly set, but --newuse wasn't" + \
				" requested, so an installed package with incorrect USE " + \
				"happened to get pulled into the dependency graph. " + \
				"In order to solve " + \
				"this, either specify the --newuse option or explicitly " + \
				" reinstall '%s'." % matched_node.slot_atom

		# Case 2: the conflict atoms match only the installed instance,
		# so the new instance has the wrong USE settings.
		if matched_node.installed and not unmatched_node.installed:
			atoms = sorted(set(atom for parent, atom in matched_atoms))
			explanation = ("New USE for '%s' are incorrectly set. " + \
				"In order to solve this, adjust USE to satisfy '%s'") % \
				(matched_node.slot_atom, atoms[0])
				for atom in atoms[1:-1]:
					explanation += ", '%s'" % (atom,)
				explanation += " and '%s'" % (atoms[-1],)
		# NOTE(review): the trailing "return explanation" / "return None"
		# lines are missing from this excerpt.
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		"""
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Union of the parent atoms of every package in this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				# Any parent atom NOT already attributed to this package
				# is checked against it; atoms that fail to match some
				# slot occupant are the conflict-causing ones.
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
						self._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags."""
		if "--newuse" in self.myopts:
			# With --newuse: any change in IUSE membership (excluding
			# forced flags) or in the enabled subset of IUSE counts.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
		elif "changed-use" == self.myopts.get("--reinstall"):
			# With --reinstall=changed-use: only changes in the enabled
			# subset of IUSE count.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
		# NOTE(review): the "if flags: return flags" / "return None"
		# lines are missing from this excerpt.
	def _create_graph(self, allow_unsatisfied=False):
		"""Drain the dependency stack, adding each pending item to the
		graph; returns a false value as soon as any item fails."""
		dep_stack = self._dep_stack
		# NOTE(review): the loop header draining dep_stack is missing
		# from this excerpt.
			self.spinner.update()
			dep = dep_stack.pop()
			# Package items expand to their own dependencies; plain
			# Dependency items are resolved and added directly.
			if isinstance(dep, Package):
				if not self._add_pkg_deps(dep,
					allow_unsatisfied=allow_unsatisfied):
			if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""Resolve a single Dependency to a package (or blocker) and add
		it to the graph; returns a false value on unresolvable failure."""
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		# --update only applies near the top of the graph.
		update = "--update" in self.myopts and dep.depth <= 1
		# NOTE(review): the blocker-detection condition line is missing
		# from this excerpt.
		if not buildpkgonly and \
			dep.parent not in self._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.
			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				root=dep.parent.root)
			self._blocker_parents.add(blocker, dep.parent)

		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
			if allow_unsatisfied:
				self._unsatisfied_deps.append(dep)
			# Record for later display of an unsatisfied-dep error.
			self._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))

		# In some cases, dep_check will return deps that shouldn't
		# be proccessed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not (existing_node or empty or deep or update):
			if dep.root == self.target_root:
					# EAFP probe: is the package matched by any argument atom?
					myarg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
				self._ignored_deps.append(dep)

		if not self._add_pkg(dep_pkg, dep):
def _add_pkg(self, pkg, dep):
    """Add `pkg` to the dependency graph as the resolution of `dep`,
    handling slot collisions and existing-node reuse.

    Returns a truthy/falsy success indicator (exact returns are elided
    from this chunk -- see NOTE below).
    """
    # NOTE(review): this chunk has many elided lines (gaps in upstream
    # numbering); try/except headers, several returns and some call
    # arguments are missing -- verify against upstream before editing.
    myparent = dep.parent
    priority = dep.priority
    if priority is None:
        priority = DepPriority()
    """
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just looking at it's deps?
        Think --onlydeps, we need to ignore packages in that case.
    """
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge
    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self.digraph
    # select the correct /var database that we'll be checking against
    vardbapi = self.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self.pkgsettings[pkg.root]
    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString, e:
        if not pkg.installed:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))
    if not pkg.onlydeps:
        if not pkg.installed and \
                "empty" not in self.myparams and \
                vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True
        existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
        existing_node_matches = pkg.cpv == existing_node.cpv
        if existing_node_matches and \
                pkg != existing_node and \
                dep.atom is not None:
            # Use package set for matching since it will match via
            # PROVIDE when necessary, while match_from_list does not.
            atom_set = InternalPackageSet(initial_atoms=[dep.atom])
            if not atom_set.findAtomForPackage(existing_node):
                existing_node_matches = False
        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                    (priority.buildtime and not priority.satisfied):
                self.digraph.addnode(existing_node, myparent,
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(existing_node,
                    (dep.parent, dep.atom))
        # A slot collision has occurred.  Sometimes this coincides
        # with unresolvable blockers, so the slot collision will be
        # shown later if there are no unresolvable blockers.
        self._add_slot_conflict(pkg)
        slot_collision = True
        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output.  This node is
        # only being partially added to the graph.  It must not be
        # allowed to interfere with the other nodes that have been
        # added.  Do not overwrite data for existing nodes in
        # self.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.
        self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
        self.mydbapi[pkg.root].cpv_inject(pkg)
        if not pkg.installed:
            # Allow this package to satisfy old-style virtuals in case it
            # doesn't already. Any pre-existing providers will be preferred
            pkgsettings.setinst(pkg.cpv, pkg.metadata)
            # For consistency, also update the global virtuals.
            settings = self.roots[pkg.root].settings
            settings.setinst(pkg.cpv, pkg.metadata)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))
    self._set_nodes.add(pkg)
    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))
    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)
    """ This section determines whether we go deeper into dependencies or not.
    We want to go deeper on a few occasions:
    Installing package A, we need to make sure package A's deps are met.
    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    """
    dep_stack = self._dep_stack
    if "recurse" not in self.myparams:
    elif pkg.installed and \
            "deep" not in self.myparams:
        dep_stack = self._ignored_deps
    self.spinner.update()
    if not previously_added:
        dep_stack.append(pkg)
5022 def _add_parent_atom(self, pkg, parent_atom):
5023 parent_atoms = self._parent_atoms.get(pkg)
5024 if parent_atoms is None:
5025 parent_atoms = set()
5026 self._parent_atoms[pkg] = parent_atoms
5027 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
    """Record `pkg` as a slot-collision node, grouping conflicting
    packages by their (slot_atom, root) key in _slot_collision_info."""
    # NOTE(review): a line is elided here (upstream numbering gap);
    # presumably `slot_nodes = set()` before the first add -- verify.
    self._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # Seed the group with the package already occupying this slot.
        slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
        self._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency
    objects and feed each one to _add_dep.

    allow_unsatisfied -- forwarded to _add_dep.
    """
    # NOTE(review): this chunk has elided lines (gaps in upstream
    # numbering); try/except headers, the `deps` tuple wrapper, loop
    # headers and returns are incomplete -- verify against upstream.
    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams
    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    edepend[k] = metadata[k]
    if not pkg.built and \
            "--buildpkgonly" in self.myopts and \
            "deep" not in self.myparams and \
            "empty" not in self.myparams:
        # --buildpkgonly without deep/empty: runtime deps are irrelevant.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_satisfied = False
    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "satisfied" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            bdeps_satisfied = True
        # built packages do not have build time dependencies.
        edepend["DEPEND"] = ""
    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""
    # (root, depstring, priority) triples; DEPEND resolves against "/".
    ("/", edepend["DEPEND"],
        self._priority(buildtime=True, satisfied=bdeps_satisfied)),
    (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
    debug = "--debug" in self.myopts
    # Installed packages get lenient dep-string parsing.
    strict = mytype != "installed"
    for dep_root, dep_string, dep_priority in deps:
        # Decrease priority so that --buildpkgonly
        # hasallzeros() works correctly.
        dep_priority = DepPriority()
        print "Parent:    ", jbigkey
        print "Depstring:", dep_string
        print "Priority:", dep_priority
        vardb = self.roots[dep_root].trees["vartree"].dbapi
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=myuse, parent=pkg, strict=strict)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(jbigkey, dep_string, str(e))
        print "Candidates:", selected_atoms
        for atom in selected_atoms:
            atom = portage.dep.Atom(atom)
            mypriority = dep_priority.copy()
            # A dep already satisfied by an installed package is
            # lower urgency for merge ordering.
            if not atom.blocker and vardb.match(atom):
                mypriority.satisfied = True
            if not self._add_dep(Dependency(atom=atom,
                    blocker=atom.blocker, depth=depth, parent=pkg,
                    priority=mypriority, root=dep_root),
                    allow_unsatisfied=allow_unsatisfied):
        except portage.exception.InvalidAtom, e:
            show_invalid_depstring_notice(
                pkg, dep_string, str(e))
            if not pkg.installed:
        print "Exiting...", jbigkey
    except portage.exception.AmbiguousPackageName, e:
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        portage.writemsg("    %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            "!!! This binary package cannot be installed: '%s'\n" % \
            mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """Build a dependency-priority object appropriate for the current
    operation: UnmergeDepPriority during a removal ("remove" param),
    DepPriority otherwise.  kwargs are forwarded to the constructor.
    """
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # Fix: without this `else` the DepPriority assignment would
        # unconditionally clobber the removal-specific constructor.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # NOTE(review): this chunk has elided lines (upstream numbering
    # gaps); the cp_set/deps initializations, the pn match filter and
    # the final return are missing -- verify against upstream.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)
    # Collect every category/package name known to any configured db.
    for db, pkg_type, built, installed, db_keys in \
            self._filtered_trees[root_config.root]["dbs"]:
        cp_set.update(db.cp_all())
    for cp in list(cp_set):
        cat, pn = portage.catsplit(cp)
    cat, pn = portage.catsplit(cp)
    deps.append(insert_category_into_atom(
        atom_without_category, cat))
def _have_new_virt(self, root, atom_cp):
    """Check whether any configured db for `root` carries a package
    under `atom_cp` (used to detect new-style virtual providers)."""
    # NOTE(review): the result flag assignment and return are elided
    # in this chunk (upstream numbering gaps) -- verify against upstream.
    for db, pkg_type, built, installed, db_keys in \
            self._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    """Generator yielding the argument atoms (and their args) that match
    `pkg`, skipping atoms better served by a new-style virtual or by a
    higher version in a different slot."""
    # TODO: add multiple $ROOT support
    # NOTE(review): this chunk has elided lines (upstream numbering
    # gaps); `continue`/`break`/`yield` statements and the higher_slot
    # initialization are missing -- verify against upstream.
    if pkg.root != self.target_root:
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        # Prefer a new-style virtual over an old-style PROVIDE match.
        if atom_cp != pkg.cp and \
                self._have_new_virt(pkg.root, atom_cp):
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
        if higher_slot is not None:
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list.

    Returns a (success, myfavorites) pair.  Handles, per argument kind:
    binary packages (.tbz2), ebuild files (.ebuild), absolute paths
    (resolved to owning packages), named sets, and plain atoms.
    """
    # NOTE(review): this chunk has many elided lines (gaps in upstream
    # numbering); loop headers (e.g. the `for x in myfiles:` driver),
    # local initializations (args, myfavorites, lookup_owners, owners,
    # relative_paths, greedy_atoms, slots, missing), try/except headers
    # and several `continue`/`return` statements are missing.  The code
    # is kept verbatim -- verify against upstream before editing logic.
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self.myopts
    # --- per-argument dispatch on file extension / shape ---
    ext = os.path.splitext(x)[1]
    if not os.path.exists(x):
        # Try PKGDIR/All and PKGDIR as fallback locations for tbz2s.
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
    elif os.path.exists(
            os.path.join(pkgsettings["PKGDIR"], x)):
        x = os.path.join(pkgsettings["PKGDIR"], x)
    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
    print "!!! Please ensure the tbz2 exists as specified.\n"
    return 0, myfavorites
    # Derive cat/pkg-ver from the tbz2's embedded CATEGORY.
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
            os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
        print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
        return 0, myfavorites
    db_keys = list(bindb._aux_cache_keys)
    metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
    pkg = Package(type_name="binary", root_config=root_config,
        cpv=mykey, built=True, metadata=metadata,
    self._pkg_cache[pkg] = pkg
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    elif ext==".ebuild":
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        # [:-7] strips the ".ebuild" suffix.
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
                cp, os.path.basename(ebuild_path)):
            print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
            return 0, myfavorites
        if mykey not in portdb.xmatch(
                "match-visible", portage.dep_getkey(mykey)):
            print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
            print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
            print colorize("BAD", "*** page for details.")
            countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        db_keys = list(portdb._aux_cache_keys)
        metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
        pkg = Package(type_name="ebuild", root_config=root_config,
            cpv=mykey, metadata=metadata, onlydeps=onlydeps)
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
        self._pkg_cache[pkg] = pkg
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    elif x.startswith(os.path.sep):
        # An absolute path: resolve it to the owning package below.
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        s = x[len(SETPREFIX):]
        raise portage.exception.PackageSetNotFound(s)
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
        myfavorites.append(x)
    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        atom_cp = portage.dep_getkey(atom)
        if vardb.cp_list(atom_cp):
            installed_cp_set.add(atom_cp)
    # If exactly one expansion is already installed, prefer it.
    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        installed_cp = iter(installed_cp_set).next()
        expanded_atoms = [atom for atom in expanded_atoms \
            if portage.dep_getkey(atom) == installed_cp]
    if len(expanded_atoms) > 1:
        ambiguous_package_name(x, expanded_atoms, root_config,
            self.spinner, self.myopts)
        return False, myfavorites
    atom = expanded_atoms[0]
    null_atom = insert_category_into_atom(x, "null")
    null_cp = portage.dep_getkey(null_atom)
    cat, atom_pn = portage.catsplit(null_cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # Allow the depgraph to choose which virtual.
    atom = insert_category_into_atom(x, "virtual")
    atom = insert_category_into_atom(x, "null")
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))
    # --- resolve queued absolute paths to their owning packages ---
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True
    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        relative_paths.append(x[len(myroot):])
    for pkg, relative_path in \
            real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
    if not search_for_multiple:
        portage.writemsg(("\n\n!!! '%s' is not claimed " + \
            "by any package.\n") % lookup_owners[0], noiselevel=-1)
    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = portage.cpv_getkey(cpv)
    atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))
    if "--update" in self.myopts:
        # Enable greedy SLOT atoms for atoms given as arguments.
        # This is currently disabled for sets since greedy SLOT
        # atoms could be a property of the set itself.
        # In addition to any installed slots, also try to pull
        # in the latest new slot that may be available.
        greedy_atoms.append(arg)
        if not isinstance(arg, (AtomArg, PackageArg)):
        atom_cp = portage.dep_getkey(arg.atom)
        for cpv in vardb.match(arg.atom):
            slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
        greedy_atoms.append(
            AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
                root_config=root_config))
    # Create the "args" package set from atoms and
    # packages given as arguments.
    args_set = self._sets["args"]
    if not isinstance(arg, (AtomArg, PackageArg)):
    if myatom in args_set:
    args_set.add(myatom)
    myfavorites.append(myatom)
    self._set_atoms.update(chain(*self._sets.itervalues()))
    atom_arg_map = self._atom_arg_map
    for atom in arg.set:
        atom_key = (atom, myroot)
        refs = atom_arg_map.get(atom_key)
        atom_arg_map[atom_key] = refs
    pprovideddict = pkgsettings.pprovideddict
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    for atom in arg.set:
        self.spinner.update()
        dep = Dependency(atom=atom, onlydeps=onlydeps,
            root=myroot, parent=arg)
        atom_cp = portage.dep_getkey(atom)
        pprovided = pprovideddict.get(portage.dep_getkey(atom))
        if pprovided and portage.match_from_list(atom, pprovided):
            # A provided package has been specified on the command line.
            self._pprovided_args.append((arg, atom))
        if isinstance(arg, PackageArg):
            if not self._add_pkg(arg.package, dep) or \
                    not self._create_graph():
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % arg.arg)
                return 0, myfavorites
        portage.writemsg(" Arg: %s\n Atom: %s\n" % \
            (arg, atom), noiselevel=-1)
        pkg, existing_node = self._select_package(
            myroot, atom, onlydeps=onlydeps)
        if not (isinstance(arg, SetArg) and \
                arg.name in ("system", "world")):
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            return 0, myfavorites
        self._missing_args.append((arg, atom))
        if atom_cp != pkg.cp:
            # For old-style virtuals, we need to repeat the
            # package.provided check against the selected package.
            expanded_atom = atom.replace(atom_cp, pkg.cp)
            pprovided = pprovideddict.get(pkg.cp)
            portage.match_from_list(expanded_atom, pprovided):
            # A provided package has been
            # specified on the command line.
            self._pprovided_args.append((arg, atom))
        if pkg.installed and "selective" not in self.myparams:
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            # Previous behavior was to bail out in this case, but
            # since the dep is satisfied by the installed package,
            # it's more friendly to continue building the graph
            # and just show a warning message. Therefore, only bail
            # out here if the atom is not from either the system or
            if not (isinstance(arg, SetArg) and \
                    arg.name in ("system", "world")):
                return 0, myfavorites
        # Add the selected package to the graph as soon as possible
        # so that later dep_check() calls can use it as feedback
        # for making more consistent atom selections.
        if not self._add_pkg(pkg, dep):
            if isinstance(arg, SetArg):
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s from %s\n") % \
            sys.stderr.write(("\n\n!!! Problem resolving " + \
                "dependencies for %s\n") % atom)
            return 0, myfavorites
    except portage.exception.MissingSignature, e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature, e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit, e:
        raise # Needed else can't exit
    except Exception, e:
        print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
        print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites
    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                print "Missing binary for:",xs[2]
    except self._unknown_internal_error:
        return False, myfavorites
    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
def _select_atoms_from_graph(self, *pargs, **kwargs):
    """
    Prefer atoms matching packages that have already been
    added to the graph or those that are installed and have
    not been scheduled for replacement.
    """
    # Force the graph-backed trees so dep_check sees graph state.
    kwargs["trees"] = self._graph_trees
    return self._select_atoms_highest_available(*pargs, **kwargs)
def _select_atoms_highest_available(self, root, depstring,
        myuse=None, parent=None, strict=True, trees=None):
    """This will raise InvalidDependString if necessary. If trees is
    None then self._filtered_trees is used."""
    # NOTE(review): this chunk has elided lines (upstream numbering
    # gaps); the try/finally that restores _dep_check_strict and the
    # success check before raising are incomplete -- verify upstream.
    pkgsettings = self.pkgsettings[root]
    trees = self._filtered_trees
    if parent is not None:
        # dep_check reads the parent from the trees mapping.
        trees[root]["parent"] = parent
    # Temporarily relax global strictness for this dep_check call.
    portage.dep._dep_check_strict = False
    mycheck = portage.dep_check(depstring, None,
        pkgsettings, myuse=myuse,
        myroot=root, trees=trees)
    if parent is not None:
        trees[root].pop("parent")
    portage.dep._dep_check_strict = True
    raise portage.exception.InvalidDependString(mycheck[1])
    selected_atoms = mycheck[1]
    return selected_atoms
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
    """Print a human-readable explanation of why `atom` could not be
    satisfied for `root`: missing USE/IUSE flags, masked packages, or
    no matching ebuilds at all, plus the parent chain that required it.
    """
    # NOTE(review): this chunk has elided lines (upstream numbering
    # gaps); conditionals around slot handling, list initializations
    # (missing_use, missing_iuse, changes, msg, node), several
    # `continue` statements and the final message emission are missing.
    atom = portage.dep.Atom(atom)
    atom_set = InternalPackageSet(initial_atoms=(atom,))
    atom_without_use = atom
    atom_without_use = portage.dep.remove_slot(atom)
    atom_without_use += ":" + atom.slot
    atom_without_use = portage.dep.Atom(atom_without_use)
    xinfo = '"%s"' % atom
    # Discard null/ from failed cpv_expand category expansion.
    xinfo = xinfo.replace("null/", "")
    masked_packages = []
    missing_licenses = []
    have_eapi_mask = False
    pkgsettings = self.pkgsettings[root]
    implicit_iuse = pkgsettings._get_implicit_iuse()
    root_config = self.roots[root]
    portdb = self.roots[root].trees["porttree"].dbapi
    dbs = self._filtered_trees[root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        if hasattr(db, "xmatch"):
            # xmatch("match-all", ...) includes masked packages.
            cpv_list = db.xmatch("match-all", atom_without_use)
        cpv_list = db.match(atom_without_use)
        for cpv in cpv_list:
            metadata, mreasons = get_mask_info(root_config, cpv,
                pkgsettings, db, pkg_type, built, installed, db_keys)
            if metadata is not None:
                pkg = Package(built=built, cpv=cpv,
                    installed=installed, metadata=metadata,
                    root_config=root_config)
                if pkg.cp != atom.cp:
                    # A cpv can be returned from dbapi.match() as an
                    # old-style virtual match even in cases when the
                    # package does not actually PROVIDE the virtual.
                    # Filter out any such false matches here.
                    if not atom_set.findAtomForPackage(pkg):
                if atom.use and not mreasons:
                    missing_use.append(pkg)
                masked_packages.append(
                    (root_config, pkgsettings, cpv, metadata, mreasons))
    missing_use_reasons = []
    missing_iuse_reasons = []
    for pkg in missing_use:
        use = pkg.use.enabled
        iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
        iuse_re = re.compile("^(%s)$" % "|".join(iuse))
        for x in atom.use.required:
            if iuse_re.match(x) is None:
                missing_iuse.append(x)
        mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
        missing_iuse_reasons.append((pkg, mreasons))
        need_enable = sorted(atom.use.enabled.difference(use))
        need_disable = sorted(atom.use.disabled.intersection(use))
        if need_enable or need_disable:
            changes.extend(colorize("red", "+" + x) \
                for x in need_enable)
            changes.extend(colorize("blue", "-" + x) \
                for x in need_disable)
            mreasons.append("Change USE: %s" % " ".join(changes))
            missing_use_reasons.append((pkg, mreasons))
    if missing_iuse_reasons and not missing_use_reasons:
        missing_use_reasons = missing_iuse_reasons
    elif missing_use_reasons:
        # Only show the latest version.
        del missing_use_reasons[1:]
    if missing_use_reasons:
        print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
        print "!!! One of the following packages is required to complete your request:"
        for pkg, mreasons in missing_use_reasons:
            print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
    elif masked_packages:
        colorize("BAD", "All ebuilds that could satisfy ") + \
        colorize("INFORM", xinfo) + \
        colorize("BAD", " have been masked.")
        print "!!! One of the following masked packages is required to complete your request:"
        have_eapi_mask = show_masked_packages(masked_packages)
        msg = ("The current version of portage supports " + \
            "EAPI '%s'. You must upgrade to a newer version" + \
            " of portage before EAPI masked packages can" + \
            " be installed.") % portage.const.EAPI
        from textwrap import wrap
        for line in wrap(msg, 75):
    print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
    # Show parent nodes and the argument that pulled them in.
    traversed_nodes = set()
    while node is not None:
        traversed_nodes.add(node)
        msg.append('(dependency required by "%s" [%s])' % \
            (colorize('INFORM', str(node.cpv)), node.type_name))
        # When traversing to parents, prefer arguments over packages
        # since arguments are root nodes. Never traverse the same
        # package twice, in order to prevent an infinite loop.
        selected_parent = None
        for parent in self.digraph.parent_nodes(node):
            if isinstance(parent, DependencyArg):
                msg.append('(dependency required by "%s" [argument])' % \
                    (colorize('INFORM', str(parent))))
                selected_parent = None
            if parent not in traversed_nodes:
                selected_parent = parent
        node = selected_parent
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
    """Memoizing wrapper around _select_pkg_highest_available_imp,
    keyed on (root, atom, onlydeps) in self._highest_pkg_cache."""
    # NOTE(review): this chunk has elided lines (upstream numbering
    # gaps); the cache-hit unpack (`pkg, existing = ret`), the cached
    # `ret = pkg, pkg` update and the returns are missing.
    cache_key = (root, atom, onlydeps)
    ret = self._highest_pkg_cache.get(cache_key)
    if pkg and not existing:
        existing = self._slot_pkg_map[root].get(pkg.slot_atom)
        if existing and existing == pkg:
            # Update the cache to reflect that the
            # package has been added to the graph.
            self._highest_pkg_cache[cache_key] = ret
    # Cache miss: do the real selection and remember it.
    ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
    self._highest_pkg_cache[cache_key] = ret
    settings = pkg.root_config.settings
    # Register newly-discovered visible packages so later argument
    # matching (visible_pkgs.match_pkgs) can see them.
    if visible(settings, pkg) and not (pkg.installed and \
            settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
        pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: scan the per-root list of package dbs
# (ebuild/binary/installed) for the highest acceptable match of `atom`,
# returning a (Package, existing_node) pair via matched_packages.
# NOTE(review): many original lines are elided in this listing (gaps in
# the embedded numbering) — notably loop bodies, `break`/`continue`
# statements and `try:` lines that the visible `except` clauses at 5920,
# 5953, 5979 imply.  Do not infer the elided control flow from here.
5789 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5790 root_config = self.roots[root]
5791 pkgsettings = self.pkgsettings[root]
5792 dbs = self._filtered_trees[root]["dbs"]
5793 vardb = self.roots[root].trees["vartree"].dbapi
5794 portdb = self.roots[root].trees["porttree"].dbapi
5795 # List of acceptable packages, ordered by type preference.
5796 matched_packages = []
5797 highest_version = None
# Normalize a plain string atom into a portage.dep.Atom instance.
5798 if not isinstance(atom, portage.dep.Atom):
5799 atom = portage.dep.Atom(atom)
5801 atom_set = InternalPackageSet(initial_atoms=(atom,))
5802 existing_node = None
5804 usepkgonly = "--usepkgonly" in self.myopts
5805 empty = "empty" in self.myparams
5806 selective = "selective" in self.myparams
5808 noreplace = "--noreplace" in self.myopts
5809 # Behavior of the "selective" parameter depends on
5810 # whether or not a package matches an argument atom.
5811 # If an installed package provides an old-style
5812 # virtual that is no longer provided by an available
5813 # package, the installed package may match an argument
5814 # atom even though none of the available packages do.
5815 # Therefore, "selective" logic does not consider
5816 # whether or not an installed package matches an
5817 # argument atom. It only considers whether or not
5818 # available packages match argument atoms, which is
5819 # represented by the found_available_arg flag.
5820 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fall back
# to a fresh selection.
5821 for find_existing_node in True, False:
5824 for db, pkg_type, built, installed, db_keys in dbs:
5827 if installed and not find_existing_node:
5828 want_reinstall = reinstall or empty or \
5829 (found_available_arg and not selective)
5830 if want_reinstall and matched_packages:
# xmatch("match-all", ...) returns masked as well as visible cpvs;
# plain match() is the fallback for dbs without xmatch (e.g. vardb).
5832 if hasattr(db, "xmatch"):
5833 cpv_list = db.xmatch("match-all", atom)
5835 cpv_list = db.match(atom)
5837 # USE=multislot can make an installed package appear as if
5838 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5839 # won't do any good as long as USE=multislot is enabled since
5840 # the newly built package still won't have the expected slot.
5841 # Therefore, assume that such SLOT dependencies are already
5842 # satisfied rather than forcing a rebuild.
5843 if installed and not cpv_list and atom.slot:
5844 for cpv in db.match(atom.cp):
5845 slot_available = False
5846 for other_db, other_type, other_built, \
5847 other_installed, other_keys in dbs:
5850 other_db.aux_get(cpv, ["SLOT"])[0]:
5851 slot_available = True
5855 if not slot_available:
5857 inst_pkg = self._pkg(cpv, "installed",
5858 root_config, installed=installed)
5859 # Remove the slot from the atom and verify that
5860 # the package matches the resulting atom.
5861 atom_without_slot = portage.dep.remove_slot(atom)
5863 atom_without_slot += str(atom.use)
5864 atom_without_slot = portage.dep.Atom(atom_without_slot)
5865 if portage.match_from_list(
5866 atom_without_slot, [inst_pkg]):
5867 cpv_list = [inst_pkg.cpv]
# Installed or onlydeps packages are never scheduled for merge.
5872 pkg_status = "merge"
5873 if installed or onlydeps:
5874 pkg_status = "nomerge"
5877 for cpv in cpv_list:
5878 # Make --noreplace take precedence over --newuse.
5879 if not installed and noreplace and \
5880 cpv in vardb.match(atom):
5881 # If the installed version is masked, it may
5882 # be necessary to look at lower versions,
5883 # in case there is a visible downgrade.
5885 reinstall_for_flags = None
5886 cache_key = (pkg_type, root, cpv, pkg_status)
5887 calculated_use = True
5888 pkg = self._pkg_cache.get(cache_key)
# Cache miss: build a fresh Package from db metadata.
5890 calculated_use = False
5892 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5895 pkg = Package(built=built, cpv=cpv,
5896 installed=installed, metadata=metadata,
5897 onlydeps=onlydeps, root_config=root_config,
5899 metadata = pkg.metadata
# A "?" in LICENSE/PROVIDE indicates USE-conditionals, so USE must
# be calculated before visibility can be evaluated.
5900 if not built and ("?" in metadata["LICENSE"] or \
5901 "?" in metadata["PROVIDE"]):
5902 # This is avoided whenever possible because
5903 # it's expensive. It only needs to be done here
5904 # if it has an effect on visibility.
5905 pkgsettings.setcpv(pkg)
5906 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5907 calculated_use = True
5908 self._pkg_cache[pkg] = pkg
5910 if not installed or (built and matched_packages):
5911 # Only enforce visibility on installed packages
5912 # if there is at least one other visible package
5913 # available. By filtering installed masked packages
5914 # here, packages that have been masked since they
5915 # were installed can be automatically downgraded
5916 # to an unmasked version.
5918 if not visible(pkgsettings, pkg):
# NOTE(review): the matching `try:` line is elided in this listing.
5920 except portage.exception.InvalidDependString:
5924 # Enable upgrade or downgrade to a version
5925 # with visible KEYWORDS when the installed
5926 # version is masked by KEYWORDS, but never
5927 # reinstall the same exact version only due
5928 # to a KEYWORDS mask.
5929 if built and matched_packages:
5931 different_version = None
5932 for avail_pkg in matched_packages:
5933 if not portage.dep.cpvequal(
5934 pkg.cpv, avail_pkg.cpv):
5935 different_version = avail_pkg
5937 if different_version is not None:
5940 pkgsettings._getMissingKeywords(
5941 pkg.cpv, pkg.metadata):
5944 # If the ebuild no longer exists or it's
5945 # keywords have been dropped, reject built
5946 # instances (installed or binary).
5947 # If --usepkgonly is enabled, assume that
5948 # the ebuild status should be ignored.
5952 pkg.cpv, "ebuild", root_config)
5953 except portage.exception.PackageNotFound:
5956 if not visible(pkgsettings, pkg_eb):
# Late USE calculation for ebuilds whose USE wasn't needed earlier.
5959 if not pkg.built and not calculated_use:
5960 # This is avoided whenever possible because
5962 pkgsettings.setcpv(pkg)
5963 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5965 if pkg.cp != atom.cp:
5966 # A cpv can be returned from dbapi.match() as an
5967 # old-style virtual match even in cases when the
5968 # package does not actually PROVIDE the virtual.
5969 # Filter out any such false matches here.
5970 if not atom_set.findAtomForPackage(pkg):
5974 if root == self.target_root:
5976 # Ebuild USE must have been calculated prior
5977 # to this point, in case atoms have USE deps.
5978 myarg = self._iter_atoms_for_pkg(pkg).next()
5979 except StopIteration:
5981 except portage.exception.InvalidDependString:
5983 # masked by corruption
5985 if not installed and myarg:
5986 found_available_arg = True
# Enforce USE dependencies of the atom against ebuild packages.
5988 if atom.use and not pkg.built:
5989 use = pkg.use.enabled
5990 if atom.use.enabled.difference(use):
5992 if atom.use.disabled.intersection(use):
# Track the overall highest version seen for this cp.
5994 if pkg.cp == atom_cp:
5995 if highest_version is None:
5996 highest_version = pkg
5997 elif pkg > highest_version:
5998 highest_version = pkg
5999 # At this point, we've found the highest visible
6000 # match from the current repo. Any lower versions
6001 # from this repo are ignored, so this so the loop
6002 # will always end with a break statement below
6004 if find_existing_node:
6005 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6008 if portage.dep.match_from_list(atom, [e_pkg]):
6009 if highest_version and \
6010 e_pkg.cp == atom_cp and \
6011 e_pkg < highest_version and \
6012 e_pkg.slot_atom != highest_version.slot_atom:
6013 # There is a higher version available in a
6014 # different slot, so this existing node is
6018 matched_packages.append(e_pkg)
6019 existing_node = e_pkg
6021 # Compare built package to current config and
6022 # reject the built package if necessary.
6023 if built and not installed and \
6024 ("--newuse" in self.myopts or \
6025 "--reinstall" in self.myopts):
6026 iuses = pkg.iuse.all
6027 old_use = pkg.use.enabled
# NOTE(review): the assignment of `myeb` is elided in this listing;
# presumably the matching ebuild for this cpv — confirm.
6029 pkgsettings.setcpv(myeb)
6031 pkgsettings.setcpv(pkg)
6032 now_use = pkgsettings["PORTAGE_USE"].split()
6033 forced_flags = set()
6034 forced_flags.update(pkgsettings.useforce)
6035 forced_flags.update(pkgsettings.usemask)
6037 if myeb and not usepkgonly:
6038 cur_iuse = myeb.iuse.all
6039 if self._reinstall_for_flags(forced_flags,
6043 # Compare current config to installed package
6044 # and do not reinstall if possible.
6045 if not installed and \
6046 ("--newuse" in self.myopts or \
6047 "--reinstall" in self.myopts) and \
6048 cpv in vardb.match(atom):
6049 pkgsettings.setcpv(pkg)
6050 forced_flags = set()
6051 forced_flags.update(pkgsettings.useforce)
6052 forced_flags.update(pkgsettings.usemask)
6053 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6054 old_iuse = set(filter_iuse_defaults(
6055 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6056 cur_use = pkgsettings["PORTAGE_USE"].split()
6057 cur_iuse = pkg.iuse.all
6058 reinstall_for_flags = \
6059 self._reinstall_for_flags(
6060 forced_flags, old_use, old_iuse,
6062 if reinstall_for_flags:
6066 matched_packages.append(pkg)
6067 if reinstall_for_flags:
6068 self._reinstall_nodes[pkg] = \
6072 if not matched_packages:
6075 if "--debug" in self.myopts:
6076 for pkg in matched_packages:
6077 portage.writemsg("%s %s\n" % \
6078 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6080 # Filter out any old-style virtual matches if they are
6081 # mixed with new-style virtual matches.
6082 cp = portage.dep_getkey(atom)
6083 if len(matched_packages) > 1 and \
6084 "virtual" == portage.catsplit(cp)[0]:
6085 for pkg in matched_packages:
6088 # Got a new-style virtual, so filter
6089 # out any old-style virtuals.
6090 matched_packages = [pkg for pkg in matched_packages \
# If several candidates remain, keep only those equal to portage.best().
6094 if len(matched_packages) > 1:
6095 bestmatch = portage.best(
6096 [pkg.cpv for pkg in matched_packages])
6097 matched_packages = [pkg for pkg in matched_packages \
6098 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6100 # ordered by type preference ("ebuild" type is the last resort)
6101 return matched_packages[-1], existing_node
# Restricted selection used by _complete_graph(): only consider packages
# already in the graph db (graph nodes plus installed, non-replaced pkgs).
# NOTE(review): several lines are elided in this listing (e.g. the
# docstring delimiters around 6104/6107, the empty-match early return
# around 6111-6112, and the `if e_pkg is not None` branch around
# 6117-6118) — confirm against the full file.
6103 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6105 Select packages that have already been added to the graph or
6106 those that are installed and have not been scheduled for
6109 graph_db = self._graph_trees[root]["porttree"].dbapi
6110 matches = graph_db.match(atom)
6113 cpv = matches[-1] # highest match
# Reconstruct the slot atom "cp:SLOT" to look up an existing graph node.
6114 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6115 graph_db.aux_get(cpv, ["SLOT"])[0])
6116 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6119 # Since this cpv exists in the graph_db,
6120 # we must have a cached Package instance.
6121 cache_key = ("installed", root, cpv, "nomerge")
6122 return (self._pkg_cache[cache_key], None)
# NOTE(review): interior lines are elided in this listing (gaps in the
# embedded numbering, e.g. around 6138-6139, 6142-6143, 6155, 6183) —
# including early `return` statements implied by the guard conditions.
6124 def _complete_graph(self):
6126 Add any deep dependencies of required sets (args, system, world) that
6127 have not been pulled into the graph yet. This ensures that the graph
6128 is consistent such that initially satisfied deep dependencies are not
6129 broken in the new graph. Initially unsatisfied dependencies are
6130 irrelevant since we only want to avoid breaking dependencies that are
6133 Since this method can consume enough time to disturb users, it is
6134 currently only enabled by the --complete-graph option.
# Guard: not applicable with --buildpkgonly or when recursion is off.
6136 if "--buildpkgonly" in self.myopts or \
6137 "recurse" not in self.myparams:
6140 if "complete" not in self.myparams:
6141 # Skip this to avoid consuming enough time to disturb users.
6144 # Put the depgraph into a mode that causes it to only
6145 # select packages that have already been added to the
6146 # graph or those that are installed and have not been
6147 # scheduled for replacement. Also, toggle the "deep"
6148 # parameter so that all dependencies are traversed and
6150 self._select_atoms = self._select_atoms_from_graph
6151 self._select_package = self._select_pkg_from_graph
6152 already_deep = "deep" in self.myparams
6153 if not already_deep:
6154 self.myparams.add("deep")
# Re-seed the dep stack with each root's required sets (system/world/...).
6156 for root in self.roots:
6157 required_set_names = self._required_set_names.copy()
6158 if root == self.target_root and \
6159 (already_deep or "empty" in self.myparams):
6160 required_set_names.difference_update(self._sets)
6161 if not required_set_names and not self._ignored_deps:
6163 root_config = self.roots[root]
6164 setconfig = root_config.setconfig
6166 # Reuse existing SetArg instances when available.
6167 for arg in self.digraph.root_nodes():
6168 if not isinstance(arg, SetArg):
6170 if arg.root_config != root_config:
6172 if arg.name in required_set_names:
6174 required_set_names.remove(arg.name)
6175 # Create new SetArg instances only when necessary.
6176 for s in required_set_names:
6177 expanded_set = InternalPackageSet(
6178 initial_atoms=setconfig.getSetAtoms(s))
6179 atom = SETPREFIX + s
6180 args.append(SetArg(arg=atom, set=expanded_set,
6181 root_config=root_config))
6182 vardb = root_config.trees["vartree"].dbapi
# NOTE(review): the enclosing `for arg in args:` loop header appears to
# be elided here (line 6183) — confirm.
6184 for atom in arg.set:
6185 self._dep_stack.append(
6186 Dependency(atom=atom, root=root, parent=arg))
6187 if self._ignored_deps:
6188 self._dep_stack.extend(self._ignored_deps)
6189 self._ignored_deps = []
6190 if not self._create_graph(allow_unsatisfied=True):
6192 # Check the unsatisfied deps to see if any initially satisfied deps
6193 # will become unsatisfied due to an upgrade. Initially unsatisfied
6194 # deps are irrelevant since we only want to avoid breaking deps
6195 # that are initially satisfied.
6196 while self._unsatisfied_deps:
6197 dep = self._unsatisfied_deps.pop()
6198 matches = vardb.match_pkgs(dep.atom)
6200 self._initially_unsatisfied_deps.append(dep)
6202 # An scheduled installation broke a deep dependency.
6203 # Add the installed package to the graph so that it
6204 # will be appropriately reported as a slot collision
6205 # (possibly solvable via backtracking).
6206 pkg = matches[-1] # highest match
6207 if not self._add_pkg(pkg, dep):
6209 if not self._create_graph(allow_unsatisfied=True):
# Package-instance factory with caching keyed by
# (type_name, root, cpv, operation).
# NOTE(review): lines are elided in this listing — including the cache-hit
# early return around 6225, the `try:` implied by the bare line at 6233,
# and the trailing `return pkg` around 6241-6242 — confirm against the
# full file.
6213 def _pkg(self, cpv, type_name, root_config, installed=False):
6215 Get a package instance from the cache, or create a new
6216 one if necessary. Raises KeyError from aux_get if it
6217 failures for some reason (package does not exist or is
# All packages produced here use the "nomerge" operation.
6222 operation = "nomerge"
6223 pkg = self._pkg_cache.get(
6224 (type_name, root_config.root, cpv, operation))
# Cache miss: fetch metadata from the appropriate tree's dbapi.
6226 tree_type = self.pkg_tree_map[type_name]
6227 db = root_config.trees[tree_type].dbapi
# _trees_orig is used for the aux-cache key list rather than the
# (possibly fake) trees in root_config.
6228 db_keys = list(self._trees_orig[root_config.root][
6229 tree_type].dbapi._aux_cache_keys)
6231 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6233 raise portage.exception.PackageNotFound(cpv)
6234 pkg = Package(cpv=cpv, metadata=metadata,
6235 root_config=root_config, installed=installed)
# Ebuild packages need USE calculated from the current config.
6236 if type_name == "ebuild":
6237 settings = self.pkgsettings[root_config.root]
6238 settings.setcpv(pkg)
6239 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6240 self._pkg_cache[pkg] = pkg
# NOTE(review): many interior lines are elided in this listing (gaps in
# the embedded numbering), including loop headers, `try:` lines implied
# by the visible `except` clauses, and `continue`/`break` statements.
# Comments below annotate only what is visible.
6243 def validate_blockers(self):
6244 """Remove any blockers from the digraph that do not match any of the
6245 packages within the graph. If necessary, create hard deps to ensure
6246 correct merge order such that mutually blocking packages are never
6247 installed simultaneously."""
# Blockers are irrelevant when nothing is actually merged to ROOT.
6249 if "--buildpkgonly" in self.myopts or \
6250 "--nodeps" in self.myopts:
6253 #if "deep" in self.myparams:
6255 # Pull in blockers from all installed packages that haven't already
6256 # been pulled into the depgraph. This is not enabled by default
6257 # due to the performance penalty that is incurred by all the
6258 # additional dep_check calls that are required.
6260 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
# Phase 1: collect blocker atoms for every installed package, using the
# on-disk BlockerCache and the graph where possible to avoid dep_check.
6261 for myroot in self.trees:
6262 vardb = self.trees[myroot]["vartree"].dbapi
6263 portdb = self.trees[myroot]["porttree"].dbapi
6264 pkgsettings = self.pkgsettings[myroot]
6265 final_db = self.mydbapi[myroot]
6267 blocker_cache = BlockerCache(myroot, vardb)
# Any cpv left in stale_cache at the end is purged from the cache.
6268 stale_cache = set(blocker_cache)
6271 stale_cache.discard(cpv)
6272 pkg_in_graph = self.digraph.contains(pkg)
6274 # Check for masked installed packages. Only warn about
6275 # packages that are in the graph in order to avoid warning
6276 # about those that will be automatically uninstalled during
6277 # the merge process or by --depclean.
6279 if pkg_in_graph and not visible(pkgsettings, pkg):
6280 self._masked_installed.add(pkg)
6282 blocker_atoms = None
6288 self._blocker_parents.child_nodes(pkg))
6293 self._irrelevant_blockers.child_nodes(pkg))
6296 if blockers is not None:
6297 blockers = set(str(blocker.atom) \
6298 for blocker in blockers)
6300 # If this node has any blockers, create a "nomerge"
6301 # node for it so that they can be enforced.
6302 self.spinner.update()
# Invalidate cached blocker data when the package's COUNTER changed.
6303 blocker_data = blocker_cache.get(cpv)
6304 if blocker_data is not None and \
6305 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6308 # If blocker data from the graph is available, use
6309 # it to validate the cache and update the cache if
6311 if blocker_data is not None and \
6312 blockers is not None:
6313 if not blockers.symmetric_difference(
6314 blocker_data.atoms):
6318 if blocker_data is None and \
6319 blockers is not None:
6320 # Re-use the blockers from the graph.
6321 blocker_atoms = sorted(blockers)
6322 counter = long(pkg.metadata["COUNTER"])
6324 blocker_cache.BlockerData(counter, blocker_atoms)
6325 blocker_cache[pkg.cpv] = blocker_data
6329 blocker_atoms = blocker_data.atoms
6331 # Use aux_get() to trigger FakeVartree global
6332 # updates on *DEPEND when appropriate.
6333 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6334 # It is crucial to pass in final_db here in order to
6335 # optimize dep_check calls by eliminating atoms via
6336 # dep_wordreduce and dep_eval calls.
# Temporarily relax strict checking for installed-package depstrings.
6338 portage.dep._dep_check_strict = False
6340 success, atoms = portage.dep_check(depstr,
6341 final_db, pkgsettings, myuse=pkg.use.enabled,
6342 trees=self._graph_trees, myroot=myroot)
6343 except Exception, e:
# SystemExit must propagate rather than being reported.
6344 if isinstance(e, SystemExit):
6346 # This is helpful, for example, if a ValueError
6347 # is thrown from cpv_expand due to multiple
6348 # matches (this can happen if an atom lacks a
6350 show_invalid_depstring_notice(
6351 pkg, depstr, str(e))
# Restore strict mode (presumably in a `finally:` elided at 6354).
6355 portage.dep._dep_check_strict = True
6357 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6358 if replacement_pkg and \
6359 replacement_pkg[0].operation == "merge":
6360 # This package is being replaced anyway, so
6361 # ignore invalid dependencies so as not to
6362 # annoy the user too much (otherwise they'd be
6363 # forced to manually unmerge it first).
6365 show_invalid_depstring_notice(pkg, depstr, atoms)
# Blocker atoms are those beginning with "!".
6367 blocker_atoms = [myatom for myatom in atoms \
6368 if myatom.startswith("!")]
6369 blocker_atoms.sort()
6370 counter = long(pkg.metadata["COUNTER"])
6371 blocker_cache[cpv] = \
6372 blocker_cache.BlockerData(counter, blocker_atoms)
6375 for atom in blocker_atoms:
6376 blocker = Blocker(atom=portage.dep.Atom(atom),
6377 eapi=pkg.metadata["EAPI"], root=myroot)
6378 self._blocker_parents.add(blocker, pkg)
6379 except portage.exception.InvalidAtom, e:
6380 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6381 show_invalid_depstring_notice(
6382 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages that no longer exist, then persist.
6384 for cpv in stale_cache:
6385 del blocker_cache[cpv]
6386 blocker_cache.flush()
6389 # Discard any "uninstall" tasks scheduled by previous calls
6390 # to this method, since those tasks may not make sense given
6391 # the current graph state.
6392 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6393 if previous_uninstall_tasks:
6394 self._blocker_uninstalls = digraph()
6395 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each blocker against the initial (installed) and
# final (post-merge) package sets.
6397 for blocker in self._blocker_parents.leaf_nodes():
6398 self.spinner.update()
6399 root_config = self.roots[blocker.root]
6400 virtuals = root_config.settings.getvirtuals()
6401 myroot = blocker.root
6402 initial_db = self.trees[myroot]["vartree"].dbapi
6403 final_db = self.mydbapi[myroot]
# Expand old-style virtual blockers into one atom per provider.
6405 provider_virtual = False
6406 if blocker.cp in virtuals and \
6407 not self._have_new_virt(blocker.root, blocker.cp):
6408 provider_virtual = True
6410 if provider_virtual:
6412 for provider_entry in virtuals[blocker.cp]:
6414 portage.dep_getkey(provider_entry)
6415 atoms.append(blocker.atom.replace(
6416 blocker.cp, provider_cp))
6418 atoms = [blocker.atom]
6420 blocked_initial = []
6422 blocked_initial.extend(initial_db.match_pkgs(atom))
6426 blocked_final.extend(final_db.match_pkgs(atom))
# A blocker matching nothing in either db is irrelevant; prune it.
6428 if not blocked_initial and not blocked_final:
6429 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6430 self._blocker_parents.remove(blocker)
6431 # Discard any parents that don't have any more blockers.
6432 for pkg in parent_pkgs:
6433 self._irrelevant_blockers.add(blocker, pkg)
6434 if not self._blocker_parents.child_nodes(pkg):
6435 self._blocker_parents.remove(pkg)
6437 for parent in self._blocker_parents.parent_nodes(blocker):
6438 unresolved_blocks = False
6439 depends_on_order = set()
6440 for pkg in blocked_initial:
6441 if pkg.slot_atom == parent.slot_atom:
6442 # TODO: Support blocks within slots in cases where it
6443 # might make sense. For example, a new version might
6444 # require that the old version be uninstalled at build
6447 if parent.installed:
6448 # Two currently installed packages conflict with
6449 # eachother. Ignore this case since the damage
6450 # is already done and this would be likely to
6451 # confuse users if displayed like a normal blocker.
6454 self._blocked_pkgs.add(pkg, blocker)
6456 if parent.operation == "merge":
6457 # Maybe the blocked package can be replaced or simply
6458 # unmerged to resolve this block.
6459 depends_on_order.add((pkg, parent))
6461 # None of the above blocker resolutions techniques apply,
6462 # so apparently this one is unresolvable.
6463 unresolved_blocks = True
6464 for pkg in blocked_final:
6465 if pkg.slot_atom == parent.slot_atom:
6466 # TODO: Support blocks within slots.
6468 if parent.operation == "nomerge" and \
6469 pkg.operation == "nomerge":
6470 # This blocker will be handled the next time that a
6471 # merge of either package is triggered.
6474 self._blocked_pkgs.add(pkg, blocker)
6476 # Maybe the blocking package can be
6477 # unmerged to resolve this block.
6478 if parent.operation == "merge" and pkg.installed:
6479 depends_on_order.add((pkg, parent))
6481 elif parent.operation == "nomerge":
6482 depends_on_order.add((parent, pkg))
6484 # None of the above blocker resolutions techniques apply,
6485 # so apparently this one is unresolvable.
6486 unresolved_blocks = True
6488 # Make sure we don't unmerge any package that have been pulled
6490 if not unresolved_blocks and depends_on_order:
6491 for inst_pkg, inst_task in depends_on_order:
6492 if self.digraph.contains(inst_pkg) and \
6493 self.digraph.parent_nodes(inst_pkg):
6494 unresolved_blocks = True
# Schedule uninstall tasks for resolvable order-dependent blocks.
6497 if not unresolved_blocks and depends_on_order:
6498 for inst_pkg, inst_task in depends_on_order:
6499 uninst_task = Package(built=inst_pkg.built,
6500 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6501 metadata=inst_pkg.metadata,
6502 operation="uninstall",
6503 root_config=inst_pkg.root_config,
6504 type_name=inst_pkg.type_name)
6505 self._pkg_cache[uninst_task] = uninst_task
6506 # Enforce correct merge order with a hard dep.
6507 self.digraph.addnode(uninst_task, inst_task,
6508 priority=BlockerDepPriority.instance)
6509 # Count references to this blocker so that it can be
6510 # invalidated after nodes referencing it have been
6512 self._blocker_uninstalls.addnode(uninst_task, blocker)
6513 if not unresolved_blocks and not depends_on_order:
6514 self._irrelevant_blockers.add(blocker, parent)
6515 self._blocker_parents.remove_edge(blocker, parent)
6516 if not self._blocker_parents.parent_nodes(blocker):
6517 self._blocker_parents.remove(blocker)
6518 if not self._blocker_parents.child_nodes(parent):
6519 self._blocker_parents.remove(parent)
6520 if unresolved_blocks:
6521 self._unsolvable_blockers.add(blocker, parent)
# Return whether unresolved blocker conflicts may be tolerated: true when
# an option is set under which nothing is actually merged to ROOT.
# NOTE(review): the accumulator initialization (6526) and the loop body /
# return (6530+) are elided in this listing — confirm against the full
# file.
6525 def _accept_blocker_conflicts(self):
6527 for x in ("--buildpkgonly", "--fetchonly",
6528 "--fetch-all-uri", "--nodeps"):
6529 if x in self.myopts:
# Sort mygraph.order in place, most-referenced nodes first, using a
# Python 2 cmp function (node_info counts parents per node).
# NOTE(review): the initialization of node_info (line 6537) is elided in
# this listing — presumably a dict literal; confirm.
6534 def _merge_order_bias(self, mygraph):
6535 """Order nodes from highest to lowest overall reference count for
6536 optimal leaf node selection."""
6538 for node in mygraph.order:
6539 node_info[node] = len(mygraph.parent_nodes(node))
# Descending parent-count comparison (negative result => node1 first).
6540 def cmp_merge_preference(node1, node2):
6541 return node_info[node2] - node_info[node1]
6542 mygraph.order.sort(cmp_merge_preference)
# Return a copy of the serialized merge list, computing and caching it on
# first use.  The `reversed` parameter shadows the builtin; its handling
# and the final return are elided in this listing (gaps after 6554).
6544 def altlist(self, reversed=False):
# Loop until _serialize_tasks succeeds; it may raise
# _serialize_tasks_retry, in which case conflicts are re-resolved.
6546 while self._serialized_tasks_cache is None:
6547 self._resolve_conflicts()
6549 self._serialized_tasks_cache, self._scheduler_graph = \
6550 self._serialize_tasks()
6551 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cache.
6554 retlist = self._serialized_tasks_cache[:]
# NOTE(review): the line that populates self._scheduler_graph when it is
# None (after 6573) is elided in this listing — presumably a call to
# self.altlist(); confirm against the full file.
6559 def schedulerGraph(self):
6561 The scheduler graph is identical to the normal one except that
6562 uninstall edges are reversed in specific cases that require
6563 conflicting packages to be temporarily installed simultaneously.
6564 This is intended for use by the Scheduler in it's parallelization
6565 logic. It ensures that temporary simultaneous installation of
6566 conflicting packages is avoided when appropriate (especially for
6567 !!atom blockers), but allowed in specific cases that require it.
6569 Note that this method calls break_refs() which alters the state of
6570 internal Package instances such that this depgraph instance should
6571 not be used to perform any more calculations.
6573 if self._scheduler_graph is None:
# break_refs() detaches Package instances from this depgraph (see its
# docstring) — after this the depgraph must not be reused.
6575 self.break_refs(self._scheduler_graph.order)
6576 return self._scheduler_graph
# NOTE(review): the `for node in nodes:` loop header (around 6584-6585)
# is elided in this listing — confirm against the full file.
6578 def break_refs(self, nodes):
6580 Take a mergelist like that returned from self.altlist() and
6581 break any references that lead back to the depgraph. This is
6582 useful if you want to hold references to packages without
6583 also holding the depgraph on the heap.
# Only Package-like nodes carry a root_config attribute worth rewiring.
6586 if hasattr(node, "root_config"):
6587 # The FakeVartree references the _package_cache which
6588 # references the depgraph. So that Package instances don't
6589 # hold the depgraph and FakeVartree on the heap, replace
6590 # the RootConfig that references the FakeVartree with the
6591 # original RootConfig instance which references the actual
6593 node.root_config = \
6594 self._trees_orig[node.root_config.root]["root_config"]
# Finalize the graph before serialization: complete deep deps, validate
# blockers, then process any recorded slot collisions.  Each failing
# step aborts with an internal error.
6596 def _resolve_conflicts(self):
6597 if not self._complete_graph():
6598 raise self._unknown_internal_error()
6600 if not self.validate_blockers():
6601 raise self._unknown_internal_error()
# Slot conflicts were recorded during graph construction; resolve or
# report them now.
6603 if self._slot_collision_info:
6604 self._process_slot_conflicts()
6606 def _serialize_tasks(self):
6608 if "--debug" in self.myopts:
6609 writemsg("\ndigraph:\n\n", noiselevel=-1)
6610 self.digraph.debug_print()
6611 writemsg("\n", noiselevel=-1)
6613 scheduler_graph = self.digraph.copy()
6614 mygraph=self.digraph.copy()
6615 # Prune "nomerge" root nodes if nothing depends on them, since
6616 # otherwise they slow down merge order calculation. Don't remove
6617 # non-root nodes since they help optimize merge order in some cases
6618 # such as revdep-rebuild.
6619 removed_nodes = set()
6621 for node in mygraph.root_nodes():
6622 if not isinstance(node, Package) or \
6623 node.installed or node.onlydeps:
6624 removed_nodes.add(node)
6626 self.spinner.update()
6627 mygraph.difference_update(removed_nodes)
6628 if not removed_nodes:
6630 removed_nodes.clear()
6631 self._merge_order_bias(mygraph)
6632 def cmp_circular_bias(n1, n2):
6634 RDEPEND is stronger than PDEPEND and this function
6635 measures such a strength bias within a circular
6636 dependency relationship.
6638 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6639 ignore_priority=DepPriority.MEDIUM_SOFT)
6640 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6641 ignore_priority=DepPriority.MEDIUM_SOFT)
6642 if n1_n2_medium == n2_n1_medium:
6647 myblocker_uninstalls = self._blocker_uninstalls.copy()
6649 # Contains uninstall tasks that have been scheduled to
6650 # occur after overlapping blockers have been installed.
6651 scheduled_uninstalls = set()
6652 # Contains any Uninstall tasks that have been ignored
6653 # in order to avoid the circular deps code path. These
6654 # correspond to blocker conflicts that could not be
6656 ignored_uninstall_tasks = set()
6657 have_uninstall_task = False
6658 complete = "complete" in self.myparams
6661 def get_nodes(**kwargs):
6663 Returns leaf nodes excluding Uninstall instances
6664 since those should be executed as late as possible.
6666 return [node for node in mygraph.leaf_nodes(**kwargs) \
6667 if isinstance(node, Package) and \
6668 (node.operation != "uninstall" or \
6669 node in scheduled_uninstalls)]
6671 # sys-apps/portage needs special treatment if ROOT="/"
6672 running_root = self._running_root.root
6673 from portage.const import PORTAGE_PACKAGE_ATOM
6674 runtime_deps = InternalPackageSet(
6675 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6676 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6677 PORTAGE_PACKAGE_ATOM)
6678 replacement_portage = self.mydbapi[running_root].match_pkgs(
6679 PORTAGE_PACKAGE_ATOM)
6682 running_portage = running_portage[0]
6684 running_portage = None
6686 if replacement_portage:
6687 replacement_portage = replacement_portage[0]
6689 replacement_portage = None
6691 if replacement_portage == running_portage:
6692 replacement_portage = None
6694 if replacement_portage is not None:
6695 # update from running_portage to replacement_portage asap
6696 asap_nodes.append(replacement_portage)
6698 if running_portage is not None:
6700 portage_rdepend = self._select_atoms_highest_available(
6701 running_root, running_portage.metadata["RDEPEND"],
6702 myuse=running_portage.use.enabled,
6703 parent=running_portage, strict=False)
6704 except portage.exception.InvalidDependString, e:
6705 portage.writemsg("!!! Invalid RDEPEND in " + \
6706 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6707 (running_root, running_portage.cpv, e), noiselevel=-1)
6709 portage_rdepend = []
6710 runtime_deps.update(atom for atom in portage_rdepend \
6711 if not atom.startswith("!"))
6713 ignore_priority_soft_range = [None]
6714 ignore_priority_soft_range.extend(
6715 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6716 tree_mode = "--tree" in self.myopts
6717 # Tracks whether or not the current iteration should prefer asap_nodes
6718 # if available. This is set to False when the previous iteration
6719 # failed to select any nodes. It is reset whenever nodes are
6720 # successfully selected.
6723 # By default, try to avoid selecting root nodes whenever possible. This
6724 # helps ensure that the maximimum possible number of soft dependencies
6725 # have been removed from the graph before their parent nodes have
6726 # selected. This is especially important when those dependencies are
6727 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6728 # CHOST has been changed (like when building a stage3 from a stage2).
6729 accept_root_node = False
6731 # State of prefer_asap and accept_root_node flags for successive
6732 # iterations that loosen the criteria for node selection.
6734 # iteration prefer_asap accept_root_node
6739 # If no nodes are selected on the 3rd iteration, it is due to
6740 # unresolved blockers or circular dependencies.
6742 while not mygraph.empty():
6743 self.spinner.update()
6744 selected_nodes = None
6745 ignore_priority = None
6746 if prefer_asap and asap_nodes:
6747 """ASAP nodes are merged before their soft deps."""
6748 asap_nodes = [node for node in asap_nodes \
6749 if mygraph.contains(node)]
6750 for node in asap_nodes:
6751 if not mygraph.child_nodes(node,
6752 ignore_priority=DepPriority.SOFT):
6753 selected_nodes = [node]
6754 asap_nodes.remove(node)
6756 if not selected_nodes and \
6757 not (prefer_asap and asap_nodes):
6758 for ignore_priority in ignore_priority_soft_range:
6759 nodes = get_nodes(ignore_priority=ignore_priority)
6763 if ignore_priority is None and not tree_mode:
6764 # Greedily pop all of these nodes since no relationship
6765 # has been ignored. This optimization destroys --tree
6766 # output, so it's disabled in reversed mode. If there
6767 # is a mix of merge and uninstall nodes, save the
6768 # uninstall nodes from later since sometimes a merge
6769 # node will render an install node unnecessary, and
6770 # we want to avoid doing a separate uninstall task in
6772 merge_nodes = [node for node in nodes \
6773 if node.operation == "merge"]
6775 selected_nodes = merge_nodes
6777 selected_nodes = nodes
6779 # For optimal merge order:
6780 # * Only pop one node.
6781 # * Removing a root node (node without a parent)
6782 # will not produce a leaf node, so avoid it.
6784 if mygraph.parent_nodes(node):
6785 # found a non-root node
6786 selected_nodes = [node]
6788 if not selected_nodes and \
6789 (accept_root_node or ignore_priority is None):
6790 # settle for a root node
6791 selected_nodes = [nodes[0]]
6793 if not selected_nodes:
6794 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6796 """Recursively gather a group of nodes that RDEPEND on
6797 each other. This ensures that they are merged as a group
6798 and get their RDEPENDs satisfied as soon as possible."""
6799 def gather_deps(ignore_priority,
6800 mergeable_nodes, selected_nodes, node):
6801 if node in selected_nodes:
6803 if node not in mergeable_nodes:
6805 if node == replacement_portage and \
6806 mygraph.child_nodes(node,
6807 ignore_priority=DepPriority.MEDIUM_SOFT):
6808 # Make sure that portage always has all of its
6809 # RDEPENDs installed first.
6811 selected_nodes.add(node)
6812 for child in mygraph.child_nodes(node,
6813 ignore_priority=ignore_priority):
6814 if not gather_deps(ignore_priority,
6815 mergeable_nodes, selected_nodes, child):
6818 mergeable_nodes = set(nodes)
6819 if prefer_asap and asap_nodes:
6821 for ignore_priority in xrange(DepPriority.SOFT,
6822 DepPriority.MEDIUM_SOFT + 1):
6824 if nodes is not asap_nodes and \
6825 not accept_root_node and \
6826 not mygraph.parent_nodes(node):
6828 selected_nodes = set()
6829 if gather_deps(ignore_priority,
6830 mergeable_nodes, selected_nodes, node):
6833 selected_nodes = None
6837 # If any nodes have been selected here, it's always
6838 # possible that anything up to a MEDIUM_SOFT priority
6839 # relationship has been ignored. This state is recorded
6840 # in ignore_priority so that relevant nodes will be
6841 # added to asap_nodes when appropriate.
6843 ignore_priority = DepPriority.MEDIUM_SOFT
6845 if prefer_asap and asap_nodes and not selected_nodes:
6846 # We failed to find any asap nodes to merge, so ignore
6847 # them for the next iteration.
6851 if not selected_nodes and not accept_root_node:
6852 # Maybe there are only root nodes left, so accept them
6853 # for the next iteration.
6854 accept_root_node = True
6857 if selected_nodes and ignore_priority > DepPriority.SOFT:
6858 # Try to merge ignored medium deps as soon as possible.
6859 for node in selected_nodes:
6860 children = set(mygraph.child_nodes(node))
6861 soft = children.difference(
6862 mygraph.child_nodes(node,
6863 ignore_priority=DepPriority.SOFT))
6864 medium_soft = children.difference(
6865 mygraph.child_nodes(node,
6866 ignore_priority=DepPriority.MEDIUM_SOFT))
6867 medium_soft.difference_update(soft)
6868 for child in medium_soft:
6869 if child in selected_nodes:
6871 if child in asap_nodes:
6873 asap_nodes.append(child)
6875 if selected_nodes and len(selected_nodes) > 1:
6876 if not isinstance(selected_nodes, list):
6877 selected_nodes = list(selected_nodes)
6878 selected_nodes.sort(cmp_circular_bias)
6880 if not selected_nodes and not myblocker_uninstalls.is_empty():
6881 # An Uninstall task needs to be executed in order to
6882 # avoid conflict if possible.
6883 min_parent_deps = None
6885 for task in myblocker_uninstalls.leaf_nodes():
6886 # Do some sanity checks so that system or world packages
6887 # don't get uninstalled inappropriately here (only really
6888 # necessary when --complete-graph has not been enabled).
6890 if task in ignored_uninstall_tasks:
6893 if task in scheduled_uninstalls:
6894 # It's been scheduled but it hasn't
6895 # been executed yet due to dependence
6896 # on installation of blocking packages.
6899 root_config = self.roots[task.root]
6900 inst_pkg = self._pkg_cache[
6901 ("installed", task.root, task.cpv, "nomerge")]
6903 if self.digraph.contains(inst_pkg):
6906 forbid_overlap = False
6907 heuristic_overlap = False
6908 for blocker in myblocker_uninstalls.parent_nodes(task):
6909 if blocker.eapi in ("0", "1"):
6910 heuristic_overlap = True
6911 elif blocker.atom.blocker.overlap.forbid:
6912 forbid_overlap = True
6914 if forbid_overlap and running_root == task.root:
6917 if heuristic_overlap and running_root == task.root:
6918 # Never uninstall sys-apps/portage or its essential
6919 # dependencies, except through replacement.
6921 runtime_dep_atoms = \
6922 list(runtime_deps.iterAtomsForPackage(task))
6923 except portage.exception.InvalidDependString, e:
6924 portage.writemsg("!!! Invalid PROVIDE in " + \
6925 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6926 (task.root, task.cpv, e), noiselevel=-1)
6930 # Don't uninstall a runtime dep if it appears
6931 # to be the only suitable one installed.
6933 vardb = root_config.trees["vartree"].dbapi
6934 for atom in runtime_dep_atoms:
6935 other_version = None
6936 for pkg in vardb.match_pkgs(atom):
6937 if pkg.cpv == task.cpv and \
6938 pkg.metadata["COUNTER"] == \
6939 task.metadata["COUNTER"]:
6943 if other_version is None:
6949 # For packages in the system set, don't take
6950 # any chances. If the conflict can't be resolved
6951 # by a normal replacement operation then abort.
6954 for atom in root_config.sets[
6955 "system"].iterAtomsForPackage(task):
6958 except portage.exception.InvalidDependString, e:
6959 portage.writemsg("!!! Invalid PROVIDE in " + \
6960 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6961 (task.root, task.cpv, e), noiselevel=-1)
6967 # Note that the world check isn't always
6968 # necessary since self._complete_graph() will
6969 # add all packages from the system and world sets to the
6970 # graph. This just allows unresolved conflicts to be
6971 # detected as early as possible, which makes it possible
6972 # to avoid calling self._complete_graph() when it is
6973 # unnecessary due to blockers triggering an abortion.
6975 # For packages in the world set, go ahead and uninstall
6976 # when necessary, as long as the atom will be satisfied
6977 # in the final state.
6978 graph_db = self.mydbapi[task.root]
6981 for atom in root_config.sets[
6982 "world"].iterAtomsForPackage(task):
6984 for pkg in graph_db.match_pkgs(atom):
6991 self._blocked_world_pkgs[inst_pkg] = atom
6993 except portage.exception.InvalidDependString, e:
6994 portage.writemsg("!!! Invalid PROVIDE in " + \
6995 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6996 (task.root, task.cpv, e), noiselevel=-1)
7002 # Check the deps of parent nodes to ensure that
7003 # the chosen task produces a leaf node. Maybe
7004 # this can be optimized some more to make the
7005 # best possible choice, but the current algorithm
7006 # is simple and should be near optimal for most
7009 for parent in mygraph.parent_nodes(task):
7010 parent_deps.update(mygraph.child_nodes(parent,
7011 ignore_priority=DepPriority.MEDIUM_SOFT))
7012 parent_deps.remove(task)
7013 if min_parent_deps is None or \
7014 len(parent_deps) < min_parent_deps:
7015 min_parent_deps = len(parent_deps)
7018 if uninst_task is not None:
7019 # The uninstall is performed only after blocking
7020 # packages have been merged on top of it. File
7021 # collisions between blocking packages are detected
7022 # and removed from the list of files to be uninstalled.
7023 scheduled_uninstalls.add(uninst_task)
7024 parent_nodes = mygraph.parent_nodes(uninst_task)
7026 # Reverse the parent -> uninstall edges since we want
7027 # to do the uninstall after blocking packages have
7028 # been merged on top of it.
7029 mygraph.remove(uninst_task)
7030 for blocked_pkg in parent_nodes:
7031 mygraph.add(blocked_pkg, uninst_task,
7032 priority=BlockerDepPriority.instance)
7033 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7034 scheduler_graph.add(blocked_pkg, uninst_task,
7035 priority=BlockerDepPriority.instance)
7038 # None of the Uninstall tasks are acceptable, so
7039 # the corresponding blockers are unresolvable.
7040 # We need to drop an Uninstall task here in order
7041 # to avoid the circular deps code path, but the
7042 # blocker will still be counted as an unresolved
7044 for node in myblocker_uninstalls.leaf_nodes():
7046 mygraph.remove(node)
7051 ignored_uninstall_tasks.add(node)
7054 if uninst_task is not None:
7055 # After dropping an Uninstall task, reset
7056 # the state variables for leaf node selection and
7057 # continue trying to select leaf nodes.
7059 accept_root_node = False
7062 if not selected_nodes:
7063 self._circular_deps_for_display = mygraph
7064 raise self._unknown_internal_error()
7066 # At this point, we've succeeded in selecting one or more nodes, so
7067 # it's now safe to reset the prefer_asap and accept_root_node flags
7068 # to their default states.
7070 accept_root_node = False
7072 mygraph.difference_update(selected_nodes)
7074 for node in selected_nodes:
7075 if isinstance(node, Package) and \
7076 node.operation == "nomerge":
7079 # Handle interactions between blockers
7080 # and uninstallation tasks.
7081 solved_blockers = set()
7083 if isinstance(node, Package) and \
7084 "uninstall" == node.operation:
7085 have_uninstall_task = True
7088 vardb = self.trees[node.root]["vartree"].dbapi
7089 previous_cpv = vardb.match(node.slot_atom)
7091 # The package will be replaced by this one, so remove
7092 # the corresponding Uninstall task if necessary.
7093 previous_cpv = previous_cpv[0]
7095 ("installed", node.root, previous_cpv, "uninstall")
7097 mygraph.remove(uninst_task)
7101 if uninst_task is not None and \
7102 uninst_task not in ignored_uninstall_tasks and \
7103 myblocker_uninstalls.contains(uninst_task):
7104 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7105 myblocker_uninstalls.remove(uninst_task)
7106 # Discard any blockers that this Uninstall solves.
7107 for blocker in blocker_nodes:
7108 if not myblocker_uninstalls.child_nodes(blocker):
7109 myblocker_uninstalls.remove(blocker)
7110 solved_blockers.add(blocker)
7112 retlist.append(node)
7114 if (isinstance(node, Package) and \
7115 "uninstall" == node.operation) or \
7116 (uninst_task is not None and \
7117 uninst_task in scheduled_uninstalls):
7118 # Include satisfied blockers in the merge list
7119 # since the user might be interested and also
7120 # it serves as an indicator that blocking packages
7121 # will be temporarily installed simultaneously.
7122 for blocker in solved_blockers:
7123 retlist.append(Blocker(atom=blocker.atom,
7124 root=blocker.root, eapi=blocker.eapi,
7127 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7128 for node in myblocker_uninstalls.root_nodes():
7129 unsolvable_blockers.add(node)
7131 for blocker in unsolvable_blockers:
7132 retlist.append(blocker)
7134 # If any Uninstall tasks need to be executed in order
7135 # to avoid a conflict, complete the graph with any
7136 # dependencies that may have been initially
7137 # neglected (to ensure that unsafe Uninstall tasks
7138 # are properly identified and blocked from execution).
7139 if have_uninstall_task and \
7141 not unsolvable_blockers:
7142 self.myparams.add("complete")
7143 raise self._serialize_tasks_retry("")
7145 if unsolvable_blockers and \
7146 not self._accept_blocker_conflicts():
7147 self._unsatisfied_blockers_for_display = unsolvable_blockers
7148 self._serialized_tasks_cache = retlist[:]
7149 self._scheduler_graph = scheduler_graph
7150 raise self._unknown_internal_error()
7152 if self._slot_collision_info and \
7153 not self._accept_blocker_conflicts():
7154 self._serialized_tasks_cache = retlist[:]
7155 self._scheduler_graph = scheduler_graph
7156 raise self._unknown_internal_error()
7158 return retlist, scheduler_graph
	def _show_circular_deps(self, mygraph):
		"""
		Report an unresolvable circular-dependency situation to the user:
		prune root nodes (which cannot be part of a cycle), show the
		remaining cycle members as a --tree merge list, dump the graph,
		and print a hint about disabling USE flags.

		@param mygraph: digraph containing the remaining dependency
			nodes; it is modified (pruned) in place.
		"""
		# No leaf nodes are available, so we have a circular
		# dependency panic situation.  Reduce the noise level to a
		# minimum via repeated elimination of root nodes since they
		# have no parents and thus can not be part of a cycle.
		root_nodes = mygraph.root_nodes(
			ignore_priority=DepPriority.MEDIUM_SOFT)
		mygraph.difference_update(root_nodes)
		# Display the USE flags that are enabled on nodes that are part
		# of dependency cycles in case that helps the user decide to
		# disable some of them.
		tempgraph = mygraph.copy()
		# NOTE(review): display_order is appended to below without a
		# visible initialization in this excerpt -- presumably an empty
		# list is created just above; confirm against the full source.
		while not tempgraph.empty():
			nodes = tempgraph.leaf_nodes()
			# Fall back to an arbitrary node (graph insertion order)
			# when no leaf node is available.
			node = tempgraph.order[0]
			display_order.append(node)
			tempgraph.remove(node)
		display_order.reverse()
		# Force maximally verbose --tree output for the error display.
		self.myopts.pop("--quiet", None)
		self.myopts.pop("--verbose", None)
		self.myopts["--tree"] = True
		portage.writemsg("\n\n", noiselevel=-1)
		self.display(display_order)
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Error: circular dependencies:\n",
		portage.writemsg("\n", noiselevel=-1)
		mygraph.debug_print()
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		portage.writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
7202 def _show_merge_list(self):
7203 if self._serialized_tasks_cache is not None and \
7204 not (self._displayed_list and \
7205 (self._displayed_list == self._serialized_tasks_cache or \
7206 self._displayed_list == \
7207 list(reversed(self._serialized_tasks_cache)))):
7208 display_list = self._serialized_tasks_cache[:]
7209 if "--tree" in self.myopts:
7210 display_list.reverse()
7211 self.display(display_list)
	def _show_unsatisfied_blockers(self, blockers):
		"""
		Print an error explaining that the merge list contains packages
		which block each other, listing each conflicting package together
		with the parents/atoms that pulled it in, and finally point the
		user at the blocker documentation (unless --quiet).

		@param blockers: iterable of unsatisfied Blocker nodes
		"""
		self._show_merge_list()
		msg = "Error: The above package list contains " + \
			"packages which cannot be installed " + \
			"at the same time on the same system."
		prefix = colorize("BAD", " * ")
		from textwrap import wrap
		portage.writemsg("\n", noiselevel=-1)
		for line in wrap(msg, 70):
			portage.writemsg(prefix + line + "\n", noiselevel=-1)

		# Display the conflicting packages along with the packages
		# that pulled them in. This is helpful for troubleshooting
		# cases in which blockers don't solve automatically and
		# the reasons are not apparent from the normal merge list
		# display.
		# NOTE(review): conflict_pkgs is used below without a visible
		# initialization in this excerpt -- presumably a dict created
		# here; confirm against the full source.
		for blocker in blockers:
			for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
				self._blocker_parents.parent_nodes(blocker)):
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					# Fall back to the world-set atom that pulled in
					# this blocked package, if one was recorded.
					atom = self._blocked_world_pkgs.get(pkg)
					if atom is not None:
						parent_atoms = set([("@world", atom)])
				conflict_pkgs[pkg] = parent_atoms

		# Max number of parents shown, to avoid flooding the display.
		# NOTE(review): max_parents, pruned_list, indent and the list form
		# of msg are initialized in lines elided from this excerpt, and
		# the two `if len(pruned_list) >= max_parents:` checks presumably
		# had `break` bodies -- confirm against the full source.
		for pkg, parent_atoms in conflict_pkgs.iteritems():
			# Prefer conflict packages over others.
			for parent_atom in parent_atoms:
				if len(pruned_list) >= max_parents:
				parent, atom = parent_atom
				if parent in conflict_pkgs:
					pruned_list.add(parent_atom)
			# Fill the remaining display slots with arbitrary parents.
			for parent_atom in parent_atoms:
				if len(pruned_list) >= max_parents:
				pruned_list.add(parent_atom)
			omitted_parents = len(parent_atoms) - len(pruned_list)
			msg.append(indent + "%s pulled in by\n" % pkg)
			for parent_atom in pruned_list:
				parent, atom = parent_atom
				msg.append(2*indent)
				if isinstance(parent,
					(PackageArg, AtomArg)):
					# For PackageArg and AtomArg types, it's
					# redundant to display the atom attribute.
					msg.append(str(parent))
				# Display the specific atom from SetArg or
				# other parent types.
				msg.append("%s required by %s" % (atom, parent))
				msg.append(2*indent)
				msg.append("(and %d more)\n" % omitted_parents)
		sys.stderr.write("".join(msg))
		if "--quiet" not in self.myopts:
			show_blocker_docs_link()
7294 def display(self, mylist, favorites=[], verbosity=None):
7296 # This is used to prevent display_problems() from
7297 # redundantly displaying this exact same merge list
7298 # again via _show_merge_list().
7299 self._displayed_list = mylist
7301 if verbosity is None:
7302 verbosity = ("--quiet" in self.myopts and 1 or \
7303 "--verbose" in self.myopts and 3 or 2)
7304 favorites_set = InternalPackageSet(favorites)
7305 oneshot = "--oneshot" in self.myopts or \
7306 "--onlydeps" in self.myopts
7307 columns = "--columns" in self.myopts
7312 counters = PackageCounters()
7314 if verbosity == 1 and "--verbose" not in self.myopts:
7315 def create_use_string(*args):
7318 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7320 is_new, reinst_flags,
7321 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7322 alphabetical=("--alphabetical" in self.myopts)):
7330 cur_iuse = set(cur_iuse)
7331 enabled_flags = cur_iuse.intersection(cur_use)
7332 removed_iuse = set(old_iuse).difference(cur_iuse)
7333 any_iuse = cur_iuse.union(old_iuse)
7334 any_iuse = list(any_iuse)
7336 for flag in any_iuse:
7339 reinst_flag = reinst_flags and flag in reinst_flags
7340 if flag in enabled_flags:
7342 if is_new or flag in old_use and \
7343 (all_flags or reinst_flag):
7344 flag_str = red(flag)
7345 elif flag not in old_iuse:
7346 flag_str = yellow(flag) + "%*"
7347 elif flag not in old_use:
7348 flag_str = green(flag) + "*"
7349 elif flag in removed_iuse:
7350 if all_flags or reinst_flag:
7351 flag_str = yellow("-" + flag) + "%"
7354 flag_str = "(" + flag_str + ")"
7355 removed.append(flag_str)
7358 if is_new or flag in old_iuse and \
7359 flag not in old_use and \
7360 (all_flags or reinst_flag):
7361 flag_str = blue("-" + flag)
7362 elif flag not in old_iuse:
7363 flag_str = yellow("-" + flag)
7364 if flag not in iuse_forced:
7366 elif flag in old_use:
7367 flag_str = green("-" + flag) + "*"
7369 if flag in iuse_forced:
7370 flag_str = "(" + flag_str + ")"
7372 enabled.append(flag_str)
7374 disabled.append(flag_str)
7377 ret = " ".join(enabled)
7379 ret = " ".join(enabled + disabled + removed)
7381 ret = '%s="%s" ' % (name, ret)
7384 repo_display = RepoDisplay(self.roots)
7388 mygraph = self.digraph.copy()
7390 # If there are any Uninstall instances, add the corresponding
7391 # blockers to the digraph (useful for --tree display).
7393 executed_uninstalls = set(node for node in mylist \
7394 if isinstance(node, Package) and node.operation == "unmerge")
7396 for uninstall in self._blocker_uninstalls.leaf_nodes():
7397 uninstall_parents = \
7398 self._blocker_uninstalls.parent_nodes(uninstall)
7399 if not uninstall_parents:
7402 # Remove the corresponding "nomerge" node and substitute
7403 # the Uninstall node.
7404 inst_pkg = self._pkg_cache[
7405 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7407 mygraph.remove(inst_pkg)
7412 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7414 inst_pkg_blockers = []
7416 # Break the Package -> Uninstall edges.
7417 mygraph.remove(uninstall)
7419 # Resolution of a package's blockers
7420 # depends on its own uninstallation.
7421 for blocker in inst_pkg_blockers:
7422 mygraph.add(uninstall, blocker)
7424 # Expand Package -> Uninstall edges into
7425 # Package -> Blocker -> Uninstall edges.
7426 for blocker in uninstall_parents:
7427 mygraph.add(uninstall, blocker)
7428 for parent in self._blocker_parents.parent_nodes(blocker):
7429 if parent != inst_pkg:
7430 mygraph.add(blocker, parent)
7432 # If the uninstall task did not need to be executed because
7433 # of an upgrade, display Blocker -> Upgrade edges since the
7434 # corresponding Blocker -> Uninstall edges will not be shown.
7436 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7437 if upgrade_node is not None and \
7438 uninstall not in executed_uninstalls:
7439 for blocker in uninstall_parents:
7440 mygraph.add(upgrade_node, blocker)
7442 unsatisfied_blockers = []
7447 if isinstance(x, Blocker) and not x.satisfied:
7448 unsatisfied_blockers.append(x)
7451 if "--tree" in self.myopts:
7452 depth = len(tree_nodes)
7453 while depth and graph_key not in \
7454 mygraph.child_nodes(tree_nodes[depth-1]):
7457 tree_nodes = tree_nodes[:depth]
7458 tree_nodes.append(graph_key)
7459 display_list.append((x, depth, True))
7460 shown_edges.add((graph_key, tree_nodes[depth-1]))
7462 traversed_nodes = set() # prevent endless circles
7463 traversed_nodes.add(graph_key)
7464 def add_parents(current_node, ordered):
7466 # Do not traverse to parents if this node is
7467 # an argument or a direct member of a set that has
7468 # been specified as an argument (system or world).
7469 if current_node not in self._set_nodes:
7470 parent_nodes = mygraph.parent_nodes(current_node)
7472 child_nodes = set(mygraph.child_nodes(current_node))
7473 selected_parent = None
7474 # First, try to avoid a direct cycle.
7475 for node in parent_nodes:
7476 if not isinstance(node, (Blocker, Package)):
7478 if node not in traversed_nodes and \
7479 node not in child_nodes:
7480 edge = (current_node, node)
7481 if edge in shown_edges:
7483 selected_parent = node
7485 if not selected_parent:
7486 # A direct cycle is unavoidable.
7487 for node in parent_nodes:
7488 if not isinstance(node, (Blocker, Package)):
7490 if node not in traversed_nodes:
7491 edge = (current_node, node)
7492 if edge in shown_edges:
7494 selected_parent = node
7497 shown_edges.add((current_node, selected_parent))
7498 traversed_nodes.add(selected_parent)
7499 add_parents(selected_parent, False)
7500 display_list.append((current_node,
7501 len(tree_nodes), ordered))
7502 tree_nodes.append(current_node)
7504 add_parents(graph_key, True)
7506 display_list.append((x, depth, True))
7507 mylist = display_list
7508 for x in unsatisfied_blockers:
7509 mylist.append((x, 0, True))
7511 last_merge_depth = 0
7512 for i in xrange(len(mylist)-1,-1,-1):
7513 graph_key, depth, ordered = mylist[i]
7514 if not ordered and depth == 0 and i > 0 \
7515 and graph_key == mylist[i-1][0] and \
7516 mylist[i-1][1] == 0:
7517 # An ordered node got a consecutive duplicate when the tree was
7521 if ordered and graph_key[-1] != "nomerge":
7522 last_merge_depth = depth
7524 if depth >= last_merge_depth or \
7525 i < len(mylist) - 1 and \
7526 depth >= mylist[i+1][1]:
7529 from portage import flatten
7530 from portage.dep import use_reduce, paren_reduce
7531 # files to fetch list - avoids counting a same file twice
7532 # in size display (verbose mode)
7535 # Use this set to detect when all the "repoadd" strings are "[0]"
7536 # and disable the entire repo display in this case.
7539 for mylist_index in xrange(len(mylist)):
7540 x, depth, ordered = mylist[mylist_index]
7544 portdb = self.trees[myroot]["porttree"].dbapi
7545 bindb = self.trees[myroot]["bintree"].dbapi
7546 vardb = self.trees[myroot]["vartree"].dbapi
7547 vartree = self.trees[myroot]["vartree"]
7548 pkgsettings = self.pkgsettings[myroot]
7551 indent = " " * depth
7553 if isinstance(x, Blocker):
7555 blocker_style = "PKG_BLOCKER_SATISFIED"
7556 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7558 blocker_style = "PKG_BLOCKER"
7559 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7561 counters.blocks += 1
7563 counters.blocks_satisfied += 1
7564 resolved = portage.key_expand(
7565 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7566 if "--columns" in self.myopts and "--quiet" in self.myopts:
7567 addl += " " + colorize(blocker_style, resolved)
7569 addl = "[%s %s] %s%s" % \
7570 (colorize(blocker_style, "blocks"),
7571 addl, indent, colorize(blocker_style, resolved))
7572 block_parents = self._blocker_parents.parent_nodes(x)
7573 block_parents = set([pnode[2] for pnode in block_parents])
7574 block_parents = ", ".join(block_parents)
7576 addl += colorize(blocker_style,
7577 " (\"%s\" is blocking %s)") % \
7578 (str(x.atom).lstrip("!"), block_parents)
7580 addl += colorize(blocker_style,
7581 " (is blocking %s)") % block_parents
7582 if isinstance(x, Blocker) and x.satisfied:
7587 blockers.append(addl)
7590 pkg_merge = ordered and pkg_status == "merge"
7591 if not pkg_merge and pkg_status == "merge":
7592 pkg_status = "nomerge"
7593 built = pkg_type != "ebuild"
7594 installed = pkg_type == "installed"
7596 metadata = pkg.metadata
7598 repo_name = metadata["repository"]
7599 if pkg_type == "ebuild":
7600 ebuild_path = portdb.findname(pkg_key)
7601 if not ebuild_path: # shouldn't happen
7602 raise portage.exception.PackageNotFound(pkg_key)
7603 repo_path_real = os.path.dirname(os.path.dirname(
7604 os.path.dirname(ebuild_path)))
7606 repo_path_real = portdb.getRepositoryPath(repo_name)
7607 pkg_use = list(pkg.use.enabled)
7609 restrict = flatten(use_reduce(paren_reduce(
7610 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7611 except portage.exception.InvalidDependString, e:
7612 if not pkg.installed:
7613 show_invalid_depstring_notice(x,
7614 pkg.metadata["RESTRICT"], str(e))
7618 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7619 "fetch" in restrict:
7622 counters.restrict_fetch += 1
7623 if portdb.fetch_check(pkg_key, pkg_use):
7626 counters.restrict_fetch_satisfied += 1
7628 # we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
7629 #param is used for -u, where you still *do* want to see when something is being upgraded.
7632 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7633 if vardb.cpv_exists(pkg_key):
7634 addl=" "+yellow("R")+fetch+" "
7637 counters.reinst += 1
7638 elif pkg_status == "uninstall":
7639 counters.uninst += 1
7640 # filter out old-style virtual matches
7641 elif installed_versions and \
7642 portage.cpv_getkey(installed_versions[0]) == \
7643 portage.cpv_getkey(pkg_key):
7644 myinslotlist = vardb.match(pkg.slot_atom)
7645 # If this is the first install of a new-style virtual, we
7646 # need to filter out old-style virtual matches.
7647 if myinslotlist and \
7648 portage.cpv_getkey(myinslotlist[0]) != \
7649 portage.cpv_getkey(pkg_key):
7652 myoldbest = myinslotlist[:]
7654 if not portage.dep.cpvequal(pkg_key,
7655 portage.best([pkg_key] + myoldbest)):
7657 addl += turquoise("U")+blue("D")
7659 counters.downgrades += 1
7662 addl += turquoise("U") + " "
7664 counters.upgrades += 1
7666 # New slot, mark it new.
7667 addl = " " + green("NS") + fetch + " "
7668 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7670 counters.newslot += 1
7672 if "--changelog" in self.myopts:
7673 inst_matches = vardb.match(pkg.slot_atom)
7675 changelogs.extend(self.calc_changelog(
7676 portdb.findname(pkg_key),
7677 inst_matches[0], pkg_key))
7679 addl = " " + green("N") + " " + fetch + " "
7688 forced_flags = set()
7689 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7690 forced_flags.update(pkgsettings.useforce)
7691 forced_flags.update(pkgsettings.usemask)
7693 cur_use = [flag for flag in pkg.use.enabled \
7694 if flag in pkg.iuse.all]
7695 cur_iuse = sorted(pkg.iuse.all)
7697 if myoldbest and myinslotlist:
7698 previous_cpv = myoldbest[0]
7700 previous_cpv = pkg.cpv
7701 if vardb.cpv_exists(previous_cpv):
7702 old_iuse, old_use = vardb.aux_get(
7703 previous_cpv, ["IUSE", "USE"])
7704 old_iuse = list(set(
7705 filter_iuse_defaults(old_iuse.split())))
7707 old_use = old_use.split()
7714 old_use = [flag for flag in old_use if flag in old_iuse]
7716 use_expand = pkgsettings["USE_EXPAND"].lower().split()
7718 use_expand.reverse()
7719 use_expand_hidden = \
7720 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7722 def map_to_use_expand(myvals, forcedFlags=False,
7726 for exp in use_expand:
7729 for val in myvals[:]:
7730 if val.startswith(exp.lower()+"_"):
7731 if val in forced_flags:
7732 forced[exp].add(val[len(exp)+1:])
7733 ret[exp].append(val[len(exp)+1:])
7736 forced["USE"] = [val for val in myvals \
7737 if val in forced_flags]
7739 for exp in use_expand_hidden:
7745 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7746 # are the only thing that triggered reinstallation.
7747 reinst_flags_map = {}
7748 reinstall_for_flags = self._reinstall_nodes.get(pkg)
7749 reinst_expand_map = None
7750 if reinstall_for_flags:
7751 reinst_flags_map = map_to_use_expand(
7752 list(reinstall_for_flags), removeHidden=False)
7753 for k in list(reinst_flags_map):
7754 if not reinst_flags_map[k]:
7755 del reinst_flags_map[k]
7756 if not reinst_flags_map.get("USE"):
7757 reinst_expand_map = reinst_flags_map.copy()
7758 reinst_expand_map.pop("USE", None)
7759 if reinst_expand_map and \
7760 not set(reinst_expand_map).difference(
7762 use_expand_hidden = \
7763 set(use_expand_hidden).difference(
7766 cur_iuse_map, iuse_forced = \
7767 map_to_use_expand(cur_iuse, forcedFlags=True)
7768 cur_use_map = map_to_use_expand(cur_use)
7769 old_iuse_map = map_to_use_expand(old_iuse)
7770 old_use_map = map_to_use_expand(old_use)
7773 use_expand.insert(0, "USE")
7775 for key in use_expand:
7776 if key in use_expand_hidden:
7778 verboseadd += create_use_string(key.upper(),
7779 cur_iuse_map[key], iuse_forced[key],
7780 cur_use_map[key], old_iuse_map[key],
7781 old_use_map[key], is_new,
7782 reinst_flags_map.get(key))
7787 if pkg_type == "ebuild" and pkg_merge:
7789 myfilesdict = portdb.getfetchsizes(pkg_key,
7790 useflags=pkg_use, debug=self.edebug)
7791 except portage.exception.InvalidDependString, e:
7792 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7793 show_invalid_depstring_notice(x, src_uri, str(e))
7796 if myfilesdict is None:
7797 myfilesdict="[empty/missing/bad digest]"
7799 for myfetchfile in myfilesdict:
7800 if myfetchfile not in myfetchlist:
7801 mysize+=myfilesdict[myfetchfile]
7802 myfetchlist.append(myfetchfile)
7804 counters.totalsize += mysize
7805 verboseadd += format_size(mysize)
7808 # assign index for a previous version in the same slot
7809 has_previous = False
7810 repo_name_prev = None
7811 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7813 slot_matches = vardb.match(slot_atom)
7816 repo_name_prev = vardb.aux_get(slot_matches[0],
7819 # now use the data to generate output
7820 if pkg.installed or not has_previous:
7821 repoadd = repo_display.repoStr(repo_path_real)
7823 repo_path_prev = None
7825 repo_path_prev = portdb.getRepositoryPath(
7827 if repo_path_prev == repo_path_real:
7828 repoadd = repo_display.repoStr(repo_path_real)
7830 repoadd = "%s=>%s" % (
7831 repo_display.repoStr(repo_path_prev),
7832 repo_display.repoStr(repo_path_real))
7834 repoadd_set.add(repoadd)
7836 xs = [portage.cpv_getkey(pkg_key)] + \
7837 list(portage.catpkgsplit(pkg_key)[2:])
7844 if "COLUMNWIDTH" in self.settings:
7846 mywidth = int(self.settings["COLUMNWIDTH"])
7847 except ValueError, e:
7848 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7850 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7851 self.settings["COLUMNWIDTH"], noiselevel=-1)
7853 oldlp = mywidth - 30
7856 # Convert myoldbest from a list to a string.
7860 for pos, key in enumerate(myoldbest):
7861 key = portage.catpkgsplit(key)[2] + \
7862 "-" + portage.catpkgsplit(key)[3]
7863 if key[-3:] == "-r0":
7865 myoldbest[pos] = key
7866 myoldbest = blue("["+", ".join(myoldbest)+"]")
7869 root_config = self.roots[myroot]
7870 system_set = root_config.sets["system"]
7871 world_set = root_config.sets["world"]
7876 pkg_system = system_set.findAtomForPackage(pkg)
7877 pkg_world = world_set.findAtomForPackage(pkg)
7878 if not (oneshot or pkg_world) and \
7879 myroot == self.target_root and \
7880 favorites_set.findAtomForPackage(pkg):
7881 # Maybe it will be added to world now.
7882 if create_world_atom(pkg, favorites_set, root_config):
7884 except portage.exception.InvalidDependString:
7885 # This is reported elsewhere if relevant.
7888 def pkgprint(pkg_str):
7891 return colorize("PKG_MERGE_SYSTEM", pkg_str)
7893 return colorize("PKG_MERGE_WORLD", pkg_str)
7895 return colorize("PKG_MERGE", pkg_str)
7896 elif pkg_status == "uninstall":
7897 return colorize("PKG_UNINSTALL", pkg_str)
7900 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7902 return colorize("PKG_NOMERGE_WORLD", pkg_str)
7904 return colorize("PKG_NOMERGE", pkg_str)
7907 properties = flatten(use_reduce(paren_reduce(
7908 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7909 except portage.exception.InvalidDependString, e:
7910 if not pkg.installed:
7911 show_invalid_depstring_notice(pkg,
7912 pkg.metadata["PROPERTIES"], str(e))
7916 interactive = "interactive" in properties
7917 if interactive and pkg.operation == "merge":
7918 addl = colorize("WARN", "I") + addl[1:]
7920 counters.interactive += 1
7925 if "--columns" in self.myopts:
7926 if "--quiet" in self.myopts:
7927 myprint=addl+" "+indent+pkgprint(pkg_cp)
7928 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7929 myprint=myprint+myoldbest
7930 myprint=myprint+darkgreen("to "+x[1])
7934 myprint = "[%s] %s%s" % \
7935 (pkgprint(pkg_status.ljust(13)),
7936 indent, pkgprint(pkg.cp))
7938 myprint = "[%s %s] %s%s" % \
7939 (pkgprint(pkg.type_name), addl,
7940 indent, pkgprint(pkg.cp))
7941 if (newlp-nc_len(myprint)) > 0:
7942 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7943 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7944 if (oldlp-nc_len(myprint)) > 0:
7945 myprint=myprint+" "*(oldlp-nc_len(myprint))
7946 myprint=myprint+myoldbest
7947 myprint += darkgreen("to " + pkg.root)
7950 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7952 myprint = "[" + pkg_type + " " + addl + "] "
7953 myprint += indent + pkgprint(pkg_key) + " " + \
7954 myoldbest + darkgreen("to " + myroot)
7956 if "--columns" in self.myopts:
7957 if "--quiet" in self.myopts:
7958 myprint=addl+" "+indent+pkgprint(pkg_cp)
7959 myprint=myprint+" "+green(xs[1]+xs[2])+" "
7960 myprint=myprint+myoldbest
7964 myprint = "[%s] %s%s" % \
7965 (pkgprint(pkg_status.ljust(13)),
7966 indent, pkgprint(pkg.cp))
7968 myprint = "[%s %s] %s%s" % \
7969 (pkgprint(pkg.type_name), addl,
7970 indent, pkgprint(pkg.cp))
7971 if (newlp-nc_len(myprint)) > 0:
7972 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7973 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7974 if (oldlp-nc_len(myprint)) > 0:
7975 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7976 myprint += myoldbest
7979 myprint = "[%s] %s%s %s" % \
7980 (pkgprint(pkg_status.ljust(13)),
7981 indent, pkgprint(pkg.cpv),
7984 myprint = "[%s %s] %s%s %s" % \
7985 (pkgprint(pkg_type), addl, indent,
7986 pkgprint(pkg.cpv), myoldbest)
7988 if columns and pkg.operation == "uninstall":
7990 p.append((myprint, verboseadd, repoadd))
7992 if "--tree" not in self.myopts and \
7993 "--quiet" not in self.myopts and \
7994 not self._opts_no_restart.intersection(self.myopts) and \
7995 pkg.root == self._running_root.root and \
7996 portage.match_from_list(
7997 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7998 not vardb.cpv_exists(pkg.cpv) and \
7999 "--quiet" not in self.myopts:
8000 if mylist_index < len(mylist) - 1:
8001 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8002 p.append(colorize("WARN", " then resume the merge."))
8005 show_repos = repoadd_set and repoadd_set != set(["0"])
8008 if isinstance(x, basestring):
8009 out.write("%s\n" % (x,))
8012 myprint, verboseadd, repoadd = x
8015 myprint += " " + verboseadd
8017 if show_repos and repoadd:
8018 myprint += " " + teal("[%s]" % repoadd)
8020 out.write("%s\n" % (myprint,))
8029 sys.stdout.write(str(repo_display))
8031 if "--changelog" in self.myopts:
8033 for revision,text in changelogs:
8034 print bold('*'+revision)
8035 sys.stdout.write(text)
	def display_problems(self):
		"""
		Display problems with the dependency graph such as slot collisions.
		This is called internally by display() to show the problems _after_
		the merge list where it is most likely to be seen, but if display()
		is not going to be called then this method should be called explicitly
		to ensure that the user is notified of problems with the graph.

		All output goes to stderr, except for unsatisfied dependencies which
		go to stdout for parsing by programs such as autounmask.
		"""

		# Note that show_masked_packages() sends its output to
		# stdout, and some programs such as autounmask parse the
		# output in cases when emerge bails out. However, when
		# show_masked_packages() is called for installed packages
		# here, the message is a warning that is more appropriate
		# to send to stderr, so temporarily redirect stdout to
		# stderr. TODO: Fix output code so there's a cleaner way
		# to redirect everything to stderr.
		# NOTE(review): this listing elides some original lines here;
		# presumably the original saves sys.stdout and restores it in a
		# try/finally around the call below -- confirm against upstream.
		sys.stdout = sys.stderr
		self._display_problems()

		# This goes to stdout for parsing by programs like autounmask.
		for pargs, kwargs in self._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
	def _display_problems(self):
		# Report problems detected during graph creation on stderr:
		# circular deps, unsatisfied blockers / slot collisions, world
		# file problems, package.provided conflicts, and masked
		# installed packages.
		# NOTE(review): this listing elides some original lines; only the
		# surviving lines are reproduced, indentation reconstructed.
		if self._circular_deps_for_display is not None:
			self._show_circular_deps(
				self._circular_deps_for_display)

		# The user is only notified of a slot conflict if
		# there are no unresolvable blocker conflicts.
		if self._unsatisfied_blockers_for_display is not None:
			self._show_unsatisfied_blockers(
				self._unsatisfied_blockers_for_display)
		# (elided: branch that shows the slot collision notice instead)
			self._show_slot_collision_notice()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._missing_args:
			world_problems = False
			if "world" in self._sets:
				# Filter out indirect members of world (from nested sets)
				# since only direct members of world are desired here.
				world_set = self.roots[self.target_root].sets["world"]
				for arg, atom in self._missing_args:
					if arg.name == "world" and atom in world_set:
						world_problems = True
			# (elided: break and the world_problems guard)
				sys.stderr.write("\n!!! Problems have been " + \
					"detected with your world file\n")
				sys.stderr.write("!!! Please run " + \
					green("emaint --check world")+"\n\n")

		if self._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(str(atom) for arg, atom in \
				self._missing_args) + "\n")

		if self._pprovided_args:
			# Group offending (arg, atom) pairs and record which parent
			# sets pulled each one in.
			# (elided: arg_refs = {} initialization)
			for arg, atom in self._pprovided_args:
				if isinstance(arg, SetArg):
					# (elided: parent assignment)
					arg_atom = (atom, atom)
				# (elided: else branch)
					arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:
					# (elided: refs.append(parent))
			# (elided: msg = [] initialization)
			msg.append(bad("\nWARNING: "))
			if len(self._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
			# (elided: else branch)
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.iteritems():
				# (elided: guard on refs)
					problems_sets.update(refs)
					ref_string = ", ".join(["'%s'" % name for name in refs])
					ref_string = " pulled in by " + ref_string
				msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
			if "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append(" B) Uninstall offending packages (cleans them from world).\n")
				msg.append(" C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		masked_packages = []
		for pkg in self._masked_installed:
			root_config = pkg.root_config
			pkgsettings = self.pkgsettings[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.metadata, mreasons))
		# (elided: guard checking masked_packages is non-empty)
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" The following installed packages are masked:\n")
			show_masked_packages(masked_packages)
	def calc_changelog(self,ebuildpath,current,next):
		# Return the ChangeLog divisions covering the versions between
		# the installed version `current` and the version about to be
		# merged `next` (both cpv strings).
		# NOTE(review): this listing elides some original lines (the early
		# returns, the try: framing the open(), and loop breaks);
		# surviving lines only, indentation reconstructed.
		if ebuildpath == None or not os.path.exists(ebuildpath):
			# (elided; presumably an empty-list early return)
		current = '-'.join(portage.catpkgsplit(current)[1:])
		if current.endswith('-r0'):
			current = current[:-3]
		next = '-'.join(portage.catpkgsplit(next)[1:])
		if next.endswith('-r0'):
			# (elided; presumably strips the -r0 suffix from next)
		changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
		changelog = open(changelogpath).read()
		except SystemExit, e:
			raise # Needed else can't exit
		divisions = self.find_changelog_tags(changelog)
		#print 'XX from',current,'to',next
		#for div,text in divisions: print 'XX',div
		# skip entries for all revisions above the one we are about to emerge
		for i in range(len(divisions)):
			if divisions[i][0]==next:
				divisions = divisions[i:]
		# find out how many entries we are going to display
		for i in range(len(divisions)):
			if divisions[i][0]==current:
				divisions = divisions[:i]
		# couldn't find the current revision in the list. display nothing
	def find_changelog_tags(self,changelog):
		# Split a ChangeLog text into (release, text) divisions, one per
		# "*" release header line; '.ebuild' and '-r0' suffixes are
		# stripped from release names.
		# NOTE(review): this listing elides some original lines (the divs
		# accumulator / loop framing and the final return); surviving
		# lines only, indentation reconstructed.
			match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
			# (elided: no-match case that flushes the trailing division)
				if release is not None:
					divs.append((release,changelog))
			if release is not None:
				divs.append((release,changelog[:match.start()]))
			changelog = changelog[match.end():]
			release = match.group(1)
			if release.endswith('.ebuild'):
				release = release[:-7]
			if release.endswith('-r0'):
				release = release[:-3]
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary."""
		# NOTE(review): this listing elides some original lines (early
		# return, lock/unlock of the world set, and loop continues);
		# surviving lines only, indentation reconstructed.
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self.myopts:
				# (elided; presumably an early return -- nothing to save)
		root_config = self.roots[self.target_root]
		world_set = root_config.sets["world"]

		world_locked = False
		if hasattr(world_set, "lock"):
			# (elided; presumably acquires the lock, sets world_locked)

		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		args_set = self._sets["args"]
		portdb = self.trees[self.target_root]["porttree"].dbapi
		added_favorites = set()
		for x in self._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			if pkg_status != "nomerge":
				# (elided; presumably continue)
			myfavkey = create_world_atom(x, args_set, root_config)
			if myfavkey in added_favorites:
				# (elided; presumably continue)
			added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString, e:
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
		# (elided: all_added list initialization)
		for k in self._sets:
			if k in ("args", "world") or not root_config.sets[k].world_candidate:
				# (elided; presumably continue)
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		print ">>> Recording %s in \"world\" favorites file..." % \
			colorize("INFORM", str(a))
		world_set.update(all_added)
		# (elided: unlock of the world set when world_locked)
	def loadResumeCommand(self, resume_data, skip_masked=False):
		"""
		Add a resume command to the graph and validate it in the process. This
		will raise a PackageNotFound exception if a package is not available.
		"""
		# NOTE(review): this listing elides some original lines (loop
		# headers, continues/returns, try framing); surviving lines only,
		# indentation reconstructed.

		if not isinstance(resume_data, dict):
			# (elided; presumably returns failure)

		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
			# (elided; presumably falls back to an empty list)

		fakedb = self.mydbapi
		serialized_tasks = []
		# (elided: masked_tasks accumulator and the loop over mergelist)
			if not (isinstance(x, list) and len(x) == 4):
				# (elided; presumably continue -- malformed entry)
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
				# (elided; presumably continue)
			if action != "merge":
				# (elided; presumably continue)
			tree_type = self.pkg_tree_map[pkg_type]
			mydb = trees[myroot][tree_type].dbapi
			db_keys = list(self._trees_orig[myroot][
				tree_type].dbapi._aux_cache_keys)
			metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
			# It does not exist or it is corrupt.
			if action == "uninstall":
				# (elided; presumably continue)
			raise portage.exception.PackageNotFound(pkg_key)
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			root_config = self.roots[myroot]
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root_config=root_config,
			if pkg_type == "ebuild":
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
			self._pkg_cache[pkg] = pkg

			root_config = self.roots[pkg.root]
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
				# (elided: skip_masked branch selection)
				masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
				self._unsatisfied_deps_for_display.append(
					((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()

		if self._unsatisfied_deps_for_display:
			# (elided; presumably returns failure)
		if not serialized_tasks or "--nodeps" in self.myopts:
			self._serialized_tasks_cache = serialized_tasks
			self._scheduler_graph = self.digraph
		# (elided: else branch -- rebuild the graph from the resume list)
			self._select_package = self._select_pkg_from_graph
			self.myparams.add("selective")

			favorites = resume_data.get("favorites")
			args_set = self._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):
						# (elided; presumably returns failure)

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			# masked.
			if not self._create_graph(allow_unsatisfied=True):
				# (elided; presumably returns failure)
			if masked_tasks or self._unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + self._unsatisfied_deps)
			self._serialized_tasks_cache = None
		except self._unknown_internal_error:
			# (elided; presumably returns failure)
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.
		"""
		# NOTE(review): this listing elides some original lines (loop
		# headers, continues, the final return); surviving lines only,
		# indentation reconstructed.
		root_config = self.roots[self.target_root]
		getSetAtoms = root_config.setconfig.getSetAtoms
		sets = root_config.sets
		# (elided: args accumulator and the loop over favorites)
			if not isinstance(x, basestring):
				# (elided; presumably continue)
			if x in ("system", "world"):
				# (elided; presumably prepends SETPREFIX)
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
				# (elided: unknown-set guard)
				# Recursively expand sets so that containment tests in
				# self._get_parent_sets() properly match atoms in nested
				# sets (like if world contains system).
				expanded_set = InternalPackageSet(
					initial_atoms=getSetAtoms(s))
				self._sets[s] = expanded_set
				args.append(SetArg(arg=x, set=expanded_set,
					root_config=root_config))
			# (elided: else branch for plain atoms)
				if not portage.isvalidatom(x):
					# (elided; presumably continue)
				args.append(AtomArg(arg=x, atom=x,
					root_config=root_config))

		# Create the "args" package set from atoms and
		# packages given as arguments.
		args_set = self._sets["args"]
			if not isinstance(arg, (AtomArg, PackageArg)):
				# (elided; presumably continue)
			if myatom in args_set:
				# (elided; presumably continue)
			args_set.add(myatom)
		self._set_atoms.update(chain(*self._sets.itervalues()))
		atom_arg_map = self._atom_arg_map
			for atom in arg.set:
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
				# (elided: first-reference case)
					atom_arg_map[atom_key] = refs
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so the handler can inspect
			# the failed resume state when catching this exception.
			self.depgraph = depgraph
8468 class _internal_exception(portage.exception.PortageException):
8469 def __init__(self, value=""):
8470 portage.exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		"""
		# NOTE(review): the docstring is truncated in this listing; the
		# remainder of the sentence is elided -- confirm against upstream.
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
	class _dep_check_composite_db(portage.dbapi):
		"""
		A dbapi-like interface that is optimized for use in dep_check() calls.
		This is built on top of the existing depgraph package selection logic.
		Some packages that have been added to the graph may be masked from this
		view in order to influence the atom preference selection that occurs
		"""
		# NOTE(review): this listing elides some original lines throughout
		# the class (guards, accumulator initializations, returns);
		# surviving lines only, indentation reconstructed.

		def __init__(self, depgraph, root):
			portage.dbapi.__init__(self)
			self._depgraph = depgraph
			# Memoize match() results and remember the Package instance
			# behind each cpv so aux_get() can serve metadata.
			self._match_cache = {}
			self._cpv_pkg_map = {}

		def match(self, atom):
			ret = self._match_cache.get(atom)
			# (elided: cache hit path and result-list initialization)
			atom = self._dep_expand(atom)
			pkg, existing = self._depgraph._select_package(self._root, atom)
			# Return the highest available from select_package() as well as
			# any matching slots in the graph db.
			# (elided: slots accumulator initialization)
			slots.add(pkg.metadata["SLOT"])
			atom_cp = portage.dep_getkey(atom)
			if pkg.cp.startswith("virtual/"):
				# For new-style virtual lookahead that occurs inside
				# dep_check(), examine all slots. This is needed
				# so that newer slots will not unnecessarily be pulled in
				# when a satisfying lower slot is already installed. For
				# example, if virtual/jdk-1.4 is satisfied via kaffe then
				# there's no need to pull in a newer slot to satisfy a
				# virtual/jdk dependency.
				for db, pkg_type, built, installed, db_keys in \
					self._depgraph._filtered_trees[self._root]["dbs"]:
					for cpv in db.match(atom):
						if portage.cpv_getkey(cpv) != pkg.cp:
							# (elided; presumably continue)
						slots.add(db.aux_get(cpv, ["SLOT"])[0])
			if self._visible(pkg):
				self._cpv_pkg_map[pkg.cpv] = pkg
			slots.remove(pkg.metadata["SLOT"])
			# (elided: loop draining remaining slots)
				slot_atom = "%s:%s" % (atom_cp, slots.pop())
				pkg, existing = self._depgraph._select_package(
					self._root, slot_atom)
				if not self._visible(pkg):
					# (elided; presumably continue)
				self._cpv_pkg_map[pkg.cpv] = pkg
			self._cpv_sort_ascending(ret)
			self._match_cache[orig_atom] = ret
			# (elided; presumably returns a copy of ret)

		def _visible(self, pkg):
			if pkg.installed and "selective" not in self._depgraph.myparams:
				arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
				except (StopIteration, portage.exception.InvalidDependString):
					# (elided: fallthrough when no argument atom matches)
					self._depgraph.pkgsettings[pkg.root], pkg):
				except portage.exception.InvalidDependString:
					# (elided; presumably treats the package as not visible)

		def _dep_expand(self, atom):
			"""
			This is only needed for old installed packages that may
			contain atoms that are not fully qualified with a specific
			category. Emulate the cpv_expand() function that's used by
			dbapi.match() in cases like this. If there are multiple
			matches, it's often due to a new-style virtual that has
			been added, so try to filter those out to avoid raising
			"""
			root_config = self._depgraph.roots[self._root]
			expanded_atoms = self._depgraph._dep_expand(root_config, atom)
			if len(expanded_atoms) > 1:
				non_virtual_atoms = []
				for x in expanded_atoms:
					if not portage.dep_getkey(x).startswith("virtual/"):
						non_virtual_atoms.append(x)
				if len(non_virtual_atoms) == 1:
					expanded_atoms = non_virtual_atoms
			if len(expanded_atoms) > 1:
				# compatible with portage.cpv_expand()
				raise portage.exception.AmbiguousPackageName(
					[portage.dep_getkey(x) for x in expanded_atoms])
			atom = expanded_atoms[0]
			# (elided: no-match branch below)
			null_atom = insert_category_into_atom(atom, "null")
			null_cp = portage.dep_getkey(null_atom)
			cat, atom_pn = portage.catsplit(null_cp)
			virts_p = root_config.settings.get_virts_p().get(atom_pn)
			# Allow the resolver to choose which virtual.
			atom = insert_category_into_atom(atom, "virtual")
			atom = insert_category_into_atom(atom, "null")

		def aux_get(self, cpv, wants):
			# Serve metadata from the Package recorded by match().
			metadata = self._cpv_pkg_map[cpv].metadata
			return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
	# Maps repository paths (PORTDIR and overlays) to short numeric
	# indices for the merge-list "[N]" annotations, and renders the
	# legend via __str__.
	# NOTE(review): this listing elides some original lines (guards,
	# method headers such as __str__); surviving lines only.
	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			# (elided: guard on portdir)
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			# (elided: guard on overlays)
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# NOTE(review): list.index() raises ValueError rather than
		# returning -1; the -1 check below suggests elided lines
		# initialize real_index = -1 and guard the lookup -- confirm.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		# (elided: first-time case assigning a new index)
		index = len(shown_repos)
		shown_repos[repo_path] = index

	# (elided: def __str__(self) header and output initialization)
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	# Tallies merge-list statistics (upgrades, downgrades, new, new
	# slot, reinstalls, uninstalls, blocks, fetch restrictions,
	# interactive packages); __str__ renders the familiar
	# "Total: N packages (...)" summary.
	# NOTE(review): this listing elides some original lines (the
	# __init__ and __str__ headers and most counter initializations);
	# surviving lines only, indentation reconstructed.
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

	# (elided: def __str__(self), myoutput/details initialization)
		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
				# (elided; presumably pluralizes the detail)
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
				# (elided; presumably pluralizes the detail)
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
				# (elided; presumably pluralizes the detail)
			details.append("%s reinstall" % self.reinst)
			details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
			myoutput.append("\nConflict: %s block" % \
				myoutput.append("s")
			if self.blocks_satisfied < self.blocks:
				myoutput.append(bad(" (%s unsatisfied)") % \
					(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollSelectAdapter(PollConstants):

	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""
	# NOTE(review): this listing elides some original lines (the
	# __init__, register() and poll() framing, raises, and returns);
	# surviving lines only, indentation reconstructed.

	# (elided: def __init__(self) header)
		self._registered = {}
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
		# (elided: raise TypeError framing for the message below)
				"register expected at most 2 arguments, got " + \
				repr(1 + len(args)))
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		self._registered[fd] = eventmask
		# Invalidate the cached select() argument lists.
		self._select_args = None

	def unregister(self, fd):
		# Invalidate the cached select() argument lists, then drop fd.
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
		# (elided: raise TypeError framing for the message below)
				"poll expected at most 2 arguments, got " + \
				repr(1 + len(args)))
		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#   poll   | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#   select | seconds      | omitted
			# ---------|--------------|------------------------------
			if timeout is not None and timeout < 0:
				# (elided; presumably treats negative as indefinite)
			if timeout is not None:
				# NOTE(review): Python 2 integer division truncates
				# sub-second timeouts to 0 here -- confirm against
				# upstream whether float division was intended.
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# (elided: poll_events accumulator initialization)
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	# A FIFO of tasks started up to max_jobs at a time; completed tasks
	# are pruned automatically via their exit listeners.
	# NOTE(review): this listing elides some original lines (method
	# headers such as clear()/__len__, dirty-flag updates, and loop
	# bodies); surviving lines only, indentation reconstructed.

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:
			# (elided; presumably defaults max_jobs to 1)

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		self._task_queue.appendleft(task)

	# (elided: def schedule(self) header and dirty-flag check)
		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
			return

		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			# (elided: cancelled guard; start of the task)
				running_tasks.add(task)
				task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:
			# (elided; presumably marks the queue dirty / reschedules)

	# (elided: def clear(self) header)
		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)
			# (elided; presumably cancels the task)

	def __nonzero__(self):
		return bool(self._task_queue or self.running_tasks)

	# (elided: def __len__(self) header)
		return len(self._task_queue) + len(self.running_tasks)
# Cached result of can_poll_device(); None means "not yet probed".
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	# NOTE(review): this listing elides some original lines (the try
	# framing around the open() and the POLLNVAL loop break); surviving
	# lines only, indentation reconstructed.
	global _can_poll_device
	if _can_poll_device is not None:
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	dev_null = open('/dev/null', 'rb')
	# (elided: failure branch when /dev/null cannot be opened)
		_can_poll_device = False
		return _can_poll_device

	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			invalid_request = True

	# POLLNVAL here means the fd was rejected, i.e. poll() does not
	# work on devices on this platform.
	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Return a new poll-like object: a real select.poll instance when the
	platform's poll() implementation works on devices, or a
	PollSelectAdapter fallback (which emulates poll() via select())
	when poll() is missing or broken.
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
8939 class PollScheduler(object):
8941 class _sched_iface_class(SlotObject):
8942 __slots__ = ("register", "schedule", "unregister")
8946 self._max_load = None
8948 self._poll_event_queue = []
8949 self._poll_event_handlers = {}
8950 self._poll_event_handler_ids = {}
8951 # Increment id for each new handler.
8952 self._event_handler_id = 0
8953 self._poll_obj = create_poll_instance()
8954 self._scheduling = False
8956 def _schedule(self):
8958 Calls _schedule_tasks() and automatically returns early from
8959 any recursive calls to this method that the _schedule_tasks()
8960 call might trigger. This makes _schedule() safe to call from
8961 inside exit listeners.
8963 if self._scheduling:
8965 self._scheduling = True
8967 return self._schedule_tasks()
8969 self._scheduling = False
8971 def _running_job_count(self):
8974 def _can_add_job(self):
8975 max_jobs = self._max_jobs
8976 max_load = self._max_load
8978 if self._max_jobs is not True and \
8979 self._running_job_count() >= self._max_jobs:
8982 if max_load is not None and \
8983 (max_jobs is True or max_jobs > 1) and \
8984 self._running_job_count() >= 1:
8986 avg1, avg5, avg15 = os.getloadavg()
8987 except (AttributeError, OSError), e:
8988 writemsg("!!! getloadavg() failed: %s\n" % (e,),
8993 if avg1 >= max_load:
8998 def _poll(self, timeout=None):
9000 All poll() calls pass through here. The poll events
9001 are added directly to self._poll_event_queue.
9002 In order to avoid endless blocking, this raises
9003 StopIteration if timeout is None and there are
9004 no file descriptors to poll.
9006 if not self._poll_event_handlers:
9008 if timeout is None and \
9009 not self._poll_event_handlers:
9010 raise StopIteration(
9011 "timeout is None and there are no poll() event handlers")
9013 # The following error is known to occur with Linux kernel versions
9016 # select.error: (4, 'Interrupted system call')
9018 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9019 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9020 # without any events.
9023 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9025 except select.error, e:
9026 writemsg_level("\n!!! select error: %s\n" % (e,),
9027 level=logging.ERROR, noiselevel=-1)
9029 if timeout is not None:
9032 def _next_poll_event(self, timeout=None):
9034 Since the _schedule_wait() loop is called by event
9035 handlers from _poll_loop(), maintain a central event
9036 queue for both of them to share events from a single
9037 poll() call. In order to avoid endless blocking, this
9038 raises StopIteration if timeout is None and there are
9039 no file descriptors to poll.
9041 if not self._poll_event_queue:
9043 return self._poll_event_queue.pop()
9045 def _poll_loop(self):
9047 event_handlers = self._poll_event_handlers
9048 event_handled = False
9051 while event_handlers:
9052 f, event = self._next_poll_event()
9053 handler, reg_id = event_handlers[f]
9055 event_handled = True
9056 except StopIteration:
9057 event_handled = True
9059 if not event_handled:
9060 raise AssertionError("tight loop")
9062 def _schedule_yield(self):
9064 Schedule for a short period of time chosen by the scheduler based
9065 on internal state. Synchronous tasks should call this periodically
9066 in order to allow the scheduler to service pending poll events. The
9067 scheduler will call poll() exactly once, without blocking, and any
9068 resulting poll events will be serviced.
9070 event_handlers = self._poll_event_handlers
9073 if not event_handlers:
9074 return bool(events_handled)
9076 if not self._poll_event_queue:
9080 while event_handlers and self._poll_event_queue:
9081 f, event = self._next_poll_event()
9082 handler, reg_id = event_handlers[f]
9085 except StopIteration:
9088 return bool(events_handled)
9090 def _register(self, f, eventmask, handler):
9093 @return: A unique registration id, for use in schedule() or
9096 if f in self._poll_event_handlers:
9097 raise AssertionError("fd %d is already registered" % f)
9098 self._event_handler_id += 1
9099 reg_id = self._event_handler_id
9100 self._poll_event_handler_ids[reg_id] = f
9101 self._poll_event_handlers[f] = (handler, reg_id)
9102 self._poll_obj.register(f, eventmask)
9105 def _unregister(self, reg_id):
9106 f = self._poll_event_handler_ids[reg_id]
9107 self._poll_obj.unregister(f)
9108 del self._poll_event_handlers[f]
9109 del self._poll_event_handler_ids[reg_id]
def _schedule_wait(self, wait_ids):
	"""
	Schedule until wait_id is no longer registered.

	@param wait_id: a task id to wait for
	"""
	event_handlers = self._poll_event_handlers
	handler_ids = self._poll_event_handler_ids
	event_handled = False

	# Accept a single id as well as a collection of ids.
	if isinstance(wait_ids, int):
		wait_ids = frozenset([wait_ids])

	# [review] a "try:" line appears to be elided here (the "except
	# StopIteration" below has no visible try in this excerpt).
	while wait_ids.intersection(handler_ids):
		f, event = self._next_poll_event()
		handler, reg_id = event_handlers[f]
		# [review] the handler invocation line is elided here.
		event_handled = True
	except StopIteration:
		event_handled = True
	return event_handled
class QueueScheduler(PollScheduler):
	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.

	NOTE(review): several lines of this class are elided from this
	excerpt (see inline markers); the code is reproduced unmodified.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		if max_jobs is None:
			# [review] default assignment elided here.
		self._max_jobs = max_jobs
		self._max_load = max_load
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)
		# [review] creation of self._queues is elided here.
		self._schedule_listeners = []

	# [review] the "def add(self, q):" header is elided here.
		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

	# [review] the "def run(self):" header is elided here.
		while self._schedule():
			# [review] loop body elided here.
		while self._running_job_count():
			# [review] loop body elided here.

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			# [review] guard lines elided here.
			if not self._start_next_job(n):
				# [review] early return elided here.
		for q in self._queues:
			# [review] loop body and return value elided here.

	def _running_job_count(self):
		# [review] counter initialization elided here.
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count
		# [review] return statement elided here.

	def _start_next_job(self, n=1):
		# [review] counter initialization elided here.
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			# [review] the queue scheduling call is elided here.
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
				# [review] loop break elided here.
		return started_count
class TaskScheduler(object):
	"""
	A simple way to handle scheduling of AsynchrousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# Everything is delegated to a QueueScheduler driving a single
		# sequential queue; run() is exposed directly from the scheduler.
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler.add(self._queue)
		self.run = self._scheduler.run
		self.sched_iface = self._scheduler.sched_iface

	def add(self, task):
		self._queue.add(task)
class JobStatusDisplay(object):
	"""
	Renders a one-line jobs/load status display on stdout, using
	terminal control codes when stdout is a tty.

	NOTE(review): many lines of this class are elided from this excerpt
	(see inline markers); the code is reproduced unmodified.
	"""

	# Assigning to these attribute names triggers _property_change().
	_bound_properties = ("curval", "failed", "running")
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	# Fallback codes used when termcap lookup is unavailable.
	# [review] the dict entries are elided from this excerpt.
	_default_term_codes = {

	# Maps friendly names to termcap capability names.
	# [review] the remaining entries are elided from this excerpt.
	_termcap_name_map = {
		'carriage_return' : 'cr',

	def __init__(self, out=sys.stdout, quiet=False):
		# object.__setattr__ is used because __setattr__ is overridden
		# below to intercept writes to _bound_properties.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)
		# [review] one or two lines elided here.
		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		# Fall back to default codes when not a tty or when terminal
		# initialization fails.  [review] the line creating the empty
		# term_codes dict is elided here.
		if not isatty or not self._init_term():
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)

	def _init_term(self):
		"""
		Initialize term control codes.
		@returns: True if term codes were successfully initialized,
		"""
		term_type = os.environ.get("TERM", "vt100")
		# [review] try/except scaffolding around the curses setup is
		# partially elided from this excerpt.
		curses.setupterm(term_type, self.out.fileno())
		tigetstr = curses.tigetstr
		except curses.error:
			# [review] handler body elided here.
		if tigetstr is None:
			# [review] early return elided here.
		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			# [review] the None-check guard is elided here.
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)
		# [review] "return True" elided here.

	def _format_msg(self, msg):
		# All status lines share the ">>> " prefix.
		return ">>> %s" % msg

	# [review] the enclosing method header (an erase helper) and the
	# start of its out.write(...) call are elided here.
		self._term_codes['carriage_return'] + \
		self._term_codes['clr_eol'])
		self._displayed = False

	def _display(self, line):
		self.out.write(line)
		# [review] a flush call is elided here.
		self._displayed = True

	def _update(self, msg):
		# [review] a local alias for self.out is elided here.
		if not self._isatty:
			# Non-tty output: append a full line instead of redrawing.
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			# [review] flush elided here.
			self._displayed = True
			# [review] early return and erase-if-displayed elided here.
		self._display(self._format_msg(msg))

	def displayMessage(self, msg):
		was_displayed = self._displayed
		if self._isatty and self._displayed:
			# [review] erase call elided here.
		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False
		# [review] an "if was_displayed:" guard appears elided here.
		self._changed = True

	# [review] the "def reset(self):" header and leading assignments
	# are elided here.
		for name in self._bound_properties:
			object.__setattr__(self, name, 0)
		# [review] an "if self._displayed:" guard appears elided here.
		self.out.write(self._term_codes['newline'])
		self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		# Skip no-op assignments so bound properties only fire on change.
		if value == old_value:
			# [review] early return elided here.
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		self._changed = True
		# [review] a display refresh call appears elided here.

	def _load_avg_str(self):
		# [review] a "try:" line appears elided here.
		avg = os.getloadavg()
		except (AttributeError, OSError), e:
			# [review] fallback return and digit-width computation are
			# elided from this excerpt.
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

	# [review] the "def display(self):" header is elided here.
		"""
		Display status on stdout, but only if something has
		changed since the last call.
		"""
		current_time = time.time()
		time_delta = current_time - self._last_display_time
		if self._displayed and \
			# [review] condition continuation elided here.
			if not self._isatty:
				# [review] early return elided here.
			if time_delta < self._min_display_latency:
				# [review] early return elided here.
		self._last_display_time = current_time
		self._changed = False
		self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render the line twice in parallel: once with color codes and
		# once plain (for width measurement and the xterm title).
		color_output = StringIO.StringIO()
		plain_output = StringIO.StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)

		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		f.add_literal_data(" complete")
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(running_str)
		f.add_literal_data(" running")
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(failed_str)
		f.add_literal_data(" failed")
		# Pad the jobs column so the load average lines up.
		padding = self._jobs_column_width - len(plain_output.getvalue())
		f.add_literal_data(padding * " ")
		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			self._update(plain_output[:self.width])
		# [review] an "else:" branch marker appears elided here.
			self._update(color_output.getvalue())
		# [review] an xterm-titles guard appears elided here.
		xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):

	# Options under which installed-blocker checks are skipped entirely.
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground (non-background) output.
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# Options under which emerge will not re-exec itself after a
	# portage self-upgrade.
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that must not leak into an automatic --resume invocation.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	_fetch_log = "/var/log/emerge-fetch.log"

	# Interface object handed to dblink/ebuild code so those phases can
	# cooperate with this scheduler.
	class _iface_class(SlotObject):
		# [review] the tail of this __slots__ tuple is elided from
		# this excerpt.
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",

	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")

	# Boolean build options derived from command-line flags in __init__.
	class _build_opts_class(SlotObject):
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")

	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")

	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			# [review] the assignment of self._root is elided here.
			self._allocate = allocate
			self._deallocate = deallocate
		# [review] the "def allocate(self):" header is elided here.
			return self._allocate(self._root)
		def deallocate(self, settings):
			self._deallocate(settings)

	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
	spinner, mergelist, favorites, digraph):
	"""
	Set up all scheduler state for a merge run: option flags, task
	queues, status display, per-root settings/blocker databases, and
	the interface object passed to build/merge phases.

	NOTE(review): several lines are elided from this excerpt (see
	inline markers); the code is reproduced unmodified.
	"""
	PollScheduler.__init__(self)
	self.settings = settings
	self.target_root = settings["ROOT"]
	self.myopts = myopts
	self._spinner = spinner
	self._mtimedb = mtimedb
	self._mergelist = mergelist
	self._favorites = favorites
	self._args_set = InternalPackageSet(favorites)

	# Each slot name maps to a --flag (underscores become dashes);
	# the attribute records whether that flag was given.
	self._build_opts = self._build_opts_class()
	for k in self._build_opts.__slots__:
		setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
	self._binpkg_opts = self._binpkg_opts_class()
	for k in self._binpkg_opts.__slots__:
		setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

	self._logger = self._emerge_log_class()
	self._task_queues = self._task_queues_class()
	for k in self._task_queues.allowed_keys:
		setattr(self._task_queues, k,
			SequentialTaskQueue())
	self._status_display = JobStatusDisplay()
	self._max_load = myopts.get("--load-average")
	max_jobs = myopts.get("--jobs")
	if max_jobs is None:
		# [review] default assignment elided here.
	self._set_max_jobs(max_jobs)

	# The root where the currently running
	# portage instance is installed.
	self._running_root = trees["/"]["root_config"]
	if settings.get("PORTAGE_DEBUG", "") == "1":
		# [review] debug-mode body elided here.
	self.pkgsettings = {}
	self._config_pool = {}
	self._blocker_db = {}
	# [review] the "for root in trees:" loop header is elided here.
	self._config_pool[root] = []
	self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

	fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
		schedule=self._schedule_fetch)
	self._sched_iface = self._iface_class(
		dblinkEbuildPhase=self._dblink_ebuild_phase,
		dblinkDisplayMerge=self._dblink_display_merge,
		dblinkElog=self._dblink_elog,
		fetch=fetch_iface, register=self._register,
		schedule=self._schedule_wait,
		scheduleSetup=self._schedule_setup,
		scheduleUnpack=self._schedule_unpack,
		scheduleYield=self._schedule_yield,
		unregister=self._unregister)

	self._prefetchers = weakref.WeakValueDictionary()
	self._pkg_queue = []
	self._completed_tasks = set()
	self._failed_pkgs = []
	self._failed_pkgs_all = []
	self._failed_pkgs_die_msgs = []
	self._post_mod_echo_msgs = []
	self._parallel_fetch = False
	merge_count = len([x for x in mergelist \
		if isinstance(x, Package) and x.operation == "merge"])
	self._pkg_count = self._pkg_count_class(
		curval=0, maxval=merge_count)
	self._status_display.maxval = self._pkg_count.maxval

	# The load average takes some time to respond when new
	# jobs are added, so we need to limit the rate of adding
	self._job_delay_max = 10
	self._job_delay_factor = 1.0
	self._job_delay_exp = 1.5
	self._previous_job_start_time = None

	self._set_digraph(digraph)

	# This is used to memoize the _choose_pkg() result when
	# no packages can be chosen until one of the existing
	self._choose_pkg_return_early = False

	features = self.settings.features
	if "parallel-fetch" in features and \
		not ("--pretend" in self.myopts or \
		"--fetch-all-uri" in self.myopts or \
		"--fetchonly" in self.myopts):
		if "distlocks" not in features:
			portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			portage.writemsg(red("!!!")+" parallel-fetching " + \
				"requires the distlocks feature enabled"+"\n",
			portage.writemsg(red("!!!")+" you have it disabled, " + \
				"thus parallel-fetching is being disabled"+"\n",
			portage.writemsg(red("!!!")+"\n", noiselevel=-1)
		elif len(mergelist) > 1:
			self._parallel_fetch = True

	if self._parallel_fetch:
		# clear out existing fetch log if it exists
		# [review] a "try:" line appears elided here.
		open(self._fetch_log, 'w')
		except EnvironmentError:
			# [review] handler body elided here.

	self._running_portage = None
	portage_match = self._running_root.trees["vartree"].dbapi.match(
		portage.const.PORTAGE_PACKAGE_ATOM)
	# [review] an "if portage_match:" guard appears elided here.
	cpv = portage_match.pop()
	self._running_portage = self._pkg(cpv, "installed",
		self._running_root, installed=True)
def _poll(self, timeout=None):
	# Delegate to the base class poller.  [review] at least one line
	# between the def and this call is elided from this excerpt.
	PollScheduler._poll(self, timeout=timeout)
9670 def _set_max_jobs(self, max_jobs):
9671 self._max_jobs = max_jobs
9672 self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
	"""
	Check if background mode is enabled and adjust states as necessary.

	@returns: True if background mode is enabled, False otherwise.
	"""
	background = (self._max_jobs is True or \
		self._max_jobs > 1 or "--quiet" in self.myopts) and \
		not bool(self._opts_no_background.intersection(self.myopts))

	# [review] an "if background:" guard appears elided here.
	interactive_tasks = self._get_interactive_tasks()
	if interactive_tasks:
		# [review] resetting background to False appears elided here.
		writemsg_level(">>> Sending package output to stdio due " + \
			"to interactive package(s):\n",
			level=logging.INFO, noiselevel=-1)
		# [review] creation of the msg list is elided here.
		for pkg in interactive_tasks:
			pkg_str = " " + colorize("INFORM", str(pkg.cpv))
			# [review] a root != "/" guard appears elided here.
			pkg_str += " for " + pkg.root
			# [review] msg.append line elided here.
		writemsg_level("".join("%s\n" % (l,) for l in msg),
			level=logging.INFO, noiselevel=-1)
		if self._max_jobs is True or self._max_jobs > 1:
			self._set_max_jobs(1)
			writemsg_level(">>> Setting --jobs=1 due " + \
				"to the above interactive package(s)\n",
				level=logging.INFO, noiselevel=-1)

	# [review] part of this boolean expression is elided here.
	self._status_display.quiet = \
		("--quiet" in self.myopts and \
		"--verbose" not in self.myopts)

	self._logger.xterm_titles = \
		"notitles" not in self.settings.features and \
		self._status_display.quiet
	# [review] "return background" appears elided from this excerpt.
def _get_interactive_tasks(self):
	"""
	Return the merge-list packages whose PROPERTIES metadata contains
	"interactive" (these force foreground output).
	"""
	from portage import flatten
	from portage.dep import use_reduce, paren_reduce
	interactive_tasks = []
	for task in self._mergelist:
		if not (isinstance(task, Package) and \
			task.operation == "merge"):
			# [review] "continue" elided here.
		# [review] a "try:" line appears elided here.
		properties = flatten(use_reduce(paren_reduce(
			task.metadata["PROPERTIES"]), uselist=task.use.enabled))
		except portage.exception.InvalidDependString, e:
			# A malformed PROPERTIES string aborts the whole run; the
			# notice is shown first, then scheduling is terminated.
			show_invalid_depstring_notice(task,
				task.metadata["PROPERTIES"], str(e))
			raise self._unknown_internal_error()
		if "interactive" in properties:
			interactive_tasks.append(task)
	return interactive_tasks
def _set_digraph(self, digraph):
	# A dependency graph is only retained when parallel scheduling is
	# possible; otherwise merge order comes straight from the list.
	if "--nodeps" in self.myopts or \
		(self._max_jobs is not True and self._max_jobs < 2):
		# [review] an explanatory comment line is elided here.
		self._digraph = None
		# [review] an early "return" appears elided here.
	self._digraph = digraph
	self._prune_digraph()
def _prune_digraph(self):
	"""
	Prune any root nodes that are irrelevant.
	"""
	graph = self._digraph
	completed_tasks = self._completed_tasks
	removed_nodes = set()
	# [review] an enclosing loop header (repeat until no more roots
	# can be removed) appears elided here.
	for node in graph.root_nodes():
		if not isinstance(node, Package) or \
			(node.installed and node.operation == "nomerge") or \
			node in completed_tasks:
			removed_nodes.add(node)
	# [review] an "if removed_nodes:" guard appears elided here.
	graph.difference_update(removed_nodes)
	if not removed_nodes:
		# [review] loop exit elided here.
	removed_nodes.clear()
class _pkg_failure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		# [review] a guard line appears elided here — as written this
		# raises IndexError when pargs is empty; confirm upstream.
		self.status = pargs[0]
def _schedule_fetch(self, fetcher):
	"""
	Schedule a fetcher on the fetch queue, in order to
	serialize access to the fetch log.
	"""
	self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
	"""
	Schedule a setup phase on the merge queue, in order to
	serialize unsandboxed access to the live filesystem.
	"""
	self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
	"""
	Schedule an unpack phase on the unpack queue, in order
	to serialize $DISTDIR access for live ebuilds.
	"""
	self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
	"""
	Returns a callable which should be called only when
	the vdb lock has been acquired.
	"""
	# [review] the wrapper that defers this call until the lock is
	# held appears elided from this excerpt.
	return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
	# Returns dblink instances for installed packages that block
	# new_pkg (excluding same-slot/same-cpv matches, which are
	# upgrades/reinstalls, not blockers).
	if self._opts_ignore_blockers.intersection(self.myopts):
		# [review] early return elided here.

	# Call gc.collect() here to avoid heap overflow that
	# triggers 'Cannot allocate memory' errors (reported
	# [review] the rest of this comment and the gc.collect() call are
	# elided from this excerpt.

	blocker_db = self._blocker_db[new_pkg.root]

	blocker_dblinks = []
	for blocking_pkg in blocker_db.findInstalledBlockers(
		new_pkg, acquire_lock=acquire_lock):
		if new_pkg.slot_atom == blocking_pkg.slot_atom:
			# [review] "continue" elided here.
		if new_pkg.cpv == blocking_pkg.cpv:
			# [review] "continue" elided here.
		blocker_dblinks.append(portage.dblink(
			blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
			self.pkgsettings[blocking_pkg.root], treetype="vartree",
			vartree=self.trees[blocking_pkg.root]["vartree"]))

	return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""
	Translate a dblink instance into the corresponding Package
	object from this scheduler's package cache.
	"""
	type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	root_config = self.trees[pkg_dblink.myroot]["root_config"]
	return self._pkg(pkg_dblink.mycpv, type_name, root_config,
		installed=(type_name == "installed"))
def _append_to_log_path(self, log_path, msg):
	# Append msg to the given log file.  [review] the write/close
	# lines of this method are elided from this excerpt.
	f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
	# Route elog messages emitted during a dblink merge, sending them
	# to the package's log file when running in the background.
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	# [review] initialization of out/log_file locals is elided here.
	background = self._background

	if background and log_path is not None:
		log_file = open(log_path, 'a')
		# [review] redirection of out to log_file is elided here.

	# [review] try/for scaffolding around this call is elided here.
	func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
	# [review] a "finally:" line appears elided here.
	if log_file is not None:
		# [review] the close() call is elided here.
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
	# Display (or log) a message emitted during a dblink merge.
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background

	if log_path is None:
		# No log file: write to the terminal unless we are in the
		# background and the message is below WARN level.
		if not (background and level < logging.WARN):
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
	# [review] an "else:"/foreground guard appears elided here.
		portage.util.writemsg_level(msg,
			level=level, noiselevel=noiselevel)
	self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
	pkg_dblink, pkg_dbapi, ebuild_path, phase):
	"""
	Using this callback for merge phases allows the scheduler
	to run while these phases execute asynchronously, and allows
	the scheduler control output handling.
	"""
	scheduler = self._sched_iface
	settings = pkg_dblink.settings
	pkg = self._dblink_pkg(pkg_dblink)
	background = self._background
	log_path = settings.get("PORTAGE_LOG_FILE")

	ebuild_phase = EbuildPhase(background=background,
		pkg=pkg, phase=phase, scheduler=scheduler,
		settings=settings, tree=pkg_dblink.treetype)
	ebuild_phase.start()
	# [review] a wait() call appears elided here — returncode is only
	# meaningful once the phase has completed; confirm upstream.
	return ebuild_phase.returncode
def _check_manifests(self):
	# Verify all the manifests now so that the user is notified of failure
	# as soon as possible.
	if "strict" not in self.settings.features or \
		"--fetchonly" in self.myopts or \
		"--fetch-all-uri" in self.myopts:
		# [review] early success return elided here.

	shown_verifying_msg = False
	# [review] creation of the quiet_settings dict is elided here.
	for myroot, pkgsettings in self.pkgsettings.iteritems():
		# Clone each root's config with PORTAGE_QUIET forced on so
		# digestcheck output stays quiet.
		quiet_config = portage.config(clone=pkgsettings)
		quiet_config["PORTAGE_QUIET"] = "1"
		quiet_config.backup_changes("PORTAGE_QUIET")
		quiet_settings[myroot] = quiet_config
		# [review] cleanup of the loop local is elided here.

	for x in self._mergelist:
		if not isinstance(x, Package) or \
			x.type_name != "ebuild":
			# [review] "continue" elided here.

		if not shown_verifying_msg:
			shown_verifying_msg = True
			self._status_msg("Verifying ebuild manifests")

		root_config = x.root_config
		portdb = root_config.trees["porttree"].dbapi
		quiet_config = quiet_settings[root_config.root]
		quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
		if not portage.digestcheck([], quiet_config, strict=True):
			# [review] failure return and the final success return are
			# elided from this excerpt.
def _add_prefetchers(self):
	# Queue background fetch tasks for everything in the merge list.
	if not self._parallel_fetch:
		# [review] early return elided here.

	if self._parallel_fetch:
		self._status_msg("Starting parallel fetch")

		prefetchers = self._prefetchers
		getbinpkg = "--getbinpkg" in self.myopts

		# In order to avoid "waiting for lock" messages
		# at the beginning, which annoy users, never
		# spawn a prefetcher for the first package.
		for pkg in self._mergelist[1:]:
			prefetcher = self._create_prefetcher(pkg)
			if prefetcher is not None:
				self._task_queues.fetch.add(prefetcher)
				prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
	"""
	@return: a prefetcher, or None if not applicable
	"""
	# [review] initialization of the prefetcher local is elided here.
	if not isinstance(pkg, Package):
		# [review] no-op branch body elided here.

	elif pkg.type_name == "ebuild":
		prefetcher = EbuildFetcher(background=True,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			fetchonly=1, logfile=self._fetch_log,
			pkg=pkg, prefetch=True, scheduler=self._sched_iface)

	elif pkg.type_name == "binary" and \
		"--getbinpkg" in self.myopts and \
		pkg.root_config.trees["bintree"].isremote(pkg.cpv):
		prefetcher = BinpkgPrefetcher(background=True,
			pkg=pkg, scheduler=self._sched_iface)
	# [review] "return prefetcher" elided from this excerpt.
def _is_restart_scheduled(self):
	"""
	Check if the merge list contains a replacement
	for the current running instance, that will result
	in restart after merge.

	@returns: True if a restart is scheduled, False otherwise.
	"""
	if self._opts_no_restart.intersection(self.myopts):
		# [review] "return False" elided here.

	mergelist = self._mergelist

	for i, pkg in enumerate(mergelist):
		# A restart only happens when the portage replacement is not
		# the last entry in the list.
		if self._is_restart_necessary(pkg) and \
			i != len(mergelist) - 1:
			# [review] "return True" elided here.
	# [review] final "return False" elided from this excerpt.
def _is_restart_necessary(self, pkg):
	"""
	@return: True if merging the given package
		requires restart, False otherwise.
	"""
	# Figure out if we need a restart.
	if pkg.root == self._running_root.root and \
		portage.match_from_list(
		portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
		# pkg is a portage package for the running root; a restart is
		# needed only when its version differs from the running one.
		if self._running_portage:
			return pkg.cpv != self._running_portage.cpv
		# [review] the remaining return statements are elided from
		# this excerpt.
def _restart_if_necessary(self, pkg):
	"""
	Use execv() to restart emerge. This happens
	if portage upgrades itself and there are
	remaining packages in the list.
	"""
	if self._opts_no_restart.intersection(self.myopts):
		# [review] early return elided here.

	if not self._is_restart_necessary(pkg):
		# [review] early return elided here.

	if pkg == self._mergelist[-1]:
		# Nothing left to merge after this package, so no restart.
		# [review] early return elided here.

	self._main_loop_cleanup()

	logger = self._logger
	pkg_count = self._pkg_count
	mtimedb = self._mtimedb
	bad_resume_opts = self._bad_resume_opts

	logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
		(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

	logger.log(" *** RESTARTING " + \
		"emerge via exec() after change of " + \
		"portage version.")

	mtimedb["resume"]["mergelist"].remove(list(pkg))
	# [review] an mtimedb.commit() call appears elided here.
	portage.run_exitfuncs()
	mynewargv = [sys.argv[0], "--resume"]
	resume_opts = self.myopts.copy()
	# For automatic resume, we need to prevent
	# any of bad_resume_opts from leaking in
	# via EMERGE_DEFAULT_OPTS.
	resume_opts["--ignore-default-opts"] = True
	for myopt, myarg in resume_opts.iteritems():
		if myopt not in bad_resume_opts:
			# [review] the flag-vs-value branch lines around these two
			# appends are elided from this excerpt.
			mynewargv.append(myopt)
			mynewargv.append(myopt +"="+ str(myarg))
	# priority only needs to be adjusted on the first run
	os.environ["PORTAGE_NICENESS"] = "0"
	os.execv(mynewargv[0], mynewargv)
	# NOTE(review): the enclosing method header (the public merge entry
	# point) is elided from this excerpt; the statements below are its
	# body, reproduced unmodified with elision markers.
	if "--resume" in self.myopts:
		# We're resuming.
		portage.writemsg_stdout(
			colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
		self._logger.log(" *** Resuming merge...")

	self._save_resume_list()

	# [review] a "try:" line appears elided here.
	self._background = self._background_mode()
	except self._unknown_internal_error:
		# [review] failure return elided here.

	for root in self.trees:
		root_config = self.trees[root]["root_config"]

		# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
		# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
		# for ensuring sane $PWD (bug #239560) and storing elog messages.
		tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
		if not tmpdir or not os.path.isdir(tmpdir):
			msg = "The directory specified in your " + \
				"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
				"does not exist. Please create this " + \
				"directory or correct your PORTAGE_TMPDIR setting."
			msg = textwrap.wrap(msg, 70)
			out = portage.output.EOutput()
			# [review] message display and failure return elided here.

		if self._background:
			root_config.settings.unlock()
			root_config.settings["PORTAGE_BACKGROUND"] = "1"
			root_config.settings.backup_changes("PORTAGE_BACKGROUND")
			root_config.settings.lock()

		self.pkgsettings[root] = portage.config(
			clone=root_config.settings)

	rval = self._check_manifests()
	if rval != os.EX_OK:
		# [review] failure return elided here.

	keep_going = "--keep-going" in self.myopts
	fetchonly = self._build_opts.fetchonly
	mtimedb = self._mtimedb
	failed_pkgs = self._failed_pkgs

	# [review] the enclosing retry loop header ("while True:") appears
	# elided here.
	rval = self._merge()
	if rval == os.EX_OK or fetchonly or not keep_going:
		# [review] loop break elided here.
	if "resume" not in mtimedb:
		# [review] loop break elided here.
	mergelist = self._mtimedb["resume"].get("mergelist")
	# [review] empty-mergelist guard elided here.

	if not failed_pkgs:
		# [review] loop break elided here.

	for failed_pkg in failed_pkgs:
		mergelist.remove(list(failed_pkg.pkg))

	self._failed_pkgs_all.extend(failed_pkgs)
	# [review] clearing of failed_pkgs appears elided here.

	if not self._calc_resume_list():
		# [review] loop break elided here.

	clear_caches(self.trees)
	if not self._mergelist:
		# [review] loop break elided here.

	self._save_resume_list()
	self._pkg_count.curval = 0
	self._pkg_count.maxval = len([x for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"])
	self._status_display.maxval = self._pkg_count.maxval

	self._logger.log(" *** Finished. Cleaning up...")

	self._failed_pkgs_all.extend(failed_pkgs)
	# [review] clearing of failed_pkgs appears elided here.

	background = self._background
	failure_log_shown = False
	if background and len(self._failed_pkgs_all) == 1:

		# If only one package failed then just show it's
		# whole log for easy viewing.
		failed_pkg = self._failed_pkgs_all[-1]
		build_dir = failed_pkg.build_dir
		# [review] initialization of log_file is elided here.

		log_paths = [failed_pkg.build_log]

		log_path = self._locate_failure_log(failed_pkg)
		if log_path is not None:
			# [review] a "try:"/except wrapper around this open()
			# appears elided here.
			log_file = open(log_path, 'rb')

		if log_file is not None:
			# [review] a "try:"/finally wrapper with close() appears
			# elided here.
			for line in log_file:
				writemsg_level(line, noiselevel=-1)
			failure_log_shown = True

	# Dump mod_echo output now since it tends to flood the terminal.
	# This allows us to avoid having more important output, generated
	# later, from being swept away by the mod_echo output.
	mod_echo_output = _flush_elog_mod_echo()

	if background and not failure_log_shown and \
		self._failed_pkgs_all and \
		self._failed_pkgs_die_msgs and \
		not mod_echo_output:

		printer = portage.output.EOutput()
		for mysettings, key, logentries in self._failed_pkgs_die_msgs:
			# [review] initialization of root_msg default elided here.
			if mysettings["ROOT"] != "/":
				root_msg = " merged to %s" % mysettings["ROOT"]
			printer.einfo("Error messages for package %s%s:" % \
				(colorize("INFORM", key), root_msg))

			for phase in portage.const.EBUILD_PHASES:
				if phase not in logentries:
					# [review] "continue" elided here.
				for msgtype, msgcontent in logentries[phase]:
					if isinstance(msgcontent, basestring):
						msgcontent = [msgcontent]
					for line in msgcontent:
						printer.eerror(line.strip("\n"))

	if self._post_mod_echo_msgs:
		for msg in self._post_mod_echo_msgs:
			# [review] message invocation elided here.

	if len(self._failed_pkgs_all) > 1:
		msg = "The following packages have " + \
			"failed to build or install:"
		prefix = bad(" * ")
		writemsg(prefix + "\n", noiselevel=-1)
		from textwrap import wrap
		for line in wrap(msg, 72):
			writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
		writemsg(prefix + "\n", noiselevel=-1)
		for failed_pkg in self._failed_pkgs_all:
			writemsg("%s\t%s\n" % (prefix,
				colorize("INFORM", str(failed_pkg.pkg))),
				# [review] the noiselevel argument appears elided here.
			writemsg(prefix + "\n", noiselevel=-1)
		# [review] the method's return statement is elided from this
		# excerpt.
def _elog_listener(self, mysettings, key, logentries, fulltext):
	# Collect ERROR-level elog entries so die messages can be replayed
	# after the merge run finishes.
	errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
	# [review] an "if errors:" guard appears elided here.
	self._failed_pkgs_die_msgs.append(
		(mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
	# Locate the log file associated with a failed package.
	# [review] most of this method (candidate collection, existence
	# checks, size comparison and return) is elided from this excerpt.
	build_dir = failed_pkg.build_dir

	log_paths = [failed_pkg.build_log]

	for log_path in log_paths:
		log_size = os.stat(log_path).st_size
def _add_packages(self):
	# Move everything from the merge list into the scheduling queue.
	pkg_queue = self._pkg_queue
	for pkg in self._mergelist:
		if isinstance(pkg, Package):
			pkg_queue.append(pkg)
		elif isinstance(pkg, Blocker):
			# [review] blocker handling is elided from this excerpt.
def _merge_exit(self, merge):
	# Exit listener for PackageMerge tasks: record the result, release
	# the config instance, and update the status display.
	self._do_merge_exit(merge)
	self._deallocate_config(merge.merge.settings)
	if merge.returncode == os.EX_OK and \
		not merge.merge.pkg.installed:
		self._status_display.curval += 1
	self._status_display.merges = len(self._task_queues.merge)
	# [review] a re-scheduling call appears elided here.
def _do_merge_exit(self, merge):
	# Handle bookkeeping for a completed (or failed) merge task.
	pkg = merge.merge.pkg
	if merge.returncode != os.EX_OK:
		settings = merge.merge.settings
		build_dir = settings.get("PORTAGE_BUILDDIR")
		build_log = settings.get("PORTAGE_LOG_FILE")

		self._failed_pkgs.append(self._failed_pkg(
			build_dir=build_dir, build_log=build_log,
			# [review] the pkg keyword argument line is elided here.
			returncode=merge.returncode))
		self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

		self._status_display.failed = len(self._failed_pkgs)
		# [review] an early return appears elided here.

	self._task_complete(pkg)
	pkg_to_replace = merge.merge.pkg_to_replace
	if pkg_to_replace is not None:
		# When a package is replaced, mark it's uninstall
		# task complete (if any).
		uninst_hash_key = \
			("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
		self._task_complete(uninst_hash_key)

	# [review] one or more lines are elided here.
	self._restart_if_necessary(pkg)

	# Call mtimedb.commit() after each merge so that
	# --resume still works after being interrupted
	# by reboot, sigkill or similar.
	mtimedb = self._mtimedb
	mtimedb["resume"]["mergelist"].remove(list(pkg))
	if not mtimedb["resume"]["mergelist"]:
		del mtimedb["resume"]
	# [review] the mtimedb.commit() call is elided from this excerpt.
def _build_exit(self, build):
	"""
	Exit listener for build tasks.  A successful build is handed to
	the merge queue; a failed one is recorded for the summary.
	"""
	if build.returncode == os.EX_OK:
		# Success path: queue the built package for merging.
		merge = PackageMerge(merge=build)
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)
		self._status_display.merges = len(self._task_queues.merge)
	# Failure path: capture log locations and record the failure.
	settings = build.settings
	build_dir = settings.get("PORTAGE_BUILDDIR")
	build_log = settings.get("PORTAGE_LOG_FILE")
	self._failed_pkgs.append(self._failed_pkg(
		build_dir=build_dir, build_log=build_log,
		returncode=build.returncode))
	self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
	self._status_display.failed = len(self._failed_pkgs)
	self._deallocate_config(build.settings)
	self._status_display.running = self._jobs
10347 def _extract_exit(self, build):
10348 self._build_exit(build)
10350 def _task_complete(self, pkg):
10351 self._completed_tasks.add(pkg)
10352 self._choose_pkg_return_early = False
# Body of the scheduler's merge-run driver: set up global hooks,
# run the main loop, then restore the hooks and compute the
# return code from the last recorded failure.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Silence lock messages in background mode and install the elog
# listener for the duration of the run.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
self._main_loop_cleanup()
# Restore global state modified above.
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# Propagate the return code of the most recent failure.
rval = failed_pkgs[-1].returncode
10375 def _main_loop_cleanup(self):
10376 del self._pkg_queue[:]
10377 self._completed_tasks.clear()
10378 self._choose_pkg_return_early = False
10379 self._status_display.reset()
10380 self._digraph = None
10381 self._task_queues.fetch.clear()
def _choose_pkg(self):
	"""
	Choose a task that has all of its dependencies satisfied.
	"""
	# Fast path: a previous search already proved nothing is ready.
	if self._choose_pkg_return_early:
	# Without a dependency graph (--nodeps) ordering cannot be
	# computed; only take the queue head when parallelism allows it.
	if self._digraph is None:
		if (self._jobs or self._task_queues.merge) and \
			not ("--nodeps" in self.myopts and \
			(self._max_jobs is True or self._max_jobs > 1)):
			self._choose_pkg_return_early = True
		return self._pkg_queue.pop(0)
	# Nothing is currently running, so the queue head is safe.
	if not (self._jobs or self._task_queues.merge):
		return self._pkg_queue.pop(0)
	self._prune_digraph()
	# Pick the first queued package that does not depend on any
	# scheduled merge; packages later in the queue are ignored as
	# dependencies since they merge after us anyway.
	later = set(self._pkg_queue)
	for pkg in self._pkg_queue:
		if not self._dependent_on_scheduled_merges(pkg, later):
	if chosen_pkg is not None:
		self._pkg_queue.remove(chosen_pkg)
	if chosen_pkg is None:
		# There's no point in searching for a package to
		# choose until at least one of the existing jobs
		self._choose_pkg_return_early = True
def _dependent_on_scheduled_merges(self, pkg, later):
	"""
	Traverse the subgraph of the given package's deep dependencies
	to see if it contains any scheduled merges.
	@param pkg: a package to check dependencies for
	@param later: packages for which dependence should be ignored
		since they will be merged later than pkg anyway and therefore
		delaying the merge of pkg will not result in a more optimal
	@returns: True if the package is dependent, False otherwise.
	"""
	graph = self._digraph
	completed_tasks = self._completed_tasks
	# Depth-first walk over pkg's children in the dependency graph.
	traversed_nodes = set([pkg])
	direct_deps = graph.child_nodes(pkg)
	node_stack = direct_deps
	direct_deps = frozenset(direct_deps)
	node = node_stack.pop()
	if node in traversed_nodes:
	traversed_nodes.add(node)
	# A node only matters when it represents a pending merge:
	# installed "nomerge" nodes, indirect uninstalls, and already
	# completed tasks do not block scheduling.
	if not ((node.installed and node.operation == "nomerge") or \
		(node.operation == "uninstall" and \
		node not in direct_deps) or \
		node in completed_tasks or \
	node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
	"""
	Allocate a unique config instance for a task in order
	to prevent interference between parallel tasks.
	"""
	# Reuse a pooled instance for this ROOT when one is available.
	if self._config_pool[root]:
		temp_settings = self._config_pool[root].pop()
	# Otherwise clone a fresh config for this ROOT.
	temp_settings = portage.config(clone=self.pkgsettings[root])
	# Since config.setcpv() isn't guaranteed to call config.reset() due to
	# performance reasons, call it here to make sure all settings from the
	# previous package get flushed out (such as PORTAGE_LOG_FILE).
	temp_settings.reload()
	temp_settings.reset()
	return temp_settings
10478 def _deallocate_config(self, settings):
10479 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
	# Only allow 1 job max if a restart is scheduled
	# due to portage update.
	if self._is_restart_scheduled() or \
		self._opts_no_background.intersection(self.myopts):
		self._set_max_jobs(1)
	merge_queue = self._task_queues.merge
	# Drive scheduling and event polling until no work remains.
	while self._schedule():
		if self._poll_event_handlers:
	# Drain any remaining poll events once the queues are empty.
	if not (self._jobs or merge_queue):
	if self._poll_event_handlers:
10502 def _keep_scheduling(self):
10503 return bool(self._pkg_queue and \
10504 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
	"""
	One scheduling round: start whatever can start, refresh the
	display, and report whether scheduling should continue.
	"""
	self._schedule_tasks_imp()
	self._status_display.display()
	for q in self._task_queues.values():
	# Cancel prefetchers if they're the only reason
	# the main poll loop is still running.
	if self._failed_pkgs and not self._build_opts.fetchonly and \
		not (self._jobs or self._task_queues.merge) and \
		self._task_queues.fetch:
		self._task_queues.fetch.clear()
		# Re-run a scheduling pass so state settles after the
		# fetch queue has been cleared.
		self._schedule_tasks_imp()
		self._status_display.display()
	return self._keep_scheduling()
def _job_delay(self):
	"""
	@returns: True if job scheduling should be delayed, False otherwise.
	"""
	# Throttle job spawning when a load limit is configured: the
	# required gap between job starts grows with the number of
	# running jobs (factor * jobs ** exponent, capped at a maximum).
	if self._jobs and self._max_load is not None:
		current_time = time.time()
		delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
		if delay > self._job_delay_max:
			delay = self._job_delay_max
		if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
	"""
	@returns: True if state changed, False otherwise.
	"""
	if not self._keep_scheduling():
		return bool(state_change)
	# Stop early when nothing can be chosen or no job slot is free.
	if self._choose_pkg_return_early or \
		not self._can_add_job() or \
		return bool(state_change)
	pkg = self._choose_pkg()
		return bool(state_change)
	# Only count packages that will actually be merged anew.
	if not pkg.installed:
		self._pkg_count.curval += 1
	task = self._task(pkg)
	# Already-installed packages go straight to the merge queue.
	merge = PackageMerge(merge=task)
	merge.addExitListener(self._merge_exit)
	self._task_queues.merge.add(merge)
	# Binary packages are extracted first, then merged on exit.
	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._extract_exit)
	self._task_queues.jobs.add(task)
	# Ebuilds are built first, then merged on exit.
	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._build_exit)
	self._task_queues.jobs.add(task)
	return bool(state_change)
def _task(self, pkg):
	"""
	Build a MergeListItem task for *pkg*, resolving the installed
	package (same slot) that the merge would replace, if any.
	"""
	pkg_to_replace = None
	if pkg.operation != "uninstall":
		vardb = pkg.root_config.trees["vartree"].dbapi
		previous_cpv = vardb.match(pkg.slot_atom)
		previous_cpv = previous_cpv.pop()
		pkg_to_replace = self._pkg(previous_cpv,
			"installed", pkg.root_config, installed=True)

	# Wire the task up with all scheduler callbacks and shared state.
	task = MergeListItem(args_set=self._args_set,
		background=self._background, binpkg_opts=self._binpkg_opts,
		build_opts=self._build_opts,
		config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
		emerge_opts=self.myopts,
		find_blockers=self._find_blockers(pkg), logger=self._logger,
		mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
		pkg_to_replace=pkg_to_replace,
		prefetcher=self._prefetchers.get(pkg),
		scheduler=self._sched_iface,
		settings=self._allocate_config(pkg.root),
		statusMessage=self._status_msg,
		world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""
	Emit a one-line status message describing a failed package
	("Failed to <action> <cpv> [<preposition> <root>]"), followed by
	the location of its build log when one can be located.
	"""
	pkg = failed_pkg.pkg
	log_path = self._locate_failure_log(failed_pkg)

	msg = "%s to %s %s" % \
		(bad("Failed"), action, colorize("INFORM", pkg.cpv))
	if pkg.root != "/":
		msg += " %s %s" % (preposition, pkg.root)
	if log_path is not None:
		msg += ", Log file:"
	self._status_msg(msg)

	if log_path is not None:
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
	"""
	Display a brief status message (no newlines) in the status display.
	This is called by tasks to provide feedback to the user. This
	delegates the responsibility of generating \r and \n control characters,
	to guarantee that lines are created or erased when necessary and
	@param msg: a brief status message (no newlines allowed)
	"""
	# In foreground mode, emit a newline first so the message does
	# not run into live command output.
	if not self._background:
		writemsg_level("\n")
	self._status_display.displayMessage(msg)
def _save_resume_list(self):
	"""
	Do this before verifying the ebuild Manifests since it might
	be possible for the user to use --resume --skipfirst to get past
	a non-essential package with a broken digest.
	"""
	mtimedb = self._mtimedb
	# Only real merge operations belong in the resume list.
	mtimedb["resume"]["mergelist"] = [list(x) \
		for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"]
def _calc_resume_list(self):
	"""
	Use the current resume list to calculate a new one,
	dropping any packages with unsatisfied deps.
	@returns: True if successful, False otherwise.
	"""
	print colorize("GOOD", "*** Resuming merge...")

	if self._show_list():
		if "--tree" in self.myopts:
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be merged, in reverse order:\n\n"))
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be merged, in order:\n\n"))

	show_spinner = "--quiet" not in self.myopts and \
		"--nodeps" not in self.myopts

	print "Calculating dependencies ",

	myparams = create_depgraph_params(self.myopts, None)
	# Rebuild the dependency graph from the saved resume list,
	# skipping entries whose dependencies can no longer be satisfied.
	success, mydepgraph, dropped_tasks = resume_depgraph(
		self.settings, self.trees, self._mtimedb, self.myopts,
		myparams, self._spinner, skip_unsatisfied=True)
	except depgraph.UnsatisfiedResumeDep, e:
		mydepgraph = e.depgraph
		dropped_tasks = set()

	print "\b\b... done!"

	def unsatisfied_resume_dep_msg():
		# Deferred error report for masked / dep-broken resume entries.
		mydepgraph.display_problems()
		out = portage.output.EOutput()
		out.eerror("One or more packages are either masked or " + \
			"have missing dependencies:")
		show_parents = set()
		for dep in e.value:
			if dep.parent in show_parents:
			show_parents.add(dep.parent)
			if dep.atom is None:
				out.eerror(indent + "Masked package:")
				out.eerror(2 * indent + str(dep.parent))
				out.eerror(indent + str(dep.atom) + " pulled in by:")
				out.eerror(2 * indent + str(dep.parent))
		msg = "The resume list contains packages " + \
			"that are either masked or have " + \
			"unsatisfied dependencies. " + \
			"Please restart/continue " + \
			"the operation manually, or use --skipfirst " + \
			"to skip the first package in the list and " + \
			"any other packages that may be " + \
			"masked or have missing dependencies."
		for line in textwrap.wrap(msg, 72):
	self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

	if success and self._show_list():
		mylist = mydepgraph.altlist()
		if "--tree" in self.myopts:
			mydepgraph.display(mylist, favorites=self._favorites)
	self._post_mod_echo_msgs.append(mydepgraph.display_problems)
	mydepgraph.display_problems()

	# Adopt the recalculated list and break graph references so the
	# old objects can be garbage collected.
	mylist = mydepgraph.altlist()
	mydepgraph.break_refs(mylist)
	mydepgraph.break_refs(dropped_tasks)
	self._mergelist = mylist
	self._set_digraph(mydepgraph.schedulerGraph())

	# Report each dropped merge via elog and record it as failed.
	for task in dropped_tasks:
		if not (isinstance(task, Package) and task.operation == "merge"):
		msg = "emerge --keep-going:" + \
		if pkg.root != "/":
			msg += " for %s" % (pkg.root,)
		msg += " dropped due to unsatisfied dependency."
		for line in textwrap.wrap(msg, msg_width):
			eerror(line, phase="other", key=pkg.cpv)
		settings = self.pkgsettings[pkg.root]
		# Ensure that log collection from $T is disabled inside
		# elog_process(), since any logs that might exist are
		settings.pop("T", None)
		portage.elog.elog_process(pkg.cpv, settings)
		self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
def _show_list(self):
	"""
	Return whether the merge list should be displayed, based on the
	combination of --quiet, --ask, --tree and --verbose options.
	"""
	myopts = self.myopts
	if "--quiet" not in myopts and \
		("--ask" in myopts or "--tree" in myopts or \
		"--verbose" in myopts):
def _world_atom(self, pkg):
	"""
	Add the package to the world file, but only if
	it's supposed to be added. Otherwise, do nothing.
	"""
	# Options that imply the package must not be recorded in world.
	if set(("--buildpkgonly", "--fetchonly",
		"--oneshot", "--onlydeps",
		"--pretend")).intersection(self.myopts):
	# Only packages merged into the target root belong in world.
	if pkg.root != self.target_root:
	# Only packages the user explicitly asked for are recorded.
	args_set = self._args_set
	if not args_set.findAtomForPackage(pkg):
	logger = self._logger
	pkg_count = self._pkg_count
	root_config = pkg.root_config
	world_set = root_config.sets["world"]
	world_locked = False
	if hasattr(world_set, "lock"):
		world_locked = True
	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk
	atom = create_world_atom(pkg, args_set, root_config)
	if hasattr(world_set, "add"):
		self._status_msg(('Recording %s in "world" ' + \
			'favorites file...') % atom)
		logger.log(" === (%s of %s) Updating world file (%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv))
		world_set.add(atom)
	# Fallback when the world set implementation is not writable.
	writemsg_level('\n!!! Unable to record %s in "world"\n' % \
		(atom,), level=logging.WARN, noiselevel=-1)
def _pkg(self, cpv, type_name, root_config, installed=False):
	"""
	Get a package instance from the cache, or create a new
	one if necessary. Raises KeyError from aux_get if it
	fails for some reason (package does not exist or is
	"""
	operation = "merge"
	operation = "nomerge"

	if self._digraph is not None:
		# Reuse existing instance when available.
		pkg = self._digraph.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is not None:

	# Construct a fresh Package from the matching tree's metadata.
	tree_type = depgraph.pkg_tree_map[type_name]
	db = root_config.trees[tree_type].dbapi
	db_keys = list(self.trees[root_config.root][
		tree_type].dbapi._aux_cache_keys)
	metadata = izip(db_keys, db.aux_get(cpv, db_keys))
	pkg = Package(cpv=cpv, metadata=metadata,
		root_config=root_config, installed=installed)
	if type_name == "ebuild":
		# Compute the effective USE flags for this ebuild.
		settings = self.pkgsettings[root_config.root]
		settings.setcpv(pkg)
		pkg.metadata["USE"] = settings["PORTAGE_USE"]
class MetadataRegen(PollScheduler):
	"""
	Poll-based scheduler that regenerates the ebuild metadata cache,
	running up to max_jobs metadata processes in parallel and pruning
	stale cache entries afterwards.
	"""

	def __init__(self, portdb, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		self._portdb = portdb
		if max_jobs is None:
		self._max_jobs = max_jobs
		self._max_load = max_load
		self._sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)
		# cpvs whose metadata was (or is being) processed successfully.
		self._valid_pkgs = set()
		self._process_iter = self._iter_metadata_processes()

	def _iter_metadata_processes(self):
		# Lazily yield one metadata process per ebuild that needs a
		# cache regeneration, walking categories/packages in order.
		portdb = self._portdb
		valid_pkgs = self._valid_pkgs
		every_cp = portdb.cp_all()
		every_cp.sort(reverse=True)
		cp = every_cp.pop()
		portage.writemsg_stdout("Processing %s\n" % cp)
		cpv_list = portdb.cp_list(cp)
		for cpv in cpv_list:
			valid_pkgs.add(cpv)
			ebuild_path, repo_path = portdb.findname2(cpv)
			metadata_process = portdb._metadata_process(
				cpv, ebuild_path, repo_path)
			if metadata_process is None:
			yield metadata_process

	# Driver body: run all metadata processes, then discard cache
	# entries whose ebuilds no longer exist.
	portdb = self._portdb
	from portage.cache.cache_errors import CacheError
	# Snapshot existing cache keys per tree so stale entries can be
	# pruned after regeneration.
	for mytree in portdb.porttrees:
		dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
		except CacheError, e:
			portage.writemsg("Error listing cache entries for " + \
				"'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
	while self._schedule():
	# Keep cache entries for every cpv that still has an ebuild.
	for y in self._valid_pkgs:
		for mytree in portdb.porttrees:
			if portdb.findname2(y, mytree=mytree)[0]:
				dead_nodes[mytree].discard(y)
	for mytree, nodes in dead_nodes.iteritems():
		auxdb = portdb.auxdb[mytree]
		except (KeyError, CacheError):

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
		"""
		# Start processes until the job limit is reached or the
		# iterator is exhausted.
		while self._can_add_job():
			metadata_process = self._process_iter.next()
			except StopIteration:

			metadata_process.scheduler = self._sched_iface
			metadata_process.addExitListener(self._metadata_exit)
			metadata_process.start()

	def _metadata_exit(self, metadata_process):
		# A failed process invalidates its cpv so the stale cache
		# entry is not preserved.
		if metadata_process.returncode != os.EX_OK:
			self._valid_pkgs.discard(metadata_process.cpv)
			portage.writemsg("Error processing %s, continuing...\n" % \
				(metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		# Exit status of the failed unmerge operation.
		self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Compute which installed packages the requested action
	(unmerge / prune / clean) will remove, display the plan, and then
	perform the actual unmerges.  Raises UninstallFailure (when
	enabled) if portage.unmerge() reports a failure.
	"""
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()
	db_keys = list(vartree.dbapi._aux_cache_keys)

	# Look up (or construct and cache) a Package instance for an
	# installed cpv.  NOTE(review): appears to be the body of a local
	# _pkg(cpv) helper — confirm against the full source.
	pkg = pkg_cache.get(cpv)
	pkg = Package(cpv=cpv, installed=True,
		metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
		root_config=root_config,
		type_name="installed")
	pkg_cache[cpv] = pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	# At least the parent needs to exist for the lock file.
	portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
	# Lock the vdb while the removal plan is computed.
	if os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)
	# Resolve the system set, expanding single-provider virtuals.
	realsyslist = sets["system"].getAtoms()
	for x in realsyslist:
		mycp = portage.dep_getkey(x)
		if mycp in settings.getvirtuals():
			for provider in settings.getvirtuals()[mycp]:
				if vartree.dbapi.match(provider):
					providers.append(provider)
			if len(providers) == 1:
				syslist.extend(providers)
		syslist.append(mycp)

	mysettings = portage.config(clone=settings)

	if not unmerge_files:
		if unmerge_action == "unmerge":
			print bold("emerge unmerge") + " can only be used with specific package names"

	localtree = vartree
	# process all arguments and add all
	# valid db entries to candidate_catpkgs
	if not unmerge_files:
		candidate_catpkgs.extend(vartree.dbapi.cp_all())
	#we've got command-line arguments
	if not unmerge_files:
		print "\nNo packages to unmerge have been provided.\n"
	for x in unmerge_files:
		arg_parts = x.split('/')
		if x[0] not in [".","/"] and \
			arg_parts[-1][-7:] != ".ebuild":
			#possible cat/pkg or dep; treat as such
			candidate_catpkgs.append(x)
		elif unmerge_action in ["prune","clean"]:
			print "\n!!! Prune and clean do not accept individual" + \
				" ebuilds as arguments;\n skipping.\n"
		# it appears that the user is specifying an installed
		# ebuild and we're in "unmerge" mode, so it's ok.
		if not os.path.exists(x):
			print "\n!!! The path '"+x+"' doesn't exist.\n"

		absx = os.path.abspath(x)
		sp_absx = absx.split("/")
		if sp_absx[-1][-7:] == ".ebuild":
			absx = "/".join(sp_absx)

		sp_absx_len = len(sp_absx)

		vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
		vdb_len = len(vdb_path)

		sp_vdb = vdb_path.split("/")
		sp_vdb_len = len(sp_vdb)

		if not os.path.exists(absx+"/CONTENTS"):
			print "!!! Not a valid db dir: "+str(absx)

		if sp_absx_len <= sp_vdb_len:
			# The Path is shorter... so it can't be inside the vdb.
			print "\n!!!",x,"cannot be inside "+ \
				vdb_path+"; aborting.\n"

		for idx in range(0,sp_vdb_len):
			if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
				print "\n!!!", x, "is not inside "+\
					vdb_path+"; aborting.\n"

		print "="+"/".join(sp_absx[sp_vdb_len:])
		candidate_catpkgs.append(
			"="+"/".join(sp_absx[sp_vdb_len:]))

	if (not "--quiet" in myopts):
		if settings["ROOT"] != "/":
			writemsg_level(darkgreen(newline+ \
				">>> Using system located in ROOT tree %s\n" % \

	if (("--pretend" in myopts) or ("--ask" in myopts)) and \
		not ("--quiet" in myopts):
		writemsg_level(darkgreen(newline+\
			">>> These are the packages that would be unmerged:\n"))

	# Preservation of order is required for --depclean and --prune so
	# that dependencies are respected. Use all_selected to eliminate
	# duplicate packages since the same package may be selected by
	all_selected = set()
	for x in candidate_catpkgs:
		# cycle through all our candidate deps and determine
		# what will and will not get unmerged
		mymatch = vartree.dbapi.match(x)
		except portage.exception.AmbiguousPackageName, errpkgs:
			print "\n\n!!! The short ebuild name \"" + \
				x + "\" is ambiguous. Please specify"
			print "!!! one of the following fully-qualified " + \
				"ebuild names instead:\n"
			for i in errpkgs[0]:
				print " " + green(i)

		if not mymatch and x[0] not in "<>=~":
			mymatch = localtree.dep_match(x)
			portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
				(x, unmerge_action), noiselevel=-1)

		# Each candidate gets a protected / selected / omitted entry.
		{"protected": set(), "selected": set(), "omitted": set()})
		mykey = len(pkgmap) - 1
		if unmerge_action=="unmerge":
			if y not in all_selected:
				pkgmap[mykey]["selected"].add(y)
				all_selected.add(y)
		elif unmerge_action == "prune":
			if len(mymatch) == 1:
			# Keep the "best" installed version; select the rest.
			best_version = mymatch[0]
			best_slot = vartree.getslot(best_version)
			best_counter = vartree.dbapi.cpv_counter(best_version)
			for mypkg in mymatch[1:]:
				myslot = vartree.getslot(mypkg)
				mycounter = vartree.dbapi.cpv_counter(mypkg)
				if (myslot == best_slot and mycounter > best_counter) or \
					mypkg == portage.best([mypkg, best_version]):
					if myslot == best_slot:
						if mycounter < best_counter:
							# On slot collision, keep the one with the
							# highest counter since it is the most
							# recently installed.
					best_version = mypkg
					best_counter = mycounter
			pkgmap[mykey]["protected"].add(best_version)
			pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
				if mypkg != best_version and mypkg not in all_selected)
			all_selected.update(pkgmap[mykey]["selected"])
		# unmerge_action == "clean"
		# Group installed versions by slot; the newest per slot
		# (highest counter) is protected, older ones are selected.
		for mypkg in mymatch:
			if unmerge_action == "clean":
				myslot = localtree.getslot(mypkg)
				# since we're pruning, we don't care about slots
				# and put all the pkgs in together
			if myslot not in slotmap:
				slotmap[myslot] = {}
			slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

		for mypkg in vartree.dbapi.cp_list(
			portage.dep_getkey(mymatch[0])):
			myslot = vartree.getslot(mypkg)
			if myslot not in slotmap:
				slotmap[myslot] = {}
			slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

		for myslot in slotmap:
			counterkeys = slotmap[myslot].keys()
			if not counterkeys:
			pkgmap[mykey]["protected"].add(
				slotmap[myslot][counterkeys[-1]])
			del counterkeys[-1]

			for counter in counterkeys[:]:
				mypkg = slotmap[myslot][counter]
				if mypkg not in mymatch:
					counterkeys.remove(counter)
					pkgmap[mykey]["protected"].add(
						slotmap[myslot][counter])

			#be pretty and get them in order of merge:
			for ckey in counterkeys:
				mypkg = slotmap[myslot][ckey]
				if mypkg not in all_selected:
					pkgmap[mykey]["selected"].add(mypkg)
					all_selected.add(mypkg)
			# ok, now the last-merged package
			# is protected, and the rest are selected
	numselected = len(all_selected)
	if global_unmerge and not numselected:
		portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")

	if not numselected:
		portage.writemsg_stdout(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
	# Plan is computed; the vdb lock is no longer needed.
	vartree.dbapi.flush_cache()
	portage.locks.unlockdir(vdb_lock)

	from portage.sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "world",
	# as there is no persistent list of "installed" sets
	installed_sets = ["world"]
	pos = len(installed_sets)
	for s in installed_sets[pos - 1:]:
		candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
		installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			# It could have been uninstalled
			# by a concurrent process.

			# Never let portage unmerge itself on the live root.
			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
					"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# unless we're unmerging a package set (as the package would be
				# removed from "world" later on)
				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):

				if s in unknown_sets:
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in " + \
						"%svar/lib/portage/world_sets") % \
						(s, root_config.root))

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):

					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						for inst_cpv in inst_matches:
							inst_pkg = _pkg(inst_cpv)
							# It could have been uninstalled
							# by a concurrent process.
							if inst_pkg.cp != atom.cp:
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
						if higher_slot is None:
					#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
					#print colorize("WARN", "but still listed in the following package sets:")
					#print " %s\n" % ", ".join(parents)
					print colorize("WARN", "Not unmerging package %s as it is" % cpv)
					print colorize("WARN", "still referenced by the following package sets:")
					print " %s\n" % ", ".join(parents)
					# adjust pkgmap so the display output is correct
					pkgmap[cp]["selected"].remove(cpv)
					all_selected.remove(cpv)
					pkgmap[cp]["protected"].add(cpv)

	numselected = len(all_selected)
	if not numselected:
		"\n>>> No packages selected for removal by " + \
		unmerge_action + "\n")

	# Unmerge order only matters in some cases
	selected = d["selected"]
	cp = portage.cpv_getkey(iter(selected).next())
	cp_dict = unordered.get(cp)
	if cp_dict is None:
		unordered[cp] = cp_dict
	for k, v in d.iteritems():
		cp_dict[k].update(v)
	pkgmap = [unordered[cp] for cp in sorted(unordered)]

	# --- Display phase: show selected/protected/omitted per cp ---
	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			# Warn loudly before touching system-profile packages.
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(portage.pkgcmp)
				for pn, ver, rev in sorted_pkgs:
					myversion = ver + "-" + rev
					if mytype == "selected":
						colorize("UNMERGE_WARN", myversion + " "),
						colorize("GOOD", myversion + " "), noiselevel=-1)
				writemsg_level("none ", noiselevel=-1)
			writemsg_level("\n", noiselevel=-1)
	writemsg_level("\n", noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True

	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	# --- Execution phase: unmerge every selected cpv ---
	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				raise UninstallFailure(retval)

			# Drop the package from "world" if the set supports it.
			if clean_world and hasattr(sets["world"], "cleanPackage"):
				sets["world"].cleanPackage(vartree.dbapi, y)
			emergelog(xterm_titles, " >>> unmerge success: "+y)
	if clean_world and hasattr(sets["world"], "remove"):
		for s in root_config.setconfig.active:
			sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	# Regenerate the GNU info directory index ("dir" files) for any info
	# directory whose on-disk mtime differs from the one recorded in
	# prev_mtimes, by running install-info over each info file.
	# NOTE(review): several structural lines (loop/else/try headers) are not
	# visible in this chunk; the code below is unchanged, only comments added.
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		# Collect the info roots whose mtime no longer matches the cache.
		inforoot=normpath(root+z)
		if os.path.isdir(inforoot):
			infomtime = long(os.stat(inforoot).st_mtime)
			if inforoot not in prev_mtimes or \
				prev_mtimes[inforoot] != infomtime:
					regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			# The "dir" index may exist plain or compressed.
			dir_extensions = ("", ".gz", ".bz2")
			for inforoot in regen_infodirs:
				# Skip roots that vanished or are not writable.
				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):
				file_list = os.listdir(inforoot)
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					# Ignore hidden files and subdirectories.
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
					# Ignore the index files themselves (dir, dir.gz, *.old).
					if x.startswith("dir"):
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
					# Before the first file, move any existing index aside so
					# install-info rebuilds it from scratch.
					if processed_count == 0:
						for ext in dir_extensions:
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError, e:
								# A missing variant of the dir file is expected.
								if e.errno != errno.ENOENT:
					processed_count += 1
					# LANG/LANGUAGE pinned to C so the output is parseable below.
					myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
					existsstr="already exists, for file `"
					if re.search(existsstr,myso):
						# Already exists... Don't increment the count for this.
					elif myso[:44]=="install-info: warning: no info dir entry in ":
						# This info file doesn't contain a DIR-header: install-info produces this
						# (harmless) warning (the --quiet switch doesn't seem to work).
						# Don't increment the count for this.
						badcount=badcount+1
						errmsg += myso + "\n"

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError, e:
							if e.errno != errno.ENOENT:

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

			# Summary: errors via eerror plus the collected messages,
			# otherwise a simple success note.
				out.eerror("Processed %d info files; %d errors." % \
					(icount, badcount))
				writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
				out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	"""Tell the user how many unread news items each repository has.

	Counts come from checkUpdatedNewsItems(); items are marked as read
	(update=True) unless this is a --pretend run.
	"""
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Only mark items as read when actually doing something (not --pretend).
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
			# Emit the banner only once, before the first repo with news.
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print colorize("WARN", " * IMPORTANT:"),
			print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	"""Print a report of preserved libraries, the packages that own them,
	and (up to MAX_DISPLAY per library) the files still consuming them."""

	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		linkmap_broken = False

		except portage.exception.CommandNotFound, e:
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			# Without a working linkmap we cannot compute consumers below.
			linkmap_broken = True
			search_for_owners = set()
			for cpv in plibdata:
				internal_plib_keys = set(linkmap._obj_key(f) \
					for f in plibdata[cpv])
				for f in plibdata[cpv]:
					if f in consumer_map:
					for c in linkmap.findConsumers(f):
						# Filter out any consumers that are also preserved libs
						# belonging to the same package as the provider.
						if linkmap._obj_key(c) not in internal_plib_keys:
							consumers.append(c)
					consumer_map[f] = consumers
					# Keep one extra so the display can detect ">MAX" below.
					search_for_owners.update(consumers[:MAX_DISPLAY+1])

		owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			for f in plibdata[cpv]:
				# Group alternate hard/sym-linked paths of the same object.
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					samefile_map[obj_key] = alt_paths
			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
				for p in alt_paths:
					print colorize("WARN", " * ") + " - %s" % (p,)
				consumers = consumer_map.get(f, [])
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				# Exactly one extra consumer: show it rather than "1 other".
				if len(consumers) == MAX_DISPLAY + 1:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.

	@returns: True if messages were shown, False otherwise.
	"""
	messages_shown = False
		from portage.elog import mod_echo
	except ImportError:
		pass # happens during downgrade to a version without the module
		# _items holds the queued messages; finalize() displays and clears them.
		messages_shown = bool(mod_echo._items)
		mod_echo.finalize()
	return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Config Files
	Display preserved libs warnings

	@param trees: A dictionary mapping each ROOT to its package databases
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	1. Calls sys.exit(retval)
	"""
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	# Flush queued elog "echo" messages so our notices appear last.
	_flush_elog_mod_echo()

	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if counter_hash is not None and \
		counter_hash == vardbapi._counter_hash():
		display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

		# Regenerate the GNU info index while holding the vdb lock.
		if "noinfo" not in settings.features:
			chk_updated_info_files(target_root,
				infodirs, info_mtimes, retval)
			portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
	"""Scan each CONFIG_PROTECT path for pending ._cfg????_* update files
	and tell the user how many config files need attention."""
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
			mymode = os.lstat(x).st_mode
		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
				real_mode = os.stat(x).st_mode
				if stat.S_ISDIR(real_mode):
		# Directories get a recursive find; files a single-level one.
		if stat.S_ISDIR(mymode):
			mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
				os.path.split(x.rstrip(os.path.sep))
		# Exclude editor backups; NUL-separate output for safe splitting.
		mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
			sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
			# Show the error message alone, sending stdout to /dev/null.
			os.system(mycommand + " 1>/dev/null")
			files = a[1].split('\0')
			# split always produces an empty string as the last element
			if files and not files[-1]:
				print "\n"+colorize("WARN", " * IMPORTANT:"),
				if stat.S_ISDIR(mymode):
					print "%d config files in '%s' need updating." % \
					print "config file '%s' needs updating." % x

	# Final pointer to the documentation, shown once if anything was found.
	print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
		" section of the " + bold("emerge")
	print " "+yellow("*")+" man page to learn how to update config files."
# NOTE(review): the continuation of this def line (the update=False default
# and closing paren) and the docstring delimiters are not visible in this
# chunk; code below is reproduced unchanged, only comments were added.
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param UNREAD_PATH:
	1. The number of unread but relevant news items.
	# Delegate to NewsManager, which handles relevance and read-tracking.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""Insert ``category`` into a category-less package atom.

	The category is spliced in immediately before the first word character
	so that any leading operator prefix (e.g. ">=", "=", "~") is preserved:
	">=foo-1.0" with category "sys-apps" becomes ">=sys-apps/foo-1.0".

	@param atom: a package atom without a category part
	@param category: the category name to insert
	@returns: the rewritten atom, or None when ``atom`` contains no word
		character (no valid insertion point).
	"""
	alphanum = re.search(r'\w', atom)
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		# No package name found; the caller must handle the failure.
		ret = None
	return ret
def is_valid_package_atom(x):
	"""Return the truth value of portage.isvalidatom() for ``x``, allowing
	atoms that lack a category part.

	A category-less atom (e.g. ">=foo-1.0") cannot be validated directly,
	so a dummy "cat/" category is spliced in before the first word
	character, preserving any operator prefix. Without the guards below,
	the visible code would corrupt atoms that already carry a category and
	crash on input with no word character (alphanum is None).

	@param x: a candidate package atom string
	@returns: result of portage.isvalidatom() on the (possibly rewritten) atom
	"""
	if "/" not in x:
		alphanum = re.search(r'\w', x)
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
11879 def show_blocker_docs_link():
11881 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11882 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11884 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11887 def show_mask_docs():
11888 print "For more information, see the MASKED PACKAGES section in the emerge"
11889 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
	"""Synchronize the local Portage tree (rsync, git or cvs, chosen from
	SYNC / the existing checkout), then refresh caches, run global updates,
	check config files and display news.

	NOTE(review): many structural lines (try/else/while headers and exits)
	of this function are not visible in this chunk; the code below is
	reproduced unchanged and only comments were added.
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === sync")
	myportdir = settings.get("PORTDIR", None)
	out = portage.output.EOutput()
		sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
	# Strip a trailing slash from PORTDIR.
	if myportdir[-1]=="/":
		myportdir=myportdir[:-1]
		st = os.stat(myportdir)
		print ">>>",myportdir,"not found, creating it."
		os.makedirs(myportdir,0755)
		st = os.stat(myportdir)

	spawn_kwargs["env"] = settings.environ()
	# When running privileged, sync as the tree's owner so the
	# uid/gid/permissions of PORTDIR stay consistent.
	if portage.data.secpass >= 2 and \
		(st.st_uid != os.getuid() and st.st_mode & 0700 or \
		st.st_gid != os.getgid() and st.st_mode & 0070):
			homedir = pwd.getpwuid(st.st_uid).pw_dir
			# Drop privileges when syncing, in order to match
			# existing uid/gid settings.
			spawn_kwargs["uid"] = st.st_uid
			spawn_kwargs["gid"] = st.st_gid
			spawn_kwargs["groups"] = [st.st_gid]
			spawn_kwargs["env"]["HOME"] = homedir
			if not st.st_mode & 0020:
				umask = umask | 0020
			spawn_kwargs["umask"] = umask

	syncuri = settings.get("SYNC", "").strip()
		writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
			noiselevel=-1, level=logging.ERROR)

	# Detect a tree that is already managed by a VCS checkout.
	vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
	vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))

	dosyncuri = syncuri
	updatecache_flg = False
	if myaction == "metadata":
		print "skipping sync"
		updatecache_flg = True
	elif ".git" in vcs_dirs:
		# Update existing git repository, and ignore the syncuri. We are
		# going to trust the user and assume that the user is in the branch
		# that he/she wants updated. We'll let the user manage branches with
		if portage.process.find_binary("git") is None:
			msg = ["Command not found: git",
			"Type \"emerge dev-util/git\" to enable git support."]
				writemsg_level("!!! %s\n" % l,
					level=logging.ERROR, noiselevel=-1)
		msg = ">>> Starting git pull in %s..." % myportdir
		emergelog(xterm_titles, msg )
		writemsg_level(msg + "\n")
		exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
			(portage._shell_quote(myportdir),), **spawn_kwargs)
		if exitcode != os.EX_OK:
			msg = "!!! git pull error in %s." % myportdir
			emergelog(xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
		msg = ">>> Git pull in %s successful" % myportdir
		emergelog(xterm_titles, msg)
		writemsg_level(msg + "\n")
		# git discards timestamps, so restore them from the metadata cache.
		exitcode = git_sync_timestamps(settings, myportdir)
		if exitcode == os.EX_OK:
			updatecache_flg = True
	elif syncuri[:8]=="rsync://":
		# Refuse to rsync over a VCS-controlled tree.
		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
		if not os.path.exists("/usr/bin/rsync"):
			print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
			print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
		import shlex, StringIO
		if settings["PORTAGE_RSYNC_OPTS"] == "":
			portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
			rsync_opts.extend([
				"--recursive", # Recurse directories
				"--links", # Consider symlinks
				"--safe-links", # Ignore links outside of tree
				"--perms", # Preserve permissions
				"--times", # Preserive mod times
				"--compress", # Compress the data transmitted
				"--force", # Force deletion on non-empty dirs
				"--whole-file", # Don't do block transfers, only entire files
				"--delete", # Delete files that aren't in the master tree
				"--stats", # Show final statistics about what was transfered
				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
				"--exclude=/distfiles", # Exclude distfiles from consideration
				"--exclude=/local", # Exclude local from consideration
				"--exclude=/packages", # Exclude packages from consideration
		# The below validation is not needed when using the above hardcoded
			portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
			lexer = shlex.shlex(StringIO.StringIO(
				settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
			lexer.whitespace_split = True
			rsync_opts.extend(lexer)

		# Make sure essential options are present even with custom opts.
		for opt in ("--recursive", "--times"):
			if opt not in rsync_opts:
				portage.writemsg(yellow("WARNING:") + " adding required option " + \
					"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
				rsync_opts.append(opt)

		for exclude in ("distfiles", "local", "packages"):
			opt = "--exclude=/%s" % exclude
			if opt not in rsync_opts:
				portage.writemsg(yellow("WARNING:") + \
					" adding required option %s not included in " % opt + \
					"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
				rsync_opts.append(opt)

		# Official gentoo.org mirrors get conservative, known-good options.
		if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
			def rsync_opt_startswith(opt_prefix):
				for x in rsync_opts:
					if x.startswith(opt_prefix):

			if not rsync_opt_startswith("--timeout="):
				rsync_opts.append("--timeout=%d" % mytimeout)

			for opt in ("--compress", "--whole-file"):
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + " adding required option " + \
						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
					rsync_opts.append(opt)

		if "--quiet" in myopts:
			rsync_opts.append("--quiet") # Shut up a lot
			rsync_opts.append("--verbose") # Print filelist

		if "--verbose" in myopts:
			rsync_opts.append("--progress") # Progress meter for each file

		if "--debug" in myopts:
			rsync_opts.append("--checksum") # Force checksum on all files

		# Real local timestamp file.
		servertimestampfile = os.path.join(
			myportdir, "metadata", "timestamp.chk")

		content = portage.util.grabfile(servertimestampfile)
				mytimestamp = time.mktime(time.strptime(content[0],
					"%a, %d %b %Y %H:%M:%S +0000"))
			except (OverflowError, ValueError):

			rsync_initial_timeout = \
				int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
			rsync_initial_timeout = 15

			maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit, e:
			raise # Needed else can't exit
			maxretries=3 #default number of retries

		# Split SYNC into optional user, host and port components.
		user_name, hostname, port = re.split(
			"rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
		if user_name is None:
		updatecache_flg=True
		all_rsync_opts = set(rsync_opts)
		# PORTAGE_RSYNC_EXTRA_OPTS is parsed shell-style, like above.
		lexer = shlex.shlex(StringIO.StringIO(
			settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
		lexer.whitespace_split = True
		extra_rsync_opts = list(lexer)
		all_rsync_opts.update(extra_rsync_opts)
		# Choose the address family, honoring -4/-6 style options.
		family = socket.AF_INET
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6
		# Sentinel exit codes used by the retry loop below.
		SERVER_OUT_OF_DATE = -1
		EXCEEDED_MAX_RETRIES = -2
			# Resolve all mirror addresses up front and shuffle them.
			for addrinfo in socket.getaddrinfo(
				hostname, None, family, socket.SOCK_STREAM):
				if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
					# IPv6 addresses need to be enclosed in square brackets
					ips.append("[%s]" % addrinfo[4][0])
					ips.append(addrinfo[4][0])
			from random import shuffle
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print "Notice:",str(e)

				# Substitute the next resolved IP into the sync URI.
				dosyncuri = syncuri.replace(
					"//" + user_name + hostname + port + "/",
					"//" + user_name + ips[0] + port + "/", 1)
			except SystemExit, e:
				raise # Needed else can't exit
			except Exception, e:
				print "Notice:",str(e)

				if "--ask" in myopts:
					if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
				emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
				if "--quiet" not in myopts:
					print ">>> Starting rsync with "+dosyncuri+"..."
				emergelog(xterm_titles,
					">>> Starting retry %d of %d with %s" % \
					(retries,maxretries,dosyncuri))
				print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)

			if mytimestamp != 0 and "--quiet" not in myopts:
				print ">>> Checking server timestamp ..."

			rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts

			if "--debug" in myopts:

			exitcode = os.EX_OK
			servertimestamp = 0
			# Even if there's no timestamp available locally, fetch the
			# timestamp anyway as an initial probe to verify that the server is
			# responsive. This protects us from hanging indefinitely on a
			# connection attempt to an unresponsive server which rsync's
			# --timeout option does not prevent.

			# Temporary file for remote server timestamp comparison.
			from tempfile import mkstemp
			fd, tmpservertimestampfile = mkstemp()
				mycommand = rsynccommand[:]
				mycommand.append(dosyncuri.rstrip("/") + \
					"/metadata/timestamp.chk")
				mycommand.append(tmpservertimestampfile)
					def timeout_handler(signum, frame):
						raise portage.exception.PortageException("timed out")
					signal.signal(signal.SIGALRM, timeout_handler)
					# Timeout here in case the server is unresponsive. The
					# --timeout rsync option doesn't apply to the initial
					# connection attempt.
					if rsync_initial_timeout:
						signal.alarm(rsync_initial_timeout)
						mypids.extend(portage.process.spawn(
							mycommand, env=settings.environ(), returnpid=True))
						exitcode = os.waitpid(mypids[0], 0)[1]
						content = portage.grabfile(tmpservertimestampfile)
						if rsync_initial_timeout:
							os.unlink(tmpservertimestampfile)
				except portage.exception.PortageException, e:
					# The probe timed out; reap/kill the child rsync.
					if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
						os.kill(mypids[0], signal.SIGTERM)
						os.waitpid(mypids[0], 0)
					# This is the same code rsync uses for timeout.
					# Decode the os.waitpid() status into an exit code.
					if exitcode != os.EX_OK:
						if exitcode & 0xff:
							exitcode = (exitcode & 0xff) << 8
							exitcode = exitcode >> 8
					portage.process.spawned_pids.remove(mypids[0])
						servertimestamp = time.mktime(time.strptime(
							content[0], "%a, %d %b %Y %H:%M:%S +0000"))
					except (OverflowError, ValueError):
				del mycommand, mypids, content
			if exitcode == os.EX_OK:
				if (servertimestamp != 0) and (servertimestamp == mytimestamp):
					emergelog(xterm_titles,
						">>> Cancelling sync -- Already current.")
					print ">>> Timestamps on the server and in the local repository are the same."
					print ">>> Cancelling all further sync action. You are already up to date."
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
				elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
					emergelog(xterm_titles,
						">>> Server out of date: %s" % dosyncuri)
					print ">>> SERVER OUT OF DATE: %s" % dosyncuri
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
					exitcode = SERVER_OUT_OF_DATE
				elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
					# Server is newer (or no timestamp): run the real sync.
					mycommand = rsynccommand + [dosyncuri+"/", myportdir]
					exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
					if exitcode in [0,1,3,4,11,14,20,21]:
			elif exitcode in [1,3,4,11,14,20,21]:
				# Code 2 indicates protocol incompatibility, which is expected
				# for servers with protocol < 29 that don't support
				# --prune-empty-directories. Retry for a server that supports
				# at least rsync protocol version 29 (>=rsync-2.6.4).

			if retries<=maxretries:
				print ">>> Retrying..."
				updatecache_flg=False
				exitcode = EXCEEDED_MAX_RETRIES

			emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
		elif exitcode == SERVER_OUT_OF_DATE:
		elif exitcode == EXCEEDED_MAX_RETRIES:
				">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
				# Map common rsync failure codes to helpful explanations.
				msg.append("Rsync has reported that there is a syntax error. Please ensure")
				msg.append("that your SYNC statement is proper.")
				msg.append("SYNC=" + settings["SYNC"])
				msg.append("Rsync has reported that there is a File IO error. Normally")
				msg.append("this means your disk is full, but can be caused by corruption")
				msg.append("on the filesystem that contains PORTDIR. Please investigate")
				msg.append("and try again after the problem has been fixed.")
				msg.append("PORTDIR=" + settings["PORTDIR"])
				msg.append("Rsync was killed before it finished.")
				msg.append("Rsync has not successfully finished. It is recommended that you keep")
				msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
				msg.append("to use rsync due to firewall or other restrictions. This should be a")
				msg.append("temporary problem unless complications exist with your network")
				msg.append("(and possibly your system's filesystem) configuration.")
	elif syncuri[:6]=="cvs://":
		if not os.path.exists("/usr/bin/cvs"):
			print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
			print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
		cvsroot=syncuri[6:]
		cvsdir=os.path.dirname(myportdir)
		if not os.path.exists(myportdir+"/CVS"):
			# Initial checkout into the parent dir, then rename into place.
			print ">>> Starting initial cvs checkout with "+syncuri+"..."
			if os.path.exists(cvsdir+"/gentoo-x86"):
				print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
				os.rmdir(myportdir)
				if e.errno != errno.ENOENT:
						"!!! existing '%s' directory; exiting.\n" % myportdir)
			if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
				print "!!! cvs checkout error; exiting."
			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
			# Existing checkout: just update it.
			print ">>> Starting cvs update with "+syncuri+"..."
			retval = portage.process.spawn_bash(
				"cd %s; cvs -z0 -q update -dP" % \
				(portage._shell_quote(myportdir),), **spawn_kwargs)
			if retval != os.EX_OK:
		dosyncuri = syncuri
		writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
			noiselevel=-1, level=logging.ERROR)

	# Honor FEATURES=-metadata-transfer by skipping the cache update.
	if updatecache_flg and \
		myaction != "metadata" and \
		"metadata-transfer" not in settings.features:
		updatecache_flg = False

	# Reload the whole config from scratch.
	settings, trees, mtimedb = load_emerge_config(trees=trees)
	root_config = trees[settings["ROOT"]]["root_config"]
	portdb = trees[settings["ROOT"]]["porttree"].dbapi

	if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
		action_metadata(settings, portdb, myopts)

	if portage._global_updates(trees, mtimedb["updates"]):
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi
		root_config = trees[settings["ROOT"]]["root_config"]

	# Compare the best visible portage version to the installed one.
	mybestpv = portdb.xmatch("bestmatch-visible",
		portage.const.PORTAGE_PACKAGE_ATOM)
	mypvs = portage.best(
		trees[settings["ROOT"]]["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM))

	chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())

	if myaction != "metadata":
		# Run the user's post_sync hook, if present and executable.
		if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
			retval = portage.process.spawn(
				[os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
				dosyncuri], env=settings.environ())
			if retval != os.EX_OK:
				print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")

	if(mybestpv != mypvs) and not "--quiet" in myopts:
		print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
		print red(" * ")+"that you update portage now, before any other packages are updated."
		print red(" * ")+"To update portage, run 'emerge portage' now."

	display_news_notification(root_config, myopts)
def git_sync_timestamps(settings, portdir):
	"""
	Since git doesn't preserve timestamps, synchronize timestamps between
	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
	for a given file as long as the file in the working tree is not modified
	(relative to HEAD).
	"""
	cache_dir = os.path.join(portdir, "metadata", "cache")
	if not os.path.isdir(cache_dir):
	writemsg_level(">>> Synchronizing timestamps...\n")

	from portage.cache.cache_errors import CacheError
		cache_db = settings.load_best_module("portdbapi.metadbmodule")(
			portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	except CacheError, e:
		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)

	# Eclass names, minus the ".eclass" suffix.
	ec_dir = os.path.join(portdir, "eclass")
		ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
			if f.endswith(".eclass"))
		writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)

	# Ask git which files differ from HEAD; those keep their on-disk mtimes.
	args = [portage.const.BASH_BINARY, "-c",
		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
		portage._shell_quote(portdir)]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
	modified_files = set(l.rstrip("\n") for l in proc.stdout)
	if rval != os.EX_OK:

	modified_eclasses = set(ec for ec in ec_names \
		if os.path.join("eclass", ec + ".eclass") in modified_files)

	updated_ec_mtimes = {}

	for cpv in cache_db:
		cpv_split = portage.catpkgsplit(cpv)
		if cpv_split is None:
			writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

		cat, pn, ver, rev = cpv_split
		cat, pf = portage.catsplit(cpv)
		relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
		# Locally modified ebuilds keep their working-tree timestamps.
		if relative_eb_path in modified_files:

			cache_entry = cache_db[cpv]
			eb_mtime = cache_entry.get("_mtime_")
			ec_mtimes = cache_entry.get("_eclasses_")
			writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		except CacheError, e:
			writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
				(cpv, e), level=logging.ERROR, noiselevel=-1)

		if eb_mtime is None:
			writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

			eb_mtime = long(eb_mtime)
			writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
				(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)

		if ec_mtimes is None:
			writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

		# Skip entries that depend on a locally modified eclass.
		if modified_eclasses.intersection(ec_mtimes):

		missing_eclasses = set(ec_mtimes).difference(ec_names)
		if missing_eclasses:
			writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
				(cpv, sorted(missing_eclasses)), level=logging.ERROR,

		eb_path = os.path.join(portdir, relative_eb_path)
			# NOTE(review): this binds the whole os.stat() result, yet it is
			# compared against the long eb_mtime below -- looks like it should
			# be os.stat(eb_path).st_mtime; confirm against upstream history.
			current_eb_mtime = os.stat(eb_path)
			writemsg_level("!!! Missing ebuild: %s\n" % \
				(cpv,), level=logging.ERROR, noiselevel=-1)

		# An eclass already restored with a different mtime means the cache
		# entries disagree; bail out for this cpv.
		inconsistent = False
		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			updated_mtime = updated_ec_mtimes.get(ec)
			if updated_mtime is not None and updated_mtime != ec_mtime:
				writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
					(cpv, ec), level=logging.ERROR, noiselevel=-1)
				inconsistent = True

		# Restore the cached mtime onto the ebuild and its eclasses.
		if current_eb_mtime != eb_mtime:
			os.utime(eb_path, (eb_mtime, eb_mtime))

		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			if ec in updated_ec_mtimes:
			ec_path = os.path.join(ec_dir, ec + ".eclass")
			current_mtime = long(os.stat(ec_path).st_mtime)
			if current_mtime != ec_mtime:
				os.utime(ec_path, (ec_mtime, ec_mtime))
			updated_ec_mtimes[ec] = ec_mtime
12528 def action_metadata(settings, portdb, myopts):
12529 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
12530 old_umask = os.umask(0002)
12531 cachedir = os.path.normpath(settings.depcachedir)
12532 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
12533 "/lib", "/opt", "/proc", "/root", "/sbin",
12534 "/sys", "/tmp", "/usr", "/var"]:
12535 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12536 "ROOT DIRECTORY ON YOUR SYSTEM."
12537 print >> sys.stderr, \
12538 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12540 if not os.path.exists(cachedir):
12543 ec = portage.eclass_cache.cache(portdb.porttree_root)
12544 myportdir = os.path.realpath(settings["PORTDIR"])
12545 cm = settings.load_best_module("portdbapi.metadbmodule")(
12546 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12548 from portage.cache import util
12550 class percentage_noise_maker(util.quiet_mirroring):
12551 def __init__(self, dbapi):
12553 self.cp_all = dbapi.cp_all()
12554 l = len(self.cp_all)
12555 self.call_update_min = 100000000
12556 self.min_cp_all = l/100.0
12560 def __iter__(self):
12561 for x in self.cp_all:
12563 if self.count > self.min_cp_all:
12564 self.call_update_min = 0
12566 for y in self.dbapi.cp_list(x):
12568 self.call_update_mine = 0
12570 def update(self, *arg):
12571 try: self.pstr = int(self.pstr) + 1
12572 except ValueError: self.pstr = 1
12573 sys.stdout.write("%s%i%%" % \
12574 ("\b" * (len(str(self.pstr))+1), self.pstr))
12576 self.call_update_min = 10000000
12578 def finish(self, *arg):
12579 sys.stdout.write("\b\b\b\b100%\n")
12582 if "--quiet" in myopts:
12583 def quicky_cpv_generator(cp_all_list):
12584 for x in cp_all_list:
12585 for y in portdb.cp_list(x):
12587 source = quicky_cpv_generator(portdb.cp_all())
12588 noise_maker = portage.cache.util.quiet_mirroring()
12590 noise_maker = source = percentage_noise_maker(portdb)
12591 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12592 eclass_cache=ec, verbose_instance=noise_maker)
12595 os.umask(old_umask)
12597 def action_regen(settings, portdb, max_jobs, max_load):
12598 xterm_titles = "notitles" not in settings.features
12599 emergelog(xterm_titles, " === regen")
12600 #regenerate cache entries
12601 portage.writemsg_stdout("Regenerating cache entries...\n")
12603 os.close(sys.stdin.fileno())
12604 except SystemExit, e:
12605 raise # Needed else can't exit
12610 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12613 portage.writemsg_stdout("done!\n")
12615 def action_config(settings, trees, myopts, myfiles):
12616 if len(myfiles) != 1:
12617 print red("!!! config can only take a single package atom at this time\n")
12619 if not is_valid_package_atom(myfiles[0]):
12620 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12622 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12623 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12627 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12628 except portage.exception.AmbiguousPackageName, e:
12629 # Multiple matches thrown from cpv_expand
12632 print "No packages found.\n"
12634 elif len(pkgs) > 1:
12635 if "--ask" in myopts:
12637 print "Please select a package to configure:"
12641 options.append(str(idx))
12642 print options[-1]+") "+pkg
12644 options.append("X")
12645 idx = userquery("Selection?", options)
12648 pkg = pkgs[int(idx)-1]
12650 print "The following packages available:"
12653 print "\nPlease use a specific atom or the --ask option."
12659 if "--ask" in myopts:
12660 if userquery("Ready to configure "+pkg+"?") == "No":
12663 print "Configuring pkg..."
12665 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12666 mysettings = portage.config(clone=settings)
12667 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12668 debug = mysettings.get("PORTAGE_DEBUG") == "1"
12669 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12671 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12672 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12673 if retval == os.EX_OK:
12674 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12675 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12678 def action_info(settings, trees, myopts, myfiles):
12679 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12680 settings.profile_path, settings["CHOST"],
12681 trees[settings["ROOT"]]["vartree"].dbapi)
12683 header_title = "System Settings"
12685 print header_width * "="
12686 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12687 print header_width * "="
12688 print "System uname: "+platform.platform(aliased=1)
12690 lastSync = portage.grabfile(os.path.join(
12691 settings["PORTDIR"], "metadata", "timestamp.chk"))
12692 print "Timestamp of tree:",
12698 output=commands.getstatusoutput("distcc --version")
12700 print str(output[1].split("\n",1)[0]),
12701 if "distcc" in settings.features:
12706 output=commands.getstatusoutput("ccache -V")
12708 print str(output[1].split("\n",1)[0]),
12709 if "ccache" in settings.features:
12714 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12715 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
12716 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12717 myvars = portage.util.unique_array(myvars)
12721 if portage.isvalidatom(x):
12722 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12723 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12724 pkg_matches.sort(portage.pkgcmp)
12726 for pn, ver, rev in pkg_matches:
12728 pkgs.append(ver + "-" + rev)
12732 pkgs = ", ".join(pkgs)
12733 print "%-20s %s" % (x+":", pkgs)
12735 print "%-20s %s" % (x+":", "[NOT VALID]")
12737 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
12739 if "--verbose" in myopts:
12740 myvars=settings.keys()
12742 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12743 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12744 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12745 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12747 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12749 myvars = portage.util.unique_array(myvars)
12755 print '%s="%s"' % (x, settings[x])
12757 use = set(settings["USE"].split())
12758 use_expand = settings["USE_EXPAND"].split()
12760 for varname in use_expand:
12761 flag_prefix = varname.lower() + "_"
12762 for f in list(use):
12763 if f.startswith(flag_prefix):
12767 print 'USE="%s"' % " ".join(use),
12768 for varname in use_expand:
12769 myval = settings.get(varname)
12771 print '%s="%s"' % (varname, myval),
12774 unset_vars.append(x)
12776 print "Unset: "+", ".join(unset_vars)
12779 if "--debug" in myopts:
12780 for x in dir(portage):
12781 module = getattr(portage, x)
12782 if "cvs_id_string" in dir(module):
12783 print "%s: %s" % (str(x), str(module.cvs_id_string))
12785 # See if we can find any packages installed matching the strings
12786 # passed on the command line
12788 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12789 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12791 mypkgs.extend(vardb.match(x))
12793 # If some packages were found...
12795 # Get our global settings (we only print stuff if it varies from
12796 # the current config)
12797 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12798 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12800 pkgsettings = portage.config(clone=settings)
12802 for myvar in mydesiredvars:
12803 global_vals[myvar] = set(settings.get(myvar, "").split())
12805 # Loop through each package
12806 # Only print settings if they differ from global settings
12807 header_title = "Package Settings"
12808 print header_width * "="
12809 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12810 print header_width * "="
12811 from portage.output import EOutput
12814 # Get all package specific variables
12815 auxvalues = vardb.aux_get(pkg, auxkeys)
12817 for i in xrange(len(auxkeys)):
12818 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12820 for myvar in mydesiredvars:
12821 # If the package variable doesn't match the
12822 # current global variable, something has changed
12823 # so set diff_found so we know to print
12824 if valuesmap[myvar] != global_vals[myvar]:
12825 diff_values[myvar] = valuesmap[myvar]
12826 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12827 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12828 pkgsettings.reset()
12829 # If a matching ebuild is no longer available in the tree, maybe it
12830 # would make sense to compare against the flags for the best
12831 # available version with the same slot?
12833 if portdb.cpv_exists(pkg):
12835 pkgsettings.setcpv(pkg, mydb=mydb)
12836 if valuesmap["IUSE"].intersection(
12837 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12838 diff_values["USE"] = valuesmap["USE"]
12839 # If a difference was found, print the info for
12842 # Print package info
12843 print "%s was built with the following:" % pkg
12844 for myvar in mydesiredvars + ["USE"]:
12845 if myvar in diff_values:
12846 mylist = list(diff_values[myvar])
12848 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12850 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12851 ebuildpath = vardb.findname(pkg)
12852 if not ebuildpath or not os.path.exists(ebuildpath):
12853 out.ewarn("No ebuild found for '%s'" % pkg)
12855 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12856 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12857 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12860 def action_search(root_config, myopts, myfiles, spinner):
12862 print "emerge: no search terms provided."
12864 searchinstance = search(root_config,
12865 spinner, "--searchdesc" in myopts,
12866 "--quiet" not in myopts, "--usepkg" in myopts,
12867 "--usepkgonly" in myopts)
12868 for mysearch in myfiles:
12870 searchinstance.execute(mysearch)
12871 except re.error, comment:
12872 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12874 searchinstance.output()
12876 def action_depclean(settings, trees, ldpath_mtimes,
12877 myopts, action, myfiles, spinner):
12878 # Kill packages that aren't explicitly merged or are required as a
12879 # dependency of another package. World file is explicit.
12881 # Global depclean or prune operations are not very safe when there are
12882 # missing dependencies since it's unknown how badly incomplete
12883 # the dependency graph is, and we might accidentally remove packages
12884 # that should have been pulled into the graph. On the other hand, it's
12885 # relatively safe to ignore missing deps when only asked to remove
12886 # specific packages.
12887 allow_missing_deps = len(myfiles) > 0
12890 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12891 msg.append("mistakes. Packages that are part of the world set will always\n")
12892 msg.append("be kept. They can be manually added to this set with\n")
12893 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12894 msg.append("package.provided (see portage(5)) will be removed by\n")
12895 msg.append("depclean, even if they are part of the world set.\n")
12897 msg.append("As a safety measure, depclean will not remove any packages\n")
12898 msg.append("unless *all* required dependencies have been resolved. As a\n")
12899 msg.append("consequence, it is often necessary to run %s\n" % \
12900 good("`emerge --update"))
12901 msg.append(good("--newuse --deep @system @world`") + \
12902 " prior to depclean.\n")
12904 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12905 portage.writemsg_stdout("\n")
12907 portage.writemsg_stdout(colorize("WARN", " * ") + x)
12909 xterm_titles = "notitles" not in settings.features
12910 myroot = settings["ROOT"]
12911 root_config = trees[myroot]["root_config"]
12912 getSetAtoms = root_config.setconfig.getSetAtoms
12913 vardb = trees[myroot]["vartree"].dbapi
12915 required_set_names = ("system", "world")
12919 for s in required_set_names:
12920 required_sets[s] = InternalPackageSet(
12921 initial_atoms=getSetAtoms(s))
12924 # When removing packages, use a temporary version of world
12925 # which excludes packages that are intended to be eligible for
12927 world_temp_set = required_sets["world"]
12928 system_set = required_sets["system"]
12930 if not system_set or not world_temp_set:
12933 writemsg_level("!!! You have no system list.\n",
12934 level=logging.ERROR, noiselevel=-1)
12936 if not world_temp_set:
12937 writemsg_level("!!! You have no world file.\n",
12938 level=logging.WARNING, noiselevel=-1)
12940 writemsg_level("!!! Proceeding is likely to " + \
12941 "break your installation.\n",
12942 level=logging.WARNING, noiselevel=-1)
12943 if "--pretend" not in myopts:
12944 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12946 if action == "depclean":
12947 emergelog(xterm_titles, " >>> depclean")
12950 args_set = InternalPackageSet()
12953 if not is_valid_package_atom(x):
12954 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12955 level=logging.ERROR, noiselevel=-1)
12956 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12959 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12960 except portage.exception.AmbiguousPackageName, e:
12961 msg = "The short ebuild name \"" + x + \
12962 "\" is ambiguous. Please specify " + \
12963 "one of the following " + \
12964 "fully-qualified ebuild names instead:"
12965 for line in textwrap.wrap(msg, 70):
12966 writemsg_level("!!! %s\n" % (line,),
12967 level=logging.ERROR, noiselevel=-1)
12969 writemsg_level(" %s\n" % colorize("INFORM", i),
12970 level=logging.ERROR, noiselevel=-1)
12971 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12974 matched_packages = False
12977 matched_packages = True
12979 if not matched_packages:
12980 writemsg_level(">>> No packages selected for removal by %s\n" % \
12984 writemsg_level("\nCalculating dependencies ")
12985 resolver_params = create_depgraph_params(myopts, "remove")
12986 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12987 vardb = resolver.trees[myroot]["vartree"].dbapi
12989 if action == "depclean":
12992 # Pull in everything that's installed but not matched
12993 # by an argument atom since we don't want to clean any
12994 # package if something depends on it.
12996 world_temp_set.clear()
13001 if args_set.findAtomForPackage(pkg) is None:
13002 world_temp_set.add("=" + pkg.cpv)
13004 except portage.exception.InvalidDependString, e:
13005 show_invalid_depstring_notice(pkg,
13006 pkg.metadata["PROVIDE"], str(e))
13008 world_temp_set.add("=" + pkg.cpv)
13011 elif action == "prune":
13013 # Pull in everything that's installed since we don't
13014 # to prune a package if something depends on it.
13015 world_temp_set.clear()
13016 world_temp_set.update(vardb.cp_all())
13020 # Try to prune everything that's slotted.
13021 for cp in vardb.cp_all():
13022 if len(vardb.cp_list(cp)) > 1:
13025 # Remove atoms from world that match installed packages
13026 # that are also matched by argument atoms, but do not remove
13027 # them if they match the highest installed version.
13030 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13031 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13032 raise AssertionError("package expected in matches: " + \
13033 "cp = %s, cpv = %s matches = %s" % \
13034 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13036 highest_version = pkgs_for_cp[-1]
13037 if pkg == highest_version:
13038 # pkg is the highest version
13039 world_temp_set.add("=" + pkg.cpv)
13042 if len(pkgs_for_cp) <= 1:
13043 raise AssertionError("more packages expected: " + \
13044 "cp = %s, cpv = %s matches = %s" % \
13045 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13048 if args_set.findAtomForPackage(pkg) is None:
13049 world_temp_set.add("=" + pkg.cpv)
13051 except portage.exception.InvalidDependString, e:
13052 show_invalid_depstring_notice(pkg,
13053 pkg.metadata["PROVIDE"], str(e))
13055 world_temp_set.add("=" + pkg.cpv)
13059 for s, package_set in required_sets.iteritems():
13060 set_atom = SETPREFIX + s
13061 set_arg = SetArg(arg=set_atom, set=package_set,
13062 root_config=resolver.roots[myroot])
13063 set_args[s] = set_arg
13064 for atom in set_arg.set:
13065 resolver._dep_stack.append(
13066 Dependency(atom=atom, root=myroot, parent=set_arg))
13067 resolver.digraph.add(set_arg, None)
13069 success = resolver._complete_graph()
13070 writemsg_level("\b\b... done!\n")
13072 resolver.display_problems()
13077 def unresolved_deps():
13079 unresolvable = set()
13080 for dep in resolver._initially_unsatisfied_deps:
13081 if isinstance(dep.parent, Package) and \
13082 (dep.priority > UnmergeDepPriority.SOFT):
13083 unresolvable.add((dep.atom, dep.parent.cpv))
13085 if not unresolvable:
13088 if unresolvable and not allow_missing_deps:
13089 prefix = bad(" * ")
13091 msg.append("Dependencies could not be completely resolved due to")
13092 msg.append("the following required packages not being installed:")
13094 for atom, parent in unresolvable:
13095 msg.append(" %s pulled in by:" % (atom,))
13096 msg.append(" %s" % (parent,))
13098 msg.append("Have you forgotten to run " + \
13099 good("`emerge --update --newuse --deep @system @world`") + " prior")
13100 msg.append(("to %s? It may be necessary to manually " + \
13101 "uninstall packages that no longer") % action)
13102 msg.append("exist in the portage tree since " + \
13103 "it may not be possible to satisfy their")
13104 msg.append("dependencies. Also, be aware of " + \
13105 "the --with-bdeps option that is documented")
13106 msg.append("in " + good("`man emerge`") + ".")
13107 if action == "prune":
13109 msg.append("If you would like to ignore " + \
13110 "dependencies then use %s." % good("--nodeps"))
13111 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13112 level=logging.ERROR, noiselevel=-1)
13116 if unresolved_deps():
13119 graph = resolver.digraph.copy()
13120 required_pkgs_total = 0
13122 if isinstance(node, Package):
13123 required_pkgs_total += 1
13125 def show_parents(child_node):
13126 parent_nodes = graph.parent_nodes(child_node)
13127 if not parent_nodes:
13128 # With --prune, the highest version can be pulled in without any
13129 # real parent since all installed packages are pulled in. In that
13130 # case there's nothing to show here.
13133 for node in parent_nodes:
13134 parent_strs.append(str(getattr(node, "cpv", node)))
13137 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13138 for parent_str in parent_strs:
13139 msg.append(" %s\n" % (parent_str,))
13141 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13143 def create_cleanlist():
13144 pkgs_to_remove = []
13146 if action == "depclean":
13152 arg_atom = args_set.findAtomForPackage(pkg)
13153 except portage.exception.InvalidDependString:
13154 # this error has already been displayed by now
13158 if pkg not in graph:
13159 pkgs_to_remove.append(pkg)
13160 elif "--verbose" in myopts:
13165 if pkg not in graph:
13166 pkgs_to_remove.append(pkg)
13167 elif "--verbose" in myopts:
13170 elif action == "prune":
13171 # Prune really uses all installed instead of world. It's not
13172 # a real reverse dependency so don't display it as such.
13173 graph.remove(set_args["world"])
13175 for atom in args_set:
13176 for pkg in vardb.match_pkgs(atom):
13177 if pkg not in graph:
13178 pkgs_to_remove.append(pkg)
13179 elif "--verbose" in myopts:
13182 if not pkgs_to_remove:
13184 ">>> No packages selected for removal by %s\n" % action)
13185 if "--verbose" not in myopts:
13187 ">>> To see reverse dependencies, use %s\n" % \
13189 if action == "prune":
13191 ">>> To ignore dependencies, use %s\n" % \
13194 return pkgs_to_remove
13196 cleanlist = create_cleanlist()
13199 clean_set = set(cleanlist)
13201 # Check if any of these package are the sole providers of libraries
13202 # with consumers that have not been selected for removal. If so, these
13203 # packages and any dependencies need to be added to the graph.
13204 real_vardb = trees[myroot]["vartree"].dbapi
13205 linkmap = real_vardb.linkmap
13206 liblist = linkmap.listLibraryObjects()
13207 consumer_cache = {}
13208 provider_cache = {}
13212 writemsg_level(">>> Checking for lib consumers...\n")
13214 for pkg in cleanlist:
13215 pkg_dblink = real_vardb._dblink(pkg.cpv)
13216 provided_libs = set()
13218 for lib in liblist:
13219 if pkg_dblink.isowner(lib, myroot):
13220 provided_libs.add(lib)
13222 if not provided_libs:
13226 for lib in provided_libs:
13227 lib_consumers = consumer_cache.get(lib)
13228 if lib_consumers is None:
13229 lib_consumers = linkmap.findConsumers(lib)
13230 consumer_cache[lib] = lib_consumers
13232 consumers[lib] = lib_consumers
13237 for lib, lib_consumers in consumers.items():
13238 for consumer_file in list(lib_consumers):
13239 if pkg_dblink.isowner(consumer_file, myroot):
13240 lib_consumers.remove(consumer_file)
13241 if not lib_consumers:
13247 for lib, lib_consumers in consumers.iteritems():
13249 soname = soname_cache.get(lib)
13251 soname = linkmap.getSoname(lib)
13252 soname_cache[lib] = soname
13254 consumer_providers = []
13255 for lib_consumer in lib_consumers:
13256 providers = provider_cache.get(lib)
13257 if providers is None:
13258 providers = linkmap.findProviders(lib_consumer)
13259 provider_cache[lib_consumer] = providers
13260 if soname not in providers:
13261 # Why does this happen?
13263 consumer_providers.append(
13264 (lib_consumer, providers[soname]))
13266 consumers[lib] = consumer_providers
13268 consumer_map[pkg] = consumers
13272 search_files = set()
13273 for consumers in consumer_map.itervalues():
13274 for lib, consumer_providers in consumers.iteritems():
13275 for lib_consumer, providers in consumer_providers:
13276 search_files.add(lib_consumer)
13277 search_files.update(providers)
13279 writemsg_level(">>> Assigning files to packages...\n")
13280 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13282 for pkg, consumers in consumer_map.items():
13283 for lib, consumer_providers in consumers.items():
13284 lib_consumers = set()
13286 for lib_consumer, providers in consumer_providers:
13287 owner_set = file_owners.get(lib_consumer)
13288 provider_dblinks = set()
13289 provider_pkgs = set()
13291 if len(providers) > 1:
13292 for provider in providers:
13293 provider_set = file_owners.get(provider)
13294 if provider_set is not None:
13295 provider_dblinks.update(provider_set)
13297 if len(provider_dblinks) > 1:
13298 for provider_dblink in provider_dblinks:
13299 pkg_key = ("installed", myroot,
13300 provider_dblink.mycpv, "nomerge")
13301 if pkg_key not in clean_set:
13302 provider_pkgs.add(vardb.get(pkg_key))
13307 if owner_set is not None:
13308 lib_consumers.update(owner_set)
13310 for consumer_dblink in list(lib_consumers):
13311 if ("installed", myroot, consumer_dblink.mycpv,
13312 "nomerge") in clean_set:
13313 lib_consumers.remove(consumer_dblink)
13317 consumers[lib] = lib_consumers
13321 del consumer_map[pkg]
13324 # TODO: Implement a package set for rebuilding consumer packages.
13326 msg = "In order to avoid breakage of link level " + \
13327 "dependencies, one or more packages will not be removed. " + \
13328 "This can be solved by rebuilding " + \
13329 "the packages that pulled them in."
13331 prefix = bad(" * ")
13332 from textwrap import wrap
13333 writemsg_level("".join(prefix + "%s\n" % line for \
13334 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13337 for pkg, consumers in consumer_map.iteritems():
13338 unique_consumers = set(chain(*consumers.values()))
13339 unique_consumers = sorted(consumer.mycpv \
13340 for consumer in unique_consumers)
13342 msg.append(" %s pulled in by:" % (pkg.cpv,))
13343 for consumer in unique_consumers:
13344 msg.append(" %s" % (consumer,))
13346 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13347 level=logging.WARNING, noiselevel=-1)
13349 # Add lib providers to the graph as children of lib consumers,
13350 # and also add any dependencies pulled in by the provider.
13351 writemsg_level(">>> Adding lib providers to graph...\n")
13353 for pkg, consumers in consumer_map.iteritems():
13354 for consumer_dblink in set(chain(*consumers.values())):
13355 consumer_pkg = vardb.get(("installed", myroot,
13356 consumer_dblink.mycpv, "nomerge"))
13357 if not resolver._add_pkg(pkg,
13358 Dependency(parent=consumer_pkg,
13359 priority=UnmergeDepPriority(runtime=True),
13361 resolver.display_problems()
13364 writemsg_level("\nCalculating dependencies ")
13365 success = resolver._complete_graph()
13366 writemsg_level("\b\b... done!\n")
13367 resolver.display_problems()
13370 if unresolved_deps():
13373 graph = resolver.digraph.copy()
13374 required_pkgs_total = 0
13376 if isinstance(node, Package):
13377 required_pkgs_total += 1
13378 cleanlist = create_cleanlist()
13381 clean_set = set(cleanlist)
13383 # Use a topological sort to create an unmerge order such that
13384 # each package is unmerged before it's dependencies. This is
13385 # necessary to avoid breaking things that may need to run
13386 # during pkg_prerm or pkg_postrm phases.
13388 # Create a new graph to account for dependencies between the
13389 # packages being unmerged.
13393 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13394 runtime = UnmergeDepPriority(runtime=True)
13395 runtime_post = UnmergeDepPriority(runtime_post=True)
13396 buildtime = UnmergeDepPriority(buildtime=True)
13398 "RDEPEND": runtime,
13399 "PDEPEND": runtime_post,
13400 "DEPEND": buildtime,
13403 for node in clean_set:
13404 graph.add(node, None)
13406 node_use = node.metadata["USE"].split()
13407 for dep_type in dep_keys:
13408 depstr = node.metadata[dep_type]
13412 portage.dep._dep_check_strict = False
13413 success, atoms = portage.dep_check(depstr, None, settings,
13414 myuse=node_use, trees=resolver._graph_trees,
13417 portage.dep._dep_check_strict = True
13419 # Ignore invalid deps of packages that will
13420 # be uninstalled anyway.
13423 priority = priority_map[dep_type]
13425 if not isinstance(atom, portage.dep.Atom):
13426 # Ignore invalid atoms returned from dep_check().
13430 matches = vardb.match_pkgs(atom)
13433 for child_node in matches:
13434 if child_node in clean_set:
13435 graph.add(child_node, node, priority=priority)
13438 if len(graph.order) == len(graph.root_nodes()):
13439 # If there are no dependencies between packages
13440 # let unmerge() group them by cat/pn.
13442 cleanlist = [pkg.cpv for pkg in graph.order]
13444 # Order nodes from lowest to highest overall reference count for
13445 # optimal root node selection.
13446 node_refcounts = {}
13447 for node in graph.order:
13448 node_refcounts[node] = len(graph.parent_nodes(node))
13449 def cmp_reference_count(node1, node2):
13450 return node_refcounts[node1] - node_refcounts[node2]
13451 graph.order.sort(cmp_reference_count)
13453 ignore_priority_range = [None]
13454 ignore_priority_range.extend(
13455 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13456 while not graph.empty():
13457 for ignore_priority in ignore_priority_range:
13458 nodes = graph.root_nodes(ignore_priority=ignore_priority)
13462 raise AssertionError("no root nodes")
13463 if ignore_priority is not None:
13464 # Some deps have been dropped due to circular dependencies,
13465 # so only pop one node in order do minimize the number that
13470 cleanlist.append(node.cpv)
13472 unmerge(root_config, myopts, "unmerge", cleanlist,
13473 ldpath_mtimes, ordered=ordered)
13475 if action == "prune":
13478 if not cleanlist and "--quiet" in myopts:
13481 print "Packages installed: "+str(len(vardb.cpv_all()))
13482 print "Packages in world: " + \
13483 str(len(root_config.sets["world"].getAtoms()))
13484 print "Packages in system: " + \
13485 str(len(root_config.sets["system"].getAtoms()))
13486 print "Required packages: "+str(required_pkgs_total)
13487 if "--pretend" in myopts:
13488 print "Number to remove: "+str(len(cleanlist))
13490 print "Number removed: "+str(len(cleanlist))
13492 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13493 skip_masked=False, skip_unsatisfied=False):
13495 Construct a depgraph for the given resume list. This will raise
13496 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13498 @returns: (success, depgraph, dropped_tasks)
13500 mergelist = mtimedb["resume"]["mergelist"]
13501 dropped_tasks = set()
13503 mydepgraph = depgraph(settings, trees,
13504 myopts, myparams, spinner)
13506 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13507 skip_masked=skip_masked)
13508 except depgraph.UnsatisfiedResumeDep, e:
13509 if not skip_unsatisfied:
13512 graph = mydepgraph.digraph
13513 unsatisfied_parents = dict((dep.parent, dep.parent) \
13514 for dep in e.value)
13515 traversed_nodes = set()
13516 unsatisfied_stack = list(unsatisfied_parents)
13517 while unsatisfied_stack:
13518 pkg = unsatisfied_stack.pop()
13519 if pkg in traversed_nodes:
13521 traversed_nodes.add(pkg)
13523 # If this package was pulled in by a parent
13524 # package scheduled for merge, removing this
13525 # package may cause the the parent package's
13526 # dependency to become unsatisfied.
13527 for parent_node in graph.parent_nodes(pkg):
13528 if not isinstance(parent_node, Package) \
13529 or parent_node.operation not in ("merge", "nomerge"):
13532 graph.child_nodes(parent_node,
13533 ignore_priority=DepPriority.SOFT)
13534 if pkg in unsatisfied:
13535 unsatisfied_parents[parent_node] = parent_node
13536 unsatisfied_stack.append(parent_node)
13538 pruned_mergelist = [x for x in mergelist \
13539 if isinstance(x, list) and \
13540 tuple(x) not in unsatisfied_parents]
13542 # If the mergelist doesn't shrink then this loop is infinite.
13543 if len(pruned_mergelist) == len(mergelist):
13544 # This happens if a package can't be dropped because
13545 # it's already installed, but it has unsatisfied PDEPEND.
13547 mergelist[:] = pruned_mergelist
13549 # Exclude installed packages that have been removed from the graph due
13550 # to failure to build/install runtime dependencies after the dependent
13551 # package has already been installed.
13552 dropped_tasks.update(pkg for pkg in \
13553 unsatisfied_parents if pkg.operation != "nomerge")
13554 mydepgraph.break_refs(unsatisfied_parents)
13556 del e, graph, traversed_nodes, \
13557 unsatisfied_parents, unsatisfied_stack
13561 return (success, mydepgraph, dropped_tasks)
def action_build(settings, trees, mtimedb,
    myopts, myaction, myfiles, spinner):
    """Top-level handler for build/merge actions.

    Validates any saved resume state in ``mtimedb``, builds (or resumes)
    a dependency graph, optionally displays it and prompts the user,
    then hands the merge list to a Scheduler and finally auto-cleans.

    NOTE(review): this excerpt is missing a number of original lines
    (``continue``/``return``/``else:``/``try:`` and similar).  Gaps are
    marked ``# [...]`` below and must be confirmed against full source.
    """

    # validate the state of the resume data
    # so that we can make assumptions later.
    for k in ("resume", "resume_backup"):
        if k not in mtimedb:
            # [...] (line(s) elided in this excerpt)
        resume_data = mtimedb[k]
        if not isinstance(resume_data, dict):
            # [...]
        mergelist = resume_data.get("mergelist")
        if not isinstance(mergelist, list):
            # [...]
        # Every mergelist entry is expected to be a 4-element list.
        for x in mergelist:
            if not (isinstance(x, list) and len(x) == 4):
                # [...]
            pkg_type, pkg_root, pkg_key, pkg_action = x
            if pkg_root not in trees:
                # Current $ROOT setting differs,
                # so the list must be stale.
                # [...]
        resume_opts = resume_data.get("myopts")
        if not isinstance(resume_opts, (dict, list)):
            # [...]
        favorites = resume_data.get("favorites")
        if not isinstance(favorites, list):
            # [...]

    if "--resume" in myopts and \
        ("resume" in mtimedb or
        "resume_backup" in mtimedb):
        # Promote the backup resume data when no primary entry exists.
        if "resume" not in mtimedb:
            mtimedb["resume"] = mtimedb["resume_backup"]
            del mtimedb["resume_backup"]
        # "myopts" is a list for backward compatibility.
        resume_opts = mtimedb["resume"].get("myopts", [])
        if isinstance(resume_opts, list):
            resume_opts = dict((k,True) for k in resume_opts)
        # These options must not carry over into the resumed run.
        for opt in ("--skipfirst", "--ask", "--tree"):
            resume_opts.pop(opt, None)
        myopts.update(resume_opts)
        if "--debug" in myopts:
            writemsg_level("myopts %s\n" % (myopts,))
        # Adjust config according to options of the command being resumed.
        for myroot in trees:
            mysettings = trees[myroot]["vartree"].settings
            mysettings.unlock()
            adjust_config(myopts, mysettings)
            # [...] (presumably a re-lock call elided -- TODO confirm)
        del myroot, mysettings

    ldpath_mtimes = mtimedb["ldpath"]
    # Cache frequently tested option flags as local booleans.
    buildpkgonly = "--buildpkgonly" in myopts
    pretend = "--pretend" in myopts
    fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
    ask = "--ask" in myopts
    nodeps = "--nodeps" in myopts
    oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
    tree = "--tree" in myopts
    if nodeps and tree:
        # --tree output needs dependency information, so drop it.
        del myopts["--tree"]
        portage.writemsg(colorize("WARN", " * ") + \
            "--tree is broken with --nodeps. Disabling...\n")
    debug = "--debug" in myopts
    verbose = "--verbose" in myopts
    quiet = "--quiet" in myopts
    if pretend or fetchonly:
        # make the mtimedb readonly
        mtimedb.filename = None
    if "--digest" in myopts:
        # Warn that --digest can mask manifest corruption.
        msg = "The --digest option can prevent corruption from being" + \
            " noticed. The `repoman manifest` command is the preferred" + \
            " way to generate manifests and it is capable of doing an" + \
            " entire repository or category at once."
        prefix = bad(" * ")
        writemsg(prefix + "\n")
        from textwrap import wrap
        for line in wrap(msg, 72):
            writemsg("%s%s\n" % (prefix, line))
        writemsg(prefix + "\n")

    # Print a header describing what the displayed package list means.
    if "--quiet" not in myopts and \
        ("--pretend" in myopts or "--ask" in myopts or \
        "--tree" in myopts or "--verbose" in myopts):
        if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
            # [...] (action assignment elided)
        elif "--buildpkgonly" in myopts:
            # [...]
        if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
            print darkgreen("These are the packages that would be %s, in reverse order:") % action
        # [...] (else: branch header elided)
            print darkgreen("These are the packages that would be %s, in order:") % action

    show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
    if not show_spinner:
        spinner.update = spinner.update_quiet

    # [...] (resume-branch header elided; the following block handles --resume)
        favorites = mtimedb["resume"].get("favorites")
        if not isinstance(favorites, list):
            # [...]
        print "Calculating dependencies ",
        myparams = create_depgraph_params(myopts, myaction)
        resume_data = mtimedb["resume"]
        mergelist = resume_data["mergelist"]
        if mergelist and "--skipfirst" in myopts:
            # Drop the first pending "merge" entry from the list.
            for i, task in enumerate(mergelist):
                if isinstance(task, list) and \
                    task and task[-1] == "merge":
                    # [...]
        skip_masked = "--skipfirst" in myopts
        skip_unsatisfied = "--skipfirst" in myopts
        # [...] (try: header elided)
            success, mydepgraph, dropped_tasks = resume_depgraph(
                settings, trees, mtimedb, myopts, myparams, spinner,
                skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
        except (portage.exception.PackageNotFound,
            depgraph.UnsatisfiedResumeDep), e:
            # Keep the partial graph for problem display below.
            if isinstance(e, depgraph.UnsatisfiedResumeDep):
                mydepgraph = e.depgraph
            from textwrap import wrap
            from portage.output import EOutput
            resume_data = mtimedb["resume"]
            mergelist = resume_data.get("mergelist")
            if not isinstance(mergelist, list):
                # [...]
            if mergelist and debug or (verbose and not quiet):
                out.eerror("Invalid resume list:")
                for task in mergelist:
                    if isinstance(task, list):
                        out.eerror(indent + str(tuple(task)))
            if isinstance(e, depgraph.UnsatisfiedResumeDep):
                out.eerror("One or more packages are either masked or " + \
                    "have missing dependencies:")
                for dep in e.value:
                    if dep.atom is None:
                        out.eerror(indent + "Masked package:")
                        out.eerror(2 * indent + str(dep.parent))
                    # [...] (else: branch header elided)
                        out.eerror(indent + str(dep.atom) + " pulled in by:")
                        out.eerror(2 * indent + str(dep.parent))
                msg = "The resume list contains packages " + \
                    "that are either masked or have " + \
                    "unsatisfied dependencies. " + \
                    "Please restart/continue " + \
                    "the operation manually, or use --skipfirst " + \
                    "to skip the first package in the list and " + \
                    "any other packages that may be " + \
                    "masked or have missing dependencies."
                for line in wrap(msg, 72):
                    # [...]
            elif isinstance(e, portage.exception.PackageNotFound):
                out.eerror("An expected package is " + \
                    "not available: %s" % str(e))
                msg = "The resume list contains one or more " + \
                    "packages that are no longer " + \
                    "available. Please restart/continue " + \
                    "the operation manually."
                for line in wrap(msg, 72):
                    # [...]
        # [...] (try/else: success path header elided)
            print "\b\b... done!"

        # [...] (success / dropped_tasks branch headers elided)
            portage.writemsg("!!! One or more packages have been " + \
                "dropped due to\n" + \
                "!!! masking or unsatisfied dependencies:\n\n",
                # [...]
            for task in dropped_tasks:
                portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
            portage.writemsg("\n", noiselevel=-1)
        # [...] (failure branch header elided)
        if mydepgraph is not None:
            mydepgraph.display_problems()
        if not (ask or pretend):
            # delete the current list and also the backup
            # since it's probably stale too.
            for k in ("resume", "resume_backup"):
                mtimedb.pop(k, None)

    # [...] (else: non-resume graph construction; header elided)
        if ("--resume" in myopts):
            print darkgreen("emerge: It seems we have nothing to resume...")
        myparams = create_depgraph_params(myopts, myaction)
        if "--quiet" not in myopts and "--nodeps" not in myopts:
            print "Calculating dependencies ",
        mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
        # [...] (try: header elided)
            retval, favorites = mydepgraph.select_files(myfiles)
        except portage.exception.PackageNotFound, e:
            portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
        except portage.exception.PackageSetNotFound, e:
            root_config = trees[settings["ROOT"]]["root_config"]
            display_missing_pkg_set(root_config, e.value)
        print "\b\b... done!"
        mydepgraph.display_problems()

    # Interactive display/confirmation path (--ask/--tree/--verbose).
    if "--pretend" not in myopts and \
        ("--ask" in myopts or "--tree" in myopts or \
        "--verbose" in myopts) and \
        not ("--quiet" in myopts and "--ask" not in myopts):
        if "--resume" in myopts:
            mymergelist = mydepgraph.altlist()
            if len(mymergelist) == 0:
                print colorize("INFORM", "emerge: It seems we have nothing to resume...")
            favorites = mtimedb["resume"]["favorites"]
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=tree),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
                # [...]
            prompt="Would you like to resume merging these packages?"
        # [...] (else: branch header elided)
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=("--tree" in myopts)),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
                # [...]
            # Count how many entries actually require a merge.
            for x in mydepgraph.altlist():
                if isinstance(x, Package) and x.operation == "merge":
                    # [...]
            # [...] (nothing-to-merge branch header elided)
                sets = trees[settings["ROOT"]]["root_config"].sets
                world_candidates = None
                if "--noreplace" in myopts and \
                    not oneshot and favorites:
                    # Sets that are not world candidates are filtered
                    # out here since the favorites list needs to be
                    # complete for depgraph.loadResumeCommand() to
                    # operate correctly.
                    world_candidates = [x for x in favorites \
                        if not (x.startswith(SETPREFIX) and \
                        not sets[x[1:]].world_candidate)]
                if "--noreplace" in myopts and \
                    not oneshot and world_candidates:
                    for x in world_candidates:
                        print " %s %s" % (good("*"), x)
                    prompt="Would you like to add these packages to your world favorites?"
                elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
                    prompt="Nothing to merge; would you like to auto-clean packages?"
                # [...] (else: branch header elided)
                    print "Nothing to merge; quitting."
            elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
                prompt="Would you like to fetch the source files for these packages?"
            # [...] (else: branch header elided)
                prompt="Would you like to merge these packages?"
        if "--ask" in myopts and userquery(prompt) == "No":
            # [...] (quit path elided)
        # Don't ask again (e.g. when auto-cleaning packages after merge)
        myopts.pop("--ask", None)

    # Pretend-only display path (no fetching involved).
    if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
        if ("--resume" in myopts):
            mymergelist = mydepgraph.altlist()
            if len(mymergelist) == 0:
                print colorize("INFORM", "emerge: It seems we have nothing to resume...")
            favorites = mtimedb["resume"]["favorites"]
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=tree),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
                # [...]
        # [...] (else: branch header elided)
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=("--tree" in myopts)),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
                # [...]
            if "--buildpkgonly" in myopts:
                # --buildpkgonly only works when no deps remain unmerged.
                graph_copy = mydepgraph.digraph.clone()
                for node in list(graph_copy.order):
                    if not isinstance(node, Package):
                        graph_copy.remove(node)
                if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
                    print "\n!!! --buildpkgonly requires all dependencies to be merged."
                    print "!!! You have to merge the dependencies before you can build this package.\n"
    # [...] (else: actual merge path; header elided)
        if "--buildpkgonly" in myopts:
            graph_copy = mydepgraph.digraph.clone()
            for node in list(graph_copy.order):
                if not isinstance(node, Package):
                    graph_copy.remove(node)
            if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
                print "\n!!! --buildpkgonly requires all dependencies to be merged."
                print "!!! Cannot merge requested packages. Merge deps and try again.\n"
        if ("--resume" in myopts):
            favorites=mtimedb["resume"]["favorites"]
            mymergelist = mydepgraph.altlist()
            # Break cycles so the graph can be garbage collected early.
            mydepgraph.break_refs(mymergelist)
            mergetask = Scheduler(settings, trees, mtimedb, myopts,
                spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
            del mydepgraph, mymergelist
            clear_caches(trees)
            retval = mergetask.merge()
            merge_count = mergetask.curval
        # [...] (else: fresh merge; header elided)
            # Back up a still-useful resume list before replacing it.
            if "resume" in mtimedb and \
                "mergelist" in mtimedb["resume"] and \
                len(mtimedb["resume"]["mergelist"]) > 1:
                mtimedb["resume_backup"] = mtimedb["resume"]
                del mtimedb["resume"]
            mtimedb["resume"]={}
            # Stored as a dict starting with portage-2.1.6_rc1, and supported
            # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
            # a list type for options.
            mtimedb["resume"]["myopts"] = myopts.copy()
            # Convert Atom instances to plain str since the mtimedb loader
            # sets unpickler.find_global = None which causes unpickler.load()
            # to raise the following exception:
            #
            # cPickle.UnpicklingError: Global and instance pickles are not supported.
            #
            # TODO: Maybe stop setting find_global = None, or find some other
            # way to avoid accidental triggering of the above UnpicklingError.
            mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
            if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
                # Regenerate digests for every ebuild about to be merged.
                for pkgline in mydepgraph.altlist():
                    if pkgline[0]=="ebuild" and pkgline[3]=="merge":
                        y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
                        tmpsettings = portage.config(clone=settings)
                        if settings.get("PORTAGE_DEBUG", "") == "1":
                            # [...]
                        retval = portage.doebuild(
                            y, "digest", settings["ROOT"], tmpsettings, edebug,
                            ("--pretend" in myopts),
                            mydbapi=trees[pkgline[1]]["porttree"].dbapi,
                            # [...] (trailing kwarg(s) elided)
            pkglist = mydepgraph.altlist()
            mydepgraph.saveNomergeFavorites()
            mydepgraph.break_refs(pkglist)
            mergetask = Scheduler(settings, trees, mtimedb, myopts,
                spinner, pkglist, favorites, mydepgraph.schedulerGraph())
            del mydepgraph, pkglist
            clear_caches(trees)
            retval = mergetask.merge()
            merge_count = mergetask.curval

        # After a successful real merge, optionally auto-clean.
        if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
            if "yes" == settings.get("AUTOCLEAN"):
                portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
                unmerge(trees[settings["ROOT"]]["root_config"],
                    myopts, "clean", [],
                    ldpath_mtimes, autoclean=1)
            # [...] (else: branch header elided)
                portage.writemsg_stdout(colorize("WARN", "WARNING:")
                    + " AUTOCLEAN is disabled. This can cause serious"
                    + " problems due to overlapping packages.\n")
            trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
    """Report that two mutually exclusive action options were given.

    Writes an error to stderr naming both conflicting actions.
    NOTE(review): the original presumably also terminates the program
    here; the exit call is elided from this excerpt -- confirm.
    """
    sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
    sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
def insert_optional_args(args):
    """
    Parse optional arguments and insert a value if one has
    not been provided. This is done before feeding the args
    to the optparse parser since that parser does not support
    this feature natively.
    """
    # Currently only --jobs/-j takes an optional argument.
    jobs_opts = ("-j", "--jobs")
    # Process args via a reversed stack so lookahead is one pop away.
    arg_stack = args[:]
    arg_stack.reverse()
    # [...] (loop header elided in this excerpt)
        arg = arg_stack.pop()
        # Matches bundled short options containing "j", e.g. "-j3" / "-aj".
        short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
        if not (short_job_opt or arg in jobs_opts):
            new_args.append(arg)
            # [...]
        # Insert an empty placeholder in order to
        # satisfy the requirements of optparse.
        new_args.append("--jobs")
        if short_job_opt and len(arg) > 2:
            if arg[:2] == "-j":
                # [...]
                job_count = int(arg[2:])
                # [...]
                saved_opts = arg[2:]
            # [...] (else: branch header elided)
                saved_opts = arg[1:].replace("j", "")
        if job_count is None and arg_stack:
            # [...]
            job_count = int(arg_stack[-1])
            # [...]
            # Discard the job count from the stack
            # since we're consuming it here.
            # [...]
        if job_count is None:
            # unlimited number of jobs
            new_args.append("True")
        # [...] (else: branch header elided)
        new_args.append(str(job_count))
        if saved_opts is not None:
            # Re-inject any remaining bundled short options.
            new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
    """Parse the emerge command line into (myaction, myopts, myfiles).

    Builds an optparse parser from the module-level ``actions``,
    ``options`` and ``shortmapping`` tables plus the argument-taking
    options declared below, then post-validates --jobs and
    --load-average.  NOTE(review): several lines (dict entry headers,
    try/except, guards) are elided from this excerpt ([...]).
    """
    global actions, options, shortmapping

    # Alternate spellings that map onto existing long options.
    longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
    # Options that take an argument, keyed by option string, with their
    # optparse add_option() keyword arguments as values.
    argument_options = {
        # [...] (option key / surrounding entries elided)
        "help":"specify the location for portage configuration files",
        # [...]
        "help":"enable or disable color output",
        "choices":("y", "n")
        # [...]
        "help" : "Specifies the number of packages to build " + \
        # [...]
        "--load-average": {
        "help" :"Specifies that no new builds should be started " + \
            "if there are other builds running and the load average " + \
            "is at least LOAD (a floating-point number).",
        # [...]
        "help":"include unnecessary build time dependencies",
        "choices":("y", "n")
        # [...]
        "help":"specify conditions to trigger package reinstallation",
        "choices":["changed-use"]
        # [...] (remaining entries and closing brace elided)

    from optparse import OptionParser
    parser = OptionParser()
    # emerge implements its own --help handling.
    if parser.has_option("--help"):
        parser.remove_option("--help")

    for action_opt in actions:
        parser.add_option("--" + action_opt, action="store_true",
            dest=action_opt.replace("-", "_"), default=False)
    for myopt in options:
        parser.add_option(myopt, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for shortopt, longopt in shortmapping.iteritems():
        parser.add_option("-" + shortopt, action="store_true",
            dest=longopt.lstrip("--").replace("-", "_"), default=False)
    for myalias, myopt in longopt_aliases.iteritems():
        parser.add_option(myalias, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for myopt, kwargs in argument_options.iteritems():
        parser.add_option(myopt,
            dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

    # Fill in implicit option values (e.g. bare -j) before optparse runs.
    tmpcmdline = insert_optional_args(tmpcmdline)

    myoptions, myargs = parser.parse_args(args=tmpcmdline)

    # Normalize/validate the --jobs value.
    if myoptions.jobs == "True":
        # [...]
    # [...] (try: header elided)
        jobs = int(myoptions.jobs)
    # [...] (except/validation lines elided)
    if jobs is not True and \
        # [...] (rest of the condition elided)
        writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
            (myoptions.jobs,), noiselevel=-1)
    myoptions.jobs = jobs

    # Normalize/validate the --load-average value.
    if myoptions.load_average:
        # [...] (try: header elided)
        load_average = float(myoptions.load_average)
        # [...]
        if load_average <= 0.0:
            load_average = None
            # [...]
            writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
                (myoptions.load_average,), noiselevel=-1)
        myoptions.load_average = load_average

    # Collect boolean options that were set.
    for myopt in options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
        # [...] (guard elided)
        myopts[myopt] = True
    # Collect argument-taking options that were provided.
    for myopt in argument_options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
        # [...] (assignment elided)
    # Determine the single requested action; conflicts are fatal.
    for action_opt in actions:
        v = getattr(myoptions, action_opt.replace("-", "_"))
        # [...] (guards elided)
        multiple_actions(myaction, action_opt)
        # [...]
        myaction = action_opt

    return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
    """Run config validation for every configured root.

    Invokes ``validate()`` on each root's vartree settings object.
    """
    for root_trees in trees.values():
        root_trees["vartree"].settings.validate()
def clear_caches(trees):
    """Release memoized state held by each root's tree dbapis, then
    flush portage's global directory-listing cache."""
    for root_trees in trees.itervalues():
        porttree_dbapi = root_trees["porttree"].dbapi
        bintree_dbapi = root_trees["bintree"].dbapi
        porttree_dbapi.melt()
        porttree_dbapi._aux_cache.clear()
        bintree_dbapi._aux_cache.clear()
        bintree_dbapi._clear_cache()
        root_trees["vartree"].dbapi.linkmap._clear_cache()
    portage.dircache.clear()
def load_emerge_config(trees=None):
    """Create/refresh the portage trees, attach a RootConfig to each
    root, and open the mtimedb.  Returns (settings, trees, mtimedb).

    NOTE(review): some lines are elided from this excerpt ([...]).
    """
    # Forward relevant environment overrides to create_trees().
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        v = os.environ.get(envvar, None)
        if v and v.strip():
            # [...] (kwargs assignment elided)
    trees = portage.create_trees(trees=trees, **kwargs)

    # Attach the set configuration wrapper to every root.
    for root, root_trees in trees.iteritems():
        settings = root_trees["vartree"].settings
        setconfig = load_default_config(settings, root_trees)
        root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

    settings = trees["/"]["vartree"].settings

    # Reassign settings based on some per-root condition -- the guard is
    # elided from this excerpt; TODO confirm against full source.
    for myroot in trees:
        # [...]
        settings = trees[myroot]["vartree"].settings

    mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
    mtimedb = portage.MtimeDB(mtimedbfile)

    return settings, trees, mtimedb
def adjust_config(myopts, settings):
    """Make emerge specific adjustments to the config."""
    # NOTE(review): several lines (defaults, try:/else: headers) are
    # elided from this excerpt; gaps are marked [...].

    # To enhance usability, make some vars case insensitive by forcing them to
    # [...] (comment continuation elided)
    for myvar in ("AUTOCLEAN", "NOCOLOR"):
        if myvar in settings:
            settings[myvar] = settings[myvar].lower()
            settings.backup_changes(myvar)

    # Kill noauto as it will break merges otherwise.
    if "noauto" in settings.features:
        while "noauto" in settings.features:
            settings.features.remove("noauto")
        settings["FEATURES"] = " ".join(settings.features)
        settings.backup_changes("FEATURES")

    # Parse CLEAN_DELAY, falling back on a parse error.
    # [...] (default assignment and try: header elided)
        CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
            settings["CLEAN_DELAY"], noiselevel=-1)
    settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
    settings.backup_changes("CLEAN_DELAY")

    # Parse EMERGE_WARNING_DELAY the same way.
    EMERGE_WARNING_DELAY = 10
    # [...] (try: header elided)
        EMERGE_WARNING_DELAY = int(settings.get(
            "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
            settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
    settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
    settings.backup_changes("EMERGE_WARNING_DELAY")

    if "--quiet" in myopts:
        settings["PORTAGE_QUIET"]="1"
        settings.backup_changes("PORTAGE_QUIET")

    if "--verbose" in myopts:
        settings["PORTAGE_VERBOSE"] = "1"
        settings.backup_changes("PORTAGE_VERBOSE")

    # Set so that configs will be merged regardless of remembered status
    if ("--noconfmem" in myopts):
        settings["NOCONFMEM"]="1"
        settings.backup_changes("NOCONFMEM")

    # Set various debug markers... They should be merged somehow.
    # [...] (default assignment and try: header elided)
        PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
        if PORTAGE_DEBUG not in (0, 1):
            portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
                PORTAGE_DEBUG, noiselevel=-1)
            portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
                # [...] (trailing argument elided)
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
            settings["PORTAGE_DEBUG"], noiselevel=-1)
    if "--debug" in myopts:
        # [...] (PORTAGE_DEBUG override elided)
    settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
    settings.backup_changes("PORTAGE_DEBUG")

    if settings.get("NOCOLOR") not in ("yes","true"):
        portage.output.havecolor = 1

    """The explicit --color < y | n > option overrides the NOCOLOR environment
    variable and stdout auto-detection."""
    if "--color" in myopts:
        if "y" == myopts["--color"]:
            portage.output.havecolor = 1
            settings["NOCOLOR"] = "false"
        # [...] (else: branch header elided)
            portage.output.havecolor = 0
            settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
    elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
        portage.output.havecolor = 0
        settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
    """Apply process priority adjustments taken from settings.

    NOTE(review): the body is elided from this excerpt -- presumably it
    calls the ``nice``/``ionice`` helpers defined below; confirm against
    the full source.
    """
def nice(settings):
    """Renice the current process to $PORTAGE_NICENESS (default "0");
    failures are reported via EOutput rather than raised.

    NOTE(review): the opening ``try:`` line is elided from this excerpt.
    """
        os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
    except (OSError, ValueError), e:
        out = portage.output.EOutput()
        out.eerror("Failed to change nice value to '%s'" % \
            settings["PORTAGE_NICENESS"])
        out.eerror("%s\n" % str(e))
def ionice(settings):
    """Run $PORTAGE_IONICE_COMMAND (with variables such as ${PID}
    expanded) to adjust the current process's I/O priority; a missing
    ionice executable is ignored silently.

    NOTE(review): some guard/return lines are elided ([...]).
    """
    ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
    # [...] (guard elided)
    ionice_cmd = shlex.split(ionice_cmd)
    # [...] (empty-command early exit elided)

    # Substitute configured variables (e.g. ${PID}) into the command.
    from portage.util import varexpand
    variables = {"PID" : str(os.getpid())}
    cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

    # [...] (try: header elided)
        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        # The OS kernel probably doesn't support ionice,
        # so return silently.
        # [...]

    if rval != os.EX_OK:
        out = portage.output.EOutput()
        out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
        out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
    """Print an error explaining that ``set_name`` does not exist,
    followed by the names of the sets that do.

    NOTE(review): the ``msg`` list initialization is elided ([...]).
    """
    # [...]
    msg.append(("emerge: There are no sets to satisfy '%s'. " + \
        "The following sets exist:") % \
        colorize("INFORM", set_name))

    for s in sorted(root_config.sets):
        msg.append(" %s" % s)

    writemsg_level("".join("%s\n" % l for l in msg),
        level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
    """Expand @set arguments (including inline set options and the /@,
    -@, +@ set operators) into concrete arguments.

    Returns a (newargs, retval) tuple.  NOTE(review): many lines
    (loop/else/try headers, returns) are elided from this excerpt and
    marked [...]; confirm structure against the full source.
    """
    setconfig = root_config.setconfig

    sets = setconfig.getSets()

    # In order to know exactly which atoms/sets should be added to the
    # world file, the depgraph performs set expansion later. It will get
    # confused about where the atoms came from if it's not allowed to
    # expand them itself.
    do_not_expand = (None, )
    # Normalize the bare "system"/"world" arguments to SETPREFIX form.
    # [...] (list initialization and loop header elided)
        if a in ("system", "world"):
            newargs.append(SETPREFIX+a)

    # separators for set arguments
    # [...] (ARG_START/ARG_END and IS_OPERATOR definitions elided)
    # WARNING: all operators must be of equal length
    DIFF_OPERATOR = "-@"
    UNION_OPERATOR = "+@"

    # Parse inline "name{opt=val,...}" set options and strip them from
    # the argument, feeding them to setconfig.update().
    for i in range(0, len(myfiles)):
        if myfiles[i].startswith(SETPREFIX):
            # [...]
            x = myfiles[i][len(SETPREFIX):]
            # [...] (inner loop header elided)
                start = x.find(ARG_START)
                end = x.find(ARG_END)
                if start > 0 and start < end:
                    namepart = x[:start]
                    argpart = x[start+1:end]

                    # TODO: implement proper quoting
                    args = argpart.split(",")
                    # [...] (options dict / loop headers elided)
                        k, v = a.split("=", 1)
                        # [...]
                        options[a] = "True"
                    setconfig.update(namepart, options)
                    newset += (x[:start-len(namepart)]+namepart)
                    x = x[end+len(ARG_END):]
                # [...] (else: terminate scan; elided)
            myfiles[i] = SETPREFIX+newset

    sets = setconfig.getSets()

    # display errors that occured while loading the SetConfig instance
    for e in setconfig.errors:
        print colorize("BAD", "Error during set creation: %s" % e)

    # emerge relies on the existance of sets with names "world" and "system"
    required_sets = ("world", "system")

    for s in required_sets:
        # [...] (membership guard elided)
        missing_sets.append(s)

    # Build a human-readable list of the missing required sets.
    if len(missing_sets) > 2:
        missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
        missing_sets_str += ', and "%s"' % missing_sets[-1]
    elif len(missing_sets) == 2:
        missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
    # [...] (else: branch header elided)
        missing_sets_str = '"%s"' % missing_sets[-1]
    msg = ["emerge: incomplete set configuration, " + \
        "missing set(s): %s" % missing_sets_str]
    # [...]
    msg.append(" sets defined: %s" % ", ".join(sets))
    msg.append(" This usually means that '%s'" % \
        (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
    msg.append(" is missing or corrupt.")
    # [...] (loop header elided)
        writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

    unmerge_actions = ("unmerge", "prune", "clean", "depclean")

    # [...] (loop header elided)
        if a.startswith(SETPREFIX):
            # support simple set operations (intersection, difference and union)
            # on the commandline. Expressions are evaluated strictly left-to-right
            if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
                expression = a[len(SETPREFIX):]
                # Peel operators off right-to-left, collecting operand
                # sets and their operators in evaluation order.
                while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
                    is_pos = expression.rfind(IS_OPERATOR)
                    diff_pos = expression.rfind(DIFF_OPERATOR)
                    union_pos = expression.rfind(UNION_OPERATOR)
                    op_pos = max(is_pos, diff_pos, union_pos)
                    s1 = expression[:op_pos]
                    s2 = expression[op_pos+len(IS_OPERATOR):]
                    op = expression[op_pos:op_pos+len(IS_OPERATOR)]
                    # [...] (existence guard elided)
                    display_missing_pkg_set(root_config, s2)
                    # [...]
                    expr_sets.insert(0, s2)
                    expr_ops.insert(0, op)
                if not expression in sets:
                    display_missing_pkg_set(root_config, expression)
                    # [...]
                expr_sets.insert(0, expression)
                # Evaluate the collected operators left-to-right.
                result = set(setconfig.getSetAtoms(expression))
                for i in range(0, len(expr_ops)):
                    s2 = setconfig.getSetAtoms(expr_sets[i+1])
                    if expr_ops[i] == IS_OPERATOR:
                        result.intersection_update(s2)
                    elif expr_ops[i] == DIFF_OPERATOR:
                        result.difference_update(s2)
                    elif expr_ops[i] == UNION_OPERATOR:
                        # [...] (union update elided)
                    # [...] (else: branch header elided)
                        raise NotImplementedError("unknown set operator %s" % expr_ops[i])
                newargs.extend(result)
            # [...] (else: plain @set argument; header elided)
                s = a[len(SETPREFIX):]
                # [...] (existence guard elided)
                display_missing_pkg_set(root_config, s)
                # [...]
                setconfig.active.append(s)
                # [...] (try: header elided)
                    set_atoms = setconfig.getSetAtoms(s)
                except portage.exception.PackageSetNotFound, e:
                    writemsg_level(("emerge: the given set '%s' " + \
                        "contains a non-existent set named '%s'.\n") % \
                        (s, e), level=logging.ERROR, noiselevel=-1)
                    # [...]
                if myaction in unmerge_actions and \
                    not sets[s].supportsOperation("unmerge"):
                    sys.stderr.write("emerge: the given set '%s' does " % s + \
                        "not support unmerge operations\n")
                    # [...]
                elif not set_atoms:
                    print "emerge: '%s' is an empty set" % s
                elif myaction not in do_not_expand:
                    newargs.extend(set_atoms)
                # [...] (else: keep set reference; header elided)
                    newargs.append(SETPREFIX+s)
                for e in sets[s].errors:
                    # [...]
    return (newargs, retval)
def repo_name_check(trees):
    """Warn about repositories that lack a profiles/repo_name entry.

    Returns True when at least one repository is missing its repo_name.
    NOTE(review): a few lines are elided from this excerpt ([...]).
    """
    missing_repo_names = set()
    for root, root_trees in trees.iteritems():
        if "porttree" in root_trees:
            portdb = root_trees["porttree"].dbapi
            # Start with every tree, then discard those with known names.
            missing_repo_names.update(portdb.porttrees)
            repos = portdb.getRepositories()
            # [...] (loop header elided)
                missing_repo_names.discard(portdb.getRepositoryPath(r))
            if portdb.porttree_root in missing_repo_names and \
                not os.path.exists(os.path.join(
                portdb.porttree_root, "profiles")):
                # This is normal if $PORTDIR happens to be empty,
                # so don't warn about it.
                missing_repo_names.remove(portdb.porttree_root)

    if missing_repo_names:
        # [...] (msg list initialization elided)
        msg.append("WARNING: One or more repositories " + \
            "have missing repo_name entries:")
        for p in missing_repo_names:
            msg.append("\t%s/profiles/repo_name" % (p,))
        msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
            "should be a plain text file containing a unique " + \
            "name for the repository on the first line.", 70))
        writemsg_level("".join("%s\n" % l for l in msg),
            level=logging.WARNING, noiselevel=-1)

    return bool(missing_repo_names)
def config_protect_check(trees):
    """Warn when CONFIG_PROTECT is empty for any configured root."""
    for root, root_trees in trees.iteritems():
        if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
            msg = "!!! CONFIG_PROTECT is empty"
            # [...] (guard qualifying the message by root elided)
            msg += " for '%s'" % root
            writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """Tell the user that a short ebuild name matched several packages,
    listing the fully-qualified alternatives (via search output unless
    --quiet).  NOTE(review): some lines are elided ([...]).
    """
    if "--quiet" in myopts:
        print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
        print "!!! one of the following fully-qualified ebuild names instead:\n"
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            print " " + colorize("INFORM", cp)
        # [...] (early return elided)

    # Use the search machinery for the verbose listing.
    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    null_cp = portage.dep_getkey(insert_category_into_atom(
        # [...] (call arguments elided)
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
        # [...] (per-match handling elided)
    print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
    print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
    """Check that every configured root has a valid profile; the check
    is skipped for actions/options that work without one.

    NOTE(review): return/continue lines are elided ([...]); presumably
    returns an exit code -- confirm against full source.
    """
    if myaction in ("info", "sync"):
        # [...]
    elif "--version" in myopts or "--help" in myopts:
        # [...]
    for root, root_trees in trees.iteritems():
        if root_trees["root_config"].settings.profiles:
            # [...]
        # generate some profile related warning messages
        validate_ebuild_environment(trees)
        msg = "If you have just changed your profile configuration, you " + \
            "should revert back to the previous configuration. Due to " + \
            "your current profile being invalid, allowed actions are " + \
            "limited to --help, --info, --sync, and --version."
        writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
            level=logging.ERROR, noiselevel=-1)
        # [...]
# --- Interior of emerge_main() (listing lines 14613-14640; the "def" line
# 14612 and several interior lines are elided from this numbered excerpt).
# Early bootstrap: disable legacy globals and color, then pre-parse argv for
# options (like --config-root) that must be known before config is loaded.
14613 global portage # NFC why this is necessary now - genone
14614 portage._disable_legacy_globals()
14615 # Disable color until we're sure that it should be enabled (after
14616 # EMERGE_DEFAULT_OPTS has been parsed).
14617 portage.output.havecolor = 0
14618 # This first pass is just for options that need to be known as early as
14619 # possible, such as --config-root. They will be parsed again later,
14620 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14621 # the value of --config-root).
14622 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14623 if "--debug" in myopts:
14624 os.environ["PORTAGE_DEBUG"] = "1"
14625 if "--config-root" in myopts:
14626 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14628 # Portage needs to ensure a sane umask for the files it creates.
# Load the initial configuration; a profile_check() result other than
# os.EX_OK aborts (the abort line itself is elided from this excerpt).
14630 settings, trees, mtimedb = load_emerge_config()
14631 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14632 rval = profile_check(trees, myaction, myopts)
14633 if rval != os.EX_OK:
# _global_updates() may move packages; when it reports changes, the whole
# config is reloaded from scratch below.
14636 if portage._global_updates(trees, mtimedb["updates"]):
14638 # Reload the whole config from scratch.
14639 settings, trees, mtimedb = load_emerge_config(trees=trees)
14640 portdb = trees[settings["ROOT"]]["porttree"].dbapi
# Second option pass: prepend EMERGE_DEFAULT_OPTS (unless
# --ignore-default-opts) to argv and re-parse.  NOTE(review): the
# tmpcmdline initialization line (14644) is elided from this excerpt.
14642 xterm_titles = "notitles" not in settings.features
14645 if "--ignore-default-opts" not in myopts:
14646 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14647 tmpcmdline.extend(sys.argv[1:])
14648 myaction, myopts, myfiles = parse_opts(tmpcmdline)
# --digest appends "digest" to FEATURES and forces a full config reload so
# the portdbapi-internal config picks the new FEATURES up.
14650 if "--digest" in myopts:
14651 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14652 # Reload the whole config from scratch so that the portdbapi internal
14653 # config is updated with new FEATURES.
14654 settings, trees, mtimedb = load_emerge_config(trees=trees)
14655 portdb = trees[settings["ROOT"]]["porttree"].dbapi
# Apply command-line option overrides to each root's vartree settings and
# record a counter hash (used later to detect vdb modifications).
14657 for myroot in trees:
14658 mysettings = trees[myroot]["vartree"].settings
14659 mysettings.unlock()
14660 adjust_config(myopts, mysettings)
14661 mysettings["PORTAGE_COUNTER_HASH"] = \
14662 trees[myroot]["vartree"].dbapi._counter_hash()
14663 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14665 del myroot, mysettings
14667 apply_priorities(settings)
14669 spinner = stdout_spinner()
# FEATURES=candy selects the scrolling "sayings" spinner.
14670 if "candy" in settings.features:
14671 spinner.update = spinner.update_scroll
14673 if "--quiet" not in myopts:
14674 portage.deprecated_profile_check(settings=settings)
14675 repo_name_check(trees)
14676 config_protect_check(trees)
# Collect eclasses that overlays override from PORTDIR (the tree-freeze
# call at listing line 14682 is elided); overriding invalidates the cached
# metadata distributed with the tree, hence the warning below.
14678 eclasses_overridden = {}
14679 for mytrees in trees.itervalues():
14680 mydb = mytrees["porttree"].dbapi
14681 # Freeze the portdbapi for performance (memoize all xmatch results).
14683 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14686 if eclasses_overridden and \
14687 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14688 prefix = bad(" * ")
14689 if len(eclasses_overridden) == 1:
14690 writemsg(prefix + "Overlay eclass overrides " + \
14691 "eclass from PORTDIR:\n", noiselevel=-1)
14693 writemsg(prefix + "Overlay eclasses override " + \
14694 "eclasses from PORTDIR:\n", noiselevel=-1)
14695 writemsg(prefix + "\n", noiselevel=-1)
14696 for eclass_name in sorted(eclasses_overridden):
14697 writemsg(prefix + " '%s/%s.eclass'\n" % \
14698 (eclasses_overridden[eclass_name], eclass_name),
14700 writemsg(prefix + "\n", noiselevel=-1)
14701 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14702 "because it will trigger invalidation of cached ebuild metadata " + \
14703 "that is distributed with the portage tree. If you must " + \
14704 "override eclasses from PORTDIR then you are advised to add " + \
14705 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14706 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14707 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14708 "you would like to disable this warning."
14709 from textwrap import wrap
14710 for line in wrap(msg, 72):
14711 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
# Easter egg: `emerge moo` prints an ASCII cow (most art lines elided;
# lines 14716-14720 below are fragments of the triple-quoted art string).
14713 if "moo" in myfiles:
14716 Larry loves Gentoo (""" + platform.system() + """)
14718 _______________________
14719 < Have you mooed today? >
14720 -----------------------
# Warn when a raw .ebuild/.tbz2 path is given as a target.
14730 ext = os.path.splitext(x)[1]
14731 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14732 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14735 root_config = trees[settings["ROOT"]]["root_config"]
# "list-sets" just prints the configured set names and returns.
14736 if myaction == "list-sets":
14737 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14741 # only expand sets for actions taking package arguments
14742 oldargs = myfiles[:]
14743 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14744 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14745 if retval != os.EX_OK:
14748 # Need to handle empty sets specially, otherwise emerge will react
14749 # with the help message for empty argument lists
14750 if oldargs and not myfiles:
14751 print "emerge: no targets left after set expansion"
# Option canonicalization: reject conflicting flags, then derive implied
# options so the rest of the code only needs to test the canonical ones.
14754 if ("--tree" in myopts) and ("--columns" in myopts):
14755 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14758 if ("--quiet" in myopts):
14759 spinner.update = spinner.update_quiet
14760 portage.util.noiselimit = -1
14762 # Always create packages if FEATURES=buildpkg
14763 # Imply --buildpkg if --buildpkgonly
14764 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14765 if "--buildpkg" not in myopts:
14766 myopts["--buildpkg"] = True
14768 # Also allow -S to invoke search action (-sS)
14769 if ("--searchdesc" in myopts):
14770 if myaction and myaction != "search":
14771 myfiles.append(myaction)
14772 if "--search" not in myopts:
14773 myopts["--search"] = True
14774 myaction = "search"
14776 # Always try and fetch binary packages if FEATURES=getbinpkg
14777 if ("getbinpkg" in settings.features):
14778 myopts["--getbinpkg"] = True
14780 if "--buildpkgonly" in myopts:
14781 # --buildpkgonly will not merge anything, so
14782 # it cancels all binary package options.
14783 for opt in ("--getbinpkg", "--getbinpkgonly",
14784 "--usepkg", "--usepkgonly"):
14785 myopts.pop(opt, None)
14787 if "--fetch-all-uri" in myopts:
14788 myopts["--fetchonly"] = True
14790 if "--skipfirst" in myopts and "--resume" not in myopts:
14791 myopts["--resume"] = True
# Implication chain for binary packages: --getbinpkgonly => --usepkgonly
# and --getbinpkg; --getbinpkg or --usepkgonly => --usepkg.
14793 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14794 myopts["--usepkgonly"] = True
14796 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14797 myopts["--getbinpkg"] = True
14799 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14800 myopts["--usepkg"] = True
14802 # Also allow -K to apply --usepkg/-k
14803 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14804 myopts["--usepkg"] = True
14806 # Allow -p to remove --ask
14807 if ("--pretend" in myopts) and ("--ask" in myopts):
14808 print ">>> --pretend disables --ask... removing --ask from options."
14809 del myopts["--ask"]
14811 # forbid --ask when not in a terminal
14812 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14813 if ("--ask" in myopts) and (not sys.stdin.isatty()):
14814 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
# Debug/trace plumbing and the trivial informational actions.
14818 if settings.get("PORTAGE_DEBUG", "") == "1":
14819 spinner.update = spinner.update_quiet
14821 if "python-trace" in settings.features:
14822 import portage.debug
14823 portage.debug.set_trace(True)
# Fall back to the non-animated spinner when stdout is not a tty or the
# user asked for --nospinner.
14825 if not ("--quiet" in myopts):
14826 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14827 spinner.update = spinner.update_basic
14829 if "--version" in myopts:
14830 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14831 settings.profile_path, settings["CHOST"],
14832 trees[settings["ROOT"]]["vartree"].dbapi)
14834 elif "--help" in myopts:
14835 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14838 if "--debug" in myopts:
14839 print "myaction", myaction
14840 print "myopts", myopts
# No action, no targets, and no --resume: show help (the exit line is
# elided from this excerpt).
14842 if not myaction and not myfiles and "--resume" not in myopts:
14843 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14846 pretend = "--pretend" in myopts
14847 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14848 buildpkgonly = "--buildpkgonly" in myopts
14850 # check if root user is the current user for the actions where emerge needs this
# NOTE(review): secpass appears to mean 2 = root, 1 = portage group,
# 0 = neither (imported from portage.data) — confirm against portage docs.
14851 if portage.secpass < 2:
14852 # We've already allowed "--version" and "--help" above.
14853 if "--pretend" not in myopts and myaction not in ("search","info"):
14854 need_superuser = not \
14856 (buildpkgonly and secpass >= 1) or \
14857 myaction in ("metadata", "regen") or \
14858 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14859 if portage.secpass < 1 or \
14862 access_desc = "superuser"
14864 access_desc = "portage group"
14865 # Always show portage_group_warning() when only portage group
14866 # access is required but the user is not in the portage group.
14867 from portage.data import portage_group_warning
# With --ask, degrade gracefully to --pretend instead of aborting.
14868 if "--ask" in myopts:
14869 myopts["--pretend"] = True
14870 del myopts["--ask"]
14871 print ("%s access is required... " + \
14872 "adding --pretend to options.\n") % access_desc
14873 if portage.secpass < 1 and not need_superuser:
14874 portage_group_warning()
# Without --ask there is nothing to fall back to: report and bail out
# (the exit line is elided from this excerpt).
14876 sys.stderr.write(("emerge: %s access is " + \
14877 "required.\n\n") % access_desc)
14878 if portage.secpass < 1 and not need_superuser:
14879 portage_group_warning()
# Disable emerge.log for read-only operations so parallel invocations
# (notably parallel-fetch's --resume --fetchonly) don't clutter the log.
14882 disable_emergelog = False
14883 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14885 disable_emergelog = True
14887 if myaction in ("search", "info"):
14888 disable_emergelog = True
14889 if disable_emergelog:
14890 """ Disable emergelog for everything except build or unmerge
14891 operations. This helps minimize parallel emerge.log entries that can
14892 confuse log parsers. We especially want it disabled during
14893 parallel-fetch, which uses --resume --fetchonly."""
14895 def emergelog(*pargs, **kargs):
# Log the session start plus the reconstructed command line.
14898 if not "--pretend" in myopts:
14899 emergelog(xterm_titles, "Started emerge on: "+\
14900 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14903 myelogstr=" ".join(myopts)
14905 myelogstr+=" "+myaction
14907 myelogstr += " " + " ".join(oldargs)
14908 emergelog(xterm_titles, " *** emerge " + myelogstr)
# On SIGINT/SIGTERM: ignore further signals, announce, exit 100+signum.
14911 def emergeexitsig(signum, frame):
14912 signal.signal(signal.SIGINT, signal.SIG_IGN)
14913 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14914 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
14915 sys.exit(100+signum)
14916 signal.signal(signal.SIGINT, emergeexitsig)
14917 signal.signal(signal.SIGTERM, emergeexitsig)
# atexit hook (its "def emergeexit():" line is elided): write the final
# log entry and reset the xterm title on the way out.
14920 """This gets out final log message in before we quit."""
14921 if "--pretend" not in myopts:
14922 emergelog(xterm_titles, " *** terminating.")
14923 if "notitles" not in settings.features:
14925 portage.atexit_register(emergeexit)
# Action dispatch: each branch validates the ebuild environment where
# needed, then delegates to the matching action_*() helper.  Many of the
# return statements are elided from this numbered excerpt.
14927 if myaction in ("config", "metadata", "regen", "sync"):
14928 if "--pretend" in myopts:
14929 sys.stderr.write(("emerge: The '%s' action does " + \
14930 "not support '--pretend'.\n") % myaction)
14933 if "sync" == myaction:
14934 return action_sync(settings, trees, mtimedb, myopts, myaction)
14935 elif "metadata" == myaction:
14936 action_metadata(settings, portdb, myopts)
14937 elif myaction=="regen":
14938 validate_ebuild_environment(trees)
14939 action_regen(settings, portdb, myopts.get("--jobs"),
14940 myopts.get("--load-average"))
14942 elif "config"==myaction:
14943 validate_ebuild_environment(trees)
14944 action_config(settings, trees, myopts, myfiles)
14947 elif "search"==myaction:
14948 validate_ebuild_environment(trees)
14949 action_search(trees[settings["ROOT"]]["root_config"],
14950 myopts, myfiles, spinner)
14951 elif myaction in ("clean", "unmerge") or \
14952 (myaction == "prune" and "--nodeps" in myopts):
14953 validate_ebuild_environment(trees)
14955 # Ensure atoms are valid before calling unmerge().
14956 # For backward compat, leading '=' is not required.
14958 if is_valid_package_atom(x) or \
14959 is_valid_package_atom("=" + x):
14962 msg.append("'%s' is not a valid package atom." % (x,))
14963 msg.append("Please check ebuild(5) for full details.")
14964 writemsg_level("".join("!!! %s\n" % line for line in msg),
14965 level=logging.ERROR, noiselevel=-1)
14968 # When given a list of atoms, unmerge
14969 # them in the order given.
14970 ordered = myaction == "unmerge"
14971 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14972 mtimedb["ldpath"], ordered=ordered):
14973 if not (buildpkgonly or fetchonly or pretend):
14974 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14976 elif myaction in ("depclean", "info", "prune"):
14978 # Ensure atoms are valid before calling unmerge().
14979 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14982 if is_valid_package_atom(x):
14984 valid_atoms.append(
14985 portage.dep_expand(x, mydb=vardb, settings=settings))
# dep_expand may raise AmbiguousPackageName for a short name matching
# several categories; list the candidates and report the error.
14986 except portage.exception.AmbiguousPackageName, e:
14987 msg = "The short ebuild name \"" + x + \
14988 "\" is ambiguous. Please specify " + \
14989 "one of the following " + \
14990 "fully-qualified ebuild names instead:"
14991 for line in textwrap.wrap(msg, 70):
14992 writemsg_level("!!! %s\n" % (line,),
14993 level=logging.ERROR, noiselevel=-1)
14995 writemsg_level(" %s\n" % colorize("INFORM", i),
14996 level=logging.ERROR, noiselevel=-1)
14997 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15001 msg.append("'%s' is not a valid package atom." % (x,))
15002 msg.append("Please check ebuild(5) for full details.")
15003 writemsg_level("".join("!!! %s\n" % line for line in msg),
15004 level=logging.ERROR, noiselevel=-1)
15007 if myaction == "info":
15008 return action_info(settings, trees, myopts, valid_atoms)
15010 validate_ebuild_environment(trees)
15011 action_depclean(settings, trees, mtimedb["ldpath"],
15012 myopts, myaction, valid_atoms, spinner)
15013 if not (buildpkgonly or fetchonly or pretend):
15014 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15015 # "update", "system", or just process files:
# Default branch: build/merge path — show news, run action_build(), then
# do post-merge housekeeping with its return value.
15017 validate_ebuild_environment(trees)
15018 if "--pretend" not in myopts:
15019 display_news_notification(root_config, myopts)
15020 retval = action_build(settings, trees, mtimedb,
15021 myopts, myaction, myfiles, spinner)
15022 root_config = trees[settings["ROOT"]]["root_config"]
15023 post_emerge(root_config, myopts, mtimedb, retval)