2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time, types
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
59 from UserDict import DictMixin
62 import cPickle as pickle
67 import cStringIO as StringIO
# Console progress spinner for emerge.  (Elided listing: original source
# lines are missing between the embedded line numbers below.)
# Three output styles are visible: update_basic (dots), update_scroll
# (scrolling message), update_twirl (twirling cursor), plus update_quiet.
71 class stdout_spinner(object):
# Humorous messages used by the scrolling spinner; the list header
# (presumably "scroll_msgs = [") is among the elided lines.
73 "Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
# Characters cycled through by update_twirl.
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Constructor body (def line elided): default update style is twirl, and
# the scroll message is picked pseudo-randomly from the current time.
96 self.update = self.update_twirl
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between terminal writes (rate limit for _return_early).
100 self.min_display_latency = 0.05
# Rate limiter: returns truthy (elided return line) when the last update
# was less than min_display_latency seconds ago, so update_* methods can
# skip their output.
102 def _return_early(self):
104 Flushing output to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
108 cur_time = time.time()
109 if cur_time - self.last_update < self.min_display_latency:
111 self.last_update = cur_time
# Dot-style spinner: writes ". " every 500 ticks, "." every 100 ticks.
114 def update_basic(self):
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
# Scrolls scroll_sequence back and forth; first half of the 2*len cycle
# scrolls forward (green), second half scrolls backward (darkgreen).
125 def update_scroll(self):
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Classic twirling-bar spinner over twirl_sequence.
136 def update_twirl(self):
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used in quiet mode (body elided).
143 def update_quiet(self):
# Interactive yes/no (or multi-choice) prompt.  (Elided listing: source
# lines are missing between the embedded line numbers below.)
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
# Default colour functions come from the PROMPT_CHOICE_* colour classes
# (the "colours = (" line is elided).
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
# Repeat the supplied colours cyclically so there is one per response.
172 colours=(colours*len(responses))[:len(responses)]
# Prompt loop (the while/try lines are elided): show "[Yes/No]"-style
# choices and match a (possibly abbreviated) case-insensitive reply.
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
# Per the docstring above, interrupts are converted (handler body elided).
182 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary tables.  (Elided listing: lines are missing
# between the embedded line numbers; in particular the assignment headers
# for the long-option and short-option tables -- presumably "options ="
# and "shortmapping =" -- are among the elided lines.  TODO confirm.)
# 1) emerge action words:
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
# 2) long options accepted by emerge:
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
205 "--newuse", "--nocolor",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--searchdesc", "--selective",
214 "--usepkg", "--usepkgonly",
215 "--verbose", "--version"
# 3) single-letter short option -> long option mapping:
221 "b":"--buildpkg", "B":"--buildpkgonly",
222 "c":"--clean", "C":"--unmerge",
223 "d":"--debug", "D":"--deep",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "k":"--usepkg", "K":"--usepkgonly",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps", "O":"--nodeps",
232 "p":"--pretend", "P":"--prune",
234 "s":"--search", "S":"--searchdesc",
237 "v":"--verbose", "V":"--version"
# Append a timestamped message to /var/log/emerge.log and optionally set
# the xterm title to short_msg.  (Elided listing: lines are missing
# between the embedded line numbers; the try/finally scaffolding around
# the lock is only partially visible.)
240 def emergelog(xterm_titles, mystr, short_msg=None):
241 if xterm_titles and short_msg:
242 if "HOSTNAME" in os.environ:
# Prefix the title with the host name when available.
243 short_msg = os.environ["HOSTNAME"]+": "+short_msg
244 xtermTitle(short_msg)
246 file_path = "/var/log/emerge.log"
247 mylogfile = open(file_path, "a")
# Ensure the log keeps portage-user/group ownership regardless of who
# is running emerge.
248 portage.util.apply_secpass_permissions(file_path,
249 uid=portage.portage_uid, gid=portage.portage_gid,
# Serialize writers with a portage lock on the open file.
253 mylock = portage.locks.lockfile(mylogfile)
254 # seek because we may have gotten held up by the lock.
255 # if so, we may not be positioned at the end of the file.
# Timestamp is seconds-since-epoch truncated to 10 digits.
257 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
261 portage.locks.unlockfile(mylock)
# Best effort: logging failures are reported to stderr, not raised.
263 except (IOError,OSError,portage.exception.PortageException), e:
265 print >> sys.stderr, "emergelog():",e
# Print an abortable countdown before starting an action.  (Elided
# listing: the guard on secs and the per-second loop/sleep lines are
# missing between the embedded line numbers.)
267 def countdown(secs=5, doing="Starting"):
269 print ">>> Waiting",secs,"seconds before starting..."
270 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# Inside the (elided) loop: print the remaining seconds in warning colour.
274 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
279 # formats a size given in bytes nicely
# Returns a human-readable kB string with thousands separators.  (Elided
# listing: the non-numeric early return, the separator loop, and the
# final return are among the missing lines.)
280 def format_size(mysize):
# Python 2 type check: anything that is not an int/long is handled by the
# (elided) branch at line 282.
281 if type(mysize) not in [types.IntType,types.LongType]:
283 if 0 != mysize % 1024:
284 # Always round up to the next kB so that it doesn't show 0 kB when
285 # some small file still needs to be fetched.
286 mysize += 1024 - mysize % 1024
287 mystr=str(mysize/1024)
# Insert a "," thousands separator (loop header elided).
291 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying gcc-config first, then the
# CHOST-prefixed compiler, then plain "gcc".  (Elided listing: blank and
# docstring delimiter lines are missing between the embedded numbers.)
295 def getgccversion(chost):
298 return: the current in-use gcc version
301 gcc_ver_command = 'gcc -dumpversion'
302 gcc_ver_prefix = 'gcc-'
304 gcc_not_found_error = red(
305 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306 "!!! to update the environment of this terminal and possibly\n" +
307 "!!! other terminals also.\n"
# Preferred source: gcc-config, which reports "<chost>-<version>".
310 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Fallback 1: ask the CHOST-prefixed compiler directly.
314 mystatus, myoutput = commands.getstatusoutput(
315 chost + "-" + gcc_ver_command)
316 if mystatus == os.EX_OK:
317 return gcc_ver_prefix + myoutput
# Fallback 2: plain "gcc -dumpversion" from PATH.
319 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320 if mystatus == os.EX_OK:
321 return gcc_ver_prefix + myoutput
# Nothing worked: warn and return a placeholder.
323 portage.writemsg(gcc_not_found_error, noiselevel=-1)
324 return "[unavailable]"
# Build the "Portage X.Y (profile, gcc, libc, kernel arch)" banner string.
# (Elided listing: the if/else/try scaffolding lines are missing between
# the embedded line numbers.)
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327 profilever = "unavailable"
# Express the profile as a path relative to $PORTDIR/profiles when it
# lives inside the tree; otherwise (elided else-branch) show the symlink
# target prefixed with "!".
329 realpath = os.path.realpath(profile)
330 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
331 if realpath.startswith(basepath):
332 profilever = realpath[1 + len(basepath):]
335 profilever = "!" + os.readlink(profile)
338 del realpath, basepath
# Determine the installed libc version from the virtual/libc (and legacy
# virtual/glibc) providers in the installed-package db.
341 libclist = vardb.match("virtual/libc")
342 libclist += vardb.match("virtual/glibc")
343 libclist = portage.util.unique_array(libclist)
# Per provider (loop header elided): join package-name/version parts,
# comma-separating multiple providers.
345 xs=portage.catpkgsplit(x)
347 libcver+=","+"-".join(xs[1:])
349 libcver="-".join(xs[1:])
351 libcver="unavailable"
353 gccver = getgccversion(chost)
354 unameout=platform.release()+" "+platform.machine()
356 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge options/action into the depgraph parameter set.
# (Elided listing: the early "return myparams" for the remove action, the
# --deep handler body, and the final return are among the missing lines.)
358 def create_depgraph_params(myopts, myaction):
359 #configure emerge engine parameters
361 # self: include _this_ package regardless of if it is merged.
362 # selective: exclude the package if it is merged
363 # recurse: go into the dependencies
364 # deep: go into the dependencies of already merged packages
365 # empty: pretend nothing is merged
366 # complete: completely account for all known dependencies
367 # remove: build graph for use in removing packages
368 myparams = set(["recurse"])
# Removal graphs need complete dependency accounting.
370 if myaction == "remove":
371 myparams.add("remove")
372 myparams.add("complete")
# Options that make the graph skip already-merged packages.
375 if "--update" in myopts or \
376 "--newuse" in myopts or \
377 "--reinstall" in myopts or \
378 "--noreplace" in myopts:
379 myparams.add("selective")
# --emptytree pretends nothing is installed, which supersedes selective.
380 if "--emptytree" in myopts:
381 myparams.add("empty")
382 myparams.discard("selective")
383 if "--nodeps" in myopts:
384 myparams.discard("recurse")
385 if "--deep" in myopts:
387 if "--complete-graph" in myopts:
388 myparams.add("complete")
391 # search functionality
# Implements `emerge --search` / `--searchdesc` over the ebuild tree,
# binary packages, installed packages and package sets.  (Elided listing:
# original source lines are missing between the embedded line numbers, so
# several method bodies below are incomplete.)
392 class search(object):
# Caches the db handles at construction so repeated searches are fast.
403 def __init__(self, root_config, spinner, searchdesc,
404 verbose, usepkg, usepkgonly):
405 """Searches the available and installed packages for the supplied search key.
406 The list of available and installed packages is created at object instantiation.
407 This makes successive searches faster."""
408 self.settings = root_config.settings
409 self.vartree = root_config.trees["vartree"]
410 self.spinner = spinner
411 self.verbose = verbose
412 self.searchdesc = searchdesc
413 self.root_config = root_config
414 self.setconfig = root_config.setconfig
415 self.matches = {"pkg" : []}
# self.portdb is a synthetic object whose dbapi-style methods dispatch to
# the _aux_get/_cp_all/_xmatch/_findname/_getFetchMap wrappers below,
# which aggregate over all selected databases.
420 self.portdb = fake_portdb
421 for attrib in ("aux_get", "cp_all",
422 "xmatch", "findname", "getFetchMap"):
423 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
427 portdb = root_config.trees["porttree"].dbapi
428 bindb = root_config.trees["bintree"].dbapi
429 vardb = root_config.trees["vartree"].dbapi
# Select which databases participate in the search, depending on the
# --usepkg/--usepkgonly options.
431 if not usepkgonly and portdb._have_root_eclass_dir:
432 self._dbs.append(portdb)
434 if (usepkg or usepkgonly) and bindb.cp_all():
435 self._dbs.append(bindb)
437 self._dbs.append(vardb)
438 self._portdb = portdb
# _cp_all (def line elided): union of cp_all() over all databases,
# returned sorted.
443 cp_all.update(db.cp_all())
444 return list(sorted(cp_all))
# Delegates aux_get to the first database that can answer (loop and
# fallthrough lines elided).
446 def _aux_get(self, *args, **kwargs):
449 return db.aux_get(*args, **kwargs)
454 def _findname(self, *args, **kwargs):
456 if db is not self._portdb:
457 # We don't want findname to return anything
458 # unless it's an ebuild in a portage tree.
459 # Otherwise, it's already built and we don't
462 func = getattr(db, "findname", None)
464 value = func(*args, **kwargs)
# Delegates getFetchMap to whichever db provides it (loop lines elided).
469 def _getFetchMap(self, *args, **kwargs):
471 func = getattr(db, "getFetchMap", None)
473 value = func(*args, **kwargs)
# Wrap cpv+metadata in a Package and apply the module-level visible()
# check.  pkg_type for the ebuild/binary cases is set in elided branches.
478 def _visible(self, db, cpv, metadata):
479 installed = db is self.vartree.dbapi
480 built = installed or db is not self._portdb
483 pkg_type = "installed"
486 return visible(self.settings,
487 Package(type_name=pkg_type, root_config=self.root_config,
488 cpv=cpv, built=built, installed=installed, metadata=metadata))
# Aggregated xmatch over all databases; supports the three levels
# dispatched on below and raises NotImplementedError otherwise.
490 def _xmatch(self, level, atom):
492 This method does not expand old-style virtuals because it
493 is restricted to returning matches for a single ${CATEGORY}/${PN}
494 and old-style virtual matches unreliable for that when querying
495 multiple package databases. If necessary, old-style virtuals
496 can be performed on atoms prior to calling this method.
498 cp = portage.dep_getkey(atom)
499 if level == "match-all":
502 if hasattr(db, "xmatch"):
503 matches.update(db.xmatch(level, atom))
505 matches.update(db.match(atom))
# Restrict to the atom's own cat/pkg key and sort ascending.
506 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507 db._cpv_sort_ascending(result)
508 elif level == "match-visible":
511 if hasattr(db, "xmatch"):
512 matches.update(db.xmatch(level, atom))
# Databases without xmatch: filter match() results through _visible().
514 db_keys = list(db._aux_cache_keys)
515 for cpv in db.match(atom):
516 metadata = izip(db_keys,
517 db.aux_get(cpv, db_keys))
518 if not self._visible(db, cpv, metadata):
521 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522 db._cpv_sort_ascending(result)
523 elif level == "bestmatch-visible":
526 if hasattr(db, "xmatch"):
527 cpv = db.xmatch("bestmatch-visible", atom)
528 if not cpv or portage.cpv_getkey(cpv) != cp:
# Keep whichever candidate portage.best() prefers.
530 if not result or cpv == portage.best([cpv, result]):
533 db_keys = Package.metadata_keys
534 # break out of this loop with highest visible
535 # match, checked in descending order
536 for cpv in reversed(db.match(atom)):
537 if portage.cpv_getkey(cpv) != cp:
539 metadata = izip(db_keys,
540 db.aux_get(cpv, db_keys))
541 if not self._visible(db, cpv, metadata):
543 if not result or cpv == portage.best([cpv, result]):
547 raise NotImplementedError(level)
# Run the search and populate self.matches; results are printed later by
# the output method.
550 def execute(self,searchkey):
551 """Performs the search for the supplied search key"""
553 self.searchkey=searchkey
554 self.packagematches = []
# With --searchdesc a "desc" bucket is also collected (condition elided).
557 self.matches = {"pkg":[], "desc":[], "set":[]}
560 self.matches = {"pkg":[], "set":[]}
561 print "Searching... ",
# Leading '%' selects regex mode; leading '@' restricts the match to the
# full category/package string (flag assignments elided).
564 if self.searchkey.startswith('%'):
566 self.searchkey = self.searchkey[1:]
567 if self.searchkey.startswith('@'):
569 self.searchkey = self.searchkey[1:]
571 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex mode: treat the key literally, case-insensitive.
573 self.searchre=re.compile(re.escape(self.searchkey), re.I)
574 for package in self.portdb.cp_all():
575 self.spinner.update()
578 match_string = package[:]
# Name-only matching uses just the ${PN} part.
580 match_string = package.split("/")[-1]
583 if self.searchre.search(match_string):
# Record whether every match of this package is masked.
584 if not self.portdb.xmatch("match-visible", package):
586 self.matches["pkg"].append([package,masked])
587 elif self.searchdesc: # DESCRIPTION searching
588 full_package = self.portdb.xmatch("bestmatch-visible", package)
590 #no match found; we don't want to query description
591 full_package = portage.best(
592 self.portdb.xmatch("match-all", package))
598 full_desc = self.portdb.aux_get(
599 full_package, ["DESCRIPTION"])[0]
601 print "emerge: search: aux_get() failed, skipping"
603 if self.searchre.search(full_desc):
604 self.matches["desc"].append([full_package,masked])
# Also search package sets by name and (with --searchdesc) description.
606 self.sdict = self.setconfig.getSets()
607 for setname in self.sdict:
608 self.spinner.update()
610 match_string = setname
612 match_string = setname.split("/")[-1]
614 if self.searchre.search(match_string):
615 self.matches["set"].append([setname, False])
616 elif self.searchdesc:
617 if self.searchre.search(
618 self.sdict[setname].getMetadata("DESCRIPTION")):
619 self.matches["set"].append([setname, False])
# Sort each bucket and total up the result count.
622 for mtype in self.matches:
623 self.matches[mtype].sort()
624 self.mlen += len(self.matches[mtype])
# Helper fragment (def line elided; appears to add a single cp as a pkg
# match with a masked flag -- TODO confirm against full source).
627 if not self.portdb.xmatch("match-all", cp):
630 if not self.portdb.xmatch("bestmatch-visible", cp):
632 self.matches["pkg"].append([cp, masked])
# output method (def line elided): pretty-print self.matches.
636 """Outputs the results of the search."""
637 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
638 print "[ Applications found : "+white(str(self.mlen))+" ]"
640 vardb = self.vartree.dbapi
641 for mtype in self.matches:
642 for match,masked in self.matches[mtype]:
646 full_package = self.portdb.xmatch(
647 "bestmatch-visible", match)
649 #no match found; we don't want to query description
651 full_package = portage.best(
652 self.portdb.xmatch("match-all",match))
653 elif mtype == "desc":
655 match = portage.cpv_getkey(match)
# Set results: name plus the set's DESCRIPTION metadata.
657 print green("*")+" "+white(match)
658 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
662 desc, homepage, license = self.portdb.aux_get(
663 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665 print "emerge: search: aux_get() failed, skipping"
# Masked packages are flagged in red.
668 print green("*")+" "+white(match)+" "+red("[ Masked ]")
670 print green("*")+" "+white(match)
671 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
675 mycat = match.split("/")[0]
676 mypkg = match.split("/")[1]
677 mycpv = match + "-" + myversion
678 myebuild = self.portdb.findname(mycpv)
# For a real ebuild, compute the distfiles download size from the
# Manifest; errors yield an "Unknown (...)" size string.
680 pkgdir = os.path.dirname(myebuild)
681 from portage import manifest
682 mf = manifest.Manifest(
683 pkgdir, self.settings["DISTDIR"])
685 uri_map = self.portdb.getFetchMap(mycpv)
686 except portage.exception.InvalidDependString, e:
687 file_size_str = "Unknown (%s)" % (e,)
691 mysum[0] = mf.getDistfilesSize(uri_map)
693 file_size_str = "Unknown (missing " + \
694 "digest for %s)" % (e,)
# For built packages, fall back to the binary package file size.
699 if db is not vardb and \
700 db.cpv_exists(mycpv):
702 if not myebuild and hasattr(db, "bintree"):
703 myebuild = db.bintree.getname(mycpv)
705 mysum[0] = os.stat(myebuild).st_size
# Format the byte count as kB with a thousands separator (loop elided).
710 if myebuild and file_size_str is None:
711 mystr = str(mysum[0] / 1024)
715 mystr = mystr[:mycount] + "," + mystr[mycount:]
716 file_size_str = mystr + " kB"
720 print " ", darkgreen("Latest version available:"),myversion
721 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
724 (darkgreen("Size of files:"), file_size_str)
725 print " ", darkgreen("Homepage:")+" ",homepage
726 print " ", darkgreen("Description:")+" ",desc
727 print " ", darkgreen("License:")+" ",license
# Report the best installed version of `package`, or "[ Not Installed ]".
732 def getInstallationStatus(self,package):
733 installed_package = self.vartree.dep_bestmatch(package)
735 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737 result = darkgreen("Latest version installed:")+" "+version
739 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract the version (optionally with the -rN revision when detail is
# search.VERSION_RELEASE) from a full cpv string.
742 def getVersion(self,full_package,detail):
743 if len(full_package) > 1:
744 package_parts = portage.catpkgsplit(full_package)
745 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746 result = package_parts[2]+ "-" + package_parts[3]
748 result = package_parts[2]
# Per-ROOT configuration bundle used by depgraph.  (Elided listing:
# source lines are missing between the embedded line numbers.)
753 class RootConfig(object):
754 """This is used internally by depgraph to track information about a
# Package-type -> tree-name mapping (the "pkg_tree_map = {" header line
# is elided); the loop below presumably builds the reverse mapping.
758 "ebuild" : "porttree",
759 "binary" : "bintree",
760 "installed" : "vartree"
764 for k, v in pkg_tree_map.iteritems():
767 def __init__(self, settings, trees, setconfig):
769 self.settings = settings
# Implicit IUSE flags, sorted for deterministic ordering.
770 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771 self.root = self.settings["ROOT"]
772 self.setconfig = setconfig
773 self.sets = self.setconfig.getSets()
# In-memory dbapi used to track packages deemed visible.
774 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decide which atom (plain cp vs slot atom) should represent pkg in the
# world file, or an implicit None when nothing should be added.  (Elided
# listing: source lines are missing between the embedded line numbers.)
776 def create_world_atom(pkg, args_set, root_config):
777 """Create a new atom for the world file if one does not exist. If the
778 argument atom is precise enough to identify a specific slot then a slot
779 atom will be returned. Atoms that are in the system set may also be stored
780 in world since system atoms can only match one slot while world atoms can
781 be greedy with respect to slots. Unslotted system packages will not be
784 arg_atom = args_set.findAtomForPackage(pkg)
787 cp = portage.dep_getkey(arg_atom)
789 sets = root_config.sets
790 portdb = root_config.trees["porttree"].dbapi
791 vardb = root_config.trees["vartree"].dbapi
# A package counts as "slotted" when more than one SLOT exists for its
# cp, or its single SLOT is not "0".
792 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793 for cpv in portdb.match(cp))
794 slotted = len(available_slots) > 1 or \
795 (len(available_slots) == 1 and "0" not in available_slots)
797 # check the vdb in case this is multislot
798 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799 for cpv in vardb.match(cp))
800 slotted = len(available_slots) > 1 or \
801 (len(available_slots) == 1 and "0" not in available_slots)
802 if slotted and arg_atom != cp:
803 # If the user gave a specific atom, store it as a
804 # slot atom in the world file.
805 slot_atom = pkg.slot_atom
807 # For USE=multislot, there are a couple of cases to
810 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811 # unknown value, so just record an unslotted atom.
813 # 2) SLOT comes from an installed package and there is no
814 # matching SLOT in the portage tree.
816 # Make sure that the slot atom is available in either the
817 # portdb or the vardb, since otherwise the user certainly
818 # doesn't want the SLOT atom recorded in the world file
819 # (case 1 above). If it's only available in the vardb,
820 # the user may be trying to prevent a USE=multislot
821 # package from being removed by --depclean (case 2 above).
824 if not portdb.match(slot_atom):
825 # SLOT seems to come from an installed multislot package
827 # If there is no installed package matching the SLOT atom,
828 # it probably changed SLOT spontaneously due to USE=multislot,
829 # so just record an unslotted atom.
830 if vardb.match(slot_atom):
831 # Now verify that the argument is precise
832 # enough to identify a specific slot.
833 matches = mydb.match(arg_atom)
834 matched_slots = set()
# Loop over matches (header elided), collecting their SLOT values; only
# a uniquely-identified slot justifies a slot atom.
836 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837 if len(matched_slots) == 1:
838 new_world_atom = slot_atom
840 if new_world_atom == sets["world"].findAtomForPackage(pkg):
841 # Both atoms would be identical, so there's nothing to add.
844 # Unlike world atoms, system atoms are not greedy for slots, so they
845 # can't be safely excluded from world if they are slotted.
846 system_atom = sets["system"].findAtomForPackage(pkg)
848 if not portage.dep_getkey(system_atom).startswith("virtual/"):
850 # System virtuals aren't safe to exclude from world since they can
851 # match multiple old-style virtuals but only one of them will be
852 # pulled in by update or depclean.
853 providers = portdb.mysettings.getvirtuals().get(
854 portage.dep_getkey(system_atom))
855 if providers and len(providers) == 1 and providers[0] == cp:
857 return new_world_atom
# Generator over IUSE entries that strips the leading "+"/"-" default
# markers (the yield lines are elided from this listing).
859 def filter_iuse_defaults(iuse):
861 if flag.startswith("+") or flag.startswith("-"):
# Base class for lightweight __slots__-only value objects: the
# constructor assigns every slot declared anywhere in the MRO from
# keyword arguments.  (Elided listing: the class-walk loop headers and
# several lines are missing between the embedded line numbers.)
866 class SlotObject(object):
867 __slots__ = ("__weakref__",)
869 def __init__(self, **kwargs):
# Walk this class and (via the elided loop) all its bases, collecting
# each class's __slots__ and assigning kwargs values (default None).
870 classes = [self.__class__]
875 classes.extend(c.__bases__)
876 slots = getattr(c, "__slots__", None)
880 myvalue = kwargs.get(myattr, None)
881 setattr(self, myattr, myvalue)
# copy method (def line elided): clone by re-walking __slots__ across
# the MRO and copying attribute values onto a fresh instance.
885 Create a new instance and copy all attributes
886 defined from __slots__ (including those from
889 obj = self.__class__()
891 classes = [self.__class__]
896 classes.extend(c.__bases__)
897 slots = getattr(c, "__slots__", None)
901 setattr(obj, myattr, getattr(self, myattr))
# Base priority type: subclasses define __int__(), and all rich
# comparisons delegate to that integer value compared against `other`
# directly (so priorities compare against plain ints as well).
905 class AbstractDepPriority(SlotObject):
906 __slots__ = ("buildtime", "runtime", "runtime_post")
908 def __lt__(self, other):
909 return self.__int__() < other
911 def __le__(self, other):
912 return self.__int__() <= other
914 def __eq__(self, other):
915 return self.__int__() == other
917 def __ne__(self, other):
918 return self.__int__() != other
920 def __gt__(self, other):
921 return self.__int__() > other
923 def __ge__(self, other):
924 return self.__int__() >= other
# copy method (def line elided): shallow copy via the copy module.
928 return copy.copy(self)
# Concrete dependency priority; see the table in the docstring for the
# attribute-combination -> integer mapping.  (Elided listing: the
# __int__ body and several docstring lines are missing between the
# embedded line numbers.)
930 class DepPriority(AbstractDepPriority):
932 This class generates an integer priority level based on various
933 attributes of the dependency relationship. Attributes can be assigned
934 at any time and the new integer value will be generated on calls to the
935 __int__() method. Rich comparison operators are supported.
937 The boolean attributes that affect the integer value are "satisfied",
938 "buildtime", "runtime", and "system". Various combinations of
939 attributes lead to the following priority levels:
941 Combination of properties Priority Category
943 not satisfied and buildtime 0 HARD
944 not satisfied and runtime -1 MEDIUM
945 not satisfied and runtime_post -2 MEDIUM_SOFT
946 satisfied and buildtime and rebuild -3 SOFT
947 satisfied and buildtime -4 SOFT
948 satisfied and runtime -5 SOFT
949 satisfied and runtime_post -6 SOFT
950 (none of the above) -6 SOFT
952 Several integer constants are defined for categorization of priority
955 MEDIUM The upper boundary for medium dependencies.
956 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
957 SOFT The upper boundary for soft dependencies.
958 MIN The lower boundary for soft dependencies.
960 __slots__ = ("satisfied", "rebuild")
# Fragments of __int__ (surrounding lines elided): branch on satisfied /
# runtime_post per the table above.
967 if not self.satisfied:
972 if self.runtime_post:
980 if self.runtime_post:
# Fragments of __str__ (def line elided): map the integer value to a
# category name using the MEDIUM/MEDIUM_SOFT/SOFT boundaries.
985 myvalue = self.__int__()
986 if myvalue > self.MEDIUM:
988 if myvalue > self.MEDIUM_SOFT:
990 if myvalue > self.SOFT:
# Priority for blocker dependencies (body elided); a shared singleton is
# published as BlockerDepPriority.instance.
994 class BlockerDepPriority(DepPriority):
999 BlockerDepPriority.instance = BlockerDepPriority()
# Priority type used when ordering unmerge operations.  (Elided listing:
# the __int__ and __str__ bodies are mostly missing.)
1001 class UnmergeDepPriority(AbstractDepPriority):
1002 __slots__ = ("satisfied",)
1004 Combination of properties Priority Category
1007 runtime_post -1 HARD
1009 (none of the above) -2 SOFT
# Fragment of __int__ (surrounding lines elided).
1019 if self.runtime_post:
# Fragment of __str__: categorize by the SOFT boundary.
1026 myvalue = self.__int__()
1027 if myvalue > self.SOFT:
# In-memory snapshot of the installed-package tree.  (Elided listing:
# original source lines are missing between the embedded line numbers,
# notably try/finally scaffolding around the vdb lock.)
1031 class FakeVartree(portage.vartree):
1032 """This implements an in-memory copy of a vartree instance that provides
1033 all the interfaces required for use by the depgraph. The vardb is locked
1034 during the constructor call just long enough to read a copy of the
1035 installed package information. This allows the depgraph to do its
1036 dependency calculations without holding a lock on the vardb. It also
1037 allows things like vardb global updates to be done in memory so that the
1038 user doesn't necessarily need write access to the vardb in cases where
1039 global updates are necessary (updates are performed when necessary if there
1040 is not a matching ebuild in the tree)."""
1041 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1042 self._root_config = root_config
1043 if pkg_cache is None:
1045 real_vartree = root_config.trees["vartree"]
1046 portdb = root_config.trees["porttree"].dbapi
1047 self.root = real_vartree.root
1048 self.settings = real_vartree.settings
# Make sure the vdb modification time is among the cached keys so sync()
# can validate entries later.
1049 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1050 if "_mtime_" not in mykeys:
1051 mykeys.append("_mtime_")
1052 self._db_keys = mykeys
1053 self._pkg_cache = pkg_cache
# The snapshot is held in a purely in-memory dbapi.
1054 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1055 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1057 # At least the parent needs to exist for the lock file.
1058 portage.util.ensure_dirs(vdb_path)
1059 except portage.exception.PortageException:
# Lock the vdb only while copying, and only when writable.
1063 if acquire_lock and os.access(vdb_path, os.W_OK):
1064 vdb_lock = portage.locks.lockdir(vdb_path)
1065 real_dbapi = real_vartree.dbapi
# Copy every installed package's metadata into the snapshot, reusing
# Package objects from pkg_cache when present.
1067 for cpv in real_dbapi.cpv_all():
1068 cache_key = ("installed", self.root, cpv, "nomerge")
1069 pkg = self._pkg_cache.get(cache_key)
1071 metadata = pkg.metadata
1073 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1074 myslot = metadata["SLOT"]
1075 mycp = portage.dep_getkey(cpv)
1076 myslot_atom = "%s:%s" % (mycp, myslot)
1078 mycounter = long(metadata["COUNTER"])
1081 metadata["COUNTER"] = str(mycounter)
# Keep only the highest-COUNTER package per slot atom (skip line elided).
1082 other_counter = slot_counters.get(myslot_atom, None)
1083 if other_counter is not None:
1084 if other_counter > mycounter:
1086 slot_counters[myslot_atom] = mycounter
1088 pkg = Package(built=True, cpv=cpv,
1089 installed=True, metadata=metadata,
1090 root_config=root_config, type_name="installed")
1091 self._pkg_cache[pkg] = pkg
1092 self.dbapi.cpv_inject(pkg)
1093 real_dbapi.flush_cache()
# Release the lock taken above (finally line elided).
1096 portage.locks.unlockdir(vdb_lock)
1097 # Populate the old-style virtuals using the cached values.
1098 if not self.settings.treeVirtuals:
1099 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1100 portage.getCPFromCPV, self.get_all_provides())
1102 # Initialize variables needed for lazy cache pulls of the live ebuild
1103 # metadata. This ensures that the vardb lock is released ASAP, without
1104 # being delayed in case cache generation is triggered.
# The wrappers below intercept aux_get/match on the snapshot dbapi so
# live ebuild metadata can be pulled in lazily.
1105 self._aux_get = self.dbapi.aux_get
1106 self.dbapi.aux_get = self._aux_get_wrapper
1107 self._match = self.dbapi.match
1108 self.dbapi.match = self._match_wrapper
1109 self._aux_get_history = set()
1110 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1111 self._portdb = portdb
1112 self._global_updates = None
# match() wrapper: refresh metadata for any matched cpv not yet seen.
1114 def _match_wrapper(self, cpv, use_cache=1):
1116 Make sure the metadata in Package instances gets updated for any
1117 cpv that is returned from a match() call, since the metadata can
1118 be accessed directly from the Package instance instead of via
1121 matches = self._match(cpv, use_cache=use_cache)
1123 if cpv in self._aux_get_history:
1125 self._aux_get_wrapper(cpv, [])
# aux_get() wrapper: on first access to a package, overlay dependency
# keys from the live ebuild when its EAPI is supported; otherwise fall
# back to applying profile "updates" entries to the stored metadata.
1128 def _aux_get_wrapper(self, pkg, wants):
1129 if pkg in self._aux_get_history:
1130 return self._aux_get(pkg, wants)
1131 self._aux_get_history.add(pkg)
1133 # Use the live ebuild metadata if possible.
1134 live_metadata = dict(izip(self._portdb_keys,
1135 self._portdb.aux_get(pkg, self._portdb_keys)))
1136 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1138 self.dbapi.aux_update(pkg, live_metadata)
1139 except (KeyError, portage.exception.PortageException):
# Lazily load the global update commands once, then apply them.
1140 if self._global_updates is None:
1141 self._global_updates = \
1142 grab_global_updates(self._portdb.porttree_root)
1143 perform_global_updates(
1144 pkg, self.dbapi, self._global_updates)
1145 return self._aux_get(pkg, wants)
# Re-synchronize the snapshot with the real vardb after merges/unmerges.
1147 def sync(self, acquire_lock=1):
1149 Call this method to synchronize state with the real vardb
1150 after one or more packages may have been installed or
1153 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1155 # At least the parent needs to exist for the lock file.
1156 portage.util.ensure_dirs(vdb_path)
1157 except portage.exception.PortageException:
1161 if acquire_lock and os.access(vdb_path, os.W_OK):
1162 vdb_lock = portage.locks.lockdir(vdb_path)
1166 portage.locks.unlockdir(vdb_lock)
# _sync worker (def line elided below; the lines from 1170 on appear to
# be its body -- TODO confirm against full source).
1170 real_vardb = self._root_config.trees["vartree"].dbapi
1171 current_cpv_set = frozenset(real_vardb.cpv_all())
1172 pkg_vardb = self.dbapi
1173 aux_get_history = self._aux_get_history
1175 # Remove any packages that have been uninstalled.
1176 for pkg in list(pkg_vardb):
1177 if pkg.cpv not in current_cpv_set:
1178 pkg_vardb.cpv_remove(pkg)
1179 aux_get_history.discard(pkg.cpv)
1181 # Validate counters and timestamps.
1184 validation_keys = ["COUNTER", "_mtime_"]
1185 for cpv in current_cpv_set:
1187 pkg_hash_key = ("installed", root, cpv, "nomerge")
1188 pkg = pkg_vardb.get(pkg_hash_key)
1190 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1192 counter = long(counter)
# Drop stale snapshot entries whose counter/mtime no longer match.
1196 if counter != pkg.counter or \
1198 pkg_vardb.cpv_remove(pkg)
1199 aux_get_history.discard(pkg.cpv)
# Re-add any package missing from the snapshot, again keeping only the
# highest COUNTER per slot atom.
1203 pkg = self._pkg(cpv)
1205 other_counter = slot_counters.get(pkg.slot_atom)
1206 if other_counter is not None:
1207 if other_counter > pkg.counter:
1210 slot_counters[pkg.slot_atom] = pkg.counter
1211 pkg_vardb.cpv_inject(pkg)
1213 real_vardb.flush_cache()
# Build a Package instance for an installed cpv from the real vardb.
1215 def _pkg(self, cpv):
1216 root_config = self._root_config
1217 real_vardb = root_config.trees["vartree"].dbapi
1218 pkg = Package(cpv=cpv, installed=True,
1219 metadata=izip(self._db_keys,
1220 real_vardb.aux_get(cpv, self._db_keys)),
1221 root_config=root_config,
1222 type_name="installed")
# Normalize COUNTER to a string form of its integer value (try/except
# lines elided); the return is also elided.
1225 mycounter = long(pkg.metadata["COUNTER"])
1228 pkg.metadata["COUNTER"] = str(mycounter)
# Read and parse all update commands from $PORTDIR/profiles/updates.
# (Elided listing: the upd_commands initialization and return are among
# the missing lines.)
1232 def grab_global_updates(portdir):
1233 from portage.update import grab_updates, parse_updates
1234 updpath = os.path.join(portdir, "profiles", "updates")
1236 rawupdates = grab_updates(updpath)
# A missing updates directory is tolerated (handler body elided).
1237 except portage.exception.DirectoryNotFound:
1240 for mykey, mystat, mycontent in rawupdates:
# NOTE: `commands` here shadows the module-level `commands` import
# within this function's scope.
1241 commands, errors = parse_updates(mycontent)
1242 upd_commands.extend(commands)
# Apply profile update commands (e.g. package moves) to the dependency
# metadata of one package in `mydb`.  (Elided listing: the guard before
# aux_update is among the missing lines.)
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246 from portage.update import update_dbentries
1247 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249 updates = update_dbentries(mycommands, aux_dict)
1251 mydb.aux_update(mycpv, updates)
1253 def visible(pkgsettings, pkg):
1255 Check if a package is visible. This can raise an InvalidDependString
1256 exception if LICENSE is invalid.
1257 TODO: optionally generate a list of masking reasons
1259 @returns: True if the package is visible, False otherwise.
1261 if not pkg.metadata["SLOT"]:
1263 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264 if not pkgsettings._accept_chost(pkg):
1266 eapi = pkg.metadata["EAPI"]
1267 if not portage.eapi_is_supported(eapi):
1269 if not pkg.installed:
1270 if portage._eapi_is_deprecated(eapi):
1272 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1274 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1276 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1279 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1281 except portage.exception.InvalidDependString:
1285 def get_masking_status(pkg, pkgsettings, root_config):
1287 mreasons = portage.getmaskingstatus(
1288 pkg, settings=pkgsettings,
1289 portdb=root_config.trees["porttree"].dbapi)
1291 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292 if not pkgsettings._accept_chost(pkg):
1293 mreasons.append("CHOST: %s" % \
1294 pkg.metadata["CHOST"])
1296 if not pkg.metadata["SLOT"]:
1297 mreasons.append("invalid: SLOT is undefined")
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302 db, pkg_type, built, installed, db_keys):
1305 metadata = dict(izip(db_keys,
1306 db.aux_get(cpv, db_keys)))
1309 if metadata and not built:
1310 pkgsettings.setcpv(cpv, mydb=metadata)
1311 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312 if metadata is None:
1313 mreasons = ["corruption"]
1315 pkg = Package(type_name=pkg_type, root_config=root_config,
1316 cpv=cpv, built=built, installed=installed, metadata=metadata)
1317 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318 return metadata, mreasons
1320 def show_masked_packages(masked_packages):
1321 shown_licenses = set()
1322 shown_comments = set()
1323 # Maybe there is both an ebuild and a binary. Only
1324 # show one of them to avoid redundant appearance.
1326 have_eapi_mask = False
1327 for (root_config, pkgsettings, cpv,
1328 metadata, mreasons) in masked_packages:
1329 if cpv in shown_cpvs:
1332 comment, filename = None, None
1333 if "package.mask" in mreasons:
1334 comment, filename = \
1335 portage.getmaskingreason(
1336 cpv, metadata=metadata,
1337 settings=pkgsettings,
1338 portdb=root_config.trees["porttree"].dbapi,
1339 return_location=True)
1340 missing_licenses = []
1342 if not portage.eapi_is_supported(metadata["EAPI"]):
1343 have_eapi_mask = True
1345 missing_licenses = \
1346 pkgsettings._getMissingLicenses(
1348 except portage.exception.InvalidDependString:
1349 # This will have already been reported
1350 # above via mreasons.
1353 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354 if comment and comment not in shown_comments:
1357 shown_comments.add(comment)
1358 portdb = root_config.trees["porttree"].dbapi
1359 for l in missing_licenses:
1360 l_path = portdb.findLicensePath(l)
1361 if l in shown_licenses:
1363 msg = ("A copy of the '%s' license" + \
1364 " is located at '%s'.") % (l, l_path)
1367 shown_licenses.add(l)
1368 return have_eapi_mask
1370 class Task(SlotObject):
1371 __slots__ = ("_hash_key", "_hash_value")
1373 def _get_hash_key(self):
1374 hash_key = getattr(self, "_hash_key", None)
1375 if hash_key is None:
1376 raise NotImplementedError(self)
def __eq__(self, other):
	"""Compare equal whenever this task's hash key equals *other*.

	Delegating to the hash key lets a Task compare equal to plain
	tuples carrying the same key.
	"""
	key = self._get_hash_key()
	return key == other
def __ne__(self, other):
	"""Inverse of __eq__: unequal whenever the hash key differs from *other*."""
	key = self._get_hash_key()
	return key != other
1386 hash_value = getattr(self, "_hash_value", None)
1387 if hash_value is None:
1388 self._hash_value = hash(self._get_hash_key())
1389 return self._hash_value
1392 return len(self._get_hash_key())
def __getitem__(self, key):
	"""Support tuple-style indexing by delegating to the hash key."""
	hash_key = self._get_hash_key()
	return hash_key[key]
1398 return iter(self._get_hash_key())
def __contains__(self, key):
	"""Membership test delegated to the hash key tuple."""
	hash_key = self._get_hash_key()
	return key in hash_key
1404 return str(self._get_hash_key())
1406 class Blocker(Task):
1408 __hash__ = Task.__hash__
1409 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
# Blocker constructor: besides the SlotObject-style keyword assignment done
# by Task.__init__, cache the category/package key of the blocker atom.
# NOTE(review): leading integers are fused original line numbers from a
# mangled extraction; preserved byte-for-byte.
1411 def __init__(self, **kwargs):
1412 Task.__init__(self, **kwargs)
# dep_getkey() reduces the atom to its cat/pkg portion for quick lookups.
1413 self.cp = portage.dep_getkey(self.atom)
1415 def _get_hash_key(self):
1416 hash_key = getattr(self, "_hash_key", None)
1417 if hash_key is None:
1419 ("blocks", self.root, self.atom, self.eapi)
1420 return self._hash_key
1422 class Package(Task):
1424 __hash__ = Task.__hash__
1425 __slots__ = ("built", "cpv", "depth",
1426 "installed", "metadata", "onlydeps", "operation",
1427 "root_config", "type_name",
1428 "category", "counter", "cp", "cpv_split",
1429 "inherited", "iuse", "mtime",
1430 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1433 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434 "INHERITED", "IUSE", "KEYWORDS",
1435 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
# Package constructor: derive and cache all convenience attributes from
# the cpv string and the supplied metadata.
# NOTE(review): leading integers are fused original line numbers from a
# mangled extraction; preserved byte-for-byte. Statement order matters
# below (slot_atom reads self.cp; pv_split reads self.cpv_split).
1438 def __init__(self, **kwargs):
1439 Task.__init__(self, **kwargs)
1440 self.root = self.root_config.root
# Wrap metadata so that assignments to wrapped keys (USE, IUSE, SLOT, ...)
# are mirrored onto this Package's attributes (see _PackageMetadataWrapper).
1441 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442 self.cp = portage.cpv_getkey(self.cpv)
1443 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444 self.category, self.pf = portage.catsplit(self.cpv)
1445 self.cpv_split = portage.catpkgsplit(self.cpv)
# Skip the first element of the catpkgsplit result -- presumably the
# category, leaving the version components; confirm with catpkgsplit docs.
1446 self.pv_split = self.cpv_split[1:]
1450 __slots__ = ("__weakref__", "enabled")
1452 def __init__(self, use):
# Immutable snapshot of the enabled USE flags.
1453 self.enabled = frozenset(use)
1455 class _iuse(object):
1457 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1459 def __init__(self, tokens, iuse_implicit):
1460 self.tokens = tuple(tokens)
1461 self.iuse_implicit = iuse_implicit
1468 enabled.append(x[1:])
1470 disabled.append(x[1:])
1473 self.enabled = frozenset(enabled)
1474 self.disabled = frozenset(disabled)
1475 self.all = frozenset(chain(enabled, disabled, other))
1477 def __getattribute__(self, name):
1480 return object.__getattribute__(self, "regex")
1481 except AttributeError:
1482 all = object.__getattribute__(self, "all")
1483 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484 # Escape anything except ".*" which is supposed
1485 # to pass through from _get_implicit_iuse()
1486 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487 regex = "^(%s)$" % "|".join(regex)
1488 regex = regex.replace("\\.\\*", ".*")
1489 self.regex = re.compile(regex)
1490 return object.__getattribute__(self, name)
1492 def _get_hash_key(self):
1493 hash_key = getattr(self, "_hash_key", None)
1494 if hash_key is None:
1495 if self.operation is None:
1496 self.operation = "merge"
1497 if self.onlydeps or self.installed:
1498 self.operation = "nomerge"
1500 (self.type_name, self.root, self.cpv, self.operation)
1501 return self._hash_key
1503 def __lt__(self, other):
1504 if other.cp != self.cp:
1506 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1510 def __le__(self, other):
1511 if other.cp != self.cp:
1513 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1517 def __gt__(self, other):
1518 if other.cp != self.cp:
1520 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1524 def __ge__(self, other):
1525 if other.cp != self.cp:
1527 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Build the full set of metadata keys handled by the wrapper base class
# created below.
# NOTE(review): leading integers are fused original line numbers from a
# mangled extraction; the gap at 1535 was presumably a blank line.
1531 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1532 if not x.startswith("UNUSED_"))
# CDEPEND is deliberately excluded -- the reason is not visible here.
1533 _all_metadata_keys.discard("CDEPEND")
1534 _all_metadata_keys.update(Package.metadata_keys)
1536 from portage.cache.mappings import slot_dict_class
# Slot-based dict class keyed on the metadata keys computed above; used as
# the base of _PackageMetadataWrapper.
1537 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1539 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1541 Detect metadata updates and synchronize Package attributes.
1544 __slots__ = ("_pkg",)
1545 _wrapped_keys = frozenset(
1546 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1548 def __init__(self, pkg, metadata):
1549 _PackageMetadataWrapperBase.__init__(self)
1551 self.update(metadata)
def __setitem__(self, k, v):
	"""Store the value, then synchronize the owning Package.

	Keys listed in _wrapped_keys each have a corresponding
	_set_<key>() hook on this class that mirrors the new value
	onto self._pkg.
	"""
	_PackageMetadataWrapperBase.__setitem__(self, k, v)
	if k not in self._wrapped_keys:
		return
	hook = getattr(self, "_set_" + k.lower())
	hook(k, v)
def _set_inherited(self, k, v):
	"""Mirror the INHERITED value onto the Package as a frozenset."""
	value = v
	if isinstance(value, basestring):
		# Raw metadata arrives as a whitespace-separated string.
		value = frozenset(value.split())
	self._pkg.inherited = value
1563 def _set_iuse(self, k, v):
# Rebuild the Package's parsed IUSE view from the raw IUSE string,
# folding in the profile's implicit IUSE flags.
1564 self._pkg.iuse = self._pkg._iuse(
1565 v.split(), self._pkg.root_config.iuse_implicit)
1567 def _set_slot(self, k, v):
1570 def _set_use(self, k, v):
# Mirror the raw USE string onto the Package as a parsed _use object.
1571 self._pkg.use = self._pkg._use(v.split())
1573 def _set_counter(self, k, v):
1574 if isinstance(v, basestring):
1579 self._pkg.counter = v
1581 def _set__mtime_(self, k, v):
1582 if isinstance(v, basestring):
1589 class EbuildFetchonly(SlotObject):
1591 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1594 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1595 # ensuring sane $PWD (bug #239560) and storing elog
1596 # messages. Use a private temp directory, in order
1597 # to avoid locking the main one.
1598 settings = self.settings
1599 global_tmpdir = settings["PORTAGE_TMPDIR"]
1600 from tempfile import mkdtemp
1602 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1604 if e.errno != portage.exception.PermissionDenied.errno:
1606 raise portage.exception.PermissionDenied(global_tmpdir)
1607 settings["PORTAGE_TMPDIR"] = private_tmpdir
1608 settings.backup_changes("PORTAGE_TMPDIR")
1610 retval = self._execute()
1612 settings["PORTAGE_TMPDIR"] = global_tmpdir
1613 settings.backup_changes("PORTAGE_TMPDIR")
1614 shutil.rmtree(private_tmpdir)
1618 settings = self.settings
1620 root_config = pkg.root_config
1621 portdb = root_config.trees["porttree"].dbapi
1622 ebuild_path = portdb.findname(pkg.cpv)
1623 settings.setcpv(pkg)
1624 debug = settings.get("PORTAGE_DEBUG") == "1"
1625 use_cache = 1 # always true
1626 portage.doebuild_environment(ebuild_path, "fetch",
1627 root_config.root, settings, debug, use_cache, portdb)
1628 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1630 retval = portage.doebuild(ebuild_path, "fetch",
1631 self.settings["ROOT"], self.settings, debug=debug,
1632 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1633 mydbapi=portdb, tree="porttree")
1635 if retval != os.EX_OK:
1636 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1637 eerror(msg, phase="unpack", key=pkg.cpv)
1639 portage.elog.elog_process(self.pkg.cpv, self.settings)
1642 class PollConstants(object):
1645 Provides POLL* constants that are equivalent to those from the
1646 select module, for use by PollSelectAdapter.
1649 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1652 locals()[k] = getattr(select, k, v)
1656 class AsynchronousTask(SlotObject):
1658 Subclasses override _wait() and _poll() so that calls
1659 to public methods can be wrapped for implementing
1660 hooks such as exit listener notification.
1662 Sublasses should call self.wait() to notify exit listeners after
1663 the task is complete and self.returncode has been set.
1666 __slots__ = ("background", "cancelled", "returncode") + \
1667 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1671 Start an asynchronous task and then return as soon as possible.
1677 raise NotImplementedError(self)
1680 return self.returncode is None
1687 return self.returncode
1690 if self.returncode is None:
1693 return self.returncode
1696 return self.returncode
1699 self.cancelled = True
1702 def addStartListener(self, f):
1704 The function will be called with one argument, a reference to self.
1706 if self._start_listeners is None:
1707 self._start_listeners = []
1708 self._start_listeners.append(f)
1710 def removeStartListener(self, f):
1711 if self._start_listeners is None:
1713 self._start_listeners.remove(f)
1715 def _start_hook(self):
1716 if self._start_listeners is not None:
1717 start_listeners = self._start_listeners
1718 self._start_listeners = None
1720 for f in start_listeners:
1723 def addExitListener(self, f):
1725 The function will be called with one argument, a reference to self.
1727 if self._exit_listeners is None:
1728 self._exit_listeners = []
1729 self._exit_listeners.append(f)
1731 def removeExitListener(self, f):
1732 if self._exit_listeners is None:
1733 if self._exit_listener_stack is not None:
1734 self._exit_listener_stack.remove(f)
1736 self._exit_listeners.remove(f)
1738 def _wait_hook(self):
1740 Call this method after the task completes, just before returning
1741 the returncode from wait() or poll(). This hook is
1742 used to trigger exit listeners when the returncode first
1745 if self.returncode is not None and \
1746 self._exit_listeners is not None:
1748 # This prevents recursion, in case one of the
1749 # exit handlers triggers this method again by
1750 # calling wait(). Use a stack that gives
1751 # removeExitListener() an opportunity to consume
1752 # listeners from the stack, before they can get
1753 # called below. This is necessary because a call
1754 # to one exit listener may result in a call to
1755 # removeExitListener() for another listener on
1756 # the stack. That listener needs to be removed
1757 # from the stack since it would be inconsistent
1758 # to call it after it has been been passed into
1759 # removeExitListener().
1760 self._exit_listener_stack = self._exit_listeners
1761 self._exit_listeners = None
1763 self._exit_listener_stack.reverse()
1764 while self._exit_listener_stack:
1765 self._exit_listener_stack.pop()(self)
1767 class AbstractPollTask(AsynchronousTask):
1769 __slots__ = ("scheduler",) + \
1773 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1774 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1777 def _unregister(self):
1778 raise NotImplementedError(self)
1780 def _unregister_if_appropriate(self, event):
1781 if self._registered:
1782 if event & self._exceptional_events:
1785 elif event & PollConstants.POLLHUP:
1789 class PipeReader(AbstractPollTask):
1792 Reads output from one or more files and saves it in memory,
1793 for retrieval via the getvalue() method. This is driven by
1794 the scheduler's poll() loop, so it runs entirely within the
1798 __slots__ = ("input_files",) + \
1799 ("_read_data", "_reg_ids")
1802 self._reg_ids = set()
1803 self._read_data = []
1804 for k, f in self.input_files.iteritems():
1805 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1806 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1807 self._reg_ids.add(self.scheduler.register(f.fileno(),
1808 self._registered_events, self._output_handler))
1809 self._registered = True
1812 return self._registered
1815 if self.returncode is None:
1817 self.cancelled = True
1821 if self.returncode is not None:
1822 return self.returncode
1824 if self._registered:
1825 self.scheduler.schedule(self._reg_ids)
1828 self.returncode = os.EX_OK
1829 return self.returncode
1832 """Retrieve the entire contents"""
1833 return "".join(self._read_data)
1836 """Free the memory buffer."""
1837 self._read_data = None
1839 def _output_handler(self, fd, event):
1841 if event & PollConstants.POLLIN:
1843 for f in self.input_files.itervalues():
1844 if fd == f.fileno():
1847 buf = array.array('B')
1849 buf.fromfile(f, self._bufsize)
1854 self._read_data.append(buf.tostring())
1859 self._unregister_if_appropriate(event)
1860 return self._registered
1862 def _unregister(self):
1864 Unregister from the scheduler and close open files.
1867 self._registered = False
1869 if self._reg_ids is not None:
1870 for reg_id in self._reg_ids:
1871 self.scheduler.unregister(reg_id)
1872 self._reg_ids = None
1874 if self.input_files is not None:
1875 for f in self.input_files.itervalues():
1877 self.input_files = None
1879 class CompositeTask(AsynchronousTask):
1881 __slots__ = ("scheduler",) + ("_current_task",)
1884 return self._current_task is not None
1887 self.cancelled = True
1888 if self._current_task is not None:
1889 self._current_task.cancel()
1893 This does a loop calling self._current_task.poll()
1894 repeatedly as long as the value of self._current_task
1895 keeps changing. It calls poll() a maximum of one time
1896 for a given self._current_task instance. This is useful
1897 since calling poll() on a task can trigger advance to
1898 the next task could eventually lead to the returncode
1899 being set in cases when polling only a single task would
1900 not have the same effect.
1905 task = self._current_task
1906 if task is None or task is prev:
1907 # don't poll the same task more than once
1912 return self.returncode
1918 task = self._current_task
1920 # don't wait for the same task more than once
1923 # Before the task.wait() method returned, an exit
1924 # listener should have set self._current_task to either
1925 # a different task or None. Something is wrong.
1926 raise AssertionError("self._current_task has not " + \
1927 "changed since calling wait", self, task)
1931 return self.returncode
1933 def _assert_current(self, task):
1935 Raises an AssertionError if the given task is not the
1936 same one as self._current_task. This can be useful
1939 if task is not self._current_task:
1940 raise AssertionError("Unrecognized task: %s" % (task,))
1942 def _default_exit(self, task):
1944 Calls _assert_current() on the given task and then sets the
1945 composite returncode attribute if task.returncode != os.EX_OK.
1946 If the task failed then self._current_task will be set to None.
1947 Subclasses can use this as a generic task exit callback.
1950 @returns: The task.returncode attribute.
1952 self._assert_current(task)
1953 if task.returncode != os.EX_OK:
1954 self.returncode = task.returncode
1955 self._current_task = None
1956 return task.returncode
1958 def _final_exit(self, task):
1960 Assumes that task is the final task of this composite task.
1961 Calls _default_exit() and sets self.returncode to the task's
1962 returncode and sets self._current_task to None.
1964 self._default_exit(task)
1965 self._current_task = None
1966 self.returncode = task.returncode
1967 return self.returncode
1969 def _default_final_exit(self, task):
1971 This calls _final_exit() and then wait().
1973 Subclasses can use this as a generic final task exit callback.
1976 self._final_exit(task)
1979 def _start_task(self, task, exit_handler):
1981 Register exit handler for the given task, set it
1982 as self._current_task, and call task.start().
1984 Subclasses can use this as a generic way to start
1988 task.addExitListener(exit_handler)
1989 self._current_task = task
1992 class TaskSequence(CompositeTask):
1994 A collection of tasks that executes sequentially. Each task
1995 must have a addExitListener() method that can be used as
1996 a means to trigger movement from one task to the next.
1999 __slots__ = ("_task_queue",)
2001 def __init__(self, **kwargs):
# NOTE(review): calls AsynchronousTask.__init__ directly rather than
# CompositeTask.__init__ -- confirm this is intentional in the hierarchy.
2002 AsynchronousTask.__init__(self, **kwargs)
# FIFO of tasks awaiting execution; deque gives O(1) popleft().
2003 self._task_queue = deque()
2005 def add(self, task):
# Queue a task; per the class contract it runs sequentially after its
# predecessors finish.
2006 self._task_queue.append(task)
2009 self._start_next_task()
2012 self._task_queue.clear()
2013 CompositeTask.cancel(self)
2015 def _start_next_task(self):
# Pop the head of the queue and start it, wiring _task_exit_handler so
# the sequence advances when the task exits.
2016 self._start_task(self._task_queue.popleft(),
2017 self._task_exit_handler)
2019 def _task_exit_handler(self, task):
2020 if self._default_exit(task) != os.EX_OK:
2022 elif self._task_queue:
2023 self._start_next_task()
2025 self._final_exit(task)
2028 class SubProcess(AbstractPollTask):
2030 __slots__ = ("pid",) + \
2031 ("_files", "_reg_id")
2033 # A file descriptor is required for the scheduler to monitor changes from
2034 # inside a poll() loop. When logging is not enabled, create a pipe just to
2035 # serve this purpose alone.
2039 if self.returncode is not None:
2040 return self.returncode
2041 if self.pid is None:
2042 return self.returncode
2043 if self._registered:
2044 return self.returncode
2047 retval = os.waitpid(self.pid, os.WNOHANG)
2049 if e.errno != errno.ECHILD:
2052 retval = (self.pid, 1)
2054 if retval == (0, 0):
2056 self._set_returncode(retval)
2057 return self.returncode
2062 os.kill(self.pid, signal.SIGTERM)
2064 if e.errno != errno.ESRCH:
2068 self.cancelled = True
2069 if self.pid is not None:
2071 return self.returncode
2074 return self.pid is not None and \
2075 self.returncode is None
2079 if self.returncode is not None:
2080 return self.returncode
2082 if self._registered:
2083 self.scheduler.schedule(self._reg_id)
2085 if self.returncode is not None:
2086 return self.returncode
2089 wait_retval = os.waitpid(self.pid, 0)
2091 if e.errno != errno.ECHILD:
2094 self._set_returncode((self.pid, 1))
2096 self._set_returncode(wait_retval)
2098 return self.returncode
2100 def _unregister(self):
2102 Unregister from the scheduler and close open files.
2105 self._registered = False
2107 if self._reg_id is not None:
2108 self.scheduler.unregister(self._reg_id)
2111 if self._files is not None:
2112 for f in self._files.itervalues():
2116 def _set_returncode(self, wait_retval):
2118 retval = wait_retval[1]
2120 if retval != os.EX_OK:
2122 retval = (retval & 0xff) << 8
2124 retval = retval >> 8
2126 self.returncode = retval
2128 class SpawnProcess(SubProcess):
2131 Constructor keyword args are passed into portage.process.spawn().
2132 The required "args" keyword argument will be passed as the first
2136 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2137 "uid", "gid", "groups", "umask", "logfile",
2138 "path_lookup", "pre_exec")
2140 __slots__ = ("args",) + \
2143 _file_names = ("log", "process", "stdout")
2144 _files_dict = slot_dict_class(_file_names, prefix="")
2151 if self.fd_pipes is None:
2153 fd_pipes = self.fd_pipes
2154 fd_pipes.setdefault(0, sys.stdin.fileno())
2155 fd_pipes.setdefault(1, sys.stdout.fileno())
2156 fd_pipes.setdefault(2, sys.stderr.fileno())
2158 # flush any pending output
2159 for fd in fd_pipes.itervalues():
2160 if fd == sys.stdout.fileno():
2162 if fd == sys.stderr.fileno():
2165 logfile = self.logfile
2166 self._files = self._files_dict()
2169 master_fd, slave_fd = self._pipe(fd_pipes)
2170 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2171 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2174 fd_pipes_orig = fd_pipes.copy()
2176 # TODO: Use job control functions like tcsetpgrp() to control
2177 # access to stdin. Until then, use /dev/null so that any
2178 # attempts to read from stdin will immediately return EOF
2179 # instead of blocking indefinitely.
2180 null_input = open('/dev/null', 'rb')
2181 fd_pipes[0] = null_input.fileno()
2183 fd_pipes[0] = fd_pipes_orig[0]
2185 files.process = os.fdopen(master_fd, 'r')
2186 if logfile is not None:
2188 fd_pipes[1] = slave_fd
2189 fd_pipes[2] = slave_fd
2191 files.log = open(logfile, "a")
2192 portage.util.apply_secpass_permissions(logfile,
2193 uid=portage.portage_uid, gid=portage.portage_gid,
2196 if not self.background:
2197 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2199 output_handler = self._output_handler
2203 # Create a dummy pipe so the scheduler can monitor
2204 # the process from inside a poll() loop.
2205 fd_pipes[self._dummy_pipe_fd] = slave_fd
2207 fd_pipes[1] = slave_fd
2208 fd_pipes[2] = slave_fd
2209 output_handler = self._dummy_handler
2212 for k in self._spawn_kwarg_names:
2213 v = getattr(self, k)
2217 kwargs["fd_pipes"] = fd_pipes
2218 kwargs["returnpid"] = True
2219 kwargs.pop("logfile", None)
2221 self._reg_id = self.scheduler.register(files.process.fileno(),
2222 self._registered_events, output_handler)
2223 self._registered = True
2225 retval = self._spawn(self.args, **kwargs)
2228 if null_input is not None:
2231 if isinstance(retval, int):
2234 self.returncode = retval
2238 self.pid = retval[0]
2239 portage.process.spawned_pids.remove(self.pid)
2241 def _pipe(self, fd_pipes):
2243 @type fd_pipes: dict
2244 @param fd_pipes: pipes from which to copy terminal size if desired.
2248 def _spawn(self, args, **kwargs):
# Default spawn hook: delegate to portage.process.spawn(). Subclasses
# (e.g. MiscFunctionsProcess) override this to customize process creation.
2249 return portage.process.spawn(args, **kwargs)
2251 def _output_handler(self, fd, event):
2253 if event & PollConstants.POLLIN:
2256 buf = array.array('B')
2258 buf.fromfile(files.process, self._bufsize)
2263 if not self.background:
2264 buf.tofile(files.stdout)
2265 files.stdout.flush()
2266 buf.tofile(files.log)
2272 self._unregister_if_appropriate(event)
2273 return self._registered
2275 def _dummy_handler(self, fd, event):
2277 This method is mainly interested in detecting EOF, since
2278 the only purpose of the pipe is to allow the scheduler to
2279 monitor the process from inside a poll() loop.
2282 if event & PollConstants.POLLIN:
2284 buf = array.array('B')
2286 buf.fromfile(self._files.process, self._bufsize)
2296 self._unregister_if_appropriate(event)
2297 return self._registered
2299 class MiscFunctionsProcess(SpawnProcess):
2301 Spawns misc-functions.sh with an existing ebuild environment.
2304 __slots__ = ("commands", "phase", "pkg", "settings")
2307 settings = self.settings
2308 settings.pop("EBUILD_PHASE", None)
2309 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2310 misc_sh_binary = os.path.join(portage_bin_path,
2311 os.path.basename(portage.const.MISC_SH_BINARY))
2313 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2314 self.logfile = settings.get("PORTAGE_LOG_FILE")
2316 portage._doebuild_exit_status_unlink(
2317 settings.get("EBUILD_EXIT_STATUS_FILE"))
2319 SpawnProcess._start(self)
def _spawn(self, args, **kwargs):
	"""Run the already-quoted command line through portage.spawn().

	The argument list is joined into a single command string;
	PORTAGE_DEBUG=1 in the settings enables debug tracing.
	"""
	settings = self.settings
	command = " ".join(args)
	debug_enabled = settings.get("PORTAGE_DEBUG") == "1"
	return portage.spawn(command, settings,
		debug=debug_enabled, **kwargs)
2327 def _set_returncode(self, wait_retval):
2328 SpawnProcess._set_returncode(self, wait_retval)
# Fold the ebuild exit-status check into the final returncode --
# presumably so a phase that exited without recording success is reported
# as a failure; confirm against portage's doebuild helpers.
2329 self.returncode = portage._doebuild_exit_status_check_and_log(
2330 self.settings, self.phase, self.returncode)
2332 class EbuildFetcher(SpawnProcess):
2334 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2339 root_config = self.pkg.root_config
2340 portdb = root_config.trees["porttree"].dbapi
2341 ebuild_path = portdb.findname(self.pkg.cpv)
2342 settings = self.config_pool.allocate()
2343 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2344 self._build_dir.lock()
2345 self._build_dir.clean()
2346 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2347 if self.logfile is None:
2348 self.logfile = settings.get("PORTAGE_LOG_FILE")
2354 # If any incremental variables have been overridden
2355 # via the environment, those values need to be passed
2356 # along here so that they are correctly considered by
2357 # the config instance in the subproccess.
2358 fetch_env = os.environ.copy()
2360 fetch_env["PORTAGE_NICENESS"] = "0"
2362 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2364 ebuild_binary = os.path.join(
2365 settings["PORTAGE_BIN_PATH"], "ebuild")
2367 fetch_args = [ebuild_binary, ebuild_path, phase]
2368 debug = settings.get("PORTAGE_DEBUG") == "1"
2370 fetch_args.append("--debug")
2372 self.args = fetch_args
2373 self.env = fetch_env
2374 SpawnProcess._start(self)
2376 def _pipe(self, fd_pipes):
2377 """When appropriate, use a pty so that fetcher progress bars,
2378 like wget has, will work properly."""
2379 if self.background or not sys.stdout.isatty():
2380 # When the output only goes to a log file,
2381 # there's no point in creating a pty.
2383 stdout_pipe = fd_pipes.get(1)
2384 got_pty, master_fd, slave_fd = \
2385 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2386 return (master_fd, slave_fd)
2388 def _set_returncode(self, wait_retval):
2389 SpawnProcess._set_returncode(self, wait_retval)
2390 # Collect elog messages that might have been
2391 # created by the pkg_nofetch phase.
2392 if self._build_dir is not None:
2393 # Skip elog messages for prefetch, in order to avoid duplicates.
2394 if not self.prefetch and self.returncode != os.EX_OK:
2396 if self.logfile is not None:
2398 elog_out = open(self.logfile, 'a')
2399 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2400 if self.logfile is not None:
2401 msg += ", Log file:"
2402 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2403 if self.logfile is not None:
2404 eerror(" '%s'" % (self.logfile,),
2405 phase="unpack", key=self.pkg.cpv, out=elog_out)
2406 if elog_out is not None:
2408 if not self.prefetch:
2409 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2410 features = self._build_dir.settings.features
2411 if self.returncode == os.EX_OK:
2412 self._build_dir.clean()
2413 self._build_dir.unlock()
2414 self.config_pool.deallocate(self._build_dir.settings)
2415 self._build_dir = None
2417 class EbuildBuildDir(SlotObject):
2419 __slots__ = ("dir_path", "pkg", "settings",
2420 "locked", "_catdir", "_lock_obj")
2422 def __init__(self, **kwargs):
2423 SlotObject.__init__(self, **kwargs)
2428 This raises an AlreadyLocked exception if lock() is called
2429 while a lock is already held. In order to avoid this, call
2430 unlock() or check whether the "locked" attribute is True
2431 or False before calling lock().
2433 if self._lock_obj is not None:
2434 raise self.AlreadyLocked((self._lock_obj,))
2436 dir_path = self.dir_path
2437 if dir_path is None:
2438 root_config = self.pkg.root_config
2439 portdb = root_config.trees["porttree"].dbapi
2440 ebuild_path = portdb.findname(self.pkg.cpv)
2441 settings = self.settings
2442 settings.setcpv(self.pkg)
2443 debug = settings.get("PORTAGE_DEBUG") == "1"
2444 use_cache = 1 # always true
2445 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2446 self.settings, debug, use_cache, portdb)
2447 dir_path = self.settings["PORTAGE_BUILDDIR"]
2449 catdir = os.path.dirname(dir_path)
2450 self._catdir = catdir
2452 portage.util.ensure_dirs(os.path.dirname(catdir),
2453 gid=portage.portage_gid,
2457 catdir_lock = portage.locks.lockdir(catdir)
2458 portage.util.ensure_dirs(catdir,
2459 gid=portage.portage_gid,
2461 self._lock_obj = portage.locks.lockdir(dir_path)
2463 self.locked = self._lock_obj is not None
2464 if catdir_lock is not None:
2465 portage.locks.unlockdir(catdir_lock)
2468 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2469 by keepwork or keeptemp in FEATURES."""
2470 settings = self.settings
2471 features = settings.features
2472 if not ("keepwork" in features or "keeptemp" in features):
2474 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2475 except EnvironmentError, e:
2476 if e.errno != errno.ENOENT:
2481 if self._lock_obj is None:
2484 portage.locks.unlockdir(self._lock_obj)
2485 self._lock_obj = None
2488 catdir = self._catdir
2491 catdir_lock = portage.locks.lockdir(catdir)
2497 if e.errno not in (errno.ENOENT,
2498 errno.ENOTEMPTY, errno.EEXIST):
2501 portage.locks.unlockdir(catdir_lock)
2503 class AlreadyLocked(portage.exception.PortageException):
class EbuildBuild(CompositeTask):
	"""Composite task that builds one ebuild: synchronizes with any
	background prefetcher, fetches sources, then compiles (and, when
	requested, also builds a binary package).

	NOTE(review): this file is an elided extract -- interior lines of the
	methods below are missing, so some branches appear empty.  Comments
	marked [elided] flag those gaps; they are not real code.
	"""

	__slots__ = ("args_set", "config_pool", "find_blockers",
		"ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
		"prefetcher", "settings", "world_atom") + \
		("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")

	# [elided] method header missing from this view (presumably _start).
		logger = self.logger
		settings = self.settings
		world_atom = self.world_atom
		root_config = pkg.root_config
		# NOTE(review): assumes pkg/tree were bound on elided lines above.
		portdb = root_config.trees[tree].dbapi
		settings.setcpv(pkg)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
		ebuild_path = portdb.findname(self.pkg.cpv)
		self._ebuild_path = ebuild_path

		# Synchronize with a background prefetcher, if one is running.
		prefetcher = self.prefetcher
		if prefetcher is None:
			# [elided] branch body missing.
		elif not prefetcher.isAlive():
			# [elided] branch body missing.
		elif prefetcher.poll() is None:
			waiting_msg = "Fetching files " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				# [elided] final string segment missing.
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			# Defer the rest of _start until the prefetcher finishes.
			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			# [elided] early return missing.

		self._prefetch_exit(prefetcher)

	def _prefetch_exit(self, prefetcher):
		# [elided] several lines missing (opts/pkg bindings, branch
		# headers); the two fetcher constructions below belong to
		# different branches (pretend/fetchonly vs. normal build).
		settings = self.settings
		fetcher = EbuildFetchonly(
			fetch_all=opts.fetch_all_uri,
			pkg=pkg, pretend=opts.pretend,
			# [elided] remaining constructor args missing.
		retval = fetcher.execute()
		self.returncode = retval
		# [elided] intervening control flow missing.
		fetcher = EbuildFetcher(config_pool=self.config_pool,
			fetchall=opts.fetch_all_uri,
			fetchonly=opts.fetchonly,
			background=self.background,
			pkg=pkg, scheduler=self.scheduler)
		self._start_task(fetcher, self._fetch_exit)

	def _fetch_exit(self, fetcher):
		# [elided] opts binding and branch headers missing; the three
		# fetch_failed assignments below belong to different branches.
		fetch_failed = False
		fetch_failed = self._final_exit(fetcher) != os.EX_OK
		fetch_failed = self._default_exit(fetcher) != os.EX_OK

		# Keep the fetch log around for post-mortem when the fetch failed.
		if fetch_failed and fetcher.logfile is not None and \
			os.path.exists(fetcher.logfile):
			self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile

		if not fetch_failed and fetcher.logfile is not None:
			# Fetch was successful, so remove the fetch log.
			os.unlink(fetcher.logfile)

		if fetch_failed or opts.fetchonly:
			# [elided] branch body missing.

	# [elided] method header missing from this view (presumably _build).
		logger = self.logger
		pkg_count = self.pkg_count
		scheduler = self.scheduler
		settings = self.settings
		features = settings.features
		ebuild_path = self._ebuild_path
		system_set = pkg.root_config.sets["system"]

		self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
		self._build_dir.lock()

		# Cleaning is triggered before the setup
		# phase, in portage.doebuild().
		msg = " === (%s of %s) Cleaning (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Clean" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		#buildsyspkg: Check if we need to _force_ binary package creation
		self._issyspkg = "buildsyspkg" in features and \
			system_set.findAtomForPackage(pkg) and \
			# [elided] final condition term missing.

		if opts.buildpkg or self._issyspkg:
			self._buildpkg = True
			msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)
		# [elided] else header missing for the non-buildpkg branch below.
			msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)

		build = EbuildExecuter(background=self.background, pkg=pkg,
			scheduler=scheduler, settings=settings)
		self._start_task(build, self._build_exit)
2649 def _unlock_builddir(self):
2650 portage.elog.elog_process(self.pkg.cpv, self.settings)
2651 self._build_dir.unlock()
	def _build_exit(self, build):
		# Abort and release the build dir if any compile phase failed.
		if self._default_exit(build) != os.EX_OK:
			self._unlock_builddir()
			# [elided] wait/return missing.

		buildpkg = self._buildpkg
		# [elided] branch header missing.
		self._final_exit(build)
		# [elided] intervening lines missing.
		msg = ">>> This is a system package, " + \
			"let's pack a rescue tarball.\n"

		log_path = self.settings.get("PORTAGE_LOG_FILE")
		if log_path is not None:
			log_file = open(log_path, 'a')
			# [elided] write/close handling missing.

		if not self.background:
			portage.writemsg_stdout(msg, noiselevel=-1)

		packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
			scheduler=self.scheduler, settings=self.settings)

		self._start_task(packager, self._buildpkg_exit)

	def _buildpkg_exit(self, packager):
		"""
		Released build dir lock when there is a failure or
		when in buildpkgonly mode. Otherwise, the lock will
		be released when merge() is called.
		"""
		if self._default_exit(packager) != os.EX_OK:
			self._unlock_builddir()
			# [elided] wait/return missing.

		if self.opts.buildpkgonly:
			# Need to call "clean" phase for buildpkgonly mode
			portage.elog.elog_process(self.pkg.cpv, self.settings)
			# [elided] phase binding missing -- presumably "clean"; confirm.
			clean_phase = EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase,
				scheduler=self.scheduler, settings=self.settings,
				# [elided] remaining constructor args missing.
			self._start_task(clean_phase, self._clean_exit)
			# [elided] return missing.

		# Continue holding the builddir lock until
		# after the package has been installed.
		self._current_task = None
		self.returncode = packager.returncode
		# [elided] wait call missing.

	def _clean_exit(self, clean_phase):
		if self._final_exit(clean_phase) != os.EX_OK or \
			self.opts.buildpkgonly:
			self._unlock_builddir()
		# [elided] wait call missing.

	# [elided] method header missing (presumably def install(self)).
		"""
		Install the package and then clean up and release locks.
		Only call this after the build has completed successfully
		and neither fetchonly nor buildpkgonly mode are enabled.
		"""

		find_blockers = self.find_blockers
		ldpath_mtimes = self.ldpath_mtimes
		logger = self.logger
		pkg_count = self.pkg_count
		settings = self.settings
		world_atom = self.world_atom
		ebuild_path = self._ebuild_path
		# [elided] pkg/tree bindings missing.

		merge = EbuildMerge(find_blockers=self.find_blockers,
			ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
			pkg_count=pkg_count, pkg_path=ebuild_path,
			scheduler=self.scheduler,
			settings=settings, tree=tree, world_atom=world_atom)

		msg = " === (%s of %s) Merging (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval,
			pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Merge" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		# [elided] try header missing.
		rval = merge.execute()
		# [elided] finally header missing; lock release is cleanup.
		self._unlock_builddir()
		# [elided] return missing.
class EbuildExecuter(CompositeTask):
	"""Run the build phases of one ebuild in sequence:
	clean -> setup -> unpack -> (prepare, configure,) compile, test,
	install, with the prepare/configure phases gated on EAPI.

	NOTE(review): elided extract -- lines marked [elided] are missing
	from this view.
	"""

	__slots__ = ("pkg", "scheduler", "settings") + ("_tree",)

	_phases = ("prepare", "configure", "compile", "test", "install")

	# Eclasses whose ebuilds fetch at unpack time and therefore need
	# serialized $DISTDIR access (see _setup_exit below).
	_live_eclasses = frozenset([
		# [elided] eclass name list missing.

	# [elided] method header missing (presumably _start); pkg/phase
	# bindings also missing (phase presumably "clean").
		self._tree = "porttree"
		clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
			scheduler=self.scheduler, settings=self.settings, tree=self._tree)
		self._start_task(clean_phase, self._clean_phase_exit)

	def _clean_phase_exit(self, clean_phase):

		if self._default_exit(clean_phase) != os.EX_OK:
			# [elided] wait/return missing.

		scheduler = self.scheduler
		settings = self.settings
		# [elided] pkg/cleanup bindings missing.

		# This initializes PORTAGE_LOG_FILE.
		portage.prepare_build_dirs(pkg.root, settings, cleanup)

		setup_phase = EbuildPhase(background=self.background,
			pkg=pkg, phase="setup", scheduler=scheduler,
			settings=settings, tree=self._tree)

		setup_phase.addExitListener(self._setup_exit)
		self._current_task = setup_phase
		self.scheduler.scheduleSetup(setup_phase)

	def _setup_exit(self, setup_phase):

		if self._default_exit(setup_phase) != os.EX_OK:
			# [elided] wait/return missing.

		unpack_phase = EbuildPhase(background=self.background,
			pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
			settings=self.settings, tree=self._tree)

		if self._live_eclasses.intersection(self.pkg.inherited):
			# Serialize $DISTDIR access for live ebuilds since
			# otherwise they can interfere with each other.
			unpack_phase.addExitListener(self._unpack_exit)
			self._current_task = unpack_phase
			self.scheduler.scheduleUnpack(unpack_phase)
		# [elided] else header missing for the non-live path below.
			self._start_task(unpack_phase, self._unpack_exit)

	def _unpack_exit(self, unpack_phase):

		if self._default_exit(unpack_phase) != os.EX_OK:
			# [elided] wait/return missing.

		ebuild_phases = TaskSequence(scheduler=self.scheduler)

		# [elided] pkg binding missing.
		phases = self._phases
		eapi = pkg.metadata["EAPI"]
		if eapi in ("0", "1", "2_pre1"):
			# skip src_prepare and src_configure
			# [elided] phases slice missing.
		elif eapi in ("2_pre2",):
			# [elided] branch body missing.

		for phase in phases:
			ebuild_phases.add(EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase, scheduler=self.scheduler,
				settings=self.settings, tree=self._tree))

		self._start_task(ebuild_phases, self._default_final_exit)
class EbuildMetadataPhase(SubProcess):

	"""
	Asynchronous interface for the ebuild "depend" phase which is
	used to extract metadata from the ebuild.
	"""

	__slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
		"ebuild_mtime", "portdb", "repo_path", "settings") + \
		# [elided] private slots tuple missing.

	_file_names = ("ebuild",)
	_files_dict = slot_dict_class(_file_names, prefix="")

	# [elided] method header missing (presumably _start).
		settings = self.settings
		ebuild_path = self.ebuild_path
		debug = settings.get("PORTAGE_DEBUG") == "1"

		# Copy caller-supplied fd mapping so defaults can be filled in.
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		# [elided] else branch (fresh dict) missing.
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				# [elided] flush call missing.
			if fd == sys.stderr.fileno():
				# [elided] flush call missing.

		fd_pipes_orig = fd_pipes.copy()
		self._files = self._files_dict()

		# Non-blocking read end so the scheduler poll loop never stalls.
		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = os.fdopen(master_fd, 'r')
		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings["ROOT"], settings, debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		if isinstance(retval, int):
			# doebuild failed before spawning
			self.returncode = retval
			# [elided] cleanup/wait lines missing.

		# Child pid is managed here, not by portage's global pid list.
		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

	def _output_handler(self, fd, event):

		# Accumulate raw "depend" output; an empty read means EOF.
		if event & PollConstants.POLLIN:
			self._raw_metadata.append(self._files.ebuild.read())
			if not self._raw_metadata[-1]:
				# [elided] unregister/wait lines missing.

		self._unregister_if_appropriate(event)
		return self._registered

	def _set_returncode(self, wait_retval):
		SubProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			metadata_lines = "".join(self._raw_metadata).splitlines()
			if len(portage.auxdbkeys) != len(metadata_lines):
				# Don't trust bash's returncode if the
				# number of lines is incorrect.
				# [elided] returncode override / else header missing.
			metadata = izip(portage.auxdbkeys, metadata_lines)
			self.metadata_callback(self.cpv, self.ebuild_path,
				self.repo_path, metadata, self.ebuild_mtime)
class EbuildProcess(SpawnProcess):
	"""Spawns a single ebuild phase via portage.doebuild()."""

	__slots__ = ("phase", "pkg", "settings", "tree")

	# [elided] method header missing (presumably _start).
		# Don't open the log file during the clean phase since the
		# open file can result in an nfs lock on $T/build.log which
		# prevents the clean phase from removing $T.
		if self.phase not in ("clean", "cleanrm"):
			self.logfile = self.settings.get("PORTAGE_LOG_FILE")
		SpawnProcess._start(self)
2958 def _pipe(self, fd_pipes):
2959 stdout_pipe = fd_pipes.get(1)
2960 got_pty, master_fd, slave_fd = \
2961 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2962 return (master_fd, slave_fd)
	def _spawn(self, args, **kwargs):
		"""Invoke portage.doebuild() for self.phase; kwargs are forwarded
		(e.g. returnpid/fd_pipes from SpawnProcess)."""
		# [elided] tree binding missing (presumably tree = self.tree).
		root_config = self.pkg.root_config
		mydbapi = root_config.trees[tree].dbapi
		settings = self.settings
		ebuild_path = settings["EBUILD"]
		debug = settings.get("PORTAGE_DEBUG") == "1"

		rval = portage.doebuild(ebuild_path, self.phase,
			root_config.root, settings, debug,
			mydbapi=mydbapi, tree=tree, **kwargs)
		# [elided] return missing.
2979 def _set_returncode(self, wait_retval):
2980 SpawnProcess._set_returncode(self, wait_retval)
2982 if self.phase not in ("clean", "cleanrm"):
2983 self.returncode = portage._doebuild_exit_status_check_and_log(
2984 self.settings, self.phase, self.returncode)
2986 if self.phase == "test" and self.returncode != os.EX_OK and \
2987 "test-fail-continue" in self.settings.features:
2988 self.returncode = os.EX_OK
2990 portage._post_phase_userpriv_perms(self.settings)
class EbuildPhase(CompositeTask):
	"""Run one ebuild phase, then its registered post-phase commands.

	NOTE(review): elided extract -- [elided] marks lines missing from
	this view.
	"""

	__slots__ = ("background", "pkg", "phase",
		"scheduler", "settings", "tree")

	_post_phase_cmds = portage._post_phase_cmds

	# [elided] method header missing (presumably _start).
		ebuild_process = EbuildProcess(background=self.background,
			pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
			settings=self.settings, tree=self.tree)

		self._start_task(ebuild_process, self._ebuild_exit)

	def _ebuild_exit(self, ebuild_process):

		# After install, scan the build log for QA problems, writing to
		# the log file when running in the background.
		if self.phase == "install":
			# [elided] out/log_file initialization missing.
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			if self.background and log_path is not None:
				log_file = open(log_path, 'a')
				# [elided] out binding missing.
			# [elided] try header missing.
			portage._check_build_log(self.settings, out=out)
			# [elided] finally header missing.
			if log_file is not None:
				# [elided] close call missing.

		if self._default_exit(ebuild_process) != os.EX_OK:
			# [elided] wait/return missing.

		settings = self.settings

		if self.phase == "install":
			portage._post_src_install_uid_fix(settings)

		post_phase_cmds = self._post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
				scheduler=self.scheduler, settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			# [elided] return missing.

		self.returncode = ebuild_process.returncode
		self._current_task = None
		# [elided] wait call missing.

	def _post_phase_exit(self, post_phase):
		if self._final_exit(post_phase) != os.EX_OK:
			writemsg("!!! post %s failed; exiting.\n" % self.phase,
				# [elided] noiselevel arg missing.
		self._current_task = None
		# [elided] wait call missing.
class EbuildBinpkg(EbuildProcess):
	"""
	Runs the "package" phase to produce a .tbz2 binary package.
	This assumes that src_install() has successfully completed.
	"""
	__slots__ = ("_binpkg_tmpfile",)

	# [elided] method header missing (presumably _start).
		self.phase = "package"
		self.tree = "porttree"
		# [elided] pkg binding missing.
		root_config = pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		bintree = root_config.trees["bintree"]
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"

		# Write to a pid-suffixed temp file; injected into the bintree
		# only after the phase succeeds (see _set_returncode).
		bintree.prevent_collision(pkg.cpv)
		binpkg_tmpfile = os.path.join(bintree.pkgdir,
			pkg.cpv + ".tbz2." + str(os.getpid()))
		self._binpkg_tmpfile = binpkg_tmpfile
		settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
		settings.backup_changes("PORTAGE_BINPKG_TMPFILE")

		# [elided] try header missing.
		EbuildProcess._start(self)
		# [elided] finally header missing; this is cleanup.
		settings.pop("PORTAGE_BINPKG_TMPFILE", None)

	def _set_returncode(self, wait_retval):
		EbuildProcess._set_returncode(self, wait_retval)
		# [elided] pkg binding missing.
		bintree = pkg.root_config.trees["bintree"]
		binpkg_tmpfile = self._binpkg_tmpfile
		if self.returncode == os.EX_OK:
			bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
class EbuildMerge(SlotObject):
	"""Synchronously merge a successfully built ebuild into the live
	filesystem via portage.merge()."""

	__slots__ = ("find_blockers", "logger", "ldpath_mtimes",
		"pkg", "pkg_count", "pkg_path", "pretend",
		"scheduler", "settings", "tree", "world_atom")

	# [elided] method header missing (presumably def execute(self)).
		root_config = self.pkg.root_config
		settings = self.settings
		retval = portage.merge(settings["CATEGORY"],
			settings["PF"], settings["D"],
			os.path.join(settings["PORTAGE_BUILDDIR"],
			"build-info"), root_config.root, settings,
			myebuild=settings["EBUILD"],
			mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
			vartree=root_config.trees["vartree"],
			prev_mtimes=self.ldpath_mtimes,
			scheduler=self.scheduler,
			blockers=self.find_blockers)

		# Record the package in the world file (world_atom callback)
		# only on successful merge.
		if retval == os.EX_OK:
			self.world_atom(self.pkg)
			# [elided] _log_success call / return missing.

	def _log_success(self):
		# [elided] pkg binding missing.
		pkg_count = self.pkg_count
		pkg_path = self.pkg_path
		logger = self.logger
		if "noclean" not in self.settings.features:
			short_msg = "emerge: (%s of %s) %s Clean Post" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log((" === (%s of %s) " + \
				"Post-Build Cleaning (%s::%s)") % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
				short_msg=short_msg)
		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
class PackageUninstall(AsynchronousTask):
	"""Unmerge a single installed package (synchronous despite the
	AsynchronousTask interface)."""

	__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")

	# [elided] method header and try header missing (presumably _start).
		unmerge(self.pkg.root_config, self.opts, "unmerge",
			[self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
			clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
			writemsg_level=self._writemsg_level)
		except UninstallFailure, e:
			self.returncode = e.status
		# [elided] else header missing.
			self.returncode = os.EX_OK
		# [elided] wait call missing.

	def _writemsg_level(self, msg, level=0, noiselevel=0):
		"""Route unmerge output: to the terminal when no log file is
		configured (respecting background/level filtering), otherwise
		append to PORTAGE_LOG_FILE."""

		log_path = self.settings.get("PORTAGE_LOG_FILE")
		background = self.background

		if log_path is None:
			if not (background and level < logging.WARNING):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
		# [elided] else / not-background branch headers missing for the
		# duplicate terminal write below.
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)

			f = open(log_path, 'a')
			# [elided] try/finally write+close missing.
class Binpkg(CompositeTask):
	"""Install one binary package: synchronize with the prefetcher,
	fetch (--getbinpkg), verify digests, clean the build dir, unpack the
	tbz2 into an image dir, and finally merge it.

	NOTE(review): elided extract -- [elided] marks lines missing from
	this view.
	"""

	__slots__ = ("find_blockers",
		"ldpath_mtimes", "logger", "opts",
		"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
		("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
		"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")

	def _writemsg_level(self, msg, level=0, noiselevel=0):

		# Echo to the terminal in foreground mode, and always append to
		# the log file when one is configured.
		if not self.background:
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)

		log_path = self.settings.get("PORTAGE_LOG_FILE")
		if log_path is not None:
			f = open(log_path, 'a')
			# [elided] try/finally write+close missing.

	# [elided] method header and pkg binding missing (presumably _start).
		settings = self.settings
		settings.setcpv(pkg)
		self._tree = "bintree"
		self._bintree = self.pkg.root_config.trees[self._tree]
		self._verify = not self.opts.pretend

		dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
			"portage", pkg.category, pkg.pf)
		self._build_dir = EbuildBuildDir(dir_path=dir_path,
			pkg=pkg, settings=settings)
		self._image_dir = os.path.join(dir_path, "image")
		self._infloc = os.path.join(dir_path, "build-info")
		self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
		settings["EBUILD"] = self._ebuild_path
		debug = settings.get("PORTAGE_DEBUG") == "1"
		portage.doebuild_environment(self._ebuild_path, "setup",
			settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name

		# The prefetcher has already completed or it
		# could be running now. If it's running now,
		# wait for it to complete since it holds
		# a lock on the file being fetched. The
		# portage.locks functions are only designed
		# to work between separate processes. Since
		# the lock is held by the current process,
		# use the scheduler and fetcher methods to
		# synchronize with the fetcher.
		prefetcher = self.prefetcher
		if prefetcher is None:
			# [elided] branch body missing.
		elif not prefetcher.isAlive():
			# [elided] branch body missing.
		elif prefetcher.poll() is None:

			waiting_msg = ("Fetching '%s' " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				"terminal.") % prefetcher.pkg_path
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			# [elided] return missing.

		self._prefetch_exit(prefetcher)

	def _prefetch_exit(self, prefetcher):

		# [elided] pkg binding missing.
		pkg_count = self.pkg_count
		if not (self.opts.pretend or self.opts.fetchonly):
			self._build_dir.lock()
			# Start with a fresh build dir; a missing dir is fine.
			# [elided] try header missing.
			shutil.rmtree(self._build_dir.dir_path)
			except EnvironmentError, e:
				if e.errno != errno.ENOENT:
					# [elided] raise missing.
			portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
		fetcher = BinpkgFetcher(background=self.background,
			logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
			pretend=self.opts.pretend, scheduler=self.scheduler)
		pkg_path = fetcher.pkg_path
		self._pkg_path = pkg_path

		if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):

			msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
			short_msg = "emerge: (%s of %s) %s Fetch" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			self.logger.log(msg, short_msg=short_msg)
			self._start_task(fetcher, self._fetcher_exit)
			# [elided] return missing.

		self._fetcher_exit(fetcher)

	def _fetcher_exit(self, fetcher):

		# The fetcher only has a returncode when
		# --getbinpkg is enabled.
		if fetcher.returncode is not None:
			self._fetched_pkg = True
			if self._default_exit(fetcher) != os.EX_OK:
				self._unlock_builddir()
				# [elided] wait/return missing.

		if self.opts.pretend:
			self._current_task = None
			self.returncode = os.EX_OK
			# [elided] wait/return missing.

		# [elided] verify-branch header missing (presumably
		# `if self._verify:` -- confirm against full source).
		logfile = self.settings.get("PORTAGE_LOG_FILE")
		verifier = BinpkgVerifier(background=self.background,
			logfile=logfile, pkg=self.pkg)
		self._start_task(verifier, self._verifier_exit)
		# [elided] return and `verifier = None` fallback missing.

		self._verifier_exit(verifier)

	def _verifier_exit(self, verifier):
		if verifier is not None and \
			self._default_exit(verifier) != os.EX_OK:
			self._unlock_builddir()
			# [elided] wait/return missing.

		logger = self.logger
		# [elided] pkg binding missing.
		pkg_count = self.pkg_count
		pkg_path = self._pkg_path

		# Record a freshly downloaded package in the binary tree index.
		if self._fetched_pkg:
			self._bintree.inject(pkg.cpv, filename=pkg_path)

		if self.opts.fetchonly:
			self._current_task = None
			self.returncode = os.EX_OK
			# [elided] wait/return missing.

		msg = " === (%s of %s) Merging Binary (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
		short_msg = "emerge: (%s of %s) %s Merge Binary" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		# [elided] phase binding missing -- presumably "clean"; confirm.
		settings = self.settings
		ebuild_phase = EbuildPhase(background=self.background,
			pkg=pkg, phase=phase, scheduler=self.scheduler,
			settings=settings, tree=self._tree)

		self._start_task(ebuild_phase, self._clean_exit)

	def _clean_exit(self, clean_phase):
		if self._default_exit(clean_phase) != os.EX_OK:
			self._unlock_builddir()
			# [elided] wait/return missing.
		# [elided] continuation lines missing here.

		dir_path = self._build_dir.dir_path

		# [elided] try header missing; missing dir is fine.
		shutil.rmtree(dir_path)
		except (IOError, OSError), e:
			if e.errno != errno.ENOENT:
				# [elided] raise missing.

		infloc = self._infloc
		pkg_path = self._pkg_path

		# [elided] dir_mode binding missing.
		for mydir in (dir_path, self._image_dir, infloc):
			portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
				gid=portage.data.portage_gid, mode=dir_mode)

		# This initializes PORTAGE_LOG_FILE.
		portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
		self._writemsg_level(">>> Extracting info\n")

		# Pull build-info (xpak segment) out of the tbz2; synthesize any
		# metadata files that are missing from the archive.
		pkg_xpak = portage.xpak.tbz2(self._pkg_path)
		check_missing_metadata = ("CATEGORY", "PF")
		missing_metadata = set()
		for k in check_missing_metadata:
			v = pkg_xpak.getfile(k)
			# [elided] None-check missing.
			missing_metadata.add(k)

		pkg_xpak.unpackinfo(infloc)
		for k in missing_metadata:
			# [elided] value selection lines missing.
			f = open(os.path.join(infloc, k), 'wb')
			# [elided] try/finally write+close missing.

		# Store the md5sum in the vdb.
		f = open(os.path.join(infloc, "BINPKGMD5"), "w")
		# [elided] try header missing.
		f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
		# [elided] finally/close missing.

		# This gives bashrc users an opportunity to do various things
		# such as remove binary packages after they're installed.
		settings = self.settings
		settings.setcpv(self.pkg)
		settings["PORTAGE_BINPKG_FILE"] = pkg_path
		settings.backup_changes("PORTAGE_BINPKG_FILE")

		# [elided] phase binding missing -- presumably "setup"; confirm.
		setup_phase = EbuildPhase(background=self.background,
			pkg=self.pkg, phase=phase, scheduler=self.scheduler,
			settings=settings, tree=self._tree)

		setup_phase.addExitListener(self._setup_exit)
		self._current_task = setup_phase
		self.scheduler.scheduleSetup(setup_phase)

	def _setup_exit(self, setup_phase):
		if self._default_exit(setup_phase) != os.EX_OK:
			self._unlock_builddir()
			# [elided] wait/return missing.

		extractor = BinpkgExtractorAsync(background=self.background,
			image_dir=self._image_dir,
			pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
		self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
		self._start_task(extractor, self._extractor_exit)

	def _extractor_exit(self, extractor):
		if self._final_exit(extractor) != os.EX_OK:
			self._unlock_builddir()
			writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
				# [elided] noiselevel arg missing.
		# [elided] wait call missing.

	def _unlock_builddir(self):
		# The build dir is never locked in pretend/fetchonly mode.
		if self.opts.pretend or self.opts.fetchonly:
			# [elided] return missing.
		portage.elog.elog_process(self.pkg.cpv, self.settings)
		self._build_dir.unlock()

	# [elided] method header missing (presumably def install(self)).

		# This gives bashrc users an opportunity to do various things
		# such as remove binary packages after they're installed.
		settings = self.settings
		settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
		settings.backup_changes("PORTAGE_BINPKG_FILE")

		merge = EbuildMerge(find_blockers=self.find_blockers,
			ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
			pkg=self.pkg, pkg_count=self.pkg_count,
			pkg_path=self._pkg_path, scheduler=self.scheduler,
			settings=settings, tree=self._tree, world_atom=self.world_atom)

		# [elided] try header missing.
		retval = merge.execute()
		# [elided] finally header missing; cleanup below.
		settings.pop("PORTAGE_BINPKG_FILE", None)
		self._unlock_builddir()
		# [elided] return missing.
class BinpkgFetcher(SpawnProcess):
	"""Download one binary package from PORTAGE_BINHOST using the
	configured FETCHCOMMAND/RESUMECOMMAND, with optional distlocks.

	NOTE(review): elided extract -- [elided] marks lines missing from
	this view.
	"""

	__slots__ = ("pkg", "pretend",
		"locked", "pkg_path", "_lock_obj")

	def __init__(self, **kwargs):
		SpawnProcess.__init__(self, **kwargs)
		# [elided] pkg binding missing (presumably pkg = self.pkg).
		self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)

	# [elided] method header and pkg binding missing (presumably _start).
		pretend = self.pretend
		bintree = pkg.root_config.trees["bintree"]
		settings = bintree.settings
		use_locks = "distlocks" in settings.features
		pkg_path = self.pkg_path

		# [elided] not-pretend guard missing.
		portage.util.ensure_dirs(os.path.dirname(pkg_path))
		# [elided] lock acquisition missing.
		exists = os.path.exists(pkg_path)
		resume = exists and os.path.basename(pkg_path) in bintree.invalids
		if not (pretend or resume):
			# Remove existing file or broken symlink.
			# [elided] unlink handling missing.

		# urljoin doesn't work correctly with
		# unrecognized protocols like sftp
		if bintree._remote_has_index:
			rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
			# [elided] fallback branch header missing.
			rel_uri = pkg.cpv + ".tbz2"
			uri = bintree._remote_base_uri.rstrip("/") + \
				"/" + rel_uri.lstrip("/")
		# [elided] else header missing.
			uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
				"/" + pkg.pf + ".tbz2"

		# Pretend mode: just print the URI and finish successfully.
		# [elided] pretend guard missing.
			portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
			self.returncode = os.EX_OK
			# [elided] return missing.

		# Prefer a protocol-specific command, then the generic one.
		protocol = urlparse.urlparse(uri)[0]
		fcmd_prefix = "FETCHCOMMAND"
		# [elided] resume guard missing.
		fcmd_prefix = "RESUMECOMMAND"
		fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
		# [elided] fallback guard missing.
		fcmd = settings.get(fcmd_prefix)

		# [elided] fcmd_vars dict opening missing.
			"DISTDIR" : os.path.dirname(pkg_path),
			"FILE" : os.path.basename(pkg_path)
		# [elided] dict close and URI entry missing.

		fetch_env = dict(settings.iteritems())
		fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
			for x in shlex.split(fcmd)]

		if self.fd_pipes is None:
			# [elided] empty-dict assignment missing.
		fd_pipes = self.fd_pipes

		# Redirect all output to stdout since some fetchers like
		# wget pollute stderr (if portage detects a problem then it
		# can send it's own message to stderr).
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stdout.fileno())

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			# If possible, update the mtime to match the remote package if
			# the fetcher didn't already do it automatically.
			bintree = self.pkg.root_config.trees["bintree"]
			if bintree._remote_has_index:
				remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
				if remote_mtime is not None:
					# [elided] try header missing.
					remote_mtime = long(remote_mtime)
					# [elided] except handling / try header missing.
					local_mtime = long(os.stat(self.pkg_path).st_mtime)
					# [elided] except handling missing.
					if remote_mtime != local_mtime:
						# [elided] try header missing.
						os.utime(self.pkg_path,
							(remote_mtime, remote_mtime))

	# [elided] lock() method header missing.
		"""
		This raises an AlreadyLocked exception if lock() is called
		while a lock is already held. In order to avoid this, call
		unlock() or check whether the "locked" attribute is True
		or False before calling lock().
		"""
		if self._lock_obj is not None:
			raise self.AlreadyLocked((self._lock_obj,))

		self._lock_obj = portage.locks.lockfile(
			self.pkg_path, wantnewlockfile=1)
		# [elided] locked-flag update missing.

	class AlreadyLocked(portage.exception.PortageException):
		# [elided] class body (pass) missing.

	# [elided] unlock() method header missing.
		if self._lock_obj is None:
			# [elided] return missing.
		portage.locks.unlockfile(self._lock_obj)
		self._lock_obj = None
		# [elided] locked-flag update missing.
class BinpkgVerifier(AsynchronousTask):
	"""Verify the digests of a downloaded binary package, renaming the
	file aside on checksum failure."""

	__slots__ = ("logfile", "pkg",)

	# [elided] start() method header missing.
		"""
		Note: Unlike a normal AsynchronousTask.start() method,
		this one does all of its work synchronously. The returncode
		attribute will be set before it returns.
		"""

		# [elided] pkg/rval bindings missing.
		root_config = pkg.root_config
		bintree = root_config.trees["bintree"]
		# [elided] intervening lines missing.

		# Temporarily redirect stdout/stderr into the log file in
		# background mode; restored unconditionally below.
		stdout_orig = sys.stdout
		stderr_orig = sys.stderr
		# [elided] log_file = None initialization missing.
		if self.background and self.logfile is not None:
			log_file = open(self.logfile, 'a')
		# [elided] try header missing.
		if log_file is not None:
			sys.stdout = log_file
			sys.stderr = log_file
		# [elided] try header missing.
		bintree.digestCheck(pkg)
		except portage.exception.FileNotFound:
			writemsg("!!! Fetching Binary failed " + \
				"for '%s'\n" % pkg.cpv, noiselevel=-1)
			# [elided] rval assignment missing.
		except portage.exception.DigestException, e:
			writemsg("\n!!! Digest verification failed:\n",
				# [elided] noiselevel arg missing.
			writemsg("!!! %s\n" % e.value[0],
				# [elided] noiselevel arg missing.
			writemsg("!!! Reason: %s\n" % e.value[1],
				# [elided] noiselevel arg missing.
			writemsg("!!! Got: %s\n" % e.value[2],
				# [elided] noiselevel arg missing.
			writemsg("!!! Expected: %s\n" % e.value[3],
				# [elided] noiselevel arg / rval assignment missing.

		# Move the corrupt file out of the way so a re-fetch can occur.
		if rval != os.EX_OK:
			pkg_path = bintree.getname(pkg.cpv)
			head, tail = os.path.split(pkg_path)
			temp_filename = portage._checksum_failure_temp_file(head, tail)
			writemsg("File renamed to '%s'\n" % (temp_filename,),
				# [elided] noiselevel arg missing.
		# [elided] finally header missing; restore streams.
		sys.stdout = stdout_orig
		sys.stderr = stderr_orig
		if log_file is not None:
			# [elided] close call missing.

		self.returncode = rval
		# [elided] wait call missing.
class BinpkgPrefetcher(CompositeTask):
	"""Fetch and verify a binary package in the background, injecting it
	into the binary tree index on success."""

	__slots__ = ("pkg",) + \
		("pkg_path", "_bintree",)

	# [elided] method header missing (presumably _start).
		self._bintree = self.pkg.root_config.trees["bintree"]
		fetcher = BinpkgFetcher(background=self.background,
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
			scheduler=self.scheduler)
		self.pkg_path = fetcher.pkg_path
		self._start_task(fetcher, self._fetcher_exit)

	def _fetcher_exit(self, fetcher):

		if self._default_exit(fetcher) != os.EX_OK:
			# [elided] wait/return missing.

		verifier = BinpkgVerifier(background=self.background,
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
		self._start_task(verifier, self._verifier_exit)

	def _verifier_exit(self, verifier):
		if self._default_exit(verifier) != os.EX_OK:
			# [elided] wait/return missing.

		self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)

		self._current_task = None
		self.returncode = os.EX_OK
		# [elided] wait call missing.
class BinpkgExtractorAsync(SpawnProcess):
	"""Extract a .tbz2 binary package into image_dir by spawning a
	`bzip2 -dc | tar -xp` pipeline under bash."""

	__slots__ = ("image_dir", "pkg", "pkg_path")

	_shell_binary = portage.const.BASH_BINARY

	# [elided] method header missing (presumably _start).
		# Paths are shell-quoted since they are interpolated into the
		# bash -c command string.
		self.args = [self._shell_binary, "-c",
			"bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
			(portage._shell_quote(self.pkg_path),
			portage._shell_quote(self.image_dir))]

		self.env = self.pkg.root_config.settings.environ()
		SpawnProcess._start(self)
class MergeListItem(CompositeTask):

	"""
	Dispatches one merge-list entry to an EbuildBuild or Binpkg task
	and exposes poll/wait/merge over the underlying install task.

	TODO: For parallel scheduling, everything here needs asynchronous
	execution support (start, poll, and wait methods).

	NOTE(review): elided extract -- [elided] marks lines missing from
	this view.
	"""

	__slots__ = ("args_set",
		"binpkg_opts", "build_opts", "config_pool", "emerge_opts",
		"find_blockers", "logger", "mtimedb", "pkg",
		"pkg_count", "pkg_to_replace", "prefetcher",
		"settings", "statusMessage", "world_atom") + \
		# [elided] private slots tuple missing.

	# [elided] method header and pkg binding missing (presumably _start).
		build_opts = self.build_opts

		# [elided] uninstall-operation guard missing.
			# uninstall, executed by self.merge()
			self.returncode = os.EX_OK
			# [elided] wait/return missing.

		args_set = self.args_set
		find_blockers = self.find_blockers
		logger = self.logger
		mtimedb = self.mtimedb
		pkg_count = self.pkg_count
		scheduler = self.scheduler
		settings = self.settings
		world_atom = self.world_atom
		ldpath_mtimes = mtimedb["ldpath"]

		# Build the status-line message describing this operation.
		action_desc = "Emerging"
		# [elided] preposition binding missing.
		if pkg.type_name == "binary":
			action_desc += " binary"

		if build_opts.fetchonly:
			action_desc = "Fetching"

		msg = "%s (%s of %s) %s" % \
			# [elided] action_desc tuple element missing.
			(colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
			colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
			colorize("GOOD", pkg.cpv))

		# Mention the repository when it differs from the main tree.
		portdb = pkg.root_config.trees["porttree"].dbapi
		portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
		if portdir_repo_name:
			pkg_repo_name = pkg.metadata.get("repository")
			if pkg_repo_name != portdir_repo_name:
				if not pkg_repo_name:
					pkg_repo_name = "unknown repo"
				msg += " from %s" % pkg_repo_name

		# [elided] pkg.root guard missing.
		msg += " %s %s" % (preposition, pkg.root)

		if not build_opts.pretend:
			self.statusMessage(msg)
			logger.log(" >>> emerge (%s of %s) %s to %s" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		if pkg.type_name == "ebuild":

			build = EbuildBuild(args_set=args_set,
				background=self.background,
				config_pool=self.config_pool,
				find_blockers=find_blockers,
				ldpath_mtimes=ldpath_mtimes, logger=logger,
				opts=build_opts, pkg=pkg, pkg_count=pkg_count,
				prefetcher=self.prefetcher, scheduler=scheduler,
				settings=settings, world_atom=world_atom)

			self._install_task = build
			self._start_task(build, self._default_final_exit)
			# [elided] return missing.

		elif pkg.type_name == "binary":

			binpkg = Binpkg(background=self.background,
				find_blockers=find_blockers,
				ldpath_mtimes=ldpath_mtimes, logger=logger,
				opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
				prefetcher=self.prefetcher, settings=settings,
				scheduler=scheduler, world_atom=world_atom)

			self._install_task = binpkg
			self._start_task(binpkg, self._default_final_exit)
			# [elided] return missing.

	# [elided] poll() header and guard missing.
		self._install_task.poll()
		return self.returncode

	# [elided] wait() header and guard missing.
		self._install_task.wait()
		return self.returncode

	# [elided] merge() header and pkg binding missing.
		build_opts = self.build_opts
		find_blockers = self.find_blockers
		logger = self.logger
		mtimedb = self.mtimedb
		pkg_count = self.pkg_count
		prefetcher = self.prefetcher
		scheduler = self.scheduler
		settings = self.settings
		world_atom = self.world_atom
		ldpath_mtimes = mtimedb["ldpath"]

		# Uninstall path: run the unmerge synchronously unless in a
		# no-op mode.
		# [elided] uninstall-operation guard missing.
		if not (build_opts.buildpkgonly or \
			build_opts.fetchonly or build_opts.pretend):

			uninstall = PackageUninstall(background=self.background,
				ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
				pkg=pkg, scheduler=scheduler, settings=settings)

			# [elided] uninstall.start() missing.
			retval = uninstall.wait()
			if retval != os.EX_OK:
				# [elided] return retval missing.

		if build_opts.fetchonly or \
			build_opts.buildpkgonly:
			return self.returncode

		retval = self._install_task.install()
		# [elided] return retval missing.
class PackageMerge(AsynchronousTask):
	"""
	Performs the actual merge step for a MergeListItem, with a status
	message beforehand.

	TODO: Implement asynchronous merge so that the scheduler can
	run while a merge is executing.
	"""

	__slots__ = ("merge",)

	# NOTE(review): the executing-method header (e.g. "def _start(self):")
	# is elided from this view.
		pkg = self.merge.pkg
		pkg_count = self.merge.pkg_count

		# Wording depends on install vs. uninstall; the branch header
		# (presumably "if pkg.installed:") is elided from this view.
			action_desc = "Uninstalling"
			preposition = "from"
			action_desc = "Installing"
			# NOTE(review): "preposition = 'to'" likely elided — confirm.

		msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))

		# NOTE(review): multi-ROOT guard elided before this suffix.
			msg += " %s %s" % (preposition, pkg.root)

		# Only emit a status message for operations that actually merge
		# to disk (not fetch-only, pretend, or buildpkg-only).
		if not self.merge.build_opts.fetchonly and \
			not self.merge.build_opts.pretend and \
			not self.merge.build_opts.buildpkgonly:
			self.merge.statusMessage(msg)

		self.returncode = self.merge.merge()
		# NOTE(review): completion call (e.g. self.wait()) elided.
class DependencyArg(object):
	# Base class for command-line dependency arguments; records the raw
	# argument string and the RootConfig it applies to.
	def __init__(self, arg=None, root_config=None):
		# NOTE(review): "self.arg = arg" appears to be elided from this
		# view — __str__ below reads self.arg.
		self.root_config = root_config

	# NOTE(review): "def __str__(self):" header elided from this view.
		return str(self.arg)

class AtomArg(DependencyArg):
	# A dependency argument that is a single atom; normalized to a
	# portage.dep.Atom and exposed as a one-element set.
	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# NOTE(review): "self.atom = atom" elided from this view.
		if not isinstance(self.atom, portage.dep.Atom):
			self.atom = portage.dep.Atom(self.atom)
		self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument wrapping a concrete Package instance.

	The package is pinned with an exact-version ("=") atom and exposed
	through a single-atom set, mirroring the other argument types.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Build the exact-version atom once and share it between the
		# atom attribute and the single-element set.
		exact_atom = portage.dep.Atom("=%s" % package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom,)
class SetArg(DependencyArg):
	# A dependency argument naming a package set; the set name is the
	# argument string with the SETPREFIX marker stripped.
	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# NOTE(review): "self.set = set" appears to be elided from this view.
		self.name = self.arg[len(SETPREFIX):]

class Dependency(SlotObject):
	# One dependency edge in the graph: an atom required by a parent at
	# a given depth/priority on a given root. "blocker" marks "!" atoms.
	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")
	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
		# NOTE(review): the default depth assignment (presumably
		# "self.depth = 0") is elided from this view.
class BlockerCache(DictMixin):
	"""This caches blockers of installed packages so that dep_check does not
	have to be done for every single installed package on every invocation of
	emerge. The cache is invalidated whenever it is detected that something
	has changed that might alter the results of dep_check() calls:
	1) the set of installed packages (including COUNTER) has changed
	2) the old-style virtuals have changed
	"""

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_cache_threshold = 5

	class BlockerData(object):
		# Value object pairing a vdb COUNTER with the blocker atoms that
		# were computed for that counter.
		__slots__ = ("__weakref__", "atoms", "counter")

		def __init__(self, counter, atoms):
			self.counter = counter
			# NOTE(review): "self.atoms = atoms" elided from this view.

	def __init__(self, myroot, vardb):
		# NOTE(review): "self._vardb = vardb" assignment elided.
		self._virtuals = vardb.settings.getvirtuals()
		self._cache_filename = os.path.join(myroot,
			portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
		self._cache_version = "1"
		self._cache_data = None
		self._modified = set()
		# NOTE(review): the _load() invocation is elided from this view.

	# NOTE(review): "_load" method header and its "try:" are elided; the
	# statements below load and validate the pickled cache.
			f = open(self._cache_filename)
			mypickle = pickle.Unpickler(f)
			# Forbid resolution of arbitrary globals while unpickling
			# (defense against malicious/corrupt cache files).
			mypickle.find_global = None
			self._cache_data = mypickle.load()
		except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
			if isinstance(e, pickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._cache_filename, str(e)), noiselevel=-1)

		cache_valid = self._cache_data and \
			isinstance(self._cache_data, dict) and \
			self._cache_data.get("version") == self._cache_version and \
			isinstance(self._cache_data.get("blockers"), dict)
		# NOTE(review): "if cache_valid:" guard elided before this block.
			# Validate all the atoms and counters so that
			# corruption is detected as soon as possible.
			invalid_items = set()
			for k, v in self._cache_data["blockers"].iteritems():
				if not isinstance(k, basestring):
					invalid_items.add(k)
				# NOTE(review): "continue" lines and a "try:" around
				# catpkgsplit are elided throughout this loop.
				if portage.catpkgsplit(k) is None:
					invalid_items.add(k)
				except portage.exception.InvalidData:
					invalid_items.add(k)
				if not isinstance(v, tuple) or \
					invalid_items.add(k)
				# NOTE(review): "counter, atoms = v" unpacking elided.
				if not isinstance(counter, (int, long)):
					invalid_items.add(k)
				if not isinstance(atoms, (list, tuple)):
					invalid_items.add(k)
				invalid_atom = False
				# NOTE(review): "for atom in atoms:" header elided.
				if not isinstance(atom, basestring):
					# NOTE(review): invalid_atom flag set elided.
				if atom[:1] != "!" or \
					not portage.isvalidatom(
					atom, allow_blockers=True):
					# NOTE(review): flag/break handling elided.
					invalid_items.add(k)

			for k in invalid_items:
				del self._cache_data["blockers"][k]
			if not self._cache_data["blockers"]:
				# NOTE(review): cache reset branch header elided.
		# Reinitialize with an empty, current-version cache.
			self._cache_data = {"version":self._cache_version}
			self._cache_data["blockers"] = {}
			self._cache_data["virtuals"] = self._virtuals
		self._modified.clear()

	# NOTE(review): "flush" method header elided from this view.
		"""If the current user has permission and the internal blocker cache
		been updated, save it to disk and mark it unmodified. This is called
		by emerge after it has proccessed blockers for all installed packages.
		Currently, the cache is only written if the user has superuser
		privileges (since that's required to obtain a lock), but all users
		have read access and benefit from faster blocker lookups (as long as
		the entire cache is still valid). The cache is stored as a pickled
		dict object with the following format:

			"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
			"virtuals" : vardb.settings.getvirtuals()
		"""
		# Only write once enough entries changed, and only as root
		# (secpass check presumably elided — confirm).
		if len(self._modified) >= self._cache_threshold and \
			# NOTE(review): secpass condition and "try:" elided here.
			f = portage.util.atomic_ofstream(self._cache_filename)
			pickle.dump(self._cache_data, f, -1)
			# NOTE(review): f.close() elided.
			portage.util.apply_secpass_permissions(
				self._cache_filename, gid=portage.portage_gid, mode=0644)
			except (IOError, OSError), e:
			# NOTE(review): exception body elided (best-effort write).
			self._modified.clear()

	def __setitem__(self, cpv, blocker_data):
		"""
		Update the cache and mark it as modified for a future call to
		flush().

		@param cpv: Package for which to cache blockers.
		@param blocker_data: An object with counter and atoms attributes.
		@type blocker_data: BlockerData
		"""
		# Atoms are stored as plain strings so the pickle stays portable.
		self._cache_data["blockers"][cpv] = \
			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
		self._modified.add(cpv)

	# NOTE(review): "__iter__" method header elided from this view.
		if self._cache_data is None:
			# triggered by python-trace
			# NOTE(review): empty-iterator return elided.
		return iter(self._cache_data["blockers"])

	def __delitem__(self, cpv):
		del self._cache_data["blockers"][cpv]

	def __getitem__(self, cpv):
		"""
		@returns: An object with counter and atoms attributes.
		"""
		return self.BlockerData(*self._cache_data["blockers"][cpv])

	# NOTE(review): "keys"-style method header elided from this view.
		"""This needs to be implemented so that self.__repr__() doesn't raise
		an AttributeError."""
class BlockerDB(object):
	# Determines which installed packages are in a blocking relationship
	# (in either direction) with a package about to be merged, using a
	# BlockerCache backed by a FakeVartree snapshot of the vdb.

	def __init__(self, root_config):
		self._root_config = root_config
		self._vartree = root_config.trees["vartree"]
		self._portdb = root_config.trees["porttree"].dbapi

		self._dep_check_trees = None
		self._fake_vartree = None

	def _get_fake_vartree(self, acquire_lock=0):
		# Lazily build (then keep synced) a FakeVartree, pointing both
		# "porttree" and "vartree" of the dep_check tree map at it.
		fake_vartree = self._fake_vartree
		if fake_vartree is None:
			fake_vartree = FakeVartree(self._root_config,
				acquire_lock=acquire_lock)
			self._fake_vartree = fake_vartree
			self._dep_check_trees = { self._vartree.root : {
				"porttree"    :  fake_vartree,
				"vartree"     :  fake_vartree,
			# NOTE(review): dict-closing braces and the else-branch are
			# elided from this view.
			fake_vartree.sync(acquire_lock=acquire_lock)
		# NOTE(review): "return fake_vartree" elided from this view.

	def findInstalledBlockers(self, new_pkg, acquire_lock=0):
		# Returns the set of installed Package instances that block
		# new_pkg or are blocked by it.
		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
		settings = self._vartree.settings
		stale_cache = set(blocker_cache)
		fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
		dep_check_trees = self._dep_check_trees
		vardb = fake_vartree.dbapi
		installed_pkgs = list(vardb)

		# Refresh the blocker cache: entries whose COUNTER changed are
		# recomputed via dep_check; untouched entries become stale.
		for inst_pkg in installed_pkgs:
			stale_cache.discard(inst_pkg.cpv)
			cached_blockers = blocker_cache.get(inst_pkg.cpv)
			if cached_blockers is not None and \
				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
				cached_blockers = None
			if cached_blockers is not None:
				blocker_atoms = cached_blockers.atoms
			# NOTE(review): else-branch header elided here.
				# Use aux_get() to trigger FakeVartree global
				# updates on *DEPEND when appropriate.
				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
				# NOTE(review): try/finally wrapper elided; the strict
				# flag is toggled around dep_check.
				portage.dep._dep_check_strict = False
				success, atoms = portage.dep_check(depstr,
					vardb, settings, myuse=inst_pkg.use.enabled,
					trees=dep_check_trees, myroot=inst_pkg.root)
				portage.dep._dep_check_strict = True
				# NOTE(review): "if not success:" guard elided; the next
				# lines report a corrupt *DEPEND location.
				pkg_location = os.path.join(inst_pkg.root,
					portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
				portage.writemsg("!!! %s/*DEPEND: %s\n" % \
					(pkg_location, atoms), noiselevel=-1)

				blocker_atoms = [atom for atom in atoms \
					if atom.startswith("!")]
				blocker_atoms.sort()
				counter = long(inst_pkg.metadata["COUNTER"])
				blocker_cache[inst_pkg.cpv] = \
					blocker_cache.BlockerData(counter, blocker_atoms)
		for cpv in stale_cache:
			del blocker_cache[cpv]
		blocker_cache.flush()

		blocker_parents = digraph()
		# NOTE(review): "blocker_atoms = []" initialization elided.
		for pkg in installed_pkgs:
			for blocker_atom in blocker_cache[pkg.cpv].atoms:
				blocker_atom = blocker_atom.lstrip("!")
				blocker_atoms.append(blocker_atom)
				blocker_parents.add(blocker_atom, pkg)

		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		blocking_pkgs = set()
		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
			blocking_pkgs.update(blocker_parents.parent_nodes(atom))

		# Check for blockers in the other direction.
		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
		# NOTE(review): try/finally wrapper elided around dep_check.
		portage.dep._dep_check_strict = False
		success, atoms = portage.dep_check(depstr,
			vardb, settings, myuse=new_pkg.use.enabled,
			trees=dep_check_trees, myroot=new_pkg.root)
		portage.dep._dep_check_strict = True
		# NOTE(review): "if not success:" guard elided before the notice.
		# We should never get this far with invalid deps.
		show_invalid_depstring_notice(new_pkg, depstr, atoms)

		blocker_atoms = [atom.lstrip("!") for atom in atoms \
		# NOTE(review): comprehension condition line elided here.

		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		for inst_pkg in installed_pkgs:
			# EAFP probe: a StopIteration from the iterator means no
			# atom matches this installed package ("try:" line elided).
				blocker_atoms.iterAtomsForPackage(inst_pkg).next()
			except (portage.exception.InvalidDependString, StopIteration):
				# NOTE(review): "continue" elided from this view.
			blocking_pkgs.add(inst_pkg)

		return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
	# Print a detailed error describing an invalid/corrupt dependency
	# specification, with remediation advice that differs depending on
	# whether the package is installed ("nomerge") or being installed.

	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
		"\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
	p_type, p_root, p_key, p_status = parent_node
	# NOTE(review): "msg = []" initialization elided from this view.
	if p_status == "nomerge":
		category, pf = portage.catsplit(p_key)
		pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
		msg.append("Portage is unable to process the dependencies of the ")
		msg.append("'%s' package. " % p_key)
		msg.append("In order to correct this problem, the package ")
		msg.append("should be uninstalled, reinstalled, or upgraded. ")
		msg.append("As a temporary workaround, the --nodeps option can ")
		msg.append("be used to ignore all dependencies. For reference, ")
		msg.append("the problematic dependencies can be found in the ")
		msg.append("*DEPEND files located in '%s/'." % pkg_location)
	# NOTE(review): the "else:" line is elided from this view.
		msg.append("This package can not be installed. ")
		msg.append("Please notify the '%s' package maintainer " % p_key)
		msg.append("about this problem.")

	# Wrap the advice at 72 columns and emit at ERROR level.
	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
class PackageVirtualDbapi(portage.dbapi):
	"""
	A dbapi-like interface class that represents the state of the installed
	package database as new packages are installed, replacing any packages
	that previously existed in the same slot. The main difference between
	this class and fakedbapi is that this one uses Package instances
	internally (passed in via cpv_inject() and cpv_remove() calls).
	"""
	def __init__(self, settings):
		portage.dbapi.__init__(self)
		self.settings = settings
		self._match_cache = {}
		# NOTE(review): "_cp_map"/"_cpv_map" dict initialization elided
		# from this view.

	# NOTE(review): "clear" method header elided from this view.
		"""
		Remove all packages.
		"""
		self._cp_map.clear()
		self._cpv_map.clear()
		# NOTE(review): match-cache clearing likely elided — confirm.

	# NOTE(review): "copy" method header elided from this view; returns
	# a shallow copy with per-cp lists duplicated.
		obj = PackageVirtualDbapi(self.settings)
		obj._match_cache = self._match_cache.copy()
		obj._cp_map = self._cp_map.copy()
		for k, v in obj._cp_map.iteritems():
			obj._cp_map[k] = v[:]
		obj._cpv_map = self._cpv_map.copy()
		# NOTE(review): "return obj" elided.

	# NOTE(review): "__iter__" method header elided from this view.
		return self._cpv_map.itervalues()

	def __contains__(self, item):
		existing = self._cpv_map.get(item.cpv)
		if existing is not None and \
		# NOTE(review): identity/equality comparison and returns elided.

	def get(self, item, default=None):
		cpv = getattr(item, "cpv", None)
		# NOTE(review): the branch unpacking a (type_name, root, cpv,
		# operation) tuple key is partially elided around here.
			type_name, root, cpv, operation = item

		existing = self._cpv_map.get(cpv)
		if existing is not None and \
		# NOTE(review): comparison and return of existing/default elided.

	def match_pkgs(self, atom):
		return [self._cpv_map[cpv] for cpv in self.match(atom)]

	def _clear_cache(self):
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		# Memoized wrapper around portage.dbapi.match().
		result = self._match_cache.get(origdep)
		if result is not None:
			# NOTE(review): cached-result return (likely a copy) elided.
		result = portage.dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = result
		# NOTE(review): final return elided from this view.

	def cpv_exists(self, cpv):
		return cpv in self._cpv_map

	def cp_list(self, mycp, use_cache=1):
		cachelist = self._match_cache.get(mycp)
		# cp_list() doesn't expand old-style virtuals
		if cachelist and cachelist[0].startswith(mycp):
			# NOTE(review): cached-result return elided.
		cpv_list = self._cp_map.get(mycp)
		if cpv_list is None:
			# NOTE(review): empty-list fallback elided.
		cpv_list = [pkg.cpv for pkg in cpv_list]
		self._cpv_sort_ascending(cpv_list)
		# Don't cache a miss for a virtual/* cp, since match() may
		# expand old-style virtuals for it.
		if not (not cpv_list and mycp.startswith("virtual/")):
			self._match_cache[mycp] = cpv_list
		# NOTE(review): return of cpv_list (likely a copy) elided.

	# NOTE(review): "cp_all" method header elided from this view.
		return list(self._cp_map)

	# NOTE(review): "cpv_all" method header elided from this view.
		return list(self._cpv_map)

	def cpv_inject(self, pkg):
		# Add pkg, evicting any existing package occupying the same
		# cpv or the same slot.
		cp_list = self._cp_map.get(pkg.cp)
		# NOTE(review): new-list creation branch elided here.
		self._cp_map[pkg.cp] = cp_list
		e_pkg = self._cpv_map.get(pkg.cpv)
		if e_pkg is not None:
			# NOTE(review): identity short-circuit elided.
			self.cpv_remove(e_pkg)
		for e_pkg in cp_list:
			if e_pkg.slot_atom == pkg.slot_atom:
				# NOTE(review): identity short-circuit elided.
				self.cpv_remove(e_pkg)
		# NOTE(review): cp_list append elided.
		self._cpv_map[pkg.cpv] = pkg
		# NOTE(review): cache clearing elided.

	def cpv_remove(self, pkg):
		old_pkg = self._cpv_map.get(pkg.cpv)
		# NOTE(review): identity check / KeyError raise elided here.
		self._cp_map[pkg.cp].remove(pkg)
		del self._cpv_map[pkg.cpv]
		# NOTE(review): cache clearing elided.

	def aux_get(self, cpv, wants):
		# Missing keys yield empty strings, matching dbapi conventions.
		metadata = self._cpv_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self._cpv_map[cpv].metadata.update(values)
class depgraph(object):
	# Core dependency-graph resolver for emerge. NOTE: the class body
	# continues beyond this view.

	pkg_tree_map = RootConfig.pkg_tree_map

	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

	def __init__(self, settings, trees, myopts, myparams, spinner):
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		# NOTE(review): a debug-flag assignment appears elided around the
		# PORTAGE_DEBUG check below.
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = Scheduler._opts_no_restart
		self.pkgsettings = {}
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# NOTE(review): initialization of self.mydbapi / self.trees /
		# self.roots dicts appears elided around here.
		self._trees_orig = trees
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# All Package instances
		self._pkg_cache = {}
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				# NOTE(review): middle argument (the per-root tree dict)
				# elided from this view.
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
				pkg_cache=self._pkg_cache)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			self._slot_pkg_map[myroot] = {}
			vardb = self.trees[myroot]["vartree"].dbapi
			preload_installed_pkgs = "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			if preload_installed_pkgs:
				# NOTE(review): the loop header over installed packages
				# is elided from this view.
					self.spinner.update()
					# This triggers metadata updates via FakeVartree.
					vardb.aux_get(pkg.cpv, [])
					fakedb.cpv_inject(pkg)

			# Now that the vardb state is cached in our FakeVartree,
			# we won't be needing the real vartree cache for awhile.
			# To make some room on the heap, clear the vardbapi
			# caches.
			trees[myroot]["vartree"].dbapi._clear_cache()

			self.mydbapi[myroot] = fakedb
			# NOTE(review): graph_tree construction elided before its
			# dbapi is pointed at the fake db.
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			def filtered_tree():
			# NOTE(review): filtered_tree body elided from this view.
			filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]

			# NOTE(review): "dbs = []" initialization elided here.
			portdb = self.trees[myroot]["porttree"].dbapi
			bindb = self.trees[myroot]["bintree"].dbapi
			vardb = self.trees[myroot]["vartree"].dbapi
			# (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in self.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in self.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			if "--usepkg" in self.myopts:
				self.trees[myroot]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)

		self.digraph=portage.digraph()
		# contains all sets added to the graph
		# NOTE(review): "self._sets = {}" initialization elided here.
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._required_set_names = set(["system", "world"])
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		self._highest_pkg_cache = {}
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		cases.
		"""

		if not self._slot_collision_info:
			# NOTE(review): early return elided from this view.

		self._show_merge_list()

		# NOTE(review): "msg = []" initialization elided here.
		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")
		# Max number of parents shown, to avoid flooding the display.
		# NOTE(review): max_parents / indent / explanations initialization
		# appears elided around here.
		explanation_columns = 70
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():
			msg.append(str(slot_atom))
			# NOTE(review): newline append elided.
			for node in slot_nodes:
				msg.append(indent)
				msg.append(str(node))
				parent_atoms = self._parent_atoms.get(node)
				# NOTE(review): "if parent_atoms:" guard and
				# "pruned_list = set()" elided here.
					# Prefer conflict atoms over others.
					for parent_atom in parent_atoms:
						if len(pruned_list) >= max_parents:
							# NOTE(review): break elided.
						if parent_atom in self._slot_conflict_parent_atoms:
							pruned_list.add(parent_atom)
					# If this package was pulled in by conflict atoms then
					# show those alone since those are the most interesting.
					# NOTE(review): "if not pruned_list:" guard elided.
						# When generating the pruned list, prefer instances
						# of DependencyArg over instances of Package.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								# NOTE(review): break elided.
							parent, atom = parent_atom
							if isinstance(parent, DependencyArg):
								pruned_list.add(parent_atom)
						# Prefer Packages instances that themselves have been
						# pulled into collision slots.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								# NOTE(review): break elided.
							parent, atom = parent_atom
							if isinstance(parent, Package) and \
								(parent.slot_atom, parent.root) \
								in self._slot_collision_info:
								pruned_list.add(parent_atom)
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								# NOTE(review): break elided.
							pruned_list.add(parent_atom)
					omitted_parents = len(parent_atoms) - len(pruned_list)
					parent_atoms = pruned_list
					msg.append(" pulled in by\n")
					for parent_atom in parent_atoms:
						parent, atom = parent_atom
						msg.append(2*indent)
						if isinstance(parent,
							(PackageArg, AtomArg)):
							# For PackageArg and AtomArg types, it's
							# redundant to display the atom attribute.
							msg.append(str(parent))
						# NOTE(review): else-branch header elided.
						# Display the specific atom from SetArg or
						# Package types.
							msg.append("%s required by %s" % (atom, parent))
						# NOTE(review): newline append and the
						# "if omitted_parents:" guard elided here.
						msg.append(2*indent)
						msg.append("(and %d more)\n" % omitted_parents)
				# NOTE(review): else-branch header elided.
					msg.append(" (no parents)\n")
			explanation = self._slot_conflict_explanation(slot_nodes)
			# NOTE(review): "if explanation:" guard and an explanations
			# counter increment appear elided here.
				msg.append(indent + "Explanation:\n\n")
				for line in textwrap.wrap(explanation, explanation_columns):
					msg.append(2*indent + line + "\n")
		sys.stderr.write("".join(msg))
		# NOTE(review): sys.stderr.flush() likely elided — confirm.

		explanations_for_all = explanations == len(self._slot_collision_info)

		if explanations_for_all or "--quiet" in self.myopts:
			# NOTE(review): early return elided.

		# NOTE(review): "msg = []" re-initialization elided before the
		# generic advice below.
		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")

		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
		# NOTE(review): loop header over msg elided before each
		# add_flowing_data call.
			f.add_flowing_data(x)
		# NOTE(review): f.end_paragraph(...) and a --quiet check appear
		# elided here.
		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
			f.add_flowing_data(x)
		# NOTE(review): trailing end_paragraph elided.
	def _slot_conflict_explanation(self, slot_nodes):
		"""
		When a slot conflict occurs due to USE deps, there are a few
		different cases to consider:

		1) New USE are correctly set but --newuse wasn't requested so an
		   installed package with incorrect USE happened to get pulled
		   into graph before the new one.

		2) New USE are incorrectly set but an installed package has correct
		   USE so it got pulled into the graph, and a new instance also got
		   pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied simultaneously,
		   and multiple package instances got pulled into the same slot to
		   satisfy the conflicting deps.

		Currently, explanations and suggested courses of action are generated
		for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
		"""

		if len(slot_nodes) != 2:
			# Suggestions are only implemented for
			# conflicts between two packages.
			# NOTE(review): "return None" elided from this view.

		all_conflict_atoms = self._slot_conflict_parent_atoms
		# NOTE(review): "matched_node = None" initialization elided.
		matched_atoms = None
		unmatched_node = None
		for node in slot_nodes:
			parent_atoms = self._parent_atoms.get(node)
			if not parent_atoms:
				# Normally, there are always parent atoms. If there are
				# none then something unexpected is happening and there's
				# currently no suggestion for this case.
				# NOTE(review): "return None" elided.
			conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
			for parent_atom in conflict_atoms:
				parent, atom = parent_atom
				# NOTE(review): the USE-dep presence check is elided here.
				# Suggestions are currently only implemented for cases
				# in which all conflict atoms have USE deps.
			# NOTE(review): "if conflict_atoms:" branch header elided.
			if matched_node is not None:
				# If conflict atoms match multiple nodes
				# then there's no suggestion.
				# NOTE(review): "return None" elided.
			# NOTE(review): "matched_node = node" and the else-branch
			# header appear elided around here.
			matched_atoms = conflict_atoms
			if unmatched_node is not None:
				# Neither node is matched by conflict atoms, and
				# there is no suggestion for this case.
				# NOTE(review): "return None" elided.
			unmatched_node = node

		if matched_node is None or unmatched_node is None:
			# This shouldn't happen.
			# NOTE(review): "return None" elided.

		if unmatched_node.installed and not matched_node.installed:
			# Case 1: correct new USE, stale installed instance.
			return "New USE are correctly set, but --newuse wasn't" + \
				" requested, so an installed package with incorrect USE " + \
				"happened to get pulled into the dependency graph. " + \
				"In order to solve " + \
				"this, either specify the --newuse option or explicitly " + \
				" reinstall '%s'." % matched_node.slot_atom

		if matched_node.installed and not unmatched_node.installed:
			# Case 2: new USE incorrectly set; list the atoms to satisfy.
			atoms = sorted(set(atom for parent, atom in matched_atoms))
			explanation = ("New USE for '%s' are incorrectly set. " + \
				"In order to solve this, adjust USE to satisfy '%s'") % \
				(matched_node.slot_atom, atoms[0])
			# NOTE(review): a length check around the multi-atom
			# elaboration is elided here.
			for atom in atoms[1:-1]:
				explanation += ", '%s'" % (atom,)
			explanation += " and '%s'" % (atoms[-1],)
			# NOTE(review): trailing punctuation append and the returns
			# are elided from this view.
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		"""
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Collect every parent atom of every conflicting package.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					# NOTE(review): "continue" elided from this view.
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
						# NOTE(review): "continue" elided from this view.
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
					# NOTE(review): else-branch header elided; atoms that
					# match only some slot occupants are the conflict atoms.
						self._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags."""
		if "--newuse" in self.myopts:
			# Any IUSE change (minus forced flags) or any change in the
			# enabled subset triggers reinstall under --newuse.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
			# NOTE(review): "if flags: return flags" elided from this view.
		elif "changed-use" == self.myopts.get("--reinstall"):
			# Only changes to effectively-enabled flags matter here.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
			# NOTE(review): "if flags: return flags" and the trailing
			# "return None" are elided from this view.
	def _create_graph(self, allow_unsatisfied=False):
		# Drain the dependency stack, dispatching Package entries to
		# _add_pkg_deps() and other entries to _add_dep(); the visible
		# early exits suggest a falsy return signals failure.
		dep_stack = self._dep_stack
		# NOTE(review): the "while dep_stack:" loop header is elided.
			self.spinner.update()
			dep = dep_stack.pop()
			if isinstance(dep, Package):
				if not self._add_pkg_deps(dep,
					allow_unsatisfied=allow_unsatisfied):
					# NOTE(review): failure return (presumably 0) elided.
			# NOTE(review): "else:"/"continue" around here elided.
			if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
				# NOTE(review): failure return elided.
		# NOTE(review): success return (presumably 1) elided.
	def _add_dep(self, dep, allow_unsatisfied=False):
		# Resolve a single Dependency: register blockers, select a
		# package satisfying the atom, and either record an unsatisfied
		# dep or recurse via _add_pkg().
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		update = "--update" in self.myopts and dep.depth <= 1
		# NOTE(review): a "dep.blocker" condition joining this guard is
		# elided from this view.
		if not buildpkgonly and \
			dep.parent not in self._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.
				# NOTE(review): success return elided.
			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				root=dep.parent.root)
			self._blocker_parents.add(blocker, dep.parent)
			# NOTE(review): success return elided.
		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
		# NOTE(review): "if dep_pkg is None:" guard elided before the
		# unsatisfied-dep handling below.
			if allow_unsatisfied:
				self._unsatisfied_deps.append(dep)
				# NOTE(review): success return elided.
			self._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))
			# NOTE(review): failure return elided.
		# In some cases, dep_check will return deps that shouldn't
		# be proccessed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not (existing_node or empty or deep or update):
			# NOTE(review): "myarg = None" initialization elided.
			if dep.root == self.target_root:
				# NOTE(review): "try:" line elided around the EAFP probe.
					myarg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
					# NOTE(review): pass-through elided.
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
						# NOTE(review): raise elided.
			# NOTE(review): "if myarg is None:" guard elided before
			# recording the ignored dep.
				self._ignored_deps.append(dep)
				# NOTE(review): success return elided.
		if not self._add_pkg(dep_pkg, dep):
			# NOTE(review): failure return and the trailing success
			# return are elided from this view.
# Add one selected package to the dependency graph: detect slot collisions
# with an already-chosen same-slot package, register parent->child edges and
# parent atoms, inject the package into the graph-side dbapi, and finally
# push it onto the dep stack so its own dependencies get processed.
# NOTE(review): numbered listing with many omitted lines (e.g. orig 4856-4861,
# 4864, 4867, 4873-4874, 4885-4888, 4894-4896, 4910, 4913, 4925, 4929, 4934,
# 4938, 4942-4944, 4950-4951, 4962-4963, 4966, 4970-4971, 4975, 4977,
# 4981-4984, 4986, 4993-4994, 4999, 5005, 5008, 5012, 5014-5017, 5020 absent);
# try/except scaffolding, "if existing_node:" style guards and return
# statements are missing from this view -- restore from upstream before use.
4855 def _add_pkg(self, pkg, dep):
4862 myparent = dep.parent
4863 priority = dep.priority
4865 if priority is None:
4866 priority = DepPriority()
# The next few lines are the remains of the original (upstream) docstring.
4868 Fills the digraph with nodes comprised of packages to merge.
4869 mybigkey is the package spec of the package to merge.
4870 myparent is the package depending on mybigkey ( or None )
4871 addme = Should we add this package to the digraph or are we just looking at it's deps?
4872 Think --onlydeps, we need to ignore packages in that case.
4875 #IUSE-aware emerge -> USE DEP aware depgraph
4876 #"no downgrade" emerge
4878 # Ensure that the dependencies of the same package
4879 # are never processed more than once.
4880 previously_added = pkg in self.digraph
4882 # select the correct /var database that we'll be checking against
4883 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4884 pkgsettings = self.pkgsettings[pkg.root]
# Which command-line arguments (if any) pulled this package in.
4889 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4890 except portage.exception.InvalidDependString, e:
4891 if not pkg.installed:
4892 show_invalid_depstring_notice(
4893 pkg, pkg.metadata["PROVIDE"], str(e))
4897 if not pkg.onlydeps:
4898 if not pkg.installed and \
4899 "empty" not in self.myparams and \
4900 vardbapi.match(pkg.slot_atom):
4901 # Increase the priority of dependencies on packages that
4902 # are being rebuilt. This optimizes merge order so that
4903 # dependencies are rebuilt/updated as soon as possible,
4904 # which is needed especially when emerge is called by
4905 # revdep-rebuild since dependencies may be affected by ABI
4906 # breakage that has rendered them useless. Don't adjust
4907 # priority here when in "empty" mode since all packages
4908 # are being merged in that case.
4909 priority.rebuild = True
# Has another package already been chosen for this slot?
4911 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4912 slot_collision = False
4914 existing_node_matches = pkg.cpv == existing_node.cpv
4915 if existing_node_matches and \
4916 pkg != existing_node and \
4917 dep.atom is not None:
4918 # Use package set for matching since it will match via
4919 # PROVIDE when necessary, while match_from_list does not.
4920 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4921 if not atom_set.findAtomForPackage(existing_node):
4922 existing_node_matches = False
4923 if existing_node_matches:
4924 # The existing node can be reused.
4926 for parent_atom in arg_atoms:
4927 parent, atom = parent_atom
4928 self.digraph.add(existing_node, parent,
4930 self._add_parent_atom(existing_node, parent_atom)
4931 # If a direct circular dependency is not an unsatisfied
4932 # buildtime dependency then drop it here since otherwise
4933 # it can skew the merge order calculation in an unwanted
4935 if existing_node != myparent or \
4936 (priority.buildtime and not priority.satisfied):
4937 self.digraph.addnode(existing_node, myparent,
4939 if dep.atom is not None and dep.parent is not None:
4940 self._add_parent_atom(existing_node,
4941 (dep.parent, dep.atom))
4945 # A slot collision has occurred. Sometimes this coincides
4946 # with unresolvable blockers, so the slot collision will be
4947 # shown later if there are no unresolvable blockers.
4948 self._add_slot_conflict(pkg)
4949 slot_collision = True
4952 # Now add this node to the graph so that self.display()
4953 # can show use flags and --tree portage.output. This node is
4954 # only being partially added to the graph. It must not be
4955 # allowed to interfere with the other nodes that have been
4956 # added. Do not overwrite data for existing nodes in
4957 # self.mydbapi since that data will be used for blocker
4959 # Even though the graph is now invalid, continue to process
4960 # dependencies so that things like --fetchonly can still
4961 # function despite collisions.
# Record the chosen package for its slot and mirror it into the
# graph-side package database.
4964 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4965 self.mydbapi[pkg.root].cpv_inject(pkg)
4967 if not pkg.installed:
4968 # Allow this package to satisfy old-style virtuals in case it
4969 # doesn't already. Any pre-existing providers will be preferred
4972 pkgsettings.setinst(pkg.cpv, pkg.metadata)
4973 # For consistency, also update the global virtuals.
4974 settings = self.roots[pkg.root].settings
4976 settings.setinst(pkg.cpv, pkg.metadata)
4978 except portage.exception.InvalidDependString, e:
4979 show_invalid_depstring_notice(
4980 pkg, pkg.metadata["PROVIDE"], str(e))
4985 self._set_nodes.add(pkg)
4987 # Do this even when addme is False (--onlydeps) so that the
4988 # parent/child relationship is always known in case
4989 # self._show_slot_collision_notice() needs to be called later.
4990 self.digraph.add(pkg, myparent, priority=priority)
4991 if dep.atom is not None and dep.parent is not None:
4992 self._add_parent_atom(pkg, (dep.parent, dep.atom))
4995 for parent_atom in arg_atoms:
4996 parent, atom = parent_atom
4997 self.digraph.add(pkg, parent, priority=priority)
4998 self._add_parent_atom(pkg, parent_atom)
5000 """ This section determines whether we go deeper into dependencies or not.
5001 We want to go deeper on a few occasions:
5002 Installing package A, we need to make sure package A's deps are met.
5003 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5004 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5006 dep_stack = self._dep_stack
5007 if "recurse" not in self.myparams:
5009 elif pkg.installed and \
5010 "deep" not in self.myparams:
# Installed packages' deps are parked aside unless --deep is in effect.
5011 dep_stack = self._ignored_deps
5013 self.spinner.update()
5018 if not previously_added:
5019 dep_stack.append(pkg)
5022 def _add_parent_atom(self, pkg, parent_atom):
5023 parent_atoms = self._parent_atoms.get(pkg)
5024 if parent_atoms is None:
5025 parent_atoms = set()
5026 self._parent_atoms[pkg] = parent_atoms
5027 parent_atoms.add(parent_atom)
# Register pkg as a participant in a slot conflict: remember the node and
# group it together with the package already occupying the same
# (slot_atom, root) key.
# NOTE(review): this numbered listing drops orig line 5034 (presumably
# "slot_nodes = set()") and lines 5037-5038 (presumably the final
# "slot_nodes.add(pkg)") -- restore from upstream before editing.
5029 def _add_slot_conflict(self, pkg):
5030 self._slot_collision_nodes.add(pkg)
5031 slot_key = (pkg.slot_atom, pkg.root)
5032 slot_nodes = self._slot_collision_info.get(slot_key)
5033 if slot_nodes is None:
# First conflict for this slot: seed the group with the previously
# selected occupant of the slot.
5035 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5036 self._slot_collision_info[slot_key] = slot_nodes
# Expand one package's DEPEND/RDEPEND/PDEPEND strings into Dependency
# objects and feed each through _add_dep(). Build-time deps are rooted at
# "/" while run-time deps use the package's own root; various modes
# (--buildpkgonly, --with-bdeps, removal actions) blank out dep classes.
# NOTE(review): numbered listing with omitted lines (e.g. orig 5040,
# 5042-5043, 5046, 5049-5050, 5052, 5054, 5062, 5071, 5073, 5076,
# 5079-5080, 5085-5086, 5089, 5091, 5095-5098, 5103, 5108-5109, 5111,
# 5113-5114, 5116, 5120, 5125-5126, 5130, 5132-5134, 5137, 5140, 5144,
# 5154-5155 absent); loop headers, try/except scaffolding and return
# statements are missing from this view.
5039 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5041 mytype = pkg.type_name
5044 metadata = pkg.metadata
5045 myuse = pkg.use.enabled
5047 depth = pkg.depth + 1
5048 removal_action = "remove" in self.myparams
5051 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5053 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: only build-time deps matter.
5055 if not pkg.built and \
5056 "--buildpkgonly" in self.myopts and \
5057 "deep" not in self.myparams and \
5058 "empty" not in self.myparams:
5059 edepend["RDEPEND"] = ""
5060 edepend["PDEPEND"] = ""
5061 bdeps_satisfied = False
5063 if pkg.built and not removal_action:
5064 if self.myopts.get("--with-bdeps", "n") == "y":
5065 # Pull in build time deps as requested, but marked them as
5066 # "satisfied" since they are not strictly required. This allows
5067 # more freedom in the merge order calculation for solving
5068 # circular dependencies. Don't convert to PDEPEND since that
5069 # could make --with-bdeps=y less effective if it is used to
5070 # adjust merge order to prevent built_with_use() calls from
5072 bdeps_satisfied = True
5074 # built packages do not have build time dependencies.
5075 edepend["DEPEND"] = ""
5077 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5078 edepend["DEPEND"] = ""
# (dep_root, dep_string, priority) triples: DEPEND applies to "/",
# RDEPEND/PDEPEND to the package's installation root.
5081 ("/", edepend["DEPEND"],
5082 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
5083 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5084 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5087 debug = "--debug" in self.myopts
# Installed packages get lenient dep-string parsing.
5088 strict = mytype != "installed"
5090 for dep_root, dep_string, dep_priority in deps:
5092 # Decrease priority so that --buildpkgonly
5093 # hasallzeros() works correctly.
5094 dep_priority = DepPriority()
5099 print "Parent: ", jbigkey
5100 print "Depstring:", dep_string
5101 print "Priority:", dep_priority
5102 vardb = self.roots[dep_root].trees["vartree"].dbapi
5104 selected_atoms = self._select_atoms(dep_root,
5105 dep_string, myuse=myuse, parent=pkg, strict=strict)
5106 except portage.exception.InvalidDependString, e:
5107 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5110 print "Candidates:", selected_atoms
5112 for atom in selected_atoms:
5115 atom = portage.dep.Atom(atom)
5117 mypriority = dep_priority.copy()
# A non-blocker atom already satisfied by an installed package is
# marked satisfied so merge ordering can relax around it.
5118 if not atom.blocker and vardb.match(atom):
5119 mypriority.satisfied = True
5121 if not self._add_dep(Dependency(atom=atom,
5122 blocker=atom.blocker, depth=depth, parent=pkg,
5123 priority=mypriority, root=dep_root),
5124 allow_unsatisfied=allow_unsatisfied):
5127 except portage.exception.InvalidAtom, e:
5128 show_invalid_depstring_notice(
5129 pkg, dep_string, str(e))
5131 if not pkg.installed:
5135 print "Exiting...", jbigkey
# Ambiguous (category-less) atoms in a dep string are a hard error;
# report which package types/locations are affected.
5136 except portage.exception.AmbiguousPackageName, e:
5138 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5139 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5141 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5142 portage.writemsg("\n", noiselevel=-1)
5143 if mytype == "binary":
5145 "!!! This binary package cannot be installed: '%s'\n" % \
5146 mykey, noiselevel=-1)
5147 elif mytype == "ebuild":
5148 portdb = self.roots[myroot].trees["porttree"].dbapi
5149 myebuild, mylocation = portdb.findname2(mykey)
5150 portage.writemsg("!!! This ebuild cannot be installed: " + \
5151 "'%s'\n" % myebuild, noiselevel=-1)
5152 portage.writemsg("!!! Please notify the package maintainer " + \
5153 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """Build a dependency priority of the kind appropriate for the current
    mode: UnmergeDepPriority during removal actions, plain DepPriority
    otherwise. Keyword arguments are forwarded to the constructor."""
    ctor = UnmergeDepPriority if "remove" in self.myparams else DepPriority
    return ctor(**kwargs)
# Expand a category-less atom into the list of category-qualified atoms
# that exist in any configured database for the given root.
# NOTE(review): numbered listing with omitted lines (orig 5165, 5170, 5172,
# 5176-5177, 5183-5186, 5190-5191 absent) -- the docstring delimiters, the
# "cp_set"/"deps" initializations, the package-name filter inside the loop
# and the final return are missing from this view.
5164 def _dep_expand(self, root_config, atom_without_category):
5166 @param root_config: a root config instance
5167 @type root_config: RootConfig
5168 @param atom_without_category: an atom without a category component
5169 @type atom_without_category: String
5171 @returns: a list of atoms containing categories (possibly empty)
# Derive the bare package name by parking the atom in the "null" category.
5173 null_cp = portage.dep_getkey(insert_category_into_atom(
5174 atom_without_category, "null"))
5175 cat, atom_pn = portage.catsplit(null_cp)
5178 for db, pkg_type, built, installed, db_keys in \
5179 self._filtered_trees[root_config.root]["dbs"]:
5180 cp_set.update(db.cp_all())
5181 for cp in list(cp_set):
5182 cat, pn = portage.catsplit(cp)
5187 cat, pn = portage.catsplit(cp)
5188 deps.append(insert_category_into_atom(
5189 atom_without_category, cat))
5192 def _have_new_virt(self, root, atom_cp):
5194 for db, pkg_type, built, installed, db_keys in \
5195 self._filtered_trees[root]["dbs"]:
5196 if db.cp_list(atom_cp):
# Generator over the command-line argument (arg, atom) pairs that match a
# given package, filtering out atoms answered by a new-style virtual and
# args superseded by a visible package in a higher slot.
# NOTE(review): numbered listing with omitted lines (orig 5204, 5211, 5214,
# 5217, 5221, 5224, 5226, 5229-5232 absent) -- the early return, the
# continue/break statements, "higher_slot = None" and the final yield logic
# are missing from this view.
5201 def _iter_atoms_for_pkg(self, pkg):
5202 # TODO: add multiple $ROOT support
5203 if pkg.root != self.target_root:
5205 atom_arg_map = self._atom_arg_map
5206 root_config = self.roots[pkg.root]
5207 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5208 atom_cp = portage.dep_getkey(atom)
# Skip atoms whose category differs when a new-style virtual covers them.
5209 if atom_cp != pkg.cp and \
5210 self._have_new_virt(pkg.root, atom_cp):
5212 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5213 visible_pkgs.reverse() # descending order
5215 for visible_pkg in visible_pkgs:
5216 if visible_pkg.cp != atom_cp:
5218 if pkg >= visible_pkg:
5219 # This is descending order, and we're not
5220 # interested in any versions <= pkg given.
# A visible same-name package in a different slot outranks pkg.
5222 if pkg.slot_atom != visible_pkg.slot_atom:
5223 higher_slot = visible_pkg
5225 if higher_slot is not None:
5227 for arg in atom_arg_map[(atom, pkg.root)]:
5228 if isinstance(arg, PackageArg) and \
# Entry point for turning the emerge command line (tbz2 files, ebuild
# paths, filesystem paths, sets, atoms) into graph roots and a favorites
# list, then building the full dependency graph.
# NOTE(review): numbered listing with many omitted lines throughout
# (roughly one in five original lines absent: loop headers such as
# "for x in myfiles:", try/except scaffolding, several guards, "args = []",
# "lookup_owners = []", raise/continue statements, etc.) -- restore the
# full text from upstream portage before any behavioral edit.
5233 def select_files(self, myfiles):
5234 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5235 appropriate depgraph and return a favorite list."""
5236 debug = "--debug" in self.myopts
5237 root_config = self.roots[self.target_root]
5238 sets = root_config.sets
5239 getSetAtoms = root_config.setconfig.getSetAtoms
5241 myroot = self.target_root
5242 dbs = self._filtered_trees[myroot]["dbs"]
5243 vardb = self.trees[myroot]["vartree"].dbapi
5244 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5245 portdb = self.trees[myroot]["porttree"].dbapi
5246 bindb = self.trees[myroot]["bintree"].dbapi
5247 pkgsettings = self.pkgsettings[myroot]
5249 onlydeps = "--onlydeps" in self.myopts
# --- Per-argument classification: binary package files first. ---
5252 ext = os.path.splitext(x)[1]
5254 if not os.path.exists(x):
5256 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5257 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5258 elif os.path.exists(
5259 os.path.join(pkgsettings["PKGDIR"], x)):
5260 x = os.path.join(pkgsettings["PKGDIR"], x)
5262 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5263 print "!!! Please ensure the tbz2 exists as specified.\n"
5264 return 0, myfavorites
5265 mytbz2=portage.xpak.tbz2(x)
5266 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The tbz2 must live at the canonical PKGDIR location for its key.
5267 if os.path.realpath(x) != \
5268 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5269 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5270 return 0, myfavorites
5271 db_keys = list(bindb._aux_cache_keys)
5272 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5273 pkg = Package(type_name="binary", root_config=root_config,
5274 cpv=mykey, built=True, metadata=metadata,
5276 self._pkg_cache[pkg] = pkg
5277 args.append(PackageArg(arg=x, package=pkg,
5278 root_config=root_config))
# --- Raw ebuild paths. ---
5279 elif ext==".ebuild":
5280 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5281 pkgdir = os.path.dirname(ebuild_path)
5282 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5283 cp = pkgdir[len(tree_root)+1:]
5284 e = portage.exception.PackageNotFound(
5285 ("%s is not in a valid portage tree " + \
5286 "hierarchy or does not exist") % x)
5287 if not portage.isvalidatom(cp):
5289 cat = portage.catsplit(cp)[0]
5290 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5291 if not portage.isvalidatom("="+mykey):
5293 ebuild_path = portdb.findname(mykey)
# The ebuild must resolve to its canonical tree location.
5295 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5296 cp, os.path.basename(ebuild_path)):
5297 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5298 return 0, myfavorites
5299 if mykey not in portdb.xmatch(
5300 "match-visible", portage.dep_getkey(mykey)):
5301 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5302 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5303 print colorize("BAD", "*** page for details.")
5304 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5307 raise portage.exception.PackageNotFound(
5308 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5309 db_keys = list(portdb._aux_cache_keys)
5310 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5311 pkg = Package(type_name="ebuild", root_config=root_config,
5312 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5313 pkgsettings.setcpv(pkg)
5314 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5315 self._pkg_cache[pkg] = pkg
5316 args.append(PackageArg(arg=x, package=pkg,
5317 root_config=root_config))
# --- Absolute filesystem paths: resolve owners later in one batch. ---
5318 elif x.startswith(os.path.sep):
5319 if not x.startswith(myroot):
5320 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5321 " $ROOT.\n") % x, noiselevel=-1)
5323 # Queue these up since it's most efficient to handle
5324 # multiple files in a single iter_owners() call.
5325 lookup_owners.append(x)
# --- Named sets (system/world and @set syntax). ---
5327 if x in ("system", "world"):
5329 if x.startswith(SETPREFIX):
5330 s = x[len(SETPREFIX):]
5332 raise portage.exception.PackageSetNotFound(s)
5335 # Recursively expand sets so that containment tests in
5336 # self._get_parent_sets() properly match atoms in nested
5337 # sets (like if world contains system).
5338 expanded_set = InternalPackageSet(
5339 initial_atoms=getSetAtoms(s))
5340 self._sets[s] = expanded_set
5341 args.append(SetArg(arg=x, set=expanded_set,
5342 root_config=root_config))
# --- Plain package atoms. ---
5344 if not is_valid_package_atom(x):
5345 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5347 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5348 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5350 # Don't expand categories or old-style virtuals here unless
5351 # necessary. Expansion of old-style virtuals here causes at
5352 # least the following problems:
5353 # 1) It's more difficult to determine which set(s) an atom
5354 # came from, if any.
5355 # 2) It takes away freedom from the resolver to choose other
5356 # possible expansions when necessary.
5358 args.append(AtomArg(arg=x, atom=x,
5359 root_config=root_config))
# Category-less atom: expand, prefer the category of an installed match.
5361 expanded_atoms = self._dep_expand(root_config, x)
5362 installed_cp_set = set()
5363 for atom in expanded_atoms:
5364 atom_cp = portage.dep_getkey(atom)
5365 if vardb.cp_list(atom_cp):
5366 installed_cp_set.add(atom_cp)
5367 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5368 installed_cp = iter(installed_cp_set).next()
5369 expanded_atoms = [atom for atom in expanded_atoms \
5370 if portage.dep_getkey(atom) == installed_cp]
5372 if len(expanded_atoms) > 1:
5375 ambiguous_package_name(x, expanded_atoms, root_config,
5376 self.spinner, self.myopts)
5377 return False, myfavorites
5379 atom = expanded_atoms[0]
5381 null_atom = insert_category_into_atom(x, "null")
5382 null_cp = portage.dep_getkey(null_atom)
5383 cat, atom_pn = portage.catsplit(null_cp)
5384 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5386 # Allow the depgraph to choose which virtual.
5387 atom = insert_category_into_atom(x, "virtual")
5389 atom = insert_category_into_atom(x, "null")
5391 args.append(AtomArg(arg=x, atom=atom,
5392 root_config=root_config))
# --- Resolve queued filesystem paths to owning packages. ---
5396 search_for_multiple = False
5397 if len(lookup_owners) > 1:
5398 search_for_multiple = True
5400 for x in lookup_owners:
5401 if not search_for_multiple and os.path.isdir(x):
5402 search_for_multiple = True
5403 relative_paths.append(x[len(myroot):])
5406 for pkg, relative_path in \
5407 real_vardb._owners.iter_owners(relative_paths):
5408 owners.add(pkg.mycpv)
5409 if not search_for_multiple:
5413 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5414 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5418 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5420 # portage now masks packages with missing slot, but it's
5421 # possible that one was installed by an older version
5422 atom = portage.cpv_getkey(cpv)
5424 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5425 args.append(AtomArg(arg=atom, atom=atom,
5426 root_config=root_config))
5428 if "--update" in self.myopts:
5429 # In some cases, the greedy slots behavior can pull in a slot that
5430 # the user would want to uninstall due to it being blocked by a
5431 # newer version in a different slot. Therefore, it's necessary to
5432 # detect and discard any that should be uninstalled. Each time
5433 # that arguments are updated, package selections are repeated in order to
5434 # ensure consistency with the current arguments:
5436 # 1) Initialize args
5437 # 2) Select packages and generate initial greedy atoms
5438 # 3) Update args with greedy atoms
5439 # 4) Select packages and generate greedy atoms again, while
5440 # accounting for any blockers between selected packages
5441 # 5) Update args with revised greedy atoms
5443 self._set_args(args)
5446 greedy_args.append(arg)
5447 if not isinstance(arg, AtomArg):
5449 for atom in self._greedy_slots(arg.root_config, arg.atom):
5451 AtomArg(arg=arg.arg, atom=atom,
5452 root_config=arg.root_config))
5454 self._set_args(greedy_args)
5457 # Revise greedy atoms, accounting for any blockers
5458 # between selected packages.
5459 revised_greedy_args = []
5461 revised_greedy_args.append(arg)
5462 if not isinstance(arg, AtomArg):
5464 for atom in self._greedy_slots(arg.root_config, arg.atom,
5465 blocker_lookahead=True):
5466 revised_greedy_args.append(
5467 AtomArg(arg=arg.arg, atom=atom,
5468 root_config=arg.root_config))
5469 args = revised_greedy_args
5470 del revised_greedy_args
5472 self._set_args(args)
# --- Collect favorites (atoms and set names) for the world file. ---
5474 myfavorites = set(myfavorites)
5476 if isinstance(arg, (AtomArg, PackageArg)):
5477 myfavorites.add(arg.atom)
5478 elif isinstance(arg, SetArg):
5479 myfavorites.add(arg.arg)
5480 myfavorites = list(myfavorites)
5482 pprovideddict = pkgsettings.pprovideddict
5484 portage.writemsg("\n", noiselevel=-1)
5485 # Order needs to be preserved since a feature of --nodeps
5486 # is to allow the user to force a specific merge order.
# --- Main loop: seed the graph from each argument's atoms. ---
5490 for atom in arg.set:
5491 self.spinner.update()
5492 dep = Dependency(atom=atom, onlydeps=onlydeps,
5493 root=myroot, parent=arg)
5494 atom_cp = portage.dep_getkey(atom)
5496 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5497 if pprovided and portage.match_from_list(atom, pprovided):
5498 # A provided package has been specified on the command line.
5499 self._pprovided_args.append((arg, atom))
5501 if isinstance(arg, PackageArg):
5502 if not self._add_pkg(arg.package, dep) or \
5503 not self._create_graph():
5504 sys.stderr.write(("\n\n!!! Problem resolving " + \
5505 "dependencies for %s\n") % arg.arg)
5506 return 0, myfavorites
5509 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5510 (arg, atom), noiselevel=-1)
5511 pkg, existing_node = self._select_package(
5512 myroot, atom, onlydeps=onlydeps)
# Unsatisfied argument atom: fatal unless it came from system/world.
5514 if not (isinstance(arg, SetArg) and \
5515 arg.name in ("system", "world")):
5516 self._unsatisfied_deps_for_display.append(
5517 ((myroot, atom), {}))
5518 return 0, myfavorites
5519 self._missing_args.append((arg, atom))
5521 if atom_cp != pkg.cp:
5522 # For old-style virtuals, we need to repeat the
5523 # package.provided check against the selected package.
5524 expanded_atom = atom.replace(atom_cp, pkg.cp)
5525 pprovided = pprovideddict.get(pkg.cp)
5527 portage.match_from_list(expanded_atom, pprovided):
5528 # A provided package has been
5529 # specified on the command line.
5530 self._pprovided_args.append((arg, atom))
5532 if pkg.installed and "selective" not in self.myparams:
5533 self._unsatisfied_deps_for_display.append(
5534 ((myroot, atom), {}))
5535 # Previous behavior was to bail out in this case, but
5536 # since the dep is satisfied by the installed package,
5537 # it's more friendly to continue building the graph
5538 # and just show a warning message. Therefore, only bail
5539 # out here if the atom is not from either the system or
5541 if not (isinstance(arg, SetArg) and \
5542 arg.name in ("system", "world")):
5543 return 0, myfavorites
5545 # Add the selected package to the graph as soon as possible
5546 # so that later dep_check() calls can use it as feedback
5547 # for making more consistent atom selections.
5548 if not self._add_pkg(pkg, dep):
5549 if isinstance(arg, SetArg):
5550 sys.stderr.write(("\n\n!!! Problem resolving " + \
5551 "dependencies for %s from %s\n") % \
5554 sys.stderr.write(("\n\n!!! Problem resolving " + \
5555 "dependencies for %s\n") % atom)
5556 return 0, myfavorites
# --- Error reporting for gpg/signature failures and generic errors. ---
5558 except portage.exception.MissingSignature, e:
5559 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5560 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5561 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5562 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5563 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5564 return 0, myfavorites
5565 except portage.exception.InvalidSignature, e:
5566 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5567 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5568 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5569 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5570 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5571 return 0, myfavorites
5572 except SystemExit, e:
5573 raise # Needed else can't exit
5574 except Exception, e:
5575 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5576 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5579 # Now that the root packages have been added to the graph,
5580 # process the dependencies.
5581 if not self._create_graph():
5582 return 0, myfavorites
# --usepkgonly: verify every to-merge node is backed by a binary package.
5585 if "--usepkgonly" in self.myopts:
5586 for xs in self.digraph.all_nodes():
5587 if not isinstance(xs, Package):
5589 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5593 print "Missing binary for:",xs[2]
5597 except self._unknown_internal_error:
5598 return False, myfavorites
5600 # We're true here unless we are missing binaries.
5601 return (not missing,myfavorites)
# Rebuild the "args" package set, the union set of all atoms, and the
# atom->argument map from the given argument list.
# NOTE(review): numbered listing with omitted lines (orig 5604, 5609,
# 5611-5612, 5614-5615, 5617-5619, 5624, 5628-5629, 5631-5633 absent) --
# docstring delimiters, the args_set reset, the loop headers and the
# per-key ref-list maintenance are missing from this view.
5603 def _set_args(self, args):
5605 Create the "args" package set from atoms and packages given as
5606 arguments. This method can be called multiple times if necessary.
5607 The package selection cache is automatically invalidated, since
5608 arguments influence package selections.
5610 args_set = self._sets["args"]
5613 if not isinstance(arg, (AtomArg, PackageArg)):
5616 if atom in args_set:
# Rebuild the union of all set atoms from every registered set.
5620 self._set_atoms.clear()
5621 self._set_atoms.update(chain(*self._sets.itervalues()))
5622 atom_arg_map = self._atom_arg_map
5623 atom_arg_map.clear()
5625 for atom in arg.set:
5626 atom_key = (atom, arg.root_config.root)
5627 refs = atom_arg_map.get(atom_key)
5630 atom_arg_map[atom_key] = refs
5634 # Invalidate the package selection cache, since
5635 # arguments influence package selections.
5636 self._highest_pkg_cache.clear()
# Compute extra slot atoms for "greedy" slot handling: installed slots of
# the same package that differ from the highest visible match's slot,
# optionally discarding slots that would blocker-conflict.
# NOTE(review): numbered listing with omitted lines (orig 5639, 5644-5645,
# 5648, 5650, 5655, 5657-5659, 5661-5662, 5667-5668, 5671-5672, 5676, 5681,
# 5684, 5686-5687, 5690, 5695-5698, 5704, 5707-5709, 5712, 5714 absent) --
# docstring delimiters, "slots = set()"/"greedy_pkgs = []"/"blockers = {}"
# initializations, early returns and continue/version-compare guards are
# missing from this view.
5638 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5640 Return a list of slot atoms corresponding to installed slots that
5641 differ from the slot of the highest visible match. Slot atoms that
5642 would trigger a blocker conflict are automatically discarded,
5643 potentially allowing automatic uninstallation of older slots when
5646 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5647 if highest_pkg is None:
5649 vardb = root_config.trees["vartree"].dbapi
5651 for cpv in vardb.match(atom):
5652 # don't mix new virtuals with old virtuals
5653 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5654 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5656 slots.add(highest_pkg.metadata["SLOT"])
# Only slots other than the highest match's own are candidates.
5660 slots.remove(highest_pkg.metadata["SLOT"])
5663 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5664 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5665 if pkg is not None and pkg < highest_pkg:
5666 greedy_pkgs.append(pkg)
5669 if not blocker_lookahead:
5670 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: collect each candidate's blocker atoms so mutually
# conflicting slots can be pruned.
5673 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5674 for pkg in greedy_pkgs + [highest_pkg]:
5675 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5677 atoms = self._select_atoms(
5678 pkg.root, dep_str, pkg.use.enabled,
5679 parent=pkg, strict=True)
5680 except portage.exception.InvalidDependString:
5682 blocker_atoms = (x for x in atoms if x.blocker)
5683 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5685 if highest_pkg not in blockers:
5688 # filter packages with invalid deps
5689 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5691 # filter packages that conflict with highest_pkg
5692 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5693 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5694 blockers[pkg].findAtomForPackage(highest_pkg))]
5699 # If two packages conflict, discard the lower version.
5700 discard_pkgs = set()
5701 greedy_pkgs.sort(reverse=True)
5702 for pkg1 in greedy_pkgs:
5703 if pkg1 in discard_pkgs:
5705 for pkg2 in greedy_pkgs:
5706 if pkg2 in discard_pkgs:
5710 if blockers[pkg1].findAtomForPackage(pkg2) or \
5711 blockers[pkg2].findAtomForPackage(pkg1):
5713 discard_pkgs.add(pkg2)
5715 return [pkg.slot_atom for pkg in greedy_pkgs \
5716 if pkg not in discard_pkgs]
5718 def _select_atoms_from_graph(self, *pargs, **kwargs):
5720 Prefer atoms matching packages that have already been
5721 added to the graph or those that are installed and have
5722 not been scheduled for replacement.
5724 kwargs["trees"] = self._graph_trees
5725 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run dep_check() on a dependency string, temporarily attaching the parent
# package and relaxing portage.dep strictness when requested, and return
# the selected atoms; raises InvalidDependString on failure.
# NOTE(review): numbered listing with omitted lines (orig 5732, 5734-5735,
# 5738, 5743, 5747 absent) -- the "if trees is None:" guard, the
# try/finally scaffolding around the dep_check call and the
# "if not mycheck[0]:" failure test are missing from this view.
5727 def _select_atoms_highest_available(self, root, depstring,
5728 myuse=None, parent=None, strict=True, trees=None):
5729 """This will raise InvalidDependString if necessary. If trees is
5730 None then self._filtered_trees is used."""
5731 pkgsettings = self.pkgsettings[root]
5733 trees = self._filtered_trees
# Attach the parent so dep_check can consult it, and relax the
# module-global strict flag for non-strict (installed-package) parses.
5736 if parent is not None:
5737 trees[root]["parent"] = parent
5739 portage.dep._dep_check_strict = False
5740 mycheck = portage.dep_check(depstring, None,
5741 pkgsettings, myuse=myuse,
5742 myroot=root, trees=trees)
# Cleanup: detach the parent and restore the strict flag.
5744 if parent is not None:
5745 trees[root].pop("parent")
5746 portage.dep._dep_check_strict = True
5748 raise portage.exception.InvalidDependString(mycheck[1])
5749 selected_atoms = mycheck[1]
5750 return selected_atoms
# Explain to the user why 'atom' could not be satisfied for 'root':
# collects packages that fail only because of USE/IUSE requirements,
# masked candidates, and finally walks the digraph upward to show
# which parent package or argument pulled the dependency in.
# NOTE(review): this listing has gaps in its embedded line numbering
# (e.g. 5756, 5758, 5763 are absent), so some statements are missing
# here; verify against the complete upstream file before editing.
5752 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5753 atom = portage.dep.Atom(atom)
5754 atom_set = InternalPackageSet(initial_atoms=(atom,))
5755 atom_without_use = atom
5757 atom_without_use = portage.dep.remove_slot(atom)
5759 atom_without_use += ":" + atom.slot
5760 atom_without_use = portage.dep.Atom(atom_without_use)
5761 xinfo = '"%s"' % atom
5764 # Discard null/ from failed cpv_expand category expansion.
5765 xinfo = xinfo.replace("null/", "")
5766 masked_packages = []
5768 missing_licenses = []
5769 have_eapi_mask = False
5770 pkgsettings = self.pkgsettings[root]
5771 implicit_iuse = pkgsettings._get_implicit_iuse()
5772 root_config = self.roots[root]
5773 portdb = self.roots[root].trees["porttree"].dbapi
5774 dbs = self._filtered_trees[root]["dbs"]
# Scan every configured db (ebuild/binary/installed) for candidates
# that match the atom with USE deps stripped.
5775 for db, pkg_type, built, installed, db_keys in dbs:
5779 if hasattr(db, "xmatch"):
5780 cpv_list = db.xmatch("match-all", atom_without_use)
5782 cpv_list = db.match(atom_without_use)
5785 for cpv in cpv_list:
5786 metadata, mreasons = get_mask_info(root_config, cpv,
5787 pkgsettings, db, pkg_type, built, installed, db_keys)
5788 if metadata is not None:
5789 pkg = Package(built=built, cpv=cpv,
5790 installed=installed, metadata=metadata,
5791 root_config=root_config)
5792 if pkg.cp != atom.cp:
5793 # A cpv can be returned from dbapi.match() as an
5794 # old-style virtual match even in cases when the
5795 # package does not actually PROVIDE the virtual.
5796 # Filter out any such false matches here.
5797 if not atom_set.findAtomForPackage(pkg):
5799 if atom.use and not mreasons:
5800 missing_use.append(pkg)
5802 masked_packages.append(
5803 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE failures: either the flag is entirely absent from
# IUSE, or it merely needs to be toggled in the configuration.
5805 missing_use_reasons = []
5806 missing_iuse_reasons = []
5807 for pkg in missing_use:
5808 use = pkg.use.enabled
5809 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5810 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5812 for x in atom.use.required:
5813 if iuse_re.match(x) is None:
5814 missing_iuse.append(x)
5817 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5818 missing_iuse_reasons.append((pkg, mreasons))
5820 need_enable = sorted(atom.use.enabled.difference(use))
5821 need_disable = sorted(atom.use.disabled.intersection(use))
5822 if need_enable or need_disable:
5824 changes.extend(colorize("red", "+" + x) \
5825 for x in need_enable)
5826 changes.extend(colorize("blue", "-" + x) \
5827 for x in need_disable)
5828 mreasons.append("Change USE: %s" % " ".join(changes))
5829 missing_use_reasons.append((pkg, mreasons))
5831 if missing_iuse_reasons and not missing_use_reasons:
5832 missing_use_reasons = missing_iuse_reasons
5833 elif missing_use_reasons:
5834 # Only show the latest version.
5835 del missing_use_reasons[1:]
5837 if missing_use_reasons:
5838 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5839 print "!!! One of the following packages is required to complete your request:"
5840 for pkg, mreasons in missing_use_reasons:
5841 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5843 elif masked_packages:
5845 colorize("BAD", "All ebuilds that could satisfy ") + \
5846 colorize("INFORM", xinfo) + \
5847 colorize("BAD", " have been masked.")
5848 print "!!! One of the following masked packages is required to complete your request:"
5849 have_eapi_mask = show_masked_packages(masked_packages)
5852 msg = ("The current version of portage supports " + \
5853 "EAPI '%s'. You must upgrade to a newer version" + \
5854 " of portage before EAPI masked packages can" + \
5855 " be installed.") % portage.const.EAPI
5856 from textwrap import wrap
5857 for line in wrap(msg, 75):
5862 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5864 # Show parent nodes and the argument that pulled them in.
5865 traversed_nodes = set()
5868 while node is not None:
5869 traversed_nodes.add(node)
5870 msg.append('(dependency required by "%s" [%s])' % \
5871 (colorize('INFORM', str(node.cpv)), node.type_name))
5872 # When traversing to parents, prefer arguments over packages
5873 # since arguments are root nodes. Never traverse the same
5874 # package twice, in order to prevent an infinite loop.
5875 selected_parent = None
5876 for parent in self.digraph.parent_nodes(node):
5877 if isinstance(parent, DependencyArg):
5878 msg.append('(dependency required by "%s" [argument])' % \
5879 (colorize('INFORM', str(parent))))
5880 selected_parent = None
5882 if parent not in traversed_nodes:
5883 selected_parent = parent
5884 node = selected_parent
# Memoizing wrapper around _select_pkg_highest_available_imp().
# Results are cached per (root, atom, onlydeps); on a cache hit the
# cached entry is refreshed if the package has meanwhile been added
# to the graph (i.e. it now appears in _slot_pkg_map).
# NOTE(review): lines 5893-5894, 5899-5902, 5905-5906 of the original
# are elided from this listing — the cache-hit/return paths are
# incomplete here; verify against upstream before editing.
5890 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5891 cache_key = (root, atom, onlydeps)
5892 ret = self._highest_pkg_cache.get(cache_key)
5895 if pkg and not existing:
5896 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5897 if existing and existing == pkg:
5898 # Update the cache to reflect that the
5899 # package has been added to the graph.
5901 self._highest_pkg_cache[cache_key] = ret
5903 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5904 self._highest_pkg_cache[cache_key] = ret
# Record the selected package in visible_pkgs unless it is an
# installed package masked by KEYWORDS.
5907 settings = pkg.root_config.settings
5908 if visible(settings, pkg) and not (pkg.installed and \
5909 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
5910 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: iterates all dbs for 'root'
# (ordered by type preference, "ebuild" last), collects acceptable
# matches for 'atom' into matched_packages, and returns the best
# (matched_packages[-1], existing_node) pair. Handles reuse of
# existing graph nodes, --noreplace/--newuse/--reinstall logic,
# visibility filtering, USE-dep filtering and old-style virtuals.
# NOTE(review): many original lines are elided from this listing
# (numbering gaps, e.g. 5946-5947, 5955, 5958, 6008, 6013-6018,
# 6041-6047, 6054-6067, 6073-6082, 6095-6127) — several 'if'/'try'
# bodies and 'continue'/'break' statements are missing; verify
# against the complete upstream file before editing.
5913 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5914 root_config = self.roots[root]
5915 pkgsettings = self.pkgsettings[root]
5916 dbs = self._filtered_trees[root]["dbs"]
5917 vardb = self.roots[root].trees["vartree"].dbapi
5918 portdb = self.roots[root].trees["porttree"].dbapi
5919 # List of acceptable packages, ordered by type preference.
5920 matched_packages = []
5921 highest_version = None
5922 if not isinstance(atom, portage.dep.Atom):
5923 atom = portage.dep.Atom(atom)
5925 atom_set = InternalPackageSet(initial_atoms=(atom,))
5926 existing_node = None
5928 usepkgonly = "--usepkgonly" in self.myopts
5929 empty = "empty" in self.myparams
5930 selective = "selective" in self.myparams
5932 noreplace = "--noreplace" in self.myopts
5933 # Behavior of the "selective" parameter depends on
5934 # whether or not a package matches an argument atom.
5935 # If an installed package provides an old-style
5936 # virtual that is no longer provided by an available
5937 # package, the installed package may match an argument
5938 # atom even though none of the available packages do.
5939 # Therefore, "selective" logic does not consider
5940 # whether or not an installed package matches an
5941 # argument atom. It only considers whether or not
5942 # available packages match argument atoms, which is
5943 # represented by the found_available_arg flag.
5944 found_available_arg = False
# Two passes: first try to reuse nodes already in the graph, then
# fall back to selecting fresh candidates.
5945 for find_existing_node in True, False:
5948 for db, pkg_type, built, installed, db_keys in dbs:
5951 if installed and not find_existing_node:
5952 want_reinstall = reinstall or empty or \
5953 (found_available_arg and not selective)
5954 if want_reinstall and matched_packages:
5956 if hasattr(db, "xmatch"):
5957 cpv_list = db.xmatch("match-all", atom)
5959 cpv_list = db.match(atom)
5961 # USE=multislot can make an installed package appear as if
5962 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5963 # won't do any good as long as USE=multislot is enabled since
5964 # the newly built package still won't have the expected slot.
5965 # Therefore, assume that such SLOT dependencies are already
5966 # satisfied rather than forcing a rebuild.
5967 if installed and not cpv_list and atom.slot:
5968 for cpv in db.match(atom.cp):
5969 slot_available = False
5970 for other_db, other_type, other_built, \
5971 other_installed, other_keys in dbs:
5974 other_db.aux_get(cpv, ["SLOT"])[0]:
5975 slot_available = True
5979 if not slot_available:
5981 inst_pkg = self._pkg(cpv, "installed",
5982 root_config, installed=installed)
5983 # Remove the slot from the atom and verify that
5984 # the package matches the resulting atom.
5985 atom_without_slot = portage.dep.remove_slot(atom)
5987 atom_without_slot += str(atom.use)
5988 atom_without_slot = portage.dep.Atom(atom_without_slot)
5989 if portage.match_from_list(
5990 atom_without_slot, [inst_pkg]):
5991 cpv_list = [inst_pkg.cpv]
5996 pkg_status = "merge"
5997 if installed or onlydeps:
5998 pkg_status = "nomerge"
6001 for cpv in cpv_list:
6002 # Make --noreplace take precedence over --newuse.
6003 if not installed and noreplace and \
6004 cpv in vardb.match(atom):
6005 # If the installed version is masked, it may
6006 # be necessary to look at lower versions,
6007 # in case there is a visible downgrade.
6009 reinstall_for_flags = None
6010 cache_key = (pkg_type, root, cpv, pkg_status)
6011 calculated_use = True
6012 pkg = self._pkg_cache.get(cache_key)
6014 calculated_use = False
6016 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6019 pkg = Package(built=built, cpv=cpv,
6020 installed=installed, metadata=metadata,
6021 onlydeps=onlydeps, root_config=root_config,
6023 metadata = pkg.metadata
6024 if not built and ("?" in metadata["LICENSE"] or \
6025 "?" in metadata["PROVIDE"]):
6026 # This is avoided whenever possible because
6027 # it's expensive. It only needs to be done here
6028 # if it has an effect on visibility.
6029 pkgsettings.setcpv(pkg)
6030 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6031 calculated_use = True
6032 self._pkg_cache[pkg] = pkg
6034 if not installed or (built and matched_packages):
6035 # Only enforce visibility on installed packages
6036 # if there is at least one other visible package
6037 # available. By filtering installed masked packages
6038 # here, packages that have been masked since they
6039 # were installed can be automatically downgraded
6040 # to an unmasked version.
6042 if not visible(pkgsettings, pkg):
6044 except portage.exception.InvalidDependString:
6048 # Enable upgrade or downgrade to a version
6049 # with visible KEYWORDS when the installed
6050 # version is masked by KEYWORDS, but never
6051 # reinstall the same exact version only due
6052 # to a KEYWORDS mask.
6053 if built and matched_packages:
6055 different_version = None
6056 for avail_pkg in matched_packages:
6057 if not portage.dep.cpvequal(
6058 pkg.cpv, avail_pkg.cpv):
6059 different_version = avail_pkg
6061 if different_version is not None:
6064 pkgsettings._getMissingKeywords(
6065 pkg.cpv, pkg.metadata):
6068 # If the ebuild no longer exists or it's
6069 # keywords have been dropped, reject built
6070 # instances (installed or binary).
6071 # If --usepkgonly is enabled, assume that
6072 # the ebuild status should be ignored.
6076 pkg.cpv, "ebuild", root_config)
6077 except portage.exception.PackageNotFound:
6080 if not visible(pkgsettings, pkg_eb):
6083 if not pkg.built and not calculated_use:
6084 # This is avoided whenever possible because
6086 pkgsettings.setcpv(pkg)
6087 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6089 if pkg.cp != atom.cp:
6090 # A cpv can be returned from dbapi.match() as an
6091 # old-style virtual match even in cases when the
6092 # package does not actually PROVIDE the virtual.
6093 # Filter out any such false matches here.
6094 if not atom_set.findAtomForPackage(pkg):
6098 if root == self.target_root:
6100 # Ebuild USE must have been calculated prior
6101 # to this point, in case atoms have USE deps.
6102 myarg = self._iter_atoms_for_pkg(pkg).next()
6103 except StopIteration:
6105 except portage.exception.InvalidDependString:
6107 # masked by corruption
6109 if not installed and myarg:
6110 found_available_arg = True
6112 if atom.use and not pkg.built:
6113 use = pkg.use.enabled
6114 if atom.use.enabled.difference(use):
6116 if atom.use.disabled.intersection(use):
6118 if pkg.cp == atom_cp:
6119 if highest_version is None:
6120 highest_version = pkg
6121 elif pkg > highest_version:
6122 highest_version = pkg
6123 # At this point, we've found the highest visible
6124 # match from the current repo. Any lower versions
6125 # from this repo are ignored, so this so the loop
6126 # will always end with a break statement below
6128 if find_existing_node:
6129 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6132 if portage.dep.match_from_list(atom, [e_pkg]):
6133 if highest_version and \
6134 e_pkg.cp == atom_cp and \
6135 e_pkg < highest_version and \
6136 e_pkg.slot_atom != highest_version.slot_atom:
6137 # There is a higher version available in a
6138 # different slot, so this existing node is
6142 matched_packages.append(e_pkg)
6143 existing_node = e_pkg
6145 # Compare built package to current config and
6146 # reject the built package if necessary.
6147 if built and not installed and \
6148 ("--newuse" in self.myopts or \
6149 "--reinstall" in self.myopts):
6150 iuses = pkg.iuse.all
6151 old_use = pkg.use.enabled
6153 pkgsettings.setcpv(myeb)
6155 pkgsettings.setcpv(pkg)
6156 now_use = pkgsettings["PORTAGE_USE"].split()
6157 forced_flags = set()
6158 forced_flags.update(pkgsettings.useforce)
6159 forced_flags.update(pkgsettings.usemask)
6161 if myeb and not usepkgonly:
6162 cur_iuse = myeb.iuse.all
6163 if self._reinstall_for_flags(forced_flags,
6167 # Compare current config to installed package
6168 # and do not reinstall if possible.
6169 if not installed and \
6170 ("--newuse" in self.myopts or \
6171 "--reinstall" in self.myopts) and \
6172 cpv in vardb.match(atom):
6173 pkgsettings.setcpv(pkg)
6174 forced_flags = set()
6175 forced_flags.update(pkgsettings.useforce)
6176 forced_flags.update(pkgsettings.usemask)
6177 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6178 old_iuse = set(filter_iuse_defaults(
6179 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6180 cur_use = pkgsettings["PORTAGE_USE"].split()
6181 cur_iuse = pkg.iuse.all
6182 reinstall_for_flags = \
6183 self._reinstall_for_flags(
6184 forced_flags, old_use, old_iuse,
6186 if reinstall_for_flags:
6190 matched_packages.append(pkg)
6191 if reinstall_for_flags:
6192 self._reinstall_nodes[pkg] = \
6196 if not matched_packages:
6199 if "--debug" in self.myopts:
6200 for pkg in matched_packages:
6201 portage.writemsg("%s %s\n" % \
6202 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6204 # Filter out any old-style virtual matches if they are
6205 # mixed with new-style virtual matches.
6206 cp = portage.dep_getkey(atom)
6207 if len(matched_packages) > 1 and \
6208 "virtual" == portage.catsplit(cp)[0]:
6209 for pkg in matched_packages:
6212 # Got a new-style virtual, so filter
6213 # out any old-style virtuals.
6214 matched_packages = [pkg for pkg in matched_packages \
6218 if len(matched_packages) > 1:
6219 bestmatch = portage.best(
6220 [pkg.cpv for pkg in matched_packages])
6221 matched_packages = [pkg for pkg in matched_packages \
6222 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6224 # ordered by type preference ("ebuild" type is the last resort)
6225 return matched_packages[-1], existing_node
# Restricted selection used by _complete_graph(): only considers
# packages already present in the graph trees. Falls back to the
# installed-package cache entry when the slot has no graph node.
# NOTE(review): original lines 6228, 6231-6232, 6235-6236 and
# 6241-6242 are elided from this listing (docstring delimiters and
# early-return paths); verify against upstream before editing.
6227 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6229 Select packages that have already been added to the graph or
6230 those that are installed and have not been scheduled for
6233 graph_db = self._graph_trees[root]["porttree"].dbapi
6234 matches = graph_db.match(atom)
6237 cpv = matches[-1] # highest match
6238 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6239 graph_db.aux_get(cpv, ["SLOT"])[0])
6240 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6243 # Since this cpv exists in the graph_db,
6244 # we must have a cached Package instance.
6245 cache_key = ("installed", root, cpv, "nomerge")
6246 return (self._pkg_cache[cache_key], None)
# NOTE(review): original lines 6249, 6255-6256, 6259, 6262-6263,
# 6266-6267, 6273, 6279, 6286, 6289, 6293, 6295, 6297, 6307, 6315,
# 6323, 6325, 6332 and 6334-6336 are elided from this listing
# (docstring delimiters, blank lines, and several early
# return/continue statements); verify against upstream before editing.
6248 def _complete_graph(self):
6250 Add any deep dependencies of required sets (args, system, world) that
6251 have not been pulled into the graph yet. This ensures that the graph
6252 is consistent such that initially satisfied deep dependencies are not
6253 broken in the new graph. Initially unsatisfied dependencies are
6254 irrelevant since we only want to avoid breaking dependencies that are
6257 Since this method can consume enough time to disturb users, it is
6258 currently only enabled by the --complete-graph option.
6260 if "--buildpkgonly" in self.myopts or \
6261 "recurse" not in self.myparams:
6264 if "complete" not in self.myparams:
6265 # Skip this to avoid consuming enough time to disturb users.
6268 # Put the depgraph into a mode that causes it to only
6269 # select packages that have already been added to the
6270 # graph or those that are installed and have not been
6271 # scheduled for replacement. Also, toggle the "deep"
6272 # parameter so that all dependencies are traversed and
6274 self._select_atoms = self._select_atoms_from_graph
6275 self._select_package = self._select_pkg_from_graph
6276 already_deep = "deep" in self.myparams
6277 if not already_deep:
6278 self.myparams.add("deep")
6280 for root in self.roots:
6281 required_set_names = self._required_set_names.copy()
6282 if root == self.target_root and \
6283 (already_deep or "empty" in self.myparams):
6284 required_set_names.difference_update(self._sets)
6285 if not required_set_names and not self._ignored_deps:
6287 root_config = self.roots[root]
6288 setconfig = root_config.setconfig
6290 # Reuse existing SetArg instances when available.
6291 for arg in self.digraph.root_nodes():
6292 if not isinstance(arg, SetArg):
6294 if arg.root_config != root_config:
6296 if arg.name in required_set_names:
6298 required_set_names.remove(arg.name)
6299 # Create new SetArg instances only when necessary.
6300 for s in required_set_names:
6301 expanded_set = InternalPackageSet(
6302 initial_atoms=setconfig.getSetAtoms(s))
6303 atom = SETPREFIX + s
6304 args.append(SetArg(arg=atom, set=expanded_set,
6305 root_config=root_config))
6306 vardb = root_config.trees["vartree"].dbapi
# Seed the dep stack with every atom of each required set, plus any
# deps that were previously ignored, then rebuild the graph.
6308 for atom in arg.set:
6309 self._dep_stack.append(
6310 Dependency(atom=atom, root=root, parent=arg))
6311 if self._ignored_deps:
6312 self._dep_stack.extend(self._ignored_deps)
6313 self._ignored_deps = []
6314 if not self._create_graph(allow_unsatisfied=True):
6316 # Check the unsatisfied deps to see if any initially satisfied deps
6317 # will become unsatisfied due to an upgrade. Initially unsatisfied
6318 # deps are irrelevant since we only want to avoid breaking deps
6319 # that are initially satisfied.
6320 while self._unsatisfied_deps:
6321 dep = self._unsatisfied_deps.pop()
6322 matches = vardb.match_pkgs(dep.atom)
6324 self._initially_unsatisfied_deps.append(dep)
6326 # An scheduled installation broke a deep dependency.
6327 # Add the installed package to the graph so that it
6328 # will be appropriately reported as a slot collision
6329 # (possibly solvable via backtracking).
6330 pkg = matches[-1] # highest match
6331 if not self._add_pkg(pkg, dep):
6333 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): original lines 6338, 6342-6345, 6349, 6354, 6356 and
# 6365-6366 are elided from this listing (docstring delimiters, the
# installed/operation branch, the cache-hit return, the try: around
# aux_get, and the final return); verify against upstream before
# editing.
6337 def _pkg(self, cpv, type_name, root_config, installed=False):
6339 Get a package instance from the cache, or create a new
6340 one if necessary. Raises KeyError from aux_get if it
6341 failures for some reason (package does not exist or is
6346 operation = "nomerge"
6347 pkg = self._pkg_cache.get(
6348 (type_name, root_config.root, cpv, operation))
6350 tree_type = self.pkg_tree_map[type_name]
6351 db = root_config.trees[tree_type].dbapi
# Use the pristine (non-graph) trees' aux-cache keys so metadata is
# fetched from the original dbapi for this tree type.
6352 db_keys = list(self._trees_orig[root_config.root][
6353 tree_type].dbapi._aux_cache_keys)
6355 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6357 raise portage.exception.PackageNotFound(cpv)
6358 pkg = Package(cpv=cpv, metadata=metadata,
6359 root_config=root_config, installed=installed)
6360 if type_name == "ebuild":
# Ebuild USE is config-dependent, so compute it now via setcpv().
6361 settings = self.pkgsettings[root_config.root]
6362 settings.setcpv(pkg)
6363 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6364 self._pkg_cache[pkg] = pkg
# NOTE(review): this listing elides many original lines (numbering
# gaps, e.g. 6372, 6375-6378, 6390, 6393-6394, 6405-6419, 6430-6452,
# 6461-6480, 6497-6498, 6507-6520, 6528-6551, 6569-6607, 6613-6620,
# 6646-6648) — loop headers, try/finally scaffolding and several
# continue/return statements are missing; verify against the complete
# upstream file before editing.
6367 def validate_blockers(self):
6368 """Remove any blockers from the digraph that do not match any of the
6369 packages within the graph. If necessary, create hard deps to ensure
6370 correct merge order such that mutually blocking packages are never
6371 installed simultaneously."""
6373 if "--buildpkgonly" in self.myopts or \
6374 "--nodeps" in self.myopts:
6377 #if "deep" in self.myparams:
6379 # Pull in blockers from all installed packages that haven't already
6380 # been pulled into the depgraph. This is not enabled by default
6381 # due to the performance penalty that is incurred by all the
6382 # additional dep_check calls that are required.
# Phase 1: gather blocker atoms for every package of every root,
# using the per-root BlockerCache keyed on the vdb COUNTER.
6384 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6385 for myroot in self.trees:
6386 vardb = self.trees[myroot]["vartree"].dbapi
6387 portdb = self.trees[myroot]["porttree"].dbapi
6388 pkgsettings = self.pkgsettings[myroot]
6389 final_db = self.mydbapi[myroot]
6391 blocker_cache = BlockerCache(myroot, vardb)
6392 stale_cache = set(blocker_cache)
6395 stale_cache.discard(cpv)
6396 pkg_in_graph = self.digraph.contains(pkg)
6398 # Check for masked installed packages. Only warn about
6399 # packages that are in the graph in order to avoid warning
6400 # about those that will be automatically uninstalled during
6401 # the merge process or by --depclean.
6403 if pkg_in_graph and not visible(pkgsettings, pkg):
6404 self._masked_installed.add(pkg)
6406 blocker_atoms = None
6412 self._blocker_parents.child_nodes(pkg))
6417 self._irrelevant_blockers.child_nodes(pkg))
6420 if blockers is not None:
6421 blockers = set(str(blocker.atom) \
6422 for blocker in blockers)
6424 # If this node has any blockers, create a "nomerge"
6425 # node for it so that they can be enforced.
6426 self.spinner.update()
6427 blocker_data = blocker_cache.get(cpv)
# Stale cache entry: COUNTER mismatch means the vdb entry changed
# since the blocker data was cached.
6428 if blocker_data is not None and \
6429 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6432 # If blocker data from the graph is available, use
6433 # it to validate the cache and update the cache if
6435 if blocker_data is not None and \
6436 blockers is not None:
6437 if not blockers.symmetric_difference(
6438 blocker_data.atoms):
6442 if blocker_data is None and \
6443 blockers is not None:
6444 # Re-use the blockers from the graph.
6445 blocker_atoms = sorted(blockers)
6446 counter = long(pkg.metadata["COUNTER"])
6448 blocker_cache.BlockerData(counter, blocker_atoms)
6449 blocker_cache[pkg.cpv] = blocker_data
6453 blocker_atoms = blocker_data.atoms
6455 # Use aux_get() to trigger FakeVartree global
6456 # updates on *DEPEND when appropriate.
6457 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6458 # It is crucial to pass in final_db here in order to
6459 # optimize dep_check calls by eliminating atoms via
6460 # dep_wordreduce and dep_eval calls.
6462 portage.dep._dep_check_strict = False
6464 success, atoms = portage.dep_check(depstr,
6465 final_db, pkgsettings, myuse=pkg.use.enabled,
6466 trees=self._graph_trees, myroot=myroot)
6467 except Exception, e:
6468 if isinstance(e, SystemExit):
6470 # This is helpful, for example, if a ValueError
6471 # is thrown from cpv_expand due to multiple
6472 # matches (this can happen if an atom lacks a
6474 show_invalid_depstring_notice(
6475 pkg, depstr, str(e))
6479 portage.dep._dep_check_strict = True
6481 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6482 if replacement_pkg and \
6483 replacement_pkg[0].operation == "merge":
6484 # This package is being replaced anyway, so
6485 # ignore invalid dependencies so as not to
6486 # annoy the user too much (otherwise they'd be
6487 # forced to manually unmerge it first).
6489 show_invalid_depstring_notice(pkg, depstr, atoms)
6491 blocker_atoms = [myatom for myatom in atoms \
6492 if myatom.startswith("!")]
6493 blocker_atoms.sort()
6494 counter = long(pkg.metadata["COUNTER"])
6495 blocker_cache[cpv] = \
6496 blocker_cache.BlockerData(counter, blocker_atoms)
6499 for atom in blocker_atoms:
6500 blocker = Blocker(atom=portage.dep.Atom(atom),
6501 eapi=pkg.metadata["EAPI"], root=myroot)
6502 self._blocker_parents.add(blocker, pkg)
6503 except portage.exception.InvalidAtom, e:
6504 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6505 show_invalid_depstring_notice(
6506 pkg, depstr, "Invalid Atom: %s" % (e,))
6508 for cpv in stale_cache:
6509 del blocker_cache[cpv]
6510 blocker_cache.flush()
6513 # Discard any "uninstall" tasks scheduled by previous calls
6514 # to this method, since those tasks may not make sense given
6515 # the current graph state.
6516 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6517 if previous_uninstall_tasks:
6518 self._blocker_uninstalls = digraph()
6519 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each collected blocker against the initial
# (installed) and final (post-merge) package databases.
6521 for blocker in self._blocker_parents.leaf_nodes():
6522 self.spinner.update()
6523 root_config = self.roots[blocker.root]
6524 virtuals = root_config.settings.getvirtuals()
6525 myroot = blocker.root
6526 initial_db = self.trees[myroot]["vartree"].dbapi
6527 final_db = self.mydbapi[myroot]
6529 provider_virtual = False
6530 if blocker.cp in virtuals and \
6531 not self._have_new_virt(blocker.root, blocker.cp):
6532 provider_virtual = True
6534 if provider_virtual:
# Expand an old-style virtual blocker into one atom per provider.
6536 for provider_entry in virtuals[blocker.cp]:
6538 portage.dep_getkey(provider_entry)
6539 atoms.append(blocker.atom.replace(
6540 blocker.cp, provider_cp))
6542 atoms = [blocker.atom]
6544 blocked_initial = []
6546 blocked_initial.extend(initial_db.match_pkgs(atom))
6550 blocked_final.extend(final_db.match_pkgs(atom))
6552 if not blocked_initial and not blocked_final:
6553 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6554 self._blocker_parents.remove(blocker)
6555 # Discard any parents that don't have any more blockers.
6556 for pkg in parent_pkgs:
6557 self._irrelevant_blockers.add(blocker, pkg)
6558 if not self._blocker_parents.child_nodes(pkg):
6559 self._blocker_parents.remove(pkg)
6561 for parent in self._blocker_parents.parent_nodes(blocker):
6562 unresolved_blocks = False
6563 depends_on_order = set()
6564 for pkg in blocked_initial:
6565 if pkg.slot_atom == parent.slot_atom:
6566 # TODO: Support blocks within slots in cases where it
6567 # might make sense. For example, a new version might
6568 # require that the old version be uninstalled at build
6571 if parent.installed:
6572 # Two currently installed packages conflict with
6573 # eachother. Ignore this case since the damage
6574 # is already done and this would be likely to
6575 # confuse users if displayed like a normal blocker.
6578 self._blocked_pkgs.add(pkg, blocker)
6580 if parent.operation == "merge":
6581 # Maybe the blocked package can be replaced or simply
6582 # unmerged to resolve this block.
6583 depends_on_order.add((pkg, parent))
6585 # None of the above blocker resolutions techniques apply,
6586 # so apparently this one is unresolvable.
6587 unresolved_blocks = True
6588 for pkg in blocked_final:
6589 if pkg.slot_atom == parent.slot_atom:
6590 # TODO: Support blocks within slots.
6592 if parent.operation == "nomerge" and \
6593 pkg.operation == "nomerge":
6594 # This blocker will be handled the next time that a
6595 # merge of either package is triggered.
6598 self._blocked_pkgs.add(pkg, blocker)
6600 # Maybe the blocking package can be
6601 # unmerged to resolve this block.
6602 if parent.operation == "merge" and pkg.installed:
6603 depends_on_order.add((pkg, parent))
6605 elif parent.operation == "nomerge":
6606 depends_on_order.add((parent, pkg))
6608 # None of the above blocker resolutions techniques apply,
6609 # so apparently this one is unresolvable.
6610 unresolved_blocks = True
6612 # Make sure we don't unmerge any package that have been pulled
6614 if not unresolved_blocks and depends_on_order:
6615 for inst_pkg, inst_task in depends_on_order:
6616 if self.digraph.contains(inst_pkg) and \
6617 self.digraph.parent_nodes(inst_pkg):
6618 unresolved_blocks = True
6621 if not unresolved_blocks and depends_on_order:
6622 for inst_pkg, inst_task in depends_on_order:
6623 uninst_task = Package(built=inst_pkg.built,
6624 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6625 metadata=inst_pkg.metadata,
6626 operation="uninstall",
6627 root_config=inst_pkg.root_config,
6628 type_name=inst_pkg.type_name)
6629 self._pkg_cache[uninst_task] = uninst_task
6630 # Enforce correct merge order with a hard dep.
6631 self.digraph.addnode(uninst_task, inst_task,
6632 priority=BlockerDepPriority.instance)
6633 # Count references to this blocker so that it can be
6634 # invalidated after nodes referencing it have been
6636 self._blocker_uninstalls.addnode(uninst_task, blocker)
6637 if not unresolved_blocks and not depends_on_order:
6638 self._irrelevant_blockers.add(blocker, parent)
6639 self._blocker_parents.remove_edge(blocker, parent)
6640 if not self._blocker_parents.parent_nodes(blocker):
6641 self._blocker_parents.remove(blocker)
6642 if not self._blocker_parents.child_nodes(parent):
6643 self._blocker_parents.remove(parent)
6644 if unresolved_blocks:
6645 self._unsolvable_blockers.add(blocker, parent)
# Tests whether any of the listed options is active in self.myopts;
# presumably these options make blocker conflicts acceptable.
# NOTE(review): original lines 6650 and 6654-6657 are elided from
# this listing — the variable initialization and the return
# statement(s) are missing; verify against upstream before editing.
6649 def _accept_blocker_conflicts(self):
6651 for x in ("--buildpkgonly", "--fetchonly",
6652 "--fetch-all-uri", "--nodeps"):
6653 if x in self.myopts:
# NOTE(review): original line 6661 is elided from this listing —
# presumably the initialization of the node_info mapping written to
# below; verify against upstream before editing.
6658 def _merge_order_bias(self, mygraph):
6659 """Order nodes from highest to lowest overall reference count for
6660 optimal leaf node selection."""
# Count parents per node, then sort mygraph.order in place so that
# nodes with more parents come first (descending reference count).
6662 for node in mygraph.order:
6663 node_info[node] = len(mygraph.parent_nodes(node))
6664 def cmp_merge_preference(node1, node2):
6665 return node_info[node2] - node_info[node1]
6666 mygraph.order.sort(cmp_merge_preference)
# Returns a copy of the serialized merge list, computing and caching
# it on first use by repeatedly resolving conflicts and serializing
# tasks until serialization succeeds.
# NOTE(review): original lines 6669, 6672, 6676-6677 and 6679-6682
# are elided from this listing (the try: around _serialize_tasks,
# the retry handler body, the reversed-list handling and the return);
# verify against upstream before editing. The 'reversed' parameter
# shadows the builtin of the same name.
6668 def altlist(self, reversed=False):
6670 while self._serialized_tasks_cache is None:
6671 self._resolve_conflicts()
6673 self._serialized_tasks_cache, self._scheduler_graph = \
6674 self._serialize_tasks()
6675 except self._serialize_tasks_retry:
6678 retlist = self._serialized_tasks_cache[:]
# NOTE(review): original lines 6684, 6692, 6696 and 6698 are elided
# from this listing — including the statement that populates
# self._scheduler_graph when it is None (presumably a call to
# self.altlist(), which fills it as a side effect; confirm against
# upstream before editing).
6683 def schedulerGraph(self):
6685 The scheduler graph is identical to the normal one except that
6686 uninstall edges are reversed in specific cases that require
6687 conflicting packages to be temporarily installed simultaneously.
6688 This is intended for use by the Scheduler in it's parallelization
6689 logic. It ensures that temporary simultaneous installation of
6690 conflicting packages is avoided when appropriate (especially for
6691 !!atom blockers), but allowed in specific cases that require it.
6693 Note that this method calls break_refs() which alters the state of
6694 internal Package instances such that this depgraph instance should
6695 not be used to perform any more calculations.
6697 if self._scheduler_graph is None:
6699 self.break_refs(self._scheduler_graph.order)
6700 return self._scheduler_graph
# NOTE(review): original lines 6703, 6708-6709 and 6716 are elided
# from this listing — including the loop header over 'nodes' whose
# body the lines below belong to; verify against upstream before
# editing.
6702 def break_refs(self, nodes):
6704 Take a mergelist like that returned from self.altlist() and
6705 break any references that lead back to the depgraph. This is
6706 useful if you want to hold references to packages without
6707 also holding the depgraph on the heap.
6710 if hasattr(node, "root_config"):
6711 # The FakeVartree references the _package_cache which
6712 # references the depgraph. So that Package instances don't
6713 # hold the depgraph and FakeVartree on the heap, replace
6714 # the RootConfig that references the FakeVartree with the
6715 # original RootConfig instance which references the actual
6717 node.root_config = \
6718 self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
	# Bring the depgraph into a consistent, conflict-free state:
	# first complete the graph, then validate blockers -- a failure
	# in either step is an internal error -- and finally process any
	# recorded slot conflicts.
	for precondition in (self._complete_graph, self.validate_blockers):
		if not precondition():
			raise self._unknown_internal_error()

	if self._slot_collision_info:
		self._process_slot_conflicts()
6730 def _serialize_tasks(self):
# Serialize self.digraph into a merge-ordered task list.
# Returns (retlist, scheduler_graph) -- see the final return statement.
# Raises self._unknown_internal_error() when circular dependencies,
# unresolvable blockers, or slot collisions prevent serialization, and
# raises self._serialize_tasks_retry() after adding "complete" to
# self.myparams when executed Uninstall tasks require a completed graph.
# NOTE(review): the leading numeric tokens on each line are residual
# line numbers from extraction; gaps between them mark elided source
# lines, so several branches (breaks/continues/assignments such as the
# initialization of retlist, asap_nodes and uninst_task) are missing
# from this excerpt -- confirm against the full file before editing.
6732 if "--debug" in self.myopts:
6733 writemsg("\ndigraph:\n\n", noiselevel=-1)
6734 self.digraph.debug_print()
6735 writemsg("\n", noiselevel=-1)
# Work on copies so the original digraph is left intact for display.
6737 scheduler_graph = self.digraph.copy()
6738 mygraph=self.digraph.copy()
6739 # Prune "nomerge" root nodes if nothing depends on them, since
6740 # otherwise they slow down merge order calculation. Don't remove
6741 # non-root nodes since they help optimize merge order in some cases
6742 # such as revdep-rebuild.
6743 removed_nodes = set()
6745 for node in mygraph.root_nodes():
6746 if not isinstance(node, Package) or \
6747 node.installed or node.onlydeps:
6748 removed_nodes.add(node)
6750 self.spinner.update()
6751 mygraph.difference_update(removed_nodes)
6752 if not removed_nodes:
6754 removed_nodes.clear()
6755 self._merge_order_bias(mygraph)
6756 def cmp_circular_bias(n1, n2):
6758 RDEPEND is stronger than PDEPEND and this function
6759 measures such a strength bias within a circular
6760 dependency relationship.
6762 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6763 ignore_priority=DepPriority.MEDIUM_SOFT)
6764 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6765 ignore_priority=DepPriority.MEDIUM_SOFT)
6766 if n1_n2_medium == n2_n1_medium:
# Copy the blocker digraph so scheduled/ignored uninstalls can be
# removed from it without mutating self._blocker_uninstalls.
6771 myblocker_uninstalls = self._blocker_uninstalls.copy()
6773 # Contains uninstall tasks that have been scheduled to
6774 # occur after overlapping blockers have been installed.
6775 scheduled_uninstalls = set()
6776 # Contains any Uninstall tasks that have been ignored
6777 # in order to avoid the circular deps code path. These
6778 # correspond to blocker conflicts that could not be
6780 ignored_uninstall_tasks = set()
6781 have_uninstall_task = False
6782 complete = "complete" in self.myparams
6785 def get_nodes(**kwargs):
6787 Returns leaf nodes excluding Uninstall instances
6788 since those should be executed as late as possible.
6790 return [node for node in mygraph.leaf_nodes(**kwargs) \
6791 if isinstance(node, Package) and \
6792 (node.operation != "uninstall" or \
6793 node in scheduled_uninstalls)]
6795 # sys-apps/portage needs special treatment if ROOT="/"
6796 running_root = self._running_root.root
6797 from portage.const import PORTAGE_PACKAGE_ATOM
6798 runtime_deps = InternalPackageSet(
6799 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6800 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6801 PORTAGE_PACKAGE_ATOM)
6802 replacement_portage = self.mydbapi[running_root].match_pkgs(
6803 PORTAGE_PACKAGE_ATOM)
6806 running_portage = running_portage[0]
6808 running_portage = None
6810 if replacement_portage:
6811 replacement_portage = replacement_portage[0]
6813 replacement_portage = None
6815 if replacement_portage == running_portage:
6816 replacement_portage = None
6818 if replacement_portage is not None:
6819 # update from running_portage to replacement_portage asap
6820 asap_nodes.append(replacement_portage)
6822 if running_portage is not None:
6824 portage_rdepend = self._select_atoms_highest_available(
6825 running_root, running_portage.metadata["RDEPEND"],
6826 myuse=running_portage.use.enabled,
6827 parent=running_portage, strict=False)
6828 except portage.exception.InvalidDependString, e:
6829 portage.writemsg("!!! Invalid RDEPEND in " + \
6830 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6831 (running_root, running_portage.cpv, e), noiselevel=-1)
6833 portage_rdepend = []
# Non-blocker RDEPEND atoms of the running portage instance are
# protected from uninstallation below (see runtime_dep_atoms).
6834 runtime_deps.update(atom for atom in portage_rdepend \
6835 if not atom.startswith("!"))
6837 ignore_priority_soft_range = [None]
6838 ignore_priority_soft_range.extend(
6839 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6840 tree_mode = "--tree" in self.myopts
6841 # Tracks whether or not the current iteration should prefer asap_nodes
6842 # if available. This is set to False when the previous iteration
6843 # failed to select any nodes. It is reset whenever nodes are
6844 # successfully selected.
6847 # By default, try to avoid selecting root nodes whenever possible. This
6848 # helps ensure that the maximimum possible number of soft dependencies
6849 # have been removed from the graph before their parent nodes have
6850 # selected. This is especially important when those dependencies are
6851 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6852 # CHOST has been changed (like when building a stage3 from a stage2).
6853 accept_root_node = False
6855 # State of prefer_asap and accept_root_node flags for successive
6856 # iterations that loosen the criteria for node selection.
6858 # iteration prefer_asap accept_root_node
6863 # If no nodes are selected on the 3rd iteration, it is due to
6864 # unresolved blockers or circular dependencies.
# Main selection loop: repeatedly pop leaf nodes from mygraph (with
# progressively looser criteria) until the graph is empty.
6866 while not mygraph.empty():
6867 self.spinner.update()
6868 selected_nodes = None
6869 ignore_priority = None
6870 if prefer_asap and asap_nodes:
6871 """ASAP nodes are merged before their soft deps."""
6872 asap_nodes = [node for node in asap_nodes \
6873 if mygraph.contains(node)]
6874 for node in asap_nodes:
6875 if not mygraph.child_nodes(node,
6876 ignore_priority=DepPriority.SOFT):
6877 selected_nodes = [node]
6878 asap_nodes.remove(node)
6880 if not selected_nodes and \
6881 not (prefer_asap and asap_nodes):
6882 for ignore_priority in ignore_priority_soft_range:
6883 nodes = get_nodes(ignore_priority=ignore_priority)
6887 if ignore_priority is None and not tree_mode:
6888 # Greedily pop all of these nodes since no relationship
6889 # has been ignored. This optimization destroys --tree
6890 # output, so it's disabled in reversed mode. If there
6891 # is a mix of merge and uninstall nodes, save the
6892 # uninstall nodes from later since sometimes a merge
6893 # node will render an install node unnecessary, and
6894 # we want to avoid doing a separate uninstall task in
6896 merge_nodes = [node for node in nodes \
6897 if node.operation == "merge"]
6899 selected_nodes = merge_nodes
6901 selected_nodes = nodes
6903 # For optimal merge order:
6904 # * Only pop one node.
6905 # * Removing a root node (node without a parent)
6906 # will not produce a leaf node, so avoid it.
6908 if mygraph.parent_nodes(node):
6909 # found a non-root node
6910 selected_nodes = [node]
6912 if not selected_nodes and \
6913 (accept_root_node or ignore_priority is None):
6914 # settle for a root node
6915 selected_nodes = [nodes[0]]
# Fallback: allow MEDIUM priority deps to be ignored and try to
# gather a mutually-RDEPENDent group to merge together.
6917 if not selected_nodes:
6918 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6920 """Recursively gather a group of nodes that RDEPEND on
6921 eachother. This ensures that they are merged as a group
6922 and get their RDEPENDs satisfied as soon as possible."""
6923 def gather_deps(ignore_priority,
6924 mergeable_nodes, selected_nodes, node):
6925 if node in selected_nodes:
6927 if node not in mergeable_nodes:
6929 if node == replacement_portage and \
6930 mygraph.child_nodes(node,
6931 ignore_priority=DepPriority.MEDIUM_SOFT):
6932 # Make sure that portage always has all of it's
6933 # RDEPENDs installed first.
6935 selected_nodes.add(node)
6936 for child in mygraph.child_nodes(node,
6937 ignore_priority=ignore_priority):
6938 if not gather_deps(ignore_priority,
6939 mergeable_nodes, selected_nodes, child):
6942 mergeable_nodes = set(nodes)
6943 if prefer_asap and asap_nodes:
6945 for ignore_priority in xrange(DepPriority.SOFT,
6946 DepPriority.MEDIUM_SOFT + 1):
6948 if nodes is not asap_nodes and \
6949 not accept_root_node and \
6950 not mygraph.parent_nodes(node):
6952 selected_nodes = set()
6953 if gather_deps(ignore_priority,
6954 mergeable_nodes, selected_nodes, node):
6957 selected_nodes = None
6961 # If any nodes have been selected here, it's always
6962 # possible that anything up to a MEDIUM_SOFT priority
6963 # relationship has been ignored. This state is recorded
6964 # in ignore_priority so that relevant nodes will be
6965 # added to asap_nodes when appropriate.
6967 ignore_priority = DepPriority.MEDIUM_SOFT
6969 if prefer_asap and asap_nodes and not selected_nodes:
6970 # We failed to find any asap nodes to merge, so ignore
6971 # them for the next iteration.
6975 if not selected_nodes and not accept_root_node:
6976 # Maybe there are only root nodes left, so accept them
6977 # for the next iteration.
6978 accept_root_node = True
6981 if selected_nodes and ignore_priority > DepPriority.SOFT:
6982 # Try to merge ignored medium deps as soon as possible.
6983 for node in selected_nodes:
6984 children = set(mygraph.child_nodes(node))
6985 soft = children.difference(
6986 mygraph.child_nodes(node,
6987 ignore_priority=DepPriority.SOFT))
6988 medium_soft = children.difference(
6989 mygraph.child_nodes(node,
6990 ignore_priority=DepPriority.MEDIUM_SOFT))
6991 medium_soft.difference_update(soft)
6992 for child in medium_soft:
6993 if child in selected_nodes:
6995 if child in asap_nodes:
6997 asap_nodes.append(child)
6999 if selected_nodes and len(selected_nodes) > 1:
7000 if not isinstance(selected_nodes, list):
7001 selected_nodes = list(selected_nodes)
# Python 2 cmp-style comparator defined above orders the group so
# that stronger (RDEPEND) edges are satisfied first.
7002 selected_nodes.sort(cmp_circular_bias)
7004 if not selected_nodes and not myblocker_uninstalls.is_empty():
7005 # An Uninstall task needs to be executed in order to
7006 # avoid conflict if possible.
7007 min_parent_deps = None
7009 for task in myblocker_uninstalls.leaf_nodes():
7010 # Do some sanity checks so that system or world packages
7011 # don't get uninstalled inappropriately here (only really
7012 # necessary when --complete-graph has not been enabled).
7014 if task in ignored_uninstall_tasks:
7017 if task in scheduled_uninstalls:
7018 # It's been scheduled but it hasn't
7019 # been executed yet due to dependence
7020 # on installation of blocking packages.
7023 root_config = self.roots[task.root]
7024 inst_pkg = self._pkg_cache[
7025 ("installed", task.root, task.cpv, "nomerge")]
7027 if self.digraph.contains(inst_pkg):
7030 forbid_overlap = False
7031 heuristic_overlap = False
7032 for blocker in myblocker_uninstalls.parent_nodes(task):
7033 if blocker.eapi in ("0", "1"):
7034 heuristic_overlap = True
7035 elif blocker.atom.blocker.overlap.forbid:
7036 forbid_overlap = True
7038 if forbid_overlap and running_root == task.root:
7041 if heuristic_overlap and running_root == task.root:
7042 # Never uninstall sys-apps/portage or it's essential
7043 # dependencies, except through replacement.
7045 runtime_dep_atoms = \
7046 list(runtime_deps.iterAtomsForPackage(task))
7047 except portage.exception.InvalidDependString, e:
7048 portage.writemsg("!!! Invalid PROVIDE in " + \
7049 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7050 (task.root, task.cpv, e), noiselevel=-1)
7054 # Don't uninstall a runtime dep if it appears
7055 # to be the only suitable one installed.
7057 vardb = root_config.trees["vartree"].dbapi
7058 for atom in runtime_dep_atoms:
7059 other_version = None
7060 for pkg in vardb.match_pkgs(atom):
7061 if pkg.cpv == task.cpv and \
7062 pkg.metadata["COUNTER"] == \
7063 task.metadata["COUNTER"]:
7067 if other_version is None:
7073 # For packages in the system set, don't take
7074 # any chances. If the conflict can't be resolved
7075 # by a normal replacement operation then abort.
7078 for atom in root_config.sets[
7079 "system"].iterAtomsForPackage(task):
7082 except portage.exception.InvalidDependString, e:
7083 portage.writemsg("!!! Invalid PROVIDE in " + \
7084 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7085 (task.root, task.cpv, e), noiselevel=-1)
7091 # Note that the world check isn't always
7092 # necessary since self._complete_graph() will
7093 # add all packages from the system and world sets to the
7094 # graph. This just allows unresolved conflicts to be
7095 # detected as early as possible, which makes it possible
7096 # to avoid calling self._complete_graph() when it is
7097 # unnecessary due to blockers triggering an abortion.
7099 # For packages in the world set, go ahead an uninstall
7100 # when necessary, as long as the atom will be satisfied
7101 # in the final state.
7102 graph_db = self.mydbapi[task.root]
7105 for atom in root_config.sets[
7106 "world"].iterAtomsForPackage(task):
7108 for pkg in graph_db.match_pkgs(atom):
7115 self._blocked_world_pkgs[inst_pkg] = atom
7117 except portage.exception.InvalidDependString, e:
7118 portage.writemsg("!!! Invalid PROVIDE in " + \
7119 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7120 (task.root, task.cpv, e), noiselevel=-1)
7126 # Check the deps of parent nodes to ensure that
7127 # the chosen task produces a leaf node. Maybe
7128 # this can be optimized some more to make the
7129 # best possible choice, but the current algorithm
7130 # is simple and should be near optimal for most
7133 for parent in mygraph.parent_nodes(task):
7134 parent_deps.update(mygraph.child_nodes(parent,
7135 ignore_priority=DepPriority.MEDIUM_SOFT))
7136 parent_deps.remove(task)
7137 if min_parent_deps is None or \
7138 len(parent_deps) < min_parent_deps:
7139 min_parent_deps = len(parent_deps)
7142 if uninst_task is not None:
7143 # The uninstall is performed only after blocking
7144 # packages have been merged on top of it. File
7145 # collisions between blocking packages are detected
7146 # and removed from the list of files to be uninstalled.
7147 scheduled_uninstalls.add(uninst_task)
7148 parent_nodes = mygraph.parent_nodes(uninst_task)
7150 # Reverse the parent -> uninstall edges since we want
7151 # to do the uninstall after blocking packages have
7152 # been merged on top of it.
7153 mygraph.remove(uninst_task)
7154 for blocked_pkg in parent_nodes:
7155 mygraph.add(blocked_pkg, uninst_task,
7156 priority=BlockerDepPriority.instance)
7157 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7158 scheduler_graph.add(blocked_pkg, uninst_task,
7159 priority=BlockerDepPriority.instance)
7162 # None of the Uninstall tasks are acceptable, so
7163 # the corresponding blockers are unresolvable.
7164 # We need to drop an Uninstall task here in order
7165 # to avoid the circular deps code path, but the
7166 # blocker will still be counted as an unresolved
7168 for node in myblocker_uninstalls.leaf_nodes():
7170 mygraph.remove(node)
7175 ignored_uninstall_tasks.add(node)
7178 if uninst_task is not None:
7179 # After dropping an Uninstall task, reset
7180 # the state variables for leaf node selection and
7181 # continue trying to select leaf nodes.
7183 accept_root_node = False
# Nothing selectable even after all fallbacks: report a cycle.
7186 if not selected_nodes:
7187 self._circular_deps_for_display = mygraph
7188 raise self._unknown_internal_error()
7190 # At this point, we've succeeded in selecting one or more nodes, so
7191 # it's now safe to reset the prefer_asap and accept_root_node flags
7192 # to their default states.
7194 accept_root_node = False
7196 mygraph.difference_update(selected_nodes)
7198 for node in selected_nodes:
7199 if isinstance(node, Package) and \
7200 node.operation == "nomerge":
7203 # Handle interactions between blockers
7204 # and uninstallation tasks.
7205 solved_blockers = set()
7207 if isinstance(node, Package) and \
7208 "uninstall" == node.operation:
7209 have_uninstall_task = True
7212 vardb = self.trees[node.root]["vartree"].dbapi
7213 previous_cpv = vardb.match(node.slot_atom)
7215 # The package will be replaced by this one, so remove
7216 # the corresponding Uninstall task if necessary.
7217 previous_cpv = previous_cpv[0]
7219 ("installed", node.root, previous_cpv, "uninstall")
7221 mygraph.remove(uninst_task)
7225 if uninst_task is not None and \
7226 uninst_task not in ignored_uninstall_tasks and \
7227 myblocker_uninstalls.contains(uninst_task):
7228 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7229 myblocker_uninstalls.remove(uninst_task)
7230 # Discard any blockers that this Uninstall solves.
7231 for blocker in blocker_nodes:
7232 if not myblocker_uninstalls.child_nodes(blocker):
7233 myblocker_uninstalls.remove(blocker)
7234 solved_blockers.add(blocker)
7236 retlist.append(node)
7238 if (isinstance(node, Package) and \
7239 "uninstall" == node.operation) or \
7240 (uninst_task is not None and \
7241 uninst_task in scheduled_uninstalls):
7242 # Include satisfied blockers in the merge list
7243 # since the user might be interested and also
7244 # it serves as an indicator that blocking packages
7245 # will be temporarily installed simultaneously.
7246 for blocker in solved_blockers:
7247 retlist.append(Blocker(atom=blocker.atom,
7248 root=blocker.root, eapi=blocker.eapi,
# Any blockers still rooted in the uninstall digraph are unresolved;
# append them to the merge list for display.
7251 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7252 for node in myblocker_uninstalls.root_nodes():
7253 unsolvable_blockers.add(node)
7255 for blocker in unsolvable_blockers:
7256 retlist.append(blocker)
7258 # If any Uninstall tasks need to be executed in order
7259 # to avoid a conflict, complete the graph with any
7260 # dependencies that may have been initially
7261 # neglected (to ensure that unsafe Uninstall tasks
7262 # are properly identified and blocked from execution).
7263 if have_uninstall_task and \
7265 not unsolvable_blockers:
7266 self.myparams.add("complete")
7267 raise self._serialize_tasks_retry("")
7269 if unsolvable_blockers and \
7270 not self._accept_blocker_conflicts():
7271 self._unsatisfied_blockers_for_display = unsolvable_blockers
7272 self._serialized_tasks_cache = retlist[:]
7273 self._scheduler_graph = scheduler_graph
7274 raise self._unknown_internal_error()
7276 if self._slot_collision_info and \
7277 not self._accept_blocker_conflicts():
7278 self._serialized_tasks_cache = retlist[:]
7279 self._scheduler_graph = scheduler_graph
7280 raise self._unknown_internal_error()
7282 return retlist, scheduler_graph
7284 def _show_circular_deps(self, mygraph):
# Print a readable report for a circular-dependency failure: prune
# root nodes (they have no parents, so they cannot be in a cycle),
# re-display the remaining nodes with --tree forced on, dump the
# graph, and print a hint about disabling USE flags.
# NOTE(review): numeric prefixes are residual extraction line numbers;
# gaps in them mark elided source lines (e.g. the loop-termination
# checks around root_nodes and leaf_nodes are missing here).
7285 # No leaf nodes are available, so we have a circular
7286 # dependency panic situation. Reduce the noise level to a
7287 # minimum via repeated elimination of root nodes since they
7288 # have no parents and thus can not be part of a cycle.
7290 root_nodes = mygraph.root_nodes(
7291 ignore_priority=DepPriority.MEDIUM_SOFT)
7294 mygraph.difference_update(root_nodes)
7295 # Display the USE flags that are enabled on nodes that are part
7296 # of dependency cycles in case that helps the user decide to
7297 # disable some of them.
7299 tempgraph = mygraph.copy()
7300 while not tempgraph.empty():
7301 nodes = tempgraph.leaf_nodes()
7303 node = tempgraph.order[0]
7306 display_order.append(node)
7307 tempgraph.remove(node)
7308 display_order.reverse()
# Force verbose tree output so the cycle members are shown in context.
7309 self.myopts.pop("--quiet", None)
7310 self.myopts.pop("--verbose", None)
7311 self.myopts["--tree"] = True
7312 portage.writemsg("\n\n", noiselevel=-1)
7313 self.display(display_order)
7314 prefix = colorize("BAD", " * ")
7315 portage.writemsg("\n", noiselevel=-1)
7316 portage.writemsg(prefix + "Error: circular dependencies:\n",
7318 portage.writemsg("\n", noiselevel=-1)
7319 mygraph.debug_print()
7320 portage.writemsg("\n", noiselevel=-1)
7321 portage.writemsg(prefix + "Note that circular dependencies " + \
7322 "can often be avoided by temporarily\n", noiselevel=-1)
7323 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7324 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""Display the cached merge list, unless the exact same list (or
	its reversal) has already been shown via self.display()."""
	tasks = self._serialized_tasks_cache
	if tasks is None:
		return
	shown = self._displayed_list
	if shown and \
		(shown == tasks or shown == list(reversed(tasks))):
		# Already displayed in either forward or --tree order.
		return
	display_list = tasks[:]
	if "--tree" in self.myopts:
		# --tree output is rendered deepest-first.
		display_list.reverse()
	self.display(display_list)
7337 def _show_unsatisfied_blockers(self, blockers):
# Report blockers that could not be resolved: re-show the merge list,
# then print each conflicting package together with a bounded number
# of the parents that pulled it in, writing the report to stderr.
# NOTE(review): numeric prefixes are residual extraction line numbers;
# gaps in them mark elided source lines (e.g. the initializations of
# conflict_pkgs, pruned_pkgs, msg, indent and max_parents are missing
# from this excerpt).
7338 self._show_merge_list()
7339 msg = "Error: The above package list contains " + \
7340 "packages which cannot be installed " + \
7341 "at the same time on the same system."
7342 prefix = colorize("BAD", " * ")
7343 from textwrap import wrap
7344 portage.writemsg("\n", noiselevel=-1)
7345 for line in wrap(msg, 70):
7346 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7348 # Display the conflicting packages along with the packages
7349 # that pulled them in. This is helpful for troubleshooting
7350 # cases in which blockers don't solve automatically and
7351 # the reasons are not apparent from the normal merge list
7355 for blocker in blockers:
7356 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7357 self._blocker_parents.parent_nodes(blocker)):
7358 parent_atoms = self._parent_atoms.get(pkg)
7359 if not parent_atoms:
# Fall back to the world-set atom recorded during serialization.
7360 atom = self._blocked_world_pkgs.get(pkg)
7361 if atom is not None:
7362 parent_atoms = set([("@world", atom)])
7364 conflict_pkgs[pkg] = parent_atoms
7367 # Reduce noise by pruning packages that are only
7368 # pulled in by other conflict packages.
7370 for pkg, parent_atoms in conflict_pkgs.iteritems():
7371 relevant_parent = False
7372 for parent, atom in parent_atoms:
7373 if parent not in conflict_pkgs:
7374 relevant_parent = True
7376 if not relevant_parent:
7377 pruned_pkgs.add(pkg)
7378 for pkg in pruned_pkgs:
7379 del conflict_pkgs[pkg]
7385 # Max number of parents shown, to avoid flooding the display.
7387 for pkg, parent_atoms in conflict_pkgs.iteritems():
7391 # Prefer packages that are not directly involved in a conflict.
7392 for parent_atom in parent_atoms:
7393 if len(pruned_list) >= max_parents:
7395 parent, atom = parent_atom
7396 if parent not in conflict_pkgs:
7397 pruned_list.add(parent_atom)
# Second pass fills remaining display slots with any parent.
7399 for parent_atom in parent_atoms:
7400 if len(pruned_list) >= max_parents:
7402 pruned_list.add(parent_atom)
7404 omitted_parents = len(parent_atoms) - len(pruned_list)
7405 msg.append(indent + "%s pulled in by\n" % pkg)
7407 for parent_atom in pruned_list:
7408 parent, atom = parent_atom
7409 msg.append(2*indent)
7410 if isinstance(parent,
7411 (PackageArg, AtomArg)):
7412 # For PackageArg and AtomArg types, it's
7413 # redundant to display the atom attribute.
7414 msg.append(str(parent))
7416 # Display the specific atom from SetArg or
7418 msg.append("%s required by %s" % (atom, parent))
7422 msg.append(2*indent)
7423 msg.append("(and %d more)\n" % omitted_parents)
7427 sys.stderr.write("".join(msg))
7430 if "--quiet" not in self.myopts:
7431 show_blocker_docs_link()
7433 def display(self, mylist, favorites=[], verbosity=None):
7435 # This is used to prevent display_problems() from
7436 # redundantly displaying this exact same merge list
7437 # again via _show_merge_list().
7438 self._displayed_list = mylist
7440 if verbosity is None:
7441 verbosity = ("--quiet" in self.myopts and 1 or \
7442 "--verbose" in self.myopts and 3 or 2)
7443 favorites_set = InternalPackageSet(favorites)
7444 oneshot = "--oneshot" in self.myopts or \
7445 "--onlydeps" in self.myopts
7446 columns = "--columns" in self.myopts
7451 counters = PackageCounters()
7453 if verbosity == 1 and "--verbose" not in self.myopts:
7454 def create_use_string(*args):
7457 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7459 is_new, reinst_flags,
7460 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7461 alphabetical=("--alphabetical" in self.myopts)):
7469 cur_iuse = set(cur_iuse)
7470 enabled_flags = cur_iuse.intersection(cur_use)
7471 removed_iuse = set(old_iuse).difference(cur_iuse)
7472 any_iuse = cur_iuse.union(old_iuse)
7473 any_iuse = list(any_iuse)
7475 for flag in any_iuse:
7478 reinst_flag = reinst_flags and flag in reinst_flags
7479 if flag in enabled_flags:
7481 if is_new or flag in old_use and \
7482 (all_flags or reinst_flag):
7483 flag_str = red(flag)
7484 elif flag not in old_iuse:
7485 flag_str = yellow(flag) + "%*"
7486 elif flag not in old_use:
7487 flag_str = green(flag) + "*"
7488 elif flag in removed_iuse:
7489 if all_flags or reinst_flag:
7490 flag_str = yellow("-" + flag) + "%"
7493 flag_str = "(" + flag_str + ")"
7494 removed.append(flag_str)
7497 if is_new or flag in old_iuse and \
7498 flag not in old_use and \
7499 (all_flags or reinst_flag):
7500 flag_str = blue("-" + flag)
7501 elif flag not in old_iuse:
7502 flag_str = yellow("-" + flag)
7503 if flag not in iuse_forced:
7505 elif flag in old_use:
7506 flag_str = green("-" + flag) + "*"
7508 if flag in iuse_forced:
7509 flag_str = "(" + flag_str + ")"
7511 enabled.append(flag_str)
7513 disabled.append(flag_str)
7516 ret = " ".join(enabled)
7518 ret = " ".join(enabled + disabled + removed)
7520 ret = '%s="%s" ' % (name, ret)
7523 repo_display = RepoDisplay(self.roots)
7527 mygraph = self.digraph.copy()
7529 # If there are any Uninstall instances, add the corresponding
7530 # blockers to the digraph (useful for --tree display).
7532 executed_uninstalls = set(node for node in mylist \
7533 if isinstance(node, Package) and node.operation == "unmerge")
7535 for uninstall in self._blocker_uninstalls.leaf_nodes():
7536 uninstall_parents = \
7537 self._blocker_uninstalls.parent_nodes(uninstall)
7538 if not uninstall_parents:
7541 # Remove the corresponding "nomerge" node and substitute
7542 # the Uninstall node.
7543 inst_pkg = self._pkg_cache[
7544 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7546 mygraph.remove(inst_pkg)
7551 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7553 inst_pkg_blockers = []
7555 # Break the Package -> Uninstall edges.
7556 mygraph.remove(uninstall)
7558 # Resolution of a package's blockers
7559 # depend on it's own uninstallation.
7560 for blocker in inst_pkg_blockers:
7561 mygraph.add(uninstall, blocker)
7563 # Expand Package -> Uninstall edges into
7564 # Package -> Blocker -> Uninstall edges.
7565 for blocker in uninstall_parents:
7566 mygraph.add(uninstall, blocker)
7567 for parent in self._blocker_parents.parent_nodes(blocker):
7568 if parent != inst_pkg:
7569 mygraph.add(blocker, parent)
7571 # If the uninstall task did not need to be executed because
7572 # of an upgrade, display Blocker -> Upgrade edges since the
7573 # corresponding Blocker -> Uninstall edges will not be shown.
7575 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7576 if upgrade_node is not None and \
7577 uninstall not in executed_uninstalls:
7578 for blocker in uninstall_parents:
7579 mygraph.add(upgrade_node, blocker)
7581 unsatisfied_blockers = []
7586 if isinstance(x, Blocker) and not x.satisfied:
7587 unsatisfied_blockers.append(x)
7590 if "--tree" in self.myopts:
7591 depth = len(tree_nodes)
7592 while depth and graph_key not in \
7593 mygraph.child_nodes(tree_nodes[depth-1]):
7596 tree_nodes = tree_nodes[:depth]
7597 tree_nodes.append(graph_key)
7598 display_list.append((x, depth, True))
7599 shown_edges.add((graph_key, tree_nodes[depth-1]))
7601 traversed_nodes = set() # prevent endless circles
7602 traversed_nodes.add(graph_key)
7603 def add_parents(current_node, ordered):
7605 # Do not traverse to parents if this node is an
7606 # an argument or a direct member of a set that has
7607 # been specified as an argument (system or world).
7608 if current_node not in self._set_nodes:
7609 parent_nodes = mygraph.parent_nodes(current_node)
7611 child_nodes = set(mygraph.child_nodes(current_node))
7612 selected_parent = None
7613 # First, try to avoid a direct cycle.
7614 for node in parent_nodes:
7615 if not isinstance(node, (Blocker, Package)):
7617 if node not in traversed_nodes and \
7618 node not in child_nodes:
7619 edge = (current_node, node)
7620 if edge in shown_edges:
7622 selected_parent = node
7624 if not selected_parent:
7625 # A direct cycle is unavoidable.
7626 for node in parent_nodes:
7627 if not isinstance(node, (Blocker, Package)):
7629 if node not in traversed_nodes:
7630 edge = (current_node, node)
7631 if edge in shown_edges:
7633 selected_parent = node
7636 shown_edges.add((current_node, selected_parent))
7637 traversed_nodes.add(selected_parent)
7638 add_parents(selected_parent, False)
7639 display_list.append((current_node,
7640 len(tree_nodes), ordered))
7641 tree_nodes.append(current_node)
7643 add_parents(graph_key, True)
7645 display_list.append((x, depth, True))
7646 mylist = display_list
7647 for x in unsatisfied_blockers:
7648 mylist.append((x, 0, True))
7650 last_merge_depth = 0
7651 for i in xrange(len(mylist)-1,-1,-1):
7652 graph_key, depth, ordered = mylist[i]
7653 if not ordered and depth == 0 and i > 0 \
7654 and graph_key == mylist[i-1][0] and \
7655 mylist[i-1][1] == 0:
7656 # An ordered node got a consecutive duplicate when the tree was
7660 if ordered and graph_key[-1] != "nomerge":
7661 last_merge_depth = depth
7663 if depth >= last_merge_depth or \
7664 i < len(mylist) - 1 and \
7665 depth >= mylist[i+1][1]:
7668 from portage import flatten
7669 from portage.dep import use_reduce, paren_reduce
7670 # files to fetch list - avoids counting a same file twice
7671 # in size display (verbose mode)
7674 # Use this set to detect when all the "repoadd" strings are "[0]"
7675 # and disable the entire repo display in this case.
7678 for mylist_index in xrange(len(mylist)):
7679 x, depth, ordered = mylist[mylist_index]
7683 portdb = self.trees[myroot]["porttree"].dbapi
7684 bindb = self.trees[myroot]["bintree"].dbapi
7685 vardb = self.trees[myroot]["vartree"].dbapi
7686 vartree = self.trees[myroot]["vartree"]
7687 pkgsettings = self.pkgsettings[myroot]
7690 indent = " " * depth
7692 if isinstance(x, Blocker):
7694 blocker_style = "PKG_BLOCKER_SATISFIED"
7695 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7697 blocker_style = "PKG_BLOCKER"
7698 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7700 counters.blocks += 1
7702 counters.blocks_satisfied += 1
7703 resolved = portage.key_expand(
7704 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7705 if "--columns" in self.myopts and "--quiet" in self.myopts:
7706 addl += " " + colorize(blocker_style, resolved)
7708 addl = "[%s %s] %s%s" % \
7709 (colorize(blocker_style, "blocks"),
7710 addl, indent, colorize(blocker_style, resolved))
7711 block_parents = self._blocker_parents.parent_nodes(x)
7712 block_parents = set([pnode[2] for pnode in block_parents])
7713 block_parents = ", ".join(block_parents)
7715 addl += colorize(blocker_style,
7716 " (\"%s\" is blocking %s)") % \
7717 (str(x.atom).lstrip("!"), block_parents)
7719 addl += colorize(blocker_style,
7720 " (is blocking %s)") % block_parents
7721 if isinstance(x, Blocker) and x.satisfied:
7726 blockers.append(addl)
7729 pkg_merge = ordered and pkg_status == "merge"
7730 if not pkg_merge and pkg_status == "merge":
7731 pkg_status = "nomerge"
7732 built = pkg_type != "ebuild"
7733 installed = pkg_type == "installed"
7735 metadata = pkg.metadata
7737 repo_name = metadata["repository"]
7738 if pkg_type == "ebuild":
7739 ebuild_path = portdb.findname(pkg_key)
7740 if not ebuild_path: # shouldn't happen
7741 raise portage.exception.PackageNotFound(pkg_key)
7742 repo_path_real = os.path.dirname(os.path.dirname(
7743 os.path.dirname(ebuild_path)))
7745 repo_path_real = portdb.getRepositoryPath(repo_name)
7746 pkg_use = list(pkg.use.enabled)
7748 restrict = flatten(use_reduce(paren_reduce(
7749 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7750 except portage.exception.InvalidDependString, e:
7751 if not pkg.installed:
7752 show_invalid_depstring_notice(x,
7753 pkg.metadata["RESTRICT"], str(e))
7757 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7758 "fetch" in restrict:
7761 counters.restrict_fetch += 1
7762 if portdb.fetch_check(pkg_key, pkg_use):
7765 counters.restrict_fetch_satisfied += 1
7767 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7768 #param is used for -u, where you still *do* want to see when something is being upgraded.
7771 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7772 if vardb.cpv_exists(pkg_key):
7773 addl=" "+yellow("R")+fetch+" "
7776 counters.reinst += 1
7777 elif pkg_status == "uninstall":
7778 counters.uninst += 1
7779 # filter out old-style virtual matches
7780 elif installed_versions and \
7781 portage.cpv_getkey(installed_versions[0]) == \
7782 portage.cpv_getkey(pkg_key):
7783 myinslotlist = vardb.match(pkg.slot_atom)
7784 # If this is the first install of a new-style virtual, we
7785 # need to filter out old-style virtual matches.
7786 if myinslotlist and \
7787 portage.cpv_getkey(myinslotlist[0]) != \
7788 portage.cpv_getkey(pkg_key):
7791 myoldbest = myinslotlist[:]
7793 if not portage.dep.cpvequal(pkg_key,
7794 portage.best([pkg_key] + myoldbest)):
7796 addl += turquoise("U")+blue("D")
7798 counters.downgrades += 1
7801 addl += turquoise("U") + " "
7803 counters.upgrades += 1
7805 # New slot, mark it new.
7806 addl = " " + green("NS") + fetch + " "
7807 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7809 counters.newslot += 1
7811 if "--changelog" in self.myopts:
7812 inst_matches = vardb.match(pkg.slot_atom)
7814 changelogs.extend(self.calc_changelog(
7815 portdb.findname(pkg_key),
7816 inst_matches[0], pkg_key))
7818 addl = " " + green("N") + " " + fetch + " "
7827 forced_flags = set()
7828 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7829 forced_flags.update(pkgsettings.useforce)
7830 forced_flags.update(pkgsettings.usemask)
7832 cur_use = [flag for flag in pkg.use.enabled \
7833 if flag in pkg.iuse.all]
7834 cur_iuse = sorted(pkg.iuse.all)
7836 if myoldbest and myinslotlist:
7837 previous_cpv = myoldbest[0]
7839 previous_cpv = pkg.cpv
7840 if vardb.cpv_exists(previous_cpv):
7841 old_iuse, old_use = vardb.aux_get(
7842 previous_cpv, ["IUSE", "USE"])
7843 old_iuse = list(set(
7844 filter_iuse_defaults(old_iuse.split())))
7846 old_use = old_use.split()
7853 old_use = [flag for flag in old_use if flag in old_iuse]
7855 use_expand = pkgsettings["USE_EXPAND"].lower().split()
7857 use_expand.reverse()
7858 use_expand_hidden = \
7859 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7861 def map_to_use_expand(myvals, forcedFlags=False,
7865 for exp in use_expand:
7868 for val in myvals[:]:
7869 if val.startswith(exp.lower()+"_"):
7870 if val in forced_flags:
7871 forced[exp].add(val[len(exp)+1:])
7872 ret[exp].append(val[len(exp)+1:])
7875 forced["USE"] = [val for val in myvals \
7876 if val in forced_flags]
7878 for exp in use_expand_hidden:
7884 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7885 # are the only thing that triggered reinstallation.
7886 reinst_flags_map = {}
7887 reinstall_for_flags = self._reinstall_nodes.get(pkg)
7888 reinst_expand_map = None
7889 if reinstall_for_flags:
7890 reinst_flags_map = map_to_use_expand(
7891 list(reinstall_for_flags), removeHidden=False)
7892 for k in list(reinst_flags_map):
7893 if not reinst_flags_map[k]:
7894 del reinst_flags_map[k]
7895 if not reinst_flags_map.get("USE"):
7896 reinst_expand_map = reinst_flags_map.copy()
7897 reinst_expand_map.pop("USE", None)
7898 if reinst_expand_map and \
7899 not set(reinst_expand_map).difference(
7901 use_expand_hidden = \
7902 set(use_expand_hidden).difference(
7905 cur_iuse_map, iuse_forced = \
7906 map_to_use_expand(cur_iuse, forcedFlags=True)
7907 cur_use_map = map_to_use_expand(cur_use)
7908 old_iuse_map = map_to_use_expand(old_iuse)
7909 old_use_map = map_to_use_expand(old_use)
7912 use_expand.insert(0, "USE")
7914 for key in use_expand:
7915 if key in use_expand_hidden:
7917 verboseadd += create_use_string(key.upper(),
7918 cur_iuse_map[key], iuse_forced[key],
7919 cur_use_map[key], old_iuse_map[key],
7920 old_use_map[key], is_new,
7921 reinst_flags_map.get(key))
7926 if pkg_type == "ebuild" and pkg_merge:
7928 myfilesdict = portdb.getfetchsizes(pkg_key,
7929 useflags=pkg_use, debug=self.edebug)
7930 except portage.exception.InvalidDependString, e:
7931 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7932 show_invalid_depstring_notice(x, src_uri, str(e))
7935 if myfilesdict is None:
7936 myfilesdict="[empty/missing/bad digest]"
7938 for myfetchfile in myfilesdict:
7939 if myfetchfile not in myfetchlist:
7940 mysize+=myfilesdict[myfetchfile]
7941 myfetchlist.append(myfetchfile)
7943 counters.totalsize += mysize
7944 verboseadd += format_size(mysize)
7947 # assign index for a previous version in the same slot
7948 has_previous = False
7949 repo_name_prev = None
7950 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7952 slot_matches = vardb.match(slot_atom)
7955 repo_name_prev = vardb.aux_get(slot_matches[0],
7958 # now use the data to generate output
7959 if pkg.installed or not has_previous:
7960 repoadd = repo_display.repoStr(repo_path_real)
7962 repo_path_prev = None
7964 repo_path_prev = portdb.getRepositoryPath(
7966 if repo_path_prev == repo_path_real:
7967 repoadd = repo_display.repoStr(repo_path_real)
7969 repoadd = "%s=>%s" % (
7970 repo_display.repoStr(repo_path_prev),
7971 repo_display.repoStr(repo_path_real))
7973 repoadd_set.add(repoadd)
7975 xs = [portage.cpv_getkey(pkg_key)] + \
7976 list(portage.catpkgsplit(pkg_key)[2:])
7983 if "COLUMNWIDTH" in self.settings:
7985 mywidth = int(self.settings["COLUMNWIDTH"])
7986 except ValueError, e:
7987 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7989 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7990 self.settings["COLUMNWIDTH"], noiselevel=-1)
7992 oldlp = mywidth - 30
7995 # Convert myoldbest from a list to a string.
7999 for pos, key in enumerate(myoldbest):
8000 key = portage.catpkgsplit(key)[2] + \
8001 "-" + portage.catpkgsplit(key)[3]
8002 if key[-3:] == "-r0":
8004 myoldbest[pos] = key
8005 myoldbest = blue("["+", ".join(myoldbest)+"]")
8008 root_config = self.roots[myroot]
8009 system_set = root_config.sets["system"]
8010 world_set = root_config.sets["world"]
8015 pkg_system = system_set.findAtomForPackage(pkg)
8016 pkg_world = world_set.findAtomForPackage(pkg)
8017 if not (oneshot or pkg_world) and \
8018 myroot == self.target_root and \
8019 favorites_set.findAtomForPackage(pkg):
8020 # Maybe it will be added to world now.
8021 if create_world_atom(pkg, favorites_set, root_config):
8023 except portage.exception.InvalidDependString:
8024 # This is reported elsewhere if relevant.
8027 def pkgprint(pkg_str):
8030 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8032 return colorize("PKG_MERGE_WORLD", pkg_str)
8034 return colorize("PKG_MERGE", pkg_str)
8035 elif pkg_status == "uninstall":
8036 return colorize("PKG_UNINSTALL", pkg_str)
8039 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8041 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8043 return colorize("PKG_NOMERGE", pkg_str)
8046 properties = flatten(use_reduce(paren_reduce(
8047 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8048 except portage.exception.InvalidDependString, e:
8049 if not pkg.installed:
8050 show_invalid_depstring_notice(pkg,
8051 pkg.metadata["PROPERTIES"], str(e))
8055 interactive = "interactive" in properties
8056 if interactive and pkg.operation == "merge":
8057 addl = colorize("WARN", "I") + addl[1:]
8059 counters.interactive += 1
8064 if "--columns" in self.myopts:
8065 if "--quiet" in self.myopts:
8066 myprint=addl+" "+indent+pkgprint(pkg_cp)
8067 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8068 myprint=myprint+myoldbest
8069 myprint=myprint+darkgreen("to "+x[1])
8073 myprint = "[%s] %s%s" % \
8074 (pkgprint(pkg_status.ljust(13)),
8075 indent, pkgprint(pkg.cp))
8077 myprint = "[%s %s] %s%s" % \
8078 (pkgprint(pkg.type_name), addl,
8079 indent, pkgprint(pkg.cp))
8080 if (newlp-nc_len(myprint)) > 0:
8081 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8082 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8083 if (oldlp-nc_len(myprint)) > 0:
8084 myprint=myprint+" "*(oldlp-nc_len(myprint))
8085 myprint=myprint+myoldbest
8086 myprint += darkgreen("to " + pkg.root)
8089 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8091 myprint = "[" + pkg_type + " " + addl + "] "
8092 myprint += indent + pkgprint(pkg_key) + " " + \
8093 myoldbest + darkgreen("to " + myroot)
8095 if "--columns" in self.myopts:
8096 if "--quiet" in self.myopts:
8097 myprint=addl+" "+indent+pkgprint(pkg_cp)
8098 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8099 myprint=myprint+myoldbest
8103 myprint = "[%s] %s%s" % \
8104 (pkgprint(pkg_status.ljust(13)),
8105 indent, pkgprint(pkg.cp))
8107 myprint = "[%s %s] %s%s" % \
8108 (pkgprint(pkg.type_name), addl,
8109 indent, pkgprint(pkg.cp))
8110 if (newlp-nc_len(myprint)) > 0:
8111 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8112 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8113 if (oldlp-nc_len(myprint)) > 0:
8114 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8115 myprint += myoldbest
8118 myprint = "[%s] %s%s %s" % \
8119 (pkgprint(pkg_status.ljust(13)),
8120 indent, pkgprint(pkg.cpv),
8123 myprint = "[%s %s] %s%s %s" % \
8124 (pkgprint(pkg_type), addl, indent,
8125 pkgprint(pkg.cpv), myoldbest)
8127 if columns and pkg.operation == "uninstall":
8129 p.append((myprint, verboseadd, repoadd))
8131 if "--tree" not in self.myopts and \
8132 "--quiet" not in self.myopts and \
8133 not self._opts_no_restart.intersection(self.myopts) and \
8134 pkg.root == self._running_root.root and \
8135 portage.match_from_list(
8136 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8137 not vardb.cpv_exists(pkg.cpv) and \
8138 "--quiet" not in self.myopts:
8139 if mylist_index < len(mylist) - 1:
8140 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8141 p.append(colorize("WARN", " then resume the merge."))
8144 show_repos = repoadd_set and repoadd_set != set(["0"])
8147 if isinstance(x, basestring):
8148 out.write("%s\n" % (x,))
8151 myprint, verboseadd, repoadd = x
8154 myprint += " " + verboseadd
8156 if show_repos and repoadd:
8157 myprint += " " + teal("[%s]" % repoadd)
8159 out.write("%s\n" % (myprint,))
8168 sys.stdout.write(str(repo_display))
8170 if "--changelog" in self.myopts:
8172 for revision,text in changelogs:
8173 print bold('*'+revision)
8174 sys.stdout.write(text)
	def display_problems(self):
		Display problems with the dependency graph such as slot collisions.
		This is called internally by display() to show the problems _after_
		the merge list where it is most likely to be seen, but if display()
		is not going to be called then this method should be called explicitly
		to ensure that the user is notified of problems with the graph.

		All output goes to stderr, except for unsatisfied dependencies which
		go to stdout for parsing by programs such as autounmask.
		# Note that show_masked_packages() sends it's output to
		# stdout, and some programs such as autounmask parse the
		# output in cases when emerge bails out. However, when
		# show_masked_packages() is called for installed packages
		# here, the message is a warning that is more appropriate
		# to send to stderr, so temporarily redirect stdout to
		# stderr. TODO: Fix output code so there's a cleaner way
		# to redirect everything to stderr.
		# NOTE(review): sys.stdout is presumably restored by a
		# try/finally that is not visible in this view -- confirm
		# against the full file before relying on it.
		sys.stdout = sys.stderr
		self._display_problems()
		# This goes to stdout for parsing by programs like autounmask.
		for pargs, kwargs in self._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
	def _display_problems(self):
		# Emit each accumulated category of graph problem (circular deps,
		# unsatisfied blockers, slot collisions, missing args,
		# package.provided conflicts, masked installed packages) to stderr.
		if self._circular_deps_for_display is not None:
			self._show_circular_deps(
				self._circular_deps_for_display)

		# The user is only notified of a slot conflict if
		# there are no unresolvable blocker conflicts.
		if self._unsatisfied_blockers_for_display is not None:
			self._show_unsatisfied_blockers(
				self._unsatisfied_blockers_for_display)
		else:
			self._show_slot_collision_notice()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._missing_args:
			world_problems = False
			if "world" in self._sets:
				# Filter out indirect members of world (from nested sets)
				# since only direct members of world are desired here.
				world_set = self.roots[self.target_root].sets["world"]
				for arg, atom in self._missing_args:
					if arg.name == "world" and atom in world_set:
						world_problems = True
						break

			if world_problems:
				sys.stderr.write("\n!!! Problems have been " + \
					"detected with your world file\n")
				sys.stderr.write("!!! Please run " + \
					green("emaint --check world")+"\n\n")

		if self._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(str(atom) for arg, atom in \
				self._missing_args) + "\n")

		if self._pprovided_args:
			# Group the offending atoms by the argument/set that pulled
			# them in, so each atom is reported once with its parents.
			for arg, atom in self._pprovided_args:
				if isinstance(arg, SetArg):
					arg_atom = (atom, atom)
				else:
					arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:
					refs.append(parent)
			msg.append(bad("\nWARNING: "))
			if len(self._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
			else:
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.iteritems():
				if refs:
					problems_sets.update(refs)
					ref_string = ", ".join(["'%s'" % name for name in refs])
					ref_string = " pulled in by " + ref_string
				msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
			if "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append("  B) Uninstall offending packages (cleans them from world).\n")
				msg.append("  C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		masked_packages = []
		for pkg in self._masked_installed:
			root_config = pkg.root_config
			pkgsettings = self.pkgsettings[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.metadata, mreasons))
		if masked_packages:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" The following installed packages are masked:\n")
			show_masked_packages(masked_packages)
	def calc_changelog(self,ebuildpath,current,next):
		# Return the list of (version, text) ChangeLog entries that lie
		# between the installed version `current` and the candidate
		# version `next` of the given ebuild.  Returns an empty result
		# when the ebuild or its ChangeLog cannot be read.
		if ebuildpath == None or not os.path.exists(ebuildpath):
			return []
		# Normalize both versions to "PV-PR" form, stripping a "-r0"
		# revision suffix so they match the tags used in ChangeLog.
		current = '-'.join(portage.catpkgsplit(current)[1:])
		if current.endswith('-r0'):
			current = current[:-3]
		next = '-'.join(portage.catpkgsplit(next)[1:])
		if next.endswith('-r0'):
			next = next[:-3]
		changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
		try:
			changelog = open(changelogpath).read()
		except SystemExit, e:
			raise # Needed else can't exit
		divisions = self.find_changelog_tags(changelog)
		#print 'XX from',current,'to',next
		#for div,text in divisions: print 'XX',div
		# skip entries for all revisions above the one we are about to emerge
		for i in range(len(divisions)):
			if divisions[i][0]==next:
				divisions = divisions[i:]
				break
		# find out how many entries we are going to display
		for i in range(len(divisions)):
			if divisions[i][0]==current:
				divisions = divisions[:i]
				break
		else:
			# couldnt find the current revision in the list. display nothing
			return []
		return divisions
	def find_changelog_tags(self,changelog):
		# Split a ChangeLog body into (version, text) divisions, keyed on
		# the "*<version>" header lines matched by the regex below.
		while 1:
			match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
			if match is None:
				# No further headers: the remainder belongs to the last
				# release seen, if any.
				if release is not None:
					divs.append((release,changelog))
				return divs
			if release is not None:
				divs.append((release,changelog[:match.start()]))
			changelog = changelog[match.end():]
			release = match.group(1)
			# Normalize the tag: drop a ".ebuild" suffix and a "-r0"
			# revision so it compares equal to calc_changelog()'s keys.
			if release.endswith('.ebuild'):
				release = release[:-7]
			if release.endswith('-r0'):
				release = release[:-3]
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary."""
		# No-op for modes that must not modify the world file.
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self.myopts:
				return
		root_config = self.roots[self.target_root]
		world_set = root_config.sets["world"]

		world_locked = False
		if hasattr(world_set, "lock"):
			world_locked = True
			world_set.lock()

		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		args_set = self._sets["args"]
		portdb = self.trees[self.target_root]["porttree"].dbapi
		added_favorites = set()
		for x in self._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			if pkg_status != "nomerge":
				continue

			try:
				myfavkey = create_world_atom(x, args_set, root_config)
				if myfavkey:
					if myfavkey in added_favorites:
						continue
					added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString, e:
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)

		# Record requested sets (prefixed with SETPREFIX) alongside plain
		# atoms; sets flagged as non world_candidate are skipped.
		for k in self._sets:
			if k in ("args", "world") or not root_config.sets[k].world_candidate:
				continue
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		for a in all_added:
			print ">>> Recording %s in \"world\" favorites file..." % \
				colorize("INFORM", str(a))
		if all_added:
			world_set.update(all_added)

		if world_locked:
			world_set.unlock()
	def loadResumeCommand(self, resume_data, skip_masked=False):
		"""
		Add a resume command to the graph and validate it in the process. This
		will raise a PackageNotFound exception if a package is not available.

		@param resume_data: the saved merge state (a dict with at least a
			"mergelist" entry); anything else is rejected.
		@param skip_masked: when True, masked entries are collected instead
			of being reported as unsatisfied dependencies.
		"""
		if not isinstance(resume_data, dict):
			return False

		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
			mergelist = []

		fakedb = self.mydbapi
		trees = self.trees
		serialized_tasks = []
		for x in mergelist:
			# Each mergelist entry must be [pkg_type, root, cpv, action].
			if not (isinstance(x, list) and len(x) == 4):
				continue
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
				continue
			if action != "merge":
				continue
			tree_type = self.pkg_tree_map[pkg_type]
			mydb = trees[myroot][tree_type].dbapi
			db_keys = list(self._trees_orig[myroot][
				tree_type].dbapi._aux_cache_keys)
			try:
				metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
			except KeyError:
				# It does no exist or it is corrupt.
				if action == "uninstall":
					continue
				raise portage.exception.PackageNotFound(pkg_key)
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			root_config = self.roots[myroot]
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root_config=root_config,
				type_name=pkg_type)
			if pkg_type == "ebuild":
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg)
				# Recompute USE for the current configuration.
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
			self._pkg_cache[pkg] = pkg

			root_config = self.roots[pkg.root]
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
				if skip_masked:
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
				else:
					self._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()

		if self._unsatisfied_deps_for_display:
			return False

		if not serialized_tasks or "--nodeps" in self.myopts:
			self._serialized_tasks_cache = serialized_tasks
			self._scheduler_graph = self.digraph
		else:
			self._select_package = self._select_pkg_from_graph
			self.myparams.add("selective")

			favorites = resume_data.get("favorites")
			args_set = self._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)
			else:
				args = []

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):
						return False

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
			for arg in args:
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						pkg is not None:
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):
							return False

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			# masked.
			if not self._create_graph(allow_unsatisfied=True):
				return False
			if masked_tasks or self._unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + self._unsatisfied_deps)
			self._serialized_tasks_cache = None
			try:
				self.altlist()
			except self._unknown_internal_error:
				return False

		return True
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.
		"""
		root_config = self.roots[self.target_root]
		getSetAtoms = root_config.setconfig.getSetAtoms
		sets = root_config.sets
		for x in favorites:
			# Ignore anything that is not a string (corrupt resume data).
			if not isinstance(x, basestring):
				continue
			if x in ("system", "world"):
				x = SETPREFIX + x
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
				if s not in sets:
					continue
				# Recursively expand sets so that containment tests in
				# self._get_parent_sets() properly match atoms in nested
				# sets (like if world contains system).
				expanded_set = InternalPackageSet(
					initial_atoms=getSetAtoms(s))
				self._sets[s] = expanded_set
				args.append(SetArg(arg=x, set=expanded_set,
					root_config=root_config))
			else:
				if not portage.isvalidatom(x):
					continue
				args.append(AtomArg(arg=x, atom=x,
					root_config=root_config))

		# Create the "args" package set from atoms and
		# packages given as arguments.
		args_set = self._sets["args"]
		for arg in args:
			if not isinstance(arg, (AtomArg, PackageArg)):
				continue
			if myatom in args_set:
				continue
			args_set.add(myatom)
		self._set_atoms.update(chain(*self._sets.itervalues()))
		atom_arg_map = self._atom_arg_map
		for arg in args:
			for atom in arg.set:
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
				if refs is None:
					refs = []
					atom_arg_map[atom_key] = refs
		return args
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the graph that raised this so the
			# handler can inspect it.
			self.depgraph = depgraph
	# Base class for exceptions used internally by the depgraph; the
	# value defaults to an empty string.
	class _internal_exception(portage.exception.PortageException):
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
	class _serialize_tasks_retry(_internal_exception):
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
class _dep_check_composite_db(portage.dbapi):
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# Cache of match() results and of Package instances keyed by cpv.
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def match(self, atom):
		# Resolve `atom` via the depgraph's package selection, caching the
		# result keyed on the original (pre-expansion) atom.
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
						continue
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
		# Select a visible package for each remaining slot, if any.
		while slots:
			slot_atom = "%s:%s" % (atom_cp, slots.pop())
			pkg, existing = self._depgraph._select_package(
				self._root, slot_atom)
			if not self._visible(pkg):
				continue
			self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# An installed package is hidden unless running in selective mode
		# or it is matched by one of the argument atoms.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			try:
				arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
			except (StopIteration, portage.exception.InvalidDependString):
				arg = None
		try:
			if not visible(
				self._depgraph.pkgsettings[pkg.root], pkg):
				return False
		except portage.exception.InvalidDependString:
			return False
		return True

	def _dep_expand(self, atom):
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			# Prefer non-virtual candidates when the expansion is
			# ambiguous.
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[portage.dep_getkey(x) for x in expanded_atoms])
		if expanded_atoms:
			atom = expanded_atoms[0]
		else:
			null_atom = insert_category_into_atom(atom, "null")
			null_cp = portage.dep_getkey(null_atom)
			cat, atom_pn = portage.catsplit(null_cp)
			virts_p = root_config.settings.get_virts_p().get(atom_pn)
			if virts_p:
				# Allow the resolver to choose which virtual.
				atom = insert_category_into_atom(atom, "virtual")
			else:
				atom = insert_category_into_atom(atom, "null")
		return atom
8747 def aux_get(self, cpv, wants):
8748 metadata = self._cpv_pkg_map[cpv].metadata
8749 return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
	# Assigns short numeric labels to the portage tree and overlays so
	# the merge list can annotate each package with its source repo.
	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			if portdir:
				repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			if overlays:
				repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Resolved paths are what repoStr() is keyed on.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			if portdir:
				self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Map a resolved repository path to its display index, recording
		# unknown repositories so __str__ can print the "[?]" legend.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			self._unknown_repo = True
		else:
			shown_repos = self._shown_repos
			repo_paths = self._repo_paths
			repo_path = repo_paths[real_index]
			index = shown_repos.get(repo_path)
			if index is None:
				# First time this repo is shown: assign the next index.
				index = len(shown_repos)
				shown_repos[repo_path] = index

		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		if show_repo_paths:
			for index, repo_path in enumerate(show_repo_paths):
				output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		if unknown_repo:
			output.append(" "+teal("[?]") + \
				" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	# Tallies for each category of merge-list entry (upgrades, downgrades,
	# new slots, reinstalls, blocks, fetch restrictions, interactive
	# packages, total download size); __str__ renders the summary line.
		self.blocks_satisfied = 0
		self.restrict_fetch           = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
				details[-1] += "s"
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
				details[-1] += "s"
		if self.new > 0:
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
				details[-1] += "s"
		if self.reinst > 0:
			details.append("%s reinstall" % self.reinst)
		if self.uninst > 0:
			details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		if self.blocks > 0:
			myoutput.append("\nConflict: %s block" % \
				self.blocks)
			if self.blocks > 1:
				myoutput.append("s")
			if self.blocks_satisfied < self.blocks:
				myoutput.append(bad(" (%s unsatisfied)") % \
					(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollSelectAdapter(PollConstants):

	Use select to emulate a poll object, for
	systems that don't support poll().

		# fd -> eventmask of registered descriptors; _select_args caches
		# the argument lists for select() and is invalidated on change.
		self._registered = {}
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		Only POLLIN is currently supported!
		if len(args) > 1:
			raise TypeError(
				"register expected at most 2 arguments, got " + \
				repr(1 + len(args)))

		# Default mask when the caller omits one.
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT

		self._registered[fd] = eventmask
		# Force poll() to rebuild its cached select() arguments.
		self._select_args = None
8910 def unregister(self, fd):
8911 self._select_args = None
8912 del self._registered[fd]
	def poll(self, *args):
		# Emulate poll(timeout_ms) on top of select(), returning a list of
		# (fd, POLLIN) tuples for readable descriptors.
		if len(args) > 1:
			raise TypeError(
				"poll expected at most 2 arguments, got " + \
				repr(1 + len(args)))

		select_args = self._select_args
		if select_args is None:
			# Rebuild from the registered fd set (cache was invalidated).
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			# Copy before appending so the cached list stays timeout-free.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#   poll   | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#   select | seconds      | omitted
			# ---------|--------------|------------------------------

			if timeout is not None and timeout < 0:
				timeout = None
			if timeout is not None:
				# NOTE(review): integer division truncates sub-second
				# timeouts to 0 under Python 2 (e.g. 500 ms -> 0 s);
				# presumably timeout / 1000.0 was intended -- confirm.
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
		return poll_events
class SequentialTaskQueue(SlotObject):
	# A FIFO of tasks started up to max_jobs at a time; running tasks are
	# tracked in a set and pruned via their exit listeners.

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:
			self.max_jobs = 1

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Queue jumping: the task will be started before anything already
		# waiting in the queue.
		self._task_queue.appendleft(task)

		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
			return False

		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# max_jobs is True means "unlimited concurrency".
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			if not cancelled:
				running_tasks.add(task)
				task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		self.running_tasks.remove(task)
		if self._task_queue:
			self._dirty = True

		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			# Detach before cancelling so _task_exit() is not invoked.
			task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		return bool(self._task_queue or self.running_tasks)

		return len(self._task_queue) + len(self.running_tasks)
# Cached result of can_poll_device(); None means "not yet probed".
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	global _can_poll_device
	if _can_poll_device is not None:
		# Probe only once per process.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	try:
		dev_null = open('/dev/null', 'rb')
	except IOError:
		_can_poll_device = False
		return _can_poll_device

	p = select.poll()
	p.register(dev_null.fileno(), PollConstants.POLLIN)

	# A POLLNVAL event means poll() rejected the device fd.
	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			invalid_request = True
			break

	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Create an instance of select.poll, or an instance of
	PollSelectAdapter if there is no poll() implementation or
	it is broken somehow.

	@rtype: select.poll or PollSelectAdapter
	@returns: a poll-compatible object that is safe to use on
		this platform (PollSelectAdapter wraps select.select()).
	"""
	# can_poll_device() memoizes a runtime probe; it returns False both
	# when select.poll is missing and when poll() rejects device fds.
	if can_poll_device():
		return select.poll()
	return PollSelectAdapter()
# Fallback: platforms without os.getloadavg() get an emulation that
# parses the first line of /proc/loadavg (Linux-specific).
9078 getloadavg = getattr(os, "getloadavg", None)
9079 if getloadavg is None:
9082 Uses /proc/loadavg to emulate os.getloadavg().
9083 Raises OSError if the load average was unobtainable.
9086 loadavg_str = open('/proc/loadavg').readline()
9088 # getloadavg() is only supposed to raise OSError, so convert
9089 raise OSError('unknown')
9090 loadavg_split = loadavg_str.split()
9091 if len(loadavg_split) < 3:
9092 raise OSError('unknown')
# the first three whitespace-separated fields are the 1/5/15 minute
# averages; float() failures are converted to OSError below
9096 loadavg_floats.append(float(loadavg_split[i]))
9098 raise OSError('unknown')
9099 return tuple(loadavg_floats)
# PollScheduler: base class multiplexing child-process output via a
# poll() loop. Subclasses implement _schedule_tasks() and
# _running_job_count(). NOTE(review): elided extraction -- stray line
# number prefixes, missing intermediate lines; comments cover only what
# is visible.
9101 class PollScheduler(object):
# bundle of bound methods handed to tasks so they can register fds,
# block on ids, and unregister, without holding the whole scheduler
9103 class _sched_iface_class(SlotObject):
9104 __slots__ = ("register", "schedule", "unregister")
9108 self._max_load = None
9110 self._poll_event_queue = []
9111 self._poll_event_handlers = {}
9112 self._poll_event_handler_ids = {}
9113 # Increment id for each new handler.
9114 self._event_handler_id = 0
9115 self._poll_obj = create_poll_instance()
9116 self._scheduling = False
9118 def _schedule(self):
9120 Calls _schedule_tasks() and automatically returns early from
9121 any recursive calls to this method that the _schedule_tasks()
9122 call might trigger. This makes _schedule() safe to call from
9123 inside exit listeners.
# reentrancy guard; presumably reset in an elided finally block
9125 if self._scheduling:
9127 self._scheduling = True
9129 return self._schedule_tasks()
9131 self._scheduling = False
# abstract in this base class -- body elided/implemented by subclasses
9133 def _running_job_count(self):
# _can_add_job: enforce both the job-count limit and, when more than
# one job is allowed, the --load-average limit via getloadavg()
9136 def _can_add_job(self):
9137 max_jobs = self._max_jobs
9138 max_load = self._max_load
9140 if self._max_jobs is not True and \
9141 self._running_job_count() >= self._max_jobs:
9144 if max_load is not None and \
9145 (max_jobs is True or max_jobs > 1) and \
9146 self._running_job_count() >= 1:
9148 avg1, avg5, avg15 = getloadavg()
# only the 1-minute average is compared against max_load
9152 if avg1 >= max_load:
9157 def _poll(self, timeout=None):
9159 All poll() calls pass through here. The poll events
9160 are added directly to self._poll_event_queue.
9161 In order to avoid endless blocking, this raises
9162 StopIteration if timeout is None and there are
9163 no file descriptors to poll.
9165 if not self._poll_event_handlers:
9167 if timeout is None and \
9168 not self._poll_event_handlers:
9169 raise StopIteration(
9170 "timeout is None and there are no poll() event handlers")
9172 # The following error is known to occur with Linux kernel versions
9175 # select.error: (4, 'Interrupted system call')
9177 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9178 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9179 # without any events.
# presumably wrapped in an elided while/try retry loop -- confirm
9182 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9184 except select.error, e:
9185 writemsg_level("\n!!! select error: %s\n" % (e,),
9186 level=logging.ERROR, noiselevel=-1)
9188 if timeout is not None:
9191 def _next_poll_event(self, timeout=None):
9193 Since the _schedule_wait() loop is called by event
9194 handlers from _poll_loop(), maintain a central event
9195 queue for both of them to share events from a single
9196 poll() call. In order to avoid endless blocking, this
9197 raises StopIteration if timeout is None and there are
9198 no file descriptors to poll.
9200 if not self._poll_event_queue:
9202 return self._poll_event_queue.pop()
# _poll_loop: dispatch events to registered handlers until all
# handlers have unregistered themselves
9204 def _poll_loop(self):
9206 event_handlers = self._poll_event_handlers
9207 event_handled = False
9210 while event_handlers:
9211 f, event = self._next_poll_event()
9212 handler, reg_id = event_handlers[f]
9214 event_handled = True
9215 except StopIteration:
9216 event_handled = True
# an iteration that dispatched nothing would spin forever
9218 if not event_handled:
9219 raise AssertionError("tight loop")
9221 def _schedule_yield(self):
9223 Schedule for a short period of time chosen by the scheduler based
9224 on internal state. Synchronous tasks should call this periodically
9225 in order to allow the scheduler to service pending poll events. The
9226 scheduler will call poll() exactly once, without blocking, and any
9227 resulting poll events will be serviced.
9229 event_handlers = self._poll_event_handlers
9232 if not event_handlers:
9233 return bool(events_handled)
9235 if not self._poll_event_queue:
# drain only events already queued by the single non-blocking poll
9239 while event_handlers and self._poll_event_queue:
9240 f, event = self._next_poll_event()
9241 handler, reg_id = event_handlers[f]
9244 except StopIteration:
9247 return bool(events_handled)
# _register: map fd -> (handler, reg_id) and reg_id -> fd; duplicate
# registration of an fd is a programming error
9249 def _register(self, f, eventmask, handler):
9252 @return: A unique registration id, for use in schedule() or
9255 if f in self._poll_event_handlers:
9256 raise AssertionError("fd %d is already registered" % f)
9257 self._event_handler_id += 1
9258 reg_id = self._event_handler_id
9259 self._poll_event_handler_ids[reg_id] = f
9260 self._poll_event_handlers[f] = (handler, reg_id)
9261 self._poll_obj.register(f, eventmask)
# _unregister: inverse of _register -- drop both mappings and the
# kernel-side registration
9264 def _unregister(self, reg_id):
9265 f = self._poll_event_handler_ids[reg_id]
9266 self._poll_obj.unregister(f)
9267 del self._poll_event_handlers[f]
9268 del self._poll_event_handler_ids[reg_id]
# _schedule_wait: block dispatching events until none of the given
# registration ids remain registered (i.e. those tasks finished)
9270 def _schedule_wait(self, wait_ids):
9272 Schedule until wait_id is not longer registered
9275 @param wait_id: a task id to wait for
9277 event_handlers = self._poll_event_handlers
9278 handler_ids = self._poll_event_handler_ids
9279 event_handled = False
# accept a single id as a convenience
9281 if isinstance(wait_ids, int):
9282 wait_ids = frozenset([wait_ids])
9285 while wait_ids.intersection(handler_ids):
9286 f, event = self._next_poll_event()
9287 handler, reg_id = event_handlers[f]
9289 event_handled = True
9290 except StopIteration:
9291 event_handled = True
9293 return event_handled
# QueueScheduler: drives one or more SequentialTaskQueue instances
# under a shared job/load limit. NOTE(review): elided extraction --
# comments describe visible statements only.
9295 class QueueScheduler(PollScheduler):
9298 Add instances of SequentialTaskQueue and then call run(). The
9299 run() method returns when no tasks remain.
9302 def __init__(self, max_jobs=None, max_load=None):
9303 PollScheduler.__init__(self)
9305 if max_jobs is None:
9308 self._max_jobs = max_jobs
9309 self._max_load = max_load
# expose only the register/schedule/unregister surface to tasks
9310 self.sched_iface = self._sched_iface_class(
9311 register=self._register,
9312 schedule=self._schedule_wait,
9313 unregister=self._unregister)
9316 self._schedule_listeners = []
9319 self._queues.append(q)
9321 def remove(self, q):
9322 self._queues.remove(q)
# run (fragment): keep scheduling until nothing starts, then wait for
# in-flight jobs to drain
9326 while self._schedule():
9329 while self._running_job_count():
9332 def _schedule_tasks(self):
9335 @returns: True if there may be remaining tasks to schedule,
9338 while self._can_add_job():
# headroom below the job limit; fill it from the queues
9339 n = self._max_jobs - self._running_job_count()
9343 if not self._start_next_job(n):
9346 for q in self._queues:
# _running_job_count: sum of running tasks across all queues; also
# caches the total in self._jobs
9351 def _running_job_count(self):
9353 for q in self._queues:
9354 job_count += len(q.running_tasks)
9355 self._jobs = job_count
# _start_next_job: ask each queue to start work until n new tasks have
# started; measured by before/after running_tasks sizes
9358 def _start_next_job(self, n=1):
9360 for q in self._queues:
9361 initial_job_count = len(q.running_tasks)
9363 final_job_count = len(q.running_tasks)
9364 if final_job_count > initial_job_count:
9365 started_count += (final_job_count - initial_job_count)
9366 if started_count >= n:
9368 return started_count
# TaskScheduler: convenience facade -- one SequentialTaskQueue driven
# by a QueueScheduler; run is delegated directly to the scheduler.
9370 class TaskScheduler(object):
# NOTE(review): "AsynchrousTask" below looks like a typo for the task
# base class name -- left untouched since the referenced class is not
# visible in this chunk.
9373 A simple way to handle scheduling of AsynchrousTask instances. Simply
9374 add tasks and call run(). The run() method returns when no tasks remain.
9377 def __init__(self, max_jobs=None, max_load=None):
9378 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9379 self._scheduler = QueueScheduler(
9380 max_jobs=max_jobs, max_load=max_load)
9381 self.sched_iface = self._scheduler.sched_iface
# bound-method delegation: self.run is the scheduler's run
9382 self.run = self._scheduler.run
9383 self._scheduler.add(self._queue)
9385 def add(self, task):
9386 self._queue.add(task)
# JobStatusDisplay: one-line terminal status ("Jobs: N of M complete,
# ... Load avg: ...") using termcap codes when stdout is a tty.
# Attributes are set via object.__setattr__ because __setattr__ below
# hooks assignments to trigger redisplay. NOTE(review): elided
# extraction -- comments cover visible statements only.
9388 class JobStatusDisplay(object):
# assignments to these attribute names trigger _property_change()
9390 _bound_properties = ("curval", "failed", "running")
9391 _jobs_column_width = 48
9393 # Don't update the display unless at least this much
9394 # time has passed, in units of seconds.
9395 _min_display_latency = 2
9397 _default_term_codes = {
9403 _termcap_name_map = {
9404 'carriage_return' : 'cr',
9409 def __init__(self, out=sys.stdout, quiet=False):
9410 object.__setattr__(self, "out", out)
9411 object.__setattr__(self, "quiet", quiet)
9412 object.__setattr__(self, "maxval", 0)
9413 object.__setattr__(self, "merges", 0)
9414 object.__setattr__(self, "_changed", False)
9415 object.__setattr__(self, "_displayed", False)
9416 object.__setattr__(self, "_last_display_time", 0)
9417 object.__setattr__(self, "width", 80)
9420 isatty = hasattr(out, "isatty") and out.isatty()
9421 object.__setattr__(self, "_isatty", isatty)
# non-tty output (or a failed termcap init) falls back to the
# hard-coded default codes
9422 if not isatty or not self._init_term():
9424 for k, capname in self._termcap_name_map.iteritems():
9425 term_codes[k] = self._default_term_codes[capname]
9426 object.__setattr__(self, "_term_codes", term_codes)
9428 def _init_term(self):
9430 Initialize term control codes.
9432 @returns: True if term codes were successfully initialized,
9436 term_type = os.environ.get("TERM", "vt100")
# setupterm may raise curses.error for unknown terminals
9442 curses.setupterm(term_type, self.out.fileno())
9443 tigetstr = curses.tigetstr
9444 except curses.error:
9449 if tigetstr is None:
# per-capability fallback when tigetstr yields nothing usable
9453 for k, capname in self._termcap_name_map.iteritems():
9454 code = tigetstr(capname)
9456 code = self._default_term_codes[capname]
9457 term_codes[k] = code
9458 object.__setattr__(self, "_term_codes", term_codes)
9461 def _format_msg(self, msg):
9462 return ">>> %s" % msg
# erase fragment: return to column 0 and clear to end of line
9466 self._term_codes['carriage_return'] + \
9467 self._term_codes['clr_eol'])
9469 self._displayed = False
9471 def _display(self, line):
9472 self.out.write(line)
9474 self._displayed = True
# _update: on a tty, rewrite the status line in place; otherwise just
# append a formatted line
9476 def _update(self, msg):
9479 if not self._isatty:
9480 out.write(self._format_msg(msg) + self._term_codes['newline'])
9482 self._displayed = True
9488 self._display(self._format_msg(msg))
# displayMessage: print a one-off message without clobbering the
# persistent status line (erased first if currently shown)
9490 def displayMessage(self, msg):
9492 was_displayed = self._displayed
9494 if self._isatty and self._displayed:
9497 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9499 self._displayed = False
9502 self._changed = True
# reset fragment: zero the bound counters and drop the displayed line
9508 for name in self._bound_properties:
9509 object.__setattr__(self, name, 0)
9512 self.out.write(self._term_codes['newline'])
9514 self._displayed = False
# attribute hook: unchanged values are ignored; changes to the bound
# properties mark the display dirty via _property_change
9516 def __setattr__(self, name, value):
9517 old_value = getattr(self, name)
9518 if value == old_value:
9520 object.__setattr__(self, name, value)
9521 if name in self._bound_properties:
9522 self._property_change(name, old_value, value)
9524 def _property_change(self, name, old_value, new_value):
9525 self._changed = True
# _load_avg_str fragment: format load averages with a precision that
# presumably depends on magnitude -- computation elided
9528 def _load_avg_str(self):
9543 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9547 Display status on stdout, but only if something has
9548 changed since the last call.
# rate-limit repaints to _min_display_latency seconds
9554 current_time = time.time()
9555 time_delta = current_time - self._last_display_time
9556 if self._displayed and \
9558 if not self._isatty:
9560 if time_delta < self._min_display_latency:
9563 self._last_display_time = current_time
9564 self._changed = False
9565 self._display_status()
# _display_status: render the "Jobs: ..." line twice in parallel --
# styled (color_output) and unstyled (plain_output, used for width
# math and truncation)
9567 def _display_status(self):
9568 # Don't use len(self._completed_tasks) here since that also
9569 # can include uninstall tasks.
9570 curval_str = str(self.curval)
9571 maxval_str = str(self.maxval)
9572 running_str = str(self.running)
9573 failed_str = str(self.failed)
9574 load_avg_str = self._load_avg_str()
9576 color_output = StringIO.StringIO()
9577 plain_output = StringIO.StringIO()
9578 style_file = portage.output.ConsoleStyleFile(color_output)
9579 style_file.write_listener = plain_output
9580 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9581 style_writer.style_listener = style_file.new_styles
9582 f = formatter.AbstractFormatter(style_writer)
9584 number_style = "INFORM"
9585 f.add_literal_data("Jobs: ")
9586 f.push_style(number_style)
9587 f.add_literal_data(curval_str)
9589 f.add_literal_data(" of ")
9590 f.push_style(number_style)
9591 f.add_literal_data(maxval_str)
9593 f.add_literal_data(" complete")
9596 f.add_literal_data(", ")
9597 f.push_style(number_style)
9598 f.add_literal_data(running_str)
9600 f.add_literal_data(" running")
9603 f.add_literal_data(", ")
9604 f.push_style(number_style)
9605 f.add_literal_data(failed_str)
9607 f.add_literal_data(" failed")
# pad the jobs section to a fixed column before the load average
9609 padding = self._jobs_column_width - len(plain_output.getvalue())
9611 f.add_literal_data(padding * " ")
9613 f.add_literal_data("Load avg: ")
9614 f.add_literal_data(load_avg_str)
9616 # Truncate to fit width, to avoid making the terminal scroll if the
9617 # line overflows (happens when the load average is large).
9618 plain_output = plain_output.getvalue()
9619 if self._isatty and len(plain_output) > self.width:
9620 # Use plain_output here since it's easier to truncate
9621 # properly than the color output which contains console
9623 self._update(plain_output[:self.width])
9625 self._update(color_output.getvalue())
# mirror the (whitespace-collapsed) status into the xterm title
9627 xtermTitle(" ".join(plain_output.split()))
# Scheduler: the main emerge merge-list scheduler. This span covers the
# class header, option-set constants and nested helper classes; methods
# follow in later spans. NOTE(review): elided extraction -- comments
# describe visible statements only.
9629 class Scheduler(PollScheduler):
# option sets consulted via frozenset.intersection(self.myopts)
9631 _opts_ignore_blockers = \
9632 frozenset(["--buildpkgonly",
9633 "--fetchonly", "--fetch-all-uri",
9634 "--nodeps", "--pretend"])
9636 _opts_no_background = \
9637 frozenset(["--pretend",
9638 "--fetchonly", "--fetch-all-uri"])
9640 _opts_no_restart = frozenset(["--buildpkgonly",
9641 "--fetchonly", "--fetch-all-uri", "--pretend"])
# options that must not leak into an automatic --resume invocation
9643 _bad_resume_opts = set(["--ask", "--changelog",
9644 "--resume", "--skipfirst"])
9646 _fetch_log = "/var/log/emerge-fetch.log"
# callback bundle handed to dblink/ebuild phases
9648 class _iface_class(SlotObject):
9649 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9650 "dblinkElog", "fetch", "register", "schedule",
9651 "scheduleSetup", "scheduleUnpack", "scheduleYield",
9654 class _fetch_iface_class(SlotObject):
9655 __slots__ = ("log_file", "schedule")
# named task queues: merge/jobs/fetch/unpack
9657 _task_queues_class = slot_dict_class(
9658 ("merge", "jobs", "fetch", "unpack"), prefix="")
# slot names double as option names: "--" + slot.replace("_", "-")
# (see the __init__ loops that populate these)
9660 class _build_opts_class(SlotObject):
9661 __slots__ = ("buildpkg", "buildpkgonly",
9662 "fetch_all_uri", "fetchonly", "pretend")
9664 class _binpkg_opts_class(SlotObject):
9665 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9667 class _pkg_count_class(SlotObject):
9668 __slots__ = ("curval", "maxval")
9670 class _emerge_log_class(SlotObject):
9671 __slots__ = ("xterm_titles",)
9673 def log(self, *pargs, **kwargs):
9674 if not self.xterm_titles:
9675 # Avoid interference with the scheduler's status display.
9676 kwargs.pop("short_msg", None)
9677 emergelog(self.xterm_titles, *pargs, **kwargs)
9679 class _failed_pkg(SlotObject):
9680 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9682 class _ConfigPool(object):
9683 """Interface for a task to temporarily allocate a config
9684 instance from a pool. This allows a task to be constructed
9685 long before the config instance actually becomes needed, like
9686 when prefetchers are constructed for the whole merge list."""
9687 __slots__ = ("_root", "_allocate", "_deallocate")
9688 def __init__(self, root, allocate, deallocate):
9690 self._allocate = allocate
9691 self._deallocate = deallocate
9693 return self._allocate(self._root)
9694 def deallocate(self, settings):
9695 self._deallocate(settings)
9697 class _unknown_internal_error(portage.exception.PortageException):
9699 Used internally to terminate scheduling. The specific reason for
9700 the failure should have been dumped to stderr.
9702 def __init__(self, value=""):
9703 portage.exception.PortageException.__init__(self, value)
# Scheduler.__init__: wire up option flags, task queues, status display,
# per-root settings pools/blocker dbs, the scheduling interface bundle,
# parallel-fetch setup and detection of the running portage instance.
# NOTE(review): elided extraction -- comments describe visible lines only.
9705 def __init__(self, settings, trees, mtimedb, myopts,
9706 spinner, mergelist, favorites, digraph):
9707 PollScheduler.__init__(self)
9708 self.settings = settings
9709 self.target_root = settings["ROOT"]
9711 self.myopts = myopts
9712 self._spinner = spinner
9713 self._mtimedb = mtimedb
9714 self._mergelist = mergelist
9715 self._favorites = favorites
9716 self._args_set = InternalPackageSet(favorites)
# map slot names to CLI flags: slot "fetch_all_uri" <-> "--fetch-all-uri"
9717 self._build_opts = self._build_opts_class()
9718 for k in self._build_opts.__slots__:
9719 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9720 self._binpkg_opts = self._binpkg_opts_class()
9721 for k in self._binpkg_opts.__slots__:
9722 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9725 self._logger = self._emerge_log_class()
# one SequentialTaskQueue per named queue (merge/jobs/fetch/unpack)
9726 self._task_queues = self._task_queues_class()
9727 for k in self._task_queues.allowed_keys:
9728 setattr(self._task_queues, k,
9729 SequentialTaskQueue())
9730 self._status_display = JobStatusDisplay()
9731 self._max_load = myopts.get("--load-average")
9732 max_jobs = myopts.get("--jobs")
9733 if max_jobs is None:
9735 self._set_max_jobs(max_jobs)
9737 # The root where the currently running
9738 # portage instance is installed.
9739 self._running_root = trees["/"]["root_config"]
9741 if settings.get("PORTAGE_DEBUG", "") == "1":
# per-root state; the loop header over roots is elided here
9743 self.pkgsettings = {}
9744 self._config_pool = {}
9745 self._blocker_db = {}
9747 self._config_pool[root] = []
9748 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
9750 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9751 schedule=self._schedule_fetch)
# the full callback surface handed down to merge/ebuild phases
9752 self._sched_iface = self._iface_class(
9753 dblinkEbuildPhase=self._dblink_ebuild_phase,
9754 dblinkDisplayMerge=self._dblink_display_merge,
9755 dblinkElog=self._dblink_elog,
9756 fetch=fetch_iface, register=self._register,
9757 schedule=self._schedule_wait,
9758 scheduleSetup=self._schedule_setup,
9759 scheduleUnpack=self._schedule_unpack,
9760 scheduleYield=self._schedule_yield,
9761 unregister=self._unregister)
# weak values: prefetchers disappear once nothing else holds them
9763 self._prefetchers = weakref.WeakValueDictionary()
9764 self._pkg_queue = []
9765 self._completed_tasks = set()
9767 self._failed_pkgs = []
9768 self._failed_pkgs_all = []
9769 self._failed_pkgs_die_msgs = []
9770 self._post_mod_echo_msgs = []
9771 self._parallel_fetch = False
# only Package objects with operation "merge" count toward the total
9772 merge_count = len([x for x in mergelist \
9773 if isinstance(x, Package) and x.operation == "merge"])
9774 self._pkg_count = self._pkg_count_class(
9775 curval=0, maxval=merge_count)
9776 self._status_display.maxval = self._pkg_count.maxval
9778 # The load average takes some time to respond when new
9779 # jobs are added, so we need to limit the rate of adding
9781 self._job_delay_max = 10
9782 self._job_delay_factor = 1.0
9783 self._job_delay_exp = 1.5
9784 self._previous_job_start_time = None
9786 self._set_digraph(digraph)
9788 # This is used to memoize the _choose_pkg() result when
9789 # no packages can be chosen until one of the existing
9791 self._choose_pkg_return_early = False
# parallel-fetch requires the distlocks feature and is pointless for
# pretend/fetch-only modes or single-package lists
9793 features = self.settings.features
9794 if "parallel-fetch" in features and \
9795 not ("--pretend" in self.myopts or \
9796 "--fetch-all-uri" in self.myopts or \
9797 "--fetchonly" in self.myopts):
9798 if "distlocks" not in features:
9799 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9800 portage.writemsg(red("!!!")+" parallel-fetching " + \
9801 "requires the distlocks feature enabled"+"\n",
9803 portage.writemsg(red("!!!")+" you have it disabled, " + \
9804 "thus parallel-fetching is being disabled"+"\n",
9806 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9807 elif len(mergelist) > 1:
9808 self._parallel_fetch = True
9810 if self._parallel_fetch:
9811 # clear out existing fetch log if it exists
# open-for-write truncates; failure to do so is ignored
9813 open(self._fetch_log, 'w')
9814 except EnvironmentError:
# detect the installed portage package so a self-upgrade can trigger
# an emerge restart later (see _is_restart_necessary)
9817 self._running_portage = None
9818 portage_match = self._running_root.trees["vartree"].dbapi.match(
9819 portage.const.PORTAGE_PACKAGE_ATOM)
9821 cpv = portage_match.pop()
9822 self._running_portage = self._pkg(cpv, "installed",
9823 self._running_root, installed=True)
# _poll override (fragment): delegates to the base implementation;
# whatever it wraps around the call is elided here.
9825 def _poll(self, timeout=None):
9827 PollScheduler._poll(self, timeout=timeout)
# _set_max_jobs: keep the scheduler limit and the jobs queue limit in sync
9829 def _set_max_jobs(self, max_jobs):
9830 self._max_jobs = max_jobs
9831 self._task_queues.jobs.max_jobs = max_jobs
9833 def _background_mode(self):
9835 Check if background mode is enabled and adjust states as necessary.
9838 @returns: True if background mode is enabled, False otherwise.
# background when parallel (or --quiet) and no option forbids it
9840 background = (self._max_jobs is True or \
9841 self._max_jobs > 1 or "--quiet" in self.myopts) and \
9842 not bool(self._opts_no_background.intersection(self.myopts))
# interactive packages need the terminal, so force --jobs=1 and keep
# package output on stdio
9845 interactive_tasks = self._get_interactive_tasks()
9846 if interactive_tasks:
9848 writemsg_level(">>> Sending package output to stdio due " + \
9849 "to interactive package(s):\n",
9850 level=logging.INFO, noiselevel=-1)
9852 for pkg in interactive_tasks:
9853 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
9855 pkg_str += " for " + pkg.root
9858 writemsg_level("".join("%s\n" % (l,) for l in msg),
9859 level=logging.INFO, noiselevel=-1)
9860 if self._max_jobs is True or self._max_jobs > 1:
9861 self._set_max_jobs(1)
9862 writemsg_level(">>> Setting --jobs=1 due " + \
9863 "to the above interactive package(s)\n",
9864 level=logging.INFO, noiselevel=-1)
9866 self._status_display.quiet = \
9868 ("--quiet" in self.myopts and \
9869 "--verbose" not in self.myopts)
# suppress xterm titles when the notitles feature is set
9871 self._logger.xterm_titles = \
9872 "notitles" not in self.settings.features and \
9873 self._status_display.quiet
# _get_interactive_tasks: scan the merge list for ebuilds whose
# PROPERTIES contain "interactive" (USE-conditionals resolved against
# the package's enabled USE flags).
9877 def _get_interactive_tasks(self):
9878 from portage import flatten
9879 from portage.dep import use_reduce, paren_reduce
9880 interactive_tasks = []
9881 for task in self._mergelist:
9882 if not (isinstance(task, Package) and \
9883 task.operation == "merge"):
# a malformed PROPERTIES string aborts scheduling entirely
9886 properties = flatten(use_reduce(paren_reduce(
9887 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9888 except portage.exception.InvalidDependString, e:
9889 show_invalid_depstring_notice(task,
9890 task.metadata["PROPERTIES"], str(e))
9891 raise self._unknown_internal_error()
9892 if "interactive" in properties:
9893 interactive_tasks.append(task)
9894 return interactive_tasks
# _set_digraph: the dependency graph is only needed for parallel
# scheduling; discard it for --nodeps or effectively serial runs
9896 def _set_digraph(self, digraph):
9897 if "--nodeps" in self.myopts or \
9898 (self._max_jobs is not True and self._max_jobs < 2):
9900 self._digraph = None
9903 self._digraph = digraph
9904 self._prune_digraph()
9906 def _prune_digraph(self):
9908 Prune any root nodes that are irrelevant.
9911 graph = self._digraph
9912 completed_tasks = self._completed_tasks
9913 removed_nodes = set()
# repeatedly peel off root nodes that are non-packages, installed
# nomerge entries, or already completed (outer loop elided)
9915 for node in graph.root_nodes():
9916 if not isinstance(node, Package) or \
9917 (node.installed and node.operation == "nomerge") or \
9919 node in completed_tasks:
9920 removed_nodes.add(node)
9922 graph.difference_update(removed_nodes)
9923 if not removed_nodes:
9925 removed_nodes.clear()
# _pkg_failure: exception carrying unmerge() failure details; the
# first positional argument is kept as .status.
9927 class _pkg_failure(portage.exception.PortageException):
9929 An instance of this class is raised by unmerge() when
9930 an uninstallation fails.
9933 def __init__(self, *pargs):
9934 portage.exception.PortageException.__init__(self, pargs)
9936 self.status = pargs[0]
# the three schedulers below route phases onto the appropriate named
# queue; addFront gives fetch/setup phases priority over queued work
9938 def _schedule_fetch(self, fetcher):
9940 Schedule a fetcher on the fetch queue, in order to
9941 serialize access to the fetch log.
9943 self._task_queues.fetch.addFront(fetcher)
9945 def _schedule_setup(self, setup_phase):
9947 Schedule a setup phase on the merge queue, in order to
9948 serialize unsandboxed access to the live filesystem.
9950 self._task_queues.merge.addFront(setup_phase)
9953 def _schedule_unpack(self, unpack_phase):
9955 Schedule an unpack phase on the unpack queue, in order
9956 to serialize $DISTDIR access for live ebuilds.
9958 self._task_queues.unpack.add(unpack_phase)
# _find_blockers: returns a callable (body partially elided) that
# resolves installed blockers for new_pkg once the vdb lock is held.
9960 def _find_blockers(self, new_pkg):
9962 Returns a callable which should be called only when
9963 the vdb lock has been acquired.
9966 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
9969 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
# blockers are irrelevant for pretend/fetch-only style runs
9970 if self._opts_ignore_blockers.intersection(self.myopts):
9973 # Call gc.collect() here to avoid heap overflow that
9974 # triggers 'Cannot allocate memory' errors (reported
9979 blocker_db = self._blocker_db[new_pkg.root]
9981 blocker_dblinks = []
9982 for blocking_pkg in blocker_db.findInstalledBlockers(
9983 new_pkg, acquire_lock=acquire_lock):
# same slot or same version is a replacement, not a blocker
9984 if new_pkg.slot_atom == blocking_pkg.slot_atom:
9986 if new_pkg.cpv == blocking_pkg.cpv:
9988 blocker_dblinks.append(portage.dblink(
9989 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9990 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9991 vartree=self.trees[blocking_pkg.root]["vartree"]))
9995 return blocker_dblinks
# _dblink_pkg: translate a dblink into this scheduler's Package object
9997 def _dblink_pkg(self, pkg_dblink):
9998 cpv = pkg_dblink.mycpv
9999 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10000 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10001 installed = type_name == "installed"
10002 return self._pkg(cpv, type_name, root_config, installed=installed)
# _append_to_log_path (fragment): append msg to a log file; the write
# and close are elided here
10004 def _append_to_log_path(self, log_path, msg):
10005 f = open(log_path, 'a')
# _dblink_elog: route elog messages either to the build log (when in
# background mode) or to the console
10011 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10013 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10016 background = self._background
10018 if background and log_path is not None:
10019 log_file = open(log_path, 'a')
10024 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10026 if log_file is not None:
# _dblink_display_merge: print and/or log merge output; background
# mode suppresses console output below WARN level
10029 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10030 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10031 background = self._background
10033 if log_path is None:
10034 if not (background and level < logging.WARN):
10035 portage.util.writemsg_level(msg,
10036 level=level, noiselevel=noiselevel)
10039 portage.util.writemsg_level(msg,
10040 level=level, noiselevel=noiselevel)
10041 self._append_to_log_path(log_path, msg)
# _dblink_ebuild_phase: run one ebuild phase synchronously (start +
# wait) under this scheduler so output handling stays centralized
10043 def _dblink_ebuild_phase(self,
10044 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10046 Using this callback for merge phases allows the scheduler
10047 to run while these phases execute asynchronously, and allows
10048 the scheduler control output handling.
10051 scheduler = self._sched_iface
10052 settings = pkg_dblink.settings
10053 pkg = self._dblink_pkg(pkg_dblink)
10054 background = self._background
10055 log_path = settings.get("PORTAGE_LOG_FILE")
10057 ebuild_phase = EbuildPhase(background=background,
10058 pkg=pkg, phase=phase, scheduler=scheduler,
10059 settings=settings, tree=pkg_dblink.treetype)
10060 ebuild_phase.start()
10061 ebuild_phase.wait()
10063 return ebuild_phase.returncode
# _check_manifests: verify digests for every ebuild on the merge list
# up front (FEATURES=strict only; skipped for fetch-only modes) so a
# bad Manifest fails fast instead of mid-merge.
10065 def _check_manifests(self):
10066 # Verify all the manifests now so that the user is notified of failure
10067 # as soon as possible.
10068 if "strict" not in self.settings.features or \
10069 "--fetchonly" in self.myopts or \
10070 "--fetch-all-uri" in self.myopts:
10073 shown_verifying_msg = False
# clone per-root settings with PORTAGE_QUIET so digestcheck is silent
10074 quiet_settings = {}
10075 for myroot, pkgsettings in self.pkgsettings.iteritems():
10076 quiet_config = portage.config(clone=pkgsettings)
10077 quiet_config["PORTAGE_QUIET"] = "1"
10078 quiet_config.backup_changes("PORTAGE_QUIET")
10079 quiet_settings[myroot] = quiet_config
10082 for x in self._mergelist:
10083 if not isinstance(x, Package) or \
10084 x.type_name != "ebuild":
10087 if not shown_verifying_msg:
10088 shown_verifying_msg = True
10089 self._status_msg("Verifying ebuild manifests")
10091 root_config = x.root_config
10092 portdb = root_config.trees["porttree"].dbapi
10093 quiet_config = quiet_settings[root_config.root]
# digestcheck reads the package dir from the "O" variable
10094 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10095 if not portage.digestcheck([], quiet_config, strict=True):
# _add_prefetchers: queue background fetchers for all but the first
# merge-list entry (prefetchers dict has weak values -- see __init__)
10100 def _add_prefetchers(self):
10102 if not self._parallel_fetch:
10105 if self._parallel_fetch:
10106 self._status_msg("Starting parallel fetch")
10108 prefetchers = self._prefetchers
10109 getbinpkg = "--getbinpkg" in self.myopts
10111 # In order to avoid "waiting for lock" messages
10112 # at the beginning, which annoy users, never
10113 # spawn a prefetcher for the first package.
10114 for pkg in self._mergelist[1:]:
10115 prefetcher = self._create_prefetcher(pkg)
10116 if prefetcher is not None:
10117 self._task_queues.fetch.add(prefetcher)
10118 prefetchers[pkg] = prefetcher
# _create_prefetcher: EbuildFetcher for ebuilds, BinpkgPrefetcher for
# remote binpkgs under --getbinpkg, otherwise None
10120 def _create_prefetcher(self, pkg):
10122 @return: a prefetcher, or None if not applicable
10126 if not isinstance(pkg, Package):
10129 elif pkg.type_name == "ebuild":
10131 prefetcher = EbuildFetcher(background=True,
10132 config_pool=self._ConfigPool(pkg.root,
10133 self._allocate_config, self._deallocate_config),
10134 fetchonly=1, logfile=self._fetch_log,
10135 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10137 elif pkg.type_name == "binary" and \
10138 "--getbinpkg" in self.myopts and \
10139 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10141 prefetcher = BinpkgPrefetcher(background=True,
10142 pkg=pkg, scheduler=self._sched_iface)
# _is_restart_scheduled: True when a portage self-upgrade appears in
# the merge list before its last position (so emerge must re-exec to
# finish the remainder with the new version).
10146 def _is_restart_scheduled(self):
10148 Check if the merge list contains a replacement
10149 for the current running instance, that will result
10150 in restart after merge.
10152 @returns: True if a restart is scheduled, False otherwise.
10154 if self._opts_no_restart.intersection(self.myopts):
10157 mergelist = self._mergelist
10159 for i, pkg in enumerate(mergelist):
10160 if self._is_restart_necessary(pkg) and \
10161 i != len(mergelist) - 1:
# _is_restart_necessary: pkg is a portage replacement on the running
# root with a version different from the running instance
10166 def _is_restart_necessary(self, pkg):
10168 @return: True if merging the given package
10169 requires restart, False otherwise.
10172 # Figure out if we need a restart.
10173 if pkg.root == self._running_root.root and \
10174 portage.match_from_list(
10175 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10176 if self._running_portage:
10177 return pkg.cpv != self._running_portage.cpv
10181 def _restart_if_necessary(self, pkg):
10183 Use execv() to restart emerge. This happens
10184 if portage upgrades itself and there are
10185 remaining packages in the list.
10188 if self._opts_no_restart.intersection(self.myopts):
10191 if not self._is_restart_necessary(pkg):
# no point restarting when nothing remains after this package
10194 if pkg == self._mergelist[-1]:
10197 self._main_loop_cleanup()
10199 logger = self._logger
10200 pkg_count = self._pkg_count
10201 mtimedb = self._mtimedb
10202 bad_resume_opts = self._bad_resume_opts
10204 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10205 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10207 logger.log(" *** RESTARTING " + \
10208 "emerge via exec() after change of " + \
10209 "portage version.")
# drop the just-merged package from the saved resume list before exec
10211 mtimedb["resume"]["mergelist"].remove(list(pkg))
10213 portage.run_exitfuncs()
10214 mynewargv = [sys.argv[0], "--resume"]
10215 resume_opts = self.myopts.copy()
10216 # For automatic resume, we need to prevent
10217 # any of bad_resume_opts from leaking in
10218 # via EMERGE_DEFAULT_OPTS.
10219 resume_opts["--ignore-default-opts"] = True
10220 for myopt, myarg in resume_opts.iteritems():
10221 if myopt not in bad_resume_opts:
# flags pass through bare; valued options as --opt=value
10223 mynewargv.append(myopt)
10225 mynewargv.append(myopt +"="+ str(myarg))
10226 # priority only needs to be adjusted on the first run
10227 os.environ["PORTAGE_NICENESS"] = "0"
# replaces the current process; does not return on success
10228 os.execv(mynewargv[0], mynewargv)
10232 if "--resume" in self.myopts:
10234 portage.writemsg_stdout(
10235 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10236 self._logger.log(" *** Resuming merge...")
10238 self._save_resume_list()
10241 self._background = self._background_mode()
10242 except self._unknown_internal_error:
10245 for root in self.trees:
10246 root_config = self.trees[root]["root_config"]
10248 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10249 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10250 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10251 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10252 if not tmpdir or not os.path.isdir(tmpdir):
10253 msg = "The directory specified in your " + \
10254 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10255 "does not exist. Please create this " + \
10256 "directory or correct your PORTAGE_TMPDIR setting."
10257 msg = textwrap.wrap(msg, 70)
10258 out = portage.output.EOutput()
10263 if self._background:
10264 root_config.settings.unlock()
10265 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10266 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10267 root_config.settings.lock()
10269 self.pkgsettings[root] = portage.config(
10270 clone=root_config.settings)
10272 rval = self._check_manifests()
10273 if rval != os.EX_OK:
10276 keep_going = "--keep-going" in self.myopts
10277 fetchonly = self._build_opts.fetchonly
10278 mtimedb = self._mtimedb
10279 failed_pkgs = self._failed_pkgs
10282 rval = self._merge()
10283 if rval == os.EX_OK or fetchonly or not keep_going:
10285 if "resume" not in mtimedb:
10287 mergelist = self._mtimedb["resume"].get("mergelist")
10291 if not failed_pkgs:
10294 for failed_pkg in failed_pkgs:
10295 mergelist.remove(list(failed_pkg.pkg))
10297 self._failed_pkgs_all.extend(failed_pkgs)
10303 if not self._calc_resume_list():
10306 clear_caches(self.trees)
10307 if not self._mergelist:
10310 self._save_resume_list()
10311 self._pkg_count.curval = 0
10312 self._pkg_count.maxval = len([x for x in self._mergelist \
10313 if isinstance(x, Package) and x.operation == "merge"])
10314 self._status_display.maxval = self._pkg_count.maxval
10316 self._logger.log(" *** Finished. Cleaning up...")
10319 self._failed_pkgs_all.extend(failed_pkgs)
10322 background = self._background
10323 failure_log_shown = False
10324 if background and len(self._failed_pkgs_all) == 1:
10325 # If only one package failed then just show it's
10326 # whole log for easy viewing.
10327 failed_pkg = self._failed_pkgs_all[-1]
10328 build_dir = failed_pkg.build_dir
10331 log_paths = [failed_pkg.build_log]
10333 log_path = self._locate_failure_log(failed_pkg)
10334 if log_path is not None:
10336 log_file = open(log_path, 'rb')
10340 if log_file is not None:
10342 for line in log_file:
10343 writemsg_level(line, noiselevel=-1)
10346 failure_log_shown = True
10348 # Dump mod_echo output now since it tends to flood the terminal.
10349 # This allows us to avoid having more important output, generated
10350 # later, from being swept away by the mod_echo output.
10351 mod_echo_output = _flush_elog_mod_echo()
10353 if background and not failure_log_shown and \
10354 self._failed_pkgs_all and \
10355 self._failed_pkgs_die_msgs and \
10356 not mod_echo_output:
10358 printer = portage.output.EOutput()
10359 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10361 if mysettings["ROOT"] != "/":
10362 root_msg = " merged to %s" % mysettings["ROOT"]
10364 printer.einfo("Error messages for package %s%s:" % \
10365 (colorize("INFORM", key), root_msg))
10367 for phase in portage.const.EBUILD_PHASES:
10368 if phase not in logentries:
10370 for msgtype, msgcontent in logentries[phase]:
10371 if isinstance(msgcontent, basestring):
10372 msgcontent = [msgcontent]
10373 for line in msgcontent:
10374 printer.eerror(line.strip("\n"))
10376 if self._post_mod_echo_msgs:
10377 for msg in self._post_mod_echo_msgs:
10380 if len(self._failed_pkgs_all) > 1:
10381 msg = "The following packages have " + \
10382 "failed to build or install:"
10383 prefix = bad(" * ")
10384 writemsg(prefix + "\n", noiselevel=-1)
10385 from textwrap import wrap
10386 for line in wrap(msg, 72):
10387 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10388 writemsg(prefix + "\n", noiselevel=-1)
10389 for failed_pkg in self._failed_pkgs_all:
10390 writemsg("%s\t%s\n" % (prefix,
10391 colorize("INFORM", str(failed_pkg.pkg))),
10393 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """Collect ERROR-level elog messages emitted while merging.

    Installed as portage.elog._emerge_elog_listener for the duration of
    the merge loop (see _merge); collected entries are replayed in the
    failure summary after merging stops.
    """
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    # NOTE(review): a line is elided from this excerpt here (likely an
    # "if errors:" guard) -- confirm against the full source.
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    """Find a usable build log for a failed package, if any.

    @param failed_pkg: a _failed_pkg record carrying build_dir and
        build_log attributes captured at failure time.
    """
    build_dir = failed_pkg.build_dir
    # NOTE(review): lines elided in this excerpt.
    log_paths = [failed_pkg.build_log]
    # NOTE(review): lines elided in this excerpt.
    for log_path in log_paths:
        # Size check -- presumably used to skip empty/absent logs; the
        # surrounding guard logic is elided from this excerpt.
        log_size = os.stat(log_path).st_size
def _add_packages(self):
    """Seed the scheduler's package queue from the merge list.

    Only Package instances are queued; Blocker entries appear to be
    handled elsewhere.
    """
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            # NOTE(review): branch body elided in this excerpt; "pass"
            # restored only to keep the block parseable.
            pass
def _merge_exit(self, merge):
    """Exit listener for PackageMerge tasks.

    Finalizes merge bookkeeping, returns the task's config instance to
    the pool, and refreshes the status-display counters.
    """
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    # Only a successful merge of a package that was not already
    # installed advances the progress counter.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    # NOTE(review): trailing line(s) (possibly a _schedule() call) are
    # elided from this excerpt.
def _do_merge_exit(self, merge):
    """Handle completion of a merge task.

    On failure: record a _failed_pkg entry, emit a failure message and
    bump the failure count. On success: mark the task (and any replaced
    package's uninstall task) complete and prune the resume mergelist.
    """
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            # NOTE(review): a keyword argument (likely pkg=pkg) is
            # elided from this excerpt.
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
        self._status_display.failed = len(self._failed_pkgs)
        # NOTE(review): an early "return" is likely elided here.
    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark its uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)
    # NOTE(review): lines elided in this excerpt.
    self._restart_if_necessary(pkg)
    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    # NOTE(review): the mtimedb.commit() call referenced by the comment
    # above is elided from this excerpt.
def _build_exit(self, build):
    """Exit listener for build jobs.

    A successful build is chained into the merge queue; a failure is
    recorded and its config instance returned to the pool.
    """
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)
        self._status_display.merges = len(self._task_queues.merge)
    # NOTE(review): an "else:" line very likely separates the failure
    # path below from the success branch above; elided in this excerpt.
    settings = build.settings
    build_dir = settings.get("PORTAGE_BUILDDIR")
    build_log = settings.get("PORTAGE_LOG_FILE")
    self._failed_pkgs.append(self._failed_pkg(
        build_dir=build_dir, build_log=build_log,
        # NOTE(review): a keyword argument is elided here.
        returncode=build.returncode))
    self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
    self._status_display.failed = len(self._failed_pkgs)
    self._deallocate_config(build.settings)
    # NOTE(review): a job-count decrement is likely elided here.
    self._status_display.running = self._jobs
    # NOTE(review): a trailing _schedule() call may be elided.
def _extract_exit(self, build):
    # Binary-package extraction completes the same way a build does,
    # so reuse the build exit handler.
    self._build_exit(build)
10509 def _task_complete(self, pkg):
10510 self._completed_tasks.add(pkg)
10511 self._choose_pkg_return_early = False
# NOTE(review): this is the body of Scheduler._merge(); the "def" line
# and the try/finally framing are elided from this excerpt.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Quiet lock messages and capture elog errors for the failure summary
# while the merge loop runs (possibly in the background).
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
# NOTE(review): the main loop invocation is elided in this excerpt;
# the following lines appear to be cleanup (originally in a finally).
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# Propagate the returncode of the most recent failure.
rval = failed_pkgs[-1].returncode
def _main_loop_cleanup(self):
    """Reset per-run scheduler state after the main loop exits."""
    del self._pkg_queue[:]
    self._completed_tasks.clear()
    self._choose_pkg_return_early = False
    self._status_display.reset()
    # Discard the per-run dependency graph; a new one is installed via
    # _set_digraph() for any subsequent merge list.
    self._digraph = None
    self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all its dependencies satisfied.
    """
    # Reuse the cached decision until _task_complete() invalidates it.
    if self._choose_pkg_return_early:
        # NOTE(review): return statement elided in this excerpt.
    if self._digraph is None:
        # Without a digraph, dependence can't be checked; defer unless
        # FIFO order is known to be safe.
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            # NOTE(review): return statement elided.
        return self._pkg_queue.pop(0)
    if not (self._jobs or self._task_queues.merge):
        # Nothing is running, so the head of the queue is always safe.
        return self._pkg_queue.pop(0)
    self._prune_digraph()
    # Packages still in the queue are candidates whose dependence on
    # later queue entries should be ignored.
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        # NOTE(review): per-iteration bookkeeping elided here.
        if not self._dependent_on_scheduled_merges(pkg, later):
            # NOTE(review): chosen_pkg assignment / break elided.
    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)
    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes (comment continuation elided in excerpt).
        self._choose_pkg_return_early = True
    # NOTE(review): "return chosen_pkg" elided.
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.
    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
        schedule
    @returns: True if the package is dependent, False otherwise.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    # Iterative depth-first traversal over pkg's dependencies.
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    # NOTE(review): loop header (likely "while node_stack:") elided.
    node = node_stack.pop()
    if node in traversed_nodes:
        # NOTE(review): "continue" elided.
    traversed_nodes.add(node)
    # A node only counts as a scheduled merge if it is none of:
    # already-installed nomerge, an indirect uninstall, completed, or
    # (condition elided) presumably deferred via "later".
    if not ((node.installed and node.operation == "nomerge") or \
        (node.operation == "uninstall" and \
        node not in direct_deps) or \
        node in completed_tasks or \
        # NOTE(review): final condition and "return True" elided.
    node_stack.extend(graph.child_nodes(node))
    # NOTE(review): "return False" elided.
def _allocate_config(self, root):
    """
    Allocate a unique config instance for a task in order
    to prevent interference between parallel tasks.
    """
    # Reuse a pooled instance when one is available for this root.
    if self._config_pool[root]:
        temp_settings = self._config_pool[root].pop()
    # NOTE(review): an "else:" line is elided from this excerpt.
    temp_settings = portage.config(clone=self.pkgsettings[root])
    # Since config.setcpv() isn't guaranteed to call config.reset() due to
    # performance reasons, call it here to make sure all settings from the
    # previous package get flushed out (such as PORTAGE_LOG_FILE).
    temp_settings.reload()
    temp_settings.reset()
    return temp_settings
10637 def _deallocate_config(self, settings):
10638 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    """Run the scheduling/poll loop until no work remains."""
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)
    merge_queue = self._task_queues.merge
    while self._schedule():
        if self._poll_event_handlers:
            # NOTE(review): poll-dispatch body elided in this excerpt.
    # Drain remaining work after scheduling stops.
    if not (self._jobs or merge_queue):
        # NOTE(review): body elided.
    if self._poll_event_handlers:
        # NOTE(review): body elided; surrounding loop framing is also
        # elided from this excerpt.
10661 def _keep_scheduling(self):
10662 return bool(self._pkg_queue and \
10663 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    """Schedule pending work and report whether scheduling continues."""
    self._schedule_tasks_imp()
    self._status_display.display()
    # NOTE(review): lines elided in this excerpt.
    for q in self._task_queues.values():
        # NOTE(review): loop body (likely q.schedule()) elided.
    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
    # NOTE(review): lines elided in this excerpt.
    self._schedule_tasks_imp()
    self._status_display.display()
    return self._keep_scheduling()
def _job_delay(self):
    """
    Throttle job starts under load.
    @returns: True if job scheduling should be delayed, False otherwise.
    """
    if self._jobs and self._max_load is not None:
        current_time = time.time()
        # Delay grows with the number of running jobs and is capped at
        # _job_delay_max.
        delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
        if delay > self._job_delay_max:
            delay = self._job_delay_max
        if (current_time - self._previous_job_start_time) < delay:
            # NOTE(review): "return True" elided in this excerpt.
    # NOTE(review): "return False" elided; a load-average check may
    # also be elided from this excerpt.
def _schedule_tasks_imp(self):
    """
    Start as many new tasks as current limits allow.
    @returns: True if state changed, False otherwise.
    """
    # NOTE(review): initialization (e.g. state_change = 0) and loop
    # framing are elided from this excerpt.
    if not self._keep_scheduling():
        return bool(state_change)
    if self._choose_pkg_return_early or \
        not self._can_add_job() or \
        # NOTE(review): final condition (likely self._job_delay()) elided.
        return bool(state_change)
    pkg = self._choose_pkg()
    # NOTE(review): "if pkg is None:" guard likely elided before this.
    return bool(state_change)
    # Uninstall operations don't count toward merge progress.
    if not pkg.installed:
        self._pkg_count.curval += 1
    task = self._task(pkg)
    # NOTE(review): the branch structure selecting between the three
    # enqueue paths below (direct merge / extract / build) is elided.
    merge = PackageMerge(merge=task)
    merge.addExitListener(self._merge_exit)
    self._task_queues.merge.add(merge)
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._extract_exit)
    self._task_queues.jobs.add(task)
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._build_exit)
    self._task_queues.jobs.add(task)
    return bool(state_change)
def _task(self, pkg):
    """Build a MergeListItem task for the given package.

    For non-uninstall operations, the currently installed package in
    the same slot (if any) is looked up so it can be recorded as the
    package being replaced.
    """
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        # NOTE(review): a guard (likely "if previous_cpv:") is elided
        # from this excerpt.
        previous_cpv = previous_cpv.pop()
        pkg_to_replace = self._pkg(previous_cpv,
            "installed", pkg.root_config, installed=True)
    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)
    # NOTE(review): "return task" elided from this excerpt.
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """Emit a one-line failure notice to the status display.

    @param action: verb, e.g. "install" or "emerge"
    @param preposition: e.g. "to" or "for", shown with a non-/ ROOT
    """
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg += " %s %s" % (preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        msg += ", Log file:"
    self._status_msg(msg)

    # The path is emitted as a separate status line for readability.
    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the responsibility of generating \r and \n control characters,
    to guarantee that lines are created or erased when necessary and
    appropriate.

    @param msg: a brief status message (no newlines allowed)
    """
    # In the foreground, start on a fresh line so the message is not
    # appended to build output already on the terminal.
    if not self._background:
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    # Persist only actual merge operations; blockers/nomerge entries
    # are not part of the resume list.
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]
    # NOTE(review): a trailing mtimedb.commit() is likely elided from
    # this excerpt.
def _calc_resume_list(self):
    """
    Use the current resume list to calculate a new one,
    dropping any packages with unsatisfied deps.
    @returns: True if successful, False otherwise.
    """
    print colorize("GOOD", "*** Resuming merge...")
    if self._show_list():
        if "--tree" in self.myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in reverse order:\n\n"))
        # NOTE(review): "else:" elided from this excerpt.
        portage.writemsg_stdout("\n" + \
            darkgreen("These are the packages that " + \
            "would be merged, in order:\n\n"))
    show_spinner = "--quiet" not in self.myopts and \
        "--nodeps" not in self.myopts
    # NOTE(review): "if show_spinner:" guard likely elided.
    print "Calculating dependencies ",
    myparams = create_depgraph_params(self.myopts, None)
    # NOTE(review): "try:" elided.
    success, mydepgraph, dropped_tasks = resume_depgraph(
        self.settings, self.trees, self._mtimedb, self.myopts,
        myparams, self._spinner, skip_unsatisfied=True)
    except depgraph.UnsatisfiedResumeDep, e:
        mydepgraph = e.depgraph
        dropped_tasks = set()
    # NOTE(review): "if show_spinner:" guard likely elided.
    print "\b\b... done!"
    # Deferred so it prints after mod_echo output is flushed.
    def unsatisfied_resume_dep_msg():
        mydepgraph.display_problems()
        out = portage.output.EOutput()
        out.eerror("One or more packages are either masked or " + \
            "have missing dependencies:")
        # NOTE(review): indent setup lines elided.
        show_parents = set()
        for dep in e.value:
            if dep.parent in show_parents:
                # NOTE(review): "continue" elided.
            show_parents.add(dep.parent)
            if dep.atom is None:
                out.eerror(indent + "Masked package:")
                out.eerror(2 * indent + str(dep.parent))
            # NOTE(review): "else:" elided.
            out.eerror(indent + str(dep.atom) + " pulled in by:")
            out.eerror(2 * indent + str(dep.parent))
        msg = "The resume list contains packages " + \
            "that are either masked or have " + \
            "unsatisfied dependencies. " + \
            "Please restart/continue " + \
            "the operation manually, or use --skipfirst " + \
            "to skip the first package in the list and " + \
            "any other packages that may be " + \
            "masked or have missing dependencies."
        for line in textwrap.wrap(msg, 72):
            # NOTE(review): out.eerror(line) likely elided.
    self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
    # NOTE(review): failure-path "return False" elided.
    if success and self._show_list():
        mylist = mydepgraph.altlist()
        if "--tree" in self.myopts:
            # NOTE(review): list reversal likely elided here.
            mydepgraph.display(mylist, favorites=self._favorites)
    # NOTE(review): branch framing elided between the two
    # display_problems uses below.
    self._post_mod_echo_msgs.append(mydepgraph.display_problems)
    # NOTE(review): "return False" elided.
    mydepgraph.display_problems()
    mylist = mydepgraph.altlist()
    # Break circular references so dropped graphs can be collected.
    mydepgraph.break_refs(mylist)
    mydepgraph.break_refs(dropped_tasks)
    self._mergelist = mylist
    self._set_digraph(mydepgraph.schedulerGraph())
    # NOTE(review): msg_width assignment elided.
    for task in dropped_tasks:
        if not (isinstance(task, Package) and task.operation == "merge"):
            # NOTE(review): "continue" elided.
        msg = "emerge --keep-going:" + \
            # NOTE(review): message continuation (package name) elided.
        if pkg.root != "/":
            msg += " for %s" % (pkg.root,)
        msg += " dropped due to unsatisfied dependency."
        for line in textwrap.wrap(msg, msg_width):
            eerror(line, phase="other", key=pkg.cpv)
        settings = self.pkgsettings[pkg.root]
        # Ensure that log collection from $T is disabled inside
        # elog_process(), since any logs that might exist are
        # stale (comment continuation elided in excerpt).
        settings.pop("T", None)
        portage.elog.elog_process(pkg.cpv, settings)
        self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
    # NOTE(review): "return True" elided.
def _show_list(self):
    """Return whether the resumed merge list should be displayed."""
    myopts = self.myopts
    if "--quiet" not in myopts and \
        ("--ask" in myopts or "--tree" in myopts or \
        "--verbose" in myopts):
        # NOTE(review): "return True" elided from this excerpt.
    # NOTE(review): "return False" elided.
def _world_atom(self, pkg):
    """
    Add the package to the world file, but only if
    it's supposed to be added. Otherwise, do nothing.
    """
    # Never modify world for modes that don't really install.
    if set(("--buildpkgonly", "--fetchonly",
        # NOTE(review): one option string is elided from this excerpt.
        "--oneshot", "--onlydeps",
        "--pretend")).intersection(self.myopts):
        # NOTE(review): "return" elided.
    if pkg.root != self.target_root:
        # NOTE(review): "return" elided.
    args_set = self._args_set
    # Only packages the user explicitly asked for belong in world.
    if not args_set.findAtomForPackage(pkg):
        # NOTE(review): "return" elided.
    logger = self._logger
    pkg_count = self._pkg_count
    root_config = pkg.root_config
    world_set = root_config.sets["world"]
    world_locked = False
    if hasattr(world_set, "lock"):
        # NOTE(review): world_set.lock() call elided.
        world_locked = True
    # NOTE(review): "try:" elided.
    if hasattr(world_set, "load"):
        world_set.load() # maybe it's changed on disk
    atom = create_world_atom(pkg, args_set, root_config)
    # NOTE(review): "if atom:" guard likely elided.
    if hasattr(world_set, "add"):
        self._status_msg(('Recording %s in "world" ' + \
            'favorites file...') % atom)
        logger.log(" === (%s of %s) Updating world file (%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv))
        world_set.add(atom)
    # NOTE(review): "else:" elided.
    writemsg_level('\n!!! Unable to record %s in "world"\n' % \
        (atom,), level=logging.WARN, noiselevel=-1)
    # NOTE(review): a "finally:" that unlocks world_set when
    # world_locked is set is likely elided from this excerpt.
def _pkg(self, cpv, type_name, root_config, installed=False):
    """
    Get a package instance from the cache, or create a new
    one if necessary. Raises KeyError from aux_get if it
    fails for some reason (package does not exist or is
    corrupt).
    """
    operation = "merge"
    # NOTE(review): a guard (likely "if installed:") is elided here.
    operation = "nomerge"
    if self._digraph is not None:
        # Reuse existing instance when available.
        pkg = self._digraph.get(
            (type_name, root_config.root, cpv, operation))
        if pkg is not None:
            # NOTE(review): "return pkg" elided.
    tree_type = depgraph.pkg_tree_map[type_name]
    db = root_config.trees[tree_type].dbapi
    db_keys = list(self.trees[root_config.root][
        tree_type].dbapi._aux_cache_keys)
    metadata = izip(db_keys, db.aux_get(cpv, db_keys))
    pkg = Package(cpv=cpv, metadata=metadata,
        root_config=root_config, installed=installed)
    if type_name == "ebuild":
        # Ebuilds need USE computed against the current profile/config.
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(pkg)
        pkg.metadata["USE"] = settings["PORTAGE_USE"]
    # NOTE(review): "return pkg" elided from this excerpt.
class MetadataRegen(PollScheduler):
    """Regenerate the ebuild metadata cache using parallel jobs.

    NOTE(review): several lines of this class are elided from this
    excerpt; gap markers below note where.
    """

    def __init__(self, portdb, max_jobs=None, max_load=None):
        PollScheduler.__init__(self)
        self._portdb = portdb
        if max_jobs is None:
            # NOTE(review): default assignment (likely max_jobs = 1)
            # elided from this excerpt.
        self._max_jobs = max_jobs
        self._max_load = max_load
        self._sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)
        # cpvs whose metadata generation succeeded (failures are
        # discarded in _metadata_exit).
        self._valid_pkgs = set()
        self._process_iter = self._iter_metadata_processes()

    def _iter_metadata_processes(self):
        """Yield one metadata-regeneration process per ebuild."""
        portdb = self._portdb
        valid_pkgs = self._valid_pkgs
        every_cp = portdb.cp_all()
        # Reverse-sorted so pop() walks categories in ascending order.
        every_cp.sort(reverse=True)
        # NOTE(review): loop header (likely "while every_cp:") elided.
        cp = every_cp.pop()
        portage.writemsg_stdout("Processing %s\n" % cp)
        cpv_list = portdb.cp_list(cp)
        for cpv in cpv_list:
            valid_pkgs.add(cpv)
            ebuild_path, repo_path = portdb.findname2(cpv)
            metadata_process = portdb._metadata_process(
                cpv, ebuild_path, repo_path)
            # A None process means no regeneration is needed.
            if metadata_process is None:
                # NOTE(review): "continue" elided.
            yield metadata_process

    # NOTE(review): the enclosing method header (likely "def run(self):")
    # is elided from this excerpt; the lines below appear to be its body.
    portdb = self._portdb
    from portage.cache.cache_errors import CacheError
    # NOTE(review): dead_nodes initialization elided.
    for mytree in portdb.porttrees:
        # NOTE(review): "try:" elided.
        dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
        except CacheError, e:
            portage.writemsg("Error listing cache entries for " + \
                "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
    while self._schedule():
        # NOTE(review): poll-loop body elided.
    # Drop auxdb entries that no longer correspond to an ebuild.
    for y in self._valid_pkgs:
        for mytree in portdb.porttrees:
            if portdb.findname2(y, mytree=mytree)[0]:
                dead_nodes[mytree].discard(y)
    for mytree, nodes in dead_nodes.iteritems():
        auxdb = portdb.auxdb[mytree]
        # NOTE(review): deletion loop ("for y in nodes: del auxdb[y]"
        # inside a try) elided from this excerpt.
        except (KeyError, CacheError):
            # NOTE(review): handler body elided.

    def _schedule_tasks(self):
        """
        @returns: True if there may be remaining tasks to schedule,
            False otherwise.
        """
        while self._can_add_job():
            # NOTE(review): "try:" elided.
            metadata_process = self._process_iter.next()
            except StopIteration:
                # NOTE(review): "return False" elided.
            metadata_process.scheduler = self._sched_iface
            metadata_process.addExitListener(self._metadata_exit)
            metadata_process.start()
        # NOTE(review): "return True" elided.

    def _metadata_exit(self, metadata_process):
        # NOTE(review): a line (likely a running-job decrement) is
        # elided from this excerpt.
        if metadata_process.returncode != os.EX_OK:
            # A failed process means its cpv has no valid cache entry.
            self._valid_pkgs.discard(metadata_process.cpv)
            portage.writemsg("Error processing %s, continuing...\n" % \
                (metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # NOTE(review): a guard (likely "if pargs:") is elided from this
        # excerpt; status appears to carry the failed unmerge's return
        # code -- confirm against the full source.
        self.status = pargs[0]
11139 def unmerge(root_config, myopts, unmerge_action,
11140 unmerge_files, ldpath_mtimes, autoclean=0,
11141 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11142 scheduler=None, writemsg_level=portage.util.writemsg_level):
11144 quiet = "--quiet" in myopts
11145 settings = root_config.settings
11146 sets = root_config.sets
11147 vartree = root_config.trees["vartree"]
11148 candidate_catpkgs=[]
11150 xterm_titles = "notitles" not in settings.features
11151 out = portage.output.EOutput()
11153 db_keys = list(vartree.dbapi._aux_cache_keys)
11156 pkg = pkg_cache.get(cpv)
11158 pkg = Package(cpv=cpv, installed=True,
11159 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11160 root_config=root_config,
11161 type_name="installed")
11162 pkg_cache[cpv] = pkg
11165 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11167 # At least the parent needs to exist for the lock file.
11168 portage.util.ensure_dirs(vdb_path)
11169 except portage.exception.PortageException:
11173 if os.access(vdb_path, os.W_OK):
11174 vdb_lock = portage.locks.lockdir(vdb_path)
11175 realsyslist = sets["system"].getAtoms()
11177 for x in realsyslist:
11178 mycp = portage.dep_getkey(x)
11179 if mycp in settings.getvirtuals():
11181 for provider in settings.getvirtuals()[mycp]:
11182 if vartree.dbapi.match(provider):
11183 providers.append(provider)
11184 if len(providers) == 1:
11185 syslist.extend(providers)
11187 syslist.append(mycp)
11189 mysettings = portage.config(clone=settings)
11191 if not unmerge_files:
11192 if unmerge_action == "unmerge":
11194 print bold("emerge unmerge") + " can only be used with specific package names"
11200 localtree = vartree
11201 # process all arguments and add all
11202 # valid db entries to candidate_catpkgs
11204 if not unmerge_files:
11205 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11207 #we've got command-line arguments
11208 if not unmerge_files:
11209 print "\nNo packages to unmerge have been provided.\n"
11211 for x in unmerge_files:
11212 arg_parts = x.split('/')
11213 if x[0] not in [".","/"] and \
11214 arg_parts[-1][-7:] != ".ebuild":
11215 #possible cat/pkg or dep; treat as such
11216 candidate_catpkgs.append(x)
11217 elif unmerge_action in ["prune","clean"]:
11218 print "\n!!! Prune and clean do not accept individual" + \
11219 " ebuilds as arguments;\n skipping.\n"
11222 # it appears that the user is specifying an installed
11223 # ebuild and we're in "unmerge" mode, so it's ok.
11224 if not os.path.exists(x):
11225 print "\n!!! The path '"+x+"' doesn't exist.\n"
11228 absx = os.path.abspath(x)
11229 sp_absx = absx.split("/")
11230 if sp_absx[-1][-7:] == ".ebuild":
11232 absx = "/".join(sp_absx)
11234 sp_absx_len = len(sp_absx)
11236 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11237 vdb_len = len(vdb_path)
11239 sp_vdb = vdb_path.split("/")
11240 sp_vdb_len = len(sp_vdb)
11242 if not os.path.exists(absx+"/CONTENTS"):
11243 print "!!! Not a valid db dir: "+str(absx)
11246 if sp_absx_len <= sp_vdb_len:
11247 # The Path is shorter... so it can't be inside the vdb.
11250 print "\n!!!",x,"cannot be inside "+ \
11251 vdb_path+"; aborting.\n"
11254 for idx in range(0,sp_vdb_len):
11255 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11258 print "\n!!!", x, "is not inside "+\
11259 vdb_path+"; aborting.\n"
11262 print "="+"/".join(sp_absx[sp_vdb_len:])
11263 candidate_catpkgs.append(
11264 "="+"/".join(sp_absx[sp_vdb_len:]))
11267 if (not "--quiet" in myopts):
11269 if settings["ROOT"] != "/":
11270 writemsg_level(darkgreen(newline+ \
11271 ">>> Using system located in ROOT tree %s\n" % \
11274 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11275 not ("--quiet" in myopts):
11276 writemsg_level(darkgreen(newline+\
11277 ">>> These are the packages that would be unmerged:\n"))
11279 # Preservation of order is required for --depclean and --prune so
11280 # that dependencies are respected. Use all_selected to eliminate
11281 # duplicate packages since the same package may be selected by
11284 all_selected = set()
11285 for x in candidate_catpkgs:
11286 # cycle through all our candidate deps and determine
11287 # what will and will not get unmerged
11289 mymatch = vartree.dbapi.match(x)
11290 except portage.exception.AmbiguousPackageName, errpkgs:
11291 print "\n\n!!! The short ebuild name \"" + \
11292 x + "\" is ambiguous. Please specify"
11293 print "!!! one of the following fully-qualified " + \
11294 "ebuild names instead:\n"
11295 for i in errpkgs[0]:
11296 print " " + green(i)
11300 if not mymatch and x[0] not in "<>=~":
11301 mymatch = localtree.dep_match(x)
11303 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11304 (x, unmerge_action), noiselevel=-1)
11308 {"protected": set(), "selected": set(), "omitted": set()})
11309 mykey = len(pkgmap) - 1
11310 if unmerge_action=="unmerge":
11312 if y not in all_selected:
11313 pkgmap[mykey]["selected"].add(y)
11314 all_selected.add(y)
11315 elif unmerge_action == "prune":
11316 if len(mymatch) == 1:
11318 best_version = mymatch[0]
11319 best_slot = vartree.getslot(best_version)
11320 best_counter = vartree.dbapi.cpv_counter(best_version)
11321 for mypkg in mymatch[1:]:
11322 myslot = vartree.getslot(mypkg)
11323 mycounter = vartree.dbapi.cpv_counter(mypkg)
11324 if (myslot == best_slot and mycounter > best_counter) or \
11325 mypkg == portage.best([mypkg, best_version]):
11326 if myslot == best_slot:
11327 if mycounter < best_counter:
11328 # On slot collision, keep the one with the
11329 # highest counter since it is the most
11330 # recently installed.
11332 best_version = mypkg
11334 best_counter = mycounter
11335 pkgmap[mykey]["protected"].add(best_version)
11336 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11337 if mypkg != best_version and mypkg not in all_selected)
11338 all_selected.update(pkgmap[mykey]["selected"])
11340 # unmerge_action == "clean"
11342 for mypkg in mymatch:
11343 if unmerge_action == "clean":
11344 myslot = localtree.getslot(mypkg)
11346 # since we're pruning, we don't care about slots
11347 # and put all the pkgs in together
11349 if myslot not in slotmap:
11350 slotmap[myslot] = {}
11351 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11353 for mypkg in vartree.dbapi.cp_list(
11354 portage.dep_getkey(mymatch[0])):
11355 myslot = vartree.getslot(mypkg)
11356 if myslot not in slotmap:
11357 slotmap[myslot] = {}
11358 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11360 for myslot in slotmap:
11361 counterkeys = slotmap[myslot].keys()
11362 if not counterkeys:
11365 pkgmap[mykey]["protected"].add(
11366 slotmap[myslot][counterkeys[-1]])
11367 del counterkeys[-1]
11369 for counter in counterkeys[:]:
11370 mypkg = slotmap[myslot][counter]
11371 if mypkg not in mymatch:
11372 counterkeys.remove(counter)
11373 pkgmap[mykey]["protected"].add(
11374 slotmap[myslot][counter])
11376 #be pretty and get them in order of merge:
11377 for ckey in counterkeys:
11378 mypkg = slotmap[myslot][ckey]
11379 if mypkg not in all_selected:
11380 pkgmap[mykey]["selected"].add(mypkg)
11381 all_selected.add(mypkg)
11382 # ok, now the last-merged package
11383 # is protected, and the rest are selected
11384 numselected = len(all_selected)
11385 if global_unmerge and not numselected:
11386 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11389 if not numselected:
11390 portage.writemsg_stdout(
11391 "\n>>> No packages selected for removal by " + \
11392 unmerge_action + "\n")
11396 vartree.dbapi.flush_cache()
11397 portage.locks.unlockdir(vdb_lock)
11399 from portage.sets.base import EditablePackageSet
11401 # generate a list of package sets that are directly or indirectly listed in "world",
11402 # as there is no persistent list of "installed" sets
11403 installed_sets = ["world"]
11408 pos = len(installed_sets)
11409 for s in installed_sets[pos - 1:]:
11412 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11415 installed_sets += candidates
11416 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11419 # we don't want to unmerge packages that are still listed in user-editable package sets
11420 # listed in "world" as they would be remerged on the next update of "world" or the
11421 # relevant package sets.
11422 unknown_sets = set()
11423 for cp in xrange(len(pkgmap)):
11424 for cpv in pkgmap[cp]["selected"].copy():
11428 # It could have been uninstalled
11429 # by a concurrent process.
11432 if unmerge_action != "clean" and \
11433 root_config.root == "/" and \
11434 portage.match_from_list(
11435 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11436 msg = ("Not unmerging package %s since there is no valid " + \
11437 "reason for portage to unmerge itself.") % (pkg.cpv,)
11438 for line in textwrap.wrap(msg, 75):
11440 # adjust pkgmap so the display output is correct
11441 pkgmap[cp]["selected"].remove(cpv)
11442 all_selected.remove(cpv)
11443 pkgmap[cp]["protected"].add(cpv)
11447 for s in installed_sets:
11448 # skip sets that the user requested to unmerge, and skip world
11449 # unless we're unmerging a package set (as the package would be
11450 # removed from "world" later on)
11451 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11455 if s in unknown_sets:
11457 unknown_sets.add(s)
11458 out = portage.output.EOutput()
11459 out.eerror(("Unknown set '@%s' in " + \
11460 "%svar/lib/portage/world_sets") % \
11461 (s, root_config.root))
11464 # only check instances of EditablePackageSet as other classes are generally used for
11465 # special purposes and can be ignored here (and are usually generated dynamically, so the
11466 # user can't do much about them anyway)
11467 if isinstance(sets[s], EditablePackageSet):
11469 # This is derived from a snippet of code in the
11470 # depgraph._iter_atoms_for_pkg() method.
11471 for atom in sets[s].iterAtomsForPackage(pkg):
11472 inst_matches = vartree.dbapi.match(atom)
11473 inst_matches.reverse() # descending order
11475 for inst_cpv in inst_matches:
11477 inst_pkg = _pkg(inst_cpv)
11479 # It could have been uninstalled
11480 # by a concurrent process.
11483 if inst_pkg.cp != atom.cp:
11485 if pkg >= inst_pkg:
11486 # This is descending order, and we're not
11487 # interested in any versions <= pkg given.
11489 if pkg.slot_atom != inst_pkg.slot_atom:
11490 higher_slot = inst_pkg
11492 if higher_slot is None:
11496 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11497 #print colorize("WARN", "but still listed in the following package sets:")
11498 #print " %s\n" % ", ".join(parents)
11499 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11500 print colorize("WARN", "still referenced by the following package sets:")
11501 print " %s\n" % ", ".join(parents)
11502 # adjust pkgmap so the display output is correct
11503 pkgmap[cp]["selected"].remove(cpv)
11504 all_selected.remove(cpv)
11505 pkgmap[cp]["protected"].add(cpv)
11509 numselected = len(all_selected)
11510 if not numselected:
11512 "\n>>> No packages selected for removal by " + \
11513 unmerge_action + "\n")
11516 # Unmerge order only matters in some cases
11520 selected = d["selected"]
11523 cp = portage.cpv_getkey(iter(selected).next())
11524 cp_dict = unordered.get(cp)
11525 if cp_dict is None:
11527 unordered[cp] = cp_dict
11530 for k, v in d.iteritems():
11531 cp_dict[k].update(v)
11532 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11534 for x in xrange(len(pkgmap)):
11535 selected = pkgmap[x]["selected"]
11538 for mytype, mylist in pkgmap[x].iteritems():
11539 if mytype == "selected":
11541 mylist.difference_update(all_selected)
11542 cp = portage.cpv_getkey(iter(selected).next())
11543 for y in localtree.dep_match(cp):
11544 if y not in pkgmap[x]["omitted"] and \
11545 y not in pkgmap[x]["selected"] and \
11546 y not in pkgmap[x]["protected"] and \
11547 y not in all_selected:
11548 pkgmap[x]["omitted"].add(y)
11549 if global_unmerge and not pkgmap[x]["selected"]:
11550 #avoid cluttering the preview printout with stuff that isn't getting unmerged
11552 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11553 writemsg_level(colorize("BAD","\a\n\n!!! " + \
11554 "'%s' is part of your system profile.\n" % cp),
11555 level=logging.WARNING, noiselevel=-1)
11556 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11557 "be damaging to your system.\n\n"),
11558 level=logging.WARNING, noiselevel=-1)
11559 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11560 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11561 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11563 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11565 writemsg_level(bold(cp) + ": ", noiselevel=-1)
11566 for mytype in ["selected","protected","omitted"]:
11568 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11569 if pkgmap[x][mytype]:
11570 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11571 sorted_pkgs.sort(portage.pkgcmp)
11572 for pn, ver, rev in sorted_pkgs:
11576 myversion = ver + "-" + rev
11577 if mytype == "selected":
11579 colorize("UNMERGE_WARN", myversion + " "),
11583 colorize("GOOD", myversion + " "), noiselevel=-1)
11585 writemsg_level("none ", noiselevel=-1)
11587 writemsg_level("\n", noiselevel=-1)
11589 writemsg_level("\n", noiselevel=-1)
11591 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11592 " packages are slated for removal.\n")
11593 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11594 " and " + colorize("GOOD", "'omitted'") + \
11595 " packages will not be removed.\n\n")
11597 if "--pretend" in myopts:
11598 #we're done... return
11600 if "--ask" in myopts:
11601 if userquery("Would you like to unmerge these packages?")=="No":
11602 # enter pretend mode for correct formatting of results
11603 myopts["--pretend"] = True
11608 #the real unmerging begins, after a short delay....
11609 if clean_delay and not autoclean:
11610 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11612 for x in xrange(len(pkgmap)):
11613 for y in pkgmap[x]["selected"]:
11614 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11615 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11616 mysplit = y.split("/")
11618 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11619 mysettings, unmerge_action not in ["clean","prune"],
11620 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11621 scheduler=scheduler)
11623 if retval != os.EX_OK:
11624 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11626 raise UninstallFailure(retval)
11629 if clean_world and hasattr(sets["world"], "cleanPackage"):
11630 sets["world"].cleanPackage(vartree.dbapi, y)
11631 emergelog(xterm_titles, " >>> unmerge success: "+y)
11632 if clean_world and hasattr(sets["world"], "remove"):
11633 for s in root_config.setconfig.active:
11634 sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
    # Regenerate the GNU info directory index ("dir" files) via
    # /usr/bin/install-info for every info directory whose mtime changed
    # since the previous run; prev_mtimes is updated in place on success.
    # NOTE(review): this extract is missing interleaved source lines
    # (loop headers, counter initialization, try statements); the
    # indentation below reconstructs the apparent nesting -- verify
    # against upstream emerge before editing.
    if os.path.exists("/usr/bin/install-info"):
        out = portage.output.EOutput()
            # collect info dirs whose on-disk mtime differs from the cache
            inforoot=normpath(root+z)
            if os.path.isdir(inforoot):
                infomtime = long(os.stat(inforoot).st_mtime)
                if inforoot not in prev_mtimes or \
                    prev_mtimes[inforoot] != infomtime:
                    regen_infodirs.append(inforoot)

        if not regen_infodirs:
            portage.writemsg_stdout("\n")
            out.einfo("GNU info directory index is up-to-date.")
            portage.writemsg_stdout("\n")
            out.einfo("Regenerating GNU info directory index...")

            dir_extensions = ("", ".gz", ".bz2")
            for inforoot in regen_infodirs:
                # skip unwritable or vanished directories
                if not os.path.isdir(inforoot) or \
                    not os.access(inforoot, os.W_OK):
                file_list = os.listdir(inforoot)
                dir_file = os.path.join(inforoot, "dir")
                moved_old_dir = False
                processed_count = 0
                for x in file_list:
                    # hidden files and subdirectories are not info pages
                    if x.startswith(".") or \
                        os.path.isdir(os.path.join(inforoot, x)):
                    if x.startswith("dir"):
                        # skip the index file itself (and compressed/.old forms)
                        for ext in dir_extensions:
                            if x == "dir" + ext or \
                                x == "dir" + ext + ".old":
                    if processed_count == 0:
                        # move the existing index aside before regenerating
                        for ext in dir_extensions:
                                os.rename(dir_file + ext, dir_file + ext + ".old")
                                moved_old_dir = True
                            except EnvironmentError, e:
                                if e.errno != errno.ENOENT:
                    processed_count += 1
                    myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
                    existsstr="already exists, for file `"
                    if re.search(existsstr,myso):
                        # Already exists... Don't increment the count for this.
                    elif myso[:44]=="install-info: warning: no info dir entry in ":
                        # This info file doesn't contain a DIR-header: install-info produces this
                        # (harmless) warning (the --quiet switch doesn't seem to work).
                        # Don't increment the count for this.
                        badcount=badcount+1
                        errmsg += myso + "\n"

                if moved_old_dir and not os.path.exists(dir_file):
                    # We didn't generate a new dir file, so put the old file
                    # back where it was originally found.
                    for ext in dir_extensions:
                            os.rename(dir_file + ext + ".old", dir_file + ext)
                        except EnvironmentError, e:
                            if e.errno != errno.ENOENT:

                # Clean dir.old cruft so that they don't prevent
                # unmerge of otherwise empty directories.
                for ext in dir_extensions:
                        os.unlink(dir_file + ext + ".old")
                    except EnvironmentError, e:
                        if e.errno != errno.ENOENT:

                #update mtime so we can potentially avoid regenerating.
                prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

            # summary: error count and accumulated install-info stderr output
                out.eerror("Processed %d info files; %d errors." % \
                    (icount, badcount))
                writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
                out.einfo("Processed %d info files." % (icount,))
11748 def display_news_notification(root_config, myopts):
11749 target_root = root_config.root
11750 trees = root_config.trees
11751 settings = trees["vartree"].settings
11752 portdb = trees["porttree"].dbapi
11753 vardb = trees["vartree"].dbapi
11754 NEWS_PATH = os.path.join("metadata", "news")
11755 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11756 newsReaderDisplay = False
11757 update = "--pretend" not in myopts
11759 for repo in portdb.getRepositories():
11760 unreadItems = checkUpdatedNewsItems(
11761 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11763 if not newsReaderDisplay:
11764 newsReaderDisplay = True
11766 print colorize("WARN", " * IMPORTANT:"),
11767 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11770 if newsReaderDisplay:
11771 print colorize("WARN", " *"),
11772 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
    # Print a report of preserved libraries still present on the system,
    # which packages own them, and which files still consume them, ending
    # with a pointer to `emerge @preserved-rebuild`.
    # NOTE(review): this extract is missing interleaved source lines
    # (try/else framing around linkmap.rebuild, consumers/samefile_map
    # initialization, continue statements); indentation below is a
    # best-effort reconstruction -- verify against upstream before editing.

    # Ensure the registry is consistent with existing files.
    vardbapi.plib_registry.pruneNonExisting()

    if vardbapi.plib_registry.hasEntries():
        print colorize("WARN", "!!!") + " existing preserved libs:"
        plibdata = vardbapi.plib_registry.getPreservedLibs()
        linkmap = vardbapi.linkmap
        linkmap_broken = False
        # rebuild of the linkage map can fail if scanelf & co are missing
        except portage.exception.CommandNotFound, e:
            writemsg_level("!!! Command Not Found: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            linkmap_broken = True
        search_for_owners = set()
        for cpv in plibdata:
            internal_plib_keys = set(linkmap._obj_key(f) \
                for f in plibdata[cpv])
            for f in plibdata[cpv]:
                if f in consumer_map:
                for c in linkmap.findConsumers(f):
                    # Filter out any consumers that are also preserved libs
                    # belonging to the same package as the provider.
                    if linkmap._obj_key(c) not in internal_plib_keys:
                        consumers.append(c)
                consumer_map[f] = consumers
                # one extra consumer is looked up so we can detect ">MAX"
                search_for_owners.update(consumers[:MAX_DISPLAY+1])

        owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

        for cpv in plibdata:
            print colorize("WARN", ">>>") + " package: %s" % cpv
            # group preserved files that are hardlinks to the same object
            for f in plibdata[cpv]:
                obj_key = linkmap._obj_key(f)
                alt_paths = samefile_map.get(obj_key)
                if alt_paths is None:
                    samefile_map[obj_key] = alt_paths

            for alt_paths in samefile_map.itervalues():
                alt_paths = sorted(alt_paths)
                for p in alt_paths:
                    print colorize("WARN", " * ") + " - %s" % (p,)
                consumers = consumer_map.get(f, [])
                # show at most MAX_DISPLAY consumers, then a summary line
                for c in consumers[:MAX_DISPLAY]:
                    print colorize("WARN", " * ") + " used by %s (%s)" % \
                        (c, ", ".join(x.mycpv for x in owners.get(c, [])))
                if len(consumers) == MAX_DISPLAY + 1:
                    print colorize("WARN", " * ") + " used by %s (%s)" % \
                        (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
                        for x in owners.get(consumers[MAX_DISPLAY], [])))
                elif len(consumers) > MAX_DISPLAY:
                    print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
        print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11846 def _flush_elog_mod_echo():
11848 Dump the mod_echo output now so that our other
11849 notifications are shown last.
11851 @returns: True if messages were shown, False otherwise.
11853 messages_shown = False
11855 from portage.elog import mod_echo
11856 except ImportError:
11857 pass # happens during downgrade to a version without the module
11859 messages_shown = bool(mod_echo._items)
11860 mod_echo.finalize()
11861 return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
    """
    Misc. things to run at the end of a merge session.
    Update Config Files
    Display preserved libs warnings
    @param trees: A dictionary mapping each ROOT to it's package databases
    @param mtimedb: The mtimeDB to store data needed across merge invocations
    @type mtimedb: MtimeDB class instance
    @param retval: Emerge's return value
    1. Calls sys.exit(retval)
    """
    # NOTE(review): interleaved source lines are missing from this extract
    # (settings lock/reload calls, sys.exit statements, the vdb lock
    # try/finally); verify against upstream emerge before editing.
    target_root = root_config.root
    trees = { target_root : root_config.trees }
    vardbapi = trees[target_root]["vartree"].dbapi
    settings = vardbapi.settings
    info_mtimes = mtimedb["info"]

    # Load the most current variables from ${ROOT}/etc/profile.env
    settings.regenerate()

    config_protect = settings.get("CONFIG_PROTECT","").split()
    infodirs = settings.get("INFOPATH","").split(":") + \
        settings.get("INFODIR","").split(":")

    # log the final exit status before any further processing
    if retval == os.EX_OK:
        exit_msg = " *** exiting successfully."
        exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
    emergelog("notitles" not in settings.features, exit_msg)

    _flush_elog_mod_echo()

    # unchanged counter hash means the vdb was not modified by this run
    counter_hash = settings.get("PORTAGE_COUNTER_HASH")
    if counter_hash is not None and \
        counter_hash == vardbapi._counter_hash():
        display_news_notification(root_config, myopts)
        # If vdb state has not changed then there's nothing else to do.

    vdb_path = os.path.join(target_root, portage.VDB_PATH)
    portage.util.ensure_dirs(vdb_path)
    if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
        vdb_lock = portage.locks.lockdir(vdb_path)

        if "noinfo" not in settings.features:
            chk_updated_info_files(target_root,
                infodirs, info_mtimes, retval)
            portage.locks.unlockdir(vdb_lock)

    chk_updated_cfg_files(target_root, config_protect)

    display_news_notification(root_config, myopts)
    if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
        display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
    # Scan each CONFIG_PROTECT path for pending ._cfg????_* update files
    # (via `find`) and tell the user how many config files need updating.
    # NOTE(review): interleaved source lines are missing from this extract
    # (guards, try/except wrappers, the protected-dir counter); indentation
    # below is a best-effort reconstruction -- verify against upstream.
        #number of directories with some protect files in them
        for x in config_protect:
            x = os.path.join(target_root, x.lstrip(os.path.sep))
            if not os.access(x, os.W_OK):
                # Avoid Permission denied errors generated
                mymode = os.lstat(x).st_mode
            if stat.S_ISLNK(mymode):
                # We want to treat it like a directory if it
                # is a symlink to an existing directory.
                    real_mode = os.stat(x).st_mode
                    if stat.S_ISDIR(real_mode):
            # directories get a recursive find; single files a -maxdepth 1 one
            if stat.S_ISDIR(mymode):
                mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
                mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
                    os.path.split(x.rstrip(os.path.sep))
            mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
            a = commands.getstatusoutput(mycommand)
                # find failed: report the error
                sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
                # Show the error message alone, sending stdout to /dev/null.
                os.system(mycommand + " 1>/dev/null")
                files = a[1].split('\0')
                # split always produces an empty string as the last element
                if files and not files[-1]:
                    print "\n"+colorize("WARN", " * IMPORTANT:"),
                    if stat.S_ISDIR(mymode):
                        print "%d config files in '%s' need updating." % \
                        print "config file '%s' needs updating." % x

        # final hint, shown when at least one path had pending updates
        print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
            " section of the " + bold("emerge")
        print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
    update=False):
    """
    Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
    Returns the number of unread (yet relevent) items.

    @param portdb: a portage tree database
    @type portdb: pordbapi
    @param vardb: an installed package database
    @type vardb: vardbapi
    @param NEWS_PATH: path to news items relative to the repository root
    @type NEWS_PATH: str
    @param UNREAD_PATH: path of the per-root unread-news state
    @type UNREAD_PATH: str
    @param repo_id: repository whose news items should be scanned
    @type repo_id: str
    @param update: when True, newly found items are recorded as unread
    @type update: bool
    @rtype: Integer
    @returns:
    1. The number of unread but relevant news items.
    """
    # Imported lazily so older installs without portage.news still load
    # this script.
    from portage.news import NewsManager
    manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
    return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
    """
    Insert `category` into a category-less package atom.

    @param atom: a package atom, possibly with a leading operator such
        as ">=" (e.g. ">=foo-1.0")
    @param category: the category name to splice in (e.g. "sys-apps")
    @returns: the atom with "category/" inserted just before the first
        word character, or None when the atom contains no word character
    """
    alphanum = re.search(r'\w', atom)
    if alphanum:
        # Keep any leading operator characters (">=", "~", ...) intact.
        ret = atom[:alphanum.start()] + "%s/" % category + \
            atom[alphanum.start():]
    else:
        ret = None
    return ret
def is_valid_package_atom(x):
    """
    Return True if x is a valid package atom, allowing the category
    part to be omitted: when no "/" is present, a dummy "cat/" prefix
    is inserted before the package name prior to validation.
    """
    if "/" not in x:
        alphanum = re.search(r'\w', x)
        if alphanum:
            # Insert the dummy category after any operator prefix (">=" etc).
            x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
    return portage.isvalidatom(x)
12038 def show_blocker_docs_link():
12040 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12041 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12043 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12046 def show_mask_docs():
12047 print "For more information, see the MASKED PACKAGES section in the emerge"
12048 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
    """
    Synchronize the local Portage tree (PORTDIR) with the configured
    SYNC source -- git, rsync or cvs depending on syncuri and on the
    VCS control directories present in PORTDIR -- then reload the
    emerge configuration, apply global updates, optionally regenerate
    the metadata cache, run the user's post_sync hook and warn when a
    newer portage is available.

    NOTE(review): this extract is missing many interleaved source lines
    (guards, try/while statements, loop headers, break/continue); the
    indentation below is a best-effort reconstruction of the apparent
    nesting -- verify against upstream emerge before editing.
    """
    xterm_titles = "notitles" not in settings.features
    emergelog(xterm_titles, " === sync")
    myportdir = settings.get("PORTDIR", None)
    out = portage.output.EOutput()
    # (guard for unset PORTDIR missing from extract)
        sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
    if myportdir[-1]=="/":
        myportdir=myportdir[:-1]
    # stat PORTDIR, creating it when absent
        st = os.stat(myportdir)
        print ">>>",myportdir,"not found, creating it."
        os.makedirs(myportdir,0755)
        st = os.stat(myportdir)

    spawn_kwargs["env"] = settings.environ()
    if portage.data.secpass >= 2 and \
        (st.st_uid != os.getuid() and st.st_mode & 0700 or \
        st.st_gid != os.getgid() and st.st_mode & 0070):
            homedir = pwd.getpwuid(st.st_uid).pw_dir
            # Drop privileges when syncing, in order to match
            # existing uid/gid settings.
            spawn_kwargs["uid"] = st.st_uid
            spawn_kwargs["gid"] = st.st_gid
            spawn_kwargs["groups"] = [st.st_gid]
            spawn_kwargs["env"]["HOME"] = homedir
            # keep group-writability consistent with the tree's mode
            if not st.st_mode & 0020:
                umask = umask | 0020
            spawn_kwargs["umask"] = umask

    syncuri = settings.get("SYNC", "").strip()
    # (guard for unset SYNC missing from extract)
        writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
            noiselevel=-1, level=logging.ERROR)

    # detect an existing checkout so syncuri can be overridden by the VCS
    vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
    vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))

    dosyncuri = syncuri
    updatecache_flg = False
    if myaction == "metadata":
        print "skipping sync"
        updatecache_flg = True
    elif ".git" in vcs_dirs:
        # Update existing git repository, and ignore the syncuri. We are
        # going to trust the user and assume that the user is in the branch
        # that he/she wants updated. We'll let the user manage branches with
        if portage.process.find_binary("git") is None:
            msg = ["Command not found: git",
                "Type \"emerge dev-util/git\" to enable git support."]
                writemsg_level("!!! %s\n" % l,
                    level=logging.ERROR, noiselevel=-1)
        msg = ">>> Starting git pull in %s..." % myportdir
        emergelog(xterm_titles, msg )
        writemsg_level(msg + "\n")
        exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
            (portage._shell_quote(myportdir),), **spawn_kwargs)
        if exitcode != os.EX_OK:
            msg = "!!! git pull error in %s." % myportdir
            emergelog(xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        msg = ">>> Git pull in %s successful" % myportdir
        emergelog(xterm_titles, msg)
        writemsg_level(msg + "\n")
        # git discards mtimes, so resynchronize them with the cache
        exitcode = git_sync_timestamps(settings, myportdir)
        if exitcode == os.EX_OK:
            updatecache_flg = True
    elif syncuri[:8]=="rsync://":
        # refuse to rsync over a VCS-controlled tree
        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
                "control (contains %s).\n!!! Aborting rsync sync.\n") % \
                (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
        if not os.path.exists("/usr/bin/rsync"):
            print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
            print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
        import shlex, StringIO
        if settings["PORTAGE_RSYNC_OPTS"] == "":
            portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
            rsync_opts.extend([
                "--recursive", # Recurse directories
                "--links", # Consider symlinks
                "--safe-links", # Ignore links outside of tree
                "--perms", # Preserve permissions
                "--times", # Preserive mod times
                "--compress", # Compress the data transmitted
                "--force", # Force deletion on non-empty dirs
                "--whole-file", # Don't do block transfers, only entire files
                "--delete", # Delete files that aren't in the master tree
                "--stats", # Show final statistics about what was transfered
                "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
                "--exclude=/distfiles", # Exclude distfiles from consideration
                "--exclude=/local", # Exclude local from consideration
                "--exclude=/packages", # Exclude packages from consideration
        # The below validation is not needed when using the above hardcoded
            portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
            lexer = shlex.shlex(StringIO.StringIO(
                settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
            lexer.whitespace_split = True
            rsync_opts.extend(lexer)

            # ensure mandatory options survive user overrides
            for opt in ("--recursive", "--times"):
                if opt not in rsync_opts:
                    portage.writemsg(yellow("WARNING:") + " adding required option " + \
                        "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                    rsync_opts.append(opt)

            for exclude in ("distfiles", "local", "packages"):
                opt = "--exclude=/%s" % exclude
                if opt not in rsync_opts:
                    portage.writemsg(yellow("WARNING:") + \
                        " adding required option %s not included in " % opt + \
                        "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
                    rsync_opts.append(opt)

            # extra requirements when syncing from official gentoo mirrors
            if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
                def rsync_opt_startswith(opt_prefix):
                    # True when any configured option starts with opt_prefix
                    for x in rsync_opts:
                        if x.startswith(opt_prefix):

                if not rsync_opt_startswith("--timeout="):
                    rsync_opts.append("--timeout=%d" % mytimeout)

                for opt in ("--compress", "--whole-file"):
                    if opt not in rsync_opts:
                        portage.writemsg(yellow("WARNING:") + " adding required option " + \
                            "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                        rsync_opts.append(opt)

        if "--quiet" in myopts:
            rsync_opts.append("--quiet") # Shut up a lot
            rsync_opts.append("--verbose") # Print filelist

        if "--verbose" in myopts:
            rsync_opts.append("--progress") # Progress meter for each file

        if "--debug" in myopts:
            rsync_opts.append("--checksum") # Force checksum on all files

        # Real local timestamp file.
        servertimestampfile = os.path.join(
            myportdir, "metadata", "timestamp.chk")

        content = portage.util.grabfile(servertimestampfile)
                # parse the cached server timestamp, tolerating bad content
                mytimestamp = time.mktime(time.strptime(content[0],
                    "%a, %d %b %Y %H:%M:%S +0000"))
            except (OverflowError, ValueError):

            rsync_initial_timeout = \
                int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
            rsync_initial_timeout = 15

            maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit, e:
            raise # Needed else can't exit
            maxretries=3 #default number of retries

        # split the rsync URI into user, host and port components
        user_name, hostname, port = re.split(
            "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
        if user_name is None:
        updatecache_flg=True
        all_rsync_opts = set(rsync_opts)
        lexer = shlex.shlex(StringIO.StringIO(
            settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
        lexer.whitespace_split = True
        extra_rsync_opts = list(lexer)
        all_rsync_opts.update(extra_rsync_opts)
        # choose the address family from -4/-6 style options
        family = socket.AF_INET
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
            ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6
        SERVER_OUT_OF_DATE = -1
        EXCEEDED_MAX_RETRIES = -2
        # retry loop (while header missing from extract)
                # resolve the mirror hostname to concrete addresses
                for addrinfo in socket.getaddrinfo(
                    hostname, None, family, socket.SOCK_STREAM):
                    if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips.append("[%s]" % addrinfo[4][0])
                        ips.append(addrinfo[4][0])
                from random import shuffle
            except SystemExit, e:
                raise # Needed else can't exit
            except Exception, e:
                print "Notice:",str(e)

                # substitute the resolved IP for the hostname in the URI
                dosyncuri = syncuri.replace(
                    "//" + user_name + hostname + port + "/",
                    "//" + user_name + ips[0] + port + "/", 1)
            except SystemExit, e:
                raise # Needed else can't exit
            except Exception, e:
                print "Notice:",str(e)

            if "--ask" in myopts:
                if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":

                emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
                if "--quiet" not in myopts:
                    print ">>> Starting rsync with "+dosyncuri+"..."
                emergelog(xterm_titles,
                    ">>> Starting retry %d of %d with %s" % \
                    (retries,maxretries,dosyncuri))
                print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)

            if mytimestamp != 0 and "--quiet" not in myopts:
                print ">>> Checking server timestamp ..."

            rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts

            if "--debug" in myopts:

            exitcode = os.EX_OK
            servertimestamp = 0
            # Even if there's no timestamp available locally, fetch the
            # timestamp anyway as an initial probe to verify that the server is
            # responsive. This protects us from hanging indefinitely on a
            # connection attempt to an unresponsive server which rsync's
            # --timeout option does not prevent.

            # Temporary file for remote server timestamp comparison.
            from tempfile import mkstemp
            fd, tmpservertimestampfile = mkstemp()
            mycommand = rsynccommand[:]
            mycommand.append(dosyncuri.rstrip("/") + \
                "/metadata/timestamp.chk")
            mycommand.append(tmpservertimestampfile)
            def timeout_handler(signum, frame):
                raise portage.exception.PortageException("timed out")
            signal.signal(signal.SIGALRM, timeout_handler)
            # Timeout here in case the server is unresponsive. The
            # --timeout rsync option doesn't apply to the initial
            # connection attempt.
            if rsync_initial_timeout:
                signal.alarm(rsync_initial_timeout)
                mypids.extend(portage.process.spawn(
                    mycommand, env=settings.environ(), returnpid=True))
                exitcode = os.waitpid(mypids[0], 0)[1]
                content = portage.grabfile(tmpservertimestampfile)
                if rsync_initial_timeout:
                    os.unlink(tmpservertimestampfile)
            except portage.exception.PortageException, e:
                # alarm fired: kill the probe if it is still running
                if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
                    os.kill(mypids[0], signal.SIGTERM)
                    os.waitpid(mypids[0], 0)
                # This is the same code rsync uses for timeout.
            if exitcode != os.EX_OK:
                # normalize a signal-death status into an exit code
                if exitcode & 0xff:
                    exitcode = (exitcode & 0xff) << 8
                    exitcode = exitcode >> 8
                portage.process.spawned_pids.remove(mypids[0])
                    servertimestamp = time.mktime(time.strptime(
                        content[0], "%a, %d %b %Y %H:%M:%S +0000"))
                except (OverflowError, ValueError):
            del mycommand, mypids, content
            if exitcode == os.EX_OK:
                if (servertimestamp != 0) and (servertimestamp == mytimestamp):
                    emergelog(xterm_titles,
                        ">>> Cancelling sync -- Already current.")
                    print ">>> Timestamps on the server and in the local repository are the same."
                    print ">>> Cancelling all further sync action. You are already up to date."
                    print ">>> In order to force sync, remove '%s'." % servertimestampfile
                elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
                    emergelog(xterm_titles,
                        ">>> Server out of date: %s" % dosyncuri)
                    print ">>> SERVER OUT OF DATE: %s" % dosyncuri
                    print ">>> In order to force sync, remove '%s'." % servertimestampfile
                    exitcode = SERVER_OUT_OF_DATE
                elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
                    # server is newer (or no timestamp): do the real sync
                    mycommand = rsynccommand + [dosyncuri+"/", myportdir]
                    exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
                    if exitcode in [0,1,3,4,11,14,20,21]:
                    elif exitcode in [1,3,4,11,14,20,21]:
            # Code 2 indicates protocol incompatibility, which is expected
            # for servers with protocol < 29 that don't support
            # --prune-empty-directories. Retry for a server that supports
            # at least rsync protocol version 29 (>=rsync-2.6.4).

            if retries<=maxretries:
                print ">>> Retrying..."
                updatecache_flg=False
                exitcode = EXCEEDED_MAX_RETRIES

            emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
        elif exitcode == SERVER_OUT_OF_DATE:
        elif exitcode == EXCEEDED_MAX_RETRIES:
            ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
            # per-exit-code diagnostics (surrounding if/elif lines missing)
            msg.append("Rsync has reported that there is a syntax error. Please ensure")
            msg.append("that your SYNC statement is proper.")
            msg.append("SYNC=" + settings["SYNC"])
            msg.append("Rsync has reported that there is a File IO error. Normally")
            msg.append("this means your disk is full, but can be caused by corruption")
            msg.append("on the filesystem that contains PORTDIR. Please investigate")
            msg.append("and try again after the problem has been fixed.")
            msg.append("PORTDIR=" + settings["PORTDIR"])
            msg.append("Rsync was killed before it finished.")
            msg.append("Rsync has not successfully finished. It is recommended that you keep")
            msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
            msg.append("to use rsync due to firewall or other restrictions. This should be a")
            msg.append("temporary problem unless complications exist with your network")
            msg.append("(and possibly your system's filesystem) configuration.")
    elif syncuri[:6]=="cvs://":
        if not os.path.exists("/usr/bin/cvs"):
            print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
            print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
        cvsroot=syncuri[6:]
        cvsdir=os.path.dirname(myportdir)
        if not os.path.exists(myportdir+"/CVS"):
            # no CVS dir yet: perform the initial checkout
            print ">>> Starting initial cvs checkout with "+syncuri+"..."
            if os.path.exists(cvsdir+"/gentoo-x86"):
                print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
                os.rmdir(myportdir)
                if e.errno != errno.ENOENT:
                    "!!! existing '%s' directory; exiting.\n" % myportdir)
            if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
                print "!!! cvs checkout error; exiting."
            os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
            # existing checkout: just update it
            print ">>> Starting cvs update with "+syncuri+"..."
            retval = portage.process.spawn_bash(
                "cd %s; cvs -z0 -q update -dP" % \
                (portage._shell_quote(myportdir),), **spawn_kwargs)
            if retval != os.EX_OK:
        dosyncuri = syncuri
        writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
            noiselevel=-1, level=logging.ERROR)

    # metadata-transfer disabled means no cache regeneration after sync
    if updatecache_flg and \
        myaction != "metadata" and \
        "metadata-transfer" not in settings.features:
        updatecache_flg = False

    # Reload the whole config from scratch.
    settings, trees, mtimedb = load_emerge_config(trees=trees)
    root_config = trees[settings["ROOT"]]["root_config"]
    portdb = trees[settings["ROOT"]]["porttree"].dbapi

    if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
        action_metadata(settings, portdb, myopts)

    if portage._global_updates(trees, mtimedb["updates"]):
        # Reload the whole config from scratch.
        settings, trees, mtimedb = load_emerge_config(trees=trees)
        portdb = trees[settings["ROOT"]]["porttree"].dbapi
        root_config = trees[settings["ROOT"]]["root_config"]

    # compare best visible portage version against the installed one
    mybestpv = portdb.xmatch("bestmatch-visible",
        portage.const.PORTAGE_PACKAGE_ATOM)
    mypvs = portage.best(
        trees[settings["ROOT"]]["vartree"].dbapi.match(
            portage.const.PORTAGE_PACKAGE_ATOM))

    chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())

    if myaction != "metadata":
        # run the user's executable post_sync hook, if present
        if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
            retval = portage.process.spawn(
                [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
                dosyncuri], env=settings.environ())
            if retval != os.EX_OK:
                print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")

    if(mybestpv != mypvs) and not "--quiet" in myopts:
        print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
        print red(" * ")+"that you update portage now, before any other packages are updated."
        print red(" * ")+"To update portage, run 'emerge portage' now."

    display_news_notification(root_config, myopts)
12555 def git_sync_timestamps(settings, portdir):
# Purpose: after a git-based tree sync, make on-disk ebuild/eclass mtimes
# agree with the mtimes recorded in metadata/cache, since git itself does
# not preserve timestamps.
# NOTE(review): this chunk is an elided listing — gaps in the inline line
# numbers mean statements (try/except/return/continue lines) are missing
# from view; comments below are hedged where those lines would be needed.
12557 Since git doesn't preserve timestamps, synchronize timestamps between
12558 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12559 for a given file as long as the file in the working tree is not modified
12560 (relative to HEAD).
# Nothing to do without a metadata cache (the elided line after the
# isdir test is presumably an early return — TODO confirm).
12562 cache_dir = os.path.join(portdir, "metadata", "cache")
12563 if not os.path.isdir(cache_dir):
12565 writemsg_level(">>> Synchronizing timestamps...\n")
# Open the flat metadata cache read-only; abort on CacheError.
12567 from portage.cache.cache_errors import CacheError
12569 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12570 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12571 except CacheError, e:
12572 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12573 level=logging.ERROR, noiselevel=-1)
# Collect the set of known eclass names (strip the ".eclass" suffix,
# 7 characters) from the tree's eclass directory.
12576 ec_dir = os.path.join(portdir, "eclass")
12578 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12579 if f.endswith(".eclass"))
12581 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12582 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified relative to HEAD;
# those files must NOT have their mtimes rewritten from the cache.
12585 args = [portage.const.BASH_BINARY, "-c",
12586 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12587 portage._shell_quote(portdir)]
12589 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12590 modified_files = set(l.rstrip("\n") for l in proc.stdout)
# rval presumably comes from an elided proc.wait() — TODO confirm.
12592 if rval != os.EX_OK:
12595 modified_eclasses = set(ec for ec in ec_names \
12596 if os.path.join("eclass", ec + ".eclass") in modified_files)
# Track eclass mtimes already applied so later entries can be checked
# for consistency against earlier ones.
12598 updated_ec_mtimes = {}
# Walk every cache entry; skip/flag anything invalid, locally modified,
# or referencing unknown eclasses, then stamp the recorded mtimes onto
# the ebuild and its eclasses.
12600 for cpv in cache_db:
12601 cpv_split = portage.catpkgsplit(cpv)
12602 if cpv_split is None:
12603 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12604 level=logging.ERROR, noiselevel=-1)
12607 cat, pn, ver, rev = cpv_split
12608 cat, pf = portage.catsplit(cpv)
12609 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
# Skip ebuilds with local modifications (elided continue, presumably).
12610 if relative_eb_path in modified_files:
12614 cache_entry = cache_db[cpv]
12615 eb_mtime = cache_entry.get("_mtime_")
12616 ec_mtimes = cache_entry.get("_eclasses_")
12618 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12619 level=logging.ERROR, noiselevel=-1)
12621 except CacheError, e:
12622 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12623 (cpv, e), level=logging.ERROR, noiselevel=-1)
12626 if eb_mtime is None:
12627 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12628 level=logging.ERROR, noiselevel=-1)
# Cache stores the mtime as a string; coerce (ValueError handled in an
# elided except clause, presumably).
12632 eb_mtime = long(eb_mtime)
12634 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12635 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12638 if ec_mtimes is None:
12639 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
12640 level=logging.ERROR, noiselevel=-1)
# If any eclass this entry inherits is locally modified, leave it alone.
12643 if modified_eclasses.intersection(ec_mtimes):
12646 missing_eclasses = set(ec_mtimes).difference(ec_names)
12647 if missing_eclasses:
12648 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
12649 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
12653 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): os.stat() returns a stat result object, yet
# current_eb_mtime is later compared to a long — the elided suffix is
# presumably [stat.ST_MTIME] or .st_mtime; verify against upstream.
12655 current_eb_mtime = os.stat(eb_path)
12657 writemsg_level("!!! Missing ebuild: %s\n" % \
12658 (cpv,), level=logging.ERROR, noiselevel=-1)
# Refuse to stamp an entry whose eclass mtimes disagree with mtimes we
# already applied for the same eclass via another entry.
12661 inconsistent = False
12662 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12663 updated_mtime = updated_ec_mtimes.get(ec)
12664 if updated_mtime is not None and updated_mtime != ec_mtime:
12665 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
12666 (cpv, ec), level=logging.ERROR, noiselevel=-1)
12667 inconsistent = True
# Apply the cached mtime to the ebuild (atime and mtime both set).
12673 if current_eb_mtime != eb_mtime:
12674 os.utime(eb_path, (eb_mtime, eb_mtime))
# Apply each eclass mtime once, recording it for consistency checks.
12676 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12677 if ec in updated_ec_mtimes:
12679 ec_path = os.path.join(ec_dir, ec + ".eclass")
12680 current_mtime = long(os.stat(ec_path).st_mtime)
12681 if current_mtime != ec_mtime:
12682 os.utime(ec_path, (ec_mtime, ec_mtime))
12683 updated_ec_mtimes[ec] = ec_mtime
12687 def action_metadata(settings, portdb, myopts):
# Purpose: transfer/refresh the Portage dependency cache
# (PORTAGE_DEPCACHEDIR) from the tree's metadata/cache, printing either a
# percentage progress display or nothing when --quiet is given.
# NOTE(review): elided listing — gaps in the inline numbers hide lines.
12688 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Temporarily relax umask so cache files are group-writable (0002).
12689 old_umask = os.umask(0002)
12690 cachedir = os.path.normpath(settings.depcachedir)
# Sanity guard: refuse to operate directly on a primary root directory,
# since the cache dir may be cleaned/recreated.
12691 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
12692 "/lib", "/opt", "/proc", "/root", "/sbin",
12693 "/sys", "/tmp", "/usr", "/var"]:
12694 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12695 "ROOT DIRECTORY ON YOUR SYSTEM."
12696 print >> sys.stderr, \
12697 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12699 if not os.path.exists(cachedir):
# Source side: the tree's flat metadata cache, opened read-only.
12702 ec = portage.eclass_cache.cache(portdb.porttree_root)
12703 myportdir = os.path.realpath(settings["PORTDIR"])
12704 cm = settings.load_best_module("portdbapi.metadbmodule")(
12705 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12707 from portage.cache import util
# Progress reporter: iterates cpvs itself and writes a percentage to
# stdout via update()/finish(), building on the quiet mirroring base.
12709 class percentage_noise_maker(util.quiet_mirroring):
12710 def __init__(self, dbapi):
12712 self.cp_all = dbapi.cp_all()
12713 l = len(self.cp_all)
12714 self.call_update_min = 100000000
# One percent's worth of cp entries, used as the update threshold.
12715 self.min_cp_all = l/100.0
12719 def __iter__(self):
12720 for x in self.cp_all:
12722 if self.count > self.min_cp_all:
12723 self.call_update_min = 0
12725 for y in self.dbapi.cp_list(x):
# NOTE(review): "call_update_mine" (with trailing 'e') does not match
# "call_update_min" used elsewhere — possible typo; verify upstream.
12727 self.call_update_mine = 0
12729 def update(self, *arg):
# pstr holds the last printed percentage; backspace over it and rewrite.
12730 try: self.pstr = int(self.pstr) + 1
12731 except ValueError: self.pstr = 1
12732 sys.stdout.write("%s%i%%" % \
12733 ("\b" * (len(str(self.pstr))+1), self.pstr))
12735 self.call_update_min = 10000000
12737 def finish(self, *arg):
12738 sys.stdout.write("\b\b\b\b100%\n")
# --quiet: plain cpv generator and a silent noise maker; otherwise the
# percentage reporter doubles as both source iterator and reporter.
12741 if "--quiet" in myopts:
12742 def quicky_cpv_generator(cp_all_list):
12743 for x in cp_all_list:
12744 for y in portdb.cp_list(x):
12746 source = quicky_cpv_generator(portdb.cp_all())
12747 noise_maker = portage.cache.util.quiet_mirroring()
12749 noise_maker = source = percentage_noise_maker(portdb)
# Perform the actual cache transfer into portdb's auxdb for this tree.
12750 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12751 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask.
12754 os.umask(old_umask)
# Purpose: regenerate metadata cache entries for the whole tree
# (emerge --regen), optionally parallelized via max_jobs/max_load.
12756 def action_regen(settings, portdb, max_jobs, max_load):
12757 xterm_titles = "notitles" not in settings.features
12758 emergelog(xterm_titles, " === regen")
12759 #regenerate cache entries
12760 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block on it (elided
# try/except around this — the except clause below belongs to it).
12762 os.close(sys.stdin.fileno())
12763 except SystemExit, e:
12764 raise # Needed else can't exit
# Delegate the actual work to the MetadataRegen scheduler; the elided
# lines presumably run it before the final "done!" message.
12769 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12772 portage.writemsg_stdout("done!\n")
# Purpose: run the pkg_config() phase of a single installed package
# (emerge --config <atom>), resolving ambiguous matches interactively
# when --ask is given.
12774 def action_config(settings, trees, myopts, myfiles):
# Exactly one atom is supported.
12775 if len(myfiles) != 1:
12776 print red("!!! config can only take a single package atom at this time\n")
12778 if not is_valid_package_atom(myfiles[0]):
12779 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12781 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12782 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match against installed packages only (vartree).
12786 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12787 except portage.exception.AmbiguousPackageName, e:
12788 # Multiple matches thrown from cpv_expand
12791 print "No packages found.\n"
# Multiple installed versions matched: with --ask, offer a numbered
# menu (plus "X" to exit); without it, just list them and bail.
12793 elif len(pkgs) > 1:
12794 if "--ask" in myopts:
12796 print "Please select a package to configure:"
12800 options.append(str(idx))
12801 print options[-1]+") "+pkg
12803 options.append("X")
12804 idx = userquery("Selection?", options)
# Menu is 1-based; convert back to a list index.
12807 pkg = pkgs[int(idx)-1]
12809 print "The following packages available:"
12812 print "\nPlease use a specific atom or the --ask option."
12818 if "--ask" in myopts:
12819 if userquery("Ready to configure "+pkg+"?") == "No":
12822 print "Configuring pkg..."
# Run the "config" phase via doebuild on a cloned config, then clean up.
12824 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12825 mysettings = portage.config(clone=settings)
12826 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12827 debug = mysettings.get("PORTAGE_DEBUG") == "1"
12828 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
# NOTE(review): comparing a string setting to the int 1 is always False
# (line 12827 above correctly compares to "1") — likely a bug; confirm
# against upstream before changing.
12830 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12831 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12832 if retval == os.EX_OK:
12833 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12834 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Purpose: implement `emerge --info`: print system settings (versions,
# toolchain packages, key variables, USE flags) and, for any atoms given
# on the command line, per-package build settings that differ from the
# current global configuration.
# NOTE(review): elided listing — gaps in the inline numbers hide lines.
12837 def action_info(settings, trees, myopts, myfiles):
12838 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12839 settings.profile_path, settings["CHOST"],
12840 trees[settings["ROOT"]]["vartree"].dbapi)
# Banner: "System Settings" centered between two '=' rules
# (header_width is defined in an elided line — TODO confirm).
12842 header_title = "System Settings"
12844 print header_width * "="
12845 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12846 print header_width * "="
12847 print "System uname: "+platform.platform(aliased=1)
# Report the tree's last sync timestamp from metadata/timestamp.chk.
12849 lastSync = portage.grabfile(os.path.join(
12850 settings["PORTDIR"], "metadata", "timestamp.chk"))
12851 print "Timestamp of tree:",
# Probe distcc/ccache versions and whether the features are enabled.
12857 output=commands.getstatusoutput("distcc --version")
12859 print str(output[1].split("\n",1)[0]),
12860 if "distcc" in settings.features:
12865 output=commands.getstatusoutput("ccache -V")
12867 print str(output[1].split("\n",1)[0]),
12868 if "ccache" in settings.features:
# Toolchain/package versions: a fixed base list plus any extras the
# profile declares in profiles/info_pkgs, de-duplicated.
12873 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12874 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
12875 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12876 myvars = portage.util.unique_array(myvars)
12880 if portage.isvalidatom(x):
12881 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
# Keep only (pn, ver, rev) from each catpkgsplit tuple, sorted by
# version for display.
12882 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12883 pkg_matches.sort(portage.pkgcmp)
12885 for pn, ver, rev in pkg_matches:
12887 pkgs.append(ver + "-" + rev)
12891 pkgs = ", ".join(pkgs)
12892 print "%-20s %s" % (x+":", pkgs)
12894 print "%-20s %s" % (x+":", "[NOT VALID]")
12896 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: everything with --verbose, otherwise a curated list
# plus profile-declared extras from profiles/info_vars.
12898 if "--verbose" in myopts:
12899 myvars=settings.keys()
12901 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12902 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12903 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12904 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12906 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12908 myvars = portage.util.unique_array(myvars)
12914 print '%s="%s"' % (x, settings[x])
# USE display: strip USE_EXPAND-derived flags out of the plain USE set
# and print each expanded variable (e.g. VIDEO_CARDS) separately.
12916 use = set(settings["USE"].split())
12917 use_expand = settings["USE_EXPAND"].split()
12919 for varname in use_expand:
12920 flag_prefix = varname.lower() + "_"
12921 for f in list(use):
12922 if f.startswith(flag_prefix):
12926 print 'USE="%s"' % " ".join(use),
12927 for varname in use_expand:
12928 myval = settings.get(varname)
12930 print '%s="%s"' % (varname, myval),
12933 unset_vars.append(x)
12935 print "Unset: "+", ".join(unset_vars)
# --debug: dump cvs_id_string of every portage submodule that has one.
12938 if "--debug" in myopts:
12939 for x in dir(portage):
12940 module = getattr(portage, x)
12941 if "cvs_id_string" in dir(module):
12942 print "%s: %s" % (str(x), str(module.cvs_id_string))
12944 # See if we can find any packages installed matching the strings
12945 # passed on the command line
12947 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12948 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12950 mypkgs.extend(vardb.match(x))
12952 # If some packages were found...
12954 # Get our global settings (we only print stuff if it varies from
12955 # the current config)
12956 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12957 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12959 pkgsettings = portage.config(clone=settings)
12961 for myvar in mydesiredvars:
# Compare as sets of whitespace-separated tokens, not raw strings.
12962 global_vals[myvar] = set(settings.get(myvar, "").split())
12964 # Loop through each package
12965 # Only print settings if they differ from global settings
12966 header_title = "Package Settings"
12967 print header_width * "="
12968 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12969 print header_width * "="
12970 from portage.output import EOutput
12973 # Get all package specific variables
12974 auxvalues = vardb.aux_get(pkg, auxkeys)
12976 for i in xrange(len(auxkeys)):
12977 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12979 for myvar in mydesiredvars:
12980 # If the package variable doesn't match the
12981 # current global variable, something has changed
12982 # so set diff_found so we know to print
12983 if valuesmap[myvar] != global_vals[myvar]:
12984 diff_values[myvar] = valuesmap[myvar]
# Normalize recorded USE against the package's IUSE before comparing.
12985 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12986 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12987 pkgsettings.reset()
12988 # If a matching ebuild is no longer available in the tree, maybe it
12989 # would make sense to compare against the flags for the best
12990 # available version with the same slot?
12992 if portdb.cpv_exists(pkg):
12994 pkgsettings.setcpv(pkg, mydb=mydb)
12995 if valuesmap["IUSE"].intersection(
12996 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12997 diff_values["USE"] = valuesmap["USE"]
12998 # If a difference was found, print the info for
13001 # Print package info
13002 print "%s was built with the following:" % pkg
13003 for myvar in mydesiredvars + ["USE"]:
13004 if myvar in diff_values:
13005 mylist = list(diff_values[myvar])
13007 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Additionally run the ebuild's pkg_info() phase for each package.
13009 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13010 ebuildpath = vardb.findname(pkg)
13011 if not ebuildpath or not os.path.exists(ebuildpath):
13012 out.ewarn("No ebuild found for '%s'" % pkg)
13014 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
# NOTE(review): comparing a string setting to the int 1 is always False
# (same suspect pattern as in action_config) — verify against upstream.
13015 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13016 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Purpose: implement `emerge --search`: run each command-line term
# through the search class and print the accumulated results.
13019 def action_search(root_config, myopts, myfiles, spinner):
13021 print "emerge: no search terms provided."
# Options toggle description search, verbosity, and binary-package use.
13023 searchinstance = search(root_config,
13024 spinner, "--searchdesc" in myopts,
13025 "--quiet" not in myopts, "--usepkg" in myopts,
13026 "--usepkgonly" in myopts)
13027 for mysearch in myfiles:
# Terms are regular expressions; report bad patterns per-term (the
# try line is elided from this listing).
13029 searchinstance.execute(mysearch)
13030 except re.error, comment:
13031 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13033 searchinstance.output()
# Purpose: implement `emerge --depclean` / `--prune`: compute the set of
# installed packages not required by the world/system sets (or, with
# arguments, by everything else installed), protect sole library
# providers with live consumers, topologically order the removals, and
# hand the final list to unmerge().
# NOTE(review): elided listing — gaps in the inline line numbers hide
# statements (try/except/return/continue and some assignments); comments
# are hedged wherever those lines would be needed for certainty.
13035 def action_depclean(settings, trees, ldpath_mtimes,
13036 myopts, action, myfiles, spinner):
13037 # Kill packages that aren't explicitly merged or are required as a
13038 # dependency of another package. World file is explicit.
13040 # Global depclean or prune operations are not very safe when there are
13041 # missing dependencies since it's unknown how badly incomplete
13042 # the dependency graph is, and we might accidentally remove packages
13043 # that should have been pulled into the graph. On the other hand, it's
13044 # relatively safe to ignore missing deps when only asked to remove
13045 # specific packages.
13046 allow_missing_deps = len(myfiles) > 0
# Safety preamble shown before a full (argument-less) depclean.
13049 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13050 msg.append("mistakes. Packages that are part of the world set will always\n")
13051 msg.append("be kept. They can be manually added to this set with\n")
13052 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13053 msg.append("package.provided (see portage(5)) will be removed by\n")
13054 msg.append("depclean, even if they are part of the world set.\n")
13056 msg.append("As a safety measure, depclean will not remove any packages\n")
13057 msg.append("unless *all* required dependencies have been resolved. As a\n")
13058 msg.append("consequence, it is often necessary to run %s\n" % \
13059 good("`emerge --update"))
13060 msg.append(good("--newuse --deep @system @world`") + \
13061 " prior to depclean.\n")
13063 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13064 portage.writemsg_stdout("\n")
13066 portage.writemsg_stdout(colorize("WARN", " * ") + x)
# Gather root configuration and the required (protected) package sets.
13068 xterm_titles = "notitles" not in settings.features
13069 myroot = settings["ROOT"]
13070 root_config = trees[myroot]["root_config"]
13071 getSetAtoms = root_config.setconfig.getSetAtoms
13072 vardb = trees[myroot]["vartree"].dbapi
13074 required_set_names = ("system", "world")
13078 for s in required_set_names:
13079 required_sets[s] = InternalPackageSet(
13080 initial_atoms=getSetAtoms(s))
13083 # When removing packages, use a temporary version of world
13084 # which excludes packages that are intended to be eligible for
13086 world_temp_set = required_sets["world"]
13087 system_set = required_sets["system"]
# Warn loudly (with a countdown unless --pretend) when either protected
# set is empty, since proceeding could break the installation.
13089 if not system_set or not world_temp_set:
13092 writemsg_level("!!! You have no system list.\n",
13093 level=logging.ERROR, noiselevel=-1)
13095 if not world_temp_set:
13096 writemsg_level("!!! You have no world file.\n",
13097 level=logging.WARNING, noiselevel=-1)
13099 writemsg_level("!!! Proceeding is likely to " + \
13100 "break your installation.\n",
13101 level=logging.WARNING, noiselevel=-1)
13102 if "--pretend" not in myopts:
13103 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13105 if action == "depclean":
13106 emergelog(xterm_titles, " >>> depclean")
# Build the set of argument atoms, expanding short names and rejecting
# invalid or ambiguous ones.
13109 args_set = InternalPackageSet()
13112 if not is_valid_package_atom(x):
13113 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13114 level=logging.ERROR, noiselevel=-1)
13115 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13118 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13119 except portage.exception.AmbiguousPackageName, e:
13120 msg = "The short ebuild name \"" + x + \
13121 "\" is ambiguous. Please specify " + \
13122 "one of the following " + \
13123 "fully-qualified ebuild names instead:"
13124 for line in textwrap.wrap(msg, 70):
13125 writemsg_level("!!! %s\n" % (line,),
13126 level=logging.ERROR, noiselevel=-1)
13128 writemsg_level(" %s\n" % colorize("INFORM", i),
13129 level=logging.ERROR, noiselevel=-1)
13130 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Nothing installed matched the arguments: report and stop.
13133 matched_packages = False
13136 matched_packages = True
13138 if not matched_packages:
13139 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the dependency graph of everything that must be KEPT.
13143 writemsg_level("\nCalculating dependencies ")
13144 resolver_params = create_depgraph_params(myopts, "remove")
13145 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13146 vardb = resolver.trees[myroot]["vartree"].dbapi
13148 if action == "depclean":
13151 # Pull in everything that's installed but not matched
13152 # by an argument atom since we don't want to clean any
13153 # package if something depends on it.
13155 world_temp_set.clear()
13160 if args_set.findAtomForPackage(pkg) is None:
13161 world_temp_set.add("=" + pkg.cpv)
# On an invalid PROVIDE/depstring, keep the package (fail safe).
13163 except portage.exception.InvalidDependString, e:
13164 show_invalid_depstring_notice(pkg,
13165 pkg.metadata["PROVIDE"], str(e))
13167 world_temp_set.add("=" + pkg.cpv)
13170 elif action == "prune":
13172 # Pull in everything that's installed since we don't
13173 # to prune a package if something depends on it.
13174 world_temp_set.clear()
13175 world_temp_set.update(vardb.cp_all())
# With no arguments, prune targets every slotted (multi-version) cp.
13179 # Try to prune everything that's slotted.
13180 for cp in vardb.cp_all():
13181 if len(vardb.cp_list(cp)) > 1:
13184 # Remove atoms from world that match installed packages
13185 # that are also matched by argument atoms, but do not remove
13186 # them if they match the highest installed version.
13189 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13190 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13191 raise AssertionError("package expected in matches: " + \
13192 "cp = %s, cpv = %s matches = %s" % \
13193 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs results are version-ordered, so [-1] is the highest.
13195 highest_version = pkgs_for_cp[-1]
13196 if pkg == highest_version:
13197 # pkg is the highest version
13198 world_temp_set.add("=" + pkg.cpv)
13201 if len(pkgs_for_cp) <= 1:
13202 raise AssertionError("more packages expected: " + \
13203 "cp = %s, cpv = %s matches = %s" % \
13204 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13207 if args_set.findAtomForPackage(pkg) is None:
13208 world_temp_set.add("=" + pkg.cpv)
13210 except portage.exception.InvalidDependString, e:
13211 show_invalid_depstring_notice(pkg,
13212 pkg.metadata["PROVIDE"], str(e))
13214 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the protected sets as SetArg roots, then let it
# complete the graph of required packages.
13218 for s, package_set in required_sets.iteritems():
13219 set_atom = SETPREFIX + s
13220 set_arg = SetArg(arg=set_atom, set=package_set,
13221 root_config=resolver.roots[myroot])
13222 set_args[s] = set_arg
13223 for atom in set_arg.set:
13224 resolver._dep_stack.append(
13225 Dependency(atom=atom, root=myroot, parent=set_arg))
13226 resolver.digraph.add(set_arg, None)
13228 success = resolver._complete_graph()
13229 writemsg_level("\b\b... done!\n")
13231 resolver.display_problems()
# Helper: report unresolved runtime deps; returns truthy (and prints an
# explanation) when they should block the operation.
13236 def unresolved_deps():
13238 unresolvable = set()
13239 for dep in resolver._initially_unsatisfied_deps:
# Only hard (stronger than SOFT) deps of real packages count.
13240 if isinstance(dep.parent, Package) and \
13241 (dep.priority > UnmergeDepPriority.SOFT):
13242 unresolvable.add((dep.atom, dep.parent.cpv))
13244 if not unresolvable:
13247 if unresolvable and not allow_missing_deps:
13248 prefix = bad(" * ")
13250 msg.append("Dependencies could not be completely resolved due to")
13251 msg.append("the following required packages not being installed:")
13253 for atom, parent in unresolvable:
13254 msg.append(" %s pulled in by:" % (atom,))
13255 msg.append(" %s" % (parent,))
13257 msg.append("Have you forgotten to run " + \
13258 good("`emerge --update --newuse --deep @system @world`") + " prior")
13259 msg.append(("to %s? It may be necessary to manually " + \
13260 "uninstall packages that no longer") % action)
13261 msg.append("exist in the portage tree since " + \
13262 "it may not be possible to satisfy their")
13263 msg.append("dependencies. Also, be aware of " + \
13264 "the --with-bdeps option that is documented")
13265 msg.append("in " + good("`man emerge`") + ".")
13266 if action == "prune":
13268 msg.append("If you would like to ignore " + \
13269 "dependencies then use %s." % good("--nodeps"))
13270 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13271 level=logging.ERROR, noiselevel=-1)
13275 if unresolved_deps():
# Count the packages that must be kept, for the summary at the end.
13278 graph = resolver.digraph.copy()
13279 required_pkgs_total = 0
13281 if isinstance(node, Package):
13282 required_pkgs_total += 1
# Helper: with --verbose, show who pulls in a package that survives.
13284 def show_parents(child_node):
13285 parent_nodes = graph.parent_nodes(child_node)
13286 if not parent_nodes:
13287 # With --prune, the highest version can be pulled in without any
13288 # real parent since all installed packages are pulled in. In that
13289 # case there's nothing to show here.
13292 for node in parent_nodes:
13293 parent_strs.append(str(getattr(node, "cpv", node)))
13296 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13297 for parent_str in parent_strs:
13298 msg.append(" %s\n" % (parent_str,))
13300 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Helper: everything installed (or argument-matched) that is NOT in the
# required graph is a removal candidate.
13302 def create_cleanlist():
13303 pkgs_to_remove = []
13305 if action == "depclean":
13311 arg_atom = args_set.findAtomForPackage(pkg)
13312 except portage.exception.InvalidDependString:
13313 # this error has already been displayed by now
13317 if pkg not in graph:
13318 pkgs_to_remove.append(pkg)
13319 elif "--verbose" in myopts:
13324 if pkg not in graph:
13325 pkgs_to_remove.append(pkg)
13326 elif "--verbose" in myopts:
13329 elif action == "prune":
13330 # Prune really uses all installed instead of world. It's not
13331 # a real reverse dependency so don't display it as such.
13332 graph.remove(set_args["world"])
13334 for atom in args_set:
13335 for pkg in vardb.match_pkgs(atom):
13336 if pkg not in graph:
13337 pkgs_to_remove.append(pkg)
13338 elif "--verbose" in myopts:
13341 if not pkgs_to_remove:
13343 ">>> No packages selected for removal by %s\n" % action)
13344 if "--verbose" not in myopts:
13346 ">>> To see reverse dependencies, use %s\n" % \
13348 if action == "prune":
13350 ">>> To ignore dependencies, use %s\n" % \
13353 return pkgs_to_remove
13355 cleanlist = create_cleanlist()
13358 clean_set = set(cleanlist)
13360 # Check if any of these package are the sole providers of libraries
13361 # with consumers that have not been selected for removal. If so, these
13362 # packages and any dependencies need to be added to the graph.
13363 real_vardb = trees[myroot]["vartree"].dbapi
13364 linkmap = real_vardb.linkmap
13365 liblist = linkmap.listLibraryObjects()
13366 consumer_cache = {}
13367 provider_cache = {}
13371 writemsg_level(">>> Checking for lib consumers...\n")
# Pass 1: for each removal candidate, find the libraries it owns and
# their external consumers (consumers inside the package itself are
# discounted below).
13373 for pkg in cleanlist:
13374 pkg_dblink = real_vardb._dblink(pkg.cpv)
13375 provided_libs = set()
13377 for lib in liblist:
13378 if pkg_dblink.isowner(lib, myroot):
13379 provided_libs.add(lib)
13381 if not provided_libs:
13385 for lib in provided_libs:
13386 lib_consumers = consumer_cache.get(lib)
13387 if lib_consumers is None:
13388 lib_consumers = linkmap.findConsumers(lib)
13389 consumer_cache[lib] = lib_consumers
13391 consumers[lib] = lib_consumers
# Discount consumers owned by the same package being removed.
13396 for lib, lib_consumers in consumers.items():
13397 for consumer_file in list(lib_consumers):
13398 if pkg_dblink.isowner(consumer_file, myroot):
13399 lib_consumers.remove(consumer_file)
13400 if not lib_consumers:
# For each remaining consumer, record alternative providers of the same
# soname so sole-provider packages can be detected.
13406 for lib, lib_consumers in consumers.iteritems():
13408 soname = soname_cache.get(lib)
13410 soname = linkmap.getSoname(lib)
13411 soname_cache[lib] = soname
13413 consumer_providers = []
13414 for lib_consumer in lib_consumers:
# NOTE(review): cache is read with key `lib` but written with key
# `lib_consumer` — looks like a mismatch (findProviders takes the
# consumer); an elided line may explain it, but verify upstream.
13415 providers = provider_cache.get(lib)
13416 if providers is None:
13417 providers = linkmap.findProviders(lib_consumer)
13418 provider_cache[lib_consumer] = providers
13419 if soname not in providers:
13420 # Why does this happen?
13422 consumer_providers.append(
13423 (lib_consumer, providers[soname]))
13425 consumers[lib] = consumer_providers
13427 consumer_map[pkg] = consumers
# Pass 2: map every consumer/provider file back to its owning package in
# one bulk query.
13431 search_files = set()
13432 for consumers in consumer_map.itervalues():
13433 for lib, consumer_providers in consumers.iteritems():
13434 for lib_consumer, providers in consumer_providers:
13435 search_files.add(lib_consumer)
13436 search_files.update(providers)
13438 writemsg_level(">>> Assigning files to packages...\n")
13439 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13441 for pkg, consumers in consumer_map.items():
13442 for lib, consumer_providers in consumers.items():
13443 lib_consumers = set()
13445 for lib_consumer, providers in consumer_providers:
13446 owner_set = file_owners.get(lib_consumer)
13447 provider_dblinks = set()
13448 provider_pkgs = set()
# If another (kept) package also provides the soname, this consumer is
# not blocked by removing pkg.
13450 if len(providers) > 1:
13451 for provider in providers:
13452 provider_set = file_owners.get(provider)
13453 if provider_set is not None:
13454 provider_dblinks.update(provider_set)
13456 if len(provider_dblinks) > 1:
13457 for provider_dblink in provider_dblinks:
13458 pkg_key = ("installed", myroot,
13459 provider_dblink.mycpv, "nomerge")
13460 if pkg_key not in clean_set:
13461 provider_pkgs.add(vardb.get(pkg_key))
13466 if owner_set is not None:
13467 lib_consumers.update(owner_set)
# Consumers that are themselves being removed don't count.
13469 for consumer_dblink in list(lib_consumers):
13470 if ("installed", myroot, consumer_dblink.mycpv,
13471 "nomerge") in clean_set:
13472 lib_consumers.remove(consumer_dblink)
13476 consumers[lib] = lib_consumers
13480 del consumer_map[pkg]
13483 # TODO: Implement a package set for rebuilding consumer packages.
# Report the packages withheld from removal to protect link-level deps.
13485 msg = "In order to avoid breakage of link level " + \
13486 "dependencies, one or more packages will not be removed. " + \
13487 "This can be solved by rebuilding " + \
13488 "the packages that pulled them in."
13490 prefix = bad(" * ")
13491 from textwrap import wrap
13492 writemsg_level("".join(prefix + "%s\n" % line for \
13493 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13496 for pkg, consumers in consumer_map.iteritems():
13497 unique_consumers = set(chain(*consumers.values()))
13498 unique_consumers = sorted(consumer.mycpv \
13499 for consumer in unique_consumers)
13501 msg.append(" %s pulled in by:" % (pkg.cpv,))
13502 for consumer in unique_consumers:
13503 msg.append(" %s" % (consumer,))
13505 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13506 level=logging.WARNING, noiselevel=-1)
13508 # Add lib providers to the graph as children of lib consumers,
13509 # and also add any dependencies pulled in by the provider.
13510 writemsg_level(">>> Adding lib providers to graph...\n")
13512 for pkg, consumers in consumer_map.iteritems():
13513 for consumer_dblink in set(chain(*consumers.values())):
13514 consumer_pkg = vardb.get(("installed", myroot,
13515 consumer_dblink.mycpv, "nomerge"))
13516 if not resolver._add_pkg(pkg,
13517 Dependency(parent=consumer_pkg,
13518 priority=UnmergeDepPriority(runtime=True),
13520 resolver.display_problems()
# Re-complete the graph and recompute the clean list now that the
# protected providers are in it.
13523 writemsg_level("\nCalculating dependencies ")
13524 success = resolver._complete_graph()
13525 writemsg_level("\b\b... done!\n")
13526 resolver.display_problems()
13529 if unresolved_deps():
13532 graph = resolver.digraph.copy()
13533 required_pkgs_total = 0
13535 if isinstance(node, Package):
13536 required_pkgs_total += 1
13537 cleanlist = create_cleanlist()
13540 clean_set = set(cleanlist)
13542 # Use a topological sort to create an unmerge order such that
13543 # each package is unmerged before it's dependencies. This is
13544 # necessary to avoid breaking things that may need to run
13545 # during pkg_prerm or pkg_postrm phases.
13547 # Create a new graph to account for dependencies between the
13548 # packages being unmerged.
13552 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13553 runtime = UnmergeDepPriority(runtime=True)
13554 runtime_post = UnmergeDepPriority(runtime_post=True)
13555 buildtime = UnmergeDepPriority(buildtime=True)
13557 "RDEPEND": runtime,
13558 "PDEPEND": runtime_post,
13559 "DEPEND": buildtime,
# Build edges only between packages that are both being removed.
13562 for node in clean_set:
13563 graph.add(node, None)
13565 node_use = node.metadata["USE"].split()
13566 for dep_type in dep_keys:
13567 depstr = node.metadata[dep_type]
# Temporarily disable strict dep checking while evaluating depstrings
# of packages that are going away anyway; always restored afterwards
# (restoration at line 13576, presumably in an elided finally block).
13571 portage.dep._dep_check_strict = False
13572 success, atoms = portage.dep_check(depstr, None, settings,
13573 myuse=node_use, trees=resolver._graph_trees,
13576 portage.dep._dep_check_strict = True
13578 # Ignore invalid deps of packages that will
13579 # be uninstalled anyway.
13582 priority = priority_map[dep_type]
13584 if not isinstance(atom, portage.dep.Atom):
13585 # Ignore invalid atoms returned from dep_check().
13589 matches = vardb.match_pkgs(atom)
13592 for child_node in matches:
13593 if child_node in clean_set:
13594 graph.add(child_node, node, priority=priority)
13597 if len(graph.order) == len(graph.root_nodes()):
13598 # If there are no dependencies between packages
13599 # let unmerge() group them by cat/pn.
13601 cleanlist = [pkg.cpv for pkg in graph.order]
13603 # Order nodes from lowest to highest overall reference count for
13604 # optimal root node selection.
13605 node_refcounts = {}
13606 for node in graph.order:
13607 node_refcounts[node] = len(graph.parent_nodes(node))
13608 def cmp_reference_count(node1, node2):
13609 return node_refcounts[node1] - node_refcounts[node2]
13610 graph.order.sort(cmp_reference_count)
# Drain the graph: take root nodes (nothing depends on them), ignoring
# progressively stronger dep priorities to break circular dependencies.
13612 ignore_priority_range = [None]
13613 ignore_priority_range.extend(
13614 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13615 while not graph.empty():
13616 for ignore_priority in ignore_priority_range:
13617 nodes = graph.root_nodes(ignore_priority=ignore_priority)
13621 raise AssertionError("no root nodes")
13622 if ignore_priority is not None:
13623 # Some deps have been dropped due to circular dependencies,
13624 # so only pop one node in order do minimize the number that
13629 cleanlist.append(node.cpv)
13631 unmerge(root_config, myopts, "unmerge", cleanlist,
13632 ldpath_mtimes, ordered=ordered)
13634 if action == "prune":
# Final summary of installed/world/system/required/removed counts.
13637 if not cleanlist and "--quiet" in myopts:
13640 print "Packages installed: "+str(len(vardb.cpv_all()))
13641 print "Packages in world: " + \
13642 str(len(root_config.sets["world"].getAtoms()))
13643 print "Packages in system: " + \
13644 str(len(root_config.sets["system"].getAtoms()))
13645 print "Required packages: "+str(required_pkgs_total)
13646 if "--pretend" in myopts:
13647 print "Number to remove: "+str(len(cleanlist))
13649 print "Number removed: "+str(len(cleanlist))
# NOTE(review): this chunk is a line-numbered listing with interior lines
# missing (the embedded numbering skips); code is kept byte-identical.
# Purpose (from visible code): build a depgraph from mtimedb["resume"],
# optionally pruning packages whose resume deps are unsatisfied.
13651 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13652 skip_masked=False, skip_unsatisfied=False):
13654 Construct a depgraph for the given resume list. This will raise
13655 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13657 @returns: (success, depgraph, dropped_tasks)
13659 mergelist = mtimedb["resume"]["mergelist"]
13660 dropped_tasks = set()
13662 mydepgraph = depgraph(settings, trees,
13663 myopts, myparams, spinner)
# Load the saved resume command; UnsatisfiedResumeDep is handled below by
# pruning the unsatisfied parent packages when skip_unsatisfied is set.
13665 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13666 skip_masked=skip_masked)
13667 except depgraph.UnsatisfiedResumeDep, e:
13668 if not skip_unsatisfied:
13671 graph = mydepgraph.digraph
# Seed the worklist with every parent that has an unsatisfied dep, then
# walk up the graph collecting parents that would in turn become unsatisfied.
13672 unsatisfied_parents = dict((dep.parent, dep.parent) \
13673 for dep in e.value)
13674 traversed_nodes = set()
13675 unsatisfied_stack = list(unsatisfied_parents)
13676 while unsatisfied_stack:
13677 pkg = unsatisfied_stack.pop()
13678 if pkg in traversed_nodes:
13680 traversed_nodes.add(pkg)
13682 # If this package was pulled in by a parent
13683 # package scheduled for merge, removing this
13684 # package may cause the parent package's
13685 # dependency to become unsatisfied.
13686 for parent_node in graph.parent_nodes(pkg):
13687 if not isinstance(parent_node, Package) \
13688 or parent_node.operation not in ("merge", "nomerge"):
13691 graph.child_nodes(parent_node,
13692 ignore_priority=DepPriority.SOFT)
13693 if pkg in unsatisfied:
13694 unsatisfied_parents[parent_node] = parent_node
13695 unsatisfied_stack.append(parent_node)
# Drop the unsatisfied parents from the mergelist (mutated in place).
13697 pruned_mergelist = [x for x in mergelist \
13698 if isinstance(x, list) and \
13699 tuple(x) not in unsatisfied_parents]
13701 # If the mergelist doesn't shrink then this loop is infinite.
13702 if len(pruned_mergelist) == len(mergelist):
13703 # This happens if a package can't be dropped because
13704 # it's already installed, but it has unsatisfied PDEPEND.
13706 mergelist[:] = pruned_mergelist
13708 # Exclude installed packages that have been removed from the graph due
13709 # to failure to build/install runtime dependencies after the dependent
13710 # package has already been installed.
13711 dropped_tasks.update(pkg for pkg in \
13712 unsatisfied_parents if pkg.operation != "nomerge")
13713 mydepgraph.break_refs(unsatisfied_parents)
13715 del e, graph, traversed_nodes, \
13716 unsatisfied_parents, unsatisfied_stack
13720 return (success, mydepgraph, dropped_tasks)
# NOTE(review): line-numbered listing with many interior lines missing
# (the embedded numbering skips); code is kept byte-identical.
# Purpose (from visible code): the main --build/--resume driver — validates
# resume data, builds/loads a depgraph, displays or executes the merge list.
13722 def action_build(settings, trees, mtimedb,
13723 myopts, myaction, myfiles, spinner):
13725 # validate the state of the resume data
13726 # so that we can make assumptions later.
13727 for k in ("resume", "resume_backup"):
13728 if k not in mtimedb:
13730 resume_data = mtimedb[k]
13731 if not isinstance(resume_data, dict):
13734 mergelist = resume_data.get("mergelist")
13735 if not isinstance(mergelist, list):
# Each resume task must be a 4-tuple-shaped list: type, root, cpv, action.
13738 for x in mergelist:
13739 if not (isinstance(x, list) and len(x) == 4):
13741 pkg_type, pkg_root, pkg_key, pkg_action = x
13742 if pkg_root not in trees:
13743 # Current $ROOT setting differs,
13744 # so the list must be stale.
13750 resume_opts = resume_data.get("myopts")
13751 if not isinstance(resume_opts, (dict, list)):
13754 favorites = resume_data.get("favorites")
13755 if not isinstance(favorites, list):
# Merge the saved options of the resumed command into myopts, except for
# interactive/display-only options that must not be replayed.
13760 if "--resume" in myopts and \
13761 ("resume" in mtimedb or
13762 "resume_backup" in mtimedb):
13764 if "resume" not in mtimedb:
13765 mtimedb["resume"] = mtimedb["resume_backup"]
13766 del mtimedb["resume_backup"]
13768 # "myopts" is a list for backward compatibility.
13769 resume_opts = mtimedb["resume"].get("myopts", [])
13770 if isinstance(resume_opts, list):
13771 resume_opts = dict((k,True) for k in resume_opts)
13772 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
13773 resume_opts.pop(opt, None)
13774 myopts.update(resume_opts)
13776 if "--debug" in myopts:
13777 writemsg_level("myopts %s\n" % (myopts,))
13779 # Adjust config according to options of the command being resumed.
13780 for myroot in trees:
13781 mysettings = trees[myroot]["vartree"].settings
13782 mysettings.unlock()
13783 adjust_config(myopts, mysettings)
13785 del myroot, mysettings
13787 ldpath_mtimes = mtimedb["ldpath"]
# Cache commonly-tested option flags as booleans.
13790 buildpkgonly = "--buildpkgonly" in myopts
13791 pretend = "--pretend" in myopts
13792 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13793 ask = "--ask" in myopts
13794 nodeps = "--nodeps" in myopts
13795 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13796 tree = "--tree" in myopts
13797 if nodeps and tree:
13799 del myopts["--tree"]
13800 portage.writemsg(colorize("WARN", " * ") + \
13801 "--tree is broken with --nodeps. Disabling...\n")
13802 debug = "--debug" in myopts
13803 verbose = "--verbose" in myopts
13804 quiet = "--quiet" in myopts
13805 if pretend or fetchonly:
13806 # make the mtimedb readonly
13807 mtimedb.filename = None
13808 if "--digest" in myopts:
13809 msg = "The --digest option can prevent corruption from being" + \
13810 " noticed. The `repoman manifest` command is the preferred" + \
13811 " way to generate manifests and it is capable of doing an" + \
13812 " entire repository or category at once."
13813 prefix = bad(" * ")
13814 writemsg(prefix + "\n")
13815 from textwrap import wrap
13816 for line in wrap(msg, 72):
13817 writemsg("%s%s\n" % (prefix, line))
13818 writemsg(prefix + "\n")
# Describe what kind of list is about to be shown (merge/fetch/build).
13820 if "--quiet" not in myopts and \
13821 ("--pretend" in myopts or "--ask" in myopts or \
13822 "--tree" in myopts or "--verbose" in myopts):
13824 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13826 elif "--buildpkgonly" in myopts:
13830 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13832 print darkgreen("These are the packages that would be %s, in reverse order:") % action
13836 print darkgreen("These are the packages that would be %s, in order:") % action
13839 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13840 if not show_spinner:
13841 spinner.update = spinner.update_quiet
# Resume path: reconstruct the depgraph from the saved mergelist.
13844 favorites = mtimedb["resume"].get("favorites")
13845 if not isinstance(favorites, list):
13849 print "Calculating dependencies ",
13850 myparams = create_depgraph_params(myopts, myaction)
13852 resume_data = mtimedb["resume"]
13853 mergelist = resume_data["mergelist"]
13854 if mergelist and "--skipfirst" in myopts:
13855 for i, task in enumerate(mergelist):
13856 if isinstance(task, list) and \
13857 task and task[-1] == "merge":
13861 skip_masked = "--skipfirst" in myopts
13862 skip_unsatisfied = "--skipfirst" in myopts
13866 success, mydepgraph, dropped_tasks = resume_depgraph(
13867 settings, trees, mtimedb, myopts, myparams, spinner,
13868 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13869 except (portage.exception.PackageNotFound,
13870 depgraph.UnsatisfiedResumeDep), e:
13871 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13872 mydepgraph = e.depgraph
13875 from textwrap import wrap
13876 from portage.output import EOutput
# Report why the resume list could not be used.
13879 resume_data = mtimedb["resume"]
13880 mergelist = resume_data.get("mergelist")
13881 if not isinstance(mergelist, list):
13883 if mergelist and debug or (verbose and not quiet):
13884 out.eerror("Invalid resume list:")
13887 for task in mergelist:
13888 if isinstance(task, list):
13889 out.eerror(indent + str(tuple(task)))
13892 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13893 out.eerror("One or more packages are either masked or " + \
13894 "have missing dependencies:")
13897 for dep in e.value:
13898 if dep.atom is None:
13899 out.eerror(indent + "Masked package:")
13900 out.eerror(2 * indent + str(dep.parent))
13903 out.eerror(indent + str(dep.atom) + " pulled in by:")
13904 out.eerror(2 * indent + str(dep.parent))
13906 msg = "The resume list contains packages " + \
13907 "that are either masked or have " + \
13908 "unsatisfied dependencies. " + \
13909 "Please restart/continue " + \
13910 "the operation manually, or use --skipfirst " + \
13911 "to skip the first package in the list and " + \
13912 "any other packages that may be " + \
13913 "masked or have missing dependencies."
13914 for line in wrap(msg, 72):
13916 elif isinstance(e, portage.exception.PackageNotFound):
13917 out.eerror("An expected package is " + \
13918 "not available: %s" % str(e))
13920 msg = "The resume list contains one or more " + \
13921 "packages that are no longer " + \
13922 "available. Please restart/continue " + \
13923 "the operation manually."
13924 for line in wrap(msg, 72):
13928 print "\b\b... done!"
13932 portage.writemsg("!!! One or more packages have been " + \
13933 "dropped due to\n" + \
13934 "!!! masking or unsatisfied dependencies:\n\n",
13936 for task in dropped_tasks:
13937 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
13938 portage.writemsg("\n", noiselevel=-1)
13941 if mydepgraph is not None:
13942 mydepgraph.display_problems()
13943 if not (ask or pretend):
13944 # delete the current list and also the backup
13945 # since it's probably stale too.
13946 for k in ("resume", "resume_backup"):
13947 mtimedb.pop(k, None)
13952 if ("--resume" in myopts):
13953 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: compute a fresh depgraph from the command-line files.
13956 myparams = create_depgraph_params(myopts, myaction)
13957 if "--quiet" not in myopts and "--nodeps" not in myopts:
13958 print "Calculating dependencies ",
13960 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13962 retval, favorites = mydepgraph.select_files(myfiles)
13963 except portage.exception.PackageNotFound, e:
13964 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13966 except portage.exception.PackageSetNotFound, e:
13967 root_config = trees[settings["ROOT"]]["root_config"]
13968 display_missing_pkg_set(root_config, e.value)
13971 print "\b\b... done!"
13973 mydepgraph.display_problems()
# Interactive display path (--ask / --tree / --verbose without --pretend).
13976 if "--pretend" not in myopts and \
13977 ("--ask" in myopts or "--tree" in myopts or \
13978 "--verbose" in myopts) and \
13979 not ("--quiet" in myopts and "--ask" not in myopts):
13980 if "--resume" in myopts:
13981 mymergelist = mydepgraph.altlist()
13982 if len(mymergelist) == 0:
13983 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13985 favorites = mtimedb["resume"]["favorites"]
13986 retval = mydepgraph.display(
13987 mydepgraph.altlist(reversed=tree),
13988 favorites=favorites)
13989 mydepgraph.display_problems()
13990 if retval != os.EX_OK:
13992 prompt="Would you like to resume merging these packages?"
13994 retval = mydepgraph.display(
13995 mydepgraph.altlist(reversed=("--tree" in myopts)),
13996 favorites=favorites)
13997 mydepgraph.display_problems()
13998 if retval != os.EX_OK:
14001 for x in mydepgraph.altlist():
14002 if isinstance(x, Package) and x.operation == "merge":
14006 sets = trees[settings["ROOT"]]["root_config"].sets
14007 world_candidates = None
14008 if "--noreplace" in myopts and \
14009 not oneshot and favorites:
14010 # Sets that are not world candidates are filtered
14011 # out here since the favorites list needs to be
14012 # complete for depgraph.loadResumeCommand() to
14013 # operate correctly.
14014 world_candidates = [x for x in favorites \
14015 if not (x.startswith(SETPREFIX) and \
14016 not sets[x[1:]].world_candidate)]
14017 if "--noreplace" in myopts and \
14018 not oneshot and world_candidates:
14020 for x in world_candidates:
14021 print " %s %s" % (good("*"), x)
14022 prompt="Would you like to add these packages to your world favorites?"
14023 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14024 prompt="Nothing to merge; would you like to auto-clean packages?"
14027 print "Nothing to merge; quitting."
14030 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14031 prompt="Would you like to fetch the source files for these packages?"
14033 prompt="Would you like to merge these packages?"
14035 if "--ask" in myopts and userquery(prompt) == "No":
14040 # Don't ask again (e.g. when auto-cleaning packages after merge)
14041 myopts.pop("--ask", None)
# Pretend display path (no execution).
14043 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14044 if ("--resume" in myopts):
14045 mymergelist = mydepgraph.altlist()
14046 if len(mymergelist) == 0:
14047 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14049 favorites = mtimedb["resume"]["favorites"]
14050 retval = mydepgraph.display(
14051 mydepgraph.altlist(reversed=tree),
14052 favorites=favorites)
14053 mydepgraph.display_problems()
14054 if retval != os.EX_OK:
14057 retval = mydepgraph.display(
14058 mydepgraph.altlist(reversed=("--tree" in myopts)),
14059 favorites=favorites)
14060 mydepgraph.display_problems()
14061 if retval != os.EX_OK:
# --buildpkgonly requires a graph with no hard inter-package deps left.
14063 if "--buildpkgonly" in myopts:
14064 graph_copy = mydepgraph.digraph.clone()
14065 for node in list(graph_copy.order):
14066 if not isinstance(node, Package):
14067 graph_copy.remove(node)
14068 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
14069 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14070 print "!!! You have to merge the dependencies before you can build this package.\n"
14073 if "--buildpkgonly" in myopts:
14074 graph_copy = mydepgraph.digraph.clone()
14075 for node in list(graph_copy.order):
14076 if not isinstance(node, Package):
14077 graph_copy.remove(node)
14078 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
14079 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14080 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Execution path: hand the ordered merge list to the Scheduler.
14083 if ("--resume" in myopts):
14084 favorites=mtimedb["resume"]["favorites"]
14085 mymergelist = mydepgraph.altlist()
14086 mydepgraph.break_refs(mymergelist)
14087 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14088 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14089 del mydepgraph, mymergelist
14090 clear_caches(trees)
14092 retval = mergetask.merge()
14093 merge_count = mergetask.curval
14095 if "resume" in mtimedb and \
14096 "mergelist" in mtimedb["resume"] and \
14097 len(mtimedb["resume"]["mergelist"]) > 1:
14098 mtimedb["resume_backup"] = mtimedb["resume"]
14099 del mtimedb["resume"]
14101 mtimedb["resume"]={}
14102 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14103 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14104 # a list type for options.
14105 mtimedb["resume"]["myopts"] = myopts.copy()
14107 # Convert Atom instances to plain str since the mtimedb loader
14108 # sets unpickler.find_global = None which causes unpickler.load()
14109 # to raise the following exception:
14111 # cPickle.UnpicklingError: Global and instance pickles are not supported.
14113 # TODO: Maybe stop setting find_global = None, or find some other
14114 # way to avoid accidental triggering of the above UnpicklingError.
14115 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14117 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14118 for pkgline in mydepgraph.altlist():
14119 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14120 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14121 tmpsettings = portage.config(clone=settings)
14123 if settings.get("PORTAGE_DEBUG", "") == "1":
14125 retval = portage.doebuild(
14126 y, "digest", settings["ROOT"], tmpsettings, edebug,
14127 ("--pretend" in myopts),
14128 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14131 pkglist = mydepgraph.altlist()
14132 mydepgraph.saveNomergeFavorites()
14133 mydepgraph.break_refs(pkglist)
14134 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14135 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14136 del mydepgraph, pkglist
14137 clear_caches(trees)
14139 retval = mergetask.merge()
14140 merge_count = mergetask.curval
# Post-merge autoclean (skipped for pretend/fetch/buildpkg-only runs).
14142 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14143 if "yes" == settings.get("AUTOCLEAN"):
14144 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14145 unmerge(trees[settings["ROOT"]]["root_config"],
14146 myopts, "clean", [],
14147 ldpath_mtimes, autoclean=1)
14149 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14150 + " AUTOCLEAN is disabled. This can cause serious"
14151 + " problems due to overlapping packages.\n")
14152 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report on stderr that two mutually exclusive actions were requested.

	Only the error text visible in this listing is emitted; any exit
	behavior that followed in the original file is outside this block.
	"""
	complaint = "\n!!! Multiple actions requested... Please choose one only.\n"
	complaint += "!!! '%s' or '%s'\n\n" % (action1, action2)
	sys.stderr.write(complaint)
# NOTE(review): line-numbered listing with interior lines missing (the
# embedded numbering skips, e.g. the loop/try framing); code kept byte-identical.
# Purpose (from visible code): pre-process argv so that -j/--jobs gets an
# explicit value before optparse sees it.
14161 def insert_optional_args(args):
14163 Parse optional arguments and insert a value if one has
14164 not been provided. This is done before feeding the args
14165 to the optparse parser since that parser does not support
14166 this feature natively.
14170 jobs_opts = ("-j", "--jobs")
# Process args as a stack so the upcoming value token can be peeked at.
14171 arg_stack = args[:]
14172 arg_stack.reverse()
14174 arg = arg_stack.pop()
14176 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14177 if not (short_job_opt or arg in jobs_opts):
14178 new_args.append(arg)
14181 # Insert an empty placeholder in order to
14182 # satisfy the requirements of optparse.
14184 new_args.append("--jobs")
# A bundled short option like "-j4" or "-jv" carries its count (or other
# short flags) in the same token.
14187 if short_job_opt and len(arg) > 2:
14188 if arg[:2] == "-j":
14190 job_count = int(arg[2:])
14192 saved_opts = arg[2:]
14195 saved_opts = arg[1:].replace("j", "")
14197 if job_count is None and arg_stack:
14199 job_count = int(arg_stack[-1])
14203 # Discard the job count from the stack
14204 # since we're consuming it here.
14207 if job_count is None:
14208 # unlimited number of jobs
14209 new_args.append("True")
14211 new_args.append(str(job_count))
# Re-emit any short flags that were bundled with -j.
14213 if saved_opts is not None:
14214 new_args.append("-" + saved_opts)
# NOTE(review): line-numbered listing with interior lines missing (several
# dict entries and validation branches are elided); code kept byte-identical.
# Purpose (from visible code): build an optparse parser from the module-level
# actions/options/shortmapping tables and return (myaction, myopts, myfiles).
14218 def parse_opts(tmpcmdline, silent=False):
14223 global actions, options, shortmapping
14225 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, with their optparse keyword arguments.
14226 argument_options = {
14228 "help":"specify the location for portage configuration files",
14232 "help":"enable or disable color output",
14234 "choices":("y", "n")
14239 "help" : "Specifies the number of packages to build " + \
14245 "--load-average": {
14247 "help" :"Specifies that no new builds should be started " + \
14248 "if there are other builds running and the load average " + \
14249 "is at least LOAD (a floating-point number).",
14255 "help":"include unnecessary build time dependencies",
14257 "choices":("y", "n")
14260 "help":"specify conditions to trigger package reinstallation",
14262 "choices":["changed-use"]
14266 from optparse import OptionParser
14267 parser = OptionParser()
# Remove optparse's builtin --help so emerge can handle it itself.
14268 if parser.has_option("--help"):
14269 parser.remove_option("--help")
14271 for action_opt in actions:
14272 parser.add_option("--" + action_opt, action="store_true",
14273 dest=action_opt.replace("-", "_"), default=False)
14274 for myopt in options:
14275 parser.add_option(myopt, action="store_true",
14276 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14277 for shortopt, longopt in shortmapping.iteritems():
14278 parser.add_option("-" + shortopt, action="store_true",
14279 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14280 for myalias, myopt in longopt_aliases.iteritems():
14281 parser.add_option(myalias, action="store_true",
14282 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14284 for myopt, kwargs in argument_options.iteritems():
14285 parser.add_option(myopt,
14286 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14288 tmpcmdline = insert_optional_args(tmpcmdline)
14290 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Validate --jobs: "True" means unlimited, otherwise an integer.
14294 if myoptions.jobs == "True":
14298 jobs = int(myoptions.jobs)
14302 if jobs is not True and \
14306 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14307 (myoptions.jobs,), noiselevel=-1)
14309 myoptions.jobs = jobs
# Validate --load-average: must parse as a positive float.
14311 if myoptions.load_average:
14313 load_average = float(myoptions.load_average)
14317 if load_average <= 0.0:
14318 load_average = None
14320 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14321 (myoptions.load_average,), noiselevel=-1)
14323 myoptions.load_average = load_average
# Copy parsed values back into the myopts dict keyed by option string.
14325 for myopt in options:
14326 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14328 myopts[myopt] = True
14330 for myopt in argument_options:
14331 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14335 for action_opt in actions:
14336 v = getattr(myoptions, action_opt.replace("-", "_"))
14339 multiple_actions(myaction, action_opt)
14341 myaction = action_opt
14345 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run settings.validate() on the vartree config of every root in trees."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Drop the per-root dbapi caches and the global portage dircache.

	Used before handing control to the Scheduler so that stale metadata
	is not kept alive across the merge phase.
	"""
	for root_trees in trees.itervalues():
		porttree_dbapi = root_trees["porttree"].dbapi
		porttree_dbapi.melt()
		porttree_dbapi._aux_cache.clear()
		bintree_dbapi = root_trees["bintree"].dbapi
		bintree_dbapi._aux_cache.clear()
		bintree_dbapi._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# NOTE(review): line-numbered listing with interior lines missing (e.g. the
# kwargs initialization and assignment are elided); code kept byte-identical.
# Purpose (from visible code): create the portage trees, attach a RootConfig
# per root, open the mtimedb, and return (settings, trees, mtimedb).
14362 def load_emerge_config(trees=None):
# Forward PORTAGE_CONFIGROOT/ROOT from the environment to create_trees().
14364 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14365 v = os.environ.get(envvar, None)
14366 if v and v.strip():
14368 trees = portage.create_trees(trees=trees, **kwargs)
14370 for root, root_trees in trees.iteritems():
14371 settings = root_trees["vartree"].settings
14372 setconfig = load_default_config(settings, root_trees)
14373 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14375 settings = trees["/"]["vartree"].settings
14377 for myroot in trees:
14379 settings = trees[myroot]["vartree"].settings
# The mtimedb always lives under the host ("/") cache path.
14382 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14383 mtimedb = portage.MtimeDB(mtimedbfile)
14385 return settings, trees, mtimedb
# NOTE(review): line-numbered listing with interior lines missing (e.g. the
# try: framing around the int() parses); code kept byte-identical.
14387 def adjust_config(myopts, settings):
14388 """Make emerge specific adjustments to the config."""
14390 # To enhance usability, make some vars case insensitive by forcing them to
14392 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14393 if myvar in settings:
14394 settings[myvar] = settings[myvar].lower()
14395 settings.backup_changes(myvar)
14398 # Kill noauto as it will break merges otherwise.
14399 if "noauto" in settings.features:
14400 while "noauto" in settings.features:
14401 settings.features.remove("noauto")
14402 settings["FEATURES"] = " ".join(settings.features)
14403 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY, falling back to the default on a malformed value.
14407 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14408 except ValueError, e:
14409 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14410 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14411 settings["CLEAN_DELAY"], noiselevel=-1)
14412 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14413 settings.backup_changes("CLEAN_DELAY")
# Parse EMERGE_WARNING_DELAY the same way (default 10 seconds).
14415 EMERGE_WARNING_DELAY = 10
14417 EMERGE_WARNING_DELAY = int(settings.get(
14418 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14419 except ValueError, e:
14420 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14421 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14422 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14423 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14424 settings.backup_changes("EMERGE_WARNING_DELAY")
14426 if "--quiet" in myopts:
14427 settings["PORTAGE_QUIET"]="1"
14428 settings.backup_changes("PORTAGE_QUIET")
14430 if "--verbose" in myopts:
14431 settings["PORTAGE_VERBOSE"] = "1"
14432 settings.backup_changes("PORTAGE_VERBOSE")
14434 # Set so that configs will be merged regardless of remembered status
14435 if ("--noconfmem" in myopts):
14436 settings["NOCONFMEM"]="1"
14437 settings.backup_changes("NOCONFMEM")
14439 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must parse as an int and be exactly 0 or 1.
14442 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14443 if PORTAGE_DEBUG not in (0, 1):
14444 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14445 PORTAGE_DEBUG, noiselevel=-1)
14446 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14449 except ValueError, e:
14450 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14451 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14452 settings["PORTAGE_DEBUG"], noiselevel=-1)
14454 if "--debug" in myopts:
14456 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14457 settings.backup_changes("PORTAGE_DEBUG")
14459 if settings.get("NOCOLOR") not in ("yes","true"):
14460 portage.output.havecolor = 1
14462 """The explicit --color < y | n > option overrides the NOCOLOR environment
14463 variable and stdout auto-detection."""
14464 if "--color" in myopts:
14465 if "y" == myopts["--color"]:
14466 portage.output.havecolor = 1
14467 settings["NOCOLOR"] = "false"
14469 portage.output.havecolor = 0
14470 settings["NOCOLOR"] = "true"
14471 settings.backup_changes("NOCOLOR")
# Without an explicit --color, disable color when stdout is not a tty.
14472 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14473 portage.output.havecolor = 0
14474 settings["NOCOLOR"] = "true"
14475 settings.backup_changes("NOCOLOR")
# NOTE(review): the body of apply_priorities is missing from this listing
# (the embedded numbering jumps from 14477 to 14481); presumably it invokes
# the nice()/ionice() helpers below — TODO confirm against the full source.
14477 def apply_priorities(settings):
# NOTE(review): listing is missing the try: line framing os.nice (numbering
# skips 14482); code kept byte-identical.
# Purpose (from visible code): apply PORTAGE_NICENESS via os.nice(), logging
# an error on OSError/ValueError instead of raising.
14481 def nice(settings):
14483 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14484 except (OSError, ValueError), e:
14485 out = portage.output.EOutput()
14486 out.eerror("Failed to change nice value to '%s'" % \
14487 settings["PORTAGE_NICENESS"])
14488 out.eerror("%s\n" % str(e))
# NOTE(review): line-numbered listing with interior lines missing (e.g. the
# early-return when the command is unset); code kept byte-identical.
# Purpose (from visible code): run PORTAGE_IONICE_COMMAND with $PID expanded,
# warning if it fails and staying silent when the command is not found.
14490 def ionice(settings):
14492 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14494 ionice_cmd = shlex.split(ionice_cmd)
# Substitute the current process id for ${PID} in the configured command.
14498 from portage.util import varexpand
14499 variables = {"PID" : str(os.getpid())}
14500 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14503 rval = portage.process.spawn(cmd, env=os.environ)
14504 except portage.exception.CommandNotFound:
14505 # The OS kernel probably doesn't support ionice,
14506 # so return silently.
14509 if rval != os.EX_OK:
14510 out = portage.output.EOutput()
14511 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14512 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# NOTE(review): listing is missing interior lines (e.g. the msg list
# initialization, numbering skips 14515-14516); code kept byte-identical.
# Purpose (from visible code): log an error listing the available sets when
# the requested set name does not exist.
14514 def display_missing_pkg_set(root_config, set_name):
14517 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14518 "The following sets exist:") % \
14519 colorize("INFORM", set_name))
14522 for s in sorted(root_config.sets):
14523 msg.append(" %s" % s)
14526 writemsg_level("".join("%s\n" % l for l in msg),
14527 level=logging.ERROR, noiselevel=-1)
# NOTE(review): line-numbered listing with many interior lines missing
# (loop/try framing, ARG_START/ARG_END definitions, etc.); code kept
# byte-identical.
# Purpose (from visible code): expand @set arguments (including set
# parameters and -@/+@ set operations) into atoms or prefixed set names.
14529 def expand_set_arguments(myfiles, myaction, root_config):
14531 setconfig = root_config.setconfig
14533 sets = setconfig.getSets()
14535 # In order to know exactly which atoms/sets should be added to the
14536 # world file, the depgraph performs set expansion later. It will get
14537 # confused about where the atoms came from if it's not allowed to
14538 # expand them itself.
14539 do_not_expand = (None, )
14542 if a in ("system", "world"):
14543 newargs.append(SETPREFIX+a)
14550 # separators for set arguments
14554 # WARNING: all operators must be of equal length
14556 DIFF_OPERATOR = "-@"
14557 UNION_OPERATOR = "+@"
# First pass: process per-set parameter syntax, e.g. @name(key=value,...).
14559 for i in range(0, len(myfiles)):
14560 if myfiles[i].startswith(SETPREFIX):
14563 x = myfiles[i][len(SETPREFIX):]
14566 start = x.find(ARG_START)
14567 end = x.find(ARG_END)
14568 if start > 0 and start < end:
14569 namepart = x[:start]
14570 argpart = x[start+1:end]
14572 # TODO: implement proper quoting
14573 args = argpart.split(",")
14577 k, v = a.split("=", 1)
14580 options[a] = "True"
14581 setconfig.update(namepart, options)
14582 newset += (x[:start-len(namepart)]+namepart)
14583 x = x[end+len(ARG_END):]
14587 myfiles[i] = SETPREFIX+newset
14589 sets = setconfig.getSets()
14591 # display errors that occurred while loading the SetConfig instance
14592 for e in setconfig.errors:
14593 print colorize("BAD", "Error during set creation: %s" % e)
14595 # emerge relies on the existence of sets with names "world" and "system"
14596 required_sets = ("world", "system")
14599 for s in required_sets:
14601 missing_sets.append(s)
# Build a human-readable list of the missing required sets.
14603 if len(missing_sets) > 2:
14604 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
14605 missing_sets_str += ', and "%s"' % missing_sets[-1]
14606 elif len(missing_sets) == 2:
14607 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
14609 missing_sets_str = '"%s"' % missing_sets[-1]
14610 msg = ["emerge: incomplete set configuration, " + \
14611 "missing set(s): %s" % missing_sets_str]
14613 msg.append(" sets defined: %s" % ", ".join(sets))
14614 msg.append(" This usually means that '%s'" % \
14615 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
14616 msg.append(" is missing or corrupt.")
14618 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
14620 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: expand each @set argument into atoms (or keep the prefixed
# name when the depgraph should expand it itself).
14623 if a.startswith(SETPREFIX):
14624 # support simple set operations (intersection, difference and union)
14625 # on the commandline. Expressions are evaluated strictly left-to-right
14626 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
14627 expression = a[len(SETPREFIX):]
# Peel operators off the right end; operands accumulate left-to-right.
14630 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
14631 is_pos = expression.rfind(IS_OPERATOR)
14632 diff_pos = expression.rfind(DIFF_OPERATOR)
14633 union_pos = expression.rfind(UNION_OPERATOR)
14634 op_pos = max(is_pos, diff_pos, union_pos)
14635 s1 = expression[:op_pos]
14636 s2 = expression[op_pos+len(IS_OPERATOR):]
14637 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
14639 display_missing_pkg_set(root_config, s2)
14641 expr_sets.insert(0, s2)
14642 expr_ops.insert(0, op)
14644 if not expression in sets:
14645 display_missing_pkg_set(root_config, expression)
14647 expr_sets.insert(0, expression)
14648 result = set(setconfig.getSetAtoms(expression))
14649 for i in range(0, len(expr_ops)):
14650 s2 = setconfig.getSetAtoms(expr_sets[i+1])
14651 if expr_ops[i] == IS_OPERATOR:
14652 result.intersection_update(s2)
14653 elif expr_ops[i] == DIFF_OPERATOR:
14654 result.difference_update(s2)
14655 elif expr_ops[i] == UNION_OPERATOR:
14658 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
14659 newargs.extend(result)
14661 s = a[len(SETPREFIX):]
14663 display_missing_pkg_set(root_config, s)
14665 setconfig.active.append(s)
14667 set_atoms = setconfig.getSetAtoms(s)
14668 except portage.exception.PackageSetNotFound, e:
14669 writemsg_level(("emerge: the given set '%s' " + \
14670 "contains a non-existent set named '%s'.\n") % \
14671 (s, e), level=logging.ERROR, noiselevel=-1)
14673 if myaction in unmerge_actions and \
14674 not sets[s].supportsOperation("unmerge"):
14675 sys.stderr.write("emerge: the given set '%s' does " % s + \
14676 "not support unmerge operations\n")
14678 elif not set_atoms:
14679 print "emerge: '%s' is an empty set" % s
14680 elif myaction not in do_not_expand:
14681 newargs.extend(set_atoms)
14683 newargs.append(SETPREFIX+s)
14684 for e in sets[s].errors:
14688 return (newargs, retval)
14690 def repo_name_check(trees):
	# Scan all configured portage trees for repositories that lack a
	# profiles/repo_name entry, and emit a WARNING listing each offender.
	# Returns True if at least one repository is missing a repo_name.
	# NOTE(review): several interior lines are elided in this view (e.g.
	# the loop over `repos` that drives the discard below, and the `msg`
	# list initializer) — verify against the full file.
14691 missing_repo_names = set()
14692 for root, root_trees in trees.iteritems():
14693 if "porttree" in root_trees:
14694 portdb = root_trees["porttree"].dbapi
	# Start by assuming every tree is missing a name, then discard
	# each tree path that a known repository maps to.
14695 missing_repo_names.update(portdb.porttrees)
14696 repos = portdb.getRepositories()
14698 missing_repo_names.discard(portdb.getRepositoryPath(r))
14699 if portdb.porttree_root in missing_repo_names and \
14700 not os.path.exists(os.path.join(
14701 portdb.porttree_root, "profiles")):
14702 # This is normal if $PORTDIR happens to be empty,
14703 # so don't warn about it.
14704 missing_repo_names.remove(portdb.porttree_root)
14706 if missing_repo_names:
14708 msg.append("WARNING: One or more repositories " + \
14709 "have missing repo_name entries:")
14711 for p in missing_repo_names:
14712 msg.append("\t%s/profiles/repo_name" % (p,))
14714 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
14715 "should be a plain text file containing a unique " + \
14716 "name for the repository on the first line.", 70))
14717 writemsg_level("".join("%s\n" % l for l in msg),
14718 level=logging.WARNING, noiselevel=-1)
14720 return bool(missing_repo_names)
14722 def config_protect_check(trees):
	# Emit a WARN-level message for any root whose settings have an
	# empty or unset CONFIG_PROTECT value.
14723 for root, root_trees in trees.iteritems():
14724 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
14725 msg = "!!! CONFIG_PROTECT is empty"
	# NOTE(review): line 14726 is elided in this view — presumably a
	# guard (e.g. root != "/") before appending the root suffix below;
	# confirm against the full file.
14727 msg += " for '%s'" % root
14728 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
14730 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	# Report that the short ebuild name `arg` matched multiple
	# category/package atoms.  Under --quiet this just lists the
	# fully-qualified names; otherwise it drives the search machinery
	# to show details for each matching cp.
	# NOTE(review): interior lines are elided in this view (e.g. the
	# early return after the quiet branch and the body of the second
	# loop, which presumably renders each search result) — verify
	# against the full file.
14732 if "--quiet" in myopts:
14733 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14734 print "!!! one of the following fully-qualified ebuild names instead:\n"
	# dep_getkey reduces each atom to its category/package key; the
	# set() collapses duplicates before sorted display.
14735 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14736 print " " + colorize("INFORM", cp)
	# Non-quiet path: build a search object honoring the relevant
	# --searchdesc/--usepkg/--usepkgonly options.
14739 s = search(root_config, spinner, "--searchdesc" in myopts,
14740 "--quiet" not in myopts, "--usepkg" in myopts,
14741 "--usepkgonly" in myopts)
14742 null_cp = portage.dep_getkey(insert_category_into_atom(
14744 cat, atom_pn = portage.catsplit(null_cp)
14745 s.searchkey = atom_pn
14746 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14749 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14750 print "!!! one of the above fully-qualified ebuild names instead.\n"
14752 def profile_check(trees, myaction, myopts):
	# Verify that each configured root has a valid profile before
	# allowing most actions to proceed.  "info"/"sync" actions and
	# --version/--help are exempt (their early-return lines are elided
	# in this view).  When a profile is invalid, an ERROR message is
	# printed explaining that only --help/--info/--sync/--version are
	# allowed.
	# NOTE(review): the returns for the exempt branches, the loop-body
	# line after the profiles test (presumably `continue`), and the
	# final failure return are all elided here — verify against the
	# full file.
14753 if myaction in ("info", "sync"):
14755 elif "--version" in myopts or "--help" in myopts:
14757 for root, root_trees in trees.iteritems():
14758 if root_trees["root_config"].settings.profiles:
14760 # generate some profile related warning messages
14761 validate_ebuild_environment(trees)
14762 msg = "If you have just changed your profile configuration, you " + \
14763 "should revert back to the previous configuration. Due to " + \
14764 "your current profile being invalid, allowed actions are " + \
14765 "limited to --help, --info, --sync, and --version."
	# Wrap to 70 columns and prefix every line with "!!! ".
14766 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
14767 level=logging.ERROR, noiselevel=-1)
14772 global portage # NFC why this is necessary now - genone
14773 portage._disable_legacy_globals()
14774 # Disable color until we're sure that it should be enabled (after
14775 # EMERGE_DEFAULT_OPTS has been parsed).
14776 portage.output.havecolor = 0
14777 # This first pass is just for options that need to be known as early as
14778 # possible, such as --config-root. They will be parsed again later,
14779 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14780 # the value of --config-root).
14781 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14782 if "--debug" in myopts:
14783 os.environ["PORTAGE_DEBUG"] = "1"
14784 if "--config-root" in myopts:
14785 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14787 # Portage needs to ensure a sane umask for the files it creates.
14789 settings, trees, mtimedb = load_emerge_config()
14790 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14791 rval = profile_check(trees, myaction, myopts)
14792 if rval != os.EX_OK:
14795 if portage._global_updates(trees, mtimedb["updates"]):
14797 # Reload the whole config from scratch.
14798 settings, trees, mtimedb = load_emerge_config(trees=trees)
14799 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14801 xterm_titles = "notitles" not in settings.features
14804 if "--ignore-default-opts" not in myopts:
14805 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14806 tmpcmdline.extend(sys.argv[1:])
14807 myaction, myopts, myfiles = parse_opts(tmpcmdline)
14809 if "--digest" in myopts:
14810 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14811 # Reload the whole config from scratch so that the portdbapi internal
14812 # config is updated with new FEATURES.
14813 settings, trees, mtimedb = load_emerge_config(trees=trees)
14814 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14816 for myroot in trees:
14817 mysettings = trees[myroot]["vartree"].settings
14818 mysettings.unlock()
14819 adjust_config(myopts, mysettings)
14820 mysettings["PORTAGE_COUNTER_HASH"] = \
14821 trees[myroot]["vartree"].dbapi._counter_hash()
14822 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14824 del myroot, mysettings
14826 apply_priorities(settings)
14828 spinner = stdout_spinner()
14829 if "candy" in settings.features:
14830 spinner.update = spinner.update_scroll
14832 if "--quiet" not in myopts:
14833 portage.deprecated_profile_check(settings=settings)
14834 repo_name_check(trees)
14835 config_protect_check(trees)
14837 eclasses_overridden = {}
14838 for mytrees in trees.itervalues():
14839 mydb = mytrees["porttree"].dbapi
14840 # Freeze the portdbapi for performance (memoize all xmatch results).
14842 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14845 if eclasses_overridden and \
14846 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14847 prefix = bad(" * ")
14848 if len(eclasses_overridden) == 1:
14849 writemsg(prefix + "Overlay eclass overrides " + \
14850 "eclass from PORTDIR:\n", noiselevel=-1)
14852 writemsg(prefix + "Overlay eclasses override " + \
14853 "eclasses from PORTDIR:\n", noiselevel=-1)
14854 writemsg(prefix + "\n", noiselevel=-1)
14855 for eclass_name in sorted(eclasses_overridden):
14856 writemsg(prefix + " '%s/%s.eclass'\n" % \
14857 (eclasses_overridden[eclass_name], eclass_name),
14859 writemsg(prefix + "\n", noiselevel=-1)
14860 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14861 "because it will trigger invalidation of cached ebuild metadata " + \
14862 "that is distributed with the portage tree. If you must " + \
14863 "override eclasses from PORTDIR then you are advised to add " + \
14864 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14865 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14866 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14867 "you would like to disable this warning."
14868 from textwrap import wrap
14869 for line in wrap(msg, 72):
14870 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14872 if "moo" in myfiles:
14875 Larry loves Gentoo (""" + platform.system() + """)
14877 _______________________
14878 < Have you mooed today? >
14879 -----------------------
14889 ext = os.path.splitext(x)[1]
14890 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14891 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14894 root_config = trees[settings["ROOT"]]["root_config"]
14895 if myaction == "list-sets":
14896 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14900 # only expand sets for actions taking package arguments
14901 oldargs = myfiles[:]
14902 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14903 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14904 if retval != os.EX_OK:
14907 # Need to handle empty sets specially, otherwise emerge will react
14908 # with the help message for empty argument lists
14909 if oldargs and not myfiles:
14910 print "emerge: no targets left after set expansion"
14913 if ("--tree" in myopts) and ("--columns" in myopts):
14914 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14917 if ("--quiet" in myopts):
14918 spinner.update = spinner.update_quiet
14919 portage.util.noiselimit = -1
14921 # Always create packages if FEATURES=buildpkg
14922 # Imply --buildpkg if --buildpkgonly
14923 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14924 if "--buildpkg" not in myopts:
14925 myopts["--buildpkg"] = True
14927 # Also allow -S to invoke search action (-sS)
14928 if ("--searchdesc" in myopts):
14929 if myaction and myaction != "search":
14930 myfiles.append(myaction)
14931 if "--search" not in myopts:
14932 myopts["--search"] = True
14933 myaction = "search"
14935 # Always try and fetch binary packages if FEATURES=getbinpkg
14936 if ("getbinpkg" in settings.features):
14937 myopts["--getbinpkg"] = True
14939 if "--buildpkgonly" in myopts:
14940 # --buildpkgonly will not merge anything, so
14941 # it cancels all binary package options.
14942 for opt in ("--getbinpkg", "--getbinpkgonly",
14943 "--usepkg", "--usepkgonly"):
14944 myopts.pop(opt, None)
14946 if "--fetch-all-uri" in myopts:
14947 myopts["--fetchonly"] = True
14949 if "--skipfirst" in myopts and "--resume" not in myopts:
14950 myopts["--resume"] = True
14952 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14953 myopts["--usepkgonly"] = True
14955 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14956 myopts["--getbinpkg"] = True
14958 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14959 myopts["--usepkg"] = True
14961 # Also allow -K to apply --usepkg/-k
14962 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14963 myopts["--usepkg"] = True
14965 # Allow -p to remove --ask
14966 if ("--pretend" in myopts) and ("--ask" in myopts):
14967 print ">>> --pretend disables --ask... removing --ask from options."
14968 del myopts["--ask"]
14970 # forbid --ask when not in a terminal
14971 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14972 if ("--ask" in myopts) and (not sys.stdin.isatty()):
14973 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14977 if settings.get("PORTAGE_DEBUG", "") == "1":
14978 spinner.update = spinner.update_quiet
14980 if "python-trace" in settings.features:
14981 import portage.debug
14982 portage.debug.set_trace(True)
14984 if not ("--quiet" in myopts):
14985 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14986 spinner.update = spinner.update_basic
14988 if "--version" in myopts:
14989 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14990 settings.profile_path, settings["CHOST"],
14991 trees[settings["ROOT"]]["vartree"].dbapi)
14993 elif "--help" in myopts:
14994 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14997 if "--debug" in myopts:
14998 print "myaction", myaction
14999 print "myopts", myopts
15001 if not myaction and not myfiles and "--resume" not in myopts:
15002 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15005 pretend = "--pretend" in myopts
15006 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15007 buildpkgonly = "--buildpkgonly" in myopts
15009 # check if root user is the current user for the actions where emerge needs this
15010 if portage.secpass < 2:
15011 # We've already allowed "--version" and "--help" above.
15012 if "--pretend" not in myopts and myaction not in ("search","info"):
15013 need_superuser = not \
15015 (buildpkgonly and secpass >= 1) or \
15016 myaction in ("metadata", "regen") or \
15017 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15018 if portage.secpass < 1 or \
15021 access_desc = "superuser"
15023 access_desc = "portage group"
15024 # Always show portage_group_warning() when only portage group
15025 # access is required but the user is not in the portage group.
15026 from portage.data import portage_group_warning
15027 if "--ask" in myopts:
15028 myopts["--pretend"] = True
15029 del myopts["--ask"]
15030 print ("%s access is required... " + \
15031 "adding --pretend to options.\n") % access_desc
15032 if portage.secpass < 1 and not need_superuser:
15033 portage_group_warning()
15035 sys.stderr.write(("emerge: %s access is " + \
15036 "required.\n\n") % access_desc)
15037 if portage.secpass < 1 and not need_superuser:
15038 portage_group_warning()
15041 disable_emergelog = False
15042 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15044 disable_emergelog = True
15046 if myaction in ("search", "info"):
15047 disable_emergelog = True
15048 if disable_emergelog:
15049 """ Disable emergelog for everything except build or unmerge
15050 operations. This helps minimize parallel emerge.log entries that can
15051 confuse log parsers. We especially want it disabled during
15052 parallel-fetch, which uses --resume --fetchonly."""
15054 def emergelog(*pargs, **kargs):
15057 if not "--pretend" in myopts:
15058 emergelog(xterm_titles, "Started emerge on: "+\
15059 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15062 myelogstr=" ".join(myopts)
15064 myelogstr+=" "+myaction
15066 myelogstr += " " + " ".join(oldargs)
15067 emergelog(xterm_titles, " *** emerge " + myelogstr)
15070 def emergeexitsig(signum, frame):
15071 signal.signal(signal.SIGINT, signal.SIG_IGN)
15072 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15073 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15074 sys.exit(100+signum)
15075 signal.signal(signal.SIGINT, emergeexitsig)
15076 signal.signal(signal.SIGTERM, emergeexitsig)
15079 """This gets out final log message in before we quit."""
15080 if "--pretend" not in myopts:
15081 emergelog(xterm_titles, " *** terminating.")
15082 if "notitles" not in settings.features:
15084 portage.atexit_register(emergeexit)
15086 if myaction in ("config", "metadata", "regen", "sync"):
15087 if "--pretend" in myopts:
15088 sys.stderr.write(("emerge: The '%s' action does " + \
15089 "not support '--pretend'.\n") % myaction)
15092 if "sync" == myaction:
15093 return action_sync(settings, trees, mtimedb, myopts, myaction)
15094 elif "metadata" == myaction:
15095 action_metadata(settings, portdb, myopts)
15096 elif myaction=="regen":
15097 validate_ebuild_environment(trees)
15098 action_regen(settings, portdb, myopts.get("--jobs"),
15099 myopts.get("--load-average"))
15101 elif "config"==myaction:
15102 validate_ebuild_environment(trees)
15103 action_config(settings, trees, myopts, myfiles)
15106 elif "search"==myaction:
15107 validate_ebuild_environment(trees)
15108 action_search(trees[settings["ROOT"]]["root_config"],
15109 myopts, myfiles, spinner)
15110 elif myaction in ("clean", "unmerge") or \
15111 (myaction == "prune" and "--nodeps" in myopts):
15112 validate_ebuild_environment(trees)
15114 # Ensure atoms are valid before calling unmerge().
15115 # For backward compat, leading '=' is not required.
15117 if is_valid_package_atom(x) or \
15118 is_valid_package_atom("=" + x):
15121 msg.append("'%s' is not a valid package atom." % (x,))
15122 msg.append("Please check ebuild(5) for full details.")
15123 writemsg_level("".join("!!! %s\n" % line for line in msg),
15124 level=logging.ERROR, noiselevel=-1)
15127 # When given a list of atoms, unmerge
15128 # them in the order given.
15129 ordered = myaction == "unmerge"
15130 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15131 mtimedb["ldpath"], ordered=ordered):
15132 if not (buildpkgonly or fetchonly or pretend):
15133 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15135 elif myaction in ("depclean", "info", "prune"):
15137 # Ensure atoms are valid before calling unmerge().
15138 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15141 if is_valid_package_atom(x):
15143 valid_atoms.append(
15144 portage.dep_expand(x, mydb=vardb, settings=settings))
15145 except portage.exception.AmbiguousPackageName, e:
15146 msg = "The short ebuild name \"" + x + \
15147 "\" is ambiguous. Please specify " + \
15148 "one of the following " + \
15149 "fully-qualified ebuild names instead:"
15150 for line in textwrap.wrap(msg, 70):
15151 writemsg_level("!!! %s\n" % (line,),
15152 level=logging.ERROR, noiselevel=-1)
15154 writemsg_level(" %s\n" % colorize("INFORM", i),
15155 level=logging.ERROR, noiselevel=-1)
15156 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15160 msg.append("'%s' is not a valid package atom." % (x,))
15161 msg.append("Please check ebuild(5) for full details.")
15162 writemsg_level("".join("!!! %s\n" % line for line in msg),
15163 level=logging.ERROR, noiselevel=-1)
15166 if myaction == "info":
15167 return action_info(settings, trees, myopts, valid_atoms)
15169 validate_ebuild_environment(trees)
15170 action_depclean(settings, trees, mtimedb["ldpath"],
15171 myopts, myaction, valid_atoms, spinner)
15172 if not (buildpkgonly or fetchonly or pretend):
15173 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15174 # "update", "system", or just process files:
15176 validate_ebuild_environment(trees)
15177 if "--pretend" not in myopts:
15178 display_news_notification(root_config, myopts)
15179 retval = action_build(settings, trees, mtimedb,
15180 myopts, myaction, myfiles, spinner)
15181 root_config = trees[settings["ROOT"]]["root_config"]
15182 post_emerge(root_config, myopts, mtimedb, retval)