2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time, types
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
59 from UserDict import DictMixin
62 import cPickle as pickle
67 import cStringIO as StringIO
71 class stdout_spinner(object):
# Progress spinner written to stdout while emerge does long-running work.
# Offers several display styles: basic dots, a scrolling message, a twirl
# character, and a quiet no-op.  NOTE(review): this extract elides lines
# (embedded numbers are original line numbers); missing statements include
# the style-selection logic and several return/else/flush lines.
73 "Gentoo Rocks ("+platform.system()+")",
74 "Thank you for using Gentoo. :)",
75 "Are you actually trying to read this?",
76 "How many times have you stared at this?",
77 "We are generating the cache right now",
78 "You are paying too much attention.",
79 "A theory is better than its explanation.",
80 "Phasers locked on target, Captain.",
81 "Thrashing is just virtual crashing.",
82 "To be is to program.",
83 "Real Users hate Real Programmers.",
84 "When all else fails, read the instructions.",
85 "Functionality breeds Contempt.",
86 "The future lies ahead.",
87 "3.1415926535897932384626433832795028841971694",
88 "Sometimes insanity is the only alternative.",
89 "Inaccuracy saves a world of explanation.",
92 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default update style is the twirl spinner.
96 self.update = self.update_twirl
# Pick a scroll message pseudo-randomly, derived from the current time.
97 self.scroll_sequence = self.scroll_msgs[
98 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between terminal writes (rate limiting).
100 self.min_display_latency = 0.05
102 def _return_early(self):
104 Flushing output to the tty too frequently wastes cpu time. Therefore,
105 each update* method should return without doing any output when this
# Remember the time of the last actual update so the next call can be
# throttled against min_display_latency.
108 cur_time = time.time()
109 if cur_time - self.last_update < self.min_display_latency:
111 self.last_update = cur_time
114 def update_basic(self):
# Emit a dot every 100th call; spinpos wraps at 500.
115 self.spinpos = (self.spinpos + 1) % 500
116 if self._return_early():
118 if (self.spinpos % 100) == 0:
119 if self.spinpos == 0:
120 sys.stdout.write(". ")
122 sys.stdout.write(".")
125 def update_scroll(self):
# Scroll the chosen message across the line using backspaces; the first
# half of the cycle scrolls one way, the second half the other.
126 if self._return_early():
128 if(self.spinpos >= len(self.scroll_sequence)):
129 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
136 def update_twirl(self):
# Classic rotating /-\| spinner.
137 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138 if self._return_early():
140 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
143 def update_quiet(self):
146 def userquery(prompt, responses=None, colours=None):
147 """Displays a prompt and a set of responses, then waits for a response
148 which is checked against the responses and the first to match is
149 returned. An empty response will match the first value in responses. The
150 input buffer is *not* cleared prior to the prompt!
153 responses: a List of Strings.
154 colours: a List of Functions taking and returning a String, used to
155 process the responses for display. Typically these will be functions
156 like red() but could be e.g. lambda x: "DisplayString".
157 If responses is omitted, defaults to ["Yes", "No"], [green, red].
158 If only colours is omitted, defaults to [bold, ...].
160 Returns a member of the List responses. (If called without optional
161 arguments, returns "Yes" or "No".)
162 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164 if responses is None:
165 responses = ["Yes", "No"]
# Default colours: a distinct colour for the default (first) choice and
# another for the remaining choices.
167 create_color_func("PROMPT_CHOICE_DEFAULT"),
168 create_color_func("PROMPT_CHOICE_OTHER")
170 elif colours is None:
# Repeat the supplied colour list so each response has a colour function.
172 colours=(colours*len(responses))[:len(responses)]
# Loop (elided) until the user's input prefix-matches one of the responses.
176 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177 for key in responses:
178 # An empty response will match the first value in responses.
179 if response.upper()==key[:len(response)].upper():
181 print "Sorry, response '%s' not understood." % response,
182 except (EOFError, KeyboardInterrupt):
# Valid emerge action names (some entries elided in this extract).
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen", "search",
# Recognised long command-line options (list opening line elided).
193 "--ask", "--alphabetical",
194 "--buildpkg", "--buildpkgonly",
195 "--changelog", "--columns",
200 "--fetchonly", "--fetch-all-uri",
201 "--getbinpkg", "--getbinpkgonly",
202 "--help", "--ignore-default-opts",
205 "--newuse", "--nocolor",
206 "--nodeps", "--noreplace",
207 "--nospinner", "--oneshot",
208 "--onlydeps", "--pretend",
209 "--quiet", "--resume",
210 "--searchdesc", "--selective",
214 "--usepkg", "--usepkgonly",
215 "--verbose", "--version"
# Single-letter short option -> long option mapping (dict opening elided).
221 "b":"--buildpkg", "B":"--buildpkgonly",
222 "c":"--clean", "C":"--unmerge",
223 "d":"--debug", "D":"--deep",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "k":"--usepkg", "K":"--usepkgonly",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps", "O":"--nodeps",
232 "p":"--pretend", "P":"--prune",
234 "s":"--search", "S":"--searchdesc",
237 "v":"--verbose", "V":"--version"
240 def emergelog(xterm_titles, mystr, short_msg=None):
# Append a timestamped entry for mystr to /var/log/emerge.log.  When
# xterm_titles is true and a short_msg is given, also set the terminal
# title to short_msg (prefixed with $HOSTNAME when available).
241 if xterm_titles and short_msg:
242 if "HOSTNAME" in os.environ:
243 short_msg = os.environ["HOSTNAME"]+": "+short_msg
244 xtermTitle(short_msg)
246 file_path = "/var/log/emerge.log"
247 mylogfile = open(file_path, "a")
# Keep the log readable/writable by the portage user and group.
248 portage.util.apply_secpass_permissions(file_path,
249 uid=portage.portage_uid, gid=portage.portage_gid,
253 mylock = portage.locks.lockfile(mylogfile)
254 # seek because we may have gotten held up by the lock.
255 # if so, we may not be positioned at the end of the file.
# Timestamp is the integer part of time.time() (first 10 characters).
257 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
261 portage.locks.unlockfile(mylock)
263 except (IOError,OSError,portage.exception.PortageException), e:
# Logging is best-effort: failures go to stderr rather than raising.
265 print >> sys.stderr, "emergelog():",e
267 def countdown(secs=5, doing="Starting"):
# Print a visible countdown of `secs` seconds before an action, giving the
# user a chance to Control-C.  The sleep/loop lines are elided here.
269 print ">>> Waiting",secs,"seconds before starting..."
270 print ">>> (Control-C to abort)...\n"+doing+" in: ",
274 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
279 # formats a size given in bytes nicely
280 def format_size(mysize):
# Non-integer input is handled by an elided branch; presumably returns a
# placeholder string -- TODO confirm against the full source.
281 if type(mysize) not in [types.IntType,types.LongType]:
283 if 0 != mysize % 1024:
284 # Always round up to the next kB so that it doesn't show 0 kB when
285 # some small file still needs to be fetched.
286 mysize += 1024 - mysize % 1024
287 mystr=str(mysize/1024)
# Insert thousands separators (loop bounds elided in this extract).
291 mystr=mystr[:mycount]+","+mystr[mycount:]
295 def getgccversion(chost):
298 return: the current in-use gcc version
# Resolution order: gcc-config, then ${CHOST}-gcc -dumpversion, then plain
# gcc -dumpversion.  Returns "[unavailable]" with a warning if all fail.
301 gcc_ver_command = 'gcc -dumpversion'
302 gcc_ver_prefix = 'gcc-'
304 gcc_not_found_error = red(
305 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306 "!!! to update the environment of this terminal and possibly\n" +
307 "!!! other terminals also.\n"
# First choice: the active gcc-config profile for this CHOST.
310 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Second choice: ask the CHOST-prefixed compiler directly.
314 mystatus, myoutput = commands.getstatusoutput(
315 chost + "-" + gcc_ver_command)
316 if mystatus == os.EX_OK:
317 return gcc_ver_prefix + myoutput
# Last resort: whatever `gcc` is on PATH.
319 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320 if mystatus == os.EX_OK:
321 return gcc_ver_prefix + myoutput
323 portage.writemsg(gcc_not_found_error, noiselevel=-1)
324 return "[unavailable]"
326 def getportageversion(portdir, target_root, profile, chost, vardb):
# Build the one-line version banner: Portage version, profile name, gcc
# version, libc version(s) and kernel/arch from uname.
327 profilever = "unavailable"
329 realpath = os.path.realpath(profile)
330 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
# Profiles inside $PORTDIR/profiles are shown relative to that directory.
331 if realpath.startswith(basepath):
332 profilever = realpath[1 + len(basepath):]
# Otherwise show the raw symlink target, prefixed with "!".
335 profilever = "!" + os.readlink(profile)
338 del realpath, basepath
# Collect the installed libc package(s); both virtuals are checked.
341 libclist = vardb.match("virtual/libc")
342 libclist += vardb.match("virtual/glibc")
343 libclist = portage.util.unique_array(libclist)
345 xs=portage.catpkgsplit(x)
# Multiple libcs are comma-separated (loop header elided in this extract).
347 libcver+=","+"-".join(xs[1:])
349 libcver="-".join(xs[1:])
351 libcver="unavailable"
353 gccver = getgccversion(chost)
354 unameout=platform.release()+" "+platform.machine()
356 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358 def create_depgraph_params(myopts, myaction):
359 #configure emerge engine parameters
361 # self: include _this_ package regardless of if it is merged.
362 # selective: exclude the package if it is merged
363 # recurse: go into the dependencies
364 # deep: go into the dependencies of already merged packages
365 # empty: pretend nothing is merged
366 # complete: completely account for all known dependencies
367 # remove: build graph for use in removing packages
# "recurse" is always on unless --nodeps removes it below.
368 myparams = set(["recurse"])
370 if myaction == "remove":
371 myparams.add("remove")
372 myparams.add("complete")
# These options imply skipping already-merged packages.
375 if "--update" in myopts or \
376 "--newuse" in myopts or \
377 "--reinstall" in myopts or \
378 "--noreplace" in myopts:
379 myparams.add("selective")
# --emptytree overrides selectivity: treat nothing as merged.
380 if "--emptytree" in myopts:
381 myparams.add("empty")
382 myparams.discard("selective")
383 if "--nodeps" in myopts:
384 myparams.discard("recurse")
385 if "--deep" in myopts:
387 if "--complete-graph" in myopts:
388 myparams.add("complete")
391 # search functionality
392 class search(object):
# Implements `emerge --search`: matches package names (and optionally
# descriptions and sets) against a search key, then prints the results.
# NOTE(review): this extract elides many lines; several branches, returns
# and loop headers are missing.
403 def __init__(self, root_config, spinner, searchdesc,
404 verbose, usepkg, usepkgonly):
405 """Searches the available and installed packages for the supplied search key.
406 The list of available and installed packages is created at object instantiation.
407 This makes successive searches faster."""
408 self.settings = root_config.settings
409 self.vartree = root_config.trees["vartree"]
410 self.spinner = spinner
411 self.verbose = verbose
412 self.searchdesc = searchdesc
413 self.root_config = root_config
414 self.setconfig = root_config.setconfig
415 self.matches = {"pkg" : []}
# fake_portdb (construction elided) fans portdb-style calls out across all
# of the databases in self._dbs via the _-prefixed wrapper methods below.
420 self.portdb = fake_portdb
421 for attrib in ("aux_get", "cp_all",
422 "xmatch", "findname", "getFetchMap"):
423 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
427 portdb = root_config.trees["porttree"].dbapi
428 bindb = root_config.trees["bintree"].dbapi
429 vardb = root_config.trees["vartree"].dbapi
# Which databases participate depends on --usepkg/--usepkgonly.
431 if not usepkgonly and portdb._have_root_eclass_dir:
432 self._dbs.append(portdb)
434 if (usepkg or usepkgonly) and bindb.cp_all():
435 self._dbs.append(bindb)
437 self._dbs.append(vardb)
438 self._portdb = portdb
# _cp_all (def line elided): union of cp_all() over all databases, sorted.
443 cp_all.update(db.cp_all())
444 return list(sorted(cp_all))
446 def _aux_get(self, *args, **kwargs):
# Try each database in turn (loop/except lines elided).
449 return db.aux_get(*args, **kwargs)
454 def _findname(self, *args, **kwargs):
456 if db is not self._portdb:
457 # We don't want findname to return anything
458 # unless it's an ebuild in a portage tree.
459 # Otherwise, it's already built and we don't
462 func = getattr(db, "findname", None)
464 value = func(*args, **kwargs)
469 def _getFetchMap(self, *args, **kwargs):
# Delegate to the first database that implements getFetchMap.
471 func = getattr(db, "getFetchMap", None)
473 value = func(*args, **kwargs)
478 def _visible(self, db, cpv, metadata):
# Visibility check via the module-level visible() helper, wrapping the
# cpv/metadata in a Package of the appropriate type.
479 installed = db is self.vartree.dbapi
480 built = installed or db is not self._portdb
483 pkg_type = "installed"
486 return visible(self.settings,
487 Package(type_name=pkg_type, root_config=self.root_config,
488 cpv=cpv, built=built, installed=installed, metadata=metadata))
490 def _xmatch(self, level, atom):
492 This method does not expand old-style virtuals because it
493 is restricted to returning matches for a single ${CATEGORY}/${PN}
494 and old-style virual matches unreliable for that when querying
495 multiple package databases. If necessary, old-style virtuals
496 can be performed on atoms prior to calling this method.
498 cp = portage.dep_getkey(atom)
499 if level == "match-all":
# Union of matches over all dbs, restricted to this cp, sorted ascending.
502 if hasattr(db, "xmatch"):
503 matches.update(db.xmatch(level, atom))
505 matches.update(db.match(atom))
506 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507 db._cpv_sort_ascending(result)
508 elif level == "match-visible":
# As above, but databases without xmatch are filtered via _visible().
511 if hasattr(db, "xmatch"):
512 matches.update(db.xmatch(level, atom))
514 db_keys = list(db._aux_cache_keys)
515 for cpv in db.match(atom):
516 metadata = izip(db_keys,
517 db.aux_get(cpv, db_keys))
518 if not self._visible(db, cpv, metadata):
521 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522 db._cpv_sort_ascending(result)
523 elif level == "bestmatch-visible":
# Keep the single highest visible match across all databases.
526 if hasattr(db, "xmatch"):
527 cpv = db.xmatch("bestmatch-visible", atom)
528 if not cpv or portage.cpv_getkey(cpv) != cp:
530 if not result or cpv == portage.best([cpv, result]):
533 db_keys = Package.metadata_keys
534 # break out of this loop with highest visible
535 # match, checked in descending order
536 for cpv in reversed(db.match(atom)):
537 if portage.cpv_getkey(cpv) != cp:
539 metadata = izip(db_keys,
540 db.aux_get(cpv, db_keys))
541 if not self._visible(db, cpv, metadata):
543 if not result or cpv == portage.best([cpv, result]):
# Any other level is unsupported.
547 raise NotImplementedError(level)
550 def execute(self,searchkey):
551 """Performs the search for the supplied search key"""
553 self.searchkey=searchkey
554 self.packagematches = []
# "desc" matches are only collected when description search is enabled.
557 self.matches = {"pkg":[], "desc":[], "set":[]}
560 self.matches = {"pkg":[], "set":[]}
561 print "Searching... ",
# Leading '%' selects regex mode; leading '@' restricts to category/name.
564 if self.searchkey.startswith('%'):
566 self.searchkey = self.searchkey[1:]
567 if self.searchkey.startswith('@'):
569 self.searchkey = self.searchkey[1:]
571 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex mode: escape the key so it is matched literally.
573 self.searchre=re.compile(re.escape(self.searchkey), re.I)
574 for package in self.portdb.cp_all():
575 self.spinner.update()
578 match_string = package[:]
580 match_string = package.split("/")[-1]
583 if self.searchre.search(match_string):
# Name matched: record it, flagging it masked when nothing is visible.
584 if not self.portdb.xmatch("match-visible", package):
586 self.matches["pkg"].append([package,masked])
587 elif self.searchdesc: # DESCRIPTION searching
588 full_package = self.portdb.xmatch("bestmatch-visible", package)
590 #no match found; we don't want to query description
591 full_package = portage.best(
592 self.portdb.xmatch("match-all", package))
598 full_desc = self.portdb.aux_get(
599 full_package, ["DESCRIPTION"])[0]
601 print "emerge: search: aux_get() failed, skipping"
603 if self.searchre.search(full_desc):
604 self.matches["desc"].append([full_package,masked])
# Also search package sets (by name, or description when enabled).
606 self.sdict = self.setconfig.getSets()
607 for setname in self.sdict:
608 self.spinner.update()
610 match_string = setname
612 match_string = setname.split("/")[-1]
614 if self.searchre.search(match_string):
615 self.matches["set"].append([setname, False])
616 elif self.searchdesc:
617 if self.searchre.search(
618 self.sdict[setname].getMetadata("DESCRIPTION")):
619 self.matches["set"].append([setname, False])
622 for mtype in self.matches:
623 self.matches[mtype].sort()
624 self.mlen += len(self.matches[mtype])
# Helper (def line elided): classify a single cp as masked/visible.
627 if not self.portdb.xmatch("match-all", cp):
630 if not self.portdb.xmatch("bestmatch-visible", cp):
632 self.matches["pkg"].append([cp, masked])
636 """Outputs the results of the search."""
637 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
638 print "[ Applications found : "+white(str(self.mlen))+" ]"
640 vardb = self.vartree.dbapi
641 for mtype in self.matches:
642 for match,masked in self.matches[mtype]:
646 full_package = self.portdb.xmatch(
647 "bestmatch-visible", match)
649 #no match found; we don't want to query description
651 full_package = portage.best(
652 self.portdb.xmatch("match-all",match))
653 elif mtype == "desc":
655 match = portage.cpv_getkey(match)
# Set results show only name and description.
657 print green("*")+" "+white(match)
658 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
662 desc, homepage, license = self.portdb.aux_get(
663 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665 print "emerge: search: aux_get() failed, skipping"
668 print green("*")+" "+white(match)+" "+red("[ Masked ]")
670 print green("*")+" "+white(match)
671 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
# Verbose mode: compute the download size from the Manifest digests.
675 mycat = match.split("/")[0]
676 mypkg = match.split("/")[1]
677 mycpv = match + "-" + myversion
678 myebuild = self.portdb.findname(mycpv)
680 pkgdir = os.path.dirname(myebuild)
681 from portage import manifest
682 mf = manifest.Manifest(
683 pkgdir, self.settings["DISTDIR"])
685 uri_map = self.portdb.getFetchMap(mycpv)
686 except portage.exception.InvalidDependString, e:
687 file_size_str = "Unknown (%s)" % (e,)
691 mysum[0] = mf.getDistfilesSize(uri_map)
693 file_size_str = "Unknown (missing " + \
694 "digest for %s)" % (e,)
# No ebuild: fall back to the binary package's on-disk size.
699 if db is not vardb and \
700 db.cpv_exists(mycpv):
702 if not myebuild and hasattr(db, "bintree"):
703 myebuild = db.bintree.getname(mycpv)
705 mysum[0] = os.stat(myebuild).st_size
# Format the size in kB with thousands separators.
710 if myebuild and file_size_str is None:
711 mystr = str(mysum[0] / 1024)
715 mystr = mystr[:mycount] + "," + mystr[mycount:]
716 file_size_str = mystr + " kB"
720 print " ", darkgreen("Latest version available:"),myversion
721 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
724 (darkgreen("Size of files:"), file_size_str)
725 print " ", darkgreen("Homepage:")+" ",homepage
726 print " ", darkgreen("Description:")+" ",desc
727 print " ", darkgreen("License:")+" ",license
732 def getInstallationStatus(self,package):
# One-line "Latest version installed" status for a dep atom.
733 installed_package = self.vartree.dep_bestmatch(package)
735 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737 result = darkgreen("Latest version installed:")+" "+version
739 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
742 def getVersion(self,full_package,detail):
# Extract the version (plus -rN revision when not r0) from a full cpv.
743 if len(full_package) > 1:
744 package_parts = portage.catpkgsplit(full_package)
745 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746 result = package_parts[2]+ "-" + package_parts[3]
748 result = package_parts[2]
753 class RootConfig(object):
754 """This is used internally by depgraph to track information about a
# Maps Package type_name values to the tree key they live under.
758 "ebuild" : "porttree",
759 "binary" : "bintree",
760 "installed" : "vartree"
# Reverse mapping, tree name -> package type (assignment line elided).
764 for k, v in pkg_tree_map.iteritems():
767 def __init__(self, settings, trees, setconfig):
769 self.settings = settings
# Implicit IUSE is cached as a sorted tuple for cheap reuse.
770 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771 self.root = self.settings["ROOT"]
772 self.setconfig = setconfig
773 self.sets = self.setconfig.getSets()
774 self.visible_pkgs = PackageVirtualDbapi(self.settings)
776 def create_world_atom(pkg, args_set, root_config):
777 """Create a new atom for the world file if one does not exist. If the
778 argument atom is precise enough to identify a specific slot then a slot
779 atom will be returned. Atoms that are in the system set may also be stored
780 in world since system atoms can only match one slot while world atoms can
781 be greedy with respect to slots. Unslotted system packages will not be
# NOTE(review): several lines are elided in this extract (early returns,
# the mydb selection, and parts of the control flow).
784 arg_atom = args_set.findAtomForPackage(pkg)
787 cp = portage.dep_getkey(arg_atom)
789 sets = root_config.sets
790 portdb = root_config.trees["porttree"].dbapi
791 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists for its cp, or
# its single SLOT is something other than "0".
792 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793 for cpv in portdb.match(cp))
794 slotted = len(available_slots) > 1 or \
795 (len(available_slots) == 1 and "0" not in available_slots)
797 # check the vdb in case this is multislot
798 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799 for cpv in vardb.match(cp))
800 slotted = len(available_slots) > 1 or \
801 (len(available_slots) == 1 and "0" not in available_slots)
802 if slotted and arg_atom != cp:
803 # If the user gave a specific atom, store it as a
804 # slot atom in the world file.
805 slot_atom = pkg.slot_atom
807 # For USE=multislot, there are a couple of cases to
810 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811 # unknown value, so just record an unslotted atom.
813 # 2) SLOT comes from an installed package and there is no
814 # matching SLOT in the portage tree.
816 # Make sure that the slot atom is available in either the
817 # portdb or the vardb, since otherwise the user certainly
818 # doesn't want the SLOT atom recorded in the world file
819 # (case 1 above). If it's only available in the vardb,
820 # the user may be trying to prevent a USE=multislot
821 # package from being removed by --depclean (case 2 above).
824 if not portdb.match(slot_atom):
825 # SLOT seems to come from an installed multislot package
827 # If there is no installed package matching the SLOT atom,
828 # it probably changed SLOT spontaneously due to USE=multislot,
829 # so just record an unslotted atom.
830 if vardb.match(slot_atom):
831 # Now verify that the argument is precise
832 # enough to identify a specific slot.
833 matches = mydb.match(arg_atom)
834 matched_slots = set()
# (loop header elided) Collect the SLOT of every match of the argument.
836 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837 if len(matched_slots) == 1:
838 new_world_atom = slot_atom
840 if new_world_atom == sets["world"].findAtomForPackage(pkg):
841 # Both atoms would be identical, so there's nothing to add.
844 # Unlike world atoms, system atoms are not greedy for slots, so they
845 # can't be safely excluded from world if they are slotted.
846 system_atom = sets["system"].findAtomForPackage(pkg)
848 if not portage.dep_getkey(system_atom).startswith("virtual/"):
850 # System virtuals aren't safe to exclude from world since they can
851 # match multiple old-style virtuals but only one of them will be
852 # pulled in by update or depclean.
853 providers = portdb.mysettings.getvirtuals().get(
854 portage.dep_getkey(system_atom))
855 if providers and len(providers) == 1 and providers[0] == cp:
857 return new_world_atom
859 def filter_iuse_defaults(iuse):
# Generator (yield lines elided): strips the leading +/- default marker
# from IUSE flags -- TODO confirm against the full source.
861 if flag.startswith("+") or flag.startswith("-"):
866 class SlotObject(object):
# Base class for lightweight value objects: __init__ accepts keyword
# arguments for every name declared in __slots__ anywhere in the MRO.
867 __slots__ = ("__weakref__",)
869 def __init__(self, **kwargs):
# Walk the class hierarchy, collecting __slots__ from each class.
870 classes = [self.__class__]
875 classes.extend(c.__bases__)
876 slots = getattr(c, "__slots__", None)
# Missing keywords default to None.
880 myvalue = kwargs.get(myattr, None)
881 setattr(self, myattr, myvalue)
885 Create a new instance and copy all attributes
886 defined from __slots__ (including those from
889 obj = self.__class__()
891 classes = [self.__class__]
896 classes.extend(c.__bases__)
897 slots = getattr(c, "__slots__", None)
901 setattr(obj, myattr, getattr(self, myattr))
905 class AbstractDepPriority(SlotObject):
# Base class for dependency priorities.  Rich comparisons delegate to
# __int__() (defined in subclasses), so priorities compare like integers.
906 __slots__ = ("buildtime", "runtime", "runtime_post")
908 def __lt__(self, other):
909 return self.__int__() < other
911 def __le__(self, other):
912 return self.__int__() <= other
914 def __eq__(self, other):
915 return self.__int__() == other
917 def __ne__(self, other):
918 return self.__int__() != other
920 def __gt__(self, other):
921 return self.__int__() > other
923 def __ge__(self, other):
924 return self.__int__() >= other
# copy method (def line elided): shallow copy via the copy module.
928 return copy.copy(self)
930 class DepPriority(AbstractDepPriority):
932 This class generates an integer priority level based of various
933 attributes of the dependency relationship. Attributes can be assigned
934 at any time and the new integer value will be generated on calls to the
935 __int__() method. Rich comparison operators are supported.
937 The boolean attributes that affect the integer value are "satisfied",
938 "buildtime", "runtime", and "system". Various combinations of
939 attributes lead to the following priority levels:
941 Combination of properties Priority Category
943 not satisfied and buildtime 0 HARD
944 not satisfied and runtime -1 MEDIUM
945 not satisfied and runtime_post -2 MEDIUM_SOFT
946 satisfied and buildtime and rebuild -3 SOFT
947 satisfied and buildtime -4 SOFT
948 satisfied and runtime -5 SOFT
949 satisfied and runtime_post -6 SOFT
950 (none of the above) -6 SOFT
952 Several integer constants are defined for categorization of priority
955 MEDIUM The upper boundary for medium dependencies.
956 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
957 SOFT The upper boundary for soft dependencies.
958 MIN The lower boundary for soft dependencies.
960 __slots__ = ("satisfied", "rebuild")
# __int__ (def line elided) implements the priority table documented in
# the class docstring above.
967 if not self.satisfied:
972 if self.runtime_post:
980 if self.runtime_post:
# String form (def line elided): map the integer to a category name by
# comparing against the MEDIUM/MEDIUM_SOFT/SOFT boundaries.
985 myvalue = self.__int__()
986 if myvalue > self.MEDIUM:
988 if myvalue > self.MEDIUM_SOFT:
990 if myvalue > self.SOFT:
994 class BlockerDepPriority(DepPriority):
# Priority used for blocker dependencies; a shared singleton instance is
# attached to the class immediately below.
999 BlockerDepPriority.instance = BlockerDepPriority()
1001 class UnmergeDepPriority(AbstractDepPriority):
# Priority used when ordering unmerge operations.
1002 __slots__ = ("satisfied",)
1004 Combination of properties Priority Category
1007 runtime_post -1 HARD
1009 (none of the above) -2 SOFT
# __int__ (def line elided) implements the table above.
1019 if self.runtime_post:
# String form (def line elided): HARD vs SOFT by comparison with SOFT.
1026 myvalue = self.__int__()
1027 if myvalue > self.SOFT:
1031 class FakeVartree(portage.vartree):
1032 """This is implements an in-memory copy of a vartree instance that provides
1033 all the interfaces required for use by the depgraph. The vardb is locked
1034 during the constructor call just long enough to read a copy of the
1035 installed package information. This allows the depgraph to do it's
1036 dependency calculations without holding a lock on the vardb. It also
1037 allows things like vardb global updates to be done in memory so that the
1038 user doesn't necessarily need write access to the vardb in cases where
1039 global updates are necessary (updates are performed when necessary if there
1040 is not a matching ebuild in the tree)."""
1041 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
# NOTE(review): lines are elided in this extract (try/finally bodies,
# else branches and the slot_counters initialization are missing).
1042 self._root_config = root_config
1043 if pkg_cache is None:
1045 real_vartree = root_config.trees["vartree"]
1046 portdb = root_config.trees["porttree"].dbapi
1047 self.root = real_vartree.root
1048 self.settings = real_vartree.settings
# Always fetch _mtime_ along with the cached aux keys.
1049 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1050 if "_mtime_" not in mykeys:
1051 mykeys.append("_mtime_")
1052 self._db_keys = mykeys
1053 self._pkg_cache = pkg_cache
1054 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1055 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1057 # At least the parent needs to exist for the lock file.
1058 portage.util.ensure_dirs(vdb_path)
1059 except portage.exception.PortageException:
# Only lock when requested and when the vdb is writable by us.
1063 if acquire_lock and os.access(vdb_path, os.W_OK):
1064 vdb_lock = portage.locks.lockdir(vdb_path)
1065 real_dbapi = real_vartree.dbapi
# Snapshot every installed package into the in-memory dbapi.
1067 for cpv in real_dbapi.cpv_all():
1068 cache_key = ("installed", self.root, cpv, "nomerge")
1069 pkg = self._pkg_cache.get(cache_key)
1071 metadata = pkg.metadata
1073 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1074 myslot = metadata["SLOT"]
1075 mycp = portage.dep_getkey(cpv)
1076 myslot_atom = "%s:%s" % (mycp, myslot)
1078 mycounter = long(metadata["COUNTER"])
1081 metadata["COUNTER"] = str(mycounter)
# Keep only the highest COUNTER per slot atom.
1082 other_counter = slot_counters.get(myslot_atom, None)
1083 if other_counter is not None:
1084 if other_counter > mycounter:
1086 slot_counters[myslot_atom] = mycounter
1088 pkg = Package(built=True, cpv=cpv,
1089 installed=True, metadata=metadata,
1090 root_config=root_config, type_name="installed")
1091 self._pkg_cache[pkg] = pkg
1092 self.dbapi.cpv_inject(pkg)
1093 real_dbapi.flush_cache()
1096 portage.locks.unlockdir(vdb_lock)
1097 # Populate the old-style virtuals using the cached values.
1098 if not self.settings.treeVirtuals:
1099 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1100 portage.getCPFromCPV, self.get_all_provides())
1102 # Intialize variables needed for lazy cache pulls of the live ebuild
1103 # metadata. This ensures that the vardb lock is released ASAP, without
1104 # being delayed in case cache generation is triggered.
1105 self._aux_get = self.dbapi.aux_get
1106 self.dbapi.aux_get = self._aux_get_wrapper
1107 self._match = self.dbapi.match
1108 self.dbapi.match = self._match_wrapper
1109 self._aux_get_history = set()
1110 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1111 self._portdb = portdb
1112 self._global_updates = None
1114 def _match_wrapper(self, cpv, use_cache=1):
1116 Make sure the metadata in Package instances gets updated for any
1117 cpv that is returned from a match() call, since the metadata can
1118 be accessed directly from the Package instance instead of via
1121 matches = self._match(cpv, use_cache=use_cache)
# (loop header elided) Prime the aux cache for any not-yet-seen match.
1123 if cpv in self._aux_get_history:
1125 self._aux_get_wrapper(cpv, [])
1128 def _aux_get_wrapper(self, pkg, wants):
# First request for this pkg: try to refresh its dependency metadata
# from the live ebuild; afterwards fall through to the cached copy.
1129 if pkg in self._aux_get_history:
1130 return self._aux_get(pkg, wants)
1131 self._aux_get_history.add(pkg)
1133 # Use the live ebuild metadata if possible.
1134 live_metadata = dict(izip(self._portdb_keys,
1135 self._portdb.aux_get(pkg, self._portdb_keys)))
1136 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1138 self.dbapi.aux_update(pkg, live_metadata)
1139 except (KeyError, portage.exception.PortageException):
# No usable live ebuild: apply profile "updates" entries instead.
1140 if self._global_updates is None:
1141 self._global_updates = \
1142 grab_global_updates(self._portdb.porttree_root)
1143 perform_global_updates(
1144 pkg, self.dbapi, self._global_updates)
1145 return self._aux_get(pkg, wants)
1147 def sync(self, acquire_lock=1):
1149 Call this method to synchronize state with the real vardb
1150 after one or more packages may have been installed or
1153 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1155 # At least the parent needs to exist for the lock file.
1156 portage.util.ensure_dirs(vdb_path)
1157 except portage.exception.PortageException:
1161 if acquire_lock and os.access(vdb_path, os.W_OK):
1162 vdb_lock = portage.locks.lockdir(vdb_path)
1166 portage.locks.unlockdir(vdb_lock)
# _sync body follows (def line elided in this extract).
1170 real_vardb = self._root_config.trees["vartree"].dbapi
1171 current_cpv_set = frozenset(real_vardb.cpv_all())
1172 pkg_vardb = self.dbapi
1173 aux_get_history = self._aux_get_history
1175 # Remove any packages that have been uninstalled.
1176 for pkg in list(pkg_vardb):
1177 if pkg.cpv not in current_cpv_set:
1178 pkg_vardb.cpv_remove(pkg)
1179 aux_get_history.discard(pkg.cpv)
1181 # Validate counters and timestamps.
1184 validation_keys = ["COUNTER", "_mtime_"]
1185 for cpv in current_cpv_set:
1187 pkg_hash_key = ("installed", root, cpv, "nomerge")
1188 pkg = pkg_vardb.get(pkg_hash_key)
1190 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1192 counter = long(counter)
# Drop stale entries whose COUNTER or mtime changed on disk.
1196 if counter != pkg.counter or \
1198 pkg_vardb.cpv_remove(pkg)
1199 aux_get_history.discard(pkg.cpv)
1203 pkg = self._pkg(cpv)
# Track the highest COUNTER per slot atom, as in __init__.
1205 other_counter = slot_counters.get(pkg.slot_atom)
1206 if other_counter is not None:
1207 if other_counter > pkg.counter:
1210 slot_counters[pkg.slot_atom] = pkg.counter
1211 pkg_vardb.cpv_inject(pkg)
1213 real_vardb.flush_cache()
1215 def _pkg(self, cpv):
# Build a Package instance for an installed cpv from the real vardb.
1216 root_config = self._root_config
1217 real_vardb = root_config.trees["vartree"].dbapi
1218 pkg = Package(cpv=cpv, installed=True,
1219 metadata=izip(self._db_keys,
1220 real_vardb.aux_get(cpv, self._db_keys)),
1221 root_config=root_config,
1222 type_name="installed")
1225 mycounter = long(pkg.metadata["COUNTER"])
1228 pkg.metadata["COUNTER"] = str(mycounter)
1232 def grab_global_updates(portdir):
# Read and parse $PORTDIR/profiles/updates; returns the accumulated update
# commands (return statement elided in this extract).
1233 from portage.update import grab_updates, parse_updates
1234 updpath = os.path.join(portdir, "profiles", "updates")
1236 rawupdates = grab_updates(updpath)
1237 except portage.exception.DirectoryNotFound:
# A missing updates directory is not an error (handling elided).
1240 for mykey, mystat, mycontent in rawupdates:
1241 commands, errors = parse_updates(mycontent)
1242 upd_commands.extend(commands)
1245 def perform_global_updates(mycpv, mydb, mycommands):
# Apply profile update commands (e.g. package moves) to the dependency
# metadata of a single cpv, writing any changes back through aux_update.
1246 from portage.update import update_dbentries
1247 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249 updates = update_dbentries(mycommands, aux_dict)
# (condition elided) Only written back when updates is non-empty, presumably.
1251 mydb.aux_update(mycpv, updates)
# Visibility filter: runs a package through SLOT/CHOST/EAPI/keyword/mask/
# license checks against the given config (pkgsettings).
# NOTE(review): every check's return statement and the surrounding
# try/docstring delimiters are elided in this extract (margin numbering
# jumps); only the test conditions themselves are visible.
1253 def visible(pkgsettings, pkg):
1255 Check if a package is visible. This can raise an InvalidDependString
1256 exception if LICENSE is invalid.
1257 TODO: optionally generate a list of masking reasons
1259 @returns: True if the package is visible, False otherwise.
# A package without a SLOT is treated as invalid (result line elided).
1261 if not pkg.metadata["SLOT"]:
# CHOST acceptance only applies to built-but-not-installed packages.
1263 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264 if not pkgsettings._accept_chost(pkg):
1266 eapi = pkg.metadata["EAPI"]
1267 if not portage.eapi_is_supported(eapi):
# Deprecated-EAPI check is skipped for already-installed packages.
1269 if not pkg.installed:
1270 if portage._eapi_is_deprecated(eapi):
1272 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1274 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1276 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
# License check can raise InvalidDependString; the try: line is elided.
1279 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1281 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for a package: the standard
# portage.getmaskingstatus() reasons plus extract-visible extras for
# unacceptable CHOST and a missing SLOT.
# NOTE(review): the final `return mreasons` (and possibly other lines) is
# elided in this extract.
1285 def get_masking_status(pkg, pkgsettings, root_config):
1287 mreasons = portage.getmaskingstatus(
1288 pkg, settings=pkgsettings,
1289 portdb=root_config.trees["porttree"].dbapi)
# Same built-not-installed CHOST rule as visible() above.
1291 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292 if not pkgsettings._accept_chost(pkg):
1293 mreasons.append("CHOST: %s" % \
1294 pkg.metadata["CHOST"])
1296 if not pkg.metadata["SLOT"]:
1297 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for cpv from the given db and compute its masking
# reasons; returns (metadata, mreasons). A failed metadata fetch is
# reported as the single reason "corruption".
# NOTE(review): the try/except around aux_get (and the line setting
# metadata = None on failure) is elided in this extract.
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302 db, pkg_type, built, installed, db_keys):
1305 metadata = dict(izip(db_keys,
1306 db.aux_get(cpv, db_keys)))
# For ebuilds (not built packages), compute USE from the config.
1309 if metadata and not built:
1310 pkgsettings.setcpv(cpv, mydb=metadata)
1311 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312 if metadata is None:
1313 mreasons = ["corruption"]
1315 pkg = Package(type_name=pkg_type, root_config=root_config,
1316 cpv=cpv, built=built, installed=installed, metadata=metadata)
1317 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318 return metadata, mreasons
# Print each masked package with its reasons, any package.mask comment,
# and the filesystem location of missing licenses. Deduplicates output
# via shown_cpvs / shown_comments / shown_licenses sets. Returns whether
# any package was masked by an unsupported EAPI.
# NOTE(review): this extract elides lines — e.g. the shown_cpvs
# initialization, several print statements, `continue`s, and try: lines
# are missing (margin numbering jumps).
1320 def show_masked_packages(masked_packages):
1321 shown_licenses = set()
1322 shown_comments = set()
1323 # Maybe there is both an ebuild and a binary. Only
1324 # show one of them to avoid redundant appearance.
1326 have_eapi_mask = False
1327 for (root_config, pkgsettings, cpv,
1328 metadata, mreasons) in masked_packages:
# Skip duplicates (the continue/add lines are elided here).
1329 if cpv in shown_cpvs:
1332 comment, filename = None, None
# For package.mask entries, retrieve the mask comment and its file.
1333 if "package.mask" in mreasons:
1334 comment, filename = \
1335 portage.getmaskingreason(
1336 cpv, metadata=metadata,
1337 settings=pkgsettings,
1338 portdb=root_config.trees["porttree"].dbapi,
1339 return_location=True)
1340 missing_licenses = []
1342 if not portage.eapi_is_supported(metadata["EAPI"]):
1343 have_eapi_mask = True
# License lookup can raise InvalidDependString (try: line elided).
1345 missing_licenses = \
1346 pkgsettings._getMissingLicenses(
1348 except portage.exception.InvalidDependString:
1349 # This will have already been reported
1350 # above via mreasons.
1353 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354 if comment and comment not in shown_comments:
1357 shown_comments.add(comment)
1358 portdb = root_config.trees["porttree"].dbapi
1359 for l in missing_licenses:
1360 l_path = portdb.findLicensePath(l)
1361 if l in shown_licenses:
1363 msg = ("A copy of the '%s' license" + \
1364 " is located at '%s'.") % (l, l_path)
1367 shown_licenses.add(l)
1368 return have_eapi_mask
# Base class for scheduler tasks. A task's identity is its "hash key"
# tuple (supplied by subclasses via _get_hash_key); equality, hashing,
# length, indexing, iteration, containment and str() all delegate to it,
# so a Task compares equal to its raw hash-key tuple.
# NOTE(review): several `def` headers (__hash__, __len__, __iter__,
# __str__) and `return hash_key` are elided in this extract; the bodies
# visible below belong to those elided headers.
1370 class Task(SlotObject):
# _hash_value caches hash() of the hash key (computed lazily below).
1371 __slots__ = ("_hash_key", "_hash_value")
1373 def _get_hash_key(self):
1374 hash_key = getattr(self, "_hash_key", None)
# Subclasses must populate _hash_key; base class refuses to guess.
1375 if hash_key is None:
1376 raise NotImplementedError(self)
1379 def __eq__(self, other):
1380 return self._get_hash_key() == other
1382 def __ne__(self, other):
1383 return self._get_hash_key() != other
# Body of the elided __hash__: lazy, cached hash of the key tuple.
1386 hash_value = getattr(self, "_hash_value", None)
1387 if hash_value is None:
1388 self._hash_value = hash(self._get_hash_key())
1389 return self._hash_value
1392 return len(self._get_hash_key())
1394 def __getitem__(self, key):
1395 return self._get_hash_key()[key]
1398 return iter(self._get_hash_key())
1400 def __contains__(self, key):
1401 return key in self._get_hash_key()
1404 return str(self._get_hash_key())
# A blocker atom (!cat/pkg) attached to a root. Identity is the tuple
# ("blocks", root, atom, eapi); cp is derived from the atom at init.
# NOTE(review): the assignment line `self._hash_key = \` (original 1418)
# is elided in this extract — line 1419 below is its right-hand side.
1406 class Blocker(Task):
# Re-export the parent's __hash__: defining __eq__ in Task would
# otherwise suppress inherited hashing.
1408 __hash__ = Task.__hash__
1409 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1411 def __init__(self, **kwargs):
1412 Task.__init__(self, **kwargs)
1413 self.cp = portage.dep_getkey(self.atom)
1415 def _get_hash_key(self):
1416 hash_key = getattr(self, "_hash_key", None)
1417 if hash_key is None:
1419 ("blocks", self.root, self.atom, self.eapi)
1420 return self._hash_key
# A package instance known to the dependency resolver. Wraps cpv plus
# metadata and derives convenience attributes (cp, slot_atom, category,
# pf, cpv_split, pv_split). Identity (hash key) is
# (type_name, root, cpv, operation); ordering compares versions via
# portage.pkgcmp for packages of the same cp.
# NOTE(review): this extract elides many lines — the `metadata_keys = [`
# opening (before line 1433), the `class _use` header (before 1450), the
# _iuse token-classification loop (1462-1467 area), try: lines, and the
# return statements of the comparison operators. The visible text is not
# a complete class body.
1422 class Package(Task):
1424 __hash__ = Task.__hash__
1425 __slots__ = ("built", "cpv", "depth",
1426 "installed", "metadata", "onlydeps", "operation",
1427 "root_config", "type_name",
1428 "category", "counter", "cp", "cpv_split",
1429 "inherited", "iuse", "mtime",
1430 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# Continuation of the (elided) metadata_keys list literal.
1433 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434 "INHERITED", "IUSE", "KEYWORDS",
1435 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1438 def __init__(self, **kwargs):
1439 Task.__init__(self, **kwargs)
1440 self.root = self.root_config.root
# Wrapping metadata keeps derived attributes (iuse/use/slot/...) in sync
# whenever a wrapped metadata key is assigned.
1441 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442 self.cp = portage.cpv_getkey(self.cpv)
1443 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444 self.category, self.pf = portage.catsplit(self.cpv)
1445 self.cpv_split = portage.catpkgsplit(self.cpv)
1446 self.pv_split = self.cpv_split[1:]
# Slots of the elided nested `class _use`: frozenset of enabled flags.
1450 __slots__ = ("__weakref__", "enabled")
1452 def __init__(self, use):
1453 self.enabled = frozenset(use)
# Nested helper describing IUSE: tokens split into enabled (+x),
# disabled (-x) and plain flags, plus a lazily-built matching regex.
1455 class _iuse(object):
1457 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1459 def __init__(self, tokens, iuse_implicit):
1460 self.tokens = tuple(tokens)
1461 self.iuse_implicit = iuse_implicit
# Classification loop is elided; these appends strip the +/- prefix.
1468 enabled.append(x[1:])
1470 disabled.append(x[1:])
1473 self.enabled = frozenset(enabled)
1474 self.disabled = frozenset(disabled)
1475 self.all = frozenset(chain(enabled, disabled, other))
# Lazily compile self.regex on first access, then fall through to
# normal attribute lookup.
1477 def __getattribute__(self, name):
1480 return object.__getattribute__(self, "regex")
1481 except AttributeError:
1482 all = object.__getattribute__(self, "all")
1483 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484 # Escape anything except ".*" which is supposed
1485 # to pass through from _get_implicit_iuse()
1486 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487 regex = "^(%s)$" % "|".join(regex)
1488 regex = regex.replace("\\.\\*", ".*")
1489 self.regex = re.compile(regex)
1490 return object.__getattribute__(self, name)
1492 def _get_hash_key(self):
1493 hash_key = getattr(self, "_hash_key", None)
1494 if hash_key is None:
# Default operation is "merge"; onlydeps/installed packages are "nomerge".
1495 if self.operation is None:
1496 self.operation = "merge"
1497 if self.onlydeps or self.installed:
1498 self.operation = "nomerge"
1500 (self.type_name, self.root, self.cpv, self.operation)
1501 return self._hash_key
# Version comparisons (return lines elided); only same-cp packages
# are compared via pkgcmp.
1503 def __lt__(self, other):
1504 if other.cp != self.cp:
1506 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1510 def __le__(self, other):
1511 if other.cp != self.cp:
1513 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1517 def __gt__(self, other):
1518 if other.cp != self.cp:
1520 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1524 def __ge__(self, other):
1525 if other.cp != self.cp:
1527 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level: the full set of metadata keys backing the wrapper class —
# all auxdb keys except UNUSED_* and CDEPEND, plus Package.metadata_keys.
# slot_dict_class() builds a dict-like base class restricted to these keys.
1531 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1532 if not x.startswith("UNUSED_"))
1533 _all_metadata_keys.discard("CDEPEND")
1534 _all_metadata_keys.update(Package.metadata_keys)
1536 from portage.cache.mappings import slot_dict_class
1537 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Dict-like metadata wrapper: every __setitem__ on a "wrapped" key
# (COUNTER, INHERITED, IUSE, SLOT, USE, _mtime_) is mirrored onto the
# owning Package as a parsed attribute via the matching _set_* method.
# NOTE(review): lines are elided in this extract — e.g. the `self._pkg =
# pkg` assignment in __init__, _set_slot's body, and the conversion
# try/except bodies in _set_counter and _set__mtime_.
1539 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1541 Detect metadata updates and synchronize Package attributes.
1544 __slots__ = ("_pkg",)
1545 _wrapped_keys = frozenset(
1546 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1548 def __init__(self, pkg, metadata):
1549 _PackageMetadataWrapperBase.__init__(self)
# update() routes each key through __setitem__ below, so wrapped keys
# are synchronized during construction too.
1551 self.update(metadata)
1553 def __setitem__(self, k, v):
1554 _PackageMetadataWrapperBase.__setitem__(self, k, v)
# Dispatch by name: e.g. "IUSE" -> self._set_iuse(k, v).
1555 if k in self._wrapped_keys:
1556 getattr(self, "_set_" + k.lower())(k, v)
1558 def _set_inherited(self, k, v):
1559 if isinstance(v, basestring):
1560 v = frozenset(v.split())
1561 self._pkg.inherited = v
1563 def _set_iuse(self, k, v):
1564 self._pkg.iuse = self._pkg._iuse(
1565 v.split(), self._pkg.root_config.iuse_implicit)
1567 def _set_slot(self, k, v):
1570 def _set_use(self, k, v):
1571 self._pkg.use = self._pkg._use(v.split())
# String counters are converted (conversion lines elided) before storing.
1573 def _set_counter(self, k, v):
1574 if isinstance(v, basestring):
1579 self._pkg.counter = v
1581 def _set__mtime_(self, k, v):
1582 if isinstance(v, basestring):
# Synchronous --fetchonly handler for a single ebuild: runs the "fetch"
# phase under a private PORTAGE_TMPDIR (restored afterwards) and reports
# failures through elog.
# NOTE(review): the `def execute(self):` / `def _execute(self):` headers,
# try:/finally: lines and several returns are elided in this extract; the
# two visible bodies belong to those elided methods.
1589 class EbuildFetchonly(SlotObject):
1591 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1594 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1595 # ensuring sane $PWD (bug #239560) and storing elog
1596 # messages. Use a private temp directory, in order
1597 # to avoid locking the main one.
1598 settings = self.settings
1599 global_tmpdir = settings["PORTAGE_TMPDIR"]
1600 from tempfile import mkdtemp
1602 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
# Only EACCES-style failures are translated; others re-raised (elided).
1604 if e.errno != portage.exception.PermissionDenied.errno:
1606 raise portage.exception.PermissionDenied(global_tmpdir)
1607 settings["PORTAGE_TMPDIR"] = private_tmpdir
1608 settings.backup_changes("PORTAGE_TMPDIR")
1610 retval = self._execute()
# Cleanup: restore the real tmpdir and remove the private one
# (presumably inside a finally: — the keyword line is elided).
1612 settings["PORTAGE_TMPDIR"] = global_tmpdir
1613 settings.backup_changes("PORTAGE_TMPDIR")
1614 shutil.rmtree(private_tmpdir)
# --- body of the elided _execute() method ---
1618 settings = self.settings
1620 root_config = pkg.root_config
1621 portdb = root_config.trees["porttree"].dbapi
1622 ebuild_path = portdb.findname(pkg.cpv)
1623 settings.setcpv(pkg)
1624 debug = settings.get("PORTAGE_DEBUG") == "1"
1625 use_cache = 1 # always true
1626 portage.doebuild_environment(ebuild_path, "fetch",
1627 root_config.root, settings, debug, use_cache, portdb)
1628 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1630 retval = portage.doebuild(ebuild_path, "fetch",
1631 self.settings["ROOT"], self.settings, debug=debug,
1632 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1633 mydbapi=portdb, tree="porttree")
1635 if retval != os.EX_OK:
1636 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1637 eerror(msg, phase="unpack", key=pkg.cpv)
1639 portage.elog.elog_process(self.pkg.cpv, self.settings)
# POLL* constants mirroring the select module, with fallbacks for
# platforms whose select module lacks them (getattr default).
# NOTE(review): the enumeration loop producing k and v (and the fallback
# value generator) is elided in this extract.
1642 class PollConstants(object):
1645 Provides POLL* constants that are equivalent to those from the
1646 select module, for use by PollSelectAdapter.
1649 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
# Class-body trick: inject each constant into the class namespace,
# preferring the real select-module value when available.
1652 locals()[k] = getattr(select, k, v)
# Base class for asynchronous tasks: start()/poll()/wait()/cancel()
# lifecycle with start- and exit-listener callbacks. returncode is None
# while running and set on completion.
# NOTE(review): many method headers (start, isAlive, poll, wait, cancel,
# _start, _poll, _wait) and several returns/guards are elided in this
# extract — the isolated bodies below belong to those elided headers.
1656 class AsynchronousTask(SlotObject):
1658 Subclasses override _wait() and _poll() so that calls
1659 to public methods can be wrapped for implementing
1660 hooks such as exit listener notification.
1662 Sublasses should call self.wait() to notify exit listeners after
1663 the task is complete and self.returncode has been set.
1666 __slots__ = ("background", "cancelled", "returncode") + \
1667 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1671 Start an asynchronous task and then return as soon as possible.
# Default _start raises: concrete subclasses must implement it.
1677 raise NotImplementedError(self)
1680 return self.returncode is None
1687 return self.returncode
1690 if self.returncode is None:
1693 return self.returncode
1696 return self.returncode
1699 self.cancelled = True
1702 def addStartListener(self, f):
1704 The function will be called with one argument, a reference to self.
# Listener list is created lazily on first registration.
1706 if self._start_listeners is None:
1707 self._start_listeners = []
1708 self._start_listeners.append(f)
1710 def removeStartListener(self, f):
1711 if self._start_listeners is None:
1713 self._start_listeners.remove(f)
1715 def _start_hook(self):
# Detach the listener list before calling, so callbacks registered
# during iteration don't fire for this start event.
1716 if self._start_listeners is not None:
1717 start_listeners = self._start_listeners
1718 self._start_listeners = None
1720 for f in start_listeners:
1723 def addExitListener(self, f):
1725 The function will be called with one argument, a reference to self.
1727 if self._exit_listeners is None:
1728 self._exit_listeners = []
1729 self._exit_listeners.append(f)
1731 def removeExitListener(self, f):
# May need to remove from the in-flight stack while _wait_hook runs.
1732 if self._exit_listeners is None:
1733 if self._exit_listener_stack is not None:
1734 self._exit_listener_stack.remove(f)
1736 self._exit_listeners.remove(f)
1738 def _wait_hook(self):
1740 Call this method after the task completes, just before returning
1741 the returncode from wait() or poll(). This hook is
1742 used to trigger exit listeners when the returncode first
1745 if self.returncode is not None and \
1746 self._exit_listeners is not None:
1748 # This prevents recursion, in case one of the
1749 # exit handlers triggers this method again by
1750 # calling wait(). Use a stack that gives
1751 # removeExitListener() an opportunity to consume
1752 # listeners from the stack, before they can get
1753 # called below. This is necessary because a call
1754 # to one exit listener may result in a call to
1755 # removeExitListener() for another listener on
1756 # the stack. That listener needs to be removed
1757 # from the stack since it would be inconsistent
1758 # to call it after it has been been passed into
1759 # removeExitListener().
1760 self._exit_listener_stack = self._exit_listeners
1761 self._exit_listeners = None
# Reverse so listeners fire in registration order as the stack pops.
1763 self._exit_listener_stack.reverse()
1764 while self._exit_listener_stack:
1765 self._exit_listener_stack.pop()(self)
# Base for tasks driven by the scheduler's poll() loop: defines the
# event masks and the unregister-on-error/HUP policy.
# NOTE(review): slot continuation lines, the POLLIN term of
# _registered_events, and the branch bodies in _unregister_if_appropriate
# are elided in this extract.
1767 class AbstractPollTask(AsynchronousTask):
1769 __slots__ = ("scheduler",) + \
# POLLERR/POLLNVAL indicate a broken fd; POLLHUP is EOF.
1773 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1774 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1777 def _unregister(self):
# Subclasses own their registration bookkeeping.
1778 raise NotImplementedError(self)
1780 def _unregister_if_appropriate(self, event):
1781 if self._registered:
1782 if event & self._exceptional_events:
1785 elif event & PollConstants.POLLHUP:
# Poll-driven reader: registers each input file with the scheduler in
# non-blocking mode, accumulates chunks in _read_data, and exposes the
# concatenation via getvalue().
# NOTE(review): several method headers (_start, isAlive, cancel, wait,
# getvalue, close) and try:/branch bodies are elided in this extract.
1789 class PipeReader(AbstractPollTask):
1792 Reads output from one or more files and saves it in memory,
1793 for retrieval via the getvalue() method. This is driven by
1794 the scheduler's poll() loop, so it runs entirely within the
1798 __slots__ = ("input_files",) + \
1799 ("_read_data", "_reg_ids")
# --- body of the elided _start(): register every input fd ---
1802 self._reg_ids = set()
1803 self._read_data = []
1804 for k, f in self.input_files.iteritems():
# Switch the fd to non-blocking so poll-driven reads never stall.
1805 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1806 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1807 self._reg_ids.add(self.scheduler.register(f.fileno(),
1808 self._registered_events, self._output_handler))
1809 self._registered = True
1812 return self._registered
1815 if self.returncode is None:
1817 self.cancelled = True
1821 if self.returncode is not None:
1822 return self.returncode
# Block in the scheduler until our registrations are drained.
1824 if self._registered:
1825 self.scheduler.schedule(self._reg_ids)
1828 self.returncode = os.EX_OK
1829 return self.returncode
1832 """Retrieve the entire contents"""
1833 return "".join(self._read_data)
1836 """Free the memory buffer."""
1837 self._read_data = None
1839 def _output_handler(self, fd, event):
1841 if event & PollConstants.POLLIN:
# Find which registered file this fd belongs to.
1843 for f in self.input_files.itervalues():
1844 if fd == f.fileno():
# Read one bounded chunk as raw bytes (EOF/IOError handling elided).
1847 buf = array.array('B')
1849 buf.fromfile(f, self._bufsize)
1854 self._read_data.append(buf.tostring())
1859 self._unregister_if_appropriate(event)
# Returning False tells the scheduler to drop this handler.
1860 return self._registered
1862 def _unregister(self):
1864 Unregister from the scheduler and close open files.
1867 self._registered = False
1869 if self._reg_ids is not None:
1870 for reg_id in self._reg_ids:
1871 self.scheduler.unregister(reg_id)
1872 self._reg_ids = None
1874 if self.input_files is not None:
1875 for f in self.input_files.itervalues():
1877 self.input_files = None
# A task implemented as a chain of sub-tasks: _start_task() wires an exit
# handler to each sub-task, and the _default_exit/_final_exit helpers
# propagate returncodes up to the composite.
# NOTE(review): method headers (isAlive, cancel, _poll, _wait) and loop/
# branch scaffolding are elided in this extract.
1879 class CompositeTask(AsynchronousTask):
1881 __slots__ = ("scheduler",) + ("_current_task",)
1884 return self._current_task is not None
# --- body of the elided cancel(): forward to the running sub-task ---
1887 self.cancelled = True
1888 if self._current_task is not None:
1889 self._current_task.cancel()
1893 This does a loop calling self._current_task.poll()
1894 repeatedly as long as the value of self._current_task
1895 keeps changing. It calls poll() a maximum of one time
1896 for a given self._current_task instance. This is useful
1897 since calling poll() on a task can trigger advance to
1898 the next task could eventually lead to the returncode
1899 being set in cases when polling only a single task would
1900 not have the same effect.
1905 task = self._current_task
1906 if task is None or task is prev:
1907 # don't poll the same task more than once
1912 return self.returncode
# --- body of the elided _wait(): wait sub-tasks until none remain ---
1918 task = self._current_task
1920 # don't wait for the same task more than once
1923 # Before the task.wait() method returned, an exit
1924 # listener should have set self._current_task to either
1925 # a different task or None. Something is wrong.
1926 raise AssertionError("self._current_task has not " + \
1927 "changed since calling wait", self, task)
1931 return self.returncode
1933 def _assert_current(self, task):
1935 Raises an AssertionError if the given task is not the
1936 same one as self._current_task. This can be useful
1939 if task is not self._current_task:
1940 raise AssertionError("Unrecognized task: %s" % (task,))
1942 def _default_exit(self, task):
1944 Calls _assert_current() on the given task and then sets the
1945 composite returncode attribute if task.returncode != os.EX_OK.
1946 If the task failed then self._current_task will be set to None.
1947 Subclasses can use this as a generic task exit callback.
1950 @returns: The task.returncode attribute.
1952 self._assert_current(task)
# Failure short-circuits the chain: adopt the failing returncode.
1953 if task.returncode != os.EX_OK:
1954 self.returncode = task.returncode
1955 self._current_task = None
1956 return task.returncode
1958 def _final_exit(self, task):
1960 Assumes that task is the final task of this composite task.
1961 Calls _default_exit() and sets self.returncode to the task's
1962 returncode and sets self._current_task to None.
1964 self._default_exit(task)
1965 self._current_task = None
1966 self.returncode = task.returncode
1967 return self.returncode
1969 def _default_final_exit(self, task):
1971 This calls _final_exit() and then wait().
1973 Subclasses can use this as a generic final task exit callback.
1976 self._final_exit(task)
1979 def _start_task(self, task, exit_handler):
1981 Register exit handler for the given task, set it
1982 as self._current_task, and call task.start().
1984 Subclasses can use this as a generic way to start
1988 task.addExitListener(exit_handler)
1989 self._current_task = task
# Runs queued tasks strictly one after another; the exit handler of each
# task launches the next, and a non-zero returncode aborts the sequence.
# NOTE(review): the start() header and an abort line after a failed
# _default_exit (original 2021 area) are elided in this extract.
1992 class TaskSequence(CompositeTask):
1994 A collection of tasks that executes sequentially. Each task
1995 must have a addExitListener() method that can be used as
1996 a means to trigger movement from one task to the next.
1999 __slots__ = ("_task_queue",)
2001 def __init__(self, **kwargs):
2002 AsynchronousTask.__init__(self, **kwargs)
# deque gives O(1) popleft for FIFO task dispatch.
2003 self._task_queue = deque()
2005 def add(self, task):
2006 self._task_queue.append(task)
2009 self._start_next_task()
# --- body of the elided cancel(): drop pending tasks, then cancel ---
2012 self._task_queue.clear()
2013 CompositeTask.cancel(self)
2015 def _start_next_task(self):
2016 self._start_task(self._task_queue.popleft(),
2017 self._task_exit_handler)
2019 def _task_exit_handler(self, task):
2020 if self._default_exit(task) != os.EX_OK:
2022 elif self._task_queue:
2023 self._start_next_task()
2025 self._final_exit(task)
# Poll-task wrapping a forked child process: tracks its pid, reaps it
# with waitpid(), translates the wait status into self.returncode, and
# unregisters its monitoring pipe from the scheduler.
# NOTE(review): method headers (poll, cancel, isAlive, _wait) and try:/
# branch lines are elided in this extract; the isolated bodies below
# belong to those elided headers.
2028 class SubProcess(AbstractPollTask):
2030 __slots__ = ("pid",) + \
2031 ("_files", "_reg_id")
2033 # A file descriptor is required for the scheduler to monitor changes from
2034 # inside a poll() loop. When logging is not enabled, create a pipe just to
2035 # serve this purpose alone.
# --- body of the elided poll(): non-blocking reap via WNOHANG ---
2039 if self.returncode is not None:
2040 return self.returncode
2041 if self.pid is None:
2042 return self.returncode
2043 if self._registered:
2044 return self.returncode
2047 retval = os.waitpid(self.pid, os.WNOHANG)
# ECHILD means someone already reaped it; synthesize a failure status.
2049 if e.errno != errno.ECHILD:
2052 retval = (self.pid, 1)
# (0, 0) from WNOHANG means the child is still running.
2054 if retval == (0, 0):
2056 self._set_returncode(retval)
2057 return self.returncode
# --- body of the elided cancel(): best-effort SIGTERM, ESRCH tolerated ---
2062 os.kill(self.pid, signal.SIGTERM)
2064 if e.errno != errno.ESRCH:
2068 self.cancelled = True
2069 if self.pid is not None:
2071 return self.returncode
2074 return self.pid is not None and \
2075 self.returncode is None
# --- body of the elided _wait(): schedule until done, then blocking reap ---
2079 if self.returncode is not None:
2080 return self.returncode
2082 if self._registered:
2083 self.scheduler.schedule(self._reg_id)
2085 if self.returncode is not None:
2086 return self.returncode
2089 wait_retval = os.waitpid(self.pid, 0)
2091 if e.errno != errno.ECHILD:
2094 self._set_returncode((self.pid, 1))
2096 self._set_returncode(wait_retval)
2098 return self.returncode
2100 def _unregister(self):
2102 Unregister from the scheduler and close open files.
2105 self._registered = False
2107 if self._reg_id is not None:
2108 self.scheduler.unregister(self._reg_id)
2111 if self._files is not None:
2112 for f in self._files.itervalues():
2116 def _set_returncode(self, wait_retval):
# wait_retval is the (pid, status) pair from os.waitpid().
2118 retval = wait_retval[1]
2120 if retval != os.EX_OK:
# Signal-death branch: fold the signal number into the high byte
# (exit-status branch lines elided here).
2122 retval = (retval & 0xff) << 8
2124 retval = retval >> 8
2126 self.returncode = retval
# SubProcess that launches its child via portage.process.spawn(), wiring
# stdout/stderr either through a logfile tee or a dummy monitoring pipe,
# and pumping output from the poll loop.
# NOTE(review): the _start() header, several try:/else:/flush lines and
# EOF/error branches are elided in this extract; treat the text below as
# a partial listing.
2128 class SpawnProcess(SubProcess):
2131 Constructor keyword args are passed into portage.process.spawn().
2132 The required "args" keyword argument will be passed as the first
# Only these attributes are forwarded to spawn() as keyword args.
2136 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2137 "uid", "gid", "groups", "umask", "logfile",
2138 "path_lookup", "pre_exec")
2140 __slots__ = ("args",) + \
2143 _file_names = ("log", "process", "stdout")
2144 _files_dict = slot_dict_class(_file_names, prefix="")
# --- body of the elided _start() ---
2151 if self.fd_pipes is None:
2153 fd_pipes = self.fd_pipes
# Default the child's stdio to our own stdio fds.
2154 fd_pipes.setdefault(0, sys.stdin.fileno())
2155 fd_pipes.setdefault(1, sys.stdout.fileno())
2156 fd_pipes.setdefault(2, sys.stderr.fileno())
2158 # flush any pending output
2159 for fd in fd_pipes.itervalues():
2160 if fd == sys.stdout.fileno():
2162 if fd == sys.stderr.fileno():
2165 logfile = self.logfile
2166 self._files = self._files_dict()
# Master end is non-blocking: it is read from the poll loop.
2169 master_fd, slave_fd = self._pipe(fd_pipes)
2170 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2171 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2174 fd_pipes_orig = fd_pipes.copy()
2176 # TODO: Use job control functions like tcsetpgrp() to control
2177 # access to stdin. Until then, use /dev/null so that any
2178 # attempts to read from stdin will immediately return EOF
2179 # instead of blocking indefinitely.
2180 null_input = open('/dev/null', 'rb')
2181 fd_pipes[0] = null_input.fileno()
2183 fd_pipes[0] = fd_pipes_orig[0]
2185 files.process = os.fdopen(master_fd, 'r')
# Logging mode: child's stdout+stderr go to the slave end; we tee the
# master end into the log file (and our stdout unless backgrounded).
2186 if logfile is not None:
2188 fd_pipes[1] = slave_fd
2189 fd_pipes[2] = slave_fd
2191 files.log = open(logfile, "a")
2192 portage.util.apply_secpass_permissions(logfile,
2193 uid=portage.portage_uid, gid=portage.portage_gid,
2196 if not self.background:
2197 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2199 output_handler = self._output_handler
2203 # Create a dummy pipe so the scheduler can monitor
2204 # the process from inside a poll() loop.
2205 fd_pipes[self._dummy_pipe_fd] = slave_fd
2207 fd_pipes[1] = slave_fd
2208 fd_pipes[2] = slave_fd
2209 output_handler = self._dummy_handler
2212 for k in self._spawn_kwarg_names:
2213 v = getattr(self, k)
2217 kwargs["fd_pipes"] = fd_pipes
2218 kwargs["returnpid"] = True
# logfile is handled by the tee above, not by spawn() itself.
2219 kwargs.pop("logfile", None)
2221 self._reg_id = self.scheduler.register(files.process.fileno(),
2222 self._registered_events, output_handler)
2223 self._registered = True
2225 retval = self._spawn(self.args, **kwargs)
2228 if null_input is not None:
# An int return from spawn means the fork itself failed.
2231 if isinstance(retval, int):
2234 self.returncode = retval
2238 self.pid = retval[0]
# This class reaps the child itself, so drop it from the global list.
2239 portage.process.spawned_pids.remove(self.pid)
2241 def _pipe(self, fd_pipes):
2243 @type fd_pipes: dict
2244 @param fd_pipes: pipes from which to copy terminal size if desired.
2248 def _spawn(self, args, **kwargs):
2249 return portage.process.spawn(args, **kwargs)
2251 def _output_handler(self, fd, event):
2253 if event & PollConstants.POLLIN:
2256 buf = array.array('B')
2258 buf.fromfile(files.process, self._bufsize)
# Tee each chunk to our stdout (unless backgrounded) and the log.
2263 if not self.background:
2264 buf.tofile(files.stdout)
2265 files.stdout.flush()
2266 buf.tofile(files.log)
2272 self._unregister_if_appropriate(event)
2273 return self._registered
2275 def _dummy_handler(self, fd, event):
2277 This method is mainly interested in detecting EOF, since
2278 the only purpose of the pipe is to allow the scheduler to
2279 monitor the process from inside a poll() loop.
2282 if event & PollConstants.POLLIN:
2284 buf = array.array('B')
2286 buf.fromfile(self._files.process, self._bufsize)
2296 self._unregister_if_appropriate(event)
2297 return self._registered
# Runs misc-functions.sh phase commands inside an existing ebuild
# environment, then filters the exit status through the ebuild
# exit-status-file protocol.
# NOTE(review): the _start() header line is elided in this extract.
2299 class MiscFunctionsProcess(SpawnProcess):
2301 Spawns misc-functions.sh with an existing ebuild environment.
2304 __slots__ = ("commands", "phase", "pkg", "settings")
# --- body of the elided _start() ---
2307 settings = self.settings
2308 settings.pop("EBUILD_PHASE", None)
2309 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2310 misc_sh_binary = os.path.join(portage_bin_path,
2311 os.path.basename(portage.const.MISC_SH_BINARY))
2313 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2314 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before spawning.
2316 portage._doebuild_exit_status_unlink(
2317 settings.get("EBUILD_EXIT_STATUS_FILE"))
2319 SpawnProcess._start(self)
2321 def _spawn(self, args, **kwargs):
2322 settings = self.settings
2323 debug = settings.get("PORTAGE_DEBUG") == "1"
# Joined into one shell command line, as portage.spawn expects.
2324 return portage.spawn(" ".join(args), settings,
2325 debug=debug, **kwargs)
2327 def _set_returncode(self, wait_retval):
2328 SpawnProcess._set_returncode(self, wait_retval)
# Override the raw status with the phase's exit-status-file verdict.
2329 self.returncode = portage._doebuild_exit_status_check_and_log(
2330 self.settings, self.phase, self.returncode)
# Spawns `ebuild <path> fetch` (or clean+fetch) in a locked build dir,
# optionally using a pty for progress bars, and handles elog reporting
# plus build-dir cleanup when the fetch finishes.
# NOTE(review): the _start() header, the phase/clean decision lines, the
# pty/pipe fallback return, and several branch lines are elided in this
# extract (margin numbering jumps).
2332 class EbuildFetcher(SpawnProcess):
2334 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
# --- body of the elided _start() ---
2339 root_config = self.pkg.root_config
2340 portdb = root_config.trees["porttree"].dbapi
2341 ebuild_path = portdb.findname(self.pkg.cpv)
# Settings come from a shared pool; returned in _set_returncode below.
2342 settings = self.config_pool.allocate()
2343 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2344 self._build_dir.lock()
2345 self._build_dir.clean()
2346 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2347 if self.logfile is None:
2348 self.logfile = settings.get("PORTAGE_LOG_FILE")
2354 # If any incremental variables have been overridden
2355 # via the environment, those values need to be passed
2356 # along here so that they are correctly considered by
2357 # the config instance in the subproccess.
2358 fetch_env = os.environ.copy()
# Avoid double-nicing: the parent already applied PORTAGE_NICENESS.
2360 fetch_env["PORTAGE_NICENESS"] = "0"
2362 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2364 ebuild_binary = os.path.join(
2365 settings["PORTAGE_BIN_PATH"], "ebuild")
2367 fetch_args = [ebuild_binary, ebuild_path, phase]
2368 debug = settings.get("PORTAGE_DEBUG") == "1"
2370 fetch_args.append("--debug")
2372 self.args = fetch_args
2373 self.env = fetch_env
2374 SpawnProcess._start(self)
2376 def _pipe(self, fd_pipes):
2377 """When appropriate, use a pty so that fetcher progress bars,
2378 like wget has, will work properly."""
2379 if self.background or not sys.stdout.isatty():
2380 # When the output only goes to a log file,
2381 # there's no point in creating a pty.
2383 stdout_pipe = fd_pipes.get(1)
2384 got_pty, master_fd, slave_fd = \
2385 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2386 return (master_fd, slave_fd)
2388 def _set_returncode(self, wait_retval):
2389 SpawnProcess._set_returncode(self, wait_retval)
2390 # Collect elog messages that might have been
2391 # created by the pkg_nofetch phase.
2392 if self._build_dir is not None:
2393 # Skip elog messages for prefetch, in order to avoid duplicates.
2394 if not self.prefetch and self.returncode != os.EX_OK:
2396 if self.logfile is not None:
2398 elog_out = open(self.logfile, 'a')
2399 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2400 if self.logfile is not None:
2401 msg += ", Log file:"
2402 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2403 if self.logfile is not None:
2404 eerror(" '%s'" % (self.logfile,),
2405 phase="unpack", key=self.pkg.cpv, out=elog_out)
2406 if elog_out is not None:
2408 if not self.prefetch:
2409 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2410 features = self._build_dir.settings.features
# Success: clean the build dir; always unlock and return settings
# to the pool.
2411 if self.returncode == os.EX_OK:
2412 self._build_dir.clean()
2413 self._build_dir.unlock()
2414 self.config_pool.deallocate(self._build_dir.settings)
2415 self._build_dir = None
# Manages locking/cleaning of a package's PORTAGE_BUILDDIR, including a
# short-lived lock on the parent category directory while creating it.
# NOTE(review): method headers (lock, clean, unlock), try:/finally:
# lines, the catdir-removal attempt, and initial attribute defaults are
# elided in this extract.
2417 class EbuildBuildDir(SlotObject):
2419 __slots__ = ("dir_path", "pkg", "settings",
2420 "locked", "_catdir", "_lock_obj")
2422 def __init__(self, **kwargs):
2423 SlotObject.__init__(self, **kwargs)
# --- body of the elided lock() ---
2428 This raises an AlreadyLocked exception if lock() is called
2429 while a lock is already held. In order to avoid this, call
2430 unlock() or check whether the "locked" attribute is True
2431 or False before calling lock().
2433 if self._lock_obj is not None:
2434 raise self.AlreadyLocked((self._lock_obj,))
2436 dir_path = self.dir_path
# When no dir_path was supplied, derive PORTAGE_BUILDDIR by running
# doebuild_environment for the "setup" phase.
2437 if dir_path is None:
2438 root_config = self.pkg.root_config
2439 portdb = root_config.trees["porttree"].dbapi
2440 ebuild_path = portdb.findname(self.pkg.cpv)
2441 settings = self.settings
2442 settings.setcpv(self.pkg)
2443 debug = settings.get("PORTAGE_DEBUG") == "1"
2444 use_cache = 1 # always true
2445 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2446 self.settings, debug, use_cache, portdb)
2447 dir_path = self.settings["PORTAGE_BUILDDIR"]
2449 catdir = os.path.dirname(dir_path)
2450 self._catdir = catdir
2452 portage.util.ensure_dirs(os.path.dirname(catdir),
2453 gid=portage.portage_gid,
# Hold the category-dir lock only while creating catdir and taking
# the build-dir lock; released in the (elided) finally below.
2457 catdir_lock = portage.locks.lockdir(catdir)
2458 portage.util.ensure_dirs(catdir,
2459 gid=portage.portage_gid,
2461 self._lock_obj = portage.locks.lockdir(dir_path)
2463 self.locked = self._lock_obj is not None
2464 if catdir_lock is not None:
2465 portage.locks.unlockdir(catdir_lock)
# --- body of the elided clean() ---
2468 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2469 by keepwork or keeptemp in FEATURES."""
2470 settings = self.settings
2471 features = settings.features
2472 if not ("keepwork" in features or "keeptemp" in features):
2474 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
# Already-removed build dirs are fine; anything else propagates.
2475 except EnvironmentError, e:
2476 if e.errno != errno.ENOENT:
# --- body of the elided unlock(): drop the build-dir lock, then try to
# remove the (possibly empty) category dir under its own lock ---
2481 if self._lock_obj is None:
2484 portage.locks.unlockdir(self._lock_obj)
2485 self._lock_obj = None
2488 catdir = self._catdir
2491 catdir_lock = portage.locks.lockdir(catdir)
# rmdir failure is tolerated for these benign cases (rmdir call elided).
2497 if e.errno not in (errno.ENOENT,
2498 errno.ENOTEMPTY, errno.EEXIST):
2501 portage.locks.unlockdir(catdir_lock)
2503 class AlreadyLocked(portage.exception.PortageException):
# EbuildBuild: composite task driving a full from-source build of one
# ebuild: (optional) prefetch wait -> fetch -> clean/compile -> (optional)
# binary packaging -> merge.  NOTE(review): sampled source -- interior
# lines (incl. several def headers and try: lines) are missing.
2506 class EbuildBuild(CompositeTask):
2508 __slots__ = ("args_set", "config_pool", "find_blockers",
2509 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2510 "prefetcher", "settings", "world_atom") + \
2511 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# _start(): resolve the ebuild path and either wait on a running
# prefetcher or go straight to the fetch step.
2515 logger = self.logger
2518 settings = self.settings
2519 world_atom = self.world_atom
2520 root_config = pkg.root_config
2523 portdb = root_config.trees[tree].dbapi
2524 settings.setcpv(pkg)
2525 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2526 ebuild_path = portdb.findname(self.pkg.cpv)
2527 self._ebuild_path = ebuild_path
2529 prefetcher = self.prefetcher
2530 if prefetcher is None:
2532 elif not prefetcher.isAlive():
# Prefetcher still running: print a hint (unless in background mode) and
# register an exit listener instead of blocking.
2534 elif prefetcher.poll() is None:
2536 waiting_msg = "Fetching files " + \
2537 "in the background. " + \
2538 "To view fetch progress, run `tail -f " + \
2539 "/var/log/emerge-fetch.log` in another " + \
2541 msg_prefix = colorize("GOOD", " * ")
2542 from textwrap import wrap
2543 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2544 for line in wrap(waiting_msg, 65))
2545 if not self.background:
2546 writemsg(waiting_msg, noiselevel=-1)
2548 self._current_task = prefetcher
2549 prefetcher.addExitListener(self._prefetch_exit)
2552 self._prefetch_exit(prefetcher)
# _prefetch_exit(): pretend/fetchonly uses the synchronous EbuildFetchonly;
# otherwise an asynchronous EbuildFetcher is started.
2554 def _prefetch_exit(self, prefetcher):
2558 settings = self.settings
2561 fetcher = EbuildFetchonly(
2562 fetch_all=opts.fetch_all_uri,
2563 pkg=pkg, pretend=opts.pretend,
2565 retval = fetcher.execute()
2566 self.returncode = retval
2570 fetcher = EbuildFetcher(config_pool=self.config_pool,
2571 fetchall=opts.fetch_all_uri,
2572 fetchonly=opts.fetchonly,
2573 background=self.background,
2574 pkg=pkg, scheduler=self.scheduler)
2576 self._start_task(fetcher, self._fetch_exit)
# _fetch_exit(): on failure keep the fetch log for diagnosis; on success
# remove it, then lock the build dir and launch clean + compile.
2578 def _fetch_exit(self, fetcher):
2582 fetch_failed = False
2584 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2586 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2588 if fetch_failed and fetcher.logfile is not None and \
2589 os.path.exists(fetcher.logfile):
2590 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2592 if not fetch_failed and fetcher.logfile is not None:
2593 # Fetch was successful, so remove the fetch log.
2595 os.unlink(fetcher.logfile)
2599 if fetch_failed or opts.fetchonly:
2603 logger = self.logger
2605 pkg_count = self.pkg_count
2606 scheduler = self.scheduler
2607 settings = self.settings
2608 features = settings.features
2609 ebuild_path = self._ebuild_path
2610 system_set = pkg.root_config.sets["system"]
2612 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2613 self._build_dir.lock()
2615 # Cleaning is triggered before the setup
2616 # phase, in portage.doebuild().
2617 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2618 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2619 short_msg = "emerge: (%s of %s) %s Clean" % \
2620 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2621 logger.log(msg, short_msg=short_msg)
2623 #buildsyspkg: Check if we need to _force_ binary package creation
2624 self._issyspkg = "buildsyspkg" in features and \
2625 system_set.findAtomForPackage(pkg) and \
2628 if opts.buildpkg or self._issyspkg:
2630 self._buildpkg = True
2632 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2633 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2634 short_msg = "emerge: (%s of %s) %s Compile" % \
2635 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2636 logger.log(msg, short_msg=short_msg)
2639 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2640 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2641 short_msg = "emerge: (%s of %s) %s Compile" % \
2642 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2643 logger.log(msg, short_msg=short_msg)
2645 build = EbuildExecuter(background=self.background, pkg=pkg,
2646 scheduler=scheduler, settings=settings)
2647 self._start_task(build, self._build_exit)
# _unlock_builddir(): flush elog messages before releasing the lock.
2649 def _unlock_builddir(self):
2650 portage.elog.elog_process(self.pkg.cpv, self.settings)
2651 self._build_dir.unlock()
# _build_exit(): on success, either package the result (buildpkg /
# buildsyspkg) or fall through to the final exit path.
2653 def _build_exit(self, build):
2654 if self._default_exit(build) != os.EX_OK:
2655 self._unlock_builddir()
2660 buildpkg = self._buildpkg
2663 self._final_exit(build)
2668 msg = ">>> This is a system package, " + \
2669 "let's pack a rescue tarball.\n"
2671 log_path = self.settings.get("PORTAGE_LOG_FILE")
2672 if log_path is not None:
# NOTE(review): file opened here; the matching write/close lines are
# missing from this sample -- confirm it is closed in the original.
2673 log_file = open(log_path, 'a')
2679 if not self.background:
2680 portage.writemsg_stdout(msg, noiselevel=-1)
2682 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2683 scheduler=self.scheduler, settings=self.settings)
2685 self._start_task(packager, self._buildpkg_exit)
2687 def _buildpkg_exit(self, packager):
2689 Released build dir lock when there is a failure or
2690 when in buildpkgonly mode. Otherwise, the lock will
2691 be released when merge() is called.
2694 if self._default_exit(packager) != os.EX_OK:
2695 self._unlock_builddir()
2699 if self.opts.buildpkgonly:
2700 # Need to call "clean" phase for buildpkgonly mode
2701 portage.elog.elog_process(self.pkg.cpv, self.settings)
2703 clean_phase = EbuildPhase(background=self.background,
2704 pkg=self.pkg, phase=phase,
2705 scheduler=self.scheduler, settings=self.settings,
2707 self._start_task(clean_phase, self._clean_exit)
2710 # Continue holding the builddir lock until
2711 # after the package has been installed.
2712 self._current_task = None
2713 self.returncode = packager.returncode
2716 def _clean_exit(self, clean_phase):
2717 if self._final_exit(clean_phase) != os.EX_OK or \
2718 self.opts.buildpkgonly:
2719 self._unlock_builddir()
# install(): synchronous merge of the built image into the live
# filesystem; releases the builddir lock afterwards.
2724 Install the package and then clean up and release locks.
2725 Only call this after the build has completed successfully
2726 and neither fetchonly nor buildpkgonly mode are enabled.
2729 find_blockers = self.find_blockers
2730 ldpath_mtimes = self.ldpath_mtimes
2731 logger = self.logger
2733 pkg_count = self.pkg_count
2734 settings = self.settings
2735 world_atom = self.world_atom
2736 ebuild_path = self._ebuild_path
2739 merge = EbuildMerge(find_blockers=self.find_blockers,
2740 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2741 pkg_count=pkg_count, pkg_path=ebuild_path,
2742 scheduler=self.scheduler,
2743 settings=settings, tree=tree, world_atom=world_atom)
2745 msg = " === (%s of %s) Merging (%s::%s)" % \
2746 (pkg_count.curval, pkg_count.maxval,
2747 pkg.cpv, ebuild_path)
2748 short_msg = "emerge: (%s of %s) %s Merge" % \
2749 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2750 logger.log(msg, short_msg=short_msg)
2753 rval = merge.execute()
2755 self._unlock_builddir()
# EbuildExecuter: runs the build-phase pipeline for one ebuild:
# clean -> setup -> unpack -> (prepare/configure/compile/test/install),
# with the phase list trimmed by EAPI.  NOTE(review): sampled source --
# interior lines are missing; lines kept byte-for-byte.
2759 class EbuildExecuter(CompositeTask):
2761 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2763 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose $DISTDIR access must be serialized (see _setup_exit).
2765 _live_eclasses = frozenset([
2775 self._tree = "porttree"
2778 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2779 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2780 self._start_task(clean_phase, self._clean_phase_exit)
2782 def _clean_phase_exit(self, clean_phase):
2784 if self._default_exit(clean_phase) != os.EX_OK:
2789 scheduler = self.scheduler
2790 settings = self.settings
2793 # This initializes PORTAGE_LOG_FILE.
2794 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# The setup phase goes through the scheduler's dedicated setup queue
# rather than _start_task().
2796 setup_phase = EbuildPhase(background=self.background,
2797 pkg=pkg, phase="setup", scheduler=scheduler,
2798 settings=settings, tree=self._tree)
2800 setup_phase.addExitListener(self._setup_exit)
2801 self._current_task = setup_phase
2802 self.scheduler.scheduleSetup(setup_phase)
2804 def _setup_exit(self, setup_phase):
2806 if self._default_exit(setup_phase) != os.EX_OK:
2810 unpack_phase = EbuildPhase(background=self.background,
2811 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2812 settings=self.settings, tree=self._tree)
2814 if self._live_eclasses.intersection(self.pkg.inherited):
2815 # Serialize $DISTDIR access for live ebuilds since
2816 # otherwise they can interfere with eachother.
2818 unpack_phase.addExitListener(self._unpack_exit)
2819 self._current_task = unpack_phase
2820 self.scheduler.scheduleUnpack(unpack_phase)
2823 self._start_task(unpack_phase, self._unpack_exit)
# _unpack_exit(): queue the remaining phases as one TaskSequence; old
# EAPIs skip src_prepare/src_configure.
2825 def _unpack_exit(self, unpack_phase):
2827 if self._default_exit(unpack_phase) != os.EX_OK:
2831 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2834 phases = self._phases
2835 eapi = pkg.metadata["EAPI"]
2836 if eapi in ("0", "1", "2_pre1"):
2837 # skip src_prepare and src_configure
2839 elif eapi in ("2_pre2",):
2843 for phase in phases:
2844 ebuild_phases.add(EbuildPhase(background=self.background,
2845 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2846 settings=self.settings, tree=self._tree))
2848 self._start_task(ebuild_phases, self._default_final_exit)
# EbuildMetadataPhase: asynchronous "depend" phase used to extract
# metadata from an ebuild via a pipe from the spawned bash process.
# NOTE(review): sampled source -- interior lines are missing.
2850 class EbuildMetadataPhase(SubProcess):
2853 Asynchronous interface for the ebuild "depend" phase which is
2854 used to extract metadata from the ebuild.
2857 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2858 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2861 _file_names = ("ebuild",)
2862 _files_dict = slot_dict_class(_file_names, prefix="")
# _start(): set up fd_pipes with a nonblocking pipe for metadata output,
# then spawn doebuild("depend", ..., returnpid=True).
2866 settings = self.settings
2868 ebuild_path = self.ebuild_path
2869 debug = settings.get("PORTAGE_DEBUG") == "1"
2873 if self.fd_pipes is not None:
2874 fd_pipes = self.fd_pipes.copy()
2878 fd_pipes.setdefault(0, sys.stdin.fileno())
2879 fd_pipes.setdefault(1, sys.stdout.fileno())
2880 fd_pipes.setdefault(2, sys.stderr.fileno())
2882 # flush any pending output
2883 for fd in fd_pipes.itervalues():
2884 if fd == sys.stdout.fileno():
2886 if fd == sys.stderr.fileno():
2889 fd_pipes_orig = fd_pipes.copy()
2890 self._files = self._files_dict()
2893 master_fd, slave_fd = os.pipe()
# Make the read end nonblocking so the poll loop never stalls.
2894 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2895 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2897 fd_pipes[self._metadata_fd] = slave_fd
2899 self._raw_metadata = []
2900 files.ebuild = os.fdopen(master_fd, 'r')
2901 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2902 self._registered_events, self._output_handler)
2903 self._registered = True
2905 retval = portage.doebuild(ebuild_path, "depend",
2906 settings["ROOT"], settings, debug,
2907 mydbapi=self.portdb, tree="porttree",
2908 fd_pipes=fd_pipes, returnpid=True)
2912 if isinstance(retval, int):
2913 # doebuild failed before spawning
2915 self.returncode = retval
# This task waits on the pid itself, so drop it from portage's global
# spawned-pid bookkeeping.
2919 self.pid = retval[0]
2920 portage.process.spawned_pids.remove(self.pid)
# _output_handler(): accumulate pipe output; an empty read means EOF.
2922 def _output_handler(self, fd, event):
2924 if event & PollConstants.POLLIN:
2925 self._raw_metadata.append(self._files.ebuild.read())
2926 if not self._raw_metadata[-1]:
2930 self._unregister_if_appropriate(event)
2931 return self._registered
# _set_returncode(): only trust EX_OK when the number of metadata lines
# matches auxdbkeys exactly; then hand the parsed pairs to the callback.
2933 def _set_returncode(self, wait_retval):
2934 SubProcess._set_returncode(self, wait_retval)
2935 if self.returncode == os.EX_OK:
2936 metadata_lines = "".join(self._raw_metadata).splitlines()
2937 if len(portage.auxdbkeys) != len(metadata_lines):
2938 # Don't trust bash's returncode if the
2939 # number of lines is incorrect.
2942 metadata = izip(portage.auxdbkeys, metadata_lines)
2943 self.metadata_callback(self.cpv, self.ebuild_path,
2944 self.repo_path, metadata, self.ebuild_mtime)
# EbuildProcess: SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild().  NOTE(review): sampled source -- interior lines
# (incl. some def headers) are missing.
2946 class EbuildProcess(SpawnProcess):
2948 __slots__ = ("phase", "pkg", "settings", "tree")
2951 # Don't open the log file during the clean phase since the
2952 # open file can result in an nfs lock on $T/build.log which
2953 # prevents the clean phase from removing $T.
2954 if self.phase not in ("clean", "cleanrm"):
2955 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2956 SpawnProcess._start(self)
# _pipe(): prefer a pty (with the terminal size copied) over a plain pipe
# when stdout allows it.
2958 def _pipe(self, fd_pipes):
2959 stdout_pipe = fd_pipes.get(1)
2960 got_pty, master_fd, slave_fd = \
2961 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2962 return (master_fd, slave_fd)
2964 def _spawn(self, args, **kwargs):
2966 root_config = self.pkg.root_config
2968 mydbapi = root_config.trees[tree].dbapi
2969 settings = self.settings
2970 ebuild_path = settings["EBUILD"]
2971 debug = settings.get("PORTAGE_DEBUG") == "1"
2973 rval = portage.doebuild(ebuild_path, self.phase,
2974 root_config.root, settings, debug,
2975 mydbapi=mydbapi, tree=tree, **kwargs)
# _set_returncode(): reconcile the exit-status marker file with the
# process status; test failures are forgiven under test-fail-continue.
2979 def _set_returncode(self, wait_retval):
2980 SpawnProcess._set_returncode(self, wait_retval)
2982 if self.phase not in ("clean", "cleanrm"):
2983 self.returncode = portage._doebuild_exit_status_check_and_log(
2984 self.settings, self.phase, self.returncode)
2986 if self.phase == "test" and self.returncode != os.EX_OK and \
2987 "test-fail-continue" in self.settings.features:
2988 self.returncode = os.EX_OK
2990 portage._post_phase_userpriv_perms(self.settings)
# EbuildPhase: composite task wrapping one EbuildProcess phase plus its
# post-phase commands (e.g. install-time checks and fixups).
# NOTE(review): sampled source -- interior lines are missing.
2992 class EbuildPhase(CompositeTask):
2994 __slots__ = ("background", "pkg", "phase",
2995 "scheduler", "settings", "tree")
2997 _post_phase_cmds = portage._post_phase_cmds
3001 ebuild_process = EbuildProcess(background=self.background,
3002 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3003 settings=self.settings, tree=self.tree)
3005 self._start_task(ebuild_process, self._ebuild_exit)
# _ebuild_exit(): after "install", scan the build log for QA problems
# (output goes to the log file when running in the background).
3007 def _ebuild_exit(self, ebuild_process):
3009 if self.phase == "install":
3011 log_path = self.settings.get("PORTAGE_LOG_FILE")
3013 if self.background and log_path is not None:
3014 log_file = open(log_path, 'a')
3017 portage._check_build_log(self.settings, out=out)
3019 if log_file is not None:
3022 if self._default_exit(ebuild_process) != os.EX_OK:
3026 settings = self.settings
3028 if self.phase == "install":
3029 portage._post_src_install_uid_fix(settings)
3031 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3032 if post_phase_cmds is not None:
3033 post_phase = MiscFunctionsProcess(background=self.background,
3034 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3035 scheduler=self.scheduler, settings=settings)
3036 self._start_task(post_phase, self._post_phase_exit)
3039 self.returncode = ebuild_process.returncode
3040 self._current_task = None
# _post_phase_exit(): a failed post-phase command fails the whole phase.
3043 def _post_phase_exit(self, post_phase):
3044 if self._final_exit(post_phase) != os.EX_OK:
3045 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3047 self._current_task = None
# EbuildBinpkg: runs the "package" phase to build a .tbz2 into a pid-unique
# temp file, injecting it into the binary tree on success.
# NOTE(review): sampled source -- interior lines are missing.
3051 class EbuildBinpkg(EbuildProcess):
3053 This assumes that src_install() has successfully completed.
3055 __slots__ = ("_binpkg_tmpfile",)
3058 self.phase = "package"
3059 self.tree = "porttree"
3061 root_config = pkg.root_config
3062 portdb = root_config.trees["porttree"].dbapi
3063 bintree = root_config.trees["bintree"]
3064 ebuild_path = portdb.findname(self.pkg.cpv)
3065 settings = self.settings
3066 debug = settings.get("PORTAGE_DEBUG") == "1"
3068 bintree.prevent_collision(pkg.cpv)
# The pid suffix keeps concurrent emerges from clobbering each other.
3069 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3070 pkg.cpv + ".tbz2." + str(os.getpid()))
3071 self._binpkg_tmpfile = binpkg_tmpfile
3072 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3073 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3076 EbuildProcess._start(self)
# The tmpfile setting is scoped to this phase only.
3078 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3080 def _set_returncode(self, wait_retval):
3081 EbuildProcess._set_returncode(self, wait_retval)
3084 bintree = pkg.root_config.trees["bintree"]
3085 binpkg_tmpfile = self._binpkg_tmpfile
3086 if self.returncode == os.EX_OK:
3087 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# EbuildMerge: synchronous wrapper around portage.merge() for an already
# built image; updates the world file and logs on success.
# NOTE(review): sampled source -- interior lines are missing, including
# the assignment of the local `pkg` used in _log_success().
3089 class EbuildMerge(SlotObject):
3091 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3092 "pkg", "pkg_count", "pkg_path", "pretend",
3093 "scheduler", "settings", "tree", "world_atom")
3096 root_config = self.pkg.root_config
3097 settings = self.settings
3098 retval = portage.merge(settings["CATEGORY"],
3099 settings["PF"], settings["D"],
3100 os.path.join(settings["PORTAGE_BUILDDIR"],
3101 "build-info"), root_config.root, settings,
3102 myebuild=settings["EBUILD"],
3103 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3104 vartree=root_config.trees["vartree"],
3105 prev_mtimes=self.ldpath_mtimes,
3106 scheduler=self.scheduler,
3107 blockers=self.find_blockers)
# world_atom() is only invoked once the merge itself succeeded.
3109 if retval == os.EX_OK:
3110 self.world_atom(self.pkg)
3115 def _log_success(self):
3117 pkg_count = self.pkg_count
3118 pkg_path = self.pkg_path
3119 logger = self.logger
3120 if "noclean" not in self.settings.features:
3121 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3122 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3123 logger.log((" === (%s of %s) " + \
3124 "Post-Build Cleaning (%s::%s)") % \
3125 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3126 short_msg=short_msg)
3127 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3128 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# PackageUninstall: unmerges one installed package, translating
# UninstallFailure into a returncode and routing messages to the emerge
# log when backgrounded.  NOTE(review): sampled source -- interior lines
# are missing.
3130 class PackageUninstall(AsynchronousTask):
3132 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3136 unmerge(self.pkg.root_config, self.opts, "unmerge",
3137 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3138 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3139 writemsg_level=self._writemsg_level)
3140 except UninstallFailure, e:
3141 self.returncode = e.status
3143 self.returncode = os.EX_OK
# _writemsg_level(): without a log file, suppress sub-WARNING output in
# background mode; with one, append there instead.
3146 def _writemsg_level(self, msg, level=0, noiselevel=0):
3148 log_path = self.settings.get("PORTAGE_LOG_FILE")
3149 background = self.background
3151 if log_path is None:
3152 if not (background and level < logging.WARNING):
3153 portage.util.writemsg_level(msg,
3154 level=level, noiselevel=noiselevel)
3157 portage.util.writemsg_level(msg,
3158 level=level, noiselevel=noiselevel)
# NOTE(review): the write/close for this handle are missing from this
# sample -- confirm the original closes it.
3160 f = open(log_path, 'a')
# Binpkg: composite task installing one binary package:
# prefetch wait -> fetch -> verify -> clean -> unpack info/image ->
# setup -> extract -> merge.  NOTE(review): sampled source -- interior
# lines (incl. several def headers and try: lines) are missing.
3166 class Binpkg(CompositeTask):
3168 __slots__ = ("find_blockers",
3169 "ldpath_mtimes", "logger", "opts",
3170 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3171 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3172 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# _writemsg_level(): foreground goes to the console; a log file, when
# configured, also receives the message.
3174 def _writemsg_level(self, msg, level=0, noiselevel=0):
3176 if not self.background:
3177 portage.util.writemsg_level(msg,
3178 level=level, noiselevel=noiselevel)
3180 log_path = self.settings.get("PORTAGE_LOG_FILE")
3181 if log_path is not None:
3182 f = open(log_path, 'a')
# _start(): derive the build-dir layout and synthetic ebuild path for the
# binary package, then wait on any running prefetcher.
3191 settings = self.settings
3192 settings.setcpv(pkg)
3193 self._tree = "bintree"
3194 self._bintree = self.pkg.root_config.trees[self._tree]
3195 self._verify = not self.opts.pretend
3197 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3198 "portage", pkg.category, pkg.pf)
3199 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3200 pkg=pkg, settings=settings)
3201 self._image_dir = os.path.join(dir_path, "image")
3202 self._infloc = os.path.join(dir_path, "build-info")
3203 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3204 settings["EBUILD"] = self._ebuild_path
3205 debug = settings.get("PORTAGE_DEBUG") == "1"
3206 portage.doebuild_environment(self._ebuild_path, "setup",
3207 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3208 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3210 # The prefetcher has already completed or it
3211 # could be running now. If it's running now,
3212 # wait for it to complete since it holds
3213 # a lock on the file being fetched. The
3214 # portage.locks functions are only designed
3215 # to work between separate processes. Since
3216 # the lock is held by the current process,
3217 # use the scheduler and fetcher methods to
3218 # synchronize with the fetcher.
3219 prefetcher = self.prefetcher
3220 if prefetcher is None:
3222 elif not prefetcher.isAlive():
3224 elif prefetcher.poll() is None:
3226 waiting_msg = ("Fetching '%s' " + \
3227 "in the background. " + \
3228 "To view fetch progress, run `tail -f " + \
3229 "/var/log/emerge-fetch.log` in another " + \
3230 "terminal.") % prefetcher.pkg_path
3231 msg_prefix = colorize("GOOD", " * ")
3232 from textwrap import wrap
3233 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3234 for line in wrap(waiting_msg, 65))
3235 if not self.background:
3236 writemsg(waiting_msg, noiselevel=-1)
3238 self._current_task = prefetcher
3239 prefetcher.addExitListener(self._prefetch_exit)
3242 self._prefetch_exit(prefetcher)
# _prefetch_exit(): lock + wipe the build dir (unless pretend/fetchonly),
# then fetch the binary package if it is remote.
3244 def _prefetch_exit(self, prefetcher):
3247 pkg_count = self.pkg_count
3248 if not (self.opts.pretend or self.opts.fetchonly):
3249 self._build_dir.lock()
3251 shutil.rmtree(self._build_dir.dir_path)
3252 except EnvironmentError, e:
3253 if e.errno != errno.ENOENT:
3256 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3257 fetcher = BinpkgFetcher(background=self.background,
3258 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3259 pretend=self.opts.pretend, scheduler=self.scheduler)
3260 pkg_path = fetcher.pkg_path
3261 self._pkg_path = pkg_path
3263 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3265 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3266 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3267 short_msg = "emerge: (%s of %s) %s Fetch" % \
3268 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3269 self.logger.log(msg, short_msg=short_msg)
3270 self._start_task(fetcher, self._fetcher_exit)
3273 self._fetcher_exit(fetcher)
3275 def _fetcher_exit(self, fetcher):
3277 # The fetcher only has a returncode when
3278 # --getbinpkg is enabled.
3279 if fetcher.returncode is not None:
3280 self._fetched_pkg = True
3281 if self._default_exit(fetcher) != os.EX_OK:
3282 self._unlock_builddir()
3286 if self.opts.pretend:
3287 self._current_task = None
3288 self.returncode = os.EX_OK
# Digest verification of the fetched/local tbz2 (skipped when pretend).
3296 logfile = self.settings.get("PORTAGE_LOG_FILE")
3297 verifier = BinpkgVerifier(background=self.background,
3298 logfile=logfile, pkg=self.pkg)
3299 self._start_task(verifier, self._verifier_exit)
3302 self._verifier_exit(verifier)
# _verifier_exit(): inject a freshly fetched package into the bintree,
# honor fetchonly, then run the clean phase before unpacking.
3304 def _verifier_exit(self, verifier):
3305 if verifier is not None and \
3306 self._default_exit(verifier) != os.EX_OK:
3307 self._unlock_builddir()
3311 logger = self.logger
3313 pkg_count = self.pkg_count
3314 pkg_path = self._pkg_path
3316 if self._fetched_pkg:
3317 self._bintree.inject(pkg.cpv, filename=pkg_path)
3319 if self.opts.fetchonly:
3320 self._current_task = None
3321 self.returncode = os.EX_OK
3325 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3326 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3327 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3328 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3329 logger.log(msg, short_msg=short_msg)
3332 settings = self.settings
3333 ebuild_phase = EbuildPhase(background=self.background,
3334 pkg=pkg, phase=phase, scheduler=self.scheduler,
3335 settings=settings, tree=self._tree)
3337 self._start_task(ebuild_phase, self._clean_exit)
# _clean_exit(): recreate the build dirs, extract build-info (xpak) from
# the tbz2, backfill any missing CATEGORY/PF, record the md5, and start
# the setup phase via the scheduler's setup queue.
3339 def _clean_exit(self, clean_phase):
3340 if self._default_exit(clean_phase) != os.EX_OK:
3341 self._unlock_builddir()
3345 dir_path = self._build_dir.dir_path
3348 shutil.rmtree(dir_path)
3349 except (IOError, OSError), e:
3350 if e.errno != errno.ENOENT:
3354 infloc = self._infloc
3356 pkg_path = self._pkg_path
3359 for mydir in (dir_path, self._image_dir, infloc):
3360 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3361 gid=portage.data.portage_gid, mode=dir_mode)
3363 # This initializes PORTAGE_LOG_FILE.
3364 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3365 self._writemsg_level(">>> Extracting info\n")
3367 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3368 check_missing_metadata = ("CATEGORY", "PF")
3369 missing_metadata = set()
3370 for k in check_missing_metadata:
3371 v = pkg_xpak.getfile(k)
3373 missing_metadata.add(k)
3375 pkg_xpak.unpackinfo(infloc)
3376 for k in missing_metadata:
3384 f = open(os.path.join(infloc, k), 'wb')
3390 # Store the md5sum in the vdb.
3391 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3393 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3397 # This gives bashrc users an opportunity to do various things
3398 # such as remove binary packages after they're installed.
3399 settings = self.settings
3400 settings.setcpv(self.pkg)
3401 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3402 settings.backup_changes("PORTAGE_BINPKG_FILE")
3405 setup_phase = EbuildPhase(background=self.background,
3406 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3407 settings=settings, tree=self._tree)
3409 setup_phase.addExitListener(self._setup_exit)
3410 self._current_task = setup_phase
3411 self.scheduler.scheduleSetup(setup_phase)
3413 def _setup_exit(self, setup_phase):
3414 if self._default_exit(setup_phase) != os.EX_OK:
3415 self._unlock_builddir()
3419 extractor = BinpkgExtractorAsync(background=self.background,
3420 image_dir=self._image_dir,
3421 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3422 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3423 self._start_task(extractor, self._extractor_exit)
3425 def _extractor_exit(self, extractor):
3426 if self._final_exit(extractor) != os.EX_OK:
3427 self._unlock_builddir()
3428 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# _unlock_builddir(): no lock was taken in pretend/fetchonly mode.
3432 def _unlock_builddir(self):
3433 if self.opts.pretend or self.opts.fetchonly:
3435 portage.elog.elog_process(self.pkg.cpv, self.settings)
3436 self._build_dir.unlock()
# install(): merge the extracted image; PORTAGE_BINPKG_FILE is exported
# for bashrc hooks and popped again afterwards.
3440 # This gives bashrc users an opportunity to do various things
3441 # such as remove binary packages after they're installed.
3442 settings = self.settings
3443 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3444 settings.backup_changes("PORTAGE_BINPKG_FILE")
3446 merge = EbuildMerge(find_blockers=self.find_blockers,
3447 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3448 pkg=self.pkg, pkg_count=self.pkg_count,
3449 pkg_path=self._pkg_path, scheduler=self.scheduler,
3450 settings=settings, tree=self._tree, world_atom=self.world_atom)
3453 retval = merge.execute()
3455 settings.pop("PORTAGE_BINPKG_FILE", None)
3456 self._unlock_builddir()
# BinpkgFetcher: fetches a binary package with the user-configured
# FETCHCOMMAND/RESUMECOMMAND, with optional distlocks-based file locking
# and remote-mtime preservation.  NOTE(review): sampled source --
# interior lines are missing.
3459 class BinpkgFetcher(SpawnProcess):
3461 __slots__ = ("pkg", "pretend",
3462 "locked", "pkg_path", "_lock_obj")
3464 def __init__(self, **kwargs):
3465 SpawnProcess.__init__(self, **kwargs)
# NOTE(review): the line binding the local `pkg` is missing from this
# sample (presumably `pkg = self.pkg`).
3467 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start(): build the fetch URI and command, then spawn it.
3475 pretend = self.pretend
3476 bintree = pkg.root_config.trees["bintree"]
3477 settings = bintree.settings
3478 use_locks = "distlocks" in settings.features
3479 pkg_path = self.pkg_path
3482 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial download is known-invalid in the bintree.
3485 exists = os.path.exists(pkg_path)
3486 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3487 if not (pretend or resume):
3488 # Remove existing file or broken symlink.
3494 # urljoin doesn't work correctly with
3495 # unrecognized protocols like sftp
3496 if bintree._remote_has_index:
3497 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3499 rel_uri = pkg.cpv + ".tbz2"
3500 uri = bintree._remote_base_uri.rstrip("/") + \
3501 "/" + rel_uri.lstrip("/")
3503 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3504 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and exits successfully.
3507 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3508 self.returncode = os.EX_OK
# Prefer a protocol-specific command (e.g. FETCHCOMMAND_SFTP) and fall
# back to the generic one.
3512 protocol = urlparse.urlparse(uri)[0]
3513 fcmd_prefix = "FETCHCOMMAND"
3515 fcmd_prefix = "RESUMECOMMAND"
3516 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3518 fcmd = settings.get(fcmd_prefix)
3521 "DISTDIR" : os.path.dirname(pkg_path),
3523 "FILE" : os.path.basename(pkg_path)
3526 fetch_env = dict(settings.iteritems())
3527 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3528 for x in shlex.split(fcmd)]
3530 if self.fd_pipes is None:
3532 fd_pipes = self.fd_pipes
3534 # Redirect all output to stdout since some fetchers like
3535 # wget pollute stderr (if portage detects a problem then it
3536 # can send it's own message to stderr).
3537 fd_pipes.setdefault(0, sys.stdin.fileno())
3538 fd_pipes.setdefault(1, sys.stdout.fileno())
3539 fd_pipes.setdefault(2, sys.stdout.fileno())
3541 self.args = fetch_args
3542 self.env = fetch_env
3543 SpawnProcess._start(self)
3545 def _set_returncode(self, wait_retval):
3546 SpawnProcess._set_returncode(self, wait_retval)
3547 if self.returncode == os.EX_OK:
3548 # If possible, update the mtime to match the remote package if
3549 # the fetcher didn't already do it automatically.
3550 bintree = self.pkg.root_config.trees["bintree"]
3551 if bintree._remote_has_index:
3552 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3553 if remote_mtime is not None:
3555 remote_mtime = long(remote_mtime)
3560 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3564 if remote_mtime != local_mtime:
3566 os.utime(self.pkg_path,
3567 (remote_mtime, remote_mtime))
# lock(): single-holder file lock on the package path; double-lock raises.
3576 This raises an AlreadyLocked exception if lock() is called
3577 while a lock is already held. In order to avoid this, call
3578 unlock() or check whether the "locked" attribute is True
3579 or False before calling lock().
3581 if self._lock_obj is not None:
3582 raise self.AlreadyLocked((self._lock_obj,))
3584 self._lock_obj = portage.locks.lockfile(
3585 self.pkg_path, wantnewlockfile=1)
3588 class AlreadyLocked(portage.exception.PortageException):
# unlock(): safe to call when not locked.
3592 if self._lock_obj is None:
3594 portage.locks.unlockfile(self._lock_obj)
3595 self._lock_obj = None
# BinpkgVerifier: synchronously digest-checks a binary package, renaming
# it aside on failure.  NOTE(review): sampled source -- interior lines
# (incl. rval initialization and log-file close) are missing.
3598 class BinpkgVerifier(AsynchronousTask):
3599 __slots__ = ("logfile", "pkg",)
3603 Note: Unlike a normal AsynchronousTask.start() method,
3604 this one does all work is synchronously. The returncode
3605 attribute will be set before it returns.
3609 root_config = pkg.root_config
3610 bintree = root_config.trees["bintree"]
# stdout/stderr are temporarily redirected into the log file when
# running in the background; originals are restored below.
3612 stdout_orig = sys.stdout
3613 stderr_orig = sys.stderr
3615 if self.background and self.logfile is not None:
3616 log_file = open(self.logfile, 'a')
3618 if log_file is not None:
3619 sys.stdout = log_file
3620 sys.stderr = log_file
3622 bintree.digestCheck(pkg)
3623 except portage.exception.FileNotFound:
3624 writemsg("!!! Fetching Binary failed " + \
3625 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3627 except portage.exception.DigestException, e:
3628 writemsg("\n!!! Digest verification failed:\n",
3630 writemsg("!!! %s\n" % e.value[0],
3632 writemsg("!!! Reason: %s\n" % e.value[1],
3634 writemsg("!!! Got: %s\n" % e.value[2],
3636 writemsg("!!! Expected: %s\n" % e.value[3],
# On failure, move the bad file to a checksum-failure temp name so a
# later fetch can start fresh.
3639 if rval != os.EX_OK:
3640 pkg_path = bintree.getname(pkg.cpv)
3641 head, tail = os.path.split(pkg_path)
3642 temp_filename = portage._checksum_failure_temp_file(head, tail)
3643 writemsg("File renamed to '%s'\n" % (temp_filename,),
3646 sys.stdout = stdout_orig
3647 sys.stderr = stderr_orig
3648 if log_file is not None:
3651 self.returncode = rval
# BinpkgPrefetcher: background fetch + verify + bintree injection of one
# binary package, logging to the shared fetch log.
# NOTE(review): sampled source -- interior lines are missing.
3654 class BinpkgPrefetcher(CompositeTask):
3656 __slots__ = ("pkg",) + \
3657 ("pkg_path", "_bintree",)
3660 self._bintree = self.pkg.root_config.trees["bintree"]
3661 fetcher = BinpkgFetcher(background=self.background,
3662 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3663 scheduler=self.scheduler)
3664 self.pkg_path = fetcher.pkg_path
3665 self._start_task(fetcher, self._fetcher_exit)
3667 def _fetcher_exit(self, fetcher):
3669 if self._default_exit(fetcher) != os.EX_OK:
3673 verifier = BinpkgVerifier(background=self.background,
3674 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3675 self._start_task(verifier, self._verifier_exit)
# _verifier_exit(): only a verified package is injected into the bintree.
3677 def _verifier_exit(self, verifier):
3678 if self._default_exit(verifier) != os.EX_OK:
3682 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3684 self._current_task = None
3685 self.returncode = os.EX_OK
# BinpkgExtractorAsync: spawns `bzip2 -dqc | tar -xp` via bash to unpack
# the tbz2 image into image_dir; paths are shell-quoted.
# NOTE(review): sampled source -- the _start def header is missing.
3688 class BinpkgExtractorAsync(SpawnProcess):
3690 __slots__ = ("image_dir", "pkg", "pkg_path")
3692 _shell_binary = portage.const.BASH_BINARY
3695 self.args = [self._shell_binary, "-c",
3696 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3697 (portage._shell_quote(self.pkg_path),
3698 portage._shell_quote(self.image_dir))]
3700 self.env = self.pkg.root_config.settings.environ()
3701 SpawnProcess._start(self)
# MergeListItem: one entry of the merge list; dispatches to EbuildBuild,
# Binpkg, or (for uninstalls) PackageUninstall, and forwards install().
# NOTE(review): sampled source -- interior lines are missing.
3703 class MergeListItem(CompositeTask):
3706 TODO: For parallel scheduling, everything here needs asynchronous
3707 execution support (start, poll, and wait methods).
3710 __slots__ = ("args_set",
3711 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3712 "find_blockers", "logger", "mtimedb", "pkg",
3713 "pkg_count", "pkg_to_replace", "prefetcher",
3714 "settings", "statusMessage", "world_atom") + \
# _start(): uninstalls short-circuit (handled in merge()); otherwise log
# a status line and start the appropriate build/binpkg task.
3720 build_opts = self.build_opts
3723 # uninstall, executed by self.merge()
3724 self.returncode = os.EX_OK
3728 args_set = self.args_set
3729 find_blockers = self.find_blockers
3730 logger = self.logger
3731 mtimedb = self.mtimedb
3732 pkg_count = self.pkg_count
3733 scheduler = self.scheduler
3734 settings = self.settings
3735 world_atom = self.world_atom
3736 ldpath_mtimes = mtimedb["ldpath"]
3738 action_desc = "Emerging"
3740 if pkg.type_name == "binary":
3741 action_desc += " binary"
3743 if build_opts.fetchonly:
3744 action_desc = "Fetching"
3746 msg = "%s (%s of %s) %s" % \
3748 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3749 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3750 colorize("GOOD", pkg.cpv))
# Annotate packages coming from an overlay (repo other than PORTDIR's).
3752 portdb = pkg.root_config.trees["porttree"].dbapi
3753 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3754 if portdir_repo_name:
3755 pkg_repo_name = pkg.metadata.get("repository")
3756 if pkg_repo_name != portdir_repo_name:
3757 if not pkg_repo_name:
3758 pkg_repo_name = "unknown repo"
3759 msg += " from %s" % pkg_repo_name
3762 msg += " %s %s" % (preposition, pkg.root)
3764 if not build_opts.pretend:
3765 self.statusMessage(msg)
3766 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3767 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3769 if pkg.type_name == "ebuild":
3771 build = EbuildBuild(args_set=args_set,
3772 background=self.background,
3773 config_pool=self.config_pool,
3774 find_blockers=find_blockers,
3775 ldpath_mtimes=ldpath_mtimes, logger=logger,
3776 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3777 prefetcher=self.prefetcher, scheduler=scheduler,
3778 settings=settings, world_atom=world_atom)
3780 self._install_task = build
3781 self._start_task(build, self._default_final_exit)
3784 elif pkg.type_name == "binary":
3786 binpkg = Binpkg(background=self.background,
3787 find_blockers=find_blockers,
3788 ldpath_mtimes=ldpath_mtimes, logger=logger,
3789 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3790 prefetcher=self.prefetcher, settings=settings,
3791 scheduler=scheduler, world_atom=world_atom)
3793 self._install_task = binpkg
3794 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the underlying install task.
3798 self._install_task.poll()
3799 return self.returncode
3802 self._install_task.wait()
3803 return self.returncode
# merge(): handles the uninstall case synchronously, otherwise defers to
# the install task's install() method.
3808 build_opts = self.build_opts
3809 find_blockers = self.find_blockers
3810 logger = self.logger
3811 mtimedb = self.mtimedb
3812 pkg_count = self.pkg_count
3813 prefetcher = self.prefetcher
3814 scheduler = self.scheduler
3815 settings = self.settings
3816 world_atom = self.world_atom
3817 ldpath_mtimes = mtimedb["ldpath"]
3820 if not (build_opts.buildpkgonly or \
3821 build_opts.fetchonly or build_opts.pretend):
3823 uninstall = PackageUninstall(background=self.background,
3824 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3825 pkg=pkg, scheduler=scheduler, settings=settings)
3828 retval = uninstall.wait()
3829 if retval != os.EX_OK:
3833 if build_opts.fetchonly or \
3834 build_opts.buildpkgonly:
3835 return self.returncode
3837 retval = self._install_task.install()
# Synchronously merges (or unmerges) the package prepared by a MergeListItem,
# printing an "Installing"/"Uninstalling" status line first.
# NOTE(review): non-contiguous sample; the _start() header and the
# operation-test branch lines are missing, so only comments are added.
3840 class PackageMerge(AsynchronousTask):
3842 TODO: Implement asynchronous merge so that the scheduler can
3843 run while a merge is executing.
3846 __slots__ = ("merge",)
3850 pkg = self.merge.pkg
3851 pkg_count = self.merge.pkg_count
# Choose wording based on whether this is an uninstall or an install.
3854 action_desc = "Uninstalling"
3855 preposition = "from"
3857 action_desc = "Installing"
3860 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3863 msg += " %s %s" % (preposition, pkg.root)
# Status output is suppressed for fetch-only, pretend, and buildpkg-only.
3865 if not self.merge.build_opts.fetchonly and \
3866 not self.merge.build_opts.pretend and \
3867 not self.merge.build_opts.buildpkgonly:
3868 self.merge.statusMessage(msg)
# Delegate the actual (blocking) merge to the wrapped MergeListItem.
3870 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages, sets).
# NOTE(review): non-contiguous sample; `self.arg = arg` and the __str__
# header are missing from this view, so only comments are added.
3873 class DependencyArg(object):
3874 def __init__(self, arg=None, root_config=None):
3876 self.root_config = root_config
3879 return str(self.arg)
# A command-line argument that is a single dependency atom; normalizes the
# atom to a portage.dep.Atom instance and exposes it as a one-element set.
# NOTE(review): non-contiguous sample (the `self.atom = atom` line is
# missing); only comments are added.
3881 class AtomArg(DependencyArg):
3882 def __init__(self, atom=None, **kwargs):
3883 DependencyArg.__init__(self, **kwargs)
3885 if not isinstance(self.atom, portage.dep.Atom):
3886 self.atom = portage.dep.Atom(self.atom)
3887 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A command-line argument that wraps one concrete Package instance.

	The package is pinned with an exact-version ("=") atom, and that single
	atom is exposed through the set attribute for uniform iteration with
	the other DependencyArg subclasses.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.package = package
		self.atom = exact_atom
		self.set = (exact_atom, )
# A command-line argument that names a package set (e.g. "@world"); strips
# the set prefix from the raw argument to obtain the set name.
# NOTE(review): non-contiguous sample (the `self.set = set` assignment is
# missing from this view); only comments are added.
3896 class SetArg(DependencyArg):
3897 def __init__(self, set=None, **kwargs):
3898 DependencyArg.__init__(self, **kwargs)
3900 self.name = self.arg[len(SETPREFIX):]
# A single edge in the dependency graph: an atom required by a parent
# package at a given depth/priority/root.  Defaults priority to a fresh
# DepPriority when none is given.
# NOTE(review): non-contiguous sample (the default-depth assignment after
# the final `if` is missing); only comments are added.
3902 class Dependency(SlotObject):
3903 __slots__ = ("atom", "blocker", "depth",
3904 "parent", "onlydeps", "priority", "root")
3905 def __init__(self, **kwargs):
3906 SlotObject.__init__(self, **kwargs)
3907 if self.priority is None:
3908 self.priority = DepPriority()
3909 if self.depth is None:
# NOTE(review): non-contiguous sample of the original file; many interior
# lines (try:/else: lines, some loop headers, the keys() def) are missing,
# so only comments are added here.
3912 class BlockerCache(DictMixin):
3913 """This caches blockers of installed packages so that dep_check does not
3914 have to be done for every single installed package on every invocation of
3915 emerge. The cache is invalidated whenever it is detected that something
3916 has changed that might alter the results of dep_check() calls:
3917 1) the set of installed packages (including COUNTER) has changed
3918 2) the old-style virtuals have changed
3921 # Number of uncached packages to trigger cache update, since
3922 # it's wasteful to update it for every vdb change.
3923 _cache_threshold = 5
# Lightweight record stored per installed package: vdb COUNTER plus the
# tuple of blocker atoms extracted from its *DEPEND.
3925 class BlockerData(object):
3927 __slots__ = ("__weakref__", "atoms", "counter")
3929 def __init__(self, counter, atoms):
3930 self.counter = counter
3933 def __init__(self, myroot, vardb):
3935 self._virtuals = vardb.settings.getvirtuals()
3936 self._cache_filename = os.path.join(myroot,
3937 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3938 self._cache_version = "1"
3939 self._cache_data = None
3940 self._modified = set()
# Load the pickled cache; find_global = None disallows instantiating
# arbitrary classes while unpickling (defense against a corrupt/hostile
# cache file).
3945 f = open(self._cache_filename)
3946 mypickle = pickle.Unpickler(f)
3947 mypickle.find_global = None
3948 self._cache_data = mypickle.load()
3951 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3952 if isinstance(e, pickle.UnpicklingError):
3953 writemsg("!!! Error loading '%s': %s\n" % \
3954 (self._cache_filename, str(e)), noiselevel=-1)
# Structural sanity check of the loaded dict before trusting it.
3957 cache_valid = self._cache_data and \
3958 isinstance(self._cache_data, dict) and \
3959 self._cache_data.get("version") == self._cache_version and \
3960 isinstance(self._cache_data.get("blockers"), dict)
3962 # Validate all the atoms and counters so that
3963 # corruption is detected as soon as possible.
3964 invalid_items = set()
3965 for k, v in self._cache_data["blockers"].iteritems():
3966 if not isinstance(k, basestring):
3967 invalid_items.add(k)
3970 if portage.catpkgsplit(k) is None:
3971 invalid_items.add(k)
3973 except portage.exception.InvalidData:
3974 invalid_items.add(k)
3976 if not isinstance(v, tuple) or \
3978 invalid_items.add(k)
3981 if not isinstance(counter, (int, long)):
3982 invalid_items.add(k)
3984 if not isinstance(atoms, (list, tuple)):
3985 invalid_items.add(k)
3987 invalid_atom = False
3989 if not isinstance(atom, basestring):
# Every cached atom must be a blocker ("!...") and a valid atom.
3992 if atom[:1] != "!" or \
3993 not portage.isvalidatom(
3994 atom, allow_blockers=True):
3998 invalid_items.add(k)
# Drop every entry that failed validation above.
4001 for k in invalid_items:
4002 del self._cache_data["blockers"][k]
4003 if not self._cache_data["blockers"]:
# Invalid or empty cache: start over with a fresh structure.
4007 self._cache_data = {"version":self._cache_version}
4008 self._cache_data["blockers"] = {}
4009 self._cache_data["virtuals"] = self._virtuals
4010 self._modified.clear()
4013 """If the current user has permission and the internal blocker cache
4014 been updated, save it to disk and mark it unmodified. This is called
4015 by emerge after it has proccessed blockers for all installed packages.
4016 Currently, the cache is only written if the user has superuser
4017 privileges (since that's required to obtain a lock), but all users
4018 have read access and benefit from faster blocker lookups (as long as
4019 the entire cache is still valid). The cache is stored as a pickled
4020 dict object with the following format:
4024 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4025 "virtuals" : vardb.settings.getvirtuals()
# Only rewrite the file once enough entries changed (_cache_threshold);
# writing per-change would be wasteful.
4028 if len(self._modified) >= self._cache_threshold and \
4031 f = portage.util.atomic_ofstream(self._cache_filename)
4032 pickle.dump(self._cache_data, f, -1)
# Grant group read access so non-root users benefit from the cache.
4034 portage.util.apply_secpass_permissions(
4035 self._cache_filename, gid=portage.portage_gid, mode=0644)
4036 except (IOError, OSError), e:
4038 self._modified.clear()
4040 def __setitem__(self, cpv, blocker_data):
4042 Update the cache and mark it as modified for a future call to
4045 @param cpv: Package for which to cache blockers.
4047 @param blocker_data: An object with counter and atoms attributes.
4048 @type blocker_data: BlockerData
# Atoms are stored as plain strings so the pickle stays class-free.
4050 self._cache_data["blockers"][cpv] = \
4051 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4052 self._modified.add(cpv)
4055 if self._cache_data is None:
4056 # triggered by python-trace
4058 return iter(self._cache_data["blockers"])
4060 def __delitem__(self, cpv):
4061 del self._cache_data["blockers"][cpv]
4063 def __getitem__(self, cpv):
4066 @returns: An object with counter and atoms attributes.
4068 return self.BlockerData(*self._cache_data["blockers"][cpv])
4071 """This needs to be implemented so that self.__repr__() doesn't raise
4072 an AttributeError."""
# Computes, with the help of BlockerCache, the set of installed packages that
# block (or are blocked by) a package about to be installed.
# NOTE(review): non-contiguous sample; try:/else: lines and several loop
# headers are missing from this view, so only comments are added.
4075 class BlockerDB(object):
4077 def __init__(self, root_config):
4078 self._root_config = root_config
4079 self._vartree = root_config.trees["vartree"]
4080 self._portdb = root_config.trees["porttree"].dbapi
4082 self._dep_check_trees = None
4083 self._fake_vartree = None
# Lazily build (and afterwards just re-sync) a FakeVartree plus the
# dep_check trees mapping that points both porttree and vartree at it.
4085 def _get_fake_vartree(self, acquire_lock=0):
4086 fake_vartree = self._fake_vartree
4087 if fake_vartree is None:
4088 fake_vartree = FakeVartree(self._root_config,
4089 acquire_lock=acquire_lock)
4090 self._fake_vartree = fake_vartree
4091 self._dep_check_trees = { self._vartree.root : {
4092 "porttree" : fake_vartree,
4093 "vartree" : fake_vartree,
4096 fake_vartree.sync(acquire_lock=acquire_lock)
# Return installed packages that block new_pkg, plus installed packages
# that new_pkg itself blocks.
4099 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4100 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4101 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4102 settings = self._vartree.settings
4103 stale_cache = set(blocker_cache)
4104 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4105 dep_check_trees = self._dep_check_trees
4106 vardb = fake_vartree.dbapi
4107 installed_pkgs = list(vardb)
# Refresh the blocker cache: entries whose COUNTER changed are recomputed
# with dep_check; entries for packages no longer installed become stale.
4109 for inst_pkg in installed_pkgs:
4110 stale_cache.discard(inst_pkg.cpv)
4111 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4112 if cached_blockers is not None and \
4113 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4114 cached_blockers = None
4115 if cached_blockers is not None:
4116 blocker_atoms = cached_blockers.atoms
4118 # Use aux_get() to trigger FakeVartree global
4119 # updates on *DEPEND when appropriate.
4120 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Strict checking is temporarily disabled for installed packages,
# whose deps were validated at install time.
4122 portage.dep._dep_check_strict = False
4123 success, atoms = portage.dep_check(depstr,
4124 vardb, settings, myuse=inst_pkg.use.enabled,
4125 trees=dep_check_trees, myroot=inst_pkg.root)
4127 portage.dep._dep_check_strict = True
4129 pkg_location = os.path.join(inst_pkg.root,
4130 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4131 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4132 (pkg_location, atoms), noiselevel=-1)
# Only blocker ("!") atoms are cached, sorted for stable pickles.
4135 blocker_atoms = [atom for atom in atoms \
4136 if atom.startswith("!")]
4137 blocker_atoms.sort()
4138 counter = long(inst_pkg.metadata["COUNTER"])
4139 blocker_cache[inst_pkg.cpv] = \
4140 blocker_cache.BlockerData(counter, blocker_atoms)
4141 for cpv in stale_cache:
4142 del blocker_cache[cpv]
4143 blocker_cache.flush()
# Map each blocker atom back to the installed package(s) declaring it.
4145 blocker_parents = digraph()
4147 for pkg in installed_pkgs:
4148 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4149 blocker_atom = blocker_atom.lstrip("!")
4150 blocker_atoms.append(blocker_atom)
4151 blocker_parents.add(blocker_atom, pkg)
4153 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4154 blocking_pkgs = set()
4155 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4156 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4158 # Check for blockers in the other direction.
4159 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4161 portage.dep._dep_check_strict = False
4162 success, atoms = portage.dep_check(depstr,
4163 vardb, settings, myuse=new_pkg.use.enabled,
4164 trees=dep_check_trees, myroot=new_pkg.root)
4166 portage.dep._dep_check_strict = True
4168 # We should never get this far with invalid deps.
4169 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4172 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4175 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
# An installed package matched by one of new_pkg's blocker atoms is
# itself blocked (StopIteration just means no match for that package).
4176 for inst_pkg in installed_pkgs:
4178 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4179 except (portage.exception.InvalidDependString, StopIteration):
4181 blocking_pkgs.add(inst_pkg)
4183 return blocking_pkgs
# Prints a formatted error explaining an invalid/corrupt dependency string,
# with advice that differs for installed ("nomerge") vs. new packages.
# NOTE(review): non-contiguous sample (the `msg = []` initializer and the
# else: line are missing from this view); only comments are added.
4185 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4187 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4188 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4189 p_type, p_root, p_key, p_status = parent_node
# Installed package: suggest reinstalling it and point at its vdb files.
4191 if p_status == "nomerge":
4192 category, pf = portage.catsplit(p_key)
4193 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4194 msg.append("Portage is unable to process the dependencies of the ")
4195 msg.append("'%s' package. " % p_key)
4196 msg.append("In order to correct this problem, the package ")
4197 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4198 msg.append("As a temporary workaround, the --nodeps option can ")
4199 msg.append("be used to ignore all dependencies. For reference, ")
4200 msg.append("the problematic dependencies can be found in the ")
4201 msg.append("*DEPEND files located in '%s/'." % pkg_location)
# New package: nothing the user can fix locally; direct them upstream.
4203 msg.append("This package can not be installed. ")
4204 msg.append("Please notify the '%s' package maintainer " % p_key)
4205 msg.append("about this problem.")
# Wrap the advice text to 72 columns and emit everything at ERROR level.
4207 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4208 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
# NOTE(review): non-contiguous sample; several method headers (clear, copy,
# __iter__, cp_all, cpv_all, flush) and branch lines are missing from this
# view, so only comments are added here.
4210 class PackageVirtualDbapi(portage.dbapi):
4212 A dbapi-like interface class that represents the state of the installed
4213 package database as new packages are installed, replacing any packages
4214 that previously existed in the same slot. The main difference between
4215 this class and fakedbapi is that this one uses Package instances
4216 internally (passed in via cpv_inject() and cpv_remove() calls).
4218 def __init__(self, settings):
4219 portage.dbapi.__init__(self)
4220 self.settings = settings
4221 self._match_cache = {}
4227 Remove all packages.
4231 self._cp_map.clear()
4232 self._cpv_map.clear()
# Shallow copy, but cp_map lists are duplicated so the copy can diverge.
4235 obj = PackageVirtualDbapi(self.settings)
4236 obj._match_cache = self._match_cache.copy()
4237 obj._cp_map = self._cp_map.copy()
4238 for k, v in obj._cp_map.iteritems():
4239 obj._cp_map[k] = v[:]
4240 obj._cpv_map = self._cpv_map.copy()
4244 return self._cpv_map.itervalues()
# Membership compares against the stored Package for the same cpv.
4246 def __contains__(self, item):
4247 existing = self._cpv_map.get(item.cpv)
4248 if existing is not None and \
# get() accepts either a Package-like object (with a cpv attribute) or a
# (type_name, root, cpv, operation) tuple key.
4253 def get(self, item, default=None):
4254 cpv = getattr(item, "cpv", None)
4258 type_name, root, cpv, operation = item
4260 existing = self._cpv_map.get(cpv)
4261 if existing is not None and \
4266 def match_pkgs(self, atom):
4267 return [self._cpv_map[cpv] for cpv in self.match(atom)]
# Any mutation invalidates both the category list and the match cache.
4269 def _clear_cache(self):
4270 if self._categories is not None:
4271 self._categories = None
4272 if self._match_cache:
4273 self._match_cache = {}
# Memoized atom matching on top of the base dbapi implementation.
4275 def match(self, origdep, use_cache=1):
4276 result = self._match_cache.get(origdep)
4277 if result is not None:
4279 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4280 self._match_cache[origdep] = result
4283 def cpv_exists(self, cpv):
4284 return cpv in self._cpv_map
4286 def cp_list(self, mycp, use_cache=1):
4287 cachelist = self._match_cache.get(mycp)
4288 # cp_list() doesn't expand old-style virtuals
4289 if cachelist and cachelist[0].startswith(mycp):
4291 cpv_list = self._cp_map.get(mycp)
4292 if cpv_list is None:
4295 cpv_list = [pkg.cpv for pkg in cpv_list]
4296 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for virtuals, which may expand later.
4297 if not (not cpv_list and mycp.startswith("virtual/")):
4298 self._match_cache[mycp] = cpv_list
4302 return list(self._cp_map)
4305 return list(self._cpv_map)
# Insert a Package, displacing any existing package in the same slot.
4307 def cpv_inject(self, pkg):
4308 cp_list = self._cp_map.get(pkg.cp)
4311 self._cp_map[pkg.cp] = cp_list
4312 e_pkg = self._cpv_map.get(pkg.cpv)
4313 if e_pkg is not None:
4316 self.cpv_remove(e_pkg)
4317 for e_pkg in cp_list:
4318 if e_pkg.slot_atom == pkg.slot_atom:
4321 self.cpv_remove(e_pkg)
4324 self._cpv_map[pkg.cpv] = pkg
4327 def cpv_remove(self, pkg):
4328 old_pkg = self._cpv_map.get(pkg.cpv)
4331 self._cp_map[pkg.cp].remove(pkg)
4332 del self._cpv_map[pkg.cpv]
# aux_get/aux_update read and write the stored Package's metadata dict;
# missing keys are returned as empty strings.
4335 def aux_get(self, cpv, wants):
4336 metadata = self._cpv_map[cpv].metadata
4337 return [metadata.get(x, "") for x in wants]
4339 def aux_update(self, cpv, values):
4340 self._cpv_map[cpv].metadata.update(values)
4343 class depgraph(object):
4345 pkg_tree_map = RootConfig.pkg_tree_map
4347 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4349 def __init__(self, settings, trees, myopts, myparams, spinner):
4350 self.settings = settings
4351 self.target_root = settings["ROOT"]
4352 self.myopts = myopts
4353 self.myparams = myparams
4355 if settings.get("PORTAGE_DEBUG", "") == "1":
4357 self.spinner = spinner
4358 self._running_root = trees["/"]["root_config"]
4359 self._opts_no_restart = Scheduler._opts_no_restart
4360 self.pkgsettings = {}
4361 # Maps slot atom to package for each Package added to the graph.
4362 self._slot_pkg_map = {}
4363 # Maps nodes to the reasons they were selected for reinstallation.
4364 self._reinstall_nodes = {}
4367 self._trees_orig = trees
4369 # Contains a filtered view of preferred packages that are selected
4370 # from available repositories.
4371 self._filtered_trees = {}
4372 # Contains installed packages and new packages that have been added
4374 self._graph_trees = {}
4375 # All Package instances
4376 self._pkg_cache = {}
4377 for myroot in trees:
4378 self.trees[myroot] = {}
4379 # Create a RootConfig instance that references
4380 # the FakeVartree instead of the real one.
4381 self.roots[myroot] = RootConfig(
4382 trees[myroot]["vartree"].settings,
4384 trees[myroot]["root_config"].setconfig)
4385 for tree in ("porttree", "bintree"):
4386 self.trees[myroot][tree] = trees[myroot][tree]
4387 self.trees[myroot]["vartree"] = \
4388 FakeVartree(trees[myroot]["root_config"],
4389 pkg_cache=self._pkg_cache)
4390 self.pkgsettings[myroot] = portage.config(
4391 clone=self.trees[myroot]["vartree"].settings)
4392 self._slot_pkg_map[myroot] = {}
4393 vardb = self.trees[myroot]["vartree"].dbapi
4394 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4395 "--buildpkgonly" not in self.myopts
4396 # This fakedbapi instance will model the state that the vdb will
4397 # have after new packages have been installed.
4398 fakedb = PackageVirtualDbapi(vardb.settings)
4399 if preload_installed_pkgs:
4401 self.spinner.update()
4402 # This triggers metadata updates via FakeVartree.
4403 vardb.aux_get(pkg.cpv, [])
4404 fakedb.cpv_inject(pkg)
4406 # Now that the vardb state is cached in our FakeVartree,
4407 # we won't be needing the real vartree cache for awhile.
4408 # To make some room on the heap, clear the vardbapi
4410 trees[myroot]["vartree"].dbapi._clear_cache()
4413 self.mydbapi[myroot] = fakedb
4416 graph_tree.dbapi = fakedb
4417 self._graph_trees[myroot] = {}
4418 self._filtered_trees[myroot] = {}
4419 # Substitute the graph tree for the vartree in dep_check() since we
4420 # want atom selections to be consistent with package selections
4421 # have already been made.
4422 self._graph_trees[myroot]["porttree"] = graph_tree
4423 self._graph_trees[myroot]["vartree"] = graph_tree
4424 def filtered_tree():
4426 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4427 self._filtered_trees[myroot]["porttree"] = filtered_tree
4429 # Passing in graph_tree as the vartree here could lead to better
4430 # atom selections in some cases by causing atoms for packages that
4431 # have been added to the graph to be preferred over other choices.
4432 # However, it can trigger atom selections that result in
4433 # unresolvable direct circular dependencies. For example, this
4434 # happens with gwydion-dylan which depends on either itself or
4435 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4436 # gwydion-dylan-bin needs to be selected in order to avoid a
4437 # an unresolvable direct circular dependency.
4439 # To solve the problem described above, pass in "graph_db" so that
4440 # packages that have been added to the graph are distinguishable
4441 # from other available packages and installed packages. Also, pass
4442 # the parent package into self._select_atoms() calls so that
4443 # unresolvable direct circular dependencies can be detected and
4444 # avoided when possible.
4445 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4446 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4449 portdb = self.trees[myroot]["porttree"].dbapi
4450 bindb = self.trees[myroot]["bintree"].dbapi
4451 vardb = self.trees[myroot]["vartree"].dbapi
4452 # (db, pkg_type, built, installed, db_keys)
4453 if "--usepkgonly" not in self.myopts:
4454 db_keys = list(portdb._aux_cache_keys)
4455 dbs.append((portdb, "ebuild", False, False, db_keys))
4456 if "--usepkg" in self.myopts:
4457 db_keys = list(bindb._aux_cache_keys)
4458 dbs.append((bindb, "binary", True, False, db_keys))
4459 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4460 dbs.append((vardb, "installed", True, True, db_keys))
4461 self._filtered_trees[myroot]["dbs"] = dbs
4462 if "--usepkg" in self.myopts:
4463 self.trees[myroot]["bintree"].populate(
4464 "--getbinpkg" in self.myopts,
4465 "--getbinpkgonly" in self.myopts)
4468 self.digraph=portage.digraph()
4469 # contains all sets added to the graph
4471 # contains atoms given as arguments
4472 self._sets["args"] = InternalPackageSet()
4473 # contains all atoms from all sets added to the graph, including
4474 # atoms given as arguments
4475 self._set_atoms = InternalPackageSet()
4476 self._atom_arg_map = {}
4477 # contains all nodes pulled in by self._set_atoms
4478 self._set_nodes = set()
4479 # Contains only Blocker -> Uninstall edges
4480 self._blocker_uninstalls = digraph()
4481 # Contains only Package -> Blocker edges
4482 self._blocker_parents = digraph()
4483 # Contains only irrelevant Package -> Blocker edges
4484 self._irrelevant_blockers = digraph()
4485 # Contains only unsolvable Package -> Blocker edges
4486 self._unsolvable_blockers = digraph()
4487 # Contains all Blocker -> Blocked Package edges
4488 self._blocked_pkgs = digraph()
4489 # Contains world packages that have been protected from
4490 # uninstallation but may not have been added to the graph
4491 # if the graph is not complete yet.
4492 self._blocked_world_pkgs = {}
4493 self._slot_collision_info = {}
4494 # Slot collision nodes are not allowed to block other packages since
4495 # blocker validation is only able to account for one package per slot.
4496 self._slot_collision_nodes = set()
4497 self._parent_atoms = {}
4498 self._slot_conflict_parent_atoms = set()
4499 self._serialized_tasks_cache = None
4500 self._scheduler_graph = None
4501 self._displayed_list = None
4502 self._pprovided_args = []
4503 self._missing_args = []
4504 self._masked_installed = set()
4505 self._unsatisfied_deps_for_display = []
4506 self._unsatisfied_blockers_for_display = None
4507 self._circular_deps_for_display = None
4508 self._dep_stack = []
4509 self._unsatisfied_deps = []
4510 self._initially_unsatisfied_deps = []
4511 self._ignored_deps = []
4512 self._required_set_names = set(["system", "world"])
4513 self._select_atoms = self._select_atoms_highest_available
4514 self._select_package = self._select_pkg_highest_available
4515 self._highest_pkg_cache = {}
4517 def _show_slot_collision_notice(self):
4518 """Show an informational message advising the user to mask one of the
4519 the packages. In some cases it may be possible to resolve this
4520 automatically, but support for backtracking (removal nodes that have
4521 already been selected) will be required in order to handle all possible
4525 if not self._slot_collision_info:
4528 self._show_merge_list()
4531 msg.append("\n!!! Multiple package instances within a single " + \
4532 "package slot have been pulled\n")
4533 msg.append("!!! into the dependency graph, resulting" + \
4534 " in a slot conflict:\n\n")
4536 # Max number of parents shown, to avoid flooding the display.
4538 explanation_columns = 70
4540 for (slot_atom, root), slot_nodes \
4541 in self._slot_collision_info.iteritems():
4542 msg.append(str(slot_atom))
4545 for node in slot_nodes:
4547 msg.append(str(node))
4548 parent_atoms = self._parent_atoms.get(node)
4551 # Prefer conflict atoms over others.
4552 for parent_atom in parent_atoms:
4553 if len(pruned_list) >= max_parents:
4555 if parent_atom in self._slot_conflict_parent_atoms:
4556 pruned_list.add(parent_atom)
4558 # If this package was pulled in by conflict atoms then
4559 # show those alone since those are the most interesting.
4561 # When generating the pruned list, prefer instances
4562 # of DependencyArg over instances of Package.
4563 for parent_atom in parent_atoms:
4564 if len(pruned_list) >= max_parents:
4566 parent, atom = parent_atom
4567 if isinstance(parent, DependencyArg):
4568 pruned_list.add(parent_atom)
4569 # Prefer Packages instances that themselves have been
4570 # pulled into collision slots.
4571 for parent_atom in parent_atoms:
4572 if len(pruned_list) >= max_parents:
4574 parent, atom = parent_atom
4575 if isinstance(parent, Package) and \
4576 (parent.slot_atom, parent.root) \
4577 in self._slot_collision_info:
4578 pruned_list.add(parent_atom)
4579 for parent_atom in parent_atoms:
4580 if len(pruned_list) >= max_parents:
4582 pruned_list.add(parent_atom)
4583 omitted_parents = len(parent_atoms) - len(pruned_list)
4584 parent_atoms = pruned_list
4585 msg.append(" pulled in by\n")
4586 for parent_atom in parent_atoms:
4587 parent, atom = parent_atom
4588 msg.append(2*indent)
4589 if isinstance(parent,
4590 (PackageArg, AtomArg)):
4591 # For PackageArg and AtomArg types, it's
4592 # redundant to display the atom attribute.
4593 msg.append(str(parent))
4595 # Display the specific atom from SetArg or
4597 msg.append("%s required by %s" % (atom, parent))
4600 msg.append(2*indent)
4601 msg.append("(and %d more)\n" % omitted_parents)
4603 msg.append(" (no parents)\n")
4605 explanation = self._slot_conflict_explanation(slot_nodes)
4608 msg.append(indent + "Explanation:\n\n")
4609 for line in textwrap.wrap(explanation, explanation_columns):
4610 msg.append(2*indent + line + "\n")
4613 sys.stderr.write("".join(msg))
4616 explanations_for_all = explanations == len(self._slot_collision_info)
4618 if explanations_for_all or "--quiet" in self.myopts:
4622 msg.append("It may be possible to solve this problem ")
4623 msg.append("by using package.mask to prevent one of ")
4624 msg.append("those packages from being selected. ")
4625 msg.append("However, it is also possible that conflicting ")
4626 msg.append("dependencies exist such that they are impossible to ")
4627 msg.append("satisfy simultaneously. If such a conflict exists in ")
4628 msg.append("the dependencies of two different packages, then those ")
4629 msg.append("packages can not be installed simultaneously.")
4631 from formatter import AbstractFormatter, DumbWriter
4632 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4634 f.add_flowing_data(x)
4638 msg.append("For more information, see MASKED PACKAGES ")
4639 msg.append("section in the emerge man page or refer ")
4640 msg.append("to the Gentoo Handbook.")
4642 f.add_flowing_data(x)
4646 def _slot_conflict_explanation(self, slot_nodes):
4648 When a slot conflict occurs due to USE deps, there are a few
4649 different cases to consider:
4651 1) New USE are correctly set but --newuse wasn't requested so an
4652 installed package with incorrect USE happened to get pulled
4653 into graph before the new one.
4655 2) New USE are incorrectly set but an installed package has correct
4656 USE so it got pulled into the graph, and a new instance also got
4657 pulled in due to --newuse or an upgrade.
4659 3) Multiple USE deps exist that can't be satisfied simultaneously,
4660 and multiple package instances got pulled into the same slot to
4661 satisfy the conflicting deps.
4663 Currently, explanations and suggested courses of action are generated
4664 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4667 if len(slot_nodes) != 2:
4668 # Suggestions are only implemented for
4669 # conflicts between two packages.
4672 all_conflict_atoms = self._slot_conflict_parent_atoms
4674 matched_atoms = None
4675 unmatched_node = None
4676 for node in slot_nodes:
4677 parent_atoms = self._parent_atoms.get(node)
4678 if not parent_atoms:
4679 # Normally, there are always parent atoms. If there are
4680 # none then something unexpected is happening and there's
4681 # currently no suggestion for this case.
4683 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4684 for parent_atom in conflict_atoms:
4685 parent, atom = parent_atom
4687 # Suggestions are currently only implemented for cases
4688 # in which all conflict atoms have USE deps.
4691 if matched_node is not None:
4692 # If conflict atoms match multiple nodes
4693 # then there's no suggestion.
4696 matched_atoms = conflict_atoms
4698 if unmatched_node is not None:
4699 # Neither node is matched by conflict atoms, and
4700 # there is no suggestion for this case.
4702 unmatched_node = node
4704 if matched_node is None or unmatched_node is None:
4705 # This shouldn't happen.
4708 if unmatched_node.installed and not matched_node.installed:
4709 return "New USE are correctly set, but --newuse wasn't" + \
4710 " requested, so an installed package with incorrect USE " + \
4711 "happened to get pulled into the dependency graph. " + \
4712 "In order to solve " + \
4713 "this, either specify the --newuse option or explicitly " + \
4714 " reinstall '%s'." % matched_node.slot_atom
4716 if matched_node.installed and not unmatched_node.installed:
4717 atoms = sorted(set(atom for parent, atom in matched_atoms))
4718 explanation = ("New USE for '%s' are incorrectly set. " + \
4719 "In order to solve this, adjust USE to satisfy '%s'") % \
4720 (matched_node.slot_atom, atoms[0])
4722 for atom in atoms[1:-1]:
4723 explanation += ", '%s'" % (atom,)
4726 explanation += " and '%s'" % (atoms[-1],)
4732 def _process_slot_conflicts(self):
4734 Process slot conflict data to identify specific atoms which
4735 lead to conflict. These atoms only match a subset of the
4736 packages that have been pulled into a given slot.
4738 for (slot_atom, root), slot_nodes \
4739 in self._slot_collision_info.iteritems():
4741 all_parent_atoms = set()
4742 for pkg in slot_nodes:
4743 parent_atoms = self._parent_atoms.get(pkg)
4744 if not parent_atoms:
4746 all_parent_atoms.update(parent_atoms)
4748 for pkg in slot_nodes:
4749 parent_atoms = self._parent_atoms.get(pkg)
4750 if parent_atoms is None:
4751 parent_atoms = set()
4752 self._parent_atoms[pkg] = parent_atoms
4753 for parent_atom in all_parent_atoms:
4754 if parent_atom in parent_atoms:
4756 # Use package set for matching since it will match via
4757 # PROVIDE when necessary, while match_from_list does not.
4758 parent, atom = parent_atom
4759 atom_set = InternalPackageSet(
4760 initial_atoms=(atom,))
4761 if atom_set.findAtomForPackage(pkg):
4762 parent_atoms.add(parent_atom)
4764 self._slot_conflict_parent_atoms.add(parent_atom)
4766 def _reinstall_for_flags(self, forced_flags,
4767 orig_use, orig_iuse, cur_use, cur_iuse):
4768 """Return a set of flags that trigger reinstallation, or None if there
4769 are no such flags."""
4770 if "--newuse" in self.myopts:
4771 flags = set(orig_iuse.symmetric_difference(
4772 cur_iuse).difference(forced_flags))
4773 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4774 cur_iuse.intersection(cur_use)))
4777 elif "changed-use" == self.myopts.get("--reinstall"):
4778 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4779 cur_iuse.intersection(cur_use))
4784 def _create_graph(self, allow_unsatisfied=False):
4785 dep_stack = self._dep_stack
4787 self.spinner.update()
4788 dep = dep_stack.pop()
4789 if isinstance(dep, Package):
4790 if not self._add_pkg_deps(dep,
4791 allow_unsatisfied=allow_unsatisfied):
4794 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
    def _add_dep(self, dep, allow_unsatisfied=False):
        """Process a single Dependency: record blockers for later
        validation, otherwise select a matching package and add it to the
        graph via self._add_pkg().

        NOTE(review): several lines (the blocker guard, try headers and
        early returns) are elided from this excerpt.
        """
        debug = "--debug" in self.myopts
        buildpkgonly = "--buildpkgonly" in self.myopts
        nodeps = "--nodeps" in self.myopts
        empty = "empty" in self.myparams
        deep = "deep" in self.myparams
        # --update semantics only apply near the roots of the graph.
        update = "--update" in self.myopts and dep.depth <= 1
            if not buildpkgonly and \
                dep.parent not in self._slot_collision_nodes:
                if dep.parent.onlydeps:
                    # It's safe to ignore blockers if the
                    # parent is an --onlydeps node.
                # The blocker applies to the root where
                # the parent is or will be installed.
                blocker = Blocker(atom=dep.atom,
                    eapi=dep.parent.metadata["EAPI"],
                    root=dep.parent.root)
                self._blocker_parents.add(blocker, dep.parent)
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
            if allow_unsatisfied:
                # Caller will deal with the unsatisfied dep later.
                self._unsatisfied_deps.append(dep)
            self._unsatisfied_deps_for_display.append(
                ((dep.root, dep.atom), {"myparent":dep.parent}))
        # In some cases, dep_check will return deps that shouldn't
        # be proccessed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        if dep.priority.satisfied and \
            not (existing_node or empty or deep or update):
            if dep.root == self.target_root:
                    myarg = self._iter_atoms_for_pkg(dep_pkg).next()
                except StopIteration:
                except portage.exception.InvalidDependString:
                    if not dep_pkg.installed:
                        # This shouldn't happen since the package
                        # should have been masked.
                self._ignored_deps.append(dep)
        if not self._add_pkg(dep_pkg, dep):
    def _add_pkg(self, pkg, dep):
        """Add a selected package (and its parent edges) to the dependency
        graph, handling slot collisions and old-style virtual registration,
        then queue the package for recursive dependency expansion.

        NOTE(review): numerous control-flow lines are elided from this
        excerpt (try/except headers, if/else headers, return statements).
        """
        myparent = dep.parent
        priority = dep.priority
        if priority is None:
            priority = DepPriority()
        """
        Fills the digraph with nodes comprised of packages to merge.
        mybigkey is the package spec of the package to merge.
        myparent is the package depending on mybigkey ( or None )
        addme = Should we add this package to the digraph or are we just looking at it's deps?
        Think --onlydeps, we need to ignore packages in that case.
        #IUSE-aware emerge -> USE DEP aware depgraph
        #"no downgrade" emerge
        """
        # Ensure that the dependencies of the same package
        # are never processed more than once.
        previously_added = pkg in self.digraph
        # select the correct /var database that we'll be checking against
        vardbapi = self.trees[pkg.root]["vartree"].dbapi
        pkgsettings = self.pkgsettings[pkg.root]
            arg_atoms = list(self._iter_atoms_for_pkg(pkg))
        except portage.exception.InvalidDependString, e:
            if not pkg.installed:
                show_invalid_depstring_notice(
                    pkg, pkg.metadata["PROVIDE"], str(e))
        if not pkg.onlydeps:
            if not pkg.installed and \
                "empty" not in self.myparams and \
                vardbapi.match(pkg.slot_atom):
                # Increase the priority of dependencies on packages that
                # are being rebuilt. This optimizes merge order so that
                # dependencies are rebuilt/updated as soon as possible,
                # which is needed especially when emerge is called by
                # revdep-rebuild since dependencies may be affected by ABI
                # breakage that has rendered them useless. Don't adjust
                # priority here when in "empty" mode since all packages
                # are being merged in that case.
                priority.rebuild = True
            existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
            slot_collision = False
                existing_node_matches = pkg.cpv == existing_node.cpv
                if existing_node_matches and \
                    pkg != existing_node and \
                    dep.atom is not None:
                    # Use package set for matching since it will match via
                    # PROVIDE when necessary, while match_from_list does not.
                    atom_set = InternalPackageSet(initial_atoms=[dep.atom])
                    if not atom_set.findAtomForPackage(existing_node):
                        existing_node_matches = False
                if existing_node_matches:
                    # The existing node can be reused.
                        for parent_atom in arg_atoms:
                            parent, atom = parent_atom
                            self.digraph.add(existing_node, parent,
                            self._add_parent_atom(existing_node, parent_atom)
                    # If a direct circular dependency is not an unsatisfied
                    # buildtime dependency then drop it here since otherwise
                    # it can skew the merge order calculation in an unwanted
                    # way.
                    if existing_node != myparent or \
                        (priority.buildtime and not priority.satisfied):
                        self.digraph.addnode(existing_node, myparent,
                        if dep.atom is not None and dep.parent is not None:
                            self._add_parent_atom(existing_node,
                                (dep.parent, dep.atom))
                    # A slot collision has occurred. Sometimes this coincides
                    # with unresolvable blockers, so the slot collision will be
                    # shown later if there are no unresolvable blockers.
                    self._add_slot_conflict(pkg)
                    slot_collision = True
                # Now add this node to the graph so that self.display()
                # can show use flags and --tree portage.output. This node is
                # only being partially added to the graph. It must not be
                # allowed to interfere with the other nodes that have been
                # added. Do not overwrite data for existing nodes in
                # self.mydbapi since that data will be used for blocker
                # validation.
                # Even though the graph is now invalid, continue to process
                # dependencies so that things like --fetchonly can still
                # function despite collisions.
                self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
                self.mydbapi[pkg.root].cpv_inject(pkg)
            if not pkg.installed:
                # Allow this package to satisfy old-style virtuals in case it
                # doesn't already. Any pre-existing providers will be preferred
                # over this one (try/except header elided in this excerpt).
                    pkgsettings.setinst(pkg.cpv, pkg.metadata)
                    # For consistency, also update the global virtuals.
                    settings = self.roots[pkg.root].settings
                    settings.setinst(pkg.cpv, pkg.metadata)
                except portage.exception.InvalidDependString, e:
                    show_invalid_depstring_notice(
                        pkg, pkg.metadata["PROVIDE"], str(e))
            self._set_nodes.add(pkg)
        # Do this even when addme is False (--onlydeps) so that the
        # parent/child relationship is always known in case
        # self._show_slot_collision_notice() needs to be called later.
        self.digraph.add(pkg, myparent, priority=priority)
        if dep.atom is not None and dep.parent is not None:
            self._add_parent_atom(pkg, (dep.parent, dep.atom))
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self.digraph.add(pkg, parent, priority=priority)
                self._add_parent_atom(pkg, parent_atom)
        """ This section determines whether we go deeper into dependencies or not.
        We want to go deeper on a few occasions:
        Installing package A, we need to make sure package A's deps are met.
        emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
        If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
        """
        dep_stack = self._dep_stack
        if "recurse" not in self.myparams:
        elif pkg.installed and \
            "deep" not in self.myparams:
            # Installed packages' deps are only followed with --deep;
            # otherwise they are parked on the ignored-deps stack.
            dep_stack = self._ignored_deps
        self.spinner.update()
        # Only enqueue this package's own dependencies once.
        if not previously_added:
            dep_stack.append(pkg)
5022 def _add_parent_atom(self, pkg, parent_atom):
5023 parent_atoms = self._parent_atoms.get(pkg)
5024 if parent_atoms is None:
5025 parent_atoms = set()
5026 self._parent_atoms[pkg] = parent_atoms
5027 parent_atoms.add(parent_atom)
    def _add_slot_conflict(self, pkg):
        """Record pkg as a slot-conflict node and track the conflicting
        packages for its (slot atom, root) key in self._slot_collision_info.

        NOTE(review): the line creating the empty set for a new slot_key
        (and possibly a trailing add of pkg itself) is elided from this
        excerpt.
        """
        self._slot_collision_nodes.add(pkg)
        slot_key = (pkg.slot_atom, pkg.root)
        slot_nodes = self._slot_collision_info.get(slot_key)
        if slot_nodes is None:
            # Seed the conflict set with the package currently occupying
            # this slot in the graph.
            slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
            self._slot_collision_info[slot_key] = slot_nodes
    def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
        """Expand pkg's DEPEND/RDEPEND/PDEPEND strings and feed each
        resulting atom through self._add_dep().

        NOTE(review): several lines (try/except headers, loop headers,
        return statements and a few assignments) are elided from this
        excerpt.
        """
        mytype = pkg.type_name
        metadata = pkg.metadata
        myuse = pkg.use.enabled
        depth = pkg.depth + 1
        removal_action = "remove" in self.myparams
        depkeys = ["DEPEND","RDEPEND","PDEPEND"]
            edepend[k] = metadata[k]
        if not pkg.built and \
            "--buildpkgonly" in self.myopts and \
            "deep" not in self.myparams and \
            "empty" not in self.myparams:
            # --buildpkgonly without deep/empty: runtime deps are not
            # required just to build the binary package.
            edepend["RDEPEND"] = ""
            edepend["PDEPEND"] = ""
        bdeps_satisfied = False
        if pkg.built and not removal_action:
            if self.myopts.get("--with-bdeps", "n") == "y":
                # Pull in build time deps as requested, but marked them as
                # "satisfied" since they are not strictly required. This allows
                # more freedom in the merge order calculation for solving
                # circular dependencies. Don't convert to PDEPEND since that
                # could make --with-bdeps=y less effective if it is used to
                # adjust merge order to prevent built_with_use() calls from
                # failing.
                bdeps_satisfied = True
                # built packages do not have build time dependencies.
                edepend["DEPEND"] = ""
        if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
            edepend["DEPEND"] = ""
            ("/", edepend["DEPEND"],
                self._priority(buildtime=True, satisfied=bdeps_satisfied)),
            (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
            (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
        debug = "--debug" in self.myopts
        # Installed packages may carry stale dep strings; be lenient there.
        strict = mytype != "installed"
            for dep_root, dep_string, dep_priority in deps:
                    # Decrease priority so that --buildpkgonly
                    # hasallzeros() works correctly.
                    dep_priority = DepPriority()
                    print "Parent: ", jbigkey
                    print "Depstring:", dep_string
                    print "Priority:", dep_priority
                vardb = self.roots[dep_root].trees["vartree"].dbapi
                    selected_atoms = self._select_atoms(dep_root,
                        dep_string, myuse=myuse, parent=pkg, strict=strict)
                except portage.exception.InvalidDependString, e:
                    show_invalid_depstring_notice(jbigkey, dep_string, str(e))
                    print "Candidates:", selected_atoms
                for atom in selected_atoms:
                        atom = portage.dep.Atom(atom)
                        mypriority = dep_priority.copy()
                        # Deps already satisfied by an installed package are
                        # flagged so merge ordering can relax them.
                        if not atom.blocker and vardb.match(atom):
                            mypriority.satisfied = True
                        if not self._add_dep(Dependency(atom=atom,
                            blocker=atom.blocker, depth=depth, parent=pkg,
                            priority=mypriority, root=dep_root),
                            allow_unsatisfied=allow_unsatisfied):
                    except portage.exception.InvalidAtom, e:
                        show_invalid_depstring_notice(
                            pkg, dep_string, str(e))
                        if not pkg.installed:
                    print "Exiting...", jbigkey
        except portage.exception.AmbiguousPackageName, e:
            portage.writemsg("\n\n!!! An atom in the dependencies " + \
                "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
                portage.writemsg(" %s\n" % cpv, noiselevel=-1)
            portage.writemsg("\n", noiselevel=-1)
            if mytype == "binary":
                portage.writemsg(
                    "!!! This binary package cannot be installed: '%s'\n" % \
                    mykey, noiselevel=-1)
            elif mytype == "ebuild":
                portdb = self.roots[myroot].trees["porttree"].dbapi
                myebuild, mylocation = portdb.findname2(mykey)
                portage.writemsg("!!! This ebuild cannot be installed: " + \
                    "'%s'\n" % myebuild, noiselevel=-1)
            portage.writemsg("!!! Please notify the package maintainer " + \
                "that atoms must be fully-qualified.\n", noiselevel=-1)
5157 def _priority(self, **kwargs):
5158 if "remove" in self.myparams:
5159 priority_constructor = UnmergeDepPriority
5161 priority_constructor = DepPriority
5162 return priority_constructor(**kwargs)
    def _dep_expand(self, root_config, atom_without_category):
        """
        @param root_config: a root config instance
        @type root_config: RootConfig
        @param atom_without_category: an atom without a category component
        @type atom_without_category: String
        @rtype: list
        @returns: a list of atoms containing categories (possibly empty)

        NOTE(review): the cp_set initialization, the package-name filter
        and the second loop header are elided from this excerpt.
        """
        null_cp = portage.dep_getkey(insert_category_into_atom(
            atom_without_category, "null"))
        cat, atom_pn = portage.catsplit(null_cp)
        # Gather every known category/package pair from all databases.
        for db, pkg_type, built, installed, db_keys in \
            self._filtered_trees[root_config.root]["dbs"]:
            cp_set.update(db.cp_all())
        for cp in list(cp_set):
            cat, pn = portage.catsplit(cp)
            # (second loop over the surviving cps; header elided)
            cat, pn = portage.catsplit(cp)
            deps.append(insert_category_into_atom(
                atom_without_category, cat))
    def _have_new_virt(self, root, atom_cp):
        """Return whether any configured database for root carries a
        package under atom_cp (used to detect new-style virtual providers).

        NOTE(review): the result variable and return statement are elided
        from this excerpt.
        """
        for db, pkg_type, built, installed, db_keys in \
            self._filtered_trees[root]["dbs"]:
            if db.cp_list(atom_cp):
    def _iter_atoms_for_pkg(self, pkg):
        """Yield (arg, atom) pairs for command-line arguments whose atoms
        match the given package.

        NOTE(review): the continue/break/yield lines of this generator are
        elided from this excerpt.
        """
        # TODO: add multiple $ROOT support
        if pkg.root != self.target_root:
        atom_arg_map = self._atom_arg_map
        root_config = self.roots[pkg.root]
        for atom in self._set_atoms.iterAtomsForPackage(pkg):
            atom_cp = portage.dep_getkey(atom)
            # Skip old-style virtual matches when a new-style virtual
            # provider exists for the same cp.
            if atom_cp != pkg.cp and \
                self._have_new_virt(pkg.root, atom_cp):
            visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
            visible_pkgs.reverse() # descending order
            for visible_pkg in visible_pkgs:
                if visible_pkg.cp != atom_cp:
                if pkg >= visible_pkg:
                    # This is descending order, and we're not
                    # interested in any versions <= pkg given.
                if pkg.slot_atom != visible_pkg.slot_atom:
                    higher_slot = visible_pkg
            if higher_slot is not None:
            for arg in atom_arg_map[(atom, pkg.root)]:
                if isinstance(arg, PackageArg) and \
    def select_files(self, myfiles):
        """Given a list of .tbz2s, .ebuilds sets, and deps, create the
        appropriate depgraph and return a favorite list."""
        # NOTE(review): many control-flow lines (for/if/else/try headers,
        # some returns and initializations) are elided from this excerpt.
        debug = "--debug" in self.myopts
        root_config = self.roots[self.target_root]
        sets = root_config.sets
        getSetAtoms = root_config.setconfig.getSetAtoms
        myroot = self.target_root
        dbs = self._filtered_trees[myroot]["dbs"]
        vardb = self.trees[myroot]["vartree"].dbapi
        real_vardb = self._trees_orig[myroot]["vartree"].dbapi
        portdb = self.trees[myroot]["porttree"].dbapi
        bindb = self.trees[myroot]["bintree"].dbapi
        pkgsettings = self.pkgsettings[myroot]
        onlydeps = "--onlydeps" in self.myopts
            ext = os.path.splitext(x)[1]
            # --- a .tbz2 binary package given on the command line ---
                if not os.path.exists(x):
                        os.path.join(pkgsettings["PKGDIR"], "All", x)):
                        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                    elif os.path.exists(
                        os.path.join(pkgsettings["PKGDIR"], x)):
                        x = os.path.join(pkgsettings["PKGDIR"], x)
                        print "\n\n!!! Binary package '"+str(x)+"' does not exist."
                        print "!!! Please ensure the tbz2 exists as specified.\n"
                        return 0, myfavorites
                mytbz2=portage.xpak.tbz2(x)
                mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
                if os.path.realpath(x) != \
                    os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
                    print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
                    return 0, myfavorites
                db_keys = list(bindb._aux_cache_keys)
                metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
                pkg = Package(type_name="binary", root_config=root_config,
                    cpv=mykey, built=True, metadata=metadata,
                self._pkg_cache[pkg] = pkg
                args.append(PackageArg(arg=x, package=pkg,
                    root_config=root_config))
            elif ext==".ebuild":
                # --- a raw .ebuild path given on the command line ---
                ebuild_path = portage.util.normalize_path(os.path.abspath(x))
                pkgdir = os.path.dirname(ebuild_path)
                tree_root = os.path.dirname(os.path.dirname(pkgdir))
                cp = pkgdir[len(tree_root)+1:]
                e = portage.exception.PackageNotFound(
                    ("%s is not in a valid portage tree " + \
                    "hierarchy or does not exist") % x)
                if not portage.isvalidatom(cp):
                cat = portage.catsplit(cp)[0]
                mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
                if not portage.isvalidatom("="+mykey):
                ebuild_path = portdb.findname(mykey)
                    if ebuild_path != os.path.join(os.path.realpath(tree_root),
                        cp, os.path.basename(ebuild_path)):
                        print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
                        return 0, myfavorites
                    if mykey not in portdb.xmatch(
                        "match-visible", portage.dep_getkey(mykey)):
                        print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
                        print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
                        print colorize("BAD", "*** page for details.")
                        countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
                    raise portage.exception.PackageNotFound(
                        "%s is not in a valid portage tree hierarchy or does not exist" % x)
                db_keys = list(portdb._aux_cache_keys)
                metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
                pkg = Package(type_name="ebuild", root_config=root_config,
                    cpv=mykey, metadata=metadata, onlydeps=onlydeps)
                pkgsettings.setcpv(pkg)
                pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
                self._pkg_cache[pkg] = pkg
                args.append(PackageArg(arg=x, package=pkg,
                    root_config=root_config))
            elif x.startswith(os.path.sep):
                # --- an absolute file path; resolve to its owning package ---
                if not x.startswith(myroot):
                    portage.writemsg(("\n\n!!! '%s' does not start with" + \
                        " $ROOT.\n") % x, noiselevel=-1)
                # Queue these up since it's most efficient to handle
                # multiple files in a single iter_owners() call.
                lookup_owners.append(x)
                if x in ("system", "world"):
                if x.startswith(SETPREFIX):
                    s = x[len(SETPREFIX):]
                        raise portage.exception.PackageSetNotFound(s)
                    # Recursively expand sets so that containment tests in
                    # self._get_parent_sets() properly match atoms in nested
                    # sets (like if world contains system).
                    expanded_set = InternalPackageSet(
                        initial_atoms=getSetAtoms(s))
                    self._sets[s] = expanded_set
                    args.append(SetArg(arg=x, set=expanded_set,
                        root_config=root_config))
                    myfavorites.append(x)
                if not is_valid_package_atom(x):
                    portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                    portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                    portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
                # Don't expand categories or old-style virtuals here unless
                # necessary. Expansion of old-style virtuals here causes at
                # least the following problems:
                # 1) It's more difficult to determine which set(s) an atom
                # came from, if any.
                # 2) It takes away freedom from the resolver to choose other
                # possible expansions when necessary.
                    args.append(AtomArg(arg=x, atom=x,
                        root_config=root_config))
                expanded_atoms = self._dep_expand(root_config, x)
                installed_cp_set = set()
                for atom in expanded_atoms:
                    atom_cp = portage.dep_getkey(atom)
                    if vardb.cp_list(atom_cp):
                        installed_cp_set.add(atom_cp)
                # Prefer the installed cp when category expansion is
                # ambiguous but exactly one candidate is installed.
                if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                    installed_cp = iter(installed_cp_set).next()
                    expanded_atoms = [atom for atom in expanded_atoms \
                        if portage.dep_getkey(atom) == installed_cp]
                if len(expanded_atoms) > 1:
                    ambiguous_package_name(x, expanded_atoms, root_config,
                        self.spinner, self.myopts)
                    return False, myfavorites
                    atom = expanded_atoms[0]
                    null_atom = insert_category_into_atom(x, "null")
                    null_cp = portage.dep_getkey(null_atom)
                    cat, atom_pn = portage.catsplit(null_cp)
                    virts_p = root_config.settings.get_virts_p().get(atom_pn)
                        # Allow the depgraph to choose which virtual.
                        atom = insert_category_into_atom(x, "virtual")
                        atom = insert_category_into_atom(x, "null")
                args.append(AtomArg(arg=x, atom=atom,
                    root_config=root_config))
            # --- resolve queued file paths to owning packages ---
            search_for_multiple = False
            if len(lookup_owners) > 1:
                search_for_multiple = True
            for x in lookup_owners:
                if not search_for_multiple and os.path.isdir(x):
                    search_for_multiple = True
                relative_paths.append(x[len(myroot):])
            for pkg, relative_path in \
                real_vardb._owners.iter_owners(relative_paths):
                owners.add(pkg.mycpv)
                if not search_for_multiple:
                portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                    "by any package.\n") % lookup_owners[0], noiselevel=-1)
                slot = vardb.aux_get(cpv, ["SLOT"])[0]
                    # portage now masks packages with missing slot, but it's
                    # possible that one was installed by an older version
                    atom = portage.cpv_getkey(cpv)
                    atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
                args.append(AtomArg(arg=atom, atom=atom,
                    root_config=root_config))
        if "--update" in self.myopts:
            # Enable greedy SLOT atoms for atoms given as arguments.
            # This is currently disabled for sets since greedy SLOT
            # atoms could be a property of the set itself.
                # In addition to any installed slots, also try to pull
                # in the latest new slot that may be available.
                greedy_atoms.append(arg)
                if not isinstance(arg, (AtomArg, PackageArg)):
                atom_cp = portage.dep_getkey(arg.atom)
                for cpv in vardb.match(arg.atom):
                    slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
                    greedy_atoms.append(
                        AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
                            root_config=root_config))
        # Create the "args" package set from atoms and
        # packages given as arguments.
        args_set = self._sets["args"]
            if not isinstance(arg, (AtomArg, PackageArg)):
            if myatom in args_set:
            args_set.add(myatom)
            myfavorites.append(myatom)
        self._set_atoms.update(chain(*self._sets.itervalues()))
        atom_arg_map = self._atom_arg_map
            for atom in arg.set:
                atom_key = (atom, myroot)
                refs = atom_arg_map.get(atom_key)
                    atom_arg_map[atom_key] = refs
        pprovideddict = pkgsettings.pprovideddict
            portage.writemsg("\n", noiselevel=-1)
        # Order needs to be preserved since a feature of --nodeps
        # is to allow the user to force a specific merge order.
            for atom in arg.set:
                self.spinner.update()
                dep = Dependency(atom=atom, onlydeps=onlydeps,
                    root=myroot, parent=arg)
                atom_cp = portage.dep_getkey(atom)
                    pprovided = pprovideddict.get(portage.dep_getkey(atom))
                    if pprovided and portage.match_from_list(atom, pprovided):
                        # A provided package has been specified on the command line.
                        self._pprovided_args.append((arg, atom))
                    if isinstance(arg, PackageArg):
                        if not self._add_pkg(arg.package, dep) or \
                            not self._create_graph():
                            sys.stderr.write(("\n\n!!! Problem resolving " + \
                                "dependencies for %s\n") % arg.arg)
                            return 0, myfavorites
                        portage.writemsg(" Arg: %s\n Atom: %s\n" % \
                            (arg, atom), noiselevel=-1)
                    pkg, existing_node = self._select_package(
                        myroot, atom, onlydeps=onlydeps)
                        if not (isinstance(arg, SetArg) and \
                            arg.name in ("system", "world")):
                            self._unsatisfied_deps_for_display.append(
                                ((myroot, atom), {}))
                            return 0, myfavorites
                        self._missing_args.append((arg, atom))
                    if atom_cp != pkg.cp:
                        # For old-style virtuals, we need to repeat the
                        # package.provided check against the selected package.
                        expanded_atom = atom.replace(atom_cp, pkg.cp)
                        pprovided = pprovideddict.get(pkg.cp)
                            portage.match_from_list(expanded_atom, pprovided):
                            # A provided package has been
                            # specified on the command line.
                            self._pprovided_args.append((arg, atom))
                    if pkg.installed and "selective" not in self.myparams:
                        self._unsatisfied_deps_for_display.append(
                            ((myroot, atom), {}))
                        # Previous behavior was to bail out in this case, but
                        # since the dep is satisfied by the installed package,
                        # it's more friendly to continue building the graph
                        # and just show a warning message. Therefore, only bail
                        # out here if the atom is not from either the system or
                        # world set.
                        if not (isinstance(arg, SetArg) and \
                            arg.name in ("system", "world")):
                            return 0, myfavorites
                    # Add the selected package to the graph as soon as possible
                    # so that later dep_check() calls can use it as feedback
                    # for making more consistent atom selections.
                    if not self._add_pkg(pkg, dep):
                        if isinstance(arg, SetArg):
                            sys.stderr.write(("\n\n!!! Problem resolving " + \
                                "dependencies for %s from %s\n") % \
                            sys.stderr.write(("\n\n!!! Problem resolving " + \
                                "dependencies for %s\n") % atom)
                        return 0, myfavorites
                except portage.exception.MissingSignature, e:
                    portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
                    portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                    portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                    portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                    portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                    return 0, myfavorites
                except portage.exception.InvalidSignature, e:
                    portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
                    portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                    portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                    portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                    portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                    return 0, myfavorites
                except SystemExit, e:
                    raise # Needed else can't exit
                except Exception, e:
                    print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
                    print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
        # Now that the root packages have been added to the graph,
        # process the dependencies.
        if not self._create_graph():
            return 0, myfavorites
        if "--usepkgonly" in self.myopts:
            for xs in self.digraph.all_nodes():
                if not isinstance(xs, Package):
                if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                        print "Missing binary for:",xs[2]
        except self._unknown_internal_error:
            return False, myfavorites
        # We're true here unless we are missing binaries.
        return (not missing,myfavorites)
    def _select_atoms_from_graph(self, *pargs, **kwargs):
        """
        Prefer atoms matching packages that have already been
        added to the graph or those that are installed and have
        not been scheduled for replacement.
        """
        # Delegate to the generic selector, but against the graph trees.
        kwargs["trees"] = self._graph_trees
        return self._select_atoms_highest_available(*pargs, **kwargs)
    def _select_atoms_highest_available(self, root, depstring,
        myuse=None, parent=None, strict=True, trees=None):
        """This will raise InvalidDependString if necessary. If trees is
        None then self._filtered_trees is used.

        NOTE(review): the try/finally scaffolding around the dep_check()
        call is elided from this excerpt.
        """
        pkgsettings = self.pkgsettings[root]
            trees = self._filtered_trees
            if parent is not None:
                # Expose the requesting package to dep_check via the trees
                # mapping; removed again in the finally block below.
                trees[root]["parent"] = parent
                portage.dep._dep_check_strict = False
            mycheck = portage.dep_check(depstring, None,
                pkgsettings, myuse=myuse,
                myroot=root, trees=trees)
            if parent is not None:
                trees[root].pop("parent")
            # Restore the module-level strict flag unconditionally.
            portage.dep._dep_check_strict = True
            raise portage.exception.InvalidDependString(mycheck[1])
        selected_atoms = mycheck[1]
        return selected_atoms
    def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
        """Print a detailed explanation of why the given atom could not be
        satisfied: USE/IUSE mismatches, masked packages, or no ebuilds at
        all; then print the chain of parents that pulled the dep in.

        NOTE(review): several control-flow lines (if/else headers, breaks,
        continues and some initializations) are elided from this excerpt.
        """
        atom = portage.dep.Atom(atom)
        atom_set = InternalPackageSet(initial_atoms=(atom,))
        # Strip USE deps (keeping any slot) so that candidates that fail
        # only on USE can still be found and explained.
        atom_without_use = atom
            atom_without_use = portage.dep.remove_slot(atom)
                atom_without_use += ":" + atom.slot
            atom_without_use = portage.dep.Atom(atom_without_use)
        xinfo = '"%s"' % atom
        # Discard null/ from failed cpv_expand category expansion.
        xinfo = xinfo.replace("null/", "")
        masked_packages = []
        missing_licenses = []
        have_eapi_mask = False
        pkgsettings = self.pkgsettings[root]
        implicit_iuse = pkgsettings._get_implicit_iuse()
        root_config = self.roots[root]
        portdb = self.roots[root].trees["porttree"].dbapi
        dbs = self._filtered_trees[root]["dbs"]
        for db, pkg_type, built, installed, db_keys in dbs:
            if hasattr(db, "xmatch"):
                cpv_list = db.xmatch("match-all", atom_without_use)
                cpv_list = db.match(atom_without_use)
            for cpv in cpv_list:
                metadata, mreasons = get_mask_info(root_config, cpv,
                    pkgsettings, db, pkg_type, built, installed, db_keys)
                if metadata is not None:
                    pkg = Package(built=built, cpv=cpv,
                        installed=installed, metadata=metadata,
                        root_config=root_config)
                    if pkg.cp != atom.cp:
                        # A cpv can be returned from dbapi.match() as an
                        # old-style virtual match even in cases when the
                        # package does not actually PROVIDE the virtual.
                        # Filter out any such false matches here.
                        if not atom_set.findAtomForPackage(pkg):
                    if atom.use and not mreasons:
                        missing_use.append(pkg)
                masked_packages.append(
                    (root_config, pkgsettings, cpv, metadata, mreasons))
        # For each USE-only miss, work out whether the atom needs flags the
        # package lacks in IUSE or just differently-set flags.
        missing_use_reasons = []
        missing_iuse_reasons = []
        for pkg in missing_use:
            use = pkg.use.enabled
            iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
            iuse_re = re.compile("^(%s)$" % "|".join(iuse))
            for x in atom.use.required:
                if iuse_re.match(x) is None:
                    missing_iuse.append(x)
                mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
                missing_iuse_reasons.append((pkg, mreasons))
                need_enable = sorted(atom.use.enabled.difference(use))
                need_disable = sorted(atom.use.disabled.intersection(use))
                if need_enable or need_disable:
                    changes.extend(colorize("red", "+" + x) \
                        for x in need_enable)
                    changes.extend(colorize("blue", "-" + x) \
                        for x in need_disable)
                    mreasons.append("Change USE: %s" % " ".join(changes))
                    missing_use_reasons.append((pkg, mreasons))
        if missing_iuse_reasons and not missing_use_reasons:
            missing_use_reasons = missing_iuse_reasons
        elif missing_use_reasons:
            # Only show the latest version.
            del missing_use_reasons[1:]
        if missing_use_reasons:
            print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
            print "!!! One of the following packages is required to complete your request:"
            for pkg, mreasons in missing_use_reasons:
                print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
        elif masked_packages:
                colorize("BAD", "All ebuilds that could satisfy ") + \
                colorize("INFORM", xinfo) + \
                colorize("BAD", " have been masked.")
            print "!!! One of the following masked packages is required to complete your request:"
            have_eapi_mask = show_masked_packages(masked_packages)
                msg = ("The current version of portage supports " + \
                    "EAPI '%s'. You must upgrade to a newer version" + \
                    " of portage before EAPI masked packages can" + \
                    " be installed.") % portage.const.EAPI
                from textwrap import wrap
                for line in wrap(msg, 75):
            print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
        # Show parent nodes and the argument that pulled them in.
        traversed_nodes = set()
        while node is not None:
            traversed_nodes.add(node)
            msg.append('(dependency required by "%s" [%s])' % \
                (colorize('INFORM', str(node.cpv)), node.type_name))
            # When traversing to parents, prefer arguments over packages
            # since arguments are root nodes. Never traverse the same
            # package twice, in order to prevent an infinite loop.
            selected_parent = None
            for parent in self.digraph.parent_nodes(node):
                if isinstance(parent, DependencyArg):
                    msg.append('(dependency required by "%s" [argument])' % \
                        (colorize('INFORM', str(parent))))
                    selected_parent = None
                if parent not in traversed_nodes:
                    selected_parent = parent
            node = selected_parent
    def _select_pkg_highest_available(self, root, atom, onlydeps=False):
        """Memoized wrapper around _select_pkg_highest_available_imp().

        Results are cached per (root, atom, onlydeps); a cache hit is
        refreshed when the cached package has since been added to the
        graph's slot map. When the freshly selected package passes the
        visible() check it is injected into root_config.visible_pkgs.

        NOTE(review): the cache-hit guard, tuple unpacking and return
        statements are elided from this excerpt.
        """
        cache_key = (root, atom, onlydeps)
        ret = self._highest_pkg_cache.get(cache_key)
            if pkg and not existing:
                existing = self._slot_pkg_map[root].get(pkg.slot_atom)
                if existing and existing == pkg:
                    # Update the cache to reflect that the
                    # package has been added to the graph.
                    self._highest_pkg_cache[cache_key] = ret
        ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
        self._highest_pkg_cache[cache_key] = ret
            settings = pkg.root_config.settings
            if visible(settings, pkg) and not (pkg.installed and \
                settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
                pkg.root_config.visible_pkgs.cpv_inject(pkg)
# NOTE(review): this method's source text is a mangled extraction — original
# file line numbers are fused into each line, indentation is lost, and the
# jumps in the embedded numbers (e.g. 5821 -> 5824) show that many interior
# lines are missing. The code bytes below are preserved exactly as found;
# only review comments have been added. Restore from upstream before editing.
5789 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5790 root_config = self.roots[root]
5791 pkgsettings = self.pkgsettings[root]
5792 dbs = self._filtered_trees[root]["dbs"]
5793 vardb = self.roots[root].trees["vartree"].dbapi
5794 portdb = self.roots[root].trees["porttree"].dbapi
5795 # List of acceptable packages, ordered by type preference.
5796 matched_packages = []
5797 highest_version = None
5798 if not isinstance(atom, portage.dep.Atom):
5799 atom = portage.dep.Atom(atom)
5801 atom_set = InternalPackageSet(initial_atoms=(atom,))
5802 existing_node = None
5804 usepkgonly = "--usepkgonly" in self.myopts
5805 empty = "empty" in self.myparams
5806 selective = "selective" in self.myparams
5808 noreplace = "--noreplace" in self.myopts
5809 # Behavior of the "selective" parameter depends on
5810 # whether or not a package matches an argument atom.
5811 # If an installed package provides an old-style
5812 # virtual that is no longer provided by an available
5813 # package, the installed package may match an argument
5814 # atom even though none of the available packages do.
5815 # Therefore, "selective" logic does not consider
5816 # whether or not an installed package matches an
5817 # argument atom. It only considers whether or not
5818 # available packages match argument atoms, which is
5819 # represented by the found_available_arg flag.
5820 found_available_arg = False
# Two passes: the first looks for an existing graph node in the same slot,
# the second performs normal selection (as visible from find_existing_node).
5821 for find_existing_node in True, False:
5824 for db, pkg_type, built, installed, db_keys in dbs:
5827 if installed and not find_existing_node:
5828 want_reinstall = reinstall or empty or \
5829 (found_available_arg and not selective)
5830 if want_reinstall and matched_packages:
5832 if hasattr(db, "xmatch"):
5833 cpv_list = db.xmatch("match-all", atom)
5835 cpv_list = db.match(atom)
5837 # USE=multislot can make an installed package appear as if
5838 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5839 # won't do any good as long as USE=multislot is enabled since
5840 # the newly built package still won't have the expected slot.
5841 # Therefore, assume that such SLOT dependencies are already
5842 # satisfied rather than forcing a rebuild.
5843 if installed and not cpv_list and atom.slot:
5844 for cpv in db.match(atom.cp):
5845 slot_available = False
5846 for other_db, other_type, other_built, \
5847 other_installed, other_keys in dbs:
5850 other_db.aux_get(cpv, ["SLOT"])[0]:
5851 slot_available = True
5855 if not slot_available:
5857 inst_pkg = self._pkg(cpv, "installed",
5858 root_config, installed=installed)
5859 # Remove the slot from the atom and verify that
5860 # the package matches the resulting atom.
5861 atom_without_slot = portage.dep.remove_slot(atom)
5863 atom_without_slot += str(atom.use)
5864 atom_without_slot = portage.dep.Atom(atom_without_slot)
5865 if portage.match_from_list(
5866 atom_without_slot, [inst_pkg]):
5867 cpv_list = [inst_pkg.cpv]
5872 pkg_status = "merge"
5873 if installed or onlydeps:
5874 pkg_status = "nomerge"
# Per-cpv candidate loop: build/fetch a Package instance (cached in
# self._pkg_cache), then apply visibility, keyword, USE-dep and
# argument-atom filters before appending to matched_packages.
5877 for cpv in cpv_list:
5878 # Make --noreplace take precedence over --newuse.
5879 if not installed and noreplace and \
5880 cpv in vardb.match(atom):
5881 # If the installed version is masked, it may
5882 # be necessary to look at lower versions,
5883 # in case there is a visible downgrade.
5885 reinstall_for_flags = None
5886 cache_key = (pkg_type, root, cpv, pkg_status)
5887 calculated_use = True
5888 pkg = self._pkg_cache.get(cache_key)
5890 calculated_use = False
5892 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5895 pkg = Package(built=built, cpv=cpv,
5896 installed=installed, metadata=metadata,
5897 onlydeps=onlydeps, root_config=root_config,
5899 metadata = pkg.metadata
5900 if not built and ("?" in metadata["LICENSE"] or \
5901 "?" in metadata["PROVIDE"]):
5902 # This is avoided whenever possible because
5903 # it's expensive. It only needs to be done here
5904 # if it has an effect on visibility.
5905 pkgsettings.setcpv(pkg)
5906 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5907 calculated_use = True
5908 self._pkg_cache[pkg] = pkg
5910 if not installed or (built and matched_packages):
5911 # Only enforce visibility on installed packages
5912 # if there is at least one other visible package
5913 # available. By filtering installed masked packages
5914 # here, packages that have been masked since they
5915 # were installed can be automatically downgraded
5916 # to an unmasked version.
5918 if not visible(pkgsettings, pkg):
5920 except portage.exception.InvalidDependString:
5924 # Enable upgrade or downgrade to a version
5925 # with visible KEYWORDS when the installed
5926 # version is masked by KEYWORDS, but never
5927 # reinstall the same exact version only due
5928 # to a KEYWORDS mask.
5929 if built and matched_packages:
5931 different_version = None
5932 for avail_pkg in matched_packages:
5933 if not portage.dep.cpvequal(
5934 pkg.cpv, avail_pkg.cpv):
5935 different_version = avail_pkg
5937 if different_version is not None:
5940 pkgsettings._getMissingKeywords(
5941 pkg.cpv, pkg.metadata):
5944 # If the ebuild no longer exists or it's
5945 # keywords have been dropped, reject built
5946 # instances (installed or binary).
5947 # If --usepkgonly is enabled, assume that
5948 # the ebuild status should be ignored.
5952 pkg.cpv, "ebuild", root_config)
5953 except portage.exception.PackageNotFound:
5956 if not visible(pkgsettings, pkg_eb):
5959 if not pkg.built and not calculated_use:
5960 # This is avoided whenever possible because
5962 pkgsettings.setcpv(pkg)
5963 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5965 if pkg.cp != atom.cp:
5966 # A cpv can be returned from dbapi.match() as an
5967 # old-style virtual match even in cases when the
5968 # package does not actually PROVIDE the virtual.
5969 # Filter out any such false matches here.
5970 if not atom_set.findAtomForPackage(pkg):
5974 if root == self.target_root:
5976 # Ebuild USE must have been calculated prior
5977 # to this point, in case atoms have USE deps.
5978 myarg = self._iter_atoms_for_pkg(pkg).next()
5979 except StopIteration:
5981 except portage.exception.InvalidDependString:
5983 # masked by corruption
5985 if not installed and myarg:
5986 found_available_arg = True
5988 if atom.use and not pkg.built:
5989 use = pkg.use.enabled
5990 if atom.use.enabled.difference(use):
5992 if atom.use.disabled.intersection(use):
5994 if pkg.cp == atom_cp:
5995 if highest_version is None:
5996 highest_version = pkg
5997 elif pkg > highest_version:
5998 highest_version = pkg
5999 # At this point, we've found the highest visible
6000 # match from the current repo. Any lower versions
6001 # from this repo are ignored, so this so the loop
6002 # will always end with a break statement below
6004 if find_existing_node:
6005 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6008 if portage.dep.match_from_list(atom, [e_pkg]):
6009 if highest_version and \
6010 e_pkg.cp == atom_cp and \
6011 e_pkg < highest_version and \
6012 e_pkg.slot_atom != highest_version.slot_atom:
6013 # There is a higher version available in a
6014 # different slot, so this existing node is
6018 matched_packages.append(e_pkg)
6019 existing_node = e_pkg
6021 # Compare built package to current config and
6022 # reject the built package if necessary.
6023 if built and not installed and \
6024 ("--newuse" in self.myopts or \
6025 "--reinstall" in self.myopts):
6026 iuses = pkg.iuse.all
6027 old_use = pkg.use.enabled
6029 pkgsettings.setcpv(myeb)
6031 pkgsettings.setcpv(pkg)
6032 now_use = pkgsettings["PORTAGE_USE"].split()
6033 forced_flags = set()
6034 forced_flags.update(pkgsettings.useforce)
6035 forced_flags.update(pkgsettings.usemask)
6037 if myeb and not usepkgonly:
6038 cur_iuse = myeb.iuse.all
6039 if self._reinstall_for_flags(forced_flags,
6043 # Compare current config to installed package
6044 # and do not reinstall if possible.
6045 if not installed and \
6046 ("--newuse" in self.myopts or \
6047 "--reinstall" in self.myopts) and \
6048 cpv in vardb.match(atom):
6049 pkgsettings.setcpv(pkg)
6050 forced_flags = set()
6051 forced_flags.update(pkgsettings.useforce)
6052 forced_flags.update(pkgsettings.usemask)
6053 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6054 old_iuse = set(filter_iuse_defaults(
6055 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6056 cur_use = pkgsettings["PORTAGE_USE"].split()
6057 cur_iuse = pkg.iuse.all
6058 reinstall_for_flags = \
6059 self._reinstall_for_flags(
6060 forced_flags, old_use, old_iuse,
6062 if reinstall_for_flags:
6066 matched_packages.append(pkg)
6067 if reinstall_for_flags:
6068 self._reinstall_nodes[pkg] = \
6072 if not matched_packages:
6075 if "--debug" in self.myopts:
6076 for pkg in matched_packages:
6077 portage.writemsg("%s %s\n" % \
6078 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6080 # Filter out any old-style virtual matches if they are
6081 # mixed with new-style virtual matches.
6082 cp = portage.dep_getkey(atom)
6083 if len(matched_packages) > 1 and \
6084 "virtual" == portage.catsplit(cp)[0]:
6085 for pkg in matched_packages:
6088 # Got a new-style virtual, so filter
6089 # out any old-style virtuals.
6090 matched_packages = [pkg for pkg in matched_packages \
6094 if len(matched_packages) > 1:
6095 bestmatch = portage.best(
6096 [pkg.cpv for pkg in matched_packages])
6097 matched_packages = [pkg for pkg in matched_packages \
6098 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6100 # ordered by type preference ("ebuild" type is the last resort)
6101 return matched_packages[-1], existing_node
6103 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6105 Select packages that have already been added to the graph or
6106 those that are installed and have not been scheduled for
6109 graph_db = self._graph_trees[root]["porttree"].dbapi
6110 matches = graph_db.match(atom)
6113 cpv = matches[-1] # highest match
6114 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6115 graph_db.aux_get(cpv, ["SLOT"])[0])
6116 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6119 # Since this cpv exists in the graph_db,
6120 # we must have a cached Package instance.
6121 cache_key = ("installed", root, cpv, "nomerge")
6122 return (self._pkg_cache[cache_key], None)
# NOTE(review): this method's source text is a mangled extraction — original
# file line numbers are fused into each line, indentation is lost, and gaps
# in the embedded numbers (e.g. 6130 -> 6133) show interior lines are missing
# (including docstring delimiters and several statements). The code bytes
# below are preserved exactly as found; only review comments were added.
6124 def _complete_graph(self):
6126 Add any deep dependencies of required sets (args, system, world) that
6127 have not been pulled into the graph yet. This ensures that the graph
6128 is consistent such that initially satisfied deep dependencies are not
6129 broken in the new graph. Initially unsatisfied dependencies are
6130 irrelevant since we only want to avoid breaking dependencies that are
6133 Since this method can consume enough time to disturb users, it is
6134 currently only enabled by the --complete-graph option.
6136 if "--buildpkgonly" in self.myopts or \
6137 "recurse" not in self.myparams:
6140 if "complete" not in self.myparams:
6141 # Skip this to avoid consuming enough time to disturb users.
6144 # Put the depgraph into a mode that causes it to only
6145 # select packages that have already been added to the
6146 # graph or those that are installed and have not been
6147 # scheduled for replacement. Also, toggle the "deep"
6148 # parameter so that all dependencies are traversed and
6150 self._select_atoms = self._select_atoms_from_graph
6151 self._select_package = self._select_pkg_from_graph
6152 already_deep = "deep" in self.myparams
6153 if not already_deep:
6154 self.myparams.add("deep")
# Per-root pass: re-seed the dep stack with required sets (reusing graphed
# SetArg nodes where possible) plus previously ignored deps, then re-run
# graph creation with allow_unsatisfied=True.
6156 for root in self.roots:
6157 required_set_names = self._required_set_names.copy()
6158 if root == self.target_root and \
6159 (already_deep or "empty" in self.myparams):
6160 required_set_names.difference_update(self._sets)
6161 if not required_set_names and not self._ignored_deps:
6163 root_config = self.roots[root]
6164 setconfig = root_config.setconfig
6166 # Reuse existing SetArg instances when available.
6167 for arg in self.digraph.root_nodes():
6168 if not isinstance(arg, SetArg):
6170 if arg.root_config != root_config:
6172 if arg.name in required_set_names:
6174 required_set_names.remove(arg.name)
6175 # Create new SetArg instances only when necessary.
6176 for s in required_set_names:
6177 expanded_set = InternalPackageSet(
6178 initial_atoms=setconfig.getSetAtoms(s))
6179 atom = SETPREFIX + s
6180 args.append(SetArg(arg=atom, set=expanded_set,
6181 root_config=root_config))
6182 vardb = root_config.trees["vartree"].dbapi
6184 for atom in arg.set:
6185 self._dep_stack.append(
6186 Dependency(atom=atom, root=root, parent=arg))
6187 if self._ignored_deps:
6188 self._dep_stack.extend(self._ignored_deps)
6189 self._ignored_deps = []
6190 if not self._create_graph(allow_unsatisfied=True):
6192 # Check the unsatisfied deps to see if any initially satisfied deps
6193 # will become unsatisfied due to an upgrade. Initially unsatisfied
6194 # deps are irrelevant since we only want to avoid breaking deps
6195 # that are initially satisfied.
6196 while self._unsatisfied_deps:
6197 dep = self._unsatisfied_deps.pop()
6198 matches = vardb.match_pkgs(dep.atom)
6200 self._initially_unsatisfied_deps.append(dep)
6202 # An scheduled installation broke a deep dependency.
6203 # Add the installed package to the graph so that it
6204 # will be appropriately reported as a slot collision
6205 # (possibly solvable via backtracking).
6206 pkg = matches[-1] # highest match
6207 if not self._add_pkg(pkg, dep):
6209 if not self._create_graph(allow_unsatisfied=True):
6213 def _pkg(self, cpv, type_name, root_config, installed=False):
6215 Get a package instance from the cache, or create a new
6216 one if necessary. Raises KeyError from aux_get if it
6217 failures for some reason (package does not exist or is
6222 operation = "nomerge"
6223 pkg = self._pkg_cache.get(
6224 (type_name, root_config.root, cpv, operation))
6226 tree_type = self.pkg_tree_map[type_name]
6227 db = root_config.trees[tree_type].dbapi
6228 db_keys = list(self._trees_orig[root_config.root][
6229 tree_type].dbapi._aux_cache_keys)
6231 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6233 raise portage.exception.PackageNotFound(cpv)
6234 pkg = Package(cpv=cpv, metadata=metadata,
6235 root_config=root_config, installed=installed)
6236 if type_name == "ebuild":
6237 settings = self.pkgsettings[root_config.root]
6238 settings.setcpv(pkg)
6239 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6240 self._pkg_cache[pkg] = pkg
# NOTE(review): this method's source text is a mangled extraction — original
# file line numbers are fused into each line, indentation is lost, and gaps
# in the embedded numbers (e.g. 6250 -> 6253, 6282 -> 6288) show that many
# interior lines are missing (try/except scaffolding, returns, loop headers).
# The code bytes below are preserved exactly as found; only review comments
# were added. Restore from upstream before attempting any behavior change.
6243 def validate_blockers(self):
6244 """Remove any blockers from the digraph that do not match any of the
6245 packages within the graph. If necessary, create hard deps to ensure
6246 correct merge order such that mutually blocking packages are never
6247 installed simultaneously."""
6249 if "--buildpkgonly" in self.myopts or \
6250 "--nodeps" in self.myopts:
6253 #if "deep" in self.myparams:
6255 # Pull in blockers from all installed packages that haven't already
6256 # been pulled into the depgraph. This is not enabled by default
6257 # due to the performance penalty that is incurred by all the
6258 # additional dep_check calls that are required.
# Phase 1 (per root): harvest blocker atoms for every installed package,
# using the persistent BlockerCache keyed by COUNTER to avoid re-running
# dep_check when cached data is still valid.
6260 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6261 for myroot in self.trees:
6262 vardb = self.trees[myroot]["vartree"].dbapi
6263 portdb = self.trees[myroot]["porttree"].dbapi
6264 pkgsettings = self.pkgsettings[myroot]
6265 final_db = self.mydbapi[myroot]
6267 blocker_cache = BlockerCache(myroot, vardb)
6268 stale_cache = set(blocker_cache)
6271 stale_cache.discard(cpv)
6272 pkg_in_graph = self.digraph.contains(pkg)
6274 # Check for masked installed packages. Only warn about
6275 # packages that are in the graph in order to avoid warning
6276 # about those that will be automatically uninstalled during
6277 # the merge process or by --depclean.
6279 if pkg_in_graph and not visible(pkgsettings, pkg):
6280 self._masked_installed.add(pkg)
6282 blocker_atoms = None
6288 self._blocker_parents.child_nodes(pkg))
6293 self._irrelevant_blockers.child_nodes(pkg))
6296 if blockers is not None:
6297 blockers = set(str(blocker.atom) \
6298 for blocker in blockers)
6300 # If this node has any blockers, create a "nomerge"
6301 # node for it so that they can be enforced.
6302 self.spinner.update()
6303 blocker_data = blocker_cache.get(cpv)
# Cached blocker data is only trusted while the package's COUNTER matches.
6304 if blocker_data is not None and \
6305 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6308 # If blocker data from the graph is available, use
6309 # it to validate the cache and update the cache if
6311 if blocker_data is not None and \
6312 blockers is not None:
6313 if not blockers.symmetric_difference(
6314 blocker_data.atoms):
6318 if blocker_data is None and \
6319 blockers is not None:
6320 # Re-use the blockers from the graph.
6321 blocker_atoms = sorted(blockers)
6322 counter = long(pkg.metadata["COUNTER"])
6324 blocker_cache.BlockerData(counter, blocker_atoms)
6325 blocker_cache[pkg.cpv] = blocker_data
6329 blocker_atoms = blocker_data.atoms
6331 # Use aux_get() to trigger FakeVartree global
6332 # updates on *DEPEND when appropriate.
6333 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6334 # It is crucial to pass in final_db here in order to
6335 # optimize dep_check calls by eliminating atoms via
6336 # dep_wordreduce and dep_eval calls.
6338 portage.dep._dep_check_strict = False
6340 success, atoms = portage.dep_check(depstr,
6341 final_db, pkgsettings, myuse=pkg.use.enabled,
6342 trees=self._graph_trees, myroot=myroot)
6343 except Exception, e:
6344 if isinstance(e, SystemExit):
6346 # This is helpful, for example, if a ValueError
6347 # is thrown from cpv_expand due to multiple
6348 # matches (this can happen if an atom lacks a
6350 show_invalid_depstring_notice(
6351 pkg, depstr, str(e))
6355 portage.dep._dep_check_strict = True
6357 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6358 if replacement_pkg and \
6359 replacement_pkg[0].operation == "merge":
6360 # This package is being replaced anyway, so
6361 # ignore invalid dependencies so as not to
6362 # annoy the user too much (otherwise they'd be
6363 # forced to manually unmerge it first).
6365 show_invalid_depstring_notice(pkg, depstr, atoms)
6367 blocker_atoms = [myatom for myatom in atoms \
6368 if myatom.startswith("!")]
6369 blocker_atoms.sort()
6370 counter = long(pkg.metadata["COUNTER"])
6371 blocker_cache[cpv] = \
6372 blocker_cache.BlockerData(counter, blocker_atoms)
6375 for atom in blocker_atoms:
6376 blocker = Blocker(atom=portage.dep.Atom(atom),
6377 eapi=pkg.metadata["EAPI"], root=myroot)
6378 self._blocker_parents.add(blocker, pkg)
6379 except portage.exception.InvalidAtom, e:
6380 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6381 show_invalid_depstring_notice(
6382 pkg, depstr, "Invalid Atom: %s" % (e,))
6384 for cpv in stale_cache:
6385 del blocker_cache[cpv]
6386 blocker_cache.flush()
6389 # Discard any "uninstall" tasks scheduled by previous calls
6390 # to this method, since those tasks may not make sense given
6391 # the current graph state.
6392 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6393 if previous_uninstall_tasks:
6394 self._blocker_uninstalls = digraph()
6395 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each collected blocker against the initial (installed)
# and final (post-merge) package sets, expanding old-style virtuals.
6397 for blocker in self._blocker_parents.leaf_nodes():
6398 self.spinner.update()
6399 root_config = self.roots[blocker.root]
6400 virtuals = root_config.settings.getvirtuals()
6401 myroot = blocker.root
6402 initial_db = self.trees[myroot]["vartree"].dbapi
6403 final_db = self.mydbapi[myroot]
6405 provider_virtual = False
6406 if blocker.cp in virtuals and \
6407 not self._have_new_virt(blocker.root, blocker.cp):
6408 provider_virtual = True
6410 if provider_virtual:
6412 for provider_entry in virtuals[blocker.cp]:
6414 portage.dep_getkey(provider_entry)
6415 atoms.append(blocker.atom.replace(
6416 blocker.cp, provider_cp))
6418 atoms = [blocker.atom]
6420 blocked_initial = []
6422 blocked_initial.extend(initial_db.match_pkgs(atom))
6426 blocked_final.extend(final_db.match_pkgs(atom))
6428 if not blocked_initial and not blocked_final:
6429 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6430 self._blocker_parents.remove(blocker)
6431 # Discard any parents that don't have any more blockers.
6432 for pkg in parent_pkgs:
6433 self._irrelevant_blockers.add(blocker, pkg)
6434 if not self._blocker_parents.child_nodes(pkg):
6435 self._blocker_parents.remove(pkg)
6437 for parent in self._blocker_parents.parent_nodes(blocker):
6438 unresolved_blocks = False
6439 depends_on_order = set()
6440 for pkg in blocked_initial:
6441 if pkg.slot_atom == parent.slot_atom:
6442 # TODO: Support blocks within slots in cases where it
6443 # might make sense. For example, a new version might
6444 # require that the old version be uninstalled at build
6447 if parent.installed:
6448 # Two currently installed packages conflict with
6449 # eachother. Ignore this case since the damage
6450 # is already done and this would be likely to
6451 # confuse users if displayed like a normal blocker.
6454 self._blocked_pkgs.add(pkg, blocker)
6456 if parent.operation == "merge":
6457 # Maybe the blocked package can be replaced or simply
6458 # unmerged to resolve this block.
6459 depends_on_order.add((pkg, parent))
6461 # None of the above blocker resolutions techniques apply,
6462 # so apparently this one is unresolvable.
6463 unresolved_blocks = True
6464 for pkg in blocked_final:
6465 if pkg.slot_atom == parent.slot_atom:
6466 # TODO: Support blocks within slots.
6468 if parent.operation == "nomerge" and \
6469 pkg.operation == "nomerge":
6470 # This blocker will be handled the next time that a
6471 # merge of either package is triggered.
6474 self._blocked_pkgs.add(pkg, blocker)
6476 # Maybe the blocking package can be
6477 # unmerged to resolve this block.
6478 if parent.operation == "merge" and pkg.installed:
6479 depends_on_order.add((pkg, parent))
6481 elif parent.operation == "nomerge":
6482 depends_on_order.add((parent, pkg))
6484 # None of the above blocker resolutions techniques apply,
6485 # so apparently this one is unresolvable.
6486 unresolved_blocks = True
6488 # Make sure we don't unmerge any package that have been pulled
6490 if not unresolved_blocks and depends_on_order:
6491 for inst_pkg, inst_task in depends_on_order:
6492 if self.digraph.contains(inst_pkg) and \
6493 self.digraph.parent_nodes(inst_pkg):
6494 unresolved_blocks = True
6497 if not unresolved_blocks and depends_on_order:
6498 for inst_pkg, inst_task in depends_on_order:
6499 uninst_task = Package(built=inst_pkg.built,
6500 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6501 metadata=inst_pkg.metadata,
6502 operation="uninstall",
6503 root_config=inst_pkg.root_config,
6504 type_name=inst_pkg.type_name)
6505 self._pkg_cache[uninst_task] = uninst_task
6506 # Enforce correct merge order with a hard dep.
6507 self.digraph.addnode(uninst_task, inst_task,
6508 priority=BlockerDepPriority.instance)
6509 # Count references to this blocker so that it can be
6510 # invalidated after nodes referencing it have been
6512 self._blocker_uninstalls.addnode(uninst_task, blocker)
6513 if not unresolved_blocks and not depends_on_order:
6514 self._irrelevant_blockers.add(blocker, parent)
6515 self._blocker_parents.remove_edge(blocker, parent)
6516 if not self._blocker_parents.parent_nodes(blocker):
6517 self._blocker_parents.remove(blocker)
6518 if not self._blocker_parents.child_nodes(parent):
6519 self._blocker_parents.remove(parent)
6520 if unresolved_blocks:
6521 self._unsolvable_blockers.add(blocker, parent)
6525 def _accept_blocker_conflicts(self):
6527 for x in ("--buildpkgonly", "--fetchonly",
6528 "--fetch-all-uri", "--nodeps"):
6529 if x in self.myopts:
6534 def _merge_order_bias(self, mygraph):
6535 """Order nodes from highest to lowest overall reference count for
6536 optimal leaf node selection."""
6538 for node in mygraph.order:
6539 node_info[node] = len(mygraph.parent_nodes(node))
6540 def cmp_merge_preference(node1, node2):
6541 return node_info[node2] - node_info[node1]
6542 mygraph.order.sort(cmp_merge_preference)
def altlist(self, reversed=False):
	"""
	Return a copy of the serialized merge task list, computing it
	(resolving conflicts and retrying serialization as needed) on
	first use.

	NOTE(review): restored from garbled source text — confirm the
	retry/return tail against upstream. The 'reversed' parameter
	shadows the builtin but is part of the public interface, so it
	is kept unchanged.

	@param reversed: return the list in reverse merge order
	"""
	while self._serialized_tasks_cache is None:
		self._resolve_conflicts()
		try:
			self._serialized_tasks_cache, self._scheduler_graph = \
				self._serialize_tasks()
		except self._serialize_tasks_retry:
			pass

	retlist = self._serialized_tasks_cache[:]
	if reversed:
		retlist.reverse()
	return retlist
def schedulerGraph(self):
	"""
	The scheduler graph is identical to the normal one except that
	uninstall edges are reversed in specific cases that require
	conflicting packages to be temporarily installed simultaneously.
	This is intended for use by the Scheduler in it's parallelization
	logic. It ensures that temporary simultaneous installation of
	conflicting packages is avoided when appropriate (especially for
	!!atom blockers), but allowed in specific cases that require it.

	Note that this method calls break_refs() which alters the state of
	internal Package instances such that this depgraph instance should
	not be used to perform any more calculations.

	NOTE(review): restored from garbled source text (docstring
	delimiters and the lazy altlist() call were dropped) — confirm
	against upstream.
	"""
	if self._scheduler_graph is None:
		# altlist() populates self._scheduler_graph as a side effect.
		self.altlist()
	self.break_refs(self._scheduler_graph.order)
	return self._scheduler_graph
def break_refs(self, nodes):
	"""
	Take a mergelist like that returned from self.altlist() and
	break any references that lead back to the depgraph. This is
	useful if you want to hold references to packages without
	also holding the depgraph on the heap.

	NOTE(review): restored from garbled source text (docstring
	delimiters and the loop header were dropped) — confirm against
	upstream.

	@param nodes: iterable of task/package nodes
	"""
	for node in nodes:
		if hasattr(node, "root_config"):
			# The FakeVartree references the _package_cache which
			# references the depgraph. So that Package instances don't
			# hold the depgraph and FakeVartree on the heap, replace
			# the RootConfig that references the FakeVartree with the
			# original RootConfig instance which references the actual
			# vartree.
			node.root_config = \
				self._trees_orig[node.root_config.root]["root_config"]
6596 def _resolve_conflicts(self):
6597 if not self._complete_graph():
6598 raise self._unknown_internal_error()
6600 if not self.validate_blockers():
6601 raise self._unknown_internal_error()
6603 if self._slot_collision_info:
6604 self._process_slot_conflicts()
6606 def _serialize_tasks(self):
6608 if "--debug" in self.myopts:
6609 writemsg("\ndigraph:\n\n", noiselevel=-1)
6610 self.digraph.debug_print()
6611 writemsg("\n", noiselevel=-1)
6613 scheduler_graph = self.digraph.copy()
6614 mygraph=self.digraph.copy()
6615 # Prune "nomerge" root nodes if nothing depends on them, since
6616 # otherwise they slow down merge order calculation. Don't remove
6617 # non-root nodes since they help optimize merge order in some cases
6618 # such as revdep-rebuild.
6619 removed_nodes = set()
6621 for node in mygraph.root_nodes():
6622 if not isinstance(node, Package) or \
6623 node.installed or node.onlydeps:
6624 removed_nodes.add(node)
6626 self.spinner.update()
6627 mygraph.difference_update(removed_nodes)
6628 if not removed_nodes:
6630 removed_nodes.clear()
6631 self._merge_order_bias(mygraph)
6632 def cmp_circular_bias(n1, n2):
6634 RDEPEND is stronger than PDEPEND and this function
6635 measures such a strength bias within a circular
6636 dependency relationship.
6638 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6639 ignore_priority=DepPriority.MEDIUM_SOFT)
6640 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6641 ignore_priority=DepPriority.MEDIUM_SOFT)
6642 if n1_n2_medium == n2_n1_medium:
6647 myblocker_uninstalls = self._blocker_uninstalls.copy()
6649 # Contains uninstall tasks that have been scheduled to
6650 # occur after overlapping blockers have been installed.
6651 scheduled_uninstalls = set()
6652 # Contains any Uninstall tasks that have been ignored
6653 # in order to avoid the circular deps code path. These
6654 # correspond to blocker conflicts that could not be
6656 ignored_uninstall_tasks = set()
6657 have_uninstall_task = False
6658 complete = "complete" in self.myparams
6661 def get_nodes(**kwargs):
6663 Returns leaf nodes excluding Uninstall instances
6664 since those should be executed as late as possible.
6666 return [node for node in mygraph.leaf_nodes(**kwargs) \
6667 if isinstance(node, Package) and \
6668 (node.operation != "uninstall" or \
6669 node in scheduled_uninstalls)]
6671 # sys-apps/portage needs special treatment if ROOT="/"
6672 running_root = self._running_root.root
6673 from portage.const import PORTAGE_PACKAGE_ATOM
6674 runtime_deps = InternalPackageSet(
6675 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6676 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6677 PORTAGE_PACKAGE_ATOM)
6678 replacement_portage = self.mydbapi[running_root].match_pkgs(
6679 PORTAGE_PACKAGE_ATOM)
6682 running_portage = running_portage[0]
6684 running_portage = None
6686 if replacement_portage:
6687 replacement_portage = replacement_portage[0]
6689 replacement_portage = None
6691 if replacement_portage == running_portage:
6692 replacement_portage = None
6694 if replacement_portage is not None:
6695 # update from running_portage to replacement_portage asap
6696 asap_nodes.append(replacement_portage)
6698 if running_portage is not None:
6700 portage_rdepend = self._select_atoms_highest_available(
6701 running_root, running_portage.metadata["RDEPEND"],
6702 myuse=running_portage.use.enabled,
6703 parent=running_portage, strict=False)
6704 except portage.exception.InvalidDependString, e:
6705 portage.writemsg("!!! Invalid RDEPEND in " + \
6706 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6707 (running_root, running_portage.cpv, e), noiselevel=-1)
6709 portage_rdepend = []
6710 runtime_deps.update(atom for atom in portage_rdepend \
6711 if not atom.startswith("!"))
6713 ignore_priority_soft_range = [None]
6714 ignore_priority_soft_range.extend(
6715 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6716 tree_mode = "--tree" in self.myopts
6717 # Tracks whether or not the current iteration should prefer asap_nodes
6718 # if available. This is set to False when the previous iteration
6719 # failed to select any nodes. It is reset whenever nodes are
6720 # successfully selected.
6723 # By default, try to avoid selecting root nodes whenever possible. This
6724 # helps ensure that the maximimum possible number of soft dependencies
6725 # have been removed from the graph before their parent nodes have
6726 # selected. This is especially important when those dependencies are
6727 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6728 # CHOST has been changed (like when building a stage3 from a stage2).
6729 accept_root_node = False
6731 # State of prefer_asap and accept_root_node flags for successive
6732 # iterations that loosen the criteria for node selection.
6734 # iteration prefer_asap accept_root_node
6739 # If no nodes are selected on the 3rd iteration, it is due to
6740 # unresolved blockers or circular dependencies.
6742 while not mygraph.empty():
6743 self.spinner.update()
6744 selected_nodes = None
6745 ignore_priority = None
6746 if prefer_asap and asap_nodes:
6747 """ASAP nodes are merged before their soft deps."""
6748 asap_nodes = [node for node in asap_nodes \
6749 if mygraph.contains(node)]
6750 for node in asap_nodes:
6751 if not mygraph.child_nodes(node,
6752 ignore_priority=DepPriority.SOFT):
6753 selected_nodes = [node]
6754 asap_nodes.remove(node)
6756 if not selected_nodes and \
6757 not (prefer_asap and asap_nodes):
6758 for ignore_priority in ignore_priority_soft_range:
6759 nodes = get_nodes(ignore_priority=ignore_priority)
6763 if ignore_priority is None and not tree_mode:
6764 # Greedily pop all of these nodes since no relationship
6765 # has been ignored. This optimization destroys --tree
6766 # output, so it's disabled in reversed mode. If there
6767 # is a mix of merge and uninstall nodes, save the
6768 # uninstall nodes from later since sometimes a merge
6769 # node will render an install node unnecessary, and
6770 # we want to avoid doing a separate uninstall task in
6772 merge_nodes = [node for node in nodes \
6773 if node.operation == "merge"]
6775 selected_nodes = merge_nodes
6777 selected_nodes = nodes
6779 # For optimal merge order:
6780 # * Only pop one node.
6781 # * Removing a root node (node without a parent)
6782 # will not produce a leaf node, so avoid it.
6784 if mygraph.parent_nodes(node):
6785 # found a non-root node
6786 selected_nodes = [node]
6788 if not selected_nodes and \
6789 (accept_root_node or ignore_priority is None):
6790 # settle for a root node
6791 selected_nodes = [nodes[0]]
6793 if not selected_nodes:
6794 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6796 """Recursively gather a group of nodes that RDEPEND on
6797 eachother. This ensures that they are merged as a group
6798 and get their RDEPENDs satisfied as soon as possible."""
6799 def gather_deps(ignore_priority,
6800 mergeable_nodes, selected_nodes, node):
6801 if node in selected_nodes:
6803 if node not in mergeable_nodes:
6805 if node == replacement_portage and \
6806 mygraph.child_nodes(node,
6807 ignore_priority=DepPriority.MEDIUM_SOFT):
6808 # Make sure that portage always has all of it's
6809 # RDEPENDs installed first.
6811 selected_nodes.add(node)
6812 for child in mygraph.child_nodes(node,
6813 ignore_priority=ignore_priority):
6814 if not gather_deps(ignore_priority,
6815 mergeable_nodes, selected_nodes, child):
6818 mergeable_nodes = set(nodes)
6819 if prefer_asap and asap_nodes:
6821 for ignore_priority in xrange(DepPriority.SOFT,
6822 DepPriority.MEDIUM_SOFT + 1):
6824 if nodes is not asap_nodes and \
6825 not accept_root_node and \
6826 not mygraph.parent_nodes(node):
6828 selected_nodes = set()
6829 if gather_deps(ignore_priority,
6830 mergeable_nodes, selected_nodes, node):
6833 selected_nodes = None
6837 # If any nodes have been selected here, it's always
6838 # possible that anything up to a MEDIUM_SOFT priority
6839 # relationship has been ignored. This state is recorded
6840 # in ignore_priority so that relevant nodes will be
6841 # added to asap_nodes when appropriate.
6843 ignore_priority = DepPriority.MEDIUM_SOFT
6845 if prefer_asap and asap_nodes and not selected_nodes:
6846 # We failed to find any asap nodes to merge, so ignore
6847 # them for the next iteration.
6851 if not selected_nodes and not accept_root_node:
6852 # Maybe there are only root nodes left, so accept them
6853 # for the next iteration.
6854 accept_root_node = True
6857 if selected_nodes and ignore_priority > DepPriority.SOFT:
6858 # Try to merge ignored medium deps as soon as possible.
6859 for node in selected_nodes:
6860 children = set(mygraph.child_nodes(node))
6861 soft = children.difference(
6862 mygraph.child_nodes(node,
6863 ignore_priority=DepPriority.SOFT))
6864 medium_soft = children.difference(
6865 mygraph.child_nodes(node,
6866 ignore_priority=DepPriority.MEDIUM_SOFT))
6867 medium_soft.difference_update(soft)
6868 for child in medium_soft:
6869 if child in selected_nodes:
6871 if child in asap_nodes:
6873 asap_nodes.append(child)
6875 if selected_nodes and len(selected_nodes) > 1:
6876 if not isinstance(selected_nodes, list):
6877 selected_nodes = list(selected_nodes)
6878 selected_nodes.sort(cmp_circular_bias)
6880 if not selected_nodes and not myblocker_uninstalls.is_empty():
6881 # An Uninstall task needs to be executed in order to
6882 # avoid conflict if possible.
6883 min_parent_deps = None
6885 for task in myblocker_uninstalls.leaf_nodes():
6886 # Do some sanity checks so that system or world packages
6887 # don't get uninstalled inappropriately here (only really
6888 # necessary when --complete-graph has not been enabled).
6890 if task in ignored_uninstall_tasks:
6893 if task in scheduled_uninstalls:
6894 # It's been scheduled but it hasn't
6895 # been executed yet due to dependence
6896 # on installation of blocking packages.
6899 root_config = self.roots[task.root]
6900 inst_pkg = self._pkg_cache[
6901 ("installed", task.root, task.cpv, "nomerge")]
6903 if self.digraph.contains(inst_pkg):
6906 forbid_overlap = False
6907 heuristic_overlap = False
6908 for blocker in myblocker_uninstalls.parent_nodes(task):
6909 if blocker.eapi in ("0", "1"):
6910 heuristic_overlap = True
6911 elif blocker.atom.blocker.overlap.forbid:
6912 forbid_overlap = True
6914 if forbid_overlap and running_root == task.root:
6917 if heuristic_overlap and running_root == task.root:
6918 # Never uninstall sys-apps/portage or it's essential
6919 # dependencies, except through replacement.
6921 runtime_dep_atoms = \
6922 list(runtime_deps.iterAtomsForPackage(task))
6923 except portage.exception.InvalidDependString, e:
6924 portage.writemsg("!!! Invalid PROVIDE in " + \
6925 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6926 (task.root, task.cpv, e), noiselevel=-1)
6930 # Don't uninstall a runtime dep if it appears
6931 # to be the only suitable one installed.
6933 vardb = root_config.trees["vartree"].dbapi
6934 for atom in runtime_dep_atoms:
6935 other_version = None
6936 for pkg in vardb.match_pkgs(atom):
6937 if pkg.cpv == task.cpv and \
6938 pkg.metadata["COUNTER"] == \
6939 task.metadata["COUNTER"]:
6943 if other_version is None:
6949 # For packages in the system set, don't take
6950 # any chances. If the conflict can't be resolved
6951 # by a normal replacement operation then abort.
6954 for atom in root_config.sets[
6955 "system"].iterAtomsForPackage(task):
6958 except portage.exception.InvalidDependString, e:
6959 portage.writemsg("!!! Invalid PROVIDE in " + \
6960 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6961 (task.root, task.cpv, e), noiselevel=-1)
6967 # Note that the world check isn't always
6968 # necessary since self._complete_graph() will
6969 # add all packages from the system and world sets to the
6970 # graph. This just allows unresolved conflicts to be
6971 # detected as early as possible, which makes it possible
6972 # to avoid calling self._complete_graph() when it is
6973 # unnecessary due to blockers triggering an abortion.
6975 # For packages in the world set, go ahead an uninstall
6976 # when necessary, as long as the atom will be satisfied
6977 # in the final state.
6978 graph_db = self.mydbapi[task.root]
6981 for atom in root_config.sets[
6982 "world"].iterAtomsForPackage(task):
6984 for pkg in graph_db.match_pkgs(atom):
6991 self._blocked_world_pkgs[inst_pkg] = atom
6993 except portage.exception.InvalidDependString, e:
6994 portage.writemsg("!!! Invalid PROVIDE in " + \
6995 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6996 (task.root, task.cpv, e), noiselevel=-1)
7002 # Check the deps of parent nodes to ensure that
7003 # the chosen task produces a leaf node. Maybe
7004 # this can be optimized some more to make the
7005 # best possible choice, but the current algorithm
7006 # is simple and should be near optimal for most
7009 for parent in mygraph.parent_nodes(task):
7010 parent_deps.update(mygraph.child_nodes(parent,
7011 ignore_priority=DepPriority.MEDIUM_SOFT))
7012 parent_deps.remove(task)
7013 if min_parent_deps is None or \
7014 len(parent_deps) < min_parent_deps:
7015 min_parent_deps = len(parent_deps)
7018 if uninst_task is not None:
7019 # The uninstall is performed only after blocking
7020 # packages have been merged on top of it. File
7021 # collisions between blocking packages are detected
7022 # and removed from the list of files to be uninstalled.
7023 scheduled_uninstalls.add(uninst_task)
7024 parent_nodes = mygraph.parent_nodes(uninst_task)
7026 # Reverse the parent -> uninstall edges since we want
7027 # to do the uninstall after blocking packages have
7028 # been merged on top of it.
7029 mygraph.remove(uninst_task)
7030 for blocked_pkg in parent_nodes:
7031 mygraph.add(blocked_pkg, uninst_task,
7032 priority=BlockerDepPriority.instance)
7033 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7034 scheduler_graph.add(blocked_pkg, uninst_task,
7035 priority=BlockerDepPriority.instance)
7038 # None of the Uninstall tasks are acceptable, so
7039 # the corresponding blockers are unresolvable.
7040 # We need to drop an Uninstall task here in order
7041 # to avoid the circular deps code path, but the
7042 # blocker will still be counted as an unresolved
7044 for node in myblocker_uninstalls.leaf_nodes():
7046 mygraph.remove(node)
7051 ignored_uninstall_tasks.add(node)
7054 if uninst_task is not None:
7055 # After dropping an Uninstall task, reset
7056 # the state variables for leaf node selection and
7057 # continue trying to select leaf nodes.
7059 accept_root_node = False
7062 if not selected_nodes:
7063 self._circular_deps_for_display = mygraph
7064 raise self._unknown_internal_error()
7066 # At this point, we've succeeded in selecting one or more nodes, so
7067 # it's now safe to reset the prefer_asap and accept_root_node flags
7068 # to their default states.
7070 accept_root_node = False
7072 mygraph.difference_update(selected_nodes)
7074 for node in selected_nodes:
7075 if isinstance(node, Package) and \
7076 node.operation == "nomerge":
7079 # Handle interactions between blockers
7080 # and uninstallation tasks.
7081 solved_blockers = set()
7083 if isinstance(node, Package) and \
7084 "uninstall" == node.operation:
7085 have_uninstall_task = True
7088 vardb = self.trees[node.root]["vartree"].dbapi
7089 previous_cpv = vardb.match(node.slot_atom)
7091 # The package will be replaced by this one, so remove
7092 # the corresponding Uninstall task if necessary.
7093 previous_cpv = previous_cpv[0]
7095 ("installed", node.root, previous_cpv, "uninstall")
7097 mygraph.remove(uninst_task)
7101 if uninst_task is not None and \
7102 uninst_task not in ignored_uninstall_tasks and \
7103 myblocker_uninstalls.contains(uninst_task):
7104 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7105 myblocker_uninstalls.remove(uninst_task)
7106 # Discard any blockers that this Uninstall solves.
7107 for blocker in blocker_nodes:
7108 if not myblocker_uninstalls.child_nodes(blocker):
7109 myblocker_uninstalls.remove(blocker)
7110 solved_blockers.add(blocker)
7112 retlist.append(node)
7114 if (isinstance(node, Package) and \
7115 "uninstall" == node.operation) or \
7116 (uninst_task is not None and \
7117 uninst_task in scheduled_uninstalls):
7118 # Include satisfied blockers in the merge list
7119 # since the user might be interested and also
7120 # it serves as an indicator that blocking packages
7121 # will be temporarily installed simultaneously.
7122 for blocker in solved_blockers:
7123 retlist.append(Blocker(atom=blocker.atom,
7124 root=blocker.root, eapi=blocker.eapi,
7127 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7128 for node in myblocker_uninstalls.root_nodes():
7129 unsolvable_blockers.add(node)
7131 for blocker in unsolvable_blockers:
7132 retlist.append(blocker)
7134 # If any Uninstall tasks need to be executed in order
7135 # to avoid a conflict, complete the graph with any
7136 # dependencies that may have been initially
7137 # neglected (to ensure that unsafe Uninstall tasks
7138 # are properly identified and blocked from execution).
7139 if have_uninstall_task and \
7141 not unsolvable_blockers:
7142 self.myparams.add("complete")
7143 raise self._serialize_tasks_retry("")
7145 if unsolvable_blockers and \
7146 not self._accept_blocker_conflicts():
7147 self._unsatisfied_blockers_for_display = unsolvable_blockers
7148 self._serialized_tasks_cache = retlist[:]
7149 self._scheduler_graph = scheduler_graph
7150 raise self._unknown_internal_error()
7152 if self._slot_collision_info and \
7153 not self._accept_blocker_conflicts():
7154 self._serialized_tasks_cache = retlist[:]
7155 self._scheduler_graph = scheduler_graph
7156 raise self._unknown_internal_error()
7158 return retlist, scheduler_graph
7160 def _show_circular_deps(self, mygraph):
# Report an unbreakable circular-dependency situation to the user:
# trim uninteresting nodes from the graph, display the remaining
# cycle participants (with USE flags), then print advice.
# NOTE(review): this listing is elided (gaps in the embedded line
# numbering) — some statements are missing from this view.
7161 # No leaf nodes are available, so we have a circular
7162 # dependency panic situation. Reduce the noise level to a
7163 # minimum via repeated elimination of root nodes since they
7164 # have no parents and thus can not be part of a cycle.
7166 root_nodes = mygraph.root_nodes(
7167 ignore_priority=DepPriority.MEDIUM_SOFT)
7170 mygraph.difference_update(root_nodes)
7171 # Display the USE flags that are enabled on nodes that are part
7172 # of dependency cycles in case that helps the user decide to
7173 # disable some of them.
7175 tempgraph = mygraph.copy()
# Peel nodes off a scratch copy of the graph to build a display
# order; presumably tempgraph.order[0] is a fallback used when no
# leaf node exists inside a cycle (the guard is elided from this
# view — TODO confirm against the full source).
7176 while not tempgraph.empty():
7177 nodes = tempgraph.leaf_nodes()
7179 node = tempgraph.order[0]
7182 display_order.append(node)
7183 tempgraph.remove(node)
7184 display_order.reverse()
# Force verbose --tree output so the cycle participants are shown
# with maximum detail regardless of the user's own options.
7185 self.myopts.pop("--quiet", None)
7186 self.myopts.pop("--verbose", None)
7187 self.myopts["--tree"] = True
7188 portage.writemsg("\n\n", noiselevel=-1)
7189 self.display(display_order)
7190 prefix = colorize("BAD", " * ")
7191 portage.writemsg("\n", noiselevel=-1)
7192 portage.writemsg(prefix + "Error: circular dependencies:\n",
7194 portage.writemsg("\n", noiselevel=-1)
# Dump the raw dependency graph for debugging the cycle.
7195 mygraph.debug_print()
7196 portage.writemsg("\n", noiselevel=-1)
7197 portage.writemsg(prefix + "Note that circular dependencies " + \
7198 "can often be avoided by temporarily\n", noiselevel=-1)
7199 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7200 "optional dependencies.\n", noiselevel=-1)
7202 def _show_merge_list(self):
7203 if self._serialized_tasks_cache is not None and \
7204 not (self._displayed_list and \
7205 (self._displayed_list == self._serialized_tasks_cache or \
7206 self._displayed_list == \
7207 list(reversed(self._serialized_tasks_cache)))):
7208 display_list = self._serialized_tasks_cache[:]
7209 if "--tree" in self.myopts:
7210 display_list.reverse()
7211 self.display(display_list)
7213 def _show_unsatisfied_blockers(self, blockers):
# Show the merge list, then explain which packages conflict and which
# parents pulled each conflicting package into the graph.
# NOTE(review): this listing is elided — initializers for several
# locals (conflict_pkgs, pruned_pkgs, max_parents, pruned_list,
# indent, and msg-as-list) are missing from this view.
7214 self._show_merge_list()
7215 msg = "Error: The above package list contains " + \
7216 "packages which cannot be installed " + \
7217 "at the same time on the same system."
7218 prefix = colorize("BAD", " * ")
7219 from textwrap import wrap
7220 portage.writemsg("\n", noiselevel=-1)
# Wrap the error banner to 70 columns, one prefixed line at a time.
7221 for line in wrap(msg, 70):
7222 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7224 # Display the conflicting packages along with the packages
7225 # that pulled them in. This is helpful for troubleshooting
7226 # cases in which blockers don't solve automatically and
7227 # the reasons are not apparent from the normal merge list
# Collect, for each blocker, the blocked packages and the parents of
# the blocker, mapping each such package to the atoms that pulled it
# into the graph. Packages with no recorded parent atoms fall back to
# a synthetic "@world" parent when they were blocked world packages.
7231 for blocker in blockers:
7232 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7233 self._blocker_parents.parent_nodes(blocker)):
7234 parent_atoms = self._parent_atoms.get(pkg)
7235 if not parent_atoms:
7236 atom = self._blocked_world_pkgs.get(pkg)
7237 if atom is not None:
7238 parent_atoms = set([("@world", atom)])
7240 conflict_pkgs[pkg] = parent_atoms
7243 # Reduce noise by pruning packages that are only
7244 # pulled in by other conflict packages.
7246 for pkg, parent_atoms in conflict_pkgs.iteritems():
7247 relevant_parent = False
7248 for parent, atom in parent_atoms:
7249 if parent not in conflict_pkgs:
7250 relevant_parent = True
7252 if not relevant_parent:
7253 pruned_pkgs.add(pkg)
# Deletion happens after iteration to avoid mutating the dict
# while iterating it.
7254 for pkg in pruned_pkgs:
7255 del conflict_pkgs[pkg]
7261 # Max number of parents shown, to avoid flooding the display.
7263 for pkg, parent_atoms in conflict_pkgs.iteritems():
7267 # Prefer packages that are not directly involved in a conflict.
# First pass: take up to max_parents parents that are themselves
# conflict-free; second pass fills any remaining display slots.
7268 for parent_atom in parent_atoms:
7269 if len(pruned_list) >= max_parents:
7271 parent, atom = parent_atom
7272 if parent not in conflict_pkgs:
7273 pruned_list.add(parent_atom)
7275 for parent_atom in parent_atoms:
7276 if len(pruned_list) >= max_parents:
7278 pruned_list.add(parent_atom)
7280 omitted_parents = len(parent_atoms) - len(pruned_list)
7281 msg.append(indent + "%s pulled in by\n" % pkg)
7283 for parent_atom in pruned_list:
7284 parent, atom = parent_atom
7285 msg.append(2*indent)
7286 if isinstance(parent,
7287 (PackageArg, AtomArg)):
7288 # For PackageArg and AtomArg types, it's
7289 # redundant to display the atom attribute.
7290 msg.append(str(parent))
7292 # Display the specific atom from SetArg or
7294 msg.append("%s required by %s" % (atom, parent))
# Indicate how many additional parents were truncated from display.
7298 msg.append(2*indent)
7299 msg.append("(and %d more)\n" % omitted_parents)
7303 sys.stderr.write("".join(msg))
# Point the user at the blocker documentation unless --quiet is set.
7306 if "--quiet" not in self.myopts:
7307 show_blocker_docs_link()
7309 def display(self, mylist, favorites=[], verbosity=None):
7311 # This is used to prevent display_problems() from
7312 # redundantly displaying this exact same merge list
7313 # again via _show_merge_list().
7314 self._displayed_list = mylist
7316 if verbosity is None:
7317 verbosity = ("--quiet" in self.myopts and 1 or \
7318 "--verbose" in self.myopts and 3 or 2)
7319 favorites_set = InternalPackageSet(favorites)
7320 oneshot = "--oneshot" in self.myopts or \
7321 "--onlydeps" in self.myopts
7322 columns = "--columns" in self.myopts
7327 counters = PackageCounters()
7329 if verbosity == 1 and "--verbose" not in self.myopts:
7330 def create_use_string(*args):
7333 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7335 is_new, reinst_flags,
7336 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7337 alphabetical=("--alphabetical" in self.myopts)):
7345 cur_iuse = set(cur_iuse)
7346 enabled_flags = cur_iuse.intersection(cur_use)
7347 removed_iuse = set(old_iuse).difference(cur_iuse)
7348 any_iuse = cur_iuse.union(old_iuse)
7349 any_iuse = list(any_iuse)
7351 for flag in any_iuse:
7354 reinst_flag = reinst_flags and flag in reinst_flags
7355 if flag in enabled_flags:
7357 if is_new or flag in old_use and \
7358 (all_flags or reinst_flag):
7359 flag_str = red(flag)
7360 elif flag not in old_iuse:
7361 flag_str = yellow(flag) + "%*"
7362 elif flag not in old_use:
7363 flag_str = green(flag) + "*"
7364 elif flag in removed_iuse:
7365 if all_flags or reinst_flag:
7366 flag_str = yellow("-" + flag) + "%"
7369 flag_str = "(" + flag_str + ")"
7370 removed.append(flag_str)
7373 if is_new or flag in old_iuse and \
7374 flag not in old_use and \
7375 (all_flags or reinst_flag):
7376 flag_str = blue("-" + flag)
7377 elif flag not in old_iuse:
7378 flag_str = yellow("-" + flag)
7379 if flag not in iuse_forced:
7381 elif flag in old_use:
7382 flag_str = green("-" + flag) + "*"
7384 if flag in iuse_forced:
7385 flag_str = "(" + flag_str + ")"
7387 enabled.append(flag_str)
7389 disabled.append(flag_str)
7392 ret = " ".join(enabled)
7394 ret = " ".join(enabled + disabled + removed)
7396 ret = '%s="%s" ' % (name, ret)
7399 repo_display = RepoDisplay(self.roots)
7403 mygraph = self.digraph.copy()
7405 # If there are any Uninstall instances, add the corresponding
7406 # blockers to the digraph (useful for --tree display).
7408 executed_uninstalls = set(node for node in mylist \
7409 if isinstance(node, Package) and node.operation == "unmerge")
7411 for uninstall in self._blocker_uninstalls.leaf_nodes():
7412 uninstall_parents = \
7413 self._blocker_uninstalls.parent_nodes(uninstall)
7414 if not uninstall_parents:
7417 # Remove the corresponding "nomerge" node and substitute
7418 # the Uninstall node.
7419 inst_pkg = self._pkg_cache[
7420 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7422 mygraph.remove(inst_pkg)
7427 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7429 inst_pkg_blockers = []
7431 # Break the Package -> Uninstall edges.
7432 mygraph.remove(uninstall)
7434 # Resolution of a package's blockers
7435 # depend on it's own uninstallation.
7436 for blocker in inst_pkg_blockers:
7437 mygraph.add(uninstall, blocker)
7439 # Expand Package -> Uninstall edges into
7440 # Package -> Blocker -> Uninstall edges.
7441 for blocker in uninstall_parents:
7442 mygraph.add(uninstall, blocker)
7443 for parent in self._blocker_parents.parent_nodes(blocker):
7444 if parent != inst_pkg:
7445 mygraph.add(blocker, parent)
7447 # If the uninstall task did not need to be executed because
7448 # of an upgrade, display Blocker -> Upgrade edges since the
7449 # corresponding Blocker -> Uninstall edges will not be shown.
7451 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7452 if upgrade_node is not None and \
7453 uninstall not in executed_uninstalls:
7454 for blocker in uninstall_parents:
7455 mygraph.add(upgrade_node, blocker)
7457 unsatisfied_blockers = []
7462 if isinstance(x, Blocker) and not x.satisfied:
7463 unsatisfied_blockers.append(x)
7466 if "--tree" in self.myopts:
7467 depth = len(tree_nodes)
7468 while depth and graph_key not in \
7469 mygraph.child_nodes(tree_nodes[depth-1]):
7472 tree_nodes = tree_nodes[:depth]
7473 tree_nodes.append(graph_key)
7474 display_list.append((x, depth, True))
7475 shown_edges.add((graph_key, tree_nodes[depth-1]))
7477 traversed_nodes = set() # prevent endless circles
7478 traversed_nodes.add(graph_key)
7479 def add_parents(current_node, ordered):
7481 # Do not traverse to parents if this node is an
7482 # an argument or a direct member of a set that has
7483 # been specified as an argument (system or world).
7484 if current_node not in self._set_nodes:
7485 parent_nodes = mygraph.parent_nodes(current_node)
7487 child_nodes = set(mygraph.child_nodes(current_node))
7488 selected_parent = None
7489 # First, try to avoid a direct cycle.
7490 for node in parent_nodes:
7491 if not isinstance(node, (Blocker, Package)):
7493 if node not in traversed_nodes and \
7494 node not in child_nodes:
7495 edge = (current_node, node)
7496 if edge in shown_edges:
7498 selected_parent = node
7500 if not selected_parent:
7501 # A direct cycle is unavoidable.
7502 for node in parent_nodes:
7503 if not isinstance(node, (Blocker, Package)):
7505 if node not in traversed_nodes:
7506 edge = (current_node, node)
7507 if edge in shown_edges:
7509 selected_parent = node
7512 shown_edges.add((current_node, selected_parent))
7513 traversed_nodes.add(selected_parent)
7514 add_parents(selected_parent, False)
7515 display_list.append((current_node,
7516 len(tree_nodes), ordered))
7517 tree_nodes.append(current_node)
7519 add_parents(graph_key, True)
7521 display_list.append((x, depth, True))
7522 mylist = display_list
7523 for x in unsatisfied_blockers:
7524 mylist.append((x, 0, True))
7526 last_merge_depth = 0
7527 for i in xrange(len(mylist)-1,-1,-1):
7528 graph_key, depth, ordered = mylist[i]
7529 if not ordered and depth == 0 and i > 0 \
7530 and graph_key == mylist[i-1][0] and \
7531 mylist[i-1][1] == 0:
7532 # An ordered node got a consecutive duplicate when the tree was
7536 if ordered and graph_key[-1] != "nomerge":
7537 last_merge_depth = depth
7539 if depth >= last_merge_depth or \
7540 i < len(mylist) - 1 and \
7541 depth >= mylist[i+1][1]:
7544 from portage import flatten
7545 from portage.dep import use_reduce, paren_reduce
7546 # files to fetch list - avoids counting a same file twice
7547 # in size display (verbose mode)
7550 # Use this set to detect when all the "repoadd" strings are "[0]"
7551 # and disable the entire repo display in this case.
7554 for mylist_index in xrange(len(mylist)):
7555 x, depth, ordered = mylist[mylist_index]
7559 portdb = self.trees[myroot]["porttree"].dbapi
7560 bindb = self.trees[myroot]["bintree"].dbapi
7561 vardb = self.trees[myroot]["vartree"].dbapi
7562 vartree = self.trees[myroot]["vartree"]
7563 pkgsettings = self.pkgsettings[myroot]
7566 indent = " " * depth
7568 if isinstance(x, Blocker):
7570 blocker_style = "PKG_BLOCKER_SATISFIED"
7571 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7573 blocker_style = "PKG_BLOCKER"
7574 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7576 counters.blocks += 1
7578 counters.blocks_satisfied += 1
7579 resolved = portage.key_expand(
7580 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7581 if "--columns" in self.myopts and "--quiet" in self.myopts:
7582 addl += " " + colorize(blocker_style, resolved)
7584 addl = "[%s %s] %s%s" % \
7585 (colorize(blocker_style, "blocks"),
7586 addl, indent, colorize(blocker_style, resolved))
7587 block_parents = self._blocker_parents.parent_nodes(x)
7588 block_parents = set([pnode[2] for pnode in block_parents])
7589 block_parents = ", ".join(block_parents)
7591 addl += colorize(blocker_style,
7592 " (\"%s\" is blocking %s)") % \
7593 (str(x.atom).lstrip("!"), block_parents)
7595 addl += colorize(blocker_style,
7596 " (is blocking %s)") % block_parents
7597 if isinstance(x, Blocker) and x.satisfied:
7602 blockers.append(addl)
7605 pkg_merge = ordered and pkg_status == "merge"
7606 if not pkg_merge and pkg_status == "merge":
7607 pkg_status = "nomerge"
7608 built = pkg_type != "ebuild"
7609 installed = pkg_type == "installed"
7611 metadata = pkg.metadata
7613 repo_name = metadata["repository"]
7614 if pkg_type == "ebuild":
7615 ebuild_path = portdb.findname(pkg_key)
7616 if not ebuild_path: # shouldn't happen
7617 raise portage.exception.PackageNotFound(pkg_key)
7618 repo_path_real = os.path.dirname(os.path.dirname(
7619 os.path.dirname(ebuild_path)))
7621 repo_path_real = portdb.getRepositoryPath(repo_name)
7622 pkg_use = list(pkg.use.enabled)
7624 restrict = flatten(use_reduce(paren_reduce(
7625 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7626 except portage.exception.InvalidDependString, e:
7627 if not pkg.installed:
7628 show_invalid_depstring_notice(x,
7629 pkg.metadata["RESTRICT"], str(e))
7633 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7634 "fetch" in restrict:
7637 counters.restrict_fetch += 1
7638 if portdb.fetch_check(pkg_key, pkg_use):
7641 counters.restrict_fetch_satisfied += 1
7643 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7644 #param is used for -u, where you still *do* want to see when something is being upgraded.
7647 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7648 if vardb.cpv_exists(pkg_key):
7649 addl=" "+yellow("R")+fetch+" "
7652 counters.reinst += 1
7653 elif pkg_status == "uninstall":
7654 counters.uninst += 1
7655 # filter out old-style virtual matches
7656 elif installed_versions and \
7657 portage.cpv_getkey(installed_versions[0]) == \
7658 portage.cpv_getkey(pkg_key):
7659 myinslotlist = vardb.match(pkg.slot_atom)
7660 # If this is the first install of a new-style virtual, we
7661 # need to filter out old-style virtual matches.
7662 if myinslotlist and \
7663 portage.cpv_getkey(myinslotlist[0]) != \
7664 portage.cpv_getkey(pkg_key):
7667 myoldbest = myinslotlist[:]
7669 if not portage.dep.cpvequal(pkg_key,
7670 portage.best([pkg_key] + myoldbest)):
7672 addl += turquoise("U")+blue("D")
7674 counters.downgrades += 1
7677 addl += turquoise("U") + " "
7679 counters.upgrades += 1
7681 # New slot, mark it new.
7682 addl = " " + green("NS") + fetch + " "
7683 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7685 counters.newslot += 1
7687 if "--changelog" in self.myopts:
7688 inst_matches = vardb.match(pkg.slot_atom)
7690 changelogs.extend(self.calc_changelog(
7691 portdb.findname(pkg_key),
7692 inst_matches[0], pkg_key))
7694 addl = " " + green("N") + " " + fetch + " "
7703 forced_flags = set()
7704 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7705 forced_flags.update(pkgsettings.useforce)
7706 forced_flags.update(pkgsettings.usemask)
7708 cur_use = [flag for flag in pkg.use.enabled \
7709 if flag in pkg.iuse.all]
7710 cur_iuse = sorted(pkg.iuse.all)
7712 if myoldbest and myinslotlist:
7713 previous_cpv = myoldbest[0]
7715 previous_cpv = pkg.cpv
7716 if vardb.cpv_exists(previous_cpv):
7717 old_iuse, old_use = vardb.aux_get(
7718 previous_cpv, ["IUSE", "USE"])
7719 old_iuse = list(set(
7720 filter_iuse_defaults(old_iuse.split())))
7722 old_use = old_use.split()
7729 old_use = [flag for flag in old_use if flag in old_iuse]
7731 use_expand = pkgsettings["USE_EXPAND"].lower().split()
7733 use_expand.reverse()
7734 use_expand_hidden = \
7735 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7737 def map_to_use_expand(myvals, forcedFlags=False,
7741 for exp in use_expand:
7744 for val in myvals[:]:
7745 if val.startswith(exp.lower()+"_"):
7746 if val in forced_flags:
7747 forced[exp].add(val[len(exp)+1:])
7748 ret[exp].append(val[len(exp)+1:])
7751 forced["USE"] = [val for val in myvals \
7752 if val in forced_flags]
7754 for exp in use_expand_hidden:
7760 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7761 # are the only thing that triggered reinstallation.
7762 reinst_flags_map = {}
7763 reinstall_for_flags = self._reinstall_nodes.get(pkg)
7764 reinst_expand_map = None
7765 if reinstall_for_flags:
7766 reinst_flags_map = map_to_use_expand(
7767 list(reinstall_for_flags), removeHidden=False)
7768 for k in list(reinst_flags_map):
7769 if not reinst_flags_map[k]:
7770 del reinst_flags_map[k]
7771 if not reinst_flags_map.get("USE"):
7772 reinst_expand_map = reinst_flags_map.copy()
7773 reinst_expand_map.pop("USE", None)
7774 if reinst_expand_map and \
7775 not set(reinst_expand_map).difference(
7777 use_expand_hidden = \
7778 set(use_expand_hidden).difference(
7781 cur_iuse_map, iuse_forced = \
7782 map_to_use_expand(cur_iuse, forcedFlags=True)
7783 cur_use_map = map_to_use_expand(cur_use)
7784 old_iuse_map = map_to_use_expand(old_iuse)
7785 old_use_map = map_to_use_expand(old_use)
7788 use_expand.insert(0, "USE")
7790 for key in use_expand:
7791 if key in use_expand_hidden:
7793 verboseadd += create_use_string(key.upper(),
7794 cur_iuse_map[key], iuse_forced[key],
7795 cur_use_map[key], old_iuse_map[key],
7796 old_use_map[key], is_new,
7797 reinst_flags_map.get(key))
7802 if pkg_type == "ebuild" and pkg_merge:
7804 myfilesdict = portdb.getfetchsizes(pkg_key,
7805 useflags=pkg_use, debug=self.edebug)
7806 except portage.exception.InvalidDependString, e:
7807 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7808 show_invalid_depstring_notice(x, src_uri, str(e))
7811 if myfilesdict is None:
7812 myfilesdict="[empty/missing/bad digest]"
7814 for myfetchfile in myfilesdict:
7815 if myfetchfile not in myfetchlist:
7816 mysize+=myfilesdict[myfetchfile]
7817 myfetchlist.append(myfetchfile)
7819 counters.totalsize += mysize
7820 verboseadd += format_size(mysize)
7823 # assign index for a previous version in the same slot
7824 has_previous = False
7825 repo_name_prev = None
7826 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7828 slot_matches = vardb.match(slot_atom)
7831 repo_name_prev = vardb.aux_get(slot_matches[0],
7834 # now use the data to generate output
7835 if pkg.installed or not has_previous:
7836 repoadd = repo_display.repoStr(repo_path_real)
7838 repo_path_prev = None
7840 repo_path_prev = portdb.getRepositoryPath(
7842 if repo_path_prev == repo_path_real:
7843 repoadd = repo_display.repoStr(repo_path_real)
7845 repoadd = "%s=>%s" % (
7846 repo_display.repoStr(repo_path_prev),
7847 repo_display.repoStr(repo_path_real))
7849 repoadd_set.add(repoadd)
7851 xs = [portage.cpv_getkey(pkg_key)] + \
7852 list(portage.catpkgsplit(pkg_key)[2:])
7859 if "COLUMNWIDTH" in self.settings:
7861 mywidth = int(self.settings["COLUMNWIDTH"])
7862 except ValueError, e:
7863 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7865 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7866 self.settings["COLUMNWIDTH"], noiselevel=-1)
7868 oldlp = mywidth - 30
7871 # Convert myoldbest from a list to a string.
7875 for pos, key in enumerate(myoldbest):
7876 key = portage.catpkgsplit(key)[2] + \
7877 "-" + portage.catpkgsplit(key)[3]
7878 if key[-3:] == "-r0":
7880 myoldbest[pos] = key
7881 myoldbest = blue("["+", ".join(myoldbest)+"]")
7884 root_config = self.roots[myroot]
7885 system_set = root_config.sets["system"]
7886 world_set = root_config.sets["world"]
7891 pkg_system = system_set.findAtomForPackage(pkg)
7892 pkg_world = world_set.findAtomForPackage(pkg)
7893 if not (oneshot or pkg_world) and \
7894 myroot == self.target_root and \
7895 favorites_set.findAtomForPackage(pkg):
7896 # Maybe it will be added to world now.
7897 if create_world_atom(pkg, favorites_set, root_config):
7899 except portage.exception.InvalidDependString:
7900 # This is reported elsewhere if relevant.
7903 def pkgprint(pkg_str):
7906 return colorize("PKG_MERGE_SYSTEM", pkg_str)
7908 return colorize("PKG_MERGE_WORLD", pkg_str)
7910 return colorize("PKG_MERGE", pkg_str)
7911 elif pkg_status == "uninstall":
7912 return colorize("PKG_UNINSTALL", pkg_str)
7915 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7917 return colorize("PKG_NOMERGE_WORLD", pkg_str)
7919 return colorize("PKG_NOMERGE", pkg_str)
7922 properties = flatten(use_reduce(paren_reduce(
7923 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7924 except portage.exception.InvalidDependString, e:
7925 if not pkg.installed:
7926 show_invalid_depstring_notice(pkg,
7927 pkg.metadata["PROPERTIES"], str(e))
7931 interactive = "interactive" in properties
7932 if interactive and pkg.operation == "merge":
7933 addl = colorize("WARN", "I") + addl[1:]
7935 counters.interactive += 1
7940 if "--columns" in self.myopts:
7941 if "--quiet" in self.myopts:
7942 myprint=addl+" "+indent+pkgprint(pkg_cp)
7943 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7944 myprint=myprint+myoldbest
7945 myprint=myprint+darkgreen("to "+x[1])
7949 myprint = "[%s] %s%s" % \
7950 (pkgprint(pkg_status.ljust(13)),
7951 indent, pkgprint(pkg.cp))
7953 myprint = "[%s %s] %s%s" % \
7954 (pkgprint(pkg.type_name), addl,
7955 indent, pkgprint(pkg.cp))
7956 if (newlp-nc_len(myprint)) > 0:
7957 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7958 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7959 if (oldlp-nc_len(myprint)) > 0:
7960 myprint=myprint+" "*(oldlp-nc_len(myprint))
7961 myprint=myprint+myoldbest
7962 myprint += darkgreen("to " + pkg.root)
7965 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7967 myprint = "[" + pkg_type + " " + addl + "] "
7968 myprint += indent + pkgprint(pkg_key) + " " + \
7969 myoldbest + darkgreen("to " + myroot)
7971 if "--columns" in self.myopts:
7972 if "--quiet" in self.myopts:
7973 myprint=addl+" "+indent+pkgprint(pkg_cp)
7974 myprint=myprint+" "+green(xs[1]+xs[2])+" "
7975 myprint=myprint+myoldbest
7979 myprint = "[%s] %s%s" % \
7980 (pkgprint(pkg_status.ljust(13)),
7981 indent, pkgprint(pkg.cp))
7983 myprint = "[%s %s] %s%s" % \
7984 (pkgprint(pkg.type_name), addl,
7985 indent, pkgprint(pkg.cp))
7986 if (newlp-nc_len(myprint)) > 0:
7987 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7988 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7989 if (oldlp-nc_len(myprint)) > 0:
7990 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7991 myprint += myoldbest
7994 myprint = "[%s] %s%s %s" % \
7995 (pkgprint(pkg_status.ljust(13)),
7996 indent, pkgprint(pkg.cpv),
7999 myprint = "[%s %s] %s%s %s" % \
8000 (pkgprint(pkg_type), addl, indent,
8001 pkgprint(pkg.cpv), myoldbest)
8003 if columns and pkg.operation == "uninstall":
8005 p.append((myprint, verboseadd, repoadd))
8007 if "--tree" not in self.myopts and \
8008 "--quiet" not in self.myopts and \
8009 not self._opts_no_restart.intersection(self.myopts) and \
8010 pkg.root == self._running_root.root and \
8011 portage.match_from_list(
8012 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8013 not vardb.cpv_exists(pkg.cpv) and \
8014 "--quiet" not in self.myopts:
8015 if mylist_index < len(mylist) - 1:
8016 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8017 p.append(colorize("WARN", " then resume the merge."))
8020 show_repos = repoadd_set and repoadd_set != set(["0"])
8023 if isinstance(x, basestring):
8024 out.write("%s\n" % (x,))
8027 myprint, verboseadd, repoadd = x
8030 myprint += " " + verboseadd
8032 if show_repos and repoadd:
8033 myprint += " " + teal("[%s]" % repoadd)
8035 out.write("%s\n" % (myprint,))
8044 sys.stdout.write(str(repo_display))
8046 if "--changelog" in self.myopts:
8048 for revision,text in changelogs:
8049 print bold('*'+revision)
8050 sys.stdout.write(text)
# Public entry point that reports dependency-graph problems (slot collisions,
# unsatisfied deps, masked installed packages) after the merge list is shown.
# NOTE(review): this chunk is a line-numbered, partially sampled listing; some
# original lines (e.g. 8056, 8062, 8065-8066, 8075-8078) are elided, so only
# comments are added here and the visible code is left byte-identical.
8055 def display_problems(self):
8057 Display problems with the dependency graph such as slot collisions.
8058 This is called internally by display() to show the problems _after_
8059 the merge list where it is most likely to be seen, but if display()
8060 is not going to be called then this method should be called explicitly
8061 to ensure that the user is notified of problems with the graph.
8063 All output goes to stderr, except for unsatisfied dependencies which
8064 go to stdout for parsing by programs such as autounmask.
8067 # Note that show_masked_packages() sends its output to
8068 # stdout, and some programs such as autounmask parse the
8069 # output in cases when emerge bails out. However, when
8070 # show_masked_packages() is called for installed packages
8071 # here, the message is a warning that is more appropriate
8072 # to send to stderr, so temporarily redirect stdout to
8073 # stderr. TODO: Fix output code so there's a cleaner way
8074 # to redirect everything to stderr.
# (elided lines presumably save sys.stdout and open a try block whose
#  finally restores it -- confirm against the full source)
8079 sys.stdout = sys.stderr
8080 self._display_problems()
8086 # This goes to stdout for parsing by programs like autounmask.
8087 for pargs, kwargs in self._unsatisfied_deps_for_display:
8088 self._show_unsatisfied_dep(*pargs, **kwargs)
# Internal worker for display_problems(): emits circular-dep, blocker,
# slot-collision, missing-argument, package.provided and masked-installed
# warnings to stderr. Listing is fragmented (several original lines elided);
# comments only, code untouched.
8090 def _display_problems(self):
8091 if self._circular_deps_for_display is not None:
8092 self._show_circular_deps(
8093 self._circular_deps_for_display)
8095 # The user is only notified of a slot conflict if
8096 # there are no unresolvable blocker conflicts.
8097 if self._unsatisfied_blockers_for_display is not None:
8098 self._show_unsatisfied_blockers(
8099 self._unsatisfied_blockers_for_display)
8101 self._show_slot_collision_notice()
8103 # TODO: Add generic support for "set problem" handlers so that
8104 # the below warnings aren't special cases for world only.
8106 if self._missing_args:
8107 world_problems = False
8108 if "world" in self._sets:
8109 # Filter out indirect members of world (from nested sets)
8110 # since only direct members of world are desired here.
8111 world_set = self.roots[self.target_root].sets["world"]
8112 for arg, atom in self._missing_args:
8113 if arg.name == "world" and atom in world_set:
8114 world_problems = True
# (elided: break / "if world_problems:" guard before the emaint hint)
8118 sys.stderr.write("\n!!! Problems have been " + \
8119 "detected with your world file\n")
8120 sys.stderr.write("!!! Please run " + \
8121 green("emaint --check world")+"\n\n")
8123 if self._missing_args:
8124 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8125 " Ebuilds for the following packages are either all\n")
8126 sys.stderr.write(colorize("BAD", "!!!") + \
8127 " masked or don't exist:\n")
8128 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8129 self._missing_args) + "\n")
8131 if self._pprovided_args:
# (elided: arg_refs = {} initialization -- see setdefault() use below)
8133 for arg, atom in self._pprovided_args:
8134 if isinstance(arg, SetArg):
# (elided: parent assignment for the set case)
8136 arg_atom = (atom, atom)
8139 arg_atom = (arg.arg, atom)
8140 refs = arg_refs.setdefault(arg_atom, [])
8141 if parent not in refs:
# (elided: refs.append(parent); msg = [] setup)
8144 msg.append(bad("\nWARNING: "))
8145 if len(self._pprovided_args) > 1:
8146 msg.append("Requested packages will not be " + \
8147 "merged because they are listed in\n")
8149 msg.append("A requested package will not be " + \
8150 "merged because it is listed in\n")
8151 msg.append("package.provided:\n\n")
8152 problems_sets = set()
8153 for (arg, atom), refs in arg_refs.iteritems():
8156 problems_sets.update(refs)
8158 ref_string = ", ".join(["'%s'" % name for name in refs])
8159 ref_string = " pulled in by " + ref_string
8160 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8162 if "world" in problems_sets:
8163 msg.append("This problem can be solved in one of the following ways:\n\n")
8164 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8165 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8166 msg.append(" C) Remove offending entries from package.provided.\n\n")
8167 msg.append("The best course of action depends on the reason that an offending\n")
8168 msg.append("package.provided entry exists.\n\n")
8169 sys.stderr.write("".join(msg))
8171 masked_packages = []
8172 for pkg in self._masked_installed:
8173 root_config = pkg.root_config
8174 pkgsettings = self.pkgsettings[pkg.root]
8175 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8176 masked_packages.append((root_config, pkgsettings,
8177 pkg.cpv, pkg.metadata, mreasons))
# (elided: "if masked_packages:" guard -- confirm)
8179 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8180 " The following installed packages are masked:\n")
8181 show_masked_packages(masked_packages)
# Extract the ChangeLog entries between the installed version (current) and
# the version about to be merged (next), by slicing the parsed division list.
# Listing is fragmented; comments only.
8185 def calc_changelog(self,ebuildpath,current,next):
8186 if ebuildpath == None or not os.path.exists(ebuildpath):
# (elided: early return for missing ebuild)
8188 current = '-'.join(portage.catpkgsplit(current)[1:])
8189 if current.endswith('-r0'):
8190 current = current[:-3]
8191 next = '-'.join(portage.catpkgsplit(next)[1:])
8192 if next.endswith('-r0'):
# (elided: next = next[:-3])
8194 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8196 changelog = open(changelogpath).read()
8197 except SystemExit, e:
8198 raise # Needed else can't exit
8201 divisions = self.find_changelog_tags(changelog)
8202 #print 'XX from',current,'to',next
8203 #for div,text in divisions: print 'XX',div
8204 # skip entries for all revisions above the one we are about to emerge
8205 for i in range(len(divisions)):
8206 if divisions[i][0]==next:
8207 divisions = divisions[i:]
8209 # find out how many entries we are going to display
8210 for i in range(len(divisions)):
8211 if divisions[i][0]==current:
8212 divisions = divisions[:i]
8215 # couldnt find the current revision in the list. display nothing
# Split a ChangeLog text into (release, entry_text) pairs, keyed on the
# "*<version>.ebuild" header lines; "-r0" suffixes are normalized away.
# Listing is fragmented (loop header and return elided); comments only.
8219 def find_changelog_tags(self,changelog):
8223 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# no further header found: flush the remaining text under the last release
8225 if release is not None:
8226 divs.append((release,changelog))
8228 if release is not None:
8229 divs.append((release,changelog[:match.start()]))
8230 changelog = changelog[match.end():]
8231 release = match.group(1)
8232 if release.endswith('.ebuild'):
8233 release = release[:-7]
8234 if release.endswith('-r0'):
8235 release = release[:-3]
# Record requested-but-not-merged atoms (and world-candidate sets) in the
# world favorites file, unless a pretend/oneshot-style option is in effect.
# Listing is fragmented; comments only.
8237 def saveNomergeFavorites(self):
8238 """Find atoms in favorites that are not in the mergelist and add them
8239 to the world file if necessary."""
8240 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8241 "--oneshot", "--onlydeps", "--pretend"):
8242 if x in self.myopts:
# (elided: return -- these options suppress world-file updates)
8244 root_config = self.roots[self.target_root]
8245 world_set = root_config.sets["world"]
8247 world_locked = False
8248 if hasattr(world_set, "lock"):
# (elided: world_set.lock(); world_locked = True)
8252 if hasattr(world_set, "load"):
8253 world_set.load() # maybe it's changed on disk
8255 args_set = self._sets["args"]
8256 portdb = self.trees[self.target_root]["porttree"].dbapi
8257 added_favorites = set()
8258 for x in self._set_nodes:
8259 pkg_type, root, pkg_key, pkg_status = x
8260 if pkg_status != "nomerge":
# (elided: continue -- only "nomerge" nodes are candidates)
8264 myfavkey = create_world_atom(x, args_set, root_config)
# (elided: "if myfavkey:" guard -- confirm)
8266 if myfavkey in added_favorites:
8268 added_favorites.add(myfavkey)
8269 except portage.exception.InvalidDependString, e:
8270 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8271 (pkg_key, str(e)), noiselevel=-1)
8272 writemsg("!!! see '%s'\n\n" % os.path.join(
8273 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# (elided: all_added = [] initialization)
8276 for k in self._sets:
8277 if k in ("args", "world") or not root_config.sets[k].world_candidate:
# (elided: continue)
8282 all_added.append(SETPREFIX + k)
8283 all_added.extend(added_favorites)
8286 print ">>> Recording %s in \"world\" favorites file..." % \
8287 colorize("INFORM", str(a))
# (elided: "if all_added:" guard; unlock in a finally -- confirm)
8289 world_set.update(all_added)
# Rebuild the dependency graph from a saved "resume" mergelist, validating
# each entry against the current trees; raises PackageNotFound for packages
# that no longer exist and UnsatisfiedResumeDep when --skipfirst dropped a
# required package. Listing is fragmented; comments only.
8294 def loadResumeCommand(self, resume_data, skip_masked=False):
8296 Add a resume command to the graph and validate it in the process. This
8297 will raise a PackageNotFound exception if a package is not available.
8300 if not isinstance(resume_data, dict):
# (elided: return False)
8303 mergelist = resume_data.get("mergelist")
8304 if not isinstance(mergelist, list):
# (elided: return False)
8307 fakedb = self.mydbapi
8309 serialized_tasks = []
# (elided: masked_tasks = []; trees = ...; "for x in mergelist:" -- confirm)
8312 if not (isinstance(x, list) and len(x) == 4):
8314 pkg_type, myroot, pkg_key, action = x
8315 if pkg_type not in self.pkg_tree_map:
8317 if action != "merge":
8319 tree_type = self.pkg_tree_map[pkg_type]
8320 mydb = trees[myroot][tree_type].dbapi
8321 db_keys = list(self._trees_orig[myroot][
8322 tree_type].dbapi._aux_cache_keys)
# (elided: try: around the aux_get lookup)
8324 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8326 # It does no exist or it is corrupt.
8327 if action == "uninstall":
# (elided: continue for the uninstall case)
8329 raise portage.exception.PackageNotFound(pkg_key)
8330 installed = action == "uninstall"
8331 built = pkg_type != "ebuild"
8332 root_config = self.roots[myroot]
8333 pkg = Package(built=built, cpv=pkg_key,
8334 installed=installed, metadata=metadata,
8335 operation=action, root_config=root_config,
# (elided: type_name=pkg_type closing argument -- confirm)
8337 if pkg_type == "ebuild":
8338 pkgsettings = self.pkgsettings[myroot]
8339 pkgsettings.setcpv(pkg)
8340 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8341 self._pkg_cache[pkg] = pkg
8343 root_config = self.roots[pkg.root]
8344 if "merge" == pkg.operation and \
8345 not visible(root_config.settings, pkg):
# masked packages either become masked_tasks (skip_masked) or are
# queued for an unsatisfied-dep display -- guard lines elided
8347 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8349 self._unsatisfied_deps_for_display.append(
8350 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8352 fakedb[myroot].cpv_inject(pkg)
8353 serialized_tasks.append(pkg)
8354 self.spinner.update()
8356 if self._unsatisfied_deps_for_display:
# (elided: return False)
8359 if not serialized_tasks or "--nodeps" in self.myopts:
8360 self._serialized_tasks_cache = serialized_tasks
8361 self._scheduler_graph = self.digraph
# else-branch: replay the tasks through the normal graph machinery
8363 self._select_package = self._select_pkg_from_graph
8364 self.myparams.add("selective")
8366 favorites = resume_data.get("favorites")
8367 args_set = self._sets["args"]
8368 if isinstance(favorites, list):
8369 args = self._load_favorites(favorites)
# (elided: else: args = [])
8373 for task in serialized_tasks:
8374 if isinstance(task, Package) and \
8375 task.operation == "merge":
8376 if not self._add_pkg(task, None):
# (elided: return False)
8379 # Packages for argument atoms need to be explicitly
8380 # added via _add_pkg() so that they are included in the
8381 # digraph (needed at least for --tree display).
# (elided: "for arg in args:" loop header)
8383 for atom in arg.set:
8384 pkg, existing_node = self._select_package(
8385 arg.root_config.root, atom)
8386 if existing_node is None and \
# (elided: "pkg is not None:" continuation -- confirm)
8388 if not self._add_pkg(pkg, Dependency(atom=atom,
8389 root=pkg.root, parent=arg)):
# (elided: return False)
8392 # Allow unsatisfied deps here to avoid showing a masking
8393 # message for an unsatisfied dep that isn't necessarily
# (elided: rest of comment + "masked.")
8395 if not self._create_graph(allow_unsatisfied=True):
# (elided: return False)
8397 if masked_tasks or self._unsatisfied_deps:
8398 # This probably means that a required package
8399 # was dropped via --skipfirst. It makes the
8400 # resume list invalid, so convert it to a
8401 # UnsatisfiedResumeDep exception.
8402 raise self.UnsatisfiedResumeDep(self,
8403 masked_tasks + self._unsatisfied_deps)
8404 self._serialized_tasks_cache = None
# (elided: try/except wrapper around _resolve -- confirm)
8407 except self._unknown_internal_error:
# (elided: return False)
# Recreate DependencyArg instances (SetArg/AtomArg) from a saved favorites
# list so resumed packages can be matched to their original arguments.
# Listing is fragmented; comments only.
8412 def _load_favorites(self, favorites):
8414 Use a list of favorites to resume state from a
8415 previous select_files() call. This creates similar
8416 DependencyArg instances to those that would have
8417 been created by the original select_files() call.
8418 This allows Package instances to be matched with
8419 DependencyArg instances during graph creation.
8421 root_config = self.roots[self.target_root]
8422 getSetAtoms = root_config.setconfig.getSetAtoms
8423 sets = root_config.sets
# (elided: args = []; "for x in favorites:" loop header)
8426 if not isinstance(x, basestring):
# (elided: continue -- skip non-string entries)
8428 if x in ("system", "world"):
# (elided: normalize bare set names to SETPREFIX form -- confirm)
8430 if x.startswith(SETPREFIX):
8431 s = x[len(SETPREFIX):]
# (elided: "if s not in sets: continue" style guard -- confirm)
8436 # Recursively expand sets so that containment tests in
8437 # self._get_parent_sets() properly match atoms in nested
8438 # sets (like if world contains system).
8439 expanded_set = InternalPackageSet(
8440 initial_atoms=getSetAtoms(s))
8441 self._sets[s] = expanded_set
8442 args.append(SetArg(arg=x, set=expanded_set,
8443 root_config=root_config))
8445 if not portage.isvalidatom(x):
# (elided: continue -- drop invalid atoms)
8447 args.append(AtomArg(arg=x, atom=x,
8448 root_config=root_config))
8450 # Create the "args" package set from atoms and
8451 # packages given as arguments.
8452 args_set = self._sets["args"]
# (elided: "for arg in args:" loop header)
8454 if not isinstance(arg, (AtomArg, PackageArg)):
# (elided: continue; myatom = arg.atom)
8457 if myatom in args_set:
# (elided: continue -- already recorded)
8459 args_set.add(myatom)
8460 self._set_atoms.update(chain(*self._sets.itervalues()))
8461 atom_arg_map = self._atom_arg_map
# (elided: outer "for arg in args:" loop header)
8463 for atom in arg.set:
8464 atom_key = (atom, arg.root_config.root)
8465 refs = atom_arg_map.get(atom_key)
# (elided: "if refs is None:" -- create list on first sight)
8468 atom_arg_map[atom_key] = refs
# (elided: refs.append(arg) if missing; return args)
# Exception carrying the depgraph plus the list of dropped/masked tasks that
# invalidated a resume list (typically after --skipfirst).
8473 class UnsatisfiedResumeDep(portage.exception.PortageException):
8475 A dependency of a resume list is not installed. This
8476 can occur when a required package is dropped from the
8477 merge list via --skipfirst.
8479 def __init__(self, depgraph, value):
8480 portage.exception.PortageException.__init__(self, value)
8481 self.depgraph = depgraph
# Base class for the depgraph's internal control-flow exceptions below;
# value defaults to an empty string.
8483 class _internal_exception(portage.exception.PortageException):
8484 def __init__(self, value=""):
8485 portage.exception.PortageException.__init__(self, value)
# Raised internally to abort graph creation after the reason has already
# been printed to stderr (docstring tail elided in this listing).
8487 class _unknown_internal_error(_internal_exception):
8489 Used by the depgraph internally to terminate graph creation.
8490 The specific reason for the failure should have been dumped
8491 to stderr, unfortunately, the exact reason for the failure
# Signal that _serialize_tasks() must be re-run (e.g. after neglected
# dependencies were added to the graph).
8495 class _serialize_tasks_retry(_internal_exception):
8497 This is raised by the _serialize_tasks() method when it needs to
8498 be called again for some reason. The only case that it's currently
8499 used for is when neglected dependencies need to be added to the
8500 graph in order to avoid making a potentially unsafe decision.
# A dbapi facade over the depgraph's package selection, used by dep_check();
# caches match() results per atom and maps matched cpvs back to Package
# objects for aux_get(). Listing is fragmented; comments only.
8503 class _dep_check_composite_db(portage.dbapi):
8505 A dbapi-like interface that is optimized for use in dep_check() calls.
8506 This is built on top of the existing depgraph package selection logic.
8507 Some packages that have been added to the graph may be masked from this
8508 view in order to influence the atom preference selection that occurs
# (elided: docstring tail "via dep_check().")
8511 def __init__(self, depgraph, root):
8512 portage.dbapi.__init__(self)
8513 self._depgraph = depgraph
# (elided: self._root = root)
8515 self._match_cache = {}
8516 self._cpv_pkg_map = {}
8518 def match(self, atom):
8519 ret = self._match_cache.get(atom)
# (elided: cache-hit return; orig_atom = atom; ret = [] setup)
8524 atom = self._dep_expand(atom)
8525 pkg, existing = self._depgraph._select_package(self._root, atom)
# (elided: "if pkg is not None:" guard and slots = set() -- confirm)
8529 # Return the highest available from select_package() as well as
8530 # any matching slots in the graph db.
8532 slots.add(pkg.metadata["SLOT"])
8533 atom_cp = portage.dep_getkey(atom)
8534 if pkg.cp.startswith("virtual/"):
8535 # For new-style virtual lookahead that occurs inside
8536 # dep_check(), examine all slots. This is needed
8537 # so that newer slots will not unnecessarily be pulled in
8538 # when a satisfying lower slot is already installed. For
8539 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8540 # there's no need to pull in a newer slot to satisfy a
8541 # virtual/jdk dependency.
8542 for db, pkg_type, built, installed, db_keys in \
8543 self._depgraph._filtered_trees[self._root]["dbs"]:
8544 for cpv in db.match(atom):
8545 if portage.cpv_getkey(cpv) != pkg.cp:
# (elided: continue)
8547 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8549 if self._visible(pkg):
8550 self._cpv_pkg_map[pkg.cpv] = pkg
# (elided: ret.append(pkg.cpv))
8552 slots.remove(pkg.metadata["SLOT"])
# (elided: "while slots:" loop header -- confirm)
8554 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8555 pkg, existing = self._depgraph._select_package(
8556 self._root, slot_atom)
# (elided: "if not pkg: continue" style guard)
8559 if not self._visible(pkg):
# (elided: continue)
8561 self._cpv_pkg_map[pkg.cpv] = pkg
# (elided: ret.append(pkg.cpv); "if ret:" guard)
8564 self._cpv_sort_ascending(ret)
8565 self._match_cache[orig_atom] = ret
# (elided: return ret[:])
8568 def _visible(self, pkg):
# Installed packages are only "visible" in selective mode; otherwise
# fall through to argument/visibility checks (guards partially elided).
8569 if pkg.installed and "selective" not in self._depgraph.myparams:
8571 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8572 except (StopIteration, portage.exception.InvalidDependString):
# (elided: arg = None and subsequent branches)
8579 self._depgraph.pkgsettings[pkg.root], pkg):
# (elided: return False / True branches)
8581 except portage.exception.InvalidDependString:
# (elided: pass/return -- confirm)
8585 def _dep_expand(self, atom):
8587 This is only needed for old installed packages that may
8588 contain atoms that are not fully qualified with a specific
8589 category. Emulate the cpv_expand() function that's used by
8590 dbapi.match() in cases like this. If there are multiple
8591 matches, it's often due to a new-style virtual that has
8592 been added, so try to filter those out to avoid raising
# (elided: docstring tail "an AmbiguousPackageName exception.")
8595 root_config = self._depgraph.roots[self._root]
# (elided: "if atom has a category: return atom" fast path -- confirm)
8597 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8598 if len(expanded_atoms) > 1:
8599 non_virtual_atoms = []
8600 for x in expanded_atoms:
8601 if not portage.dep_getkey(x).startswith("virtual/"):
8602 non_virtual_atoms.append(x)
8603 if len(non_virtual_atoms) == 1:
8604 expanded_atoms = non_virtual_atoms
8605 if len(expanded_atoms) > 1:
8606 # compatible with portage.cpv_expand()
8607 raise portage.exception.AmbiguousPackageName(
8608 [portage.dep_getkey(x) for x in expanded_atoms])
# (elided: "if expanded_atoms:" branch header)
8610 atom = expanded_atoms[0]
# else-branch: no match at all -- probe virtuals via a "null" category
8612 null_atom = insert_category_into_atom(atom, "null")
8613 null_cp = portage.dep_getkey(null_atom)
8614 cat, atom_pn = portage.catsplit(null_cp)
8615 virts_p = root_config.settings.get_virts_p().get(atom_pn)
# (elided: "if virts_p:" guard)
8617 # Allow the resolver to choose which virtual.
8618 atom = insert_category_into_atom(atom, "virtual")
8620 atom = insert_category_into_atom(atom, "null")
# (elided: return atom)
8623 def aux_get(self, cpv, wants):
8624 metadata = self._cpv_pkg_map[cpv].metadata
8625 return [metadata.get(x, "") for x in wants]
# Maps repository paths (PORTDIR + overlays) to small display indices like
# "[0]", "[1]", with "[?]" for unknown repos; str() renders the legend.
# Listing is fragmented; comments only.
8627 class RepoDisplay(object):
8628 def __init__(self, roots):
8629 self._shown_repos = {}
8630 self._unknown_repo = False
# (elided: repo_paths = set())
8632 for root_config in roots.itervalues():
8633 portdir = root_config.settings.get("PORTDIR")
# (elided: "if portdir:" guard)
8635 repo_paths.add(portdir)
8636 overlays = root_config.settings.get("PORTDIR_OVERLAY")
# (elided: "if overlays:" guard)
8638 repo_paths.update(overlays.split())
8639 repo_paths = list(repo_paths)
8640 self._repo_paths = repo_paths
8641 self._repo_paths_real = [ os.path.realpath(repo_path) \
8642 for repo_path in repo_paths ]
8644 # pre-allocate index for PORTDIR so that it always has index 0.
8645 for root_config in roots.itervalues():
8646 portdb = root_config.trees["porttree"].dbapi
8647 portdir = portdb.porttree_root
# (elided: "if portdir:" guard)
8649 self.repoStr(portdir)
8651 def repoStr(self, repo_path_real):
# (elided: "if not repo_path_real: return" style guards -- confirm)
8654 real_index = self._repo_paths_real.index(repo_path_real)
8655 if real_index == -1:
# unknown repository: remember it so __str__ prints the "[?]" legend
8657 self._unknown_repo = True
8659 shown_repos = self._shown_repos
8660 repo_paths = self._repo_paths
8661 repo_path = repo_paths[real_index]
8662 index = shown_repos.get(repo_path)
# (elided: "if index is None:" -- assign next sequential index)
8664 index = len(shown_repos)
8665 shown_repos[repo_path] = index
# (elided: return str(index))
# (elided: def __str__(self): and output = [] setup)
8671 shown_repos = self._shown_repos
8672 unknown_repo = self._unknown_repo
8673 if shown_repos or self._unknown_repo:
8674 output.append("Portage tree and overlays:\n")
8675 show_repo_paths = list(shown_repos)
8676 for repo_path, repo_index in shown_repos.iteritems():
8677 show_repo_paths[repo_index] = repo_path
# (elided: "if show_repo_paths:" guard)
8679 for index, repo_path in enumerate(show_repo_paths):
8680 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
# (elided: "if unknown_repo:" guard)
8682 output.append(" "+teal("[?]") + \
8683 " indicates that the source repository could not be determined\n")
8684 return "".join(output)
# Tallies merge-list statistics (upgrades, downgrades, new, reinstalls,
# uninstalls, blocks, fetch restrictions, interactive, download size) and
# formats the familiar "Total: N packages (...)" summary line in __str__.
# Listing is fragmented (most counter initializers elided); comments only.
8686 class PackageCounters(object):
# (elided: def __init__ and initializers for upgrades/downgrades/new/
#  newslot/reinst/uninst/blocks/totalsize -- inferred from __str__ below,
#  confirm against full source)
8696 self.blocks_satisfied = 0
8698 self.restrict_fetch = 0
8699 self.restrict_fetch_satisfied = 0
8700 self.interactive = 0
# (elided: def __str__(self): header)
8703 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
# (elided: myoutput = []; details = [] setup)
8706 myoutput.append("Total: %s package" % total_installs)
8707 if total_installs != 1:
8708 myoutput.append("s")
8709 if total_installs != 0:
8710 myoutput.append(" (")
8711 if self.upgrades > 0:
8712 details.append("%s upgrade" % self.upgrades)
8713 if self.upgrades > 1:
# (elided: details[-1] += "s" pluralization)
8715 if self.downgrades > 0:
8716 details.append("%s downgrade" % self.downgrades)
8717 if self.downgrades > 1:
# (elided: pluralization; "if self.new > 0:" guard)
8720 details.append("%s new" % self.new)
8721 if self.newslot > 0:
8722 details.append("%s in new slot" % self.newslot)
8723 if self.newslot > 1:
# (elided: pluralization; "if self.reinst > 0:" guard)
8726 details.append("%s reinstall" % self.reinst)
# (elided: pluralization; "if self.uninst > 0:" guard)
8730 details.append("%s uninstall" % self.uninst)
# (elided: pluralization)
8733 if self.interactive > 0:
8734 details.append("%s %s" % (self.interactive,
8735 colorize("WARN", "interactive")))
8736 myoutput.append(", ".join(details))
8737 if total_installs != 0:
8738 myoutput.append(")")
8739 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8740 if self.restrict_fetch:
8741 myoutput.append("\nFetch Restriction: %s package" % \
8742 self.restrict_fetch)
8743 if self.restrict_fetch > 1:
8744 myoutput.append("s")
8745 if self.restrict_fetch_satisfied < self.restrict_fetch:
8746 myoutput.append(bad(" (%s unsatisfied)") % \
8747 (self.restrict_fetch - self.restrict_fetch_satisfied))
# (elided: "if self.blocks:" guard)
8749 myoutput.append("\nConflict: %s block" % \
# (elided: self.blocks argument continuation; "if self.blocks > 1:" guard)
8752 myoutput.append("s")
8753 if self.blocks_satisfied < self.blocks:
8754 myoutput.append(bad(" (%s unsatisfied)") % \
8755 (self.blocks - self.blocks_satisfied))
8756 return "".join(myoutput)
# Emulates the select.poll() interface on top of select.select() for
# platforms without poll(); only POLLIN is actually reported.
# Listing is fragmented; comments only.
8758 class PollSelectAdapter(PollConstants):
8761 Use select to emulate a poll object, for
8762 systems that don't support poll().
# (elided: def __init__(self): header)
8766 self._registered = {}
8767 self._select_args = [[], [], []]
8769 def register(self, fd, *args):
8771 Only POLLIN is currently supported!
# (elided: "if len(args) > 1: raise TypeError(" -- confirm)
8775 "register expected at most 2 arguments, got " + \
8776 repr(1 + len(args)))
8778 eventmask = PollConstants.POLLIN | \
8779 PollConstants.POLLPRI | PollConstants.POLLOUT
# (elided: "if args: eventmask = args[0]" -- confirm)
8783 self._registered[fd] = eventmask
# invalidate the cached select() argument lists
8784 self._select_args = None
8786 def unregister(self, fd):
8787 self._select_args = None
8788 del self._registered[fd]
8790 def poll(self, *args):
# (elided: "if len(args) > 1: raise TypeError(" -- confirm)
8793 "poll expected at most 2 arguments, got " + \
8794 repr(1 + len(args)))
# (elided: timeout = None / timeout = args[0] extraction)
8800 select_args = self._select_args
8801 if select_args is None:
8802 select_args = [self._registered.keys(), [], []]
8804 if timeout is not None:
8805 select_args = select_args[:]
8806 # Translate poll() timeout args to select() timeout args:
# |
8808 # | units | value(s) for indefinite block
8809 # ---------|--------------|------------------------------
8810 # poll | milliseconds | omitted, negative, or None
8811 # ---------|--------------|------------------------------
8812 # select | seconds | omitted
8813 # ---------|--------------|------------------------------
8815 if timeout is not None and timeout < 0:
# negative poll() timeout means "block indefinitely" -> drop it
8817 if timeout is not None:
8818 select_args.append(timeout / 1000)
8820 select_events = select.select(*select_args)
# (elided: poll_events = [] setup)
8822 for fd in select_events[0]:
8823 poll_events.append((fd, PollConstants.POLLIN))
# (elided: return poll_events)
# A FIFO of tasks executed with at most max_jobs running concurrently;
# schedule() starts tasks and _task_exit() (registered as an exit listener)
# prunes finished ones. Listing is fragmented; comments only.
8826 class SequentialTaskQueue(SlotObject):
8828 __slots__ = ("max_jobs", "running_tasks") + \
8829 ("_dirty", "_scheduling", "_task_queue")
8831 def __init__(self, **kwargs):
8832 SlotObject.__init__(self, **kwargs)
8833 self._task_queue = deque()
8834 self.running_tasks = set()
8835 if self.max_jobs is None:
# (elided: self.max_jobs = 1 default -- confirm)
8839 def add(self, task):
8840 self._task_queue.append(task)
# (elided: self._dirty = True / schedule trigger)
8843 def addFront(self, task):
8844 self._task_queue.appendleft(task)
# (elided: dirty flag; def schedule(self): header and guard lines)
8855 if self._scheduling:
8856 # Ignore any recursive schedule() calls triggered via
8857 # self._task_exit().
# (elided: return)
8860 self._scheduling = True
8862 task_queue = self._task_queue
8863 running_tasks = self.running_tasks
8864 max_jobs = self.max_jobs
8865 state_changed = False
8867 while task_queue and \
8868 (max_jobs is True or len(running_tasks) < max_jobs):
8869 task = task_queue.popleft()
8870 cancelled = getattr(task, "cancelled", None)
# (elided: "if not cancelled:" guard and task.start() -- confirm)
8872 running_tasks.add(task)
8873 task.addExitListener(self._task_exit)
8875 state_changed = True
8878 self._scheduling = False
8880 return state_changed
8882 def _task_exit(self, task):
8884 Since we can always rely on exit listeners being called, the set of
8885 running tasks is always pruned automatically and there is never any need
8886 to actively prune it.
8888 self.running_tasks.remove(task)
8889 if self._task_queue:
# (elided: self.schedule() retrigger; def clear(self): header)
8893 self._task_queue.clear()
8894 running_tasks = self.running_tasks
8895 while running_tasks:
8896 task = running_tasks.pop()
8897 task.removeExitListener(self._task_exit)
# (elided: task.cancel() -- confirm)
8901 def __nonzero__(self):
8902 return bool(self._task_queue or self.running_tasks)
# (elided: def __len__(self): header)
8905 return len(self._task_queue) + len(self.running_tasks)
# Module-level memo for can_poll_device(); None means "not yet probed".
8907 _can_poll_device = None
8909 def can_poll_device():
8911 Test if it's possible to use poll() on a device such as a pty. This
8912 is known to fail on Darwin.
8914 @returns: True if poll() on a device succeeds, False otherwise.
8917 global _can_poll_device
8918 if _can_poll_device is not None:
8919 return _can_poll_device
8921 if not hasattr(select, "poll"):
8922 _can_poll_device = False
8923 return _can_poll_device
# (elided: try: around opening /dev/null -- confirm)
8926 dev_null = open('/dev/null', 'rb')
# except-branch: no /dev/null available -> poll on devices unusable
8928 _can_poll_device = False
8929 return _can_poll_device
# (elided: p = select.poll())
8932 p.register(dev_null.fileno(), PollConstants.POLLIN)
8934 invalid_request = False
8935 for f, event in p.poll():
8936 if event & PollConstants.POLLNVAL:
8937 invalid_request = True
# (elided: break; dev_null.close())
# POLLNVAL on a valid fd (as seen on Darwin) means poll() is broken here
8941 _can_poll_device = not invalid_request
8942 return _can_poll_device
# Factory: prefer the real select.poll(), fall back to the select()-based
# adapter when poll() is missing or broken on devices.
8944 def create_poll_instance():
8946 Create an instance of select.poll, or an instance of
8947 PollSelectAdapter there is no poll() implementation or
8948 it is broken somehow.
8950 if can_poll_device():
8951 return select.poll()
8952 return PollSelectAdapter()
8954 class PollScheduler(object):
8956 class _sched_iface_class(SlotObject):
8957 __slots__ = ("register", "schedule", "unregister")
8961 self._max_load = None
8963 self._poll_event_queue = []
8964 self._poll_event_handlers = {}
8965 self._poll_event_handler_ids = {}
8966 # Increment id for each new handler.
8967 self._event_handler_id = 0
8968 self._poll_obj = create_poll_instance()
8969 self._scheduling = False
8971 def _schedule(self):
8973 Calls _schedule_tasks() and automatically returns early from
8974 any recursive calls to this method that the _schedule_tasks()
8975 call might trigger. This makes _schedule() safe to call from
8976 inside exit listeners.
8978 if self._scheduling:
8980 self._scheduling = True
8982 return self._schedule_tasks()
8984 self._scheduling = False
8986 def _running_job_count(self):
8989 def _can_add_job(self):
8990 max_jobs = self._max_jobs
8991 max_load = self._max_load
8993 if self._max_jobs is not True and \
8994 self._running_job_count() >= self._max_jobs:
8997 if max_load is not None and \
8998 (max_jobs is True or max_jobs > 1) and \
8999 self._running_job_count() >= 1:
9001 avg1, avg5, avg15 = os.getloadavg()
9002 except (AttributeError, OSError), e:
9003 writemsg("!!! getloadavg() failed: %s\n" % (e,),
9008 if avg1 >= max_load:
9013 def _poll(self, timeout=None):
9015 All poll() calls pass through here. The poll events
9016 are added directly to self._poll_event_queue.
9017 In order to avoid endless blocking, this raises
9018 StopIteration if timeout is None and there are
9019 no file descriptors to poll.
9021 if not self._poll_event_handlers:
9023 if timeout is None and \
9024 not self._poll_event_handlers:
9025 raise StopIteration(
9026 "timeout is None and there are no poll() event handlers")
9028 # The following error is known to occur with Linux kernel versions
9031 # select.error: (4, 'Interrupted system call')
9033 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9034 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9035 # without any events.
9038 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9040 except select.error, e:
9041 writemsg_level("\n!!! select error: %s\n" % (e,),
9042 level=logging.ERROR, noiselevel=-1)
9044 if timeout is not None:
9047 def _next_poll_event(self, timeout=None):
9049 Since the _schedule_wait() loop is called by event
9050 handlers from _poll_loop(), maintain a central event
9051 queue for both of them to share events from a single
9052 poll() call. In order to avoid endless blocking, this
9053 raises StopIteration if timeout is None and there are
9054 no file descriptors to poll.
9056 if not self._poll_event_queue:
9058 return self._poll_event_queue.pop()
9060 def _poll_loop(self):
9062 event_handlers = self._poll_event_handlers
9063 event_handled = False
9066 while event_handlers:
9067 f, event = self._next_poll_event()
9068 handler, reg_id = event_handlers[f]
9070 event_handled = True
9071 except StopIteration:
9072 event_handled = True
9074 if not event_handled:
9075 raise AssertionError("tight loop")
# _schedule_yield: service pending poll events exactly once without blocking,
# so synchronous tasks can periodically yield to the scheduler.
# NOTE(review): corrupted listing — several lines are missing (docstring
# quotes, events_handled initialization, the non-blocking self._poll(0) call,
# the handler invocation and counter increment); restore from pristine source.
9077 def _schedule_yield(self):
9079 Schedule for a short period of time chosen by the scheduler based
9080 on internal state. Synchronous tasks should call this periodically
9081 in order to allow the scheduler to service pending poll events. The
9082 scheduler will call poll() exactly once, without blocking, and any
9083 resulting poll events will be serviced.
9085 event_handlers = self._poll_event_handlers
9088 if not event_handlers:
9089 return bool(events_handled)
9091 if not self._poll_event_queue:
9095 while event_handlers and self._poll_event_queue:
9096 f, event = self._next_poll_event()
9097 handler, reg_id = event_handlers[f]
9100 except StopIteration:
9103 return bool(events_handled)
# _register: register a file descriptor and its event handler with the poll
# object, recording the fd under a fresh registration id in both maps
# (reg_id -> fd and fd -> (handler, reg_id)).
# NOTE(review): corrupted listing — docstring quotes and the trailing
# "return reg_id" (line 9118, presumably) are missing; confirm against
# pristine source.
9105 def _register(self, f, eventmask, handler):
9108 @return: A unique registration id, for use in schedule() or
9111 if f in self._poll_event_handlers:
9112 raise AssertionError("fd %d is already registered" % f)
9113 self._event_handler_id += 1
9114 reg_id = self._event_handler_id
9115 self._poll_event_handler_ids[reg_id] = f
9116 self._poll_event_handlers[f] = (handler, reg_id)
9117 self._poll_obj.register(f, eventmask)
9120 def _unregister(self, reg_id):
9121 f = self._poll_event_handler_ids[reg_id]
9122 self._poll_obj.unregister(f)
9123 del self._poll_event_handlers[f]
9124 del self._poll_event_handler_ids[reg_id]
# _schedule_wait: run the poll loop until none of the given registration ids
# remain registered; returns True if any event was handled.
# NOTE(review): corrupted listing — docstring quotes and the try:/handler
# invocation lines (9144, 9148 region) are missing; restore from pristine
# source. Docstring typo "not longer" should read "no longer" once restored.
9126 def _schedule_wait(self, wait_ids):
9128 Schedule until wait_id is not longer registered
9131 @param wait_id: a task id to wait for
9133 event_handlers = self._poll_event_handlers
9134 handler_ids = self._poll_event_handler_ids
9135 event_handled = False
9137 if isinstance(wait_ids, int):
9138 wait_ids = frozenset([wait_ids])
9141 while wait_ids.intersection(handler_ids):
9142 f, event = self._next_poll_event()
9143 handler, reg_id = event_handlers[f]
9145 event_handled = True
9146 except StopIteration:
9147 event_handled = True
9149 return event_handled
# QueueScheduler: PollScheduler subclass that drives a set of
# SequentialTaskQueue instances until all of their tasks complete.
# NOTE(review): corrupted listing — original line numbers are fused into the
# text and many lines are missing (the "def add(self, q):" header above line
# 9175, "self._queues = []" initialization, run() header, _schedule_yield
# calls, return statements, and q.schedule() calls); restore from pristine
# source before editing.
9151 class QueueScheduler(PollScheduler):
9154 Add instances of SequentialTaskQueue and then call run(). The
9155 run() method returns when no tasks remain.
9158 def __init__(self, max_jobs=None, max_load=None):
9159 PollScheduler.__init__(self)
9161 if max_jobs is None:
9164 self._max_jobs = max_jobs
9165 self._max_load = max_load
9166 self.sched_iface = self._sched_iface_class(
9167 register=self._register,
9168 schedule=self._schedule_wait,
9169 unregister=self._unregister)
9172 self._schedule_listeners = []
# presumably the body of add(q) — the def line is missing; confirm.
9175 self._queues.append(q)
9177 def remove(self, q):
9178 self._queues.remove(q)
# run(): loop while tasks can still be scheduled, then drain running jobs.
9182 while self._schedule():
9185 while self._running_job_count():
9188 def _schedule_tasks(self):
9191 @returns: True if there may be remaining tasks to schedule,
9194 while self._can_add_job():
9195 n = self._max_jobs - self._running_job_count()
9199 if not self._start_next_job(n):
9202 for q in self._queues:
# _running_job_count: sum of running tasks across all queues; also caches
# the count in self._jobs.
9207 def _running_job_count(self):
9209 for q in self._queues:
9210 job_count += len(q.running_tasks)
9211 self._jobs = job_count
# _start_next_job: ask each queue to start work until at least n new jobs
# have been started; returns the number actually started.
9214 def _start_next_job(self, n=1):
9216 for q in self._queues:
9217 initial_job_count = len(q.running_tasks)
9219 final_job_count = len(q.running_tasks)
9220 if final_job_count > initial_job_count:
9221 started_count += (final_job_count - initial_job_count)
9222 if started_count >= n:
9224 return started_count
# TaskScheduler: convenience wrapper that pairs one SequentialTaskQueue with
# a QueueScheduler; add tasks, then call run().
# NOTE(review): corrupted listing — docstring quote lines are missing; also
# note the pre-existing typo "AsynchrousTask" (AsynchronousTask) in the
# docstring text.
9226 class TaskScheduler(object):
9229 A simple way to handle scheduling of AsynchrousTask instances. Simply
9230 add tasks and call run(). The run() method returns when no tasks remain.
9233 def __init__(self, max_jobs=None, max_load=None):
9234 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9235 self._scheduler = QueueScheduler(
9236 max_jobs=max_jobs, max_load=max_load)
9237 self.sched_iface = self._scheduler.sched_iface
# run is delegated directly to the underlying QueueScheduler.
9238 self.run = self._scheduler.run
9239 self._scheduler.add(self._queue)
9241 def add(self, task):
9242 self._queue.add(task)
# JobStatusDisplay: renders the one-line "Jobs: N of M complete ... Load avg"
# status on stdout, using termcap codes when attached to a tty. Attribute
# writes go through __setattr__ so changes to the bound properties
# ("curval", "failed", "running") mark the display dirty.
# NOTE(review): corrupted listing — original line numbers are fused into the
# text, indentation is lost, and many lines are missing (the bodies of the
# _default_term_codes and _termcap_name_map dict literals, several method
# headers, try:/return/blank lines). Restore from pristine source before
# editing; comments below are hedged where lines are absent.
9244 class JobStatusDisplay(object):
9246 _bound_properties = ("curval", "failed", "running")
9247 _jobs_column_width = 48
9249 # Don't update the display unless at least this much
9250 # time has passed, in units of seconds.
9251 _min_display_latency = 2
# dict contents missing here (lines 9254-9258, presumably fallback control
# codes for cr/el/nel); the closing brace is also absent — confirm.
9253 _default_term_codes = {
# dict contents truncated after the first entry; closing brace absent.
9259 _termcap_name_map = {
9260 'carriage_return' : 'cr',
# __init__ bypasses the custom __setattr__ via object.__setattr__ so that
# initialization does not trigger _property_change callbacks.
9265 def __init__(self, out=sys.stdout, quiet=False):
9266 object.__setattr__(self, "out", out)
9267 object.__setattr__(self, "quiet", quiet)
9268 object.__setattr__(self, "maxval", 0)
9269 object.__setattr__(self, "merges", 0)
9270 object.__setattr__(self, "_changed", False)
9271 object.__setattr__(self, "_displayed", False)
9272 object.__setattr__(self, "_last_display_time", 0)
9273 object.__setattr__(self, "width", 80)
9276 isatty = hasattr(out, "isatty") and out.isatty()
9277 object.__setattr__(self, "_isatty", isatty)
# Fall back to the default control codes when not a tty or when termcap
# initialization fails.
9278 if not isatty or not self._init_term():
9280 for k, capname in self._termcap_name_map.iteritems():
9281 term_codes[k] = self._default_term_codes[capname]
9282 object.__setattr__(self, "_term_codes", term_codes)
9284 def _init_term(self):
9286 Initialize term control codes.
9288 @returns: True if term codes were successfully initialized,
9292 term_type = os.environ.get("TERM", "vt100")
# curses.setupterm may raise curses.error; the surrounding try: line is
# missing from this listing.
9298 curses.setupterm(term_type, self.out.fileno())
9299 tigetstr = curses.tigetstr
9300 except curses.error:
9305 if tigetstr is None:
9309 for k, capname in self._termcap_name_map.iteritems():
9310 code = tigetstr(capname)
# None from tigetstr means the capability is unsupported; use the default.
9312 code = self._default_term_codes[capname]
9313 term_codes[k] = code
9314 object.__setattr__(self, "_term_codes", term_codes)
9317 def _format_msg(self, msg):
9318 return ">>> %s" % msg
# Lines 9319-9321 missing — presumably a _erase() method header writing
# carriage_return + clr_eol to self.out; confirm.
9322 self._term_codes['carriage_return'] + \
9323 self._term_codes['clr_eol'])
9325 self._displayed = False
9327 def _display(self, line):
9328 self.out.write(line)
9330 self._displayed = True
9332 def _update(self, msg):
# Non-tty output: plain line with trailing newline code.
9335 if not self._isatty:
9336 out.write(self._format_msg(msg) + self._term_codes['newline'])
9338 self._displayed = True
9344 self._display(self._format_msg(msg))
9346 def displayMessage(self, msg):
9348 was_displayed = self._displayed
# Erase the status line before printing a message over it.
9350 if self._isatty and self._displayed:
9353 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9355 self._displayed = False
9358 self._changed = True
# Presumably part of a reset() method — zero all bound properties and drop
# the displayed line; the def line is missing from this listing.
9364 for name in self._bound_properties:
9365 object.__setattr__(self, name, 0)
9368 self.out.write(self._term_codes['newline'])
9370 self._displayed = False
# __setattr__: no-op when the value is unchanged; bound-property changes
# notify _property_change.
9372 def __setattr__(self, name, value):
9373 old_value = getattr(self, name)
9374 if value == old_value:
9376 object.__setattr__(self, name, value)
9377 if name in self._bound_properties:
9378 self._property_change(name, old_value, value)
9380 def _property_change(self, name, old_value, new_value):
9381 self._changed = True
# _load_avg_str: format os.getloadavg() (may be unavailable on some
# platforms, hence AttributeError handling).
9384 def _load_avg_str(self):
9386 avg = os.getloadavg()
9387 except (AttributeError, OSError), e:
9399 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9403 Display status on stdout, but only if something has
9404 changed since the last call.
# Rate-limit updates on ttys to _min_display_latency seconds.
9410 current_time = time.time()
9411 time_delta = current_time - self._last_display_time
9412 if self._displayed and \
9414 if not self._isatty:
9416 if time_delta < self._min_display_latency:
9419 self._last_display_time = current_time
9420 self._changed = False
9421 self._display_status()
9423 def _display_status(self):
9424 # Don't use len(self._completed_tasks) here since that also
9425 # can include uninstall tasks.
9426 curval_str = str(self.curval)
9427 maxval_str = str(self.maxval)
9428 running_str = str(self.running)
9429 failed_str = str(self.failed)
9430 load_avg_str = self._load_avg_str()
# Render once into both a color buffer and a plain mirror so the plain
# text can be measured/truncated while the color text is what gets shown.
9432 color_output = StringIO.StringIO()
9433 plain_output = StringIO.StringIO()
9434 style_file = portage.output.ConsoleStyleFile(color_output)
9435 style_file.write_listener = plain_output
9436 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9437 style_writer.style_listener = style_file.new_styles
9438 f = formatter.AbstractFormatter(style_writer)
9440 number_style = "INFORM"
9441 f.add_literal_data("Jobs: ")
9442 f.push_style(number_style)
9443 f.add_literal_data(curval_str)
9445 f.add_literal_data(" of ")
9446 f.push_style(number_style)
9447 f.add_literal_data(maxval_str)
9449 f.add_literal_data(" complete")
9452 f.add_literal_data(", ")
9453 f.push_style(number_style)
9454 f.add_literal_data(running_str)
9456 f.add_literal_data(" running")
9459 f.add_literal_data(", ")
9460 f.push_style(number_style)
9461 f.add_literal_data(failed_str)
9463 f.add_literal_data(" failed")
# Pad the jobs column so the load average lines up between refreshes.
9465 padding = self._jobs_column_width - len(plain_output.getvalue())
9467 f.add_literal_data(padding * " ")
9469 f.add_literal_data("Load avg: ")
9470 f.add_literal_data(load_avg_str)
9472 # Truncate to fit width, to avoid making the terminal scroll if the
9473 # line overflows (happens when the load average is large).
9474 plain_output = plain_output.getvalue()
9475 if self._isatty and len(plain_output) > self.width:
9476 # Use plain_output here since it's easier to truncate
9477 # properly than the color output which contains console
9479 self._update(plain_output[:self.width])
9481 self._update(color_output.getvalue())
9483 xtermTitle(" ".join(plain_output.split()))
# Scheduler: the main emerge merge scheduler (PollScheduler subclass).
# Class-level option sets below classify command-line options:
# - _opts_ignore_blockers: modes where blocker checking is skipped
# - _opts_no_background: modes that must keep foreground output
# - _opts_no_restart: modes where self-restart after a portage upgrade
#   is not performed
# - _bad_resume_opts: options that must not leak into an automatic --resume
# NOTE(review): corrupted listing — original line numbers are fused into the
# text; the class continues past this span.
9485 class Scheduler(PollScheduler):
9487 _opts_ignore_blockers = \
9488 frozenset(["--buildpkgonly",
9489 "--fetchonly", "--fetch-all-uri",
9490 "--nodeps", "--pretend"])
9492 _opts_no_background = \
9493 frozenset(["--pretend",
9494 "--fetchonly", "--fetch-all-uri"])
9496 _opts_no_restart = frozenset(["--buildpkgonly",
9497 "--fetchonly", "--fetch-all-uri", "--pretend"])
9499 _bad_resume_opts = set(["--ask", "--changelog",
9500 "--resume", "--skipfirst"])
# Shared log file for all parallel fetch output.
9502 _fetch_log = "/var/log/emerge-fetch.log"
# _iface_class: slot-object bundle of scheduler callbacks handed to tasks
# (see the _sched_iface construction in __init__).
# NOTE(review): corrupted listing — the __slots__ tuple is unclosed here;
# line 9508 (presumably the final "unregister" entry and closing paren) is
# missing. Restore from pristine source.
9504 class _iface_class(SlotObject):
9505 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9506 "dblinkElog", "fetch", "register", "schedule",
9507 "scheduleSetup", "scheduleUnpack", "scheduleYield",
# _fetch_iface_class: the minimal interface fetch tasks receive — the shared
# fetch log path plus a schedule callback.
9510 class _fetch_iface_class(SlotObject):
9511 __slots__ = ("log_file", "schedule")
# Container type holding the scheduler's four task queues; attribute names
# are unprefixed ("merge", "jobs", "fetch", "unpack").
9513 _task_queues_class = slot_dict_class(
9514 ("merge", "jobs", "fetch", "unpack"), prefix="")
# _build_opts_class: boolean flags derived from command-line options in
# __init__ (slot name maps to "--<name with - for _>").
9516 class _build_opts_class(SlotObject):
9517 __slots__ = ("buildpkg", "buildpkgonly",
9518 "fetch_all_uri", "fetchonly", "pretend")
# _binpkg_opts_class: boolean flags for binary-package handling, derived
# from command-line options in __init__.
9520 class _binpkg_opts_class(SlotObject):
9521 __slots__ = ("fetchonly", "getbinpkg", "pretend")
# _pkg_count_class: progress counter pair (current / total merges).
9523 class _pkg_count_class(SlotObject):
9524 __slots__ = ("curval", "maxval")
# _emerge_log_class: thin wrapper around emergelog() that suppresses the
# short xterm-title message when titles are disabled.
9526 class _emerge_log_class(SlotObject):
9527 __slots__ = ("xterm_titles",)
9529 def log(self, *pargs, **kwargs):
9530 if not self.xterm_titles:
9531 # Avoid interference with the scheduler's status display.
9532 kwargs.pop("short_msg", None)
9533 emergelog(self.xterm_titles, *pargs, **kwargs)
# _failed_pkg: record of a failed build/install, kept for the final
# failure summary and log display.
9535 class _failed_pkg(SlotObject):
9536 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9538 class _ConfigPool(object):
9539 """Interface for a task to temporarily allocate a config
9540 instance from a pool. This allows a task to be constructed
9541 long before the config instance actually becomes needed, like
9542 when prefetchers are constructed for the whole merge list."""
9543 __slots__ = ("_root", "_allocate", "_deallocate")
# NOTE(review): corrupted listing — line 9545 (presumably
# "self._root = root") and line 9548 (presumably "def allocate(self):")
# are missing; restore from pristine source.
9544 def __init__(self, root, allocate, deallocate):
9546 self._allocate = allocate
9547 self._deallocate = deallocate
9549 return self._allocate(self._root)
9550 def deallocate(self, settings):
9551 self._deallocate(settings)
# Internal sentinel exception used to abort scheduling after the real error
# has already been reported on stderr.
# NOTE(review): corrupted listing — docstring quote lines are missing.
9553 class _unknown_internal_error(portage.exception.PortageException):
9555 Used internally to terminate scheduling. The specific reason for
9556 the failure should have been dumped to stderr.
9558 def __init__(self, value=""):
9559 portage.exception.PortageException.__init__(self, value)
# Scheduler.__init__: wire up option flags, task queues, status display,
# blocker databases, the scheduling interface handed to tasks, parallel-fetch
# setup, and detection of the currently installed portage package.
# NOTE(review): corrupted listing — original line numbers are fused into the
# text and many lines are missing (e.g. self.trees/self._mtimedb wiring,
# "self.edebug = 1" under the PORTAGE_DEBUG check, the "for root in trees:"
# header above 9603, default max_jobs assignment, try: lines). Restore from
# pristine source before editing.
9561 def __init__(self, settings, trees, mtimedb, myopts,
9562 spinner, mergelist, favorites, digraph):
9563 PollScheduler.__init__(self)
9564 self.settings = settings
9565 self.target_root = settings["ROOT"]
9567 self.myopts = myopts
9568 self._spinner = spinner
9569 self._mtimedb = mtimedb
9570 self._mergelist = mergelist
9571 self._favorites = favorites
9572 self._args_set = InternalPackageSet(favorites)
# Derive boolean option flags: slot "fetch_all_uri" <-> "--fetch-all-uri".
9573 self._build_opts = self._build_opts_class()
9574 for k in self._build_opts.__slots__:
9575 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9576 self._binpkg_opts = self._binpkg_opts_class()
9577 for k in self._binpkg_opts.__slots__:
9578 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9581 self._logger = self._emerge_log_class()
9582 self._task_queues = self._task_queues_class()
9583 for k in self._task_queues.allowed_keys:
9584 setattr(self._task_queues, k,
9585 SequentialTaskQueue())
9586 self._status_display = JobStatusDisplay()
9587 self._max_load = myopts.get("--load-average")
9588 max_jobs = myopts.get("--jobs")
9589 if max_jobs is None:
9591 self._set_max_jobs(max_jobs)
9593 # The root where the currently running
9594 # portage instance is installed.
9595 self._running_root = trees["/"]["root_config"]
9597 if settings.get("PORTAGE_DEBUG", "") == "1":
9599 self.pkgsettings = {}
9600 self._config_pool = {}
9601 self._blocker_db = {}
# Presumably inside a "for root in trees:" loop — the loop header is
# missing from this listing; confirm.
9603 self._config_pool[root] = []
9604 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
# Build the callback interface that asynchronous tasks use to talk back
# to this scheduler.
9606 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9607 schedule=self._schedule_fetch)
9608 self._sched_iface = self._iface_class(
9609 dblinkEbuildPhase=self._dblink_ebuild_phase,
9610 dblinkDisplayMerge=self._dblink_display_merge,
9611 dblinkElog=self._dblink_elog,
9612 fetch=fetch_iface, register=self._register,
9613 schedule=self._schedule_wait,
9614 scheduleSetup=self._schedule_setup,
9615 scheduleUnpack=self._schedule_unpack,
9616 scheduleYield=self._schedule_yield,
9617 unregister=self._unregister)
# Weak values: prefetchers disappear from this map once nothing else
# holds them.
9619 self._prefetchers = weakref.WeakValueDictionary()
9620 self._pkg_queue = []
9621 self._completed_tasks = set()
9623 self._failed_pkgs = []
9624 self._failed_pkgs_all = []
9625 self._failed_pkgs_die_msgs = []
9626 self._post_mod_echo_msgs = []
9627 self._parallel_fetch = False
9628 merge_count = len([x for x in mergelist \
9629 if isinstance(x, Package) and x.operation == "merge"])
9630 self._pkg_count = self._pkg_count_class(
9631 curval=0, maxval=merge_count)
9632 self._status_display.maxval = self._pkg_count.maxval
9634 # The load average takes some time to respond when new
9635 # jobs are added, so we need to limit the rate of adding
9637 self._job_delay_max = 10
9638 self._job_delay_factor = 1.0
9639 self._job_delay_exp = 1.5
9640 self._previous_job_start_time = None
9642 self._set_digraph(digraph)
9644 # This is used to memoize the _choose_pkg() result when
9645 # no packages can be chosen until one of the existing
9647 self._choose_pkg_return_early = False
# parallel-fetch requires distlocks and is pointless for a single-entry
# merge list or fetch-only/pretend modes.
9649 features = self.settings.features
9650 if "parallel-fetch" in features and \
9651 not ("--pretend" in self.myopts or \
9652 "--fetch-all-uri" in self.myopts or \
9653 "--fetchonly" in self.myopts):
9654 if "distlocks" not in features:
9655 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9656 portage.writemsg(red("!!!")+" parallel-fetching " + \
9657 "requires the distlocks feature enabled"+"\n",
9659 portage.writemsg(red("!!!")+" you have it disabled, " + \
9660 "thus parallel-fetching is being disabled"+"\n",
9662 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9663 elif len(mergelist) > 1:
9664 self._parallel_fetch = True
9666 if self._parallel_fetch:
9667 # clear out existing fetch log if it exists
9669 open(self._fetch_log, 'w')
9670 except EnvironmentError:
# Identify the installed portage package so _is_restart_necessary() can
# detect a self-upgrade.
9673 self._running_portage = None
9674 portage_match = self._running_root.trees["vartree"].dbapi.match(
9675 portage.const.PORTAGE_PACKAGE_ATOM)
9677 cpv = portage_match.pop()
9678 self._running_portage = self._pkg(cpv, "installed",
9679 self._running_root, installed=True)
# _poll override: delegate to the base class poll.
# NOTE(review): corrupted listing — line 9682 is missing here (presumably a
# self._schedule() call before delegating); restore from pristine source.
9681 def _poll(self, timeout=None):
9683 PollScheduler._poll(self, timeout=timeout)
9685 def _set_max_jobs(self, max_jobs):
9686 self._max_jobs = max_jobs
9687 self._task_queues.jobs.max_jobs = max_jobs
# _background_mode: decide whether output should be suppressed (background),
# force --jobs=1 when interactive packages are present, and configure the
# status display / xterm title behavior accordingly.
# NOTE(review): corrupted listing — several lines are missing (docstring
# quotes, the msg list construction around 9710-9713, the background
# assignment feeding 9722, and the final return); restore from pristine
# source.
9689 def _background_mode(self):
9691 Check if background mode is enabled and adjust states as necessary.
9694 @returns: True if background mode is enabled, False otherwise.
# self._max_jobs is True means "unlimited" (--jobs with no argument).
9696 background = (self._max_jobs is True or \
9697 self._max_jobs > 1 or "--quiet" in self.myopts) and \
9698 not bool(self._opts_no_background.intersection(self.myopts))
9701 interactive_tasks = self._get_interactive_tasks()
9702 if interactive_tasks:
9704 writemsg_level(">>> Sending package output to stdio due " + \
9705 "to interactive package(s):\n",
9706 level=logging.INFO, noiselevel=-1)
9708 for pkg in interactive_tasks:
9709 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
9711 pkg_str += " for " + pkg.root
9714 writemsg_level("".join("%s\n" % (l,) for l in msg),
9715 level=logging.INFO, noiselevel=-1)
# Interactive packages need the terminal, so parallel jobs are disabled.
9716 if self._max_jobs is True or self._max_jobs > 1:
9717 self._set_max_jobs(1)
9718 writemsg_level(">>> Setting --jobs=1 due " + \
9719 "to the above interactive package(s)\n",
9720 level=logging.INFO, noiselevel=-1)
9722 self._status_display.quiet = \
9724 ("--quiet" in self.myopts and \
9725 "--verbose" not in self.myopts)
9727 self._logger.xterm_titles = \
9728 "notitles" not in self.settings.features and \
9729 self._status_display.quiet
# _get_interactive_tasks: collect merge-list packages whose PROPERTIES
# metadata includes "interactive". Invalid PROPERTIES strings abort
# scheduling via _unknown_internal_error after showing a notice.
# NOTE(review): corrupted listing — the "continue" after the filter and the
# "try:" before the use_reduce call are missing; restore from pristine
# source.
9733 def _get_interactive_tasks(self):
9734 from portage import flatten
9735 from portage.dep import use_reduce, paren_reduce
9736 interactive_tasks = []
9737 for task in self._mergelist:
9738 if not (isinstance(task, Package) and \
9739 task.operation == "merge"):
9742 properties = flatten(use_reduce(paren_reduce(
9743 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9744 except portage.exception.InvalidDependString, e:
9745 show_invalid_depstring_notice(task,
9746 task.metadata["PROPERTIES"], str(e))
9747 raise self._unknown_internal_error()
9748 if "interactive" in properties:
9749 interactive_tasks.append(task)
9750 return interactive_tasks
# _set_digraph: keep the dependency graph only when it is useful — with
# --nodeps or fewer than 2 jobs there is no parallel scheduling to inform,
# so the graph is dropped.
# NOTE(review): corrupted listing — lines between 9754 and 9756 and the
# return after 9756 are missing; restore from pristine source.
9752 def _set_digraph(self, digraph):
9753 if "--nodeps" in self.myopts or \
9754 (self._max_jobs is not True and self._max_jobs < 2):
9756 self._digraph = None
9759 self._digraph = digraph
9760 self._prune_digraph()
# _prune_digraph: repeatedly strip irrelevant root nodes (non-Package nodes,
# installed nomerge nodes, completed tasks) until a pass removes nothing.
# NOTE(review): corrupted listing — docstring quotes, the graph-None guard,
# the outer loop header, a condition line between 9773 and 9775, and the
# loop-exit break are missing; restore from pristine source.
9762 def _prune_digraph(self):
9764 Prune any root nodes that are irrelevant.
9767 graph = self._digraph
9768 completed_tasks = self._completed_tasks
9769 removed_nodes = set()
9771 for node in graph.root_nodes():
9772 if not isinstance(node, Package) or \
9773 (node.installed and node.operation == "nomerge") or \
9775 node in completed_tasks:
9776 removed_nodes.add(node)
9778 graph.difference_update(removed_nodes)
9779 if not removed_nodes:
9781 removed_nodes.clear()
# _pkg_failure: raised by unmerge() when an uninstallation fails; the first
# positional argument (when given) is stored as the failure status.
# NOTE(review): corrupted listing — docstring quotes and a default-status
# line between 9790 and 9792 (plus its "if pargs:" guard, presumably) are
# missing; restore from pristine source.
9783 class _pkg_failure(portage.exception.PortageException):
9785 An instance of this class is raised by unmerge() when
9786 an uninstallation fails.
9789 def __init__(self, *pargs):
9790 portage.exception.PortageException.__init__(self, pargs)
9792 self.status = pargs[0]
# _schedule_fetch: put a fetcher at the front of the fetch queue so access
# to the shared fetch log stays serialized.
# NOTE(review): corrupted listing — docstring quote lines are missing.
9794 def _schedule_fetch(self, fetcher):
9796 Schedule a fetcher on the fetch queue, in order to
9797 serialize access to the fetch log.
9799 self._task_queues.fetch.addFront(fetcher)
# _schedule_setup: put a setup phase at the front of the merge queue; merge
# queue tasks run unsandboxed, so live-filesystem access is serialized here.
# NOTE(review): corrupted listing — docstring quote lines are missing.
9801 def _schedule_setup(self, setup_phase):
9803 Schedule a setup phase on the merge queue, in order to
9804 serialize unsandboxed access to the live filesystem.
9806 self._task_queues.merge.addFront(setup_phase)
# _schedule_unpack: append an unpack phase to the unpack queue, serializing
# $DISTDIR access for live ebuilds.
# NOTE(review): corrupted listing — docstring quote lines are missing.
9809 def _schedule_unpack(self, unpack_phase):
9811 Schedule an unpack phase on the unpack queue, in order
9812 to serialize $DISTDIR access for live ebuilds.
9814 self._task_queues.unpack.add(unpack_phase)
# _find_blockers: wrapper around _find_blockers_with_lock for callers that
# already hold the vdb lock (acquire_lock=0).
# NOTE(review): corrupted listing — docstring quotes and, presumably, an
# inner function definition wrapping the return are missing (the docstring
# says a callable is returned, but the visible code returns the result
# directly); confirm against pristine source.
9816 def _find_blockers(self, new_pkg):
9818 Returns a callable which should be called only when
9819 the vdb lock has been acquired.
9822 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
# _find_blockers_with_lock: build dblink instances for installed packages
# that block new_pkg, skipping same-slot/same-cpv entries (those are
# replacements, not blockers). Returns [] implicitly for ignored modes.
# NOTE(review): corrupted listing — the early return for ignored blockers,
# the gc.collect() call the comment at 9829 refers to, and the "continue"
# statements under the two skip conditions are missing; restore from
# pristine source.
9825 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9826 if self._opts_ignore_blockers.intersection(self.myopts):
9829 # Call gc.collect() here to avoid heap overflow that
9830 # triggers 'Cannot allocate memory' errors (reported
9835 blocker_db = self._blocker_db[new_pkg.root]
9837 blocker_dblinks = []
9838 for blocking_pkg in blocker_db.findInstalledBlockers(
9839 new_pkg, acquire_lock=acquire_lock):
9840 if new_pkg.slot_atom == blocking_pkg.slot_atom:
9842 if new_pkg.cpv == blocking_pkg.cpv:
9844 blocker_dblinks.append(portage.dblink(
9845 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9846 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9847 vartree=self.trees[blocking_pkg.root]["vartree"]))
9851 return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""Translate a portage.dblink into this scheduler's Package instance.

	The dblink's tree type is mapped to a package type name via
	RootConfig.tree_pkg_map, and the root_config for the dblink's root
	is resolved before delegating to self._pkg().
	"""
	type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	return self._pkg(
		pkg_dblink.mycpv,
		type_name,
		self.trees[pkg_dblink.myroot]["root_config"],
		installed=(type_name == "installed"))
# _append_to_log_path: append msg to the file at log_path.
# NOTE(review): corrupted listing — the rest of the body (presumably
# f.write(msg) and f.close()) is missing; as shown, the file handle leaks.
# Restore from pristine source.
9860 def _append_to_log_path(self, log_path, msg):
9861 f = open(log_path, 'a')
# _dblink_elog: route elog messages from a dblink through the given elog
# function, sending them to the log file instead of the terminal when
# running in background mode.
# NOTE(review): corrupted listing — the out/log_file initialization, the
# message loop header around 9876-9879, and the log_file.close() after 9882
# are missing; restore from pristine source.
9867 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9869 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9872 background = self._background
9874 if background and log_path is not None:
9875 log_file = open(log_path, 'a')
9880 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9882 if log_file is not None:
# _dblink_display_merge: display a merge message, suppressing sub-WARN
# messages in background mode; when a log file exists the message is
# appended there (and, per the visible code, also written to the terminal).
# NOTE(review): corrupted listing — a return after the no-log-path branch
# and the condition guarding the second writemsg_level call (presumably
# "if not background") are missing; restore from pristine source.
9885 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9886 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9887 background = self._background
9889 if log_path is None:
9890 if not (background and level < logging.WARN):
9891 portage.util.writemsg_level(msg,
9892 level=level, noiselevel=noiselevel)
9895 portage.util.writemsg_level(msg,
9896 level=level, noiselevel=noiselevel)
9897 self._append_to_log_path(log_path, msg)
# _dblink_ebuild_phase: run one ebuild phase for a merging dblink through
# the scheduler, so poll events keep being serviced while the phase runs
# asynchronously; returns the phase's exit code.
# NOTE(review): corrupted listing — docstring quotes and the wait on
# ebuild_phase between start() and the return (presumably
# ebuild_phase.wait()) are missing; restore from pristine source.
9899 def _dblink_ebuild_phase(self,
9900 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9902 Using this callback for merge phases allows the scheduler
9903 to run while these phases execute asynchronously, and allows
9904 the scheduler control output handling.
9907 scheduler = self._sched_iface
9908 settings = pkg_dblink.settings
9909 pkg = self._dblink_pkg(pkg_dblink)
9910 background = self._background
9911 log_path = settings.get("PORTAGE_LOG_FILE")
9913 ebuild_phase = EbuildPhase(background=background,
9914 pkg=pkg, phase=phase, scheduler=scheduler,
9915 settings=settings, tree=pkg_dblink.treetype)
9916 ebuild_phase.start()
9919 return ebuild_phase.returncode
# _check_manifests: verify manifests for every ebuild in the merge list up
# front (FEATURES=strict only), using quiet clones of each root's config.
# NOTE(review): corrupted listing — the early "return os.EX_OK", the
# quiet_settings dict initialization, the "continue" after the ebuild
# filter, the failure return after the digestcheck, and the final success
# return are missing; restore from pristine source.
9921 def _check_manifests(self):
9922 # Verify all the manifests now so that the user is notified of failure
9923 # as soon as possible.
9924 if "strict" not in self.settings.features or \
9925 "--fetchonly" in self.myopts or \
9926 "--fetch-all-uri" in self.myopts:
9929 shown_verifying_msg = False
9931 for myroot, pkgsettings in self.pkgsettings.iteritems():
9932 quiet_config = portage.config(clone=pkgsettings)
9933 quiet_config["PORTAGE_QUIET"] = "1"
9934 quiet_config.backup_changes("PORTAGE_QUIET")
9935 quiet_settings[myroot] = quiet_config
9938 for x in self._mergelist:
9939 if not isinstance(x, Package) or \
9940 x.type_name != "ebuild":
9943 if not shown_verifying_msg:
9944 shown_verifying_msg = True
9945 self._status_msg("Verifying ebuild manifests")
9947 root_config = x.root_config
9948 portdb = root_config.trees["porttree"].dbapi
9949 quiet_config = quiet_settings[root_config.root]
9950 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9951 if not portage.digestcheck([], quiet_config, strict=True):
# _add_prefetchers: when parallel-fetch is enabled, create a prefetcher for
# each merge-list package (except the first, to avoid "waiting for lock"
# noise) and queue them on the fetch queue.
# NOTE(review): corrupted listing — the early return after the
# not-parallel-fetch guard is missing; also, getbinpkg at 9965 is computed
# but unused in the visible lines. Restore from pristine source.
9956 def _add_prefetchers(self):
9958 if not self._parallel_fetch:
9961 if self._parallel_fetch:
9962 self._status_msg("Starting parallel fetch")
9964 prefetchers = self._prefetchers
9965 getbinpkg = "--getbinpkg" in self.myopts
9967 # In order to avoid "waiting for lock" messages
9968 # at the beginning, which annoy users, never
9969 # spawn a prefetcher for the first package.
9970 for pkg in self._mergelist[1:]:
9971 prefetcher = self._create_prefetcher(pkg)
9972 if prefetcher is not None:
9973 self._task_queues.fetch.add(prefetcher)
9974 prefetchers[pkg] = prefetcher
# _create_prefetcher: build a background fetch task for a package — an
# EbuildFetcher for ebuilds, a BinpkgPrefetcher for remote binary packages
# when --getbinpkg is active; None for anything else.
# NOTE(review): corrupted listing — docstring quotes, the
# "prefetcher = None" default, and the final "return prefetcher" are
# missing; restore from pristine source.
9976 def _create_prefetcher(self, pkg):
9978 @return: a prefetcher, or None if not applicable
9982 if not isinstance(pkg, Package):
9985 elif pkg.type_name == "ebuild":
9987 prefetcher = EbuildFetcher(background=True,
9988 config_pool=self._ConfigPool(pkg.root,
9989 self._allocate_config, self._deallocate_config),
9990 fetchonly=1, logfile=self._fetch_log,
9991 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9993 elif pkg.type_name == "binary" and \
9994 "--getbinpkg" in self.myopts and \
9995 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9997 prefetcher = BinpkgPrefetcher(background=True,
9998 pkg=pkg, scheduler=self._sched_iface)
# _is_restart_scheduled: True when a portage replacement appears in the
# merge list before its last position (so emerge will re-exec after it
# merges); always False for no-restart option modes.
# NOTE(review): corrupted listing — docstring quotes, the early
# "return False", and the returns inside/after the loop are missing;
# restore from pristine source.
10002 def _is_restart_scheduled(self):
10004 Check if the merge list contains a replacement
10005 for the current running instance, that will result
10006 in restart after merge.
10008 @returns: True if a restart is scheduled, False otherwise.
10010 if self._opts_no_restart.intersection(self.myopts):
10013 mergelist = self._mergelist
10015 for i, pkg in enumerate(mergelist):
10016 if self._is_restart_necessary(pkg) and \
10017 i != len(mergelist) - 1:
# _is_restart_necessary: a restart is needed when pkg is the portage package
# for the running root and differs from the currently running portage
# version.
# NOTE(review): corrupted listing — docstring quotes and the fallback
# returns (when there is no running portage match, and the final False)
# are missing; restore from pristine source.
10022 def _is_restart_necessary(self, pkg):
10024 @return: True if merging the given package
10025 requires restart, False otherwise.
10028 # Figure out if we need a restart.
10029 if pkg.root == self._running_root.root and \
10030 portage.match_from_list(
10031 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10032 if self._running_portage:
10033 return pkg.cpv != self._running_portage.cpv
# _restart_if_necessary: after portage upgrades itself mid-merge, clean up,
# log completion, drop the merged package from the resume list, and re-exec
# emerge with --resume (blocking bad_resume_opts from leaking back in via
# EMERGE_DEFAULT_OPTS).
# NOTE(review): corrupted listing — several early "return" lines, the
# mtimedb.commit() presumably following 10067, and the boolean-option
# branch between 10079 and 10081 are missing; restore from pristine
# source.
10037 def _restart_if_necessary(self, pkg):
10039 Use execv() to restart emerge. This happens
10040 if portage upgrades itself and there are
10041 remaining packages in the list.
10044 if self._opts_no_restart.intersection(self.myopts):
10047 if not self._is_restart_necessary(pkg):
# No restart needed when portage was the last package in the list.
10050 if pkg == self._mergelist[-1]:
10053 self._main_loop_cleanup()
10055 logger = self._logger
10056 pkg_count = self._pkg_count
10057 mtimedb = self._mtimedb
10058 bad_resume_opts = self._bad_resume_opts
10060 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10061 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10063 logger.log(" *** RESTARTING " + \
10064 "emerge via exec() after change of " + \
10065 "portage version.")
10067 mtimedb["resume"]["mergelist"].remove(list(pkg))
10069 portage.run_exitfuncs()
10070 mynewargv = [sys.argv[0], "--resume"]
10071 resume_opts = self.myopts.copy()
10072 # For automatic resume, we need to prevent
10073 # any of bad_resume_opts from leaking in
10074 # via EMERGE_DEFAULT_OPTS.
10075 resume_opts["--ignore-default-opts"] = True
10076 for myopt, myarg in resume_opts.iteritems():
10077 if myopt not in bad_resume_opts:
10079 mynewargv.append(myopt)
10081 mynewargv.append(myopt +"="+ str(myarg))
10082 # priority only needs to be adjusted on the first run
10083 os.environ["PORTAGE_NICENESS"] = "0"
# execv does not return on success; emerge restarts in-place here.
10084 os.execv(mynewargv[0], mynewargv)
# NOTE(review): the method header for this span was lost in extraction —
# by position and content this is presumably the body of Scheduler's main
# merge() entry point (resume handling, per-root PORTAGE_TMPDIR validation,
# manifest checks, the --keep-going retry loop, and final failure
# reporting); confirm against pristine source. Original line numbers are
# fused into the text and many lines (try:/finally:, returns, breaks,
# loop headers) are missing.
10088 if "--resume" in self.myopts:
10090 portage.writemsg_stdout(
10091 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10092 self._logger.log(" *** Resuming merge...")
10094 self._save_resume_list()
10097 self._background = self._background_mode()
10098 except self._unknown_internal_error:
# Validate every root's PORTAGE_TMPDIR before doing any work.
10101 for root in self.trees:
10102 root_config = self.trees[root]["root_config"]
10104 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10105 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10106 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10107 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10108 if not tmpdir or not os.path.isdir(tmpdir):
10109 msg = "The directory specified in your " + \
10110 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10111 "does not exist. Please create this " + \
10112 "directory or correct your PORTAGE_TMPDIR setting."
10113 msg = textwrap.wrap(msg, 70)
10114 out = portage.output.EOutput()
# Background mode is propagated to each root's settings.
10119 if self._background:
10120 root_config.settings.unlock()
10121 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10122 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10123 root_config.settings.lock()
10125 self.pkgsettings[root] = portage.config(
10126 clone=root_config.settings)
10128 rval = self._check_manifests()
10129 if rval != os.EX_OK:
# --keep-going retry loop: after a failed pass, prune failed packages
# from the resume list and recompute it, then merge again.
10132 keep_going = "--keep-going" in self.myopts
10133 fetchonly = self._build_opts.fetchonly
10134 mtimedb = self._mtimedb
10135 failed_pkgs = self._failed_pkgs
10138 rval = self._merge()
10139 if rval == os.EX_OK or fetchonly or not keep_going:
10141 if "resume" not in mtimedb:
10143 mergelist = self._mtimedb["resume"].get("mergelist")
10147 if not failed_pkgs:
10150 for failed_pkg in failed_pkgs:
10151 mergelist.remove(list(failed_pkg.pkg))
10153 self._failed_pkgs_all.extend(failed_pkgs)
10159 if not self._calc_resume_list():
10162 clear_caches(self.trees)
10163 if not self._mergelist:
10166 self._save_resume_list()
10167 self._pkg_count.curval = 0
10168 self._pkg_count.maxval = len([x for x in self._mergelist \
10169 if isinstance(x, Package) and x.operation == "merge"])
10170 self._status_display.maxval = self._pkg_count.maxval
10172 self._logger.log(" *** Finished. Cleaning up...")
10175 self._failed_pkgs_all.extend(failed_pkgs)
# With a single failure in background mode, show that package's whole
# build log for easy viewing.
10178 background = self._background
10179 failure_log_shown = False
10180 if background and len(self._failed_pkgs_all) == 1:
10181 # If only one package failed then just show it's
10182 # whole log for easy viewing.
10183 failed_pkg = self._failed_pkgs_all[-1]
10184 build_dir = failed_pkg.build_dir
10187 log_paths = [failed_pkg.build_log]
10189 log_path = self._locate_failure_log(failed_pkg)
10190 if log_path is not None:
10192 log_file = open(log_path, 'rb')
10196 if log_file is not None:
10198 for line in log_file:
10199 writemsg_level(line, noiselevel=-1)
10202 failure_log_shown = True
10204 # Dump mod_echo output now since it tends to flood the terminal.
10205 # This allows us to avoid having more important output, generated
10206 # later, from being swept away by the mod_echo output.
10207 mod_echo_output = _flush_elog_mod_echo()
10209 if background and not failure_log_shown and \
10210 self._failed_pkgs_all and \
10211 self._failed_pkgs_die_msgs and \
10212 not mod_echo_output:
10214 printer = portage.output.EOutput()
10215 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10217 if mysettings["ROOT"] != "/":
10218 root_msg = " merged to %s" % mysettings["ROOT"]
10220 printer.einfo("Error messages for package %s%s:" % \
10221 (colorize("INFORM", key), root_msg))
10223 for phase in portage.const.EBUILD_PHASES:
10224 if phase not in logentries:
10226 for msgtype, msgcontent in logentries[phase]:
10227 if isinstance(msgcontent, basestring):
10228 msgcontent = [msgcontent]
10229 for line in msgcontent:
10230 printer.eerror(line.strip("\n"))
10232 if self._post_mod_echo_msgs:
10233 for msg in self._post_mod_echo_msgs:
# Multi-failure summary: list every failed package with a bad() prefix.
10236 if len(self._failed_pkgs_all) > 1:
10237 msg = "The following packages have " + \
10238 "failed to build or install:"
10239 prefix = bad(" * ")
10240 writemsg(prefix + "\n", noiselevel=-1)
10241 from textwrap import wrap
10242 for line in wrap(msg, 72):
10243 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10244 writemsg(prefix + "\n", noiselevel=-1)
10245 for failed_pkg in self._failed_pkgs_all:
10246 writemsg("%s\t%s\n" % (prefix,
10247 colorize("INFORM", str(failed_pkg.pkg))),
10249 writemsg(prefix + "\n", noiselevel=-1)
# _elog_listener: elog callback that records ERROR-level log entries for the
# end-of-run failure report.
# NOTE(review): corrupted listing — line 10255 (presumably "if errors:") is
# missing; restore from pristine source.
10253 def _elog_listener(self, mysettings, key, logentries, fulltext):
10254 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10256 self._failed_pkgs_die_msgs.append(
10257 (mysettings, key, errors))
# _locate_failure_log: pick a usable (existing, non-empty) log file for a
# failed package from its candidate log paths.
# NOTE(review): heavily truncated in this listing — existence checks,
# exception handling around os.stat, and the returns are missing; restore
# from pristine source.
10259 def _locate_failure_log(self, failed_pkg):
10261 build_dir = failed_pkg.build_dir
10264 log_paths = [failed_pkg.build_log]
10266 for log_path in log_paths:
10271 log_size = os.stat(log_path).st_size
# _add_packages: seed the package queue from the merge list; Package
# entries are queued, Blocker entries take a separate path.
# NOTE(review): corrupted listing — the body of the Blocker branch (line
# 10288 onward, presumably recording parent blockers) is missing; restore
# from pristine source.
10282 def _add_packages(self):
10283 pkg_queue = self._pkg_queue
10284 for pkg in self._mergelist:
10285 if isinstance(pkg, Package):
10286 pkg_queue.append(pkg)
10287 elif isinstance(pkg, Blocker):
# _merge_exit: exit listener for PackageMerge tasks — finalize via
# _do_merge_exit, release the config back to the pool, bump the progress
# counter on successful new merges, and refresh the merge-queue count on
# the status display.
# NOTE(review): corrupted listing — lines 10297-10298 (presumably
# _schedule() / deallocation ordering) are missing; restore from pristine
# source.
10290 def _merge_exit(self, merge):
10291 self._do_merge_exit(merge)
10292 self._deallocate_config(merge.merge.settings)
10293 if merge.returncode == os.EX_OK and \
10294 not merge.merge.pkg.installed:
10295 self._status_display.curval += 1
10296 self._status_display.merges = len(self._task_queues.merge)
# _do_merge_exit: record failure details when a merge failed; on success,
# mark the task (and any replaced package's uninstall task) complete,
# restart emerge if portage itself was merged, and keep the on-disk resume
# list in sync.
# NOTE(review): corrupted listing — the return after the failure branch,
# the pkg=pkg argument line around 10308, and the mtimedb.commit() after
# 10335 are missing; restore from pristine source.
10299 def _do_merge_exit(self, merge):
10300 pkg = merge.merge.pkg
10301 if merge.returncode != os.EX_OK:
10302 settings = merge.merge.settings
10303 build_dir = settings.get("PORTAGE_BUILDDIR")
10304 build_log = settings.get("PORTAGE_LOG_FILE")
10306 self._failed_pkgs.append(self._failed_pkg(
10307 build_dir=build_dir, build_log=build_log,
10309 returncode=merge.returncode))
10310 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10312 self._status_display.failed = len(self._failed_pkgs)
10315 self._task_complete(pkg)
10316 pkg_to_replace = merge.merge.pkg_to_replace
10317 if pkg_to_replace is not None:
10318 # When a package is replaced, mark it's uninstall
10319 # task complete (if any).
10320 uninst_hash_key = \
10321 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10322 self._task_complete(uninst_hash_key)
10327 self._restart_if_necessary(pkg)
10329 # Call mtimedb.commit() after each merge so that
10330 # --resume still works after being interrupted
10331 # by reboot, sigkill or similar.
10332 mtimedb = self._mtimedb
10333 mtimedb["resume"]["mergelist"].remove(list(pkg))
10334 if not mtimedb["resume"]["mergelist"]:
10335 del mtimedb["resume"]
# Exit listener for build tasks.  A successful build is wrapped in a
# PackageMerge task and queued for merging; a failed build is recorded
# as a _failed_pkg and reported.  Either way the per-task config is
# released and the jobs display is refreshed.
# NOTE(review): the "else:" line (10345) separating the two branches is
# absent from this listing.
10338 def _build_exit(self, build):
10339 if build.returncode == os.EX_OK:
10341 merge = PackageMerge(merge=build)
10342 merge.addExitListener(self._merge_exit)
10343 self._task_queues.merge.add(merge)
10344 self._status_display.merges = len(self._task_queues.merge)
10346 settings = build.settings
10347 build_dir = settings.get("PORTAGE_BUILDDIR")
10348 build_log = settings.get("PORTAGE_LOG_FILE")
10350 self._failed_pkgs.append(self._failed_pkg(
10351 build_dir=build_dir, build_log=build_log,
10353 returncode=build.returncode))
10354 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10356 self._status_display.failed = len(self._failed_pkgs)
10357 self._deallocate_config(build.settings)
10359 self._status_display.running = self._jobs
10362 def _extract_exit(self, build):
10363 self._build_exit(build)
10365 def _task_complete(self, pkg):
10366 self._completed_tasks.add(pkg)
10367 self._choose_pkg_return_early = False
# NOTE(review): headless fragment — the enclosing method's "def" line
# (around original line 10369, presumably the scheduler's merge driver)
# is not present in this listing.  Visible behavior: queue prefetchers
# and packages, install global hooks (quiet locking, elog listener) for
# the duration of the run, then undo them and extract the last failure
# return code during cleanup.
10371 self._add_prefetchers()
10372 self._add_packages()
10373 pkg_queue = self._pkg_queue
10374 failed_pkgs = self._failed_pkgs
# Global side effects: silence lock messages in background mode and
# divert elog output to this scheduler while the loop runs.
10375 portage.locks._quiet = self._background
10376 portage.elog._emerge_elog_listener = self._elog_listener
10382 self._main_loop_cleanup()
10383 portage.locks._quiet = False
10384 portage.elog._emerge_elog_listener = None
10386 rval = failed_pkgs[-1].returncode
10390 def _main_loop_cleanup(self):
10391 del self._pkg_queue[:]
10392 self._completed_tasks.clear()
10393 self._choose_pkg_return_early = False
10394 self._status_display.reset()
10395 self._digraph = None
10396 self._task_queues.fetch.clear()
# Pick the next package whose scheduled dependencies are all satisfied.
# With --nodeps and parallel jobs there is no digraph, so the queue is
# consumed FIFO; otherwise the digraph is consulted so a package is not
# started while a merge it depends on is still running.
# NOTE(review): several lines are missing from this listing (returns at
# 10404/10411, "chosen_pkg = None" initialisation around 10418-10419,
# the "chosen_pkg = pkg / break" body around 10424-10426, and the final
# return) — as shown the control flow is incomplete; confirm upstream.
10398 def _choose_pkg(self):
10400 Choose a task that has all its dependencies satisfied.
10403 if self._choose_pkg_return_early:
10406 if self._digraph is None:
10407 if (self._jobs or self._task_queues.merge) and \
10408 not ("--nodeps" in self.myopts and \
10409 (self._max_jobs is True or self._max_jobs > 1)):
10410 self._choose_pkg_return_early = True
10412 return self._pkg_queue.pop(0)
10414 if not (self._jobs or self._task_queues.merge):
10415 return self._pkg_queue.pop(0)
10417 self._prune_digraph()
# "later" = everything still queued; dependence on those packages is
# ignored because they will merge after this one anyway.
10420 later = set(self._pkg_queue)
10421 for pkg in self._pkg_queue:
10423 if not self._dependent_on_scheduled_merges(pkg, later):
10427 if chosen_pkg is not None:
10428 self._pkg_queue.remove(chosen_pkg)
10430 if chosen_pkg is None:
10431 # There's no point in searching for a package to
10432 # choose until at least one of the existing jobs
10434 self._choose_pkg_return_early = True
# Depth-first walk of pkg's dependency subgraph, looking for any node
# that is still an active/pending merge.  Installed "nomerge" nodes,
# uninstalls that are not direct deps, completed tasks, and "later"
# packages are not considered blocking.
# NOTE(review): lines 10443, 10447-10449, 10451-10452, 10455-10456, the
# while-loop header (10461), the "continue" after 10463, the lines
# 10470-10472 completing the big condition, and the returns are missing
# from this listing.
10438 def _dependent_on_scheduled_merges(self, pkg, later):
10440 Traverse the subgraph of the given packages deep dependencies
10441 to see if it contains any scheduled merges.
10442 @param pkg: a package to check dependencies for
10444 @param later: packages for which dependence should be ignored
10445 since they will be merged later than pkg anyway and therefore
10446 delaying the merge of pkg will not result in a more optimal
10450 @returns: True if the package is dependent, False otherwise.
10453 graph = self._digraph
10454 completed_tasks = self._completed_tasks
10457 traversed_nodes = set([pkg])
10458 direct_deps = graph.child_nodes(pkg)
10459 node_stack = direct_deps
10460 direct_deps = frozenset(direct_deps)
10462 node = node_stack.pop()
10463 if node in traversed_nodes:
10465 traversed_nodes.add(node)
10466 if not ((node.installed and node.operation == "nomerge") or \
10467 (node.operation == "uninstall" and \
10468 node not in direct_deps) or \
10469 node in completed_tasks or \
10473 node_stack.extend(graph.child_nodes(node))
# Hand out a config object for a parallel task, reusing a pooled
# instance for this ROOT when one is available, otherwise cloning the
# baseline pkgsettings.  Paired with _deallocate_config().
# NOTE(review): the "else:" line (10484) between the pool-pop and the
# clone is missing from this listing.
10477 def _allocate_config(self, root):
10479 Allocate a unique config instance for a task in order
10480 to prevent interference between parallel tasks.
10482 if self._config_pool[root]:
10483 temp_settings = self._config_pool[root].pop()
10485 temp_settings = portage.config(clone=self.pkgsettings[root])
10486 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10487 # performance reasons, call it here to make sure all settings from the
10488 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10489 temp_settings.reload()
10490 temp_settings.reset()
10491 return temp_settings
10493 def _deallocate_config(self, settings):
10494 self._config_pool[settings["ROOT"]].append(settings)
# Scheduler main loop: throttle to one job when a portage restart is
# pending or a no-background option is in effect, then keep polling
# event handlers while _schedule() reports work remaining and while
# jobs/merges drain.
# NOTE(review): lines 10508-10511, 10513 and 10515-10516 (the poll
# calls and loop bodies) are absent from this listing.
10496 def _main_loop(self):
10498 # Only allow 1 job max if a restart is scheduled
10499 # due to portage update.
10500 if self._is_restart_scheduled() or \
10501 self._opts_no_background.intersection(self.myopts):
10502 self._set_max_jobs(1)
10504 merge_queue = self._task_queues.merge
10506 while self._schedule():
10507 if self._poll_event_handlers:
10512 if not (self._jobs or merge_queue):
10514 if self._poll_event_handlers:
10517 def _keep_scheduling(self):
10518 return bool(self._pkg_queue and \
10519 not (self._failed_pkgs and not self._build_opts.fetchonly))
# One scheduling pass: start whatever tasks can be started, refresh the
# status line, service the task queues, cancel now-pointless
# prefetchers after a failure, then try once more before reporting
# whether scheduling should continue.
# NOTE(review): lines 10524-10525, 10527-10529 (the queue-servicing
# body) and 10536-10538 are missing from this listing.
10521 def _schedule_tasks(self):
10522 self._schedule_tasks_imp()
10523 self._status_display.display()
10526 for q in self._task_queues.values():
10530 # Cancel prefetchers if they're the only reason
10531 # the main poll loop is still running.
10532 if self._failed_pkgs and not self._build_opts.fetchonly and \
10533 not (self._jobs or self._task_queues.merge) and \
10534 self._task_queues.fetch:
10535 self._task_queues.fetch.clear()
10539 self._schedule_tasks_imp()
10540 self._status_display.display()
10542 return self._keep_scheduling()
# Load-average throttle: when --load-average is in effect, delay the
# start of the next job for a period that grows polynomially with the
# number of running jobs (factor * jobs ** exp, capped at a maximum).
# NOTE(review): the docstring delimiters, an apparent load check around
# 10551/10553, and the return statements are absent from this listing.
10544 def _job_delay(self):
10547 @returns: True if job scheduling should be delayed, False otherwise.
10550 if self._jobs and self._max_load is not None:
10552 current_time = time.time()
10554 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10555 if delay > self._job_delay_max:
10556 delay = self._job_delay_max
10557 if (current_time - self._previous_job_start_time) < delay:
# Inner scheduling step: while it is still worth scheduling and a job
# slot is free, pick a package, build its task, and dispatch it —
# already-installed packages go straight to the merge queue, binary
# extractions and builds go to the jobs queue with the matching exit
# listener.  Returns whether any state changed.
# NOTE(review): this listing is missing the loop header and
# state_change initialisation (10563-10571), parts of the early-exit
# condition (10577), the None-check after _choose_pkg (10581),
# state_change updates, and the branch headers around 10590-10591,
# 10595-10597 and 10602-10604 that select between the three dispatch
# paths.
10562 def _schedule_tasks_imp(self):
10565 @returns: True if state changed, False otherwise.
10572 if not self._keep_scheduling():
10573 return bool(state_change)
10575 if self._choose_pkg_return_early or \
10576 not self._can_add_job() or \
10578 return bool(state_change)
10580 pkg = self._choose_pkg()
10582 return bool(state_change)
10586 if not pkg.installed:
10587 self._pkg_count.curval += 1
10589 task = self._task(pkg)
10592 merge = PackageMerge(merge=task)
10593 merge.addExitListener(self._merge_exit)
10594 self._task_queues.merge.add(merge)
10598 self._previous_job_start_time = time.time()
10599 self._status_display.running = self._jobs
10600 task.addExitListener(self._extract_exit)
10601 self._task_queues.jobs.add(task)
10605 self._previous_job_start_time = time.time()
10606 self._status_display.running = self._jobs
10607 task.addExitListener(self._build_exit)
10608 self._task_queues.jobs.add(task)
10610 return bool(state_change)
# Build a MergeListItem task for pkg.  For anything other than an
# uninstall, look up the currently-installed package occupying the same
# slot (if any) so the merge can mark its uninstall complete later.
# NOTE(review): the guard between 10617 and 10619 (presumably
# "if previous_cpv:") and the trailing "return task" are absent from
# this listing.
10612 def _task(self, pkg):
10614 pkg_to_replace = None
10615 if pkg.operation != "uninstall":
10616 vardb = pkg.root_config.trees["vartree"].dbapi
10617 previous_cpv = vardb.match(pkg.slot_atom)
10619 previous_cpv = previous_cpv.pop()
10620 pkg_to_replace = self._pkg(previous_cpv,
10621 "installed", pkg.root_config, installed=True)
10623 task = MergeListItem(args_set=self._args_set,
10624 background=self._background, binpkg_opts=self._binpkg_opts,
10625 build_opts=self._build_opts,
10626 config_pool=self._ConfigPool(pkg.root,
10627 self._allocate_config, self._deallocate_config),
10628 emerge_opts=self.myopts,
10629 find_blockers=self._find_blockers(pkg), logger=self._logger,
10630 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10631 pkg_to_replace=pkg_to_replace,
10632 prefetcher=self._prefetchers.get(pkg),
10633 scheduler=self._sched_iface,
10634 settings=self._allocate_config(pkg.root),
10635 statusMessage=self._status_msg,
10636 world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""Announce a failed package via the status display.

	@param failed_pkg: a _failed_pkg record whose .pkg is reported
	@param action: verb for the message, e.g. "emerge" or "install"
	@param preposition: joins the action to a non-default ROOT,
		e.g. "for" or "to"
	"""
	pkg = failed_pkg.pkg
	msg = "%s to %s %s" % (
		bad("Failed"), action, colorize("INFORM", pkg.cpv))
	if pkg.root != "/":
		msg = msg + " %s %s" % (preposition, pkg.root)

	log_path = self._locate_failure_log(failed_pkg)
	if log_path is not None:
		msg = msg + ", Log file:"
	self._status_msg(msg)

	# The log path is emitted on its own line, after the summary.
	if log_path is not None:
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
# Emit a one-line status message through the status display; in
# foreground mode a newline is written first so the message does not
# collide with task output.
# NOTE(review): the docstring delimiters and lines 10661-10663 of the
# docstring are absent from this listing.
10655 def _status_msg(self, msg):
10657 Display a brief status message (no newlines) in the status display.
10658 This is called by tasks to provide feedback to the user. This
10659 delegates the resposibility of generating \r and \n control characters,
10660 to guarantee that lines are created or erased when necessary and
10664 @param msg: a brief status message (no newlines allowed)
10666 if not self._background:
10667 writemsg_level("\n")
10668 self._status_display.displayMessage(msg)
# Snapshot the pending merges into mtimedb["resume"]["mergelist"] so an
# interrupted run can be resumed.  Only real "merge" operations are
# recorded, each flattened via list(x).
# NOTE(review): the trailing mtimedb.commit() (around line 10681) is
# not visible in this listing — confirm upstream.
10670 def _save_resume_list(self):
10672 Do this before verifying the ebuild Manifests since it might
10673 be possible for the user to use --resume --skipfirst get past
10674 a non-essential package with a broken digest.
10676 mtimedb = self._mtimedb
10677 mtimedb["resume"]["mergelist"] = [list(x) \
10678 for x in self._mergelist \
10679 if isinstance(x, Package) and x.operation == "merge"]
# Recompute the resume merge list, silently dropping packages whose
# dependencies can no longer be satisfied.  Unsatisfied deps are
# reported via a deferred message (run after the display), dropped
# merges are logged with --keep-going semantics and recorded as
# failures.  (Python 2 print statements.)
# NOTE(review): many interior lines are absent from this listing — the
# "try:" before resume_depgraph, spinner handling, else branches, the
# "indent" definition, and "pkg = task" in the dropped-task loop among
# others; the visible lines are annotated as-is.
10683 def _calc_resume_list(self):
10685 Use the current resume list to calculate a new one,
10686 dropping any packages with unsatisfied deps.
10688 @returns: True if successful, False otherwise.
10690 print colorize("GOOD", "*** Resuming merge...")
10692 if self._show_list():
10693 if "--tree" in self.myopts:
10694 portage.writemsg_stdout("\n" + \
10695 darkgreen("These are the packages that " + \
10696 "would be merged, in reverse order:\n\n"))
10699 portage.writemsg_stdout("\n" + \
10700 darkgreen("These are the packages that " + \
10701 "would be merged, in order:\n\n"))
10703 show_spinner = "--quiet" not in self.myopts and \
10704 "--nodeps" not in self.myopts
10707 print "Calculating dependencies ",
10709 myparams = create_depgraph_params(self.myopts, None)
# skip_unsatisfied=True lets the resolver drop broken entries instead
# of failing the whole resume.
10713 success, mydepgraph, dropped_tasks = resume_depgraph(
10714 self.settings, self.trees, self._mtimedb, self.myopts,
10715 myparams, self._spinner, skip_unsatisfied=True)
10716 except depgraph.UnsatisfiedResumeDep, e:
10717 mydepgraph = e.depgraph
10718 dropped_tasks = set()
10721 print "\b\b... done!"
# Deferred error report, shown after the merge list display.
10724 def unsatisfied_resume_dep_msg():
10725 mydepgraph.display_problems()
10726 out = portage.output.EOutput()
10727 out.eerror("One or more packages are either masked or " + \
10728 "have missing dependencies:")
10731 show_parents = set()
10732 for dep in e.value:
10733 if dep.parent in show_parents:
10735 show_parents.add(dep.parent)
10736 if dep.atom is None:
10737 out.eerror(indent + "Masked package:")
10738 out.eerror(2 * indent + str(dep.parent))
10741 out.eerror(indent + str(dep.atom) + " pulled in by:")
10742 out.eerror(2 * indent + str(dep.parent))
10744 msg = "The resume list contains packages " + \
10745 "that are either masked or have " + \
10746 "unsatisfied dependencies. " + \
10747 "Please restart/continue " + \
10748 "the operation manually, or use --skipfirst " + \
10749 "to skip the first package in the list and " + \
10750 "any other packages that may be " + \
10751 "masked or have missing dependencies."
10752 for line in textwrap.wrap(msg, 72):
10754 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10757 if success and self._show_list():
10758 mylist = mydepgraph.altlist()
10760 if "--tree" in self.myopts:
10762 mydepgraph.display(mylist, favorites=self._favorites)
10765 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10767 mydepgraph.display_problems()
# break_refs() severs references into the depgraph so it can be
# garbage collected once the scheduler owns the list.
10769 mylist = mydepgraph.altlist()
10770 mydepgraph.break_refs(mylist)
10771 mydepgraph.break_refs(dropped_tasks)
10772 self._mergelist = mylist
10773 self._set_digraph(mydepgraph.schedulerGraph())
10776 for task in dropped_tasks:
10777 if not (isinstance(task, Package) and task.operation == "merge"):
10780 msg = "emerge --keep-going:" + \
10782 if pkg.root != "/":
10783 msg += " for %s" % (pkg.root,)
10784 msg += " dropped due to unsatisfied dependency."
10785 for line in textwrap.wrap(msg, msg_width):
10786 eerror(line, phase="other", key=pkg.cpv)
10787 settings = self.pkgsettings[pkg.root]
10788 # Ensure that log collection from $T is disabled inside
10789 # elog_process(), since any logs that might exist are
10791 settings.pop("T", None)
10792 portage.elog.elog_process(pkg.cpv, settings)
10793 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# Whether the merge list should be (re)displayed when resuming: yes for
# --ask/--tree/--verbose runs unless --quiet is set.
# NOTE(review): the return statements (lines 10802+) are absent from
# this listing.
10797 def _show_list(self):
10798 myopts = self.myopts
10799 if "--quiet" not in myopts and \
10800 ("--ask" in myopts or "--tree" in myopts or \
10801 "--verbose" in myopts):
# Record pkg in the "world" favorites file when appropriate: skipped
# for pretend/oneshot-style option sets, for non-target ROOTs, and for
# packages not selected by the command-line argument atoms.  The world
# set is locked (when it supports locking) and reloaded before the
# atom is added.
# NOTE(review): several lines are missing from this listing — the early
# returns after each guard, the lock call (10830), the try/finally that
# presumably pairs with the lock, the None-check on the created atom
# (10838), and the unlock in a finally block.
10805 def _world_atom(self, pkg):
10807 Add the package to the world file, but only if
10808 it's supposed to be added. Otherwise, do nothing.
10811 if set(("--buildpkgonly", "--fetchonly",
10813 "--oneshot", "--onlydeps",
10814 "--pretend")).intersection(self.myopts):
10817 if pkg.root != self.target_root:
10820 args_set = self._args_set
10821 if not args_set.findAtomForPackage(pkg):
10824 logger = self._logger
10825 pkg_count = self._pkg_count
10826 root_config = pkg.root_config
10827 world_set = root_config.sets["world"]
10828 world_locked = False
10829 if hasattr(world_set, "lock"):
10831 world_locked = True
10834 if hasattr(world_set, "load"):
10835 world_set.load() # maybe it's changed on disk
10837 atom = create_world_atom(pkg, args_set, root_config)
10839 if hasattr(world_set, "add"):
10840 self._status_msg(('Recording %s in "world" ' + \
10841 'favorites file...') % atom)
10842 logger.log(" === (%s of %s) Updating world file (%s)" % \
10843 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10844 world_set.add(atom)
10846 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10847 (atom,), level=logging.WARN, noiselevel=-1)
# Return a Package instance for cpv, preferring an existing instance
# from the digraph; otherwise construct one from the appropriate tree's
# aux_get metadata, applying per-package USE for ebuilds.
# NOTE(review): missing from this listing: docstring delimiters, the
# condition selecting "merge" vs "nomerge" (around 10858/10860), the
# "return pkg" after the digraph hit (10868), and the final return.
10852 def _pkg(self, cpv, type_name, root_config, installed=False):
10854 Get a package instance from the cache, or create a new
10855 one if necessary. Raises KeyError from aux_get if it
10856 fails for some reason (package does not exist or is
10859 operation = "merge"
10861 operation = "nomerge"
10863 if self._digraph is not None:
10864 # Reuse existing instance when available.
10865 pkg = self._digraph.get(
10866 (type_name, root_config.root, cpv, operation))
10867 if pkg is not None:
10870 tree_type = depgraph.pkg_tree_map[type_name]
10871 db = root_config.trees[tree_type].dbapi
10872 db_keys = list(self.trees[root_config.root][
10873 tree_type].dbapi._aux_cache_keys)
10874 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10875 pkg = Package(cpv=cpv, metadata=metadata,
10876 root_config=root_config, installed=installed)
10877 if type_name == "ebuild":
10878 settings = self.pkgsettings[root_config.root]
10879 settings.setcpv(pkg)
10880 pkg.metadata["USE"] = settings["PORTAGE_USE"]
# Poll-scheduler that regenerates the ebuild metadata cache in
# parallel: iterates every category/package in the portdb, spawns one
# metadata process per ebuild, and finally prunes cache entries whose
# ebuilds no longer exist ("dead nodes").
10884 class MetadataRegen(PollScheduler):
10886 def __init__(self, portdb, max_jobs=None, max_load=None):
10887 PollScheduler.__init__(self)
10888 self._portdb = portdb
# NOTE(review): lines 10891-10892 (the default max_jobs value) are
# absent from this listing.
10890 if max_jobs is None:
10893 self._max_jobs = max_jobs
10894 self._max_load = max_load
10895 self._sched_iface = self._sched_iface_class(
10896 register=self._register,
10897 schedule=self._schedule_wait,
10898 unregister=self._unregister)
10900 self._valid_pkgs = set()
10901 self._process_iter = self._iter_metadata_processes()
# Generator yielding one EbuildMetadataPhase-style process per ebuild;
# cpvs seen are collected in _valid_pkgs for later dead-node pruning.
10903 def _iter_metadata_processes(self):
10904 portdb = self._portdb
10905 valid_pkgs = self._valid_pkgs
10906 every_cp = portdb.cp_all()
# Reverse-sort so pop() consumes categories in ascending order.
10907 every_cp.sort(reverse=True)
10910 cp = every_cp.pop()
10911 portage.writemsg_stdout("Processing %s\n" % cp)
10912 cpv_list = portdb.cp_list(cp)
10913 for cpv in cpv_list:
10914 valid_pkgs.add(cpv)
10915 ebuild_path, repo_path = portdb.findname2(cpv)
10916 metadata_process = portdb._metadata_process(
10917 cpv, ebuild_path, repo_path)
# A None process means the cache entry is already current.
10918 if metadata_process is None:
10920 yield metadata_process
# NOTE(review): the "def run(self):" header (around lines 10922-10923)
# is absent from this listing; the following lines are its body —
# collect existing cache keys, drive the scheduler, then discard cache
# entries for ebuilds that no longer exist in any tree.
10924 portdb = self._portdb
10925 from portage.cache.cache_errors import CacheError
10928 for mytree in portdb.porttrees:
10930 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10931 except CacheError, e:
10932 portage.writemsg("Error listing cache entries for " + \
10933 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10938 while self._schedule():
10945 for y in self._valid_pkgs:
10946 for mytree in portdb.porttrees:
10947 if portdb.findname2(y, mytree=mytree)[0]:
10948 dead_nodes[mytree].discard(y)
10950 for mytree, nodes in dead_nodes.iteritems():
10951 auxdb = portdb.auxdb[mytree]
10955 except (KeyError, CacheError):
# Start as many metadata processes as job slots allow; exhaustion of
# the iterator ends scheduling.
10958 def _schedule_tasks(self):
10961 @returns: True if there may be remaining tasks to schedule,
10964 while self._can_add_job():
10966 metadata_process = self._process_iter.next()
10967 except StopIteration:
10971 metadata_process.scheduler = self._sched_iface
10972 metadata_process.addExitListener(self._metadata_exit)
10973 metadata_process.start()
# Drop failed cpvs from _valid_pkgs so their stale cache entries are
# pruned as dead nodes rather than kept.
10976 def _metadata_exit(self, metadata_process):
10978 if metadata_process.returncode != os.EX_OK:
10979 self._valid_pkgs.discard(metadata_process.cpv)
10980 portage.writemsg("Error processing %s, continuing...\n" % \
10981 (metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.

	The failing command's exit status is available as the
	``status`` attribute.
	"""
	# Default failure status, used when the exception is raised
	# without an explicit status argument.  The visible original
	# assigned pargs[0] unconditionally, which would raise
	# IndexError for UninstallFailure() with no arguments.
	status = 1

	def __init__(self, *pargs):
		"""
		@param pargs: optional positional arguments; when present,
			pargs[0] is the exit status returned by
			portage.unmerge().
		"""
		portage.exception.PortageException.__init__(self, pargs)
		if pargs:
			self.status = pargs[0]
# Top-level unmerge driver for "emerge --unmerge/--prune/--clean/
# --depclean".  Resolves the requested arguments to installed packages,
# partitions them into selected/protected/omitted groups, protects
# system-profile and set-referenced packages, shows the preview, and
# finally calls portage.unmerge() for each selected package.
# NOTE(review): this listing omits a large number of interior lines
# (loop headers, try/except partners, else branches, returns); the
# annotations below describe only what is visible.
10995 def unmerge(root_config, myopts, unmerge_action,
10996 unmerge_files, ldpath_mtimes, autoclean=0,
10997 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10998 scheduler=None, writemsg_level=portage.util.writemsg_level):
11000 quiet = "--quiet" in myopts
11001 settings = root_config.settings
11002 sets = root_config.sets
11003 vartree = root_config.trees["vartree"]
11004 candidate_catpkgs=[]
11006 xterm_titles = "notitles" not in settings.features
11007 out = portage.output.EOutput()
11009 db_keys = list(vartree.dbapi._aux_cache_keys)
# Local helper: cache Package instances built from vardb metadata.
# NOTE(review): its "def _pkg(cpv):" header is not visible here.
11012 pkg = pkg_cache.get(cpv)
11014 pkg = Package(cpv=cpv, installed=True,
11015 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11016 root_config=root_config,
11017 type_name="installed")
11018 pkg_cache[cpv] = pkg
# Lock the vardb (when writable) for the duration of the selection.
11021 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11023 # At least the parent needs to exist for the lock file.
11024 portage.util.ensure_dirs(vdb_path)
11025 except portage.exception.PortageException:
11029 if os.access(vdb_path, os.W_OK):
11030 vdb_lock = portage.locks.lockdir(vdb_path)
# Expand the system set, resolving virtuals to their single installed
# provider where unambiguous.
11031 realsyslist = sets["system"].getAtoms()
11033 for x in realsyslist:
11034 mycp = portage.dep_getkey(x)
11035 if mycp in settings.getvirtuals():
11037 for provider in settings.getvirtuals()[mycp]:
11038 if vartree.dbapi.match(provider):
11039 providers.append(provider)
11040 if len(providers) == 1:
11041 syslist.extend(providers)
11043 syslist.append(mycp)
11045 mysettings = portage.config(clone=settings)
11047 if not unmerge_files:
11048 if unmerge_action == "unmerge":
11050 print bold("emerge unmerge") + " can only be used with specific package names"
11056 localtree = vartree
11057 # process all arguments and add all
11058 # valid db entries to candidate_catpkgs
11060 if not unmerge_files:
11061 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11063 #we've got command-line arguments
11064 if not unmerge_files:
11065 print "\nNo packages to unmerge have been provided.\n"
11067 for x in unmerge_files:
11068 arg_parts = x.split('/')
11069 if x[0] not in [".","/"] and \
11070 arg_parts[-1][-7:] != ".ebuild":
11071 #possible cat/pkg or dep; treat as such
11072 candidate_catpkgs.append(x)
11073 elif unmerge_action in ["prune","clean"]:
11074 print "\n!!! Prune and clean do not accept individual" + \
11075 " ebuilds as arguments;\n    skipping.\n"
11078 # it appears that the user is specifying an installed
11079 # ebuild and we're in "unmerge" mode, so it's ok.
11080 if not os.path.exists(x):
11081 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Map a filesystem path inside the vdb back to a =cat/pkg-ver atom.
11084 absx   = os.path.abspath(x)
11085 sp_absx = absx.split("/")
11086 if sp_absx[-1][-7:] == ".ebuild":
11088 absx = "/".join(sp_absx)
11090 sp_absx_len = len(sp_absx)
11092 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11093 vdb_len  = len(vdb_path)
11095 sp_vdb     = vdb_path.split("/")
11096 sp_vdb_len = len(sp_vdb)
11098 if not os.path.exists(absx+"/CONTENTS"):
11099 print "!!! Not a valid db dir: "+str(absx)
11102 if sp_absx_len <= sp_vdb_len:
11103 # The Path is shorter... so it can't be inside the vdb.
11106 print "\n!!!",x,"cannot be inside "+ \
11107 vdb_path+"; aborting.\n"
11110 for idx in range(0,sp_vdb_len):
11111 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11114 print "\n!!!", x, "is not inside "+\
11115 vdb_path+"; aborting.\n"
11118 print "="+"/".join(sp_absx[sp_vdb_len:])
11119 candidate_catpkgs.append(
11120 "="+"/".join(sp_absx[sp_vdb_len:]))
11123 if (not "--quiet" in myopts):
11125 if settings["ROOT"] != "/":
11126 writemsg_level(darkgreen(newline+ \
11127 ">>> Using system located in ROOT tree %s\n" % \
11130 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11131 not ("--quiet" in myopts):
11132 writemsg_level(darkgreen(newline+\
11133 ">>> These are the packages that would be unmerged:\n"))
11135 # Preservation of order is required for --depclean and --prune so
11136 # that dependencies are respected. Use all_selected to eliminate
11137 # duplicate packages since the same package may be selected by
11140 all_selected = set()
11141 for x in candidate_catpkgs:
11142 # cycle through all our candidate deps and determine
11143 # what will and will not get unmerged
11145 mymatch = vartree.dbapi.match(x)
11146 except portage.exception.AmbiguousPackageName, errpkgs:
11147 print "\n\n!!! The short ebuild name \"" + \
11148 x + "\" is ambiguous.  Please specify"
11149 print "!!! one of the following fully-qualified " + \
11150 "ebuild names instead:\n"
11151 for i in errpkgs[0]:
11152 print "    " + green(i)
11156 if not mymatch and x[0] not in "<>=~":
11157 mymatch = localtree.dep_match(x)
11159 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11160 (x, unmerge_action), noiselevel=-1)
# One pkgmap entry per argument, with selected/protected/omitted sets.
11164 {"protected": set(), "selected": set(), "omitted": set()})
11165 mykey = len(pkgmap) - 1
11166 if unmerge_action=="unmerge":
11168 if y not in all_selected:
11169 pkgmap[mykey]["selected"].add(y)
11170 all_selected.add(y)
11171 elif unmerge_action == "prune":
11172 if len(mymatch) == 1:
# prune: protect the best version (highest counter on slot collision,
# otherwise portage.best), select the rest.
11174 best_version = mymatch[0]
11175 best_slot = vartree.getslot(best_version)
11176 best_counter = vartree.dbapi.cpv_counter(best_version)
11177 for mypkg in mymatch[1:]:
11178 myslot = vartree.getslot(mypkg)
11179 mycounter = vartree.dbapi.cpv_counter(mypkg)
11180 if (myslot == best_slot and mycounter > best_counter) or \
11181 mypkg == portage.best([mypkg, best_version]):
11182 if myslot == best_slot:
11183 if mycounter < best_counter:
11184 # On slot collision, keep the one with the
11185 # highest counter since it is the most
11186 # recently installed.
11188 best_version = mypkg
11190 best_counter = mycounter
11191 pkgmap[mykey]["protected"].add(best_version)
11192 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11193 if mypkg != best_version and mypkg not in all_selected)
11194 all_selected.update(pkgmap[mykey]["selected"])
11196 # unmerge_action == "clean"
11198 for mypkg in mymatch:
11199 if unmerge_action == "clean":
11200 myslot = localtree.getslot(mypkg)
11202 # since we're pruning, we don't care about slots
11203 # and put all the pkgs in together
11205 if myslot not in slotmap:
11206 slotmap[myslot] = {}
11207 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11209 for mypkg in vartree.dbapi.cp_list(
11210 portage.dep_getkey(mymatch[0])):
11211 myslot = vartree.getslot(mypkg)
11212 if myslot not in slotmap:
11213 slotmap[myslot] = {}
11214 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
# clean: per slot, protect the most recently installed (highest
# counter) and anything not matched; select the rest in merge order.
11216 for myslot in slotmap:
11217 counterkeys = slotmap[myslot].keys()
11218 if not counterkeys:
11221 pkgmap[mykey]["protected"].add(
11222 slotmap[myslot][counterkeys[-1]])
11223 del counterkeys[-1]
11225 for counter in counterkeys[:]:
11226 mypkg = slotmap[myslot][counter]
11227 if mypkg not in mymatch:
11228 counterkeys.remove(counter)
11229 pkgmap[mykey]["protected"].add(
11230 slotmap[myslot][counter])
11232 #be pretty and get them in order of merge:
11233 for ckey in counterkeys:
11234 mypkg = slotmap[myslot][ckey]
11235 if mypkg not in all_selected:
11236 pkgmap[mykey]["selected"].add(mypkg)
11237 all_selected.add(mypkg)
11238 # ok, now the last-merged package
11239 # is protected, and the rest are selected
11240 numselected = len(all_selected)
11241 if global_unmerge and not numselected:
11242 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11245 if not numselected:
11246 portage.writemsg_stdout(
11247 "\n>>> No packages selected for removal by " + \
11248 unmerge_action + "\n")
11252 vartree.dbapi.flush_cache()
11253 portage.locks.unlockdir(vdb_lock)
11255 from portage.sets.base import EditablePackageSet
11257 # generate a list of package sets that are directly or indirectly listed in "world",
11258 # as there is no persistent list of "installed" sets
11259 installed_sets = ["world"]
# Transitively expand nested set references until a fixed point.
11264 pos = len(installed_sets)
11265 for s in installed_sets[pos - 1:]:
11268 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11271 installed_sets += candidates
11272 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11275 # we don't want to unmerge packages that are still listed in user-editable package sets
11276 # listed in "world" as they would be remerged on the next update of "world" or the
11277 # relevant package sets.
11278 unknown_sets = set()
11279 for cp in xrange(len(pkgmap)):
11280 for cpv in pkgmap[cp]["selected"].copy():
11284 # It could have been uninstalled
11285 # by a concurrent process.
# Never let portage unmerge itself outside of "clean".
11288 if unmerge_action != "clean" and \
11289 root_config.root == "/" and \
11290 portage.match_from_list(
11291 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11292 msg = ("Not unmerging package %s since there is no valid " + \
11293 "reason for portage to unmerge itself.") % (pkg.cpv,)
11294 for line in textwrap.wrap(msg, 75):
11296 # adjust pkgmap so the display output is correct
11297 pkgmap[cp]["selected"].remove(cpv)
11298 all_selected.remove(cpv)
11299 pkgmap[cp]["protected"].add(cpv)
11303 for s in installed_sets:
11304 # skip sets that the user requested to unmerge, and skip world
11305 # unless we're unmerging a package set (as the package would be
11306 # removed from "world" later on)
11307 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11311 if s in unknown_sets:
11313 unknown_sets.add(s)
11314 out = portage.output.EOutput()
11315 out.eerror(("Unknown set '@%s' in " + \
11316 "%svar/lib/portage/world_sets") % \
11317 (s, root_config.root))
11320 # only check instances of EditablePackageSet as other classes are generally used for
11321 # special purposes and can be ignored here (and are usually generated dynamically, so the
11322 # user can't do much about them anyway)
11323 if isinstance(sets[s], EditablePackageSet):
11325 # This is derived from a snippet of code in the
11326 # depgraph._iter_atoms_for_pkg() method.
11327 for atom in sets[s].iterAtomsForPackage(pkg):
11328 inst_matches = vartree.dbapi.match(atom)
11329 inst_matches.reverse() # descending order
11331 for inst_cpv in inst_matches:
11333 inst_pkg = _pkg(inst_cpv)
11335 # It could have been uninstalled
11336 # by a concurrent process.
11339 if inst_pkg.cp != atom.cp:
11341 if pkg >= inst_pkg:
11342 # This is descending order, and we're not
11343 # interested in any versions <= pkg given.
11345 if pkg.slot_atom != inst_pkg.slot_atom:
11346 higher_slot = inst_pkg
11348 if higher_slot is None:
11352 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11353 #print colorize("WARN", "but still listed in the following package sets:")
11354 #print "    %s\n" % ", ".join(parents)
11355 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11356 print colorize("WARN", "still referenced by the following package sets:")
11357 print "    %s\n" % ", ".join(parents)
11358 # adjust pkgmap so the display output is correct
11359 pkgmap[cp]["selected"].remove(cpv)
11360 all_selected.remove(cpv)
11361 pkgmap[cp]["protected"].add(cpv)
11365 numselected = len(all_selected)
11366 if not numselected:
11368 "\n>>> No packages selected for removal by " + \
11369 unmerge_action + "\n")
11372 # Unmerge order only matters in some cases
# Regroup per cp when order is irrelevant, for nicer display.
11376 selected = d["selected"]
11379 cp = portage.cpv_getkey(iter(selected).next())
11380 cp_dict = unordered.get(cp)
11381 if cp_dict is None:
11383 unordered[cp] = cp_dict
11386 for k, v in d.iteritems():
11387 cp_dict[k].update(v)
11388 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview display: selected / protected / omitted per cp, with a
# loud warning (and optional countdown) for system-profile packages.
11390 for x in xrange(len(pkgmap)):
11391 selected = pkgmap[x]["selected"]
11394 for mytype, mylist in pkgmap[x].iteritems():
11395 if mytype == "selected":
11397 mylist.difference_update(all_selected)
11398 cp = portage.cpv_getkey(iter(selected).next())
11399 for y in localtree.dep_match(cp):
11400 if y not in pkgmap[x]["omitted"] and \
11401 y not in pkgmap[x]["selected"] and \
11402 y not in pkgmap[x]["protected"] and \
11403 y not in all_selected:
11404 pkgmap[x]["omitted"].add(y)
11405 if global_unmerge and not pkgmap[x]["selected"]:
11406 #avoid cluttering the preview printout with stuff that isn't getting unmerged
11408 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11409 writemsg_level(colorize("BAD","\a\n\n!!! " + \
11410 "'%s' is part of your system profile.\n" % cp),
11411 level=logging.WARNING, noiselevel=-1)
11412 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11413 "be damaging to your system.\n\n"),
11414 level=logging.WARNING, noiselevel=-1)
11415 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11416 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11417 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11419 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11421 writemsg_level(bold(cp) + ": ", noiselevel=-1)
11422 for mytype in ["selected","protected","omitted"]:
11424 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11425 if pkgmap[x][mytype]:
11426 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11427 sorted_pkgs.sort(portage.pkgcmp)
11428 for pn, ver, rev in sorted_pkgs:
11432 myversion = ver + "-" + rev
11433 if mytype == "selected":
11435 colorize("UNMERGE_WARN", myversion + " "),
11439 colorize("GOOD", myversion + " "), noiselevel=-1)
11441 writemsg_level("none ", noiselevel=-1)
11443 writemsg_level("\n", noiselevel=-1)
11445 writemsg_level("\n", noiselevel=-1)
11447 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11448 " packages are slated for removal.\n")
11449 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11450 " and " + colorize("GOOD", "'omitted'") + \
11451 " packages will not be removed.\n\n")
11453 if "--pretend" in myopts:
11454 #we're done... return
11456 if "--ask" in myopts:
11457 if userquery("Would you like to unmerge these packages?")=="No":
11458 # enter pretend mode for correct formatting of results
11459 myopts["--pretend"] = True
11464 #the real unmerging begins, after a short delay....
11465 if clean_delay and not autoclean:
11466 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11468 for x in xrange(len(pkgmap)):
11469 for y in pkgmap[x]["selected"]:
11470 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11471 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11472 mysplit = y.split("/")
11474 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11475 mysettings, unmerge_action not in ["clean","prune"],
11476 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11477 scheduler=scheduler)
11479 if retval != os.EX_OK:
11480 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11482 raise UninstallFailure(retval)
11485 if clean_world and hasattr(sets["world"], "cleanPackage"):
11486 sets["world"].cleanPackage(vartree.dbapi, y)
11487 emergelog(xterm_titles, " >>> unmerge success: "+y)
11488 if clean_world and hasattr(sets["world"], "remove"):
11489 for s in root_config.setconfig.active:
11490 sets["world"].remove(SETPREFIX+s)
11493 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
# Regenerate the GNU info directory index ("dir" file) for every infodir
# whose mtime differs from the value recorded in prev_mtimes, by invoking
# /usr/bin/install-info for each info file found there.
# NOTE(review): this is an elided numbered listing (gaps in the left-hand
# original line numbers); code lines kept byte-identical, comments only.
11495 if os.path.exists("/usr/bin/install-info"):
11496 out = portage.output.EOutput()
# Collect infodirs whose recorded mtime is stale (or unknown).
11501 inforoot=normpath(root+z)
11502 if os.path.isdir(inforoot):
11503 infomtime = long(os.stat(inforoot).st_mtime)
11504 if inforoot not in prev_mtimes or \
11505 prev_mtimes[inforoot] != infomtime:
11506 regen_infodirs.append(inforoot)
11508 if not regen_infodirs:
11509 portage.writemsg_stdout("\n")
11510 out.einfo("GNU info directory index is up-to-date.")
11512 portage.writemsg_stdout("\n")
11513 out.einfo("Regenerating GNU info directory index...")
# "dir" index files may exist plain or compressed.
11515 dir_extensions = ("", ".gz", ".bz2")
11519 for inforoot in regen_infodirs:
11523 if not os.path.isdir(inforoot) or \
11524 not os.access(inforoot, os.W_OK):
11527 file_list = os.listdir(inforoot)
11529 dir_file = os.path.join(inforoot, "dir")
11530 moved_old_dir = False
11531 processed_count = 0
11532 for x in file_list:
# Skip hidden files and subdirectories; existing "dir*" index
# files are recognized below and not fed to install-info.
11533 if x.startswith(".") or \
11534 os.path.isdir(os.path.join(inforoot, x)):
11536 if x.startswith("dir"):
11538 for ext in dir_extensions:
11539 if x == "dir" + ext or \
11540 x == "dir" + ext + ".old":
# Before processing the first info file, move any existing index
# aside so install-info builds a fresh one from scratch.
11545 if processed_count == 0:
11546 for ext in dir_extensions:
11548 os.rename(dir_file + ext, dir_file + ext + ".old")
11549 moved_old_dir = True
11550 except EnvironmentError, e:
# A missing "dir" variant is fine; anything else is a real error.
11551 if e.errno != errno.ENOENT:
11554 processed_count += 1
# LANG=C/LANGUAGE=C so install-info's output is parseable below.
11555 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11556 existsstr="already exists, for file `"
11558 if re.search(existsstr,myso):
11559 # Already exists... Don't increment the count for this.
11561 elif myso[:44]=="install-info: warning: no info dir entry in ":
11562 # This info file doesn't contain a DIR-header: install-info produces this
11563 # (harmless) warning (the --quiet switch doesn't seem to work).
11564 # Don't increment the count for this.
11567 badcount=badcount+1
11568 errmsg += myso + "\n"
11571 if moved_old_dir and not os.path.exists(dir_file):
11572 # We didn't generate a new dir file, so put the old file
11573 # back where it was originally found.
11574 for ext in dir_extensions:
11576 os.rename(dir_file + ext + ".old", dir_file + ext)
11577 except EnvironmentError, e:
11578 if e.errno != errno.ENOENT:
11582 # Clean dir.old cruft so that they don't prevent
11583 # unmerge of otherwise empty directories.
11584 for ext in dir_extensions:
11586 os.unlink(dir_file + ext + ".old")
11587 except EnvironmentError, e:
11588 if e.errno != errno.ENOENT:
11592 #update mtime so we can potentially avoid regenerating.
11593 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
# Summary: report the error count if any errors occurred, otherwise a
# simple success line.
11596 out.eerror("Processed %d info files; %d errors." % \
11597 (icount, badcount))
11598 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11601 out.einfo("Processed %d info files." % (icount,))
11604 def display_news_notification(root_config, myopts):
# Check each repository for unread GLEP 42 news items and print a notice
# pointing the user at "eselect news" when any are found.
# NOTE(review): elided numbered listing (gaps in the left-hand line
# numbers); code kept byte-identical, comments only added.
11605 target_root = root_config.root
11606 trees = root_config.trees
11607 settings = trees["vartree"].settings
11608 portdb = trees["porttree"].dbapi
11609 vardb = trees["vartree"].dbapi
11610 NEWS_PATH = os.path.join("metadata", "news")
11611 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11612 newsReaderDisplay = False
# Only update the persistent unread-items state when not in --pretend mode.
11613 update = "--pretend" not in myopts
11615 for repo in portdb.getRepositories():
11616 unreadItems = checkUpdatedNewsItems(
11617 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
# Print the "IMPORTANT" banner only once, before the first repo line.
11619 if not newsReaderDisplay:
11620 newsReaderDisplay = True
11622 print colorize("WARN", " * IMPORTANT:"),
11623 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11626 if newsReaderDisplay:
11627 print colorize("WARN", " *"),
11628 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
11631 def display_preserved_libs(vardbapi):
# Print the preserved-libs registry: for each package, the preserved
# library files and (up to MAX_DISPLAY) the consumers that still link
# against them, ending with a pointer to @preserved-rebuild.
# NOTE(review): elided numbered listing (gaps in the left-hand line
# numbers); code kept byte-identical, comments only added.
11634 # Ensure the registry is consistent with existing files.
11635 vardbapi.plib_registry.pruneNonExisting()
11637 if vardbapi.plib_registry.hasEntries():
11639 print colorize("WARN", "!!!") + " existing preserved libs:"
11640 plibdata = vardbapi.plib_registry.getPreservedLibs()
11641 linkmap = vardbapi.linkmap
11644 linkmap_broken = False
# Best-effort: if the link map cannot be rebuilt (e.g. scanelf is
# missing), remember that and degrade the output instead of failing.
11648 except portage.exception.CommandNotFound, e:
11649 writemsg_level("!!! Command Not Found: %s\n" % (e,),
11650 level=logging.ERROR, noiselevel=-1)
11652 linkmap_broken = True
# First pass: build consumer_map and the set of consumer paths whose
# owning packages we need to look up.
11654 search_for_owners = set()
11655 for cpv in plibdata:
11656 internal_plib_keys = set(linkmap._obj_key(f) \
11657 for f in plibdata[cpv])
11658 for f in plibdata[cpv]:
11659 if f in consumer_map:
11662 for c in linkmap.findConsumers(f):
11663 # Filter out any consumers that are also preserved libs
11664 # belonging to the same package as the provider.
11665 if linkmap._obj_key(c) not in internal_plib_keys:
11666 consumers.append(c)
11668 consumer_map[f] = consumers
# Keep one extra beyond MAX_DISPLAY so the "== MAX_DISPLAY + 1"
# special case below can name the final consumer.
11669 search_for_owners.update(consumers[:MAX_DISPLAY+1])
11671 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
# Second pass: print per-package, grouping alternate paths that refer
# to the same underlying object (samefile_map).
11673 for cpv in plibdata:
11674 print colorize("WARN", ">>>") + " package: %s" % cpv
11676 for f in plibdata[cpv]:
11677 obj_key = linkmap._obj_key(f)
11678 alt_paths = samefile_map.get(obj_key)
11679 if alt_paths is None:
11681 samefile_map[obj_key] = alt_paths
11684 for alt_paths in samefile_map.itervalues():
11685 alt_paths = sorted(alt_paths)
11686 for p in alt_paths:
11687 print colorize("WARN", " * ") + " - %s" % (p,)
11689 consumers = consumer_map.get(f, [])
11690 for c in consumers[:MAX_DISPLAY]:
11691 print colorize("WARN", " * ") + " used by %s (%s)" % \
11692 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
# Exactly one consumer over the limit: name it rather than saying
# "1 other file"; more than one over: print a count instead.
11693 if len(consumers) == MAX_DISPLAY + 1:
11694 print colorize("WARN", " * ") + " used by %s (%s)" % \
11695 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
11696 for x in owners.get(consumers[MAX_DISPLAY], [])))
11697 elif len(consumers) > MAX_DISPLAY:
11698 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
11699 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11702 def _flush_elog_mod_echo():
# NOTE(review): elided numbered listing; the docstring quotes and the
# enclosing try: for the import are among the elided lines.
11704 Dump the mod_echo output now so that our other
11705 notifications are shown last.
11707 @returns: True if messages were shown, False otherwise.
11709 messages_shown = False
# (elided try:) the import can fail during a downgrade, handled below.
11711 from portage.elog import mod_echo
11712 except ImportError:
11713 pass # happens during downgrade to a version without the module
# Record whether anything was queued before finalize() flushes it.
11715 messages_shown = bool(mod_echo._items)
11716 mod_echo.finalize()
11717 return messages_shown
11719 def post_emerge(root_config, myopts, mtimedb, retval):
# End-of-merge housekeeping: regenerate settings, log the exit status,
# flush elog echo output, regenerate the GNU info index, report config
# files needing updates, show news and preserved-libs notices.
# NOTE(review): elided numbered listing (gaps in the left-hand line
# numbers, including parts of the original docstring); code kept
# byte-identical, comments only added.
11721 Misc. things to run at the end of a merge session.
11724 Update Config Files
11727 Display preserved libs warnings
11730 @param trees: A dictionary mapping each ROOT to it's package databases
11732 @param mtimedb: The mtimeDB to store data needed across merge invocations
11733 @type mtimedb: MtimeDB class instance
11734 @param retval: Emerge's return value
11738 1. Calls sys.exit(retval)
11741 target_root = root_config.root
11742 trees = { target_root : root_config.trees }
11743 vardbapi = trees[target_root]["vartree"].dbapi
11744 settings = vardbapi.settings
11745 info_mtimes = mtimedb["info"]
11747 # Load the most current variables from ${ROOT}/etc/profile.env
11750 settings.regenerate()
11753 config_protect = settings.get("CONFIG_PROTECT","").split()
11754 infodirs = settings.get("INFOPATH","").split(":") + \
11755 settings.get("INFODIR","").split(":")
11759 if retval == os.EX_OK:
11760 exit_msg = " *** exiting successfully."
11762 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11763 emergelog("notitles" not in settings.features, exit_msg)
11765 _flush_elog_mod_echo()
# If the vdb counter hash is unchanged, the installed-package database
# was not modified, so only the news notice needs showing.
11767 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11768 if counter_hash is not None and \
11769 counter_hash == vardbapi._counter_hash():
11770 display_news_notification(root_config, myopts)
11771 # If vdb state has not changed then there's nothing else to do.
11774 vdb_path = os.path.join(target_root, portage.VDB_PATH)
11775 portage.util.ensure_dirs(vdb_path)
# Hold the vdb lock while regenerating the info index.
11777 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11778 vdb_lock = portage.locks.lockdir(vdb_path)
11782 if "noinfo" not in settings.features:
11783 chk_updated_info_files(target_root,
11784 infodirs, info_mtimes, retval)
11788 portage.locks.unlockdir(vdb_lock)
11790 chk_updated_cfg_files(target_root, config_protect)
11792 display_news_notification(root_config, myopts)
11793 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11794 display_preserved_libs(vardbapi)
11799 def chk_updated_cfg_files(target_root, config_protect):
# Scan each CONFIG_PROTECT path for pending "._cfg????_*" update files
# and tell the user how many config files need updating.
# NOTE(review): elided numbered listing (gaps in the left-hand line
# numbers); code kept byte-identical, comments only added.
11801 #number of directories with some protect files in them
11803 for x in config_protect:
11804 x = os.path.join(target_root, x.lstrip(os.path.sep))
11805 if not os.access(x, os.W_OK):
11806 # Avoid Permission denied errors generated
11810 mymode = os.lstat(x).st_mode
11813 if stat.S_ISLNK(mymode):
11814 # We want to treat it like a directory if it
11815 # is a symlink to an existing directory.
11817 real_mode = os.stat(x).st_mode
11818 if stat.S_ISDIR(real_mode):
# Directories get a recursive find (pruning hidden dirs); plain files
# get a maxdepth-1 find for their own ._cfg????_<name> siblings.
11822 if stat.S_ISDIR(mymode):
11823 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11825 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11826 os.path.split(x.rstrip(os.path.sep))
11827 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11828 a = commands.getstatusoutput(mycommand)
11830 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11832 # Show the error message alone, sending stdout to /dev/null.
11833 os.system(mycommand + " 1>/dev/null")
# Output is NUL-separated because of -print0 above.
11835 files = a[1].split('\0')
11836 # split always produces an empty string as the last element
11837 if files and not files[-1]:
11841 print "\n"+colorize("WARN", " * IMPORTANT:"),
11842 if stat.S_ISDIR(mymode):
11843 print "%d config files in '%s' need updating." % \
11846 print "config file '%s' needs updating." % x
11849 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11850 " section of the " + bold("emerge")
11851 print " "+yellow("*")+" man page to learn how to update config files."
11853 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
# Thin wrapper around portage.news.NewsManager.getUnreadItems().
# NOTE(review): elided numbered listing; the signature continuation
# (an update keyword argument, per the call below) and the docstring
# quotes are among the elided lines.
11856 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
11857 Returns the number of unread (yet relevent) items.
11859 @param portdb: a portage tree database
11860 @type portdb: pordbapi
11861 @param vardb: an installed package database
11862 @type vardb: vardbapi
11865 @param UNREAD_PATH:
11871 1. The number of unread but relevant news items.
11874 from portage.news import NewsManager
11875 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
11876 return manager.getUnreadItems( repo_id, update=update )
11878 def insert_category_into_atom(atom, category):
# Insert "category/" immediately before the first alphanumeric character
# of atom, i.e. after any leading version-comparison operators like ">=".
11879 alphanum = re.search(r'\w', atom)
11881 ret = atom[:alphanum.start()] + "%s/" % category + \
11882 atom[alphanum.start():]
# NOTE(review): the no-match branch and the return statement are elided
# in this listing (left-hand line numbers jump 11882 -> 11887).
11887 def is_valid_package_atom(x):
# Validate a package atom that may lack a category.
# NOTE(review): elided numbered listing; the guard around the rewrite
# (presumably "if alphanum:", line 11890) is among the elided lines.
11889 alphanum = re.search(r'\w', x)
# Prepend a dummy "cat/" before the first alphanumeric character so that
# portage.isvalidatom() can validate the rest of the atom syntax.
11891 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11892 return portage.isvalidatom(x)
11894 def show_blocker_docs_link():
# Print a pointer to the Gentoo Handbook section on blocked packages.
# NOTE(review): elided numbered listing; blank-line prints (11895, 11898)
# are among the elided lines.
11896 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11897 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11899 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
def show_mask_docs():
	"""Point the user at documentation covering masked packages."""
	for doc_line in (
		"For more information, see the MASKED PACKAGES section in the emerge",
		"man page or refer to the Gentoo Handbook.",
	):
		# Equivalent to a Python 2 ``print`` statement for a single string:
		# write the line followed by a newline to stdout.
		sys.stdout.write(doc_line + "\n")
11906 def action_sync(settings, trees, mtimedb, myopts, myaction):
# Synchronize the Portage tree (PORTDIR) using the protocol named in the
# SYNC variable: an existing git checkout, rsync://, or cvs://; the
# "metadata" action skips the transfer entirely.  Afterwards the emerge
# config is reloaded, the metadata cache optionally regenerated, and the
# user is notified about pending portage upgrades, config files and news.
# NOTE(review): this is an elided numbered listing (gaps in the left-hand
# original line numbers); code lines kept byte-identical, comments only.
11907 xterm_titles = "notitles" not in settings.features
11908 emergelog(xterm_titles, " === sync")
11909 myportdir = settings.get("PORTDIR", None)
11910 out = portage.output.EOutput()
11912 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
11914 if myportdir[-1]=="/":
11915 myportdir=myportdir[:-1]
11917 st = os.stat(myportdir)
11921 print ">>>",myportdir,"not found, creating it."
11922 os.makedirs(myportdir,0755)
11923 st = os.stat(myportdir)
11926 spawn_kwargs["env"] = settings.environ()
# If running with elevated privileges but PORTDIR is owned by another
# uid/gid, drop privileges for the spawned sync commands to match.
11927 if portage.data.secpass >= 2 and \
11928 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
11929 st.st_gid != os.getgid() and st.st_mode & 0070):
11931 homedir = pwd.getpwuid(st.st_uid).pw_dir
11935 # Drop privileges when syncing, in order to match
11936 # existing uid/gid settings.
11937 spawn_kwargs["uid"] = st.st_uid
11938 spawn_kwargs["gid"] = st.st_gid
11939 spawn_kwargs["groups"] = [st.st_gid]
11940 spawn_kwargs["env"]["HOME"] = homedir
11942 if not st.st_mode & 0020:
11943 umask = umask | 0020
11944 spawn_kwargs["umask"] = umask
11946 syncuri = settings.get("SYNC", "").strip()
11948 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11949 noiselevel=-1, level=logging.ERROR)
# Detect a VCS checkout in PORTDIR so rsync can refuse to clobber it.
11952 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
11953 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
11956 dosyncuri = syncuri
11957 updatecache_flg = False
11958 if myaction == "metadata":
11959 print "skipping sync"
11960 updatecache_flg = True
# --- git branch: PORTDIR is already a git checkout; syncuri is ignored.
11961 elif ".git" in vcs_dirs:
11962 # Update existing git repository, and ignore the syncuri. We are
11963 # going to trust the user and assume that the user is in the branch
11964 # that he/she wants updated. We'll let the user manage branches with
11966 if portage.process.find_binary("git") is None:
11967 msg = ["Command not found: git",
11968 "Type \"emerge dev-util/git\" to enable git support."]
11970 writemsg_level("!!! %s\n" % l,
11971 level=logging.ERROR, noiselevel=-1)
11973 msg = ">>> Starting git pull in %s..." % myportdir
11974 emergelog(xterm_titles, msg )
11975 writemsg_level(msg + "\n")
11976 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
11977 (portage._shell_quote(myportdir),), **spawn_kwargs)
11978 if exitcode != os.EX_OK:
11979 msg = "!!! git pull error in %s." % myportdir
11980 emergelog(xterm_titles, msg)
11981 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
11983 msg = ">>> Git pull in %s successful" % myportdir
11984 emergelog(xterm_titles, msg)
11985 writemsg_level(msg + "\n")
# git discards mtimes, so restore them from the metadata cache.
11986 exitcode = git_sync_timestamps(settings, myportdir)
11987 if exitcode == os.EX_OK:
11988 updatecache_flg = True
# --- rsync branch.
11989 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a working copy of any VCS.
11990 for vcs_dir in vcs_dirs:
11991 writemsg_level(("!!! %s appears to be under revision " + \
11992 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
11993 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
11995 if not os.path.exists("/usr/bin/rsync"):
11996 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11997 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12002 import shlex, StringIO
# Build the rsync option list: hardcoded defaults unless the user set
# PORTAGE_RSYNC_OPTS, in which case required options are validated in.
12003 if settings["PORTAGE_RSYNC_OPTS"] == "":
12004 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12005 rsync_opts.extend([
12006 "--recursive", # Recurse directories
12007 "--links", # Consider symlinks
12008 "--safe-links", # Ignore links outside of tree
12009 "--perms", # Preserve permissions
12010 "--times", # Preserive mod times
12011 "--compress", # Compress the data transmitted
12012 "--force", # Force deletion on non-empty dirs
12013 "--whole-file", # Don't do block transfers, only entire files
12014 "--delete", # Delete files that aren't in the master tree
12015 "--stats", # Show final statistics about what was transfered
12016 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12017 "--exclude=/distfiles", # Exclude distfiles from consideration
12018 "--exclude=/local", # Exclude local from consideration
12019 "--exclude=/packages", # Exclude packages from consideration
12023 # The below validation is not needed when using the above hardcoded
12026 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12027 lexer = shlex.shlex(StringIO.StringIO(
12028 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
12029 lexer.whitespace_split = True
12030 rsync_opts.extend(lexer)
12033 for opt in ("--recursive", "--times"):
12034 if opt not in rsync_opts:
12035 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12036 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12037 rsync_opts.append(opt)
12039 for exclude in ("distfiles", "local", "packages"):
12040 opt = "--exclude=/%s" % exclude
12041 if opt not in rsync_opts:
12042 portage.writemsg(yellow("WARNING:") + \
12043 " adding required option %s not included in " % opt + \
12044 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12045 rsync_opts.append(opt)
# Official gentoo.org mirrors get extra mandatory options.
12047 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12048 def rsync_opt_startswith(opt_prefix):
12049 for x in rsync_opts:
12050 if x.startswith(opt_prefix):
12054 if not rsync_opt_startswith("--timeout="):
12055 rsync_opts.append("--timeout=%d" % mytimeout)
12057 for opt in ("--compress", "--whole-file"):
12058 if opt not in rsync_opts:
12059 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12060 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12061 rsync_opts.append(opt)
12063 if "--quiet" in myopts:
12064 rsync_opts.append("--quiet") # Shut up a lot
12066 rsync_opts.append("--verbose") # Print filelist
12068 if "--verbose" in myopts:
12069 rsync_opts.append("--progress") # Progress meter for each file
12071 if "--debug" in myopts:
12072 rsync_opts.append("--checksum") # Force checksum on all files
12074 # Real local timestamp file.
12075 servertimestampfile = os.path.join(
12076 myportdir, "metadata", "timestamp.chk")
12078 content = portage.util.grabfile(servertimestampfile)
12082 mytimestamp = time.mktime(time.strptime(content[0],
12083 "%a, %d %b %Y %H:%M:%S +0000"))
12084 except (OverflowError, ValueError):
12089 rsync_initial_timeout = \
12090 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12092 rsync_initial_timeout = 15
12095 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12096 except SystemExit, e:
12097 raise # Needed else can't exit
12099 maxretries=3 #default number of retries
# Split the URI into user, host and port so a concrete mirror IP can be
# substituted for the hostname below.
12102 user_name, hostname, port = re.split(
12103 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12106 if user_name is None:
12108 updatecache_flg=True
12109 all_rsync_opts = set(rsync_opts)
12110 lexer = shlex.shlex(StringIO.StringIO(
12111 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
12112 lexer.whitespace_split = True
12113 extra_rsync_opts = list(lexer)
12115 all_rsync_opts.update(extra_rsync_opts)
# Pick the address family; -4/--ipv4 and -6/--ipv6 rsync flags decide.
12116 family = socket.AF_INET
12117 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12118 family = socket.AF_INET
12119 elif socket.has_ipv6 and \
12120 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12121 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
12123 SERVER_OUT_OF_DATE = -1
12124 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname and randomize the address order.
12130 for addrinfo in socket.getaddrinfo(
12131 hostname, None, family, socket.SOCK_STREAM):
12132 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12133 # IPv6 addresses need to be enclosed in square brackets
12134 ips.append("[%s]" % addrinfo[4][0])
12136 ips.append(addrinfo[4][0])
12137 from random import shuffle
12139 except SystemExit, e:
12140 raise # Needed else can't exit
12141 except Exception, e:
12142 print "Notice:",str(e)
12147 dosyncuri = syncuri.replace(
12148 "//" + user_name + hostname + port + "/",
12149 "//" + user_name + ips[0] + port + "/", 1)
12150 except SystemExit, e:
12151 raise # Needed else can't exit
12152 except Exception, e:
12153 print "Notice:",str(e)
12157 if "--ask" in myopts:
12158 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12163 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12164 if "--quiet" not in myopts:
12165 print ">>> Starting rsync with "+dosyncuri+"..."
12167 emergelog(xterm_titles,
12168 ">>> Starting retry %d of %d with %s" % \
12169 (retries,maxretries,dosyncuri))
12170 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12172 if mytimestamp != 0 and "--quiet" not in myopts:
12173 print ">>> Checking server timestamp ..."
12175 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12177 if "--debug" in myopts:
12180 exitcode = os.EX_OK
12181 servertimestamp = 0
12182 # Even if there's no timestamp available locally, fetch the
12183 # timestamp anyway as an initial probe to verify that the server is
12184 # responsive. This protects us from hanging indefinitely on a
12185 # connection attempt to an unresponsive server which rsync's
12186 # --timeout option does not prevent.
12188 # Temporary file for remote server timestamp comparison.
12189 from tempfile import mkstemp
12190 fd, tmpservertimestampfile = mkstemp()
12192 mycommand = rsynccommand[:]
12193 mycommand.append(dosyncuri.rstrip("/") + \
12194 "/metadata/timestamp.chk")
12195 mycommand.append(tmpservertimestampfile)
# SIGALRM-based timeout guards the initial connection attempt.
12199 def timeout_handler(signum, frame):
12200 raise portage.exception.PortageException("timed out")
12201 signal.signal(signal.SIGALRM, timeout_handler)
12202 # Timeout here in case the server is unresponsive. The
12203 # --timeout rsync option doesn't apply to the initial
12204 # connection attempt.
12205 if rsync_initial_timeout:
12206 signal.alarm(rsync_initial_timeout)
12208 mypids.extend(portage.process.spawn(
12209 mycommand, env=settings.environ(), returnpid=True))
12210 exitcode = os.waitpid(mypids[0], 0)[1]
12211 content = portage.grabfile(tmpservertimestampfile)
12213 if rsync_initial_timeout:
12216 os.unlink(tmpservertimestampfile)
12219 except portage.exception.PortageException, e:
# Timed out: kill the probe rsync if it is still running.
12223 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12224 os.kill(mypids[0], signal.SIGTERM)
12225 os.waitpid(mypids[0], 0)
12226 # This is the same code rsync uses for timeout.
# Convert the raw waitpid status into a shell-style exit code.
12229 if exitcode != os.EX_OK:
12230 if exitcode & 0xff:
12231 exitcode = (exitcode & 0xff) << 8
12233 exitcode = exitcode >> 8
12235 portage.process.spawned_pids.remove(mypids[0])
12238 servertimestamp = time.mktime(time.strptime(
12239 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12240 except (OverflowError, ValueError):
12242 del mycommand, mypids, content
# Compare the server timestamp with the local one: equal means already
# current, older means the server is stale, newer triggers the real sync.
12243 if exitcode == os.EX_OK:
12244 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12245 emergelog(xterm_titles,
12246 ">>> Cancelling sync -- Already current.")
12249 print ">>> Timestamps on the server and in the local repository are the same."
12250 print ">>> Cancelling all further sync action. You are already up to date."
12252 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12256 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12257 emergelog(xterm_titles,
12258 ">>> Server out of date: %s" % dosyncuri)
12261 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12263 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12266 exitcode = SERVER_OUT_OF_DATE
12267 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12269 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12270 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12271 if exitcode in [0,1,3,4,11,14,20,21]:
12273 elif exitcode in [1,3,4,11,14,20,21]:
12276 # Code 2 indicates protocol incompatibility, which is expected
12277 # for servers with protocol < 29 that don't support
12278 # --prune-empty-directories. Retry for a server that supports
12279 # at least rsync protocol version 29 (>=rsync-2.6.4).
12284 if retries<=maxretries:
12285 print ">>> Retrying..."
12290 updatecache_flg=False
12291 exitcode = EXCEEDED_MAX_RETRIES
12295 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12296 elif exitcode == SERVER_OUT_OF_DATE:
12298 elif exitcode == EXCEEDED_MAX_RETRIES:
12300 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Diagnostic hints keyed on the rsync exit code (elided branch tests).
12305 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12306 msg.append("that your SYNC statement is proper.")
12307 msg.append("SYNC=" + settings["SYNC"])
12309 msg.append("Rsync has reported that there is a File IO error. Normally")
12310 msg.append("this means your disk is full, but can be caused by corruption")
12311 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12312 msg.append("and try again after the problem has been fixed.")
12313 msg.append("PORTDIR=" + settings["PORTDIR"])
12315 msg.append("Rsync was killed before it finished.")
12317 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12318 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12319 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12320 msg.append("temporary problem unless complications exist with your network")
12321 msg.append("(and possibly your system's filesystem) configuration.")
# --- cvs branch.
12325 elif syncuri[:6]=="cvs://":
12326 if not os.path.exists("/usr/bin/cvs"):
12327 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12328 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12330 cvsroot=syncuri[6:]
12331 cvsdir=os.path.dirname(myportdir)
12332 if not os.path.exists(myportdir+"/CVS"):
# Initial checkout: requires both the gentoo-x86 staging dir and
# PORTDIR itself to be absent/empty.
12334 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12335 if os.path.exists(cvsdir+"/gentoo-x86"):
12336 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12339 os.rmdir(myportdir)
12341 if e.errno != errno.ENOENT:
12343 "!!! existing '%s' directory; exiting.\n" % myportdir)
12346 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12347 print "!!! cvs checkout error; exiting."
12349 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12352 print ">>> Starting cvs update with "+syncuri+"..."
12353 retval = portage.process.spawn_bash(
12354 "cd %s; cvs -z0 -q update -dP" % \
12355 (portage._shell_quote(myportdir),), **spawn_kwargs)
12356 if retval != os.EX_OK:
12358 dosyncuri = syncuri
12360 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12361 noiselevel=-1, level=logging.ERROR)
# --- post-sync housekeeping.
# Cache regeneration is pointless without FEATURES=metadata-transfer.
12364 if updatecache_flg and \
12365 myaction != "metadata" and \
12366 "metadata-transfer" not in settings.features:
12367 updatecache_flg = False
12369 # Reload the whole config from scratch.
12370 settings, trees, mtimedb = load_emerge_config(trees=trees)
12371 root_config = trees[settings["ROOT"]]["root_config"]
12372 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12374 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12375 action_metadata(settings, portdb, myopts)
12377 if portage._global_updates(trees, mtimedb["updates"]):
12379 # Reload the whole config from scratch.
12380 settings, trees, mtimedb = load_emerge_config(trees=trees)
12381 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12382 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version with the installed one so we
# can nag about pending portage upgrades below.
12384 mybestpv = portdb.xmatch("bestmatch-visible",
12385 portage.const.PORTAGE_PACKAGE_ATOM)
12386 mypvs = portage.best(
12387 trees[settings["ROOT"]]["vartree"].dbapi.match(
12388 portage.const.PORTAGE_PACKAGE_ATOM))
12390 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12392 if myaction != "metadata":
# Run the user's post_sync hook if present and executable.
12393 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12394 retval = portage.process.spawn(
12395 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12396 dosyncuri], env=settings.environ())
12397 if retval != os.EX_OK:
12398 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12400 if(mybestpv != mypvs) and not "--quiet" in myopts:
12402 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12403 print red(" * ")+"that you update portage now, before any other packages are updated."
12405 print red(" * ")+"To update portage, run 'emerge portage' now."
12408 display_news_notification(root_config, myopts)
12411 def git_sync_timestamps(settings, portdir):
# NOTE(review): elided numbered listing (gaps in the left-hand line
# numbers, including the docstring quotes); code kept byte-identical,
# comments only added.
12413 Since git doesn't preserve timestamps, synchronize timestamps between
12414 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12415 for a given file as long as the file in the working tree is not modified
12416 (relative to HEAD).
12418 cache_dir = os.path.join(portdir, "metadata", "cache")
12419 if not os.path.isdir(cache_dir):
12421 writemsg_level(">>> Synchronizing timestamps...\n")
12423 from portage.cache.cache_errors import CacheError
12425 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12426 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12427 except CacheError, e:
12428 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12429 level=logging.ERROR, noiselevel=-1)
12432 ec_dir = os.path.join(portdir, "eclass")
12434 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12435 if f.endswith(".eclass"))
12437 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12438 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified; their cached mtimes
# cannot be trusted and the corresponding entries are skipped below.
12441 args = [portage.const.BASH_BINARY, "-c",
12442 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12443 portage._shell_quote(portdir)]
12445 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12446 modified_files = set(l.rstrip("\n") for l in proc.stdout)
12448 if rval != os.EX_OK:
12451 modified_eclasses = set(ec for ec in ec_names \
12452 if os.path.join("eclass", ec + ".eclass") in modified_files)
12454 updated_ec_mtimes = {}
# Walk every cache entry, validating it and restoring file mtimes from
# the cached _mtime_/_eclasses_ data where consistent.
12456 for cpv in cache_db:
12457 cpv_split = portage.catpkgsplit(cpv)
12458 if cpv_split is None:
12459 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12460 level=logging.ERROR, noiselevel=-1)
12463 cat, pn, ver, rev = cpv_split
12464 cat, pf = portage.catsplit(cpv)
12465 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
12466 if relative_eb_path in modified_files:
12470 cache_entry = cache_db[cpv]
12471 eb_mtime = cache_entry.get("_mtime_")
12472 ec_mtimes = cache_entry.get("_eclasses_")
12474 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12475 level=logging.ERROR, noiselevel=-1)
12477 except CacheError, e:
12478 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12479 (cpv, e), level=logging.ERROR, noiselevel=-1)
12482 if eb_mtime is None:
12483 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12484 level=logging.ERROR, noiselevel=-1)
12488 eb_mtime = long(eb_mtime)
12490 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12491 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12494 if ec_mtimes is None:
12495 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
12496 level=logging.ERROR, noiselevel=-1)
# Skip entries that inherit a locally-modified eclass.
12499 if modified_eclasses.intersection(ec_mtimes):
12502 missing_eclasses = set(ec_mtimes).difference(ec_names)
12503 if missing_eclasses:
12504 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
12505 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
12509 eb_path = os.path.join(portdir, relative_eb_path)
12511 current_eb_mtime = os.stat(eb_path)
12513 writemsg_level("!!! Missing ebuild: %s\n" % \
12514 (cpv,), level=logging.ERROR, noiselevel=-1)
# An eclass's mtime must agree across all cache entries that use it;
# conflicting values make the entry inconsistent and it is skipped.
12517 inconsistent = False
12518 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12519 updated_mtime = updated_ec_mtimes.get(ec)
12520 if updated_mtime is not None and updated_mtime != ec_mtime:
12521 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
12522 (cpv, ec), level=logging.ERROR, noiselevel=-1)
12523 inconsistent = True
# Apply the cached mtimes to the ebuild and to each eclass (once).
12529 if current_eb_mtime != eb_mtime:
12530 os.utime(eb_path, (eb_mtime, eb_mtime))
12532 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12533 if ec in updated_ec_mtimes:
12535 ec_path = os.path.join(ec_dir, ec + ".eclass")
12536 current_mtime = long(os.stat(ec_path).st_mtime)
12537 if current_mtime != ec_mtime:
12538 os.utime(ec_path, (ec_mtime, ec_mtime))
12539 updated_ec_mtimes[ec] = ec_mtime
def action_metadata(settings, portdb, myopts):
	"""Transfer the pregenerated metadata/cache into Portage's auxdb
	(`emerge --metadata`).

	NOTE(review): this excerpt omits lines from the original file; some
	branch bodies and yield statements below are elided fragments.
	"""
	portage.writemsg_stdout("\n>>> Updating Portage cache: ")
	# Make cache files group-writable while we work; restored at the end.
	old_umask = os.umask(0002)
	cachedir = os.path.normpath(settings.depcachedir)
	# Loudly warn if PORTAGE_DEPCACHEDIR points at a primary system dir.
	if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
		"/lib", "/opt", "/proc", "/root", "/sbin",
		"/sys", "/tmp", "/usr", "/var"]:
		print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
			"ROOT DIRECTORY ON YOUR SYSTEM."
		print >> sys.stderr, \
			"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
	if not os.path.exists(cachedir):
		# (body elided in this excerpt)
	ec = portage.eclass_cache.cache(portdb.porttree_root)
	myportdir = os.path.realpath(settings["PORTDIR"])
	# Source cache: the rsync-distributed metadata/cache tree, read-only.
	cm = settings.load_best_module("portdbapi.metadbmodule")(
		myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	from portage.cache import util

	class percentage_noise_maker(util.quiet_mirroring):
		# Progress reporter: prints a percentage while mirror_cache runs.
		def __init__(self, dbapi):
			self.cp_all = dbapi.cp_all()
			l = len(self.cp_all)
			self.call_update_min = 100000000
			# 1% of the total number of cat/pkg entries.
			self.min_cp_all = l/100.0

		def __iter__(self):
			for x in self.cp_all:
				if self.count > self.min_cp_all:
					self.call_update_min = 0
				for y in self.dbapi.cp_list(x):
					# (yield elided in this excerpt)
				self.call_update_mine = 0

		def update(self, *arg):
			# Advance the printed percentage, backspacing over the old value.
			try: self.pstr = int(self.pstr) + 1
			except ValueError: self.pstr = 1
			sys.stdout.write("%s%i%%" % \
				("\b" * (len(str(self.pstr))+1), self.pstr))
			self.call_update_min = 10000000

		def finish(self, *arg):
			sys.stdout.write("\b\b\b\b100%\n")

	if "--quiet" in myopts:
		# Quiet mode: plain cpv generator, no progress output.
		def quicky_cpv_generator(cp_all_list):
			for x in cp_all_list:
				for y in portdb.cp_list(x):
					# (yield elided in this excerpt)
		source = quicky_cpv_generator(portdb.cp_all())
		noise_maker = portage.cache.util.quiet_mirroring()
	# NOTE(review): the `else:` introducing the non-quiet branch is elided
	# here; in the original the next line belongs to that branch.
	noise_maker = source = percentage_noise_maker(portdb)
	portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
		eclass_cache=ec, verbose_instance=noise_maker)
	os.umask(old_umask)
def action_regen(settings, portdb, max_jobs, max_load):
	"""Regenerate ebuild metadata cache entries (`emerge --regen`).

	NOTE(review): this excerpt omits lines from the original file (the
	`try:` line guarding the stdin close is missing here).
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === regen")
	#regenerate cache entries
	portage.writemsg_stdout("Regenerating cache entries...\n")
	# Detach from stdin; SystemExit must still propagate so that the
	# process can actually exit.
	os.close(sys.stdin.fileno())
	except SystemExit, e:
		raise # Needed else can't exit
	# MetadataRegen drives parallel `ebuild depend` jobs, throttled by
	# max_jobs / max_load.
	regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
	portage.writemsg_stdout("done!\n")
def action_config(settings, trees, myopts, myfiles):
	"""Run the pkg_config phase of a single installed package
	(`emerge --config <atom>`).

	NOTE(review): this excerpt omits lines from the original file; several
	statements below are fragments (a dangling `except`, elided loop
	headers and early returns).
	"""
	if len(myfiles) != 1:
		print red("!!! config can only take a single package atom at this time\n")
	if not is_valid_package_atom(myfiles[0]):
		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
	# Match against installed packages only (vartree).
	pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
	except portage.exception.AmbiguousPackageName, e:
		# Multiple matches thrown from cpv_expand
	print "No packages found.\n"
	elif len(pkgs) > 1:
		# More than one installed version matched: ask interactively
		# with --ask, otherwise just list candidates.
		if "--ask" in myopts:
			print "Please select a package to configure:"
			options.append(str(idx))
			print options[-1]+") "+pkg
			options.append("X")
			idx = userquery("Selection?", options)
			pkg = pkgs[int(idx)-1]
		print "The following packages available:"
		print "\nPlease use a specific atom or the --ask option."
	if "--ask" in myopts:
		if userquery("Ready to configure "+pkg+"?") == "No":
	print "Configuring pkg..."
	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
	mysettings = portage.config(clone=settings)
	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	# NOTE(review): the debug kwarg below compares a string to int 1, which
	# is always False in Python 2; the `debug` local computed above uses
	# == "1" — this looks like it should be `debug=debug`. TODO confirm.
	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
		debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
	if retval == os.EX_OK:
		# Clean up the temporary build dir after a successful config phase.
		portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
			mysettings, debug=debug, mydbapi=vardb, tree="vartree")
def action_info(settings, trees, myopts, myfiles):
	"""Print system configuration info (`emerge --info`), optionally
	followed by per-package build-time settings for any atoms given.

	NOTE(review): this excerpt omits lines from the original file; several
	loop headers, try/except lines and variable initializations are elided.
	"""
	print getportageversion(settings["PORTDIR"], settings["ROOT"],
		settings.profile_path, settings["CHOST"],
		trees[settings["ROOT"]]["vartree"].dbapi)
	header_title = "System Settings"
	print header_width * "="
	# Center the title within the header rule.
	print header_title.rjust(int(header_width/2 + len(header_title)/2))
	print header_width * "="
	print "System uname: "+platform.platform(aliased=1)
	lastSync = portage.grabfile(os.path.join(
		settings["PORTDIR"], "metadata", "timestamp.chk"))
	print "Timestamp of tree:",
	# Report distcc/ccache versions and whether the features are enabled.
	output=commands.getstatusoutput("distcc --version")
	print str(output[1].split("\n",1)[0]),
	if "distcc" in settings.features:
	output=commands.getstatusoutput("ccache -V")
	print str(output[1].split("\n",1)[0]),
	if "ccache" in settings.features:
	# Toolchain packages to report versions for, plus profile additions.
	myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
		"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
	myvars = portage.util.unique_array(myvars)
	if portage.isvalidatom(x):
		pkg_matches = trees["/"]["vartree"].dbapi.match(x)
		# Strip the category; keep (pn, ver, rev) for sorting/printing.
		pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
		pkg_matches.sort(portage.pkgcmp)
		for pn, ver, rev in pkg_matches:
			pkgs.append(ver + "-" + rev)
		pkgs = ", ".join(pkgs)
		print "%-20s %s" % (x+":", pkgs)
		print "%-20s %s" % (x+":", "[NOT VALID]")
	libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
	# --verbose dumps every config variable; otherwise a curated list
	# plus profile-declared info_vars.
	if "--verbose" in myopts:
		myvars=settings.keys()
		myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
			'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
			'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
			'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
		myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
	myvars = portage.util.unique_array(myvars)
	print '%s="%s"' % (x, settings[x])
	# Fold USE_EXPAND-prefixed flags out of the plain USE display.
	use = set(settings["USE"].split())
	use_expand = settings["USE_EXPAND"].split()
	for varname in use_expand:
		flag_prefix = varname.lower() + "_"
		for f in list(use):
			if f.startswith(flag_prefix):
	print 'USE="%s"' % " ".join(use),
	for varname in use_expand:
		myval = settings.get(varname)
		print '%s="%s"' % (varname, myval),
	unset_vars.append(x)
	print "Unset: "+", ".join(unset_vars)
	if "--debug" in myopts:
		# Dump CVS id strings of all portage submodules.
		for x in dir(portage):
			module = getattr(portage, x)
			if "cvs_id_string" in dir(module):
				print "%s: %s" % (str(x), str(module.cvs_id_string))
	# See if we can find any packages installed matching the strings
	# passed on the command line
	vardb = trees[settings["ROOT"]]["vartree"].dbapi
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
	mypkgs.extend(vardb.match(x))
	# If some packages were found...
	# Get our global settings (we only print stuff if it varies from
	# the current config)
	mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
	auxkeys = mydesiredvars + [ "USE", "IUSE"]
	pkgsettings = portage.config(clone=settings)
	for myvar in mydesiredvars:
		global_vals[myvar] = set(settings.get(myvar, "").split())
	# Loop through each package
	# Only print settings if they differ from global settings
	header_title = "Package Settings"
	print header_width * "="
	print header_title.rjust(int(header_width/2 + len(header_title)/2))
	print header_width * "="
	from portage.output import EOutput
	# Get all package specific variables
	auxvalues = vardb.aux_get(pkg, auxkeys)
	for i in xrange(len(auxkeys)):
		valuesmap[auxkeys[i]] = set(auxvalues[i].split())
	for myvar in mydesiredvars:
		# If the package variable doesn't match the
		# current global variable, something has changed
		# so set diff_found so we know to print
		if valuesmap[myvar] != global_vals[myvar]:
			diff_values[myvar] = valuesmap[myvar]
	valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
	valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
	pkgsettings.reset()
	# If a matching ebuild is no longer available in the tree, maybe it
	# would make sense to compare against the flags for the best
	# available version with the same slot?
	if portdb.cpv_exists(pkg):
		pkgsettings.setcpv(pkg, mydb=mydb)
		if valuesmap["IUSE"].intersection(
			pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
			diff_values["USE"] = valuesmap["USE"]
	# If a difference was found, print the info for
	# Print package info
	print "%s was built with the following:" % pkg
	for myvar in mydesiredvars + ["USE"]:
		if myvar in diff_values:
			mylist = list(diff_values[myvar])
			print "%s=\"%s\"" % (myvar, " ".join(mylist))
	print ">>> Attempting to run pkg_info() for '%s'" % pkg
	ebuildpath = vardb.findname(pkg)
	if not ebuildpath or not os.path.exists(ebuildpath):
		out.ewarn("No ebuild found for '%s'" % pkg)
	# NOTE(review): same str-vs-int `== 1` comparison as in action_config;
	# likely intended to be == "1". TODO confirm.
	portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
		pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
def action_search(root_config, myopts, myfiles, spinner):
	"""Run `emerge --search` for each term in myfiles.

	NOTE(review): this excerpt omits lines from the original file (the
	empty-terms guard and the `try:` around execute() are elided).
	"""
	print "emerge: no search terms provided."
	# Search options are derived directly from the command-line flags.
	searchinstance = search(root_config,
		spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	for mysearch in myfiles:
		searchinstance.execute(mysearch)
		except re.error, comment:
			# Search terms are treated as regular expressions; report
			# malformed patterns instead of crashing.
			print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
		searchinstance.output()
def action_depclean(settings, trees, ldpath_mtimes,
	myopts, action, myfiles, spinner):
	"""Remove installed packages not required by the world/system sets
	(`emerge --depclean`) or prune redundant versions (`emerge --prune`).

	NOTE(review): this excerpt omits many lines from the original file;
	dangling try/except fragments, orphaned conditions and elided loop
	headers appear throughout.
	"""
	# Kill packages that aren't explicitly merged or are required as a
	# dependency of another package. World file is explicit.

	# Global depclean or prune operations are not very safe when there are
	# missing dependencies since it's unknown how badly incomplete
	# the dependency graph is, and we might accidentally remove packages
	# that should have been pulled into the graph. On the other hand, it's
	# relatively safe to ignore missing deps when only asked to remove
	# specific packages.
	allow_missing_deps = len(myfiles) > 0

	# Safety-advice banner shown before a full depclean.
	msg.append("Always study the list of packages to be cleaned for any obvious\n")
	msg.append("mistakes. Packages that are part of the world set will always\n")
	msg.append("be kept. They can be manually added to this set with\n")
	msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
	msg.append("package.provided (see portage(5)) will be removed by\n")
	msg.append("depclean, even if they are part of the world set.\n")
	msg.append("As a safety measure, depclean will not remove any packages\n")
	msg.append("unless *all* required dependencies have been resolved. As a\n")
	msg.append("consequence, it is often necessary to run %s\n" % \
		good("`emerge --update"))
	msg.append(good("--newuse --deep @system @world`") + \
		" prior to depclean.\n")

	if action == "depclean" and "--quiet" not in myopts and not myfiles:
		portage.writemsg_stdout("\n")
		# (the `for x in msg:` header is elided in this excerpt)
		portage.writemsg_stdout(colorize("WARN", " * ") + x)

	xterm_titles = "notitles" not in settings.features
	myroot = settings["ROOT"]
	root_config = trees[myroot]["root_config"]
	getSetAtoms = root_config.setconfig.getSetAtoms
	vardb = trees[myroot]["vartree"].dbapi

	# Everything reachable from these sets must be kept installed.
	required_set_names = ("system", "world")
	for s in required_set_names:
		required_sets[s] = InternalPackageSet(
			initial_atoms=getSetAtoms(s))

	# When removing packages, use a temporary version of world
	# which excludes packages that are intended to be eligible for
	world_temp_set = required_sets["world"]
	system_set = required_sets["system"]

	# Warn hard (with a countdown unless --pretend) when either set is
	# empty, since proceeding could break the installation.
	if not system_set or not world_temp_set:
		writemsg_level("!!! You have no system list.\n",
			level=logging.ERROR, noiselevel=-1)
		if not world_temp_set:
			writemsg_level("!!! You have no world file.\n",
				level=logging.WARNING, noiselevel=-1)
		writemsg_level("!!! Proceeding is likely to " + \
			"break your installation.\n",
			level=logging.WARNING, noiselevel=-1)
		if "--pretend" not in myopts:
			countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")

	if action == "depclean":
		emergelog(xterm_titles, " >>> depclean")

	# Expand the command-line atoms into args_set.
	args_set = InternalPackageSet()
	if not is_valid_package_atom(x):
		writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
			level=logging.ERROR, noiselevel=-1)
		writemsg_level("!!! Please check ebuild(5) for full details.\n")
	atom = portage.dep_expand(x, mydb=vardb, settings=settings)
	except portage.exception.AmbiguousPackageName, e:
		msg = "The short ebuild name \"" + x + \
			"\" is ambiguous. Please specify " + \
			"one of the following " + \
			"fully-qualified ebuild names instead:"
		for line in textwrap.wrap(msg, 70):
			writemsg_level("!!! %s\n" % (line,),
				level=logging.ERROR, noiselevel=-1)
			writemsg_level(" %s\n" % colorize("INFORM", i),
				level=logging.ERROR, noiselevel=-1)
		writemsg_level("\n", level=logging.ERROR, noiselevel=-1)

	matched_packages = False
	matched_packages = True
	if not matched_packages:
		writemsg_level(">>> No packages selected for removal by %s\n" % \

	writemsg_level("\nCalculating dependencies ")
	# Build a removal-mode dependency graph over installed packages.
	resolver_params = create_depgraph_params(myopts, "remove")
	resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
	vardb = resolver.trees[myroot]["vartree"].dbapi

	if action == "depclean":
		# Pull in everything that's installed but not matched
		# by an argument atom since we don't want to clean any
		# package if something depends on it.
		world_temp_set.clear()
		if args_set.findAtomForPackage(pkg) is None:
			world_temp_set.add("=" + pkg.cpv)
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(pkg,
				pkg.metadata["PROVIDE"], str(e))
			world_temp_set.add("=" + pkg.cpv)
	elif action == "prune":
		# Pull in everything that's installed since we don't
		# to prune a package if something depends on it.
		world_temp_set.clear()
		world_temp_set.update(vardb.cp_all())

		# Try to prune everything that's slotted.
		for cp in vardb.cp_all():
			if len(vardb.cp_list(cp)) > 1:

		# Remove atoms from world that match installed packages
		# that are also matched by argument atoms, but do not remove
		# them if they match the highest installed version.
		pkgs_for_cp = vardb.match_pkgs(pkg.cp)
		if not pkgs_for_cp or pkg not in pkgs_for_cp:
			raise AssertionError("package expected in matches: " + \
				"cp = %s, cpv = %s matches = %s" % \
				(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))

		highest_version = pkgs_for_cp[-1]
		if pkg == highest_version:
			# pkg is the highest version
			world_temp_set.add("=" + pkg.cpv)

		if len(pkgs_for_cp) <= 1:
			raise AssertionError("more packages expected: " + \
				"cp = %s, cpv = %s matches = %s" % \
				(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))

		if args_set.findAtomForPackage(pkg) is None:
			world_temp_set.add("=" + pkg.cpv)
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(pkg,
				pkg.metadata["PROVIDE"], str(e))
			world_temp_set.add("=" + pkg.cpv)

	# Seed the resolver with the (possibly adjusted) required sets.
	for s, package_set in required_sets.iteritems():
		set_atom = SETPREFIX + s
		set_arg = SetArg(arg=set_atom, set=package_set,
			root_config=resolver.roots[myroot])
		set_args[s] = set_arg
		for atom in set_arg.set:
			resolver._dep_stack.append(
				Dependency(atom=atom, root=myroot, parent=set_arg))
		resolver.digraph.add(set_arg, None)

	success = resolver._complete_graph()
	writemsg_level("\b\b... done!\n")
	resolver.display_problems()

	def unresolved_deps():
		# Report hard (non-SOFT) deps that could not be satisfied; unless
		# specific atoms were given, such breakage aborts the operation.
		unresolvable = set()
		for dep in resolver._initially_unsatisfied_deps:
			if isinstance(dep.parent, Package) and \
				(dep.priority > UnmergeDepPriority.SOFT):
				unresolvable.add((dep.atom, dep.parent.cpv))
		if not unresolvable:
		if unresolvable and not allow_missing_deps:
			prefix = bad(" * ")
			msg.append("Dependencies could not be completely resolved due to")
			msg.append("the following required packages not being installed:")
			for atom, parent in unresolvable:
				msg.append(" %s pulled in by:" % (atom,))
				msg.append(" %s" % (parent,))
			msg.append("Have you forgotten to run " + \
				good("`emerge --update --newuse --deep @system @world`") + " prior")
			msg.append(("to %s? It may be necessary to manually " + \
				"uninstall packages that no longer") % action)
			msg.append("exist in the portage tree since " + \
				"it may not be possible to satisfy their")
			msg.append("dependencies. Also, be aware of " + \
				"the --with-bdeps option that is documented")
			msg.append("in " + good("`man emerge`") + ".")
			if action == "prune":
				msg.append("If you would like to ignore " + \
					"dependencies then use %s." % good("--nodeps"))
			writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
				level=logging.ERROR, noiselevel=-1)

	if unresolved_deps():

	graph = resolver.digraph.copy()
	# Count the packages that must stay installed.
	required_pkgs_total = 0
	if isinstance(node, Package):
		required_pkgs_total += 1

	def show_parents(child_node):
		# Print the reverse dependencies that keep child_node installed.
		parent_nodes = graph.parent_nodes(child_node)
		if not parent_nodes:
			# With --prune, the highest version can be pulled in without any
			# real parent since all installed packages are pulled in. In that
			# case there's nothing to show here.
		for node in parent_nodes:
			parent_strs.append(str(getattr(node, "cpv", node)))
		msg.append(" %s pulled in by:\n" % (child_node.cpv,))
		for parent_str in parent_strs:
			msg.append(" %s\n" % (parent_str,))
		portage.writemsg_stdout("".join(msg), noiselevel=-1)

	def create_cleanlist():
		# Installed packages absent from the keep-graph are removal
		# candidates; --verbose shows why the rest are kept.
		pkgs_to_remove = []
		if action == "depclean":
			arg_atom = args_set.findAtomForPackage(pkg)
			except portage.exception.InvalidDependString:
				# this error has already been displayed by now
			if pkg not in graph:
				pkgs_to_remove.append(pkg)
			elif "--verbose" in myopts:
			if pkg not in graph:
				pkgs_to_remove.append(pkg)
			elif "--verbose" in myopts:
		elif action == "prune":
			# Prune really uses all installed instead of world. It's not
			# a real reverse dependency so don't display it as such.
			graph.remove(set_args["world"])
			for atom in args_set:
				for pkg in vardb.match_pkgs(atom):
					if pkg not in graph:
						pkgs_to_remove.append(pkg)
					elif "--verbose" in myopts:
		if not pkgs_to_remove:
			">>> No packages selected for removal by %s\n" % action)
			if "--verbose" not in myopts:
				">>> To see reverse dependencies, use %s\n" % \
			if action == "prune":
				">>> To ignore dependencies, use %s\n" % \
		return pkgs_to_remove

	cleanlist = create_cleanlist()

	clean_set = set(cleanlist)

	# Check if any of these package are the sole providers of libraries
	# with consumers that have not been selected for removal. If so, these
	# packages and any dependencies need to be added to the graph.
	real_vardb = trees[myroot]["vartree"].dbapi
	linkmap = real_vardb.linkmap
	liblist = linkmap.listLibraryObjects()
	consumer_cache = {}
	provider_cache = {}
	writemsg_level(">>> Checking for lib consumers...\n")

	for pkg in cleanlist:
		pkg_dblink = real_vardb._dblink(pkg.cpv)
		# Libraries owned (installed) by this removal candidate.
		provided_libs = set()
		for lib in liblist:
			if pkg_dblink.isowner(lib, myroot):
				provided_libs.add(lib)
		if not provided_libs:
		# Map each provided lib to its consumers (memoized).
		for lib in provided_libs:
			lib_consumers = consumer_cache.get(lib)
			if lib_consumers is None:
				lib_consumers = linkmap.findConsumers(lib)
				consumer_cache[lib] = lib_consumers
			consumers[lib] = lib_consumers
		# Drop consumers that belong to the same package being removed.
		for lib, lib_consumers in consumers.items():
			for consumer_file in list(lib_consumers):
				if pkg_dblink.isowner(consumer_file, myroot):
					lib_consumers.remove(consumer_file)
			if not lib_consumers:
		for lib, lib_consumers in consumers.iteritems():
			soname = soname_cache.get(lib)
			soname = linkmap.getSoname(lib)
			soname_cache[lib] = soname
			consumer_providers = []
			for lib_consumer in lib_consumers:
				providers = provider_cache.get(lib)
				if providers is None:
					providers = linkmap.findProviders(lib_consumer)
					provider_cache[lib_consumer] = providers
				if soname not in providers:
					# Why does this happen?
				consumer_providers.append(
					(lib_consumer, providers[soname]))
			consumers[lib] = consumer_providers
		consumer_map[pkg] = consumers

	# Resolve consumer/provider file paths to owning packages in one pass.
	search_files = set()
	for consumers in consumer_map.itervalues():
		for lib, consumer_providers in consumers.iteritems():
			for lib_consumer, providers in consumer_providers:
				search_files.add(lib_consumer)
				search_files.update(providers)
	writemsg_level(">>> Assigning files to packages...\n")
	file_owners = real_vardb._owners.getFileOwnerMap(search_files)

	for pkg, consumers in consumer_map.items():
		for lib, consumer_providers in consumers.items():
			lib_consumers = set()
			for lib_consumer, providers in consumer_providers:
				owner_set = file_owners.get(lib_consumer)
				provider_dblinks = set()
				provider_pkgs = set()
				if len(providers) > 1:
					for provider in providers:
						provider_set = file_owners.get(provider)
						if provider_set is not None:
							provider_dblinks.update(provider_set)
				# Keep only alternative providers that are NOT scheduled
				# for removal themselves.
				if len(provider_dblinks) > 1:
					for provider_dblink in provider_dblinks:
						pkg_key = ("installed", myroot,
							provider_dblink.mycpv, "nomerge")
						if pkg_key not in clean_set:
							provider_pkgs.add(vardb.get(pkg_key))
				if owner_set is not None:
					lib_consumers.update(owner_set)
			# Consumers that are themselves being removed don't count.
			for consumer_dblink in list(lib_consumers):
				if ("installed", myroot, consumer_dblink.mycpv,
					"nomerge") in clean_set:
					lib_consumers.remove(consumer_dblink)
			consumers[lib] = lib_consumers
		del consumer_map[pkg]

	# TODO: Implement a package set for rebuilding consumer packages.

	msg = "In order to avoid breakage of link level " + \
		"dependencies, one or more packages will not be removed. " + \
		"This can be solved by rebuilding " + \
		"the packages that pulled them in."
	prefix = bad(" * ")
	from textwrap import wrap
	writemsg_level("".join(prefix + "%s\n" % line for \
		line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)

	for pkg, consumers in consumer_map.iteritems():
		unique_consumers = set(chain(*consumers.values()))
		unique_consumers = sorted(consumer.mycpv \
			for consumer in unique_consumers)
		msg.append(" %s pulled in by:" % (pkg.cpv,))
		for consumer in unique_consumers:
			msg.append(" %s" % (consumer,))
	writemsg_level("".join(prefix + "%s\n" % line for line in msg),
		level=logging.WARNING, noiselevel=-1)

	# Add lib providers to the graph as children of lib consumers,
	# and also add any dependencies pulled in by the provider.
	writemsg_level(">>> Adding lib providers to graph...\n")

	for pkg, consumers in consumer_map.iteritems():
		for consumer_dblink in set(chain(*consumers.values())):
			consumer_pkg = vardb.get(("installed", myroot,
				consumer_dblink.mycpv, "nomerge"))
			if not resolver._add_pkg(pkg,
				Dependency(parent=consumer_pkg,
					priority=UnmergeDepPriority(runtime=True),
				resolver.display_problems()

	# Re-complete the graph now that lib providers were added, and
	# recompute the clean list.
	writemsg_level("\nCalculating dependencies ")
	success = resolver._complete_graph()
	writemsg_level("\b\b... done!\n")
	resolver.display_problems()

	if unresolved_deps():

	graph = resolver.digraph.copy()
	required_pkgs_total = 0
	if isinstance(node, Package):
		required_pkgs_total += 1
	cleanlist = create_cleanlist()

	clean_set = set(cleanlist)

	# Use a topological sort to create an unmerge order such that
	# each package is unmerged before it's dependencies. This is
	# necessary to avoid breaking things that may need to run
	# during pkg_prerm or pkg_postrm phases.

	# Create a new graph to account for dependencies between the
	# packages being unmerged.

	dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	runtime = UnmergeDepPriority(runtime=True)
	runtime_post = UnmergeDepPriority(runtime_post=True)
	buildtime = UnmergeDepPriority(buildtime=True)
	# (priority_map dict opener elided in this excerpt)
		"RDEPEND": runtime,
		"PDEPEND": runtime_post,
		"DEPEND": buildtime,

	for node in clean_set:
		graph.add(node, None)
		node_use = node.metadata["USE"].split()
		for dep_type in dep_keys:
			depstr = node.metadata[dep_type]
			# Strict checking is relaxed while evaluating deps of packages
			# being uninstalled, then restored.
			portage.dep._dep_check_strict = False
			success, atoms = portage.dep_check(depstr, None, settings,
				myuse=node_use, trees=resolver._graph_trees,
			portage.dep._dep_check_strict = True
			# Ignore invalid deps of packages that will
			# be uninstalled anyway.
			priority = priority_map[dep_type]
			if not isinstance(atom, portage.dep.Atom):
				# Ignore invalid atoms returned from dep_check().
			matches = vardb.match_pkgs(atom)
			for child_node in matches:
				if child_node in clean_set:
					graph.add(child_node, node, priority=priority)

	if len(graph.order) == len(graph.root_nodes()):
		# If there are no dependencies between packages
		# let unmerge() group them by cat/pn.
		cleanlist = [pkg.cpv for pkg in graph.order]

	# Order nodes from lowest to highest overall reference count for
	# optimal root node selection.
	node_refcounts = {}
	for node in graph.order:
		node_refcounts[node] = len(graph.parent_nodes(node))
	def cmp_reference_count(node1, node2):
		return node_refcounts[node1] - node_refcounts[node2]
	graph.order.sort(cmp_reference_count)

	# Pop root nodes, relaxing ignored priorities progressively so that
	# circular dependencies cannot stall the sort.
	ignore_priority_range = [None]
	ignore_priority_range.extend(
		xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
	while not graph.empty():
		for ignore_priority in ignore_priority_range:
			nodes = graph.root_nodes(ignore_priority=ignore_priority)
			raise AssertionError("no root nodes")
		if ignore_priority is not None:
			# Some deps have been dropped due to circular dependencies,
			# so only pop one node in order do minimize the number that
		cleanlist.append(node.cpv)

	unmerge(root_config, myopts, "unmerge", cleanlist,
		ldpath_mtimes, ordered=ordered)

	if action == "prune":

	if not cleanlist and "--quiet" in myopts:

	# Final summary.
	print "Packages installed: "+str(len(vardb.cpv_all()))
	print "Packages in world: " + \
		str(len(root_config.sets["world"].getAtoms()))
	print "Packages in system: " + \
		str(len(root_config.sets["system"].getAtoms()))
	print "Required packages: "+str(required_pkgs_total)
	if "--pretend" in myopts:
		print "Number to remove: "+str(len(cleanlist))
		print "Number removed: "+str(len(cleanlist))
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
	skip_masked=False, skip_unsatisfied=False):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	@returns: (success, depgraph, dropped_tasks)

	NOTE(review): this excerpt omits lines from the original file (the
	retry loop and some continue/break statements are elided).
	"""
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	mydepgraph = depgraph(settings, trees,
		myopts, myparams, spinner)
	success = mydepgraph.loadResumeCommand(mtimedb["resume"],
		skip_masked=skip_masked)
	except depgraph.UnsatisfiedResumeDep, e:
		if not skip_unsatisfied:

		graph = mydepgraph.digraph
		unsatisfied_parents = dict((dep.parent, dep.parent) \
			for dep in e.value)
		# Walk up from each unsatisfied parent, collecting every ancestor
		# whose dependency would in turn become unsatisfied.
		traversed_nodes = set()
		unsatisfied_stack = list(unsatisfied_parents)
		while unsatisfied_stack:
			pkg = unsatisfied_stack.pop()
			if pkg in traversed_nodes:
			traversed_nodes.add(pkg)

			# If this package was pulled in by a parent
			# package scheduled for merge, removing this
			# package may cause the the parent package's
			# dependency to become unsatisfied.
			for parent_node in graph.parent_nodes(pkg):
				if not isinstance(parent_node, Package) \
					or parent_node.operation not in ("merge", "nomerge"):
				graph.child_nodes(parent_node,
					ignore_priority=DepPriority.SOFT)
				if pkg in unsatisfied:
					unsatisfied_parents[parent_node] = parent_node
					unsatisfied_stack.append(parent_node)

		# Drop every mergelist entry whose package became unsatisfied.
		pruned_mergelist = [x for x in mergelist \
			if isinstance(x, list) and \
			tuple(x) not in unsatisfied_parents]

		# If the mergelist doesn't shrink then this loop is infinite.
		if len(pruned_mergelist) == len(mergelist):
			# This happens if a package can't be dropped because
			# it's already installed, but it has unsatisfied PDEPEND.
		mergelist[:] = pruned_mergelist

		# Exclude installed packages that have been removed from the graph due
		# to failure to build/install runtime dependencies after the dependent
		# package has already been installed.
		dropped_tasks.update(pkg for pkg in \
			unsatisfied_parents if pkg.operation != "nomerge")
		# Release references so the pruned packages can be collected.
		mydepgraph.break_refs(unsatisfied_parents)

		del e, graph, traversed_nodes, \
			unsatisfied_parents, unsatisfied_stack

	return (success, mydepgraph, dropped_tasks)
def action_build(settings, trees, mtimedb,
	myopts, myaction, myfiles, spinner):
	"""Resolve dependencies and perform (or pretend/ask about) a merge.

	Validates any saved resume state in mtimedb, builds a depgraph
	(either by resuming a previous merge list or from myfiles), optionally
	displays the merge list and prompts the user, and finally drives the
	Scheduler.  NOTE(review): portions of this function are elided in
	this view; comments below describe only the visible code.
	"""

	# validate the state of the resume data
	# so that we can make assumptions later.
	for k in ("resume", "resume_backup"):
		# Discard resume entries that are missing or structurally invalid
		# so later code can assume a well-formed dict.
		if k not in mtimedb:
		resume_data = mtimedb[k]
		if not isinstance(resume_data, dict):
		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
		for x in mergelist:
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, pkg_root, pkg_key, pkg_action = x
			if pkg_root not in trees:
				# Current $ROOT setting differs,
				# so the list must be stale.
		resume_opts = resume_data.get("myopts")
		if not isinstance(resume_opts, (dict, list)):
		favorites = resume_data.get("favorites")
		if not isinstance(favorites, list):

	if "--resume" in myopts and \
		("resume" in mtimedb or
		"resume_backup" in mtimedb):
		# Fall back to the backup copy when the primary entry is gone.
		if "resume" not in mtimedb:
			mtimedb["resume"] = mtimedb["resume_backup"]
			del mtimedb["resume_backup"]

		# "myopts" is a list for backward compatibility.
		resume_opts = mtimedb["resume"].get("myopts", [])
		if isinstance(resume_opts, list):
			resume_opts = dict((k,True) for k in resume_opts)
		# These options only applied to the original invocation and must
		# not be inherited by the resumed one.
		for opt in ("--skipfirst", "--ask", "--tree"):
			resume_opts.pop(opt, None)
		myopts.update(resume_opts)

		if "--debug" in myopts:
			writemsg_level("myopts %s\n" % (myopts,))

		# Adjust config according to options of the command being resumed.
		for myroot in trees:
			mysettings = trees[myroot]["vartree"].settings
			mysettings.unlock()
			adjust_config(myopts, mysettings)
		del myroot, mysettings

	ldpath_mtimes = mtimedb["ldpath"]

	# Cache frequently tested option flags.
	buildpkgonly = "--buildpkgonly" in myopts
	pretend = "--pretend" in myopts
	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
	ask = "--ask" in myopts
	nodeps = "--nodeps" in myopts
	oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
	tree = "--tree" in myopts
	if nodeps and tree:
		del myopts["--tree"]
		portage.writemsg(colorize("WARN", " * ") + \
			"--tree is broken with --nodeps. Disabling...\n")
	debug = "--debug" in myopts
	verbose = "--verbose" in myopts
	quiet = "--quiet" in myopts
	if pretend or fetchonly:
		# make the mtimedb readonly
		mtimedb.filename = None
	if "--digest" in myopts:
		# Warn that --digest can hide corruption; repoman is preferred.
		msg = "The --digest option can prevent corruption from being" + \
			" noticed. The `repoman manifest` command is the preferred" + \
			" way to generate manifests and it is capable of doing an" + \
			" entire repository or category at once."
		prefix = bad(" * ")
		writemsg(prefix + "\n")
		from textwrap import wrap
		for line in wrap(msg, 72):
			writemsg("%s%s\n" % (prefix, line))
		writemsg(prefix + "\n")

	# Announce what kind of list is about to be shown.
	if "--quiet" not in myopts and \
		("--pretend" in myopts or "--ask" in myopts or \
		"--tree" in myopts or "--verbose" in myopts):
		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
		elif "--buildpkgonly" in myopts:
		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
			print darkgreen("These are the packages that would be %s, in reverse order:") % action
			print darkgreen("These are the packages that would be %s, in order:") % action

	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
	if not show_spinner:
		spinner.update = spinner.update_quiet

		favorites = mtimedb["resume"].get("favorites")
		if not isinstance(favorites, list):
			print "Calculating dependencies ",
		myparams = create_depgraph_params(myopts, myaction)

		resume_data = mtimedb["resume"]
		mergelist = resume_data["mergelist"]
		if mergelist and "--skipfirst" in myopts:
			# --skipfirst: drop the first pending "merge" task.
			for i, task in enumerate(mergelist):
				if isinstance(task, list) and \
					task and task[-1] == "merge":

		skip_masked = "--skipfirst" in myopts
		skip_unsatisfied = "--skipfirst" in myopts
			success, mydepgraph, dropped_tasks = resume_depgraph(
				settings, trees, mtimedb, myopts, myparams, spinner,
				skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
		except (portage.exception.PackageNotFound,
			depgraph.UnsatisfiedResumeDep), e:
			if isinstance(e, depgraph.UnsatisfiedResumeDep):
				mydepgraph = e.depgraph

			from textwrap import wrap
			from portage.output import EOutput

			resume_data = mtimedb["resume"]
			mergelist = resume_data.get("mergelist")
			if not isinstance(mergelist, list):
			if mergelist and debug or (verbose and not quiet):
				out.eerror("Invalid resume list:")
				for task in mergelist:
					if isinstance(task, list):
						out.eerror(indent + str(tuple(task)))

			if isinstance(e, depgraph.UnsatisfiedResumeDep):
				out.eerror("One or more packages are either masked or " + \
					"have missing dependencies:")
				for dep in e.value:
					if dep.atom is None:
						out.eerror(indent + "Masked package:")
						out.eerror(2 * indent + str(dep.parent))
						out.eerror(indent + str(dep.atom) + " pulled in by:")
						out.eerror(2 * indent + str(dep.parent))
				msg = "The resume list contains packages " + \
					"that are either masked or have " + \
					"unsatisfied dependencies. " + \
					"Please restart/continue " + \
					"the operation manually, or use --skipfirst " + \
					"to skip the first package in the list and " + \
					"any other packages that may be " + \
					"masked or have missing dependencies."
				for line in wrap(msg, 72):
			elif isinstance(e, portage.exception.PackageNotFound):
				out.eerror("An expected package is " + \
					"not available: %s" % str(e))
				msg = "The resume list contains one or more " + \
					"packages that are no longer " + \
					"available. Please restart/continue " + \
					"the operation manually."
				for line in wrap(msg, 72):
			print "\b\b... done!"

			# Tell the user which tasks were dropped from the resume list.
			portage.writemsg("!!! One or more packages have been " + \
				"dropped due to\n" + \
				"!!! masking or unsatisfied dependencies:\n\n",
			for task in dropped_tasks:
				portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
			portage.writemsg("\n", noiselevel=-1)

			if mydepgraph is not None:
				mydepgraph.display_problems()
			if not (ask or pretend):
				# delete the current list and also the backup
				# since it's probably stale too.
				for k in ("resume", "resume_backup"):
					mtimedb.pop(k, None)

		if ("--resume" in myopts):
			print darkgreen("emerge: It seems we have nothing to resume...")

		myparams = create_depgraph_params(myopts, myaction)
		if "--quiet" not in myopts and "--nodeps" not in myopts:
			print "Calculating dependencies ",
		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
			retval, favorites = mydepgraph.select_files(myfiles)
		except portage.exception.PackageNotFound, e:
			portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
		except portage.exception.PackageSetNotFound, e:
			root_config = trees[settings["ROOT"]]["root_config"]
			display_missing_pkg_set(root_config, e.value)
			print "\b\b... done!"
		mydepgraph.display_problems()

	# Interactive display path (--ask/--tree/--verbose without --pretend).
	if "--pretend" not in myopts and \
		("--ask" in myopts or "--tree" in myopts or \
		"--verbose" in myopts) and \
		not ("--quiet" in myopts and "--ask" not in myopts):
		if "--resume" in myopts:
			mymergelist = mydepgraph.altlist()
			if len(mymergelist) == 0:
				print colorize("INFORM", "emerge: It seems we have nothing to resume...")
			favorites = mtimedb["resume"]["favorites"]
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=tree),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
			prompt="Would you like to resume merging these packages?"
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=("--tree" in myopts)),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
			# Tally tasks scheduled for merge (counter line elided).
			for x in mydepgraph.altlist():
				if isinstance(x, Package) and x.operation == "merge":

				sets = trees[settings["ROOT"]]["root_config"].sets
				world_candidates = None
				if "--noreplace" in myopts and \
					not oneshot and favorites:
					# Sets that are not world candidates are filtered
					# out here since the favorites list needs to be
					# complete for depgraph.loadResumeCommand() to
					# operate correctly.
					world_candidates = [x for x in favorites \
						if not (x.startswith(SETPREFIX) and \
						not sets[x[1:]].world_candidate)]
				if "--noreplace" in myopts and \
					not oneshot and world_candidates:
					for x in world_candidates:
						print " %s %s" % (good("*"), x)
					prompt="Would you like to add these packages to your world favorites?"
				elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
					prompt="Nothing to merge; would you like to auto-clean packages?"
					print "Nothing to merge; quitting."

			elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
				prompt="Would you like to fetch the source files for these packages?"
				prompt="Would you like to merge these packages?"

		if "--ask" in myopts and userquery(prompt) == "No":
		# Don't ask again (e.g. when auto-cleaning packages after merge)
		myopts.pop("--ask", None)

	# Pure --pretend path: just display the list.
	if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
		if ("--resume" in myopts):
			mymergelist = mydepgraph.altlist()
			if len(mymergelist) == 0:
				print colorize("INFORM", "emerge: It seems we have nothing to resume...")
			favorites = mtimedb["resume"]["favorites"]
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=tree),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
			retval = mydepgraph.display(
				mydepgraph.altlist(reversed=("--tree" in myopts)),
				favorites=favorites)
			mydepgraph.display_problems()
			if retval != os.EX_OK:
			if "--buildpkgonly" in myopts:
				# --buildpkgonly requires an already-satisfied dep graph:
				# strip non-Package nodes and check for remaining deps.
				graph_copy = mydepgraph.digraph.clone()
				for node in list(graph_copy.order):
					if not isinstance(node, Package):
						graph_copy.remove(node)
				if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
					print "\n!!! --buildpkgonly requires all dependencies to be merged."
					print "!!! You have to merge the dependencies before you can build this package.\n"
		if "--buildpkgonly" in myopts:
			# Same dependency-satisfaction check on the non-pretend path.
			graph_copy = mydepgraph.digraph.clone()
			for node in list(graph_copy.order):
				if not isinstance(node, Package):
					graph_copy.remove(node)
			if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
				print "\n!!! --buildpkgonly requires all dependencies to be merged."
				print "!!! Cannot merge requested packages. Merge deps and try again.\n"

		if ("--resume" in myopts):
			favorites=mtimedb["resume"]["favorites"]
			mymergelist = mydepgraph.altlist()
			# break_refs + del frees depgraph memory before merging.
			mydepgraph.break_refs(mymergelist)
			mergetask = Scheduler(settings, trees, mtimedb, myopts,
				spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
			del mydepgraph, mymergelist
			clear_caches(trees)

			retval = mergetask.merge()
			merge_count = mergetask.curval
			# Keep the old resume list around as resume_backup when it
			# still had more than one pending task.
			if "resume" in mtimedb and \
				"mergelist" in mtimedb["resume"] and \
				len(mtimedb["resume"]["mergelist"]) > 1:
				mtimedb["resume_backup"] = mtimedb["resume"]
				del mtimedb["resume"]
			mtimedb["resume"]={}
			# Stored as a dict starting with portage-2.1.6_rc1, and supported
			# by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
			# a list type for options.
			mtimedb["resume"]["myopts"] = myopts.copy()

			# Convert Atom instances to plain str since the mtimedb loader
			# sets unpickler.find_global = None which causes unpickler.load()
			# to raise the following exception:
			#
			# cPickle.UnpicklingError: Global and instance pickles are not supported.
			#
			# TODO: Maybe stop setting find_global = None, or find some other
			# way to avoid accidental triggering of the above UnpicklingError.
			mtimedb["resume"]["favorites"] = [str(x) for x in favorites]

			if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
				# Regenerate digests for every ebuild scheduled for merge.
				for pkgline in mydepgraph.altlist():
					if pkgline[0]=="ebuild" and pkgline[3]=="merge":
						y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
						tmpsettings = portage.config(clone=settings)
						if settings.get("PORTAGE_DEBUG", "") == "1":
						retval = portage.doebuild(
							y, "digest", settings["ROOT"], tmpsettings, edebug,
							("--pretend" in myopts),
							mydbapi=trees[pkgline[1]]["porttree"].dbapi,

			pkglist = mydepgraph.altlist()
			mydepgraph.saveNomergeFavorites()
			mydepgraph.break_refs(pkglist)
			mergetask = Scheduler(settings, trees, mtimedb, myopts,
				spinner, pkglist, favorites, mydepgraph.schedulerGraph())
			del mydepgraph, pkglist
			clear_caches(trees)

			retval = mergetask.merge()
			merge_count = mergetask.curval

	if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
		if "yes" == settings.get("AUTOCLEAN"):
			portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
			unmerge(trees[settings["ROOT"]]["root_config"],
				myopts, "clean", [],
				ldpath_mtimes, autoclean=1)
			portage.writemsg_stdout(colorize("WARN", "WARNING:")
				+ " AUTOCLEAN is disabled. This can cause serious"
				+ " problems due to overlapping packages.\n")
		trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	# Report that two mutually exclusive actions were requested; the
	# caller decides how to terminate (exit line elided in this view).
	sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
	sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
def insert_optional_args(args):
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.
	# NOTE(review): several lines (loop header, try/except, returns) are
	# elided in this view; comments describe only the visible code.
	jobs_opts = ("-j", "--jobs")
	# Work through a reversed copy so pop() yields args left-to-right.
	arg_stack = args[:]
	arg_stack.reverse()
		arg = arg_stack.pop()

		# A short option cluster containing "j", e.g. "-vj" or "-j4".
		short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
		if not (short_job_opt or arg in jobs_opts):
			new_args.append(arg)

		# Insert an empty placeholder in order to
		# satisfy the requirements of optparse.

		new_args.append("--jobs")

		if short_job_opt and len(arg) > 2:
			if arg[:2] == "-j":
					job_count = int(arg[2:])
					saved_opts = arg[2:]
				# Keep the other clustered short options for re-emission.
				saved_opts = arg[1:].replace("j", "")

		if job_count is None and arg_stack:
				job_count = int(arg_stack[-1])
				# Discard the job count from the stack
				# since we're consuming it here.

		if job_count is None:
			# unlimited number of jobs
			new_args.append("True")
			new_args.append(str(job_count))

		if saved_opts is not None:
			# Re-emit short options that were clustered with "j".
			new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
	"""Parse the emerge command line into (myaction, myopts, myfiles).

	NOTE(review): parts of this function are elided in this view; the
	comments below describe only the visible code.
	"""
	global actions, options, shortmapping

	# Deprecated long-option spellings mapped to their canonical form.
	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
	# Options that take an argument, expressed as add_option() kwargs.
	argument_options = {
			"help":"specify the location for portage configuration files",
			"help":"enable or disable color output",
			"choices":("y", "n")
			"help" : "Specifies the number of packages to build " + \
		"--load-average": {
			"help" :"Specifies that no new builds should be started " + \
				"if there are other builds running and the load average " + \
				"is at least LOAD (a floating-point number).",
			"help":"include unnecessary build time dependencies",
			"choices":("y", "n")
			"help":"specify conditions to trigger package reinstallation",
			"choices":["changed-use"]

	from optparse import OptionParser
	parser = OptionParser()
	# emerge provides its own help action.
	if parser.has_option("--help"):
		parser.remove_option("--help")

	# Register boolean action flags, plain options, short options and
	# aliases; dest names are the option names with dashes underscored.
	for action_opt in actions:
		parser.add_option("--" + action_opt, action="store_true",
			dest=action_opt.replace("-", "_"), default=False)
	for myopt in options:
		parser.add_option(myopt, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	for shortopt, longopt in shortmapping.iteritems():
		parser.add_option("-" + shortopt, action="store_true",
			dest=longopt.lstrip("--").replace("-", "_"), default=False)
	for myalias, myopt in longopt_aliases.iteritems():
		parser.add_option(myalias, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)

	for myopt, kwargs in argument_options.iteritems():
		parser.add_option(myopt,
			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

	# Pre-process so --jobs/-j with an optional value satisfies optparse.
	tmpcmdline = insert_optional_args(tmpcmdline)

	myoptions, myargs = parser.parse_args(args=tmpcmdline)

	# Normalize --jobs: "True" means unlimited, otherwise a positive int.
		if myoptions.jobs == "True":
				jobs = int(myoptions.jobs)

		if jobs is not True and \
				writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
					(myoptions.jobs,), noiselevel=-1)

		myoptions.jobs = jobs

	if myoptions.load_average:
			load_average = float(myoptions.load_average)

		# Non-positive values are rejected.
		if load_average <= 0.0:
			load_average = None
				writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
					(myoptions.load_average,), noiselevel=-1)

		myoptions.load_average = load_average

	# Copy the parsed values back into the myopts dict keyed by option name.
	for myopt in options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
			myopts[myopt] = True

	for myopt in argument_options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)

	# Exactly one action may be selected; otherwise complain.
	for action_opt in actions:
		v = getattr(myoptions, action_opt.replace("-", "_"))
				multiple_actions(myaction, action_opt)
			myaction = action_opt

	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run the config validation hook on every root's vartree settings."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Flush memoized metadata caches held by each root's db objects."""
	for root_trees in trees.itervalues():
		portdb = root_trees["porttree"].dbapi
		portdb.melt()
		portdb._aux_cache.clear()
		bindb = root_trees["bintree"].dbapi
		bindb._aux_cache.clear()
		bindb._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""Build (settings, trees, mtimedb) for this emerge invocation.

	NOTE(review): some lines are elided in this view; comments describe
	only the visible code.
	"""
	# Forward PORTAGE_CONFIGROOT/ROOT environment overrides to create_trees().
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
	trees = portage.create_trees(trees=trees, **kwargs)

	# Attach a RootConfig (including set configuration) to each root.
	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	settings = trees["/"]["vartree"].settings

	# Prefer the settings of a non-"/" root when one exists.
	for myroot in trees:
			settings = trees[myroot]["vartree"].settings

	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)

	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config."""

	# To enhance usability, make some vars case insensitive by forcing them to
	# lower case.
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		while "noauto" in settings.features:
			settings.features.remove("noauto")
		settings["FEATURES"] = " ".join(settings.features)
		settings.backup_changes("FEATURES")

	# Sanitize CLEAN_DELAY, keeping the previous value on parse failure.
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Same treatment for EMERGE_WARNING_DELAY (default 10 seconds).
	EMERGE_WARNING_DELAY = 10
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	# --debug forces debug mode on regardless of PORTAGE_DEBUG.
	if "--debug" in myopts:
	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")

	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		# Not a tty: disable color unless NOCOLOR explicitly says "no".
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
	# NOTE(review): body elided in this view; per its name and the nice()/
	# ionice() helpers defined below, it presumably applies process
	# priority settings -- confirm against the full source.
def nice(settings):
	"""Renice this process according to PORTAGE_NICENESS (default "0")."""
		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError), e:
		# OSError: insufficient privileges; ValueError: non-integer setting.
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Run PORTAGE_IONICE_COMMAND (if configured) to set I/O priority."""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
		ionice_cmd = shlex.split(ionice_cmd)

	from portage.util import varexpand
	# Substitute ${PID} (and any other variables) into the command.
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

		rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.

	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	# Report a nonexistent package set and list the sets that do exist.
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))

	for s in sorted(root_config.sets):
		msg.append(" %s" % s)

	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand @set arguments in myfiles into package atoms or set names.

	Supports inline set options via @name{opt=val,...} and simple set
	algebra with the -@ / +@ (and intersection) operators.  Returns
	(newargs, retval).  NOTE(review): several lines are elided in this
	view; comments describe only the visible code.
	"""
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
	# Normalize the legacy bare "system"/"world" arguments to set syntax.
		if a in ("system", "world"):
			newargs.append(SETPREFIX+a)

	# separators for set arguments

	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"

	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
			# Strip @name{...} option blocks and feed the parsed options
			# to setconfig.update() for that set.
				start = x.find(ARG_START)
				end = x.find(ARG_END)
				if start > 0 and start < end:
					namepart = x[:start]
					argpart = x[start+1:end]

					# TODO: implement proper quoting
					args = argpart.split(",")
						if "=" in a:
							k, v = a.split("=", 1)
							# Bare options are treated as boolean flags.
							options[a] = "True"
					setconfig.update(namepart, options)
					newset += (x[:start-len(namepart)]+namepart)
					x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occurred while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)

	# emerge relies on the existence of sets with names "world" and "system"
	required_sets = ("world", "system")

	for s in required_sets:
			missing_sets.append(s)

	# Build a human-readable listing of any missing required sets.
		if len(missing_sets) > 2:
			missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
			missing_sets_str += ', and "%s"' % missing_sets[-1]
		elif len(missing_sets) == 2:
			missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
			missing_sets_str = '"%s"' % missing_sets[-1]
		msg = ["emerge: incomplete set configuration, " + \
			"missing set(s): %s" % missing_sets_str]
			msg.append(" sets defined: %s" % ", ".join(sets))
		msg.append(" This usually means that '%s'" % \
			(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
		msg.append(" is missing or corrupt.")
			writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

		if a.startswith(SETPREFIX):
			# support simple set operations (intersection, difference and union)
			# on the commandline. Expressions are evaluated strictly left-to-right
			if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
				expression = a[len(SETPREFIX):]
				# Peel operators off the right end, recording each operand
				# and operator for later left-to-right evaluation.
				while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
					is_pos = expression.rfind(IS_OPERATOR)
					diff_pos = expression.rfind(DIFF_OPERATOR)
					union_pos = expression.rfind(UNION_OPERATOR)
					op_pos = max(is_pos, diff_pos, union_pos)
					s1 = expression[:op_pos]
					s2 = expression[op_pos+len(IS_OPERATOR):]
					op = expression[op_pos:op_pos+len(IS_OPERATOR)]
						display_missing_pkg_set(root_config, s2)
					expr_sets.insert(0, s2)
					expr_ops.insert(0, op)
				if not expression in sets:
					display_missing_pkg_set(root_config, expression)
				expr_sets.insert(0, expression)
				# Evaluate the recorded operations left-to-right.
				result = set(setconfig.getSetAtoms(expression))
				for i in range(0, len(expr_ops)):
					s2 = setconfig.getSetAtoms(expr_sets[i+1])
					if expr_ops[i] == IS_OPERATOR:
						result.intersection_update(s2)
					elif expr_ops[i] == DIFF_OPERATOR:
						result.difference_update(s2)
					elif expr_ops[i] == UNION_OPERATOR:
						raise NotImplementedError("unknown set operator %s" % expr_ops[i])
				newargs.extend(result)
				# Plain @set argument without operators.
				s = a[len(SETPREFIX):]
					display_missing_pkg_set(root_config, s)
				setconfig.active.append(s)
					set_atoms = setconfig.getSetAtoms(s)
				except portage.exception.PackageSetNotFound, e:
					writemsg_level(("emerge: the given set '%s' " + \
						"contains a non-existent set named '%s'.\n") % \
						(s, e), level=logging.ERROR, noiselevel=-1)
				if myaction in unmerge_actions and \
					not sets[s].supportsOperation("unmerge"):
					sys.stderr.write("emerge: the given set '%s' does " % s + \
						"not support unmerge operations\n")
				elif not set_atoms:
					print "emerge: '%s' is an empty set" % s
				elif myaction not in do_not_expand:
					newargs.extend(set_atoms)
					newargs.append(SETPREFIX+s)
				for e in sets[s].errors:

	return (newargs, retval)
def repo_name_check(trees):
	"""Warn about repositories that lack a profiles/repo_name entry.

	Returns True when at least one configured repository is missing a
	repo_name.  NOTE(review): a few lines are elided in this view.
	"""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start from every configured tree, then discard the paths
			# that are claimed by a named repository.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
				missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	# Warn when CONFIG_PROTECT is empty for a root, since config files
	# would then be unprotected during merges.
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
				msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Tell the user that a short ebuild name matched several packages.

	In --quiet mode just lists the fully-qualified names; otherwise uses
	the search class to display the matches first.  NOTE(review): a few
	lines are elided in this view.
	"""
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print " " + colorize("INFORM", cp)

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	# Derive the package-name search key from the ambiguous argument.
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Return an error status when a root has no valid profile configured.

	info/sync actions and --version/--help are always permitted.
	NOTE(review): the return statements are elided in this view.
	"""
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
14628 global portage # NFC why this is necessary now - genone
14629 portage._disable_legacy_globals()
14630 # Disable color until we're sure that it should be enabled (after
14631 # EMERGE_DEFAULT_OPTS has been parsed).
14632 portage.output.havecolor = 0
14633 # This first pass is just for options that need to be known as early as
14634 # possible, such as --config-root. They will be parsed again later,
14635 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14636 # the value of --config-root).
14637 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14638 if "--debug" in myopts:
14639 os.environ["PORTAGE_DEBUG"] = "1"
14640 if "--config-root" in myopts:
14641 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14643 # Portage needs to ensure a sane umask for the files it creates.
14645 settings, trees, mtimedb = load_emerge_config()
14646 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14647 rval = profile_check(trees, myaction, myopts)
14648 if rval != os.EX_OK:
14651 if portage._global_updates(trees, mtimedb["updates"]):
14653 # Reload the whole config from scratch.
14654 settings, trees, mtimedb = load_emerge_config(trees=trees)
14655 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14657 xterm_titles = "notitles" not in settings.features
14660 if "--ignore-default-opts" not in myopts:
14661 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14662 tmpcmdline.extend(sys.argv[1:])
14663 myaction, myopts, myfiles = parse_opts(tmpcmdline)
14665 if "--digest" in myopts:
14666 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14667 # Reload the whole config from scratch so that the portdbapi internal
14668 # config is updated with new FEATURES.
14669 settings, trees, mtimedb = load_emerge_config(trees=trees)
14670 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14672 for myroot in trees:
14673 mysettings = trees[myroot]["vartree"].settings
14674 mysettings.unlock()
14675 adjust_config(myopts, mysettings)
14676 mysettings["PORTAGE_COUNTER_HASH"] = \
14677 trees[myroot]["vartree"].dbapi._counter_hash()
14678 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14680 del myroot, mysettings
14682 apply_priorities(settings)
14684 spinner = stdout_spinner()
14685 if "candy" in settings.features:
14686 spinner.update = spinner.update_scroll
14688 if "--quiet" not in myopts:
14689 portage.deprecated_profile_check(settings=settings)
14690 repo_name_check(trees)
14691 config_protect_check(trees)
14693 eclasses_overridden = {}
14694 for mytrees in trees.itervalues():
14695 mydb = mytrees["porttree"].dbapi
14696 # Freeze the portdbapi for performance (memoize all xmatch results).
14698 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14701 if eclasses_overridden and \
14702 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14703 prefix = bad(" * ")
14704 if len(eclasses_overridden) == 1:
14705 writemsg(prefix + "Overlay eclass overrides " + \
14706 "eclass from PORTDIR:\n", noiselevel=-1)
14708 writemsg(prefix + "Overlay eclasses override " + \
14709 "eclasses from PORTDIR:\n", noiselevel=-1)
14710 writemsg(prefix + "\n", noiselevel=-1)
14711 for eclass_name in sorted(eclasses_overridden):
14712 writemsg(prefix + " '%s/%s.eclass'\n" % \
14713 (eclasses_overridden[eclass_name], eclass_name),
14715 writemsg(prefix + "\n", noiselevel=-1)
14716 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14717 "because it will trigger invalidation of cached ebuild metadata " + \
14718 "that is distributed with the portage tree. If you must " + \
14719 "override eclasses from PORTDIR then you are advised to add " + \
14720 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14721 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14722 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14723 "you would like to disable this warning."
14724 from textwrap import wrap
14725 for line in wrap(msg, 72):
14726 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14728 if "moo" in myfiles:
14731 Larry loves Gentoo (""" + platform.system() + """)
14733 _______________________
14734 < Have you mooed today? >
14735 -----------------------
14745 ext = os.path.splitext(x)[1]
14746 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14747 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14750 root_config = trees[settings["ROOT"]]["root_config"]
14751 if myaction == "list-sets":
14752 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14756 # only expand sets for actions taking package arguments
14757 oldargs = myfiles[:]
14758 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14759 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14760 if retval != os.EX_OK:
14763 # Need to handle empty sets specially, otherwise emerge will react
14764 # with the help message for empty argument lists
14765 if oldargs and not myfiles:
14766 print "emerge: no targets left after set expansion"
14769 if ("--tree" in myopts) and ("--columns" in myopts):
14770 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14773 if ("--quiet" in myopts):
14774 spinner.update = spinner.update_quiet
14775 portage.util.noiselimit = -1
14777 # Always create packages if FEATURES=buildpkg
14778 # Imply --buildpkg if --buildpkgonly
14779 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14780 if "--buildpkg" not in myopts:
14781 myopts["--buildpkg"] = True
14783 # Also allow -S to invoke search action (-sS)
14784 if ("--searchdesc" in myopts):
14785 if myaction and myaction != "search":
14786 myfiles.append(myaction)
14787 if "--search" not in myopts:
14788 myopts["--search"] = True
14789 myaction = "search"
14791 # Always try and fetch binary packages if FEATURES=getbinpkg
14792 if ("getbinpkg" in settings.features):
14793 myopts["--getbinpkg"] = True
14795 if "--buildpkgonly" in myopts:
14796 # --buildpkgonly will not merge anything, so
14797 # it cancels all binary package options.
14798 for opt in ("--getbinpkg", "--getbinpkgonly",
14799 "--usepkg", "--usepkgonly"):
14800 myopts.pop(opt, None)
14802 if "--fetch-all-uri" in myopts:
14803 myopts["--fetchonly"] = True
14805 if "--skipfirst" in myopts and "--resume" not in myopts:
14806 myopts["--resume"] = True
14808 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14809 myopts["--usepkgonly"] = True
14811 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14812 myopts["--getbinpkg"] = True
14814 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14815 myopts["--usepkg"] = True
14817 # Also allow -K to apply --usepkg/-k
14818 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14819 myopts["--usepkg"] = True
14821 # Allow -p to remove --ask
14822 if ("--pretend" in myopts) and ("--ask" in myopts):
14823 print ">>> --pretend disables --ask... removing --ask from options."
14824 del myopts["--ask"]
14826 # forbid --ask when not in a terminal
14827 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14828 if ("--ask" in myopts) and (not sys.stdin.isatty()):
14829 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14833 if settings.get("PORTAGE_DEBUG", "") == "1":
14834 spinner.update = spinner.update_quiet
14836 if "python-trace" in settings.features:
14837 import portage.debug
14838 portage.debug.set_trace(True)
14840 if not ("--quiet" in myopts):
14841 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14842 spinner.update = spinner.update_basic
14844 if "--version" in myopts:
14845 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14846 settings.profile_path, settings["CHOST"],
14847 trees[settings["ROOT"]]["vartree"].dbapi)
14849 elif "--help" in myopts:
14850 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14853 if "--debug" in myopts:
14854 print "myaction", myaction
14855 print "myopts", myopts
14857 if not myaction and not myfiles and "--resume" not in myopts:
14858 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14861 pretend = "--pretend" in myopts
14862 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14863 buildpkgonly = "--buildpkgonly" in myopts
14865 # check if root user is the current user for the actions where emerge needs this
14866 if portage.secpass < 2:
14867 # We've already allowed "--version" and "--help" above.
14868 if "--pretend" not in myopts and myaction not in ("search","info"):
14869 need_superuser = not \
14871 (buildpkgonly and secpass >= 1) or \
14872 myaction in ("metadata", "regen") or \
14873 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14874 if portage.secpass < 1 or \
14877 access_desc = "superuser"
14879 access_desc = "portage group"
14880 # Always show portage_group_warning() when only portage group
14881 # access is required but the user is not in the portage group.
14882 from portage.data import portage_group_warning
14883 if "--ask" in myopts:
14884 myopts["--pretend"] = True
14885 del myopts["--ask"]
14886 print ("%s access is required... " + \
14887 "adding --pretend to options.\n") % access_desc
14888 if portage.secpass < 1 and not need_superuser:
14889 portage_group_warning()
14891 sys.stderr.write(("emerge: %s access is " + \
14892 "required.\n\n") % access_desc)
14893 if portage.secpass < 1 and not need_superuser:
14894 portage_group_warning()
14897 disable_emergelog = False
14898 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14900 disable_emergelog = True
14902 if myaction in ("search", "info"):
14903 disable_emergelog = True
14904 if disable_emergelog:
14905 """ Disable emergelog for everything except build or unmerge
14906 operations. This helps minimize parallel emerge.log entries that can
14907 confuse log parsers. We especially want it disabled during
14908 parallel-fetch, which uses --resume --fetchonly."""
14910 def emergelog(*pargs, **kargs):
14913 if not "--pretend" in myopts:
14914 emergelog(xterm_titles, "Started emerge on: "+\
14915 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14918 myelogstr=" ".join(myopts)
14920 myelogstr+=" "+myaction
14922 myelogstr += " " + " ".join(oldargs)
14923 emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
	"""Handle SIGINT/SIGTERM: ignore further delivery of both signals,
	announce which signal arrived, and exit with status 100 + signum."""
	# Mask both signals so a repeated ^C / kill cannot interrupt the
	# shutdown path we are about to run.
	for caught in (signal.SIGINT, signal.SIGTERM):
		signal.signal(caught, signal.SIG_IGN)
	portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
	sys.exit(100+signum)
14931 signal.signal(signal.SIGINT, emergeexitsig)
14932 signal.signal(signal.SIGTERM, emergeexitsig)
14935 """This gets out final log message in before we quit."""
14936 if "--pretend" not in myopts:
14937 emergelog(xterm_titles, " *** terminating.")
14938 if "notitles" not in settings.features:
14940 portage.atexit_register(emergeexit)
14942 if myaction in ("config", "metadata", "regen", "sync"):
14943 if "--pretend" in myopts:
14944 sys.stderr.write(("emerge: The '%s' action does " + \
14945 "not support '--pretend'.\n") % myaction)
14948 if "sync" == myaction:
14949 return action_sync(settings, trees, mtimedb, myopts, myaction)
14950 elif "metadata" == myaction:
14951 action_metadata(settings, portdb, myopts)
14952 elif myaction=="regen":
14953 validate_ebuild_environment(trees)
14954 action_regen(settings, portdb, myopts.get("--jobs"),
14955 myopts.get("--load-average"))
14957 elif "config"==myaction:
14958 validate_ebuild_environment(trees)
14959 action_config(settings, trees, myopts, myfiles)
14962 elif "search"==myaction:
14963 validate_ebuild_environment(trees)
14964 action_search(trees[settings["ROOT"]]["root_config"],
14965 myopts, myfiles, spinner)
14966 elif myaction in ("clean", "unmerge") or \
14967 (myaction == "prune" and "--nodeps" in myopts):
14968 validate_ebuild_environment(trees)
14970 # Ensure atoms are valid before calling unmerge().
14971 # For backward compat, leading '=' is not required.
14973 if is_valid_package_atom(x) or \
14974 is_valid_package_atom("=" + x):
14977 msg.append("'%s' is not a valid package atom." % (x,))
14978 msg.append("Please check ebuild(5) for full details.")
14979 writemsg_level("".join("!!! %s\n" % line for line in msg),
14980 level=logging.ERROR, noiselevel=-1)
14983 # When given a list of atoms, unmerge
14984 # them in the order given.
14985 ordered = myaction == "unmerge"
14986 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14987 mtimedb["ldpath"], ordered=ordered):
14988 if not (buildpkgonly or fetchonly or pretend):
14989 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14991 elif myaction in ("depclean", "info", "prune"):
14993 # Ensure atoms are valid before calling unmerge().
14994 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14997 if is_valid_package_atom(x):
14999 valid_atoms.append(
15000 portage.dep_expand(x, mydb=vardb, settings=settings))
15001 except portage.exception.AmbiguousPackageName, e:
15002 msg = "The short ebuild name \"" + x + \
15003 "\" is ambiguous. Please specify " + \
15004 "one of the following " + \
15005 "fully-qualified ebuild names instead:"
15006 for line in textwrap.wrap(msg, 70):
15007 writemsg_level("!!! %s\n" % (line,),
15008 level=logging.ERROR, noiselevel=-1)
15010 writemsg_level(" %s\n" % colorize("INFORM", i),
15011 level=logging.ERROR, noiselevel=-1)
15012 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15016 msg.append("'%s' is not a valid package atom." % (x,))
15017 msg.append("Please check ebuild(5) for full details.")
15018 writemsg_level("".join("!!! %s\n" % line for line in msg),
15019 level=logging.ERROR, noiselevel=-1)
15022 if myaction == "info":
15023 return action_info(settings, trees, myopts, valid_atoms)
15025 validate_ebuild_environment(trees)
15026 action_depclean(settings, trees, mtimedb["ldpath"],
15027 myopts, myaction, valid_atoms, spinner)
15028 if not (buildpkgonly or fetchonly or pretend):
15029 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15030 # "update", "system", or just process files:
15032 validate_ebuild_environment(trees)
15033 if "--pretend" not in myopts:
15034 display_news_notification(root_config, myopts)
15035 retval = action_build(settings, trees, mtimedb,
15036 myopts, myaction, myfiles, spinner)
15037 root_config = trees[settings["ROOT"]]["root_config"]
15038 post_emerge(root_config, myopts, mtimedb, retval)