2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
# This block ensures that ^C interrupts are handled quietly.
try:
	import signal

	def exithandler(signum, frame):
		# Ignore further INT/TERM so we are not re-entered while
		# shutting down, then leave with a nonzero status.
		signal.signal(signal.SIGINT, signal.SIG_IGN)
		signal.signal(signal.SIGTERM, signal.SIG_IGN)
		sys.exit(1)

	# Route INT/TERM through the quiet handler; restore the default
	# SIGPIPE behavior so broken pipes die silently instead of
	# raising IOError.
	signal.signal(signal.SIGINT, exithandler)
	signal.signal(signal.SIGTERM, exithandler)
	signal.signal(signal.SIGPIPE, signal.SIG_DFL)

except KeyboardInterrupt:
	sys.exit(1)
24 from collections import deque
41 from os import path as osp
42 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51 nc_len, red, teal, turquoise, xtermTitle, \
52 xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
61 portage.dep._dep_check_strict = True
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage._sets import load_default_config, SETPREFIX
70 from portage._sets.base import InternalPackageSet
72 from itertools import chain, izip
73 from UserDict import DictMixin
76 import cPickle as pickle
81 import cStringIO as StringIO
85 class stdout_spinner(object):
87 "Gentoo Rocks ("+platform.system()+")",
88 "Thank you for using Gentoo. :)",
89 "Are you actually trying to read this?",
90 "How many times have you stared at this?",
91 "We are generating the cache right now",
92 "You are paying too much attention.",
93 "A theory is better than its explanation.",
94 "Phasers locked on target, Captain.",
95 "Thrashing is just virtual crashing.",
96 "To be is to program.",
97 "Real Users hate Real Programmers.",
98 "When all else fails, read the instructions.",
99 "Functionality breeds Contempt.",
100 "The future lies ahead.",
101 "3.1415926535897932384626433832795028841971694",
102 "Sometimes insanity is the only alternative.",
103 "Inaccuracy saves a world of explanation.",
106 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
110 self.update = self.update_twirl
111 self.scroll_sequence = self.scroll_msgs[
112 int(time.time() * 100) % len(self.scroll_msgs)]
114 self.min_display_latency = 0.05
116 def _return_early(self):
118 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
119 each update* method should return without doing any output when this
122 cur_time = time.time()
123 if cur_time - self.last_update < self.min_display_latency:
125 self.last_update = cur_time
128 def update_basic(self):
129 self.spinpos = (self.spinpos + 1) % 500
130 if self._return_early():
132 if (self.spinpos % 100) == 0:
133 if self.spinpos == 0:
134 sys.stdout.write(". ")
136 sys.stdout.write(".")
139 def update_scroll(self):
140 if self._return_early():
142 if(self.spinpos >= len(self.scroll_sequence)):
143 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
144 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
146 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
148 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
150 def update_twirl(self):
151 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
152 if self._return_early():
154 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
157 def update_quiet(self):
160 def userquery(prompt, responses=None, colours=None):
161 """Displays a prompt and a set of responses, then waits for a response
162 which is checked against the responses and the first to match is
163 returned. An empty response will match the first value in responses. The
164 input buffer is *not* cleared prior to the prompt!
167 responses: a List of Strings.
168 colours: a List of Functions taking and returning a String, used to
169 process the responses for display. Typically these will be functions
170 like red() but could be e.g. lambda x: "DisplayString".
171 If responses is omitted, defaults to ["Yes", "No"], [green, red].
172 If only colours is omitted, defaults to [bold, ...].
174 Returns a member of the List responses. (If called without optional
175 arguments, returns "Yes" or "No".)
176 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
178 if responses is None:
179 responses = ["Yes", "No"]
181 create_color_func("PROMPT_CHOICE_DEFAULT"),
182 create_color_func("PROMPT_CHOICE_OTHER")
184 elif colours is None:
186 colours=(colours*len(responses))[:len(responses)]
190 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
191 for key in responses:
192 # An empty response will match the first value in responses.
193 if response.upper()==key[:len(response)].upper():
195 print "Sorry, response '%s' not understood." % response,
196 except (EOFError, KeyboardInterrupt):
200 actions = frozenset([
201 "clean", "config", "depclean",
202 "info", "list-sets", "metadata",
203 "prune", "regen", "search",
207 "--ask", "--alphabetical",
208 "--buildpkg", "--buildpkgonly",
209 "--changelog", "--columns",
214 "--fetchonly", "--fetch-all-uri",
215 "--getbinpkg", "--getbinpkgonly",
216 "--help", "--ignore-default-opts",
219 "--newuse", "--nocolor",
220 "--nodeps", "--noreplace",
221 "--nospinner", "--oneshot",
222 "--onlydeps", "--pretend",
223 "--quiet", "--resume",
224 "--searchdesc", "--selective",
228 "--usepkg", "--usepkgonly",
229 "--verbose", "--version"
235 "b":"--buildpkg", "B":"--buildpkgonly",
236 "c":"--clean", "C":"--unmerge",
237 "d":"--debug", "D":"--deep",
239 "f":"--fetchonly", "F":"--fetch-all-uri",
240 "g":"--getbinpkg", "G":"--getbinpkgonly",
242 "k":"--usepkg", "K":"--usepkgonly",
244 "n":"--noreplace", "N":"--newuse",
245 "o":"--onlydeps", "O":"--nodeps",
246 "p":"--pretend", "P":"--prune",
248 "s":"--search", "S":"--searchdesc",
251 "v":"--verbose", "V":"--version"
254 def emergelog(xterm_titles, mystr, short_msg=None):
255 if xterm_titles and short_msg:
256 if "HOSTNAME" in os.environ:
257 short_msg = os.environ["HOSTNAME"]+": "+short_msg
258 xtermTitle(short_msg)
260 file_path = "/var/log/emerge.log"
261 mylogfile = open(file_path, "a")
262 portage.util.apply_secpass_permissions(file_path,
263 uid=portage.portage_uid, gid=portage.portage_gid,
267 mylock = portage.locks.lockfile(mylogfile)
268 # seek because we may have gotten held up by the lock.
269 # if so, we may not be positioned at the end of the file.
271 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
275 portage.locks.unlockfile(mylock)
277 except (IOError,OSError,portage.exception.PortageException), e:
279 print >> sys.stderr, "emergelog():",e
281 def countdown(secs=5, doing="Starting"):
283 print ">>> Waiting",secs,"seconds before starting..."
284 print ">>> (Control-C to abort)...\n"+doing+" in: ",
288 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
	"""Return a byte count formatted as a whole number of kB with
	thousands separators, e.g. 1264115712 -> "1,234,488 kB".

	Non-integer input is passed through unchanged as str(mysize).
	Sizes are always rounded *up* to the next kB so that a small file
	that still needs fetching never shows as "0 kB".
	"""
	# Accept both int and (Python 2) long; anything else is opaque.
	try:
		integer_types = (int, long)
	except NameError:  # Python 3: 'long' no longer exists
		integer_types = (int,)
	if not isinstance(mysize, integer_types):
		return str(mysize)
	if mysize % 1024:
		# Always round up to the next kB so that it doesn't show 0 kB when
		# some small file still needs to be fetched.
		mysize += 1024 - mysize % 1024
	kb = str(mysize // 1024)
	# Insert "," thousands separators, right to left.
	count = len(kb) - 3
	while count > 0:
		kb = kb[:count] + "," + kb[count:]
		count -= 3
	return kb + " kB"
def getgccversion(chost):
	"""
	rtype: C{str}
	return: the current in-use gcc version
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"
	)

	# Preferred source: gcc-config reports the active profile as
	# "${CHOST}-<version>".
	mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# Fall back to asking the compiler itself: first the CHOST-prefixed
	# binary, then plain "gcc".
	for version_command in (chost + "-" + gcc_ver_command, gcc_ver_command):
		mystatus, myoutput = commands.getstatusoutput(version_command)
		if mystatus == os.EX_OK:
			return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
340 def getportageversion(portdir, target_root, profile, chost, vardb):
341 profilever = "unavailable"
343 realpath = os.path.realpath(profile)
344 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
345 if realpath.startswith(basepath):
346 profilever = realpath[1 + len(basepath):]
349 profilever = "!" + os.readlink(profile)
352 del realpath, basepath
355 libclist = vardb.match("virtual/libc")
356 libclist += vardb.match("virtual/glibc")
357 libclist = portage.util.unique_array(libclist)
359 xs=portage.catpkgsplit(x)
361 libcver+=","+"-".join(xs[1:])
363 libcver="-".join(xs[1:])
365 libcver="unavailable"
367 gccver = getgccversion(chost)
368 unameout=platform.release()+" "+platform.machine()
370 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
372 def create_depgraph_params(myopts, myaction):
373 #configure emerge engine parameters
375 # self: include _this_ package regardless of if it is merged.
376 # selective: exclude the package if it is merged
377 # recurse: go into the dependencies
378 # deep: go into the dependencies of already merged packages
379 # empty: pretend nothing is merged
380 # complete: completely account for all known dependencies
381 # remove: build graph for use in removing packages
382 myparams = set(["recurse"])
384 if myaction == "remove":
385 myparams.add("remove")
386 myparams.add("complete")
389 if "--update" in myopts or \
390 "--newuse" in myopts or \
391 "--reinstall" in myopts or \
392 "--noreplace" in myopts:
393 myparams.add("selective")
394 if "--emptytree" in myopts:
395 myparams.add("empty")
396 myparams.discard("selective")
397 if "--nodeps" in myopts:
398 myparams.discard("recurse")
399 if "--deep" in myopts:
401 if "--complete-graph" in myopts:
402 myparams.add("complete")
405 # search functionality
406 class search(object):
417 def __init__(self, root_config, spinner, searchdesc,
418 verbose, usepkg, usepkgonly):
419 """Searches the available and installed packages for the supplied search key.
420 The list of available and installed packages is created at object instantiation.
421 This makes successive searches faster."""
422 self.settings = root_config.settings
423 self.vartree = root_config.trees["vartree"]
424 self.spinner = spinner
425 self.verbose = verbose
426 self.searchdesc = searchdesc
427 self.root_config = root_config
428 self.setconfig = root_config.setconfig
429 self.matches = {"pkg" : []}
434 self.portdb = fake_portdb
435 for attrib in ("aux_get", "cp_all",
436 "xmatch", "findname", "getFetchMap"):
437 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
441 portdb = root_config.trees["porttree"].dbapi
442 bindb = root_config.trees["bintree"].dbapi
443 vardb = root_config.trees["vartree"].dbapi
445 if not usepkgonly and portdb._have_root_eclass_dir:
446 self._dbs.append(portdb)
448 if (usepkg or usepkgonly) and bindb.cp_all():
449 self._dbs.append(bindb)
451 self._dbs.append(vardb)
452 self._portdb = portdb
457 cp_all.update(db.cp_all())
458 return list(sorted(cp_all))
460 def _aux_get(self, *args, **kwargs):
463 return db.aux_get(*args, **kwargs)
468 def _findname(self, *args, **kwargs):
470 if db is not self._portdb:
471 # We don't want findname to return anything
472 # unless it's an ebuild in a portage tree.
473 # Otherwise, it's already built and we don't
476 func = getattr(db, "findname", None)
478 value = func(*args, **kwargs)
483 def _getFetchMap(self, *args, **kwargs):
485 func = getattr(db, "getFetchMap", None)
487 value = func(*args, **kwargs)
492 def _visible(self, db, cpv, metadata):
493 installed = db is self.vartree.dbapi
494 built = installed or db is not self._portdb
497 pkg_type = "installed"
500 return visible(self.settings,
501 Package(type_name=pkg_type, root_config=self.root_config,
502 cpv=cpv, built=built, installed=installed, metadata=metadata))
504 def _xmatch(self, level, atom):
506 This method does not expand old-style virtuals because it
507 is restricted to returning matches for a single ${CATEGORY}/${PN}
508 and old-style virual matches unreliable for that when querying
509 multiple package databases. If necessary, old-style virtuals
510 can be performed on atoms prior to calling this method.
512 cp = portage.dep_getkey(atom)
513 if level == "match-all":
516 if hasattr(db, "xmatch"):
517 matches.update(db.xmatch(level, atom))
519 matches.update(db.match(atom))
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
522 elif level == "match-visible":
525 if hasattr(db, "xmatch"):
526 matches.update(db.xmatch(level, atom))
528 db_keys = list(db._aux_cache_keys)
529 for cpv in db.match(atom):
530 metadata = izip(db_keys,
531 db.aux_get(cpv, db_keys))
532 if not self._visible(db, cpv, metadata):
535 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
536 db._cpv_sort_ascending(result)
537 elif level == "bestmatch-visible":
540 if hasattr(db, "xmatch"):
541 cpv = db.xmatch("bestmatch-visible", atom)
542 if not cpv or portage.cpv_getkey(cpv) != cp:
544 if not result or cpv == portage.best([cpv, result]):
547 db_keys = Package.metadata_keys
548 # break out of this loop with highest visible
549 # match, checked in descending order
550 for cpv in reversed(db.match(atom)):
551 if portage.cpv_getkey(cpv) != cp:
553 metadata = izip(db_keys,
554 db.aux_get(cpv, db_keys))
555 if not self._visible(db, cpv, metadata):
557 if not result or cpv == portage.best([cpv, result]):
561 raise NotImplementedError(level)
564 def execute(self,searchkey):
565 """Performs the search for the supplied search key"""
567 self.searchkey=searchkey
568 self.packagematches = []
571 self.matches = {"pkg":[], "desc":[]}
574 self.matches = {"pkg":[]}
575 print "Searching... ",
578 if self.searchkey.startswith('%'):
580 self.searchkey = self.searchkey[1:]
581 if self.searchkey.startswith('@'):
583 self.searchkey = self.searchkey[1:]
585 self.searchre=re.compile(self.searchkey,re.I)
587 self.searchre=re.compile(re.escape(self.searchkey), re.I)
588 for package in self.portdb.cp_all():
589 self.spinner.update()
592 match_string = package[:]
594 match_string = package.split("/")[-1]
597 if self.searchre.search(match_string):
598 if not self.portdb.xmatch("match-visible", package):
600 self.matches["pkg"].append([package,masked])
601 elif self.searchdesc: # DESCRIPTION searching
602 full_package = self.portdb.xmatch("bestmatch-visible", package)
604 #no match found; we don't want to query description
605 full_package = portage.best(
606 self.portdb.xmatch("match-all", package))
612 full_desc = self.portdb.aux_get(
613 full_package, ["DESCRIPTION"])[0]
615 print "emerge: search: aux_get() failed, skipping"
617 if self.searchre.search(full_desc):
618 self.matches["desc"].append([full_package,masked])
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
657 desc, homepage, license = self.portdb.aux_get(
658 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
660 print "emerge: search: aux_get() failed, skipping"
663 print green("*")+" "+white(match)+" "+red("[ Masked ]")
665 print green("*")+" "+white(match)
666 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
670 mycat = match.split("/")[0]
671 mypkg = match.split("/")[1]
672 mycpv = match + "-" + myversion
673 myebuild = self.portdb.findname(mycpv)
675 pkgdir = os.path.dirname(myebuild)
676 from portage import manifest
677 mf = manifest.Manifest(
678 pkgdir, self.settings["DISTDIR"])
680 uri_map = self.portdb.getFetchMap(mycpv)
681 except portage.exception.InvalidDependString, e:
682 file_size_str = "Unknown (%s)" % (e,)
686 mysum[0] = mf.getDistfilesSize(uri_map)
688 file_size_str = "Unknown (missing " + \
689 "digest for %s)" % (e,)
694 if db is not vardb and \
695 db.cpv_exists(mycpv):
697 if not myebuild and hasattr(db, "bintree"):
698 myebuild = db.bintree.getname(mycpv)
700 mysum[0] = os.stat(myebuild).st_size
705 if myebuild and file_size_str is None:
706 mystr = str(mysum[0] / 1024)
710 mystr = mystr[:mycount] + "," + mystr[mycount:]
711 file_size_str = mystr + " kB"
715 print " ", darkgreen("Latest version available:"),myversion
716 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
719 (darkgreen("Size of files:"), file_size_str)
720 print " ", darkgreen("Homepage:")+" ",homepage
721 print " ", darkgreen("Description:")+" ",desc
722 print " ", darkgreen("License:")+" ",license
727 def getInstallationStatus(self,package):
728 installed_package = self.vartree.dep_bestmatch(package)
730 version = self.getVersion(installed_package,search.VERSION_RELEASE)
732 result = darkgreen("Latest version installed:")+" "+version
734 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
737 def getVersion(self,full_package,detail):
738 if len(full_package) > 1:
739 package_parts = portage.catpkgsplit(full_package)
740 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
741 result = package_parts[2]+ "-" + package_parts[3]
743 result = package_parts[2]
748 class RootConfig(object):
749 """This is used internally by depgraph to track information about a
753 "ebuild" : "porttree",
754 "binary" : "bintree",
755 "installed" : "vartree"
759 for k, v in pkg_tree_map.iteritems():
762 def __init__(self, settings, trees, setconfig):
764 self.settings = settings
765 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
766 self.root = self.settings["ROOT"]
767 self.setconfig = setconfig
768 self.sets = self.setconfig.getSets()
769 self.visible_pkgs = PackageVirtualDbapi(self.settings)
771 def create_world_atom(pkg, args_set, root_config):
772 """Create a new atom for the world file if one does not exist. If the
773 argument atom is precise enough to identify a specific slot then a slot
774 atom will be returned. Atoms that are in the system set may also be stored
775 in world since system atoms can only match one slot while world atoms can
776 be greedy with respect to slots. Unslotted system packages will not be
779 arg_atom = args_set.findAtomForPackage(pkg)
782 cp = portage.dep_getkey(arg_atom)
784 sets = root_config.sets
785 portdb = root_config.trees["porttree"].dbapi
786 vardb = root_config.trees["vartree"].dbapi
787 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
788 for cpv in portdb.match(cp))
789 slotted = len(available_slots) > 1 or \
790 (len(available_slots) == 1 and "0" not in available_slots)
792 # check the vdb in case this is multislot
793 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
794 for cpv in vardb.match(cp))
795 slotted = len(available_slots) > 1 or \
796 (len(available_slots) == 1 and "0" not in available_slots)
797 if slotted and arg_atom != cp:
798 # If the user gave a specific atom, store it as a
799 # slot atom in the world file.
800 slot_atom = pkg.slot_atom
802 # For USE=multislot, there are a couple of cases to
805 # 1) SLOT="0", but the real SLOT spontaneously changed to some
806 # unknown value, so just record an unslotted atom.
808 # 2) SLOT comes from an installed package and there is no
809 # matching SLOT in the portage tree.
811 # Make sure that the slot atom is available in either the
812 # portdb or the vardb, since otherwise the user certainly
813 # doesn't want the SLOT atom recorded in the world file
814 # (case 1 above). If it's only available in the vardb,
815 # the user may be trying to prevent a USE=multislot
816 # package from being removed by --depclean (case 2 above).
819 if not portdb.match(slot_atom):
820 # SLOT seems to come from an installed multislot package
822 # If there is no installed package matching the SLOT atom,
823 # it probably changed SLOT spontaneously due to USE=multislot,
824 # so just record an unslotted atom.
825 if vardb.match(slot_atom):
826 # Now verify that the argument is precise
827 # enough to identify a specific slot.
828 matches = mydb.match(arg_atom)
829 matched_slots = set()
831 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
832 if len(matched_slots) == 1:
833 new_world_atom = slot_atom
835 if new_world_atom == sets["world"].findAtomForPackage(pkg):
836 # Both atoms would be identical, so there's nothing to add.
839 # Unlike world atoms, system atoms are not greedy for slots, so they
840 # can't be safely excluded from world if they are slotted.
841 system_atom = sets["system"].findAtomForPackage(pkg)
843 if not portage.dep_getkey(system_atom).startswith("virtual/"):
845 # System virtuals aren't safe to exclude from world since they can
846 # match multiple old-style virtuals but only one of them will be
847 # pulled in by update or depclean.
848 providers = portdb.mysettings.getvirtuals().get(
849 portage.dep_getkey(system_atom))
850 if providers and len(providers) == 1 and providers[0] == cp:
852 return new_world_atom
854 def filter_iuse_defaults(iuse):
856 if flag.startswith("+") or flag.startswith("-"):
861 class SlotObject(object):
862 __slots__ = ("__weakref__",)
864 def __init__(self, **kwargs):
865 classes = [self.__class__]
870 classes.extend(c.__bases__)
871 slots = getattr(c, "__slots__", None)
875 myvalue = kwargs.get(myattr, None)
876 setattr(self, myattr, myvalue)
880 Create a new instance and copy all attributes
881 defined from __slots__ (including those from
884 obj = self.__class__()
886 classes = [self.__class__]
891 classes.extend(c.__bases__)
892 slots = getattr(c, "__slots__", None)
896 setattr(obj, myattr, getattr(self, myattr))
class AbstractDepPriority(SlotObject):
	"""Common machinery for dependency priorities.

	Subclasses supply __int__(); every rich comparison below compares
	that integer value against the other operand, so instances order
	naturally against plain numbers and against each other.
	"""
	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return int(self) < other

	def __le__(self, other):
		return int(self) <= other

	def __eq__(self, other):
		return int(self) == other

	def __ne__(self, other):
		return int(self) != other

	def __gt__(self, other):
		return int(self) > other

	def __ge__(self, other):
		return int(self) >= other

	def copy(self):
		# Return a shallow copy of this priority.
		return copy.copy(self)
925 class DepPriority(AbstractDepPriority):
927 This class generates an integer priority level based of various
928 attributes of the dependency relationship. Attributes can be assigned
929 at any time and the new integer value will be generated on calls to the
930 __int__() method. Rich comparison operators are supported.
932 The boolean attributes that affect the integer value are "satisfied",
933 "buildtime", "runtime", and "system". Various combinations of
934 attributes lead to the following priority levels:
936 Combination of properties Priority Category
938 not satisfied and buildtime 0 HARD
939 not satisfied and runtime -1 MEDIUM
940 not satisfied and runtime_post -2 MEDIUM_SOFT
941 satisfied and buildtime and rebuild -3 SOFT
942 satisfied and buildtime -4 SOFT
943 satisfied and runtime -5 SOFT
944 satisfied and runtime_post -6 SOFT
945 (none of the above) -6 SOFT
947 Several integer constants are defined for categorization of priority
950 MEDIUM The upper boundary for medium dependencies.
951 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
952 SOFT The upper boundary for soft dependencies.
953 MIN The lower boundary for soft dependencies.
955 __slots__ = ("satisfied", "rebuild")
962 if not self.satisfied:
967 if self.runtime_post:
975 if self.runtime_post:
980 myvalue = self.__int__()
981 if myvalue > self.MEDIUM:
983 if myvalue > self.MEDIUM_SOFT:
985 if myvalue > self.SOFT:
989 class BlockerDepPriority(DepPriority):
994 BlockerDepPriority.instance = BlockerDepPriority()
996 class UnmergeDepPriority(AbstractDepPriority):
997 __slots__ = ("satisfied",)
999 Combination of properties Priority Category
1002 runtime_post -1 HARD
1004 (none of the above) -2 SOFT
1014 if self.runtime_post:
1021 myvalue = self.__int__()
1022 if myvalue > self.SOFT:
1026 class FakeVartree(portage.vartree):
1027 """This is implements an in-memory copy of a vartree instance that provides
1028 all the interfaces required for use by the depgraph. The vardb is locked
1029 during the constructor call just long enough to read a copy of the
1030 installed package information. This allows the depgraph to do it's
1031 dependency calculations without holding a lock on the vardb. It also
1032 allows things like vardb global updates to be done in memory so that the
1033 user doesn't necessarily need write access to the vardb in cases where
1034 global updates are necessary (updates are performed when necessary if there
1035 is not a matching ebuild in the tree)."""
1036 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1037 self._root_config = root_config
1038 if pkg_cache is None:
1040 real_vartree = root_config.trees["vartree"]
1041 portdb = root_config.trees["porttree"].dbapi
1042 self.root = real_vartree.root
1043 self.settings = real_vartree.settings
1044 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1045 self._pkg_cache = pkg_cache
1046 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1047 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1049 # At least the parent needs to exist for the lock file.
1050 portage.util.ensure_dirs(vdb_path)
1051 except portage.exception.PortageException:
1055 if acquire_lock and os.access(vdb_path, os.W_OK):
1056 vdb_lock = portage.locks.lockdir(vdb_path)
1057 real_dbapi = real_vartree.dbapi
1059 for cpv in real_dbapi.cpv_all():
1060 cache_key = ("installed", self.root, cpv, "nomerge")
1061 pkg = self._pkg_cache.get(cache_key)
1063 metadata = pkg.metadata
1065 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1066 myslot = metadata["SLOT"]
1067 mycp = portage.dep_getkey(cpv)
1068 myslot_atom = "%s:%s" % (mycp, myslot)
1070 mycounter = long(metadata["COUNTER"])
1073 metadata["COUNTER"] = str(mycounter)
1074 other_counter = slot_counters.get(myslot_atom, None)
1075 if other_counter is not None:
1076 if other_counter > mycounter:
1078 slot_counters[myslot_atom] = mycounter
1080 pkg = Package(built=True, cpv=cpv,
1081 installed=True, metadata=metadata,
1082 root_config=root_config, type_name="installed")
1083 self._pkg_cache[pkg] = pkg
1084 self.dbapi.cpv_inject(pkg)
1085 real_dbapi.flush_cache()
1088 portage.locks.unlockdir(vdb_lock)
1089 # Populate the old-style virtuals using the cached values.
1090 if not self.settings.treeVirtuals:
1091 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1092 portage.getCPFromCPV, self.get_all_provides())
1094 	# Initialize variables needed for lazy cache pulls of the live ebuild
1095 # metadata. This ensures that the vardb lock is released ASAP, without
1096 # being delayed in case cache generation is triggered.
1097 self._aux_get = self.dbapi.aux_get
1098 self.dbapi.aux_get = self._aux_get_wrapper
1099 self._match = self.dbapi.match
1100 self.dbapi.match = self._match_wrapper
1101 self._aux_get_history = set()
1102 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1103 self._portdb = portdb
1104 self._global_updates = None
1106 def _match_wrapper(self, cpv, use_cache=1):
1108 Make sure the metadata in Package instances gets updated for any
1109 cpv that is returned from a match() call, since the metadata can
1110 be accessed directly from the Package instance instead of via
1113 matches = self._match(cpv, use_cache=use_cache)
1115 if cpv in self._aux_get_history:
1117 self._aux_get_wrapper(cpv, [])
1120 def _aux_get_wrapper(self, pkg, wants):
1121 if pkg in self._aux_get_history:
1122 return self._aux_get(pkg, wants)
1123 self._aux_get_history.add(pkg)
1125 # Use the live ebuild metadata if possible.
1126 live_metadata = dict(izip(self._portdb_keys,
1127 self._portdb.aux_get(pkg, self._portdb_keys)))
1128 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1130 self.dbapi.aux_update(pkg, live_metadata)
1131 except (KeyError, portage.exception.PortageException):
1132 if self._global_updates is None:
1133 self._global_updates = \
1134 grab_global_updates(self._portdb.porttree_root)
1135 perform_global_updates(
1136 pkg, self.dbapi, self._global_updates)
1137 return self._aux_get(pkg, wants)
1139 def sync(self, acquire_lock=1):
1141 Call this method to synchronize state with the real vardb
1142 after one or more packages may have been installed or
1145 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1147 # At least the parent needs to exist for the lock file.
1148 portage.util.ensure_dirs(vdb_path)
1149 except portage.exception.PortageException:
1153 if acquire_lock and os.access(vdb_path, os.W_OK):
1154 vdb_lock = portage.locks.lockdir(vdb_path)
1158 portage.locks.unlockdir(vdb_lock)
1162 real_vardb = self._root_config.trees["vartree"].dbapi
1163 current_cpv_set = frozenset(real_vardb.cpv_all())
1164 pkg_vardb = self.dbapi
1165 aux_get_history = self._aux_get_history
1167 # Remove any packages that have been uninstalled.
1168 for pkg in list(pkg_vardb):
1169 if pkg.cpv not in current_cpv_set:
1170 pkg_vardb.cpv_remove(pkg)
1171 aux_get_history.discard(pkg.cpv)
1173 # Validate counters and timestamps.
1176 validation_keys = ["COUNTER", "_mtime_"]
1177 for cpv in current_cpv_set:
1179 pkg_hash_key = ("installed", root, cpv, "nomerge")
1180 pkg = pkg_vardb.get(pkg_hash_key)
1182 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1184 if counter != pkg.metadata["COUNTER"] or \
1186 pkg_vardb.cpv_remove(pkg)
1187 aux_get_history.discard(pkg.cpv)
1191 pkg = self._pkg(cpv)
1193 other_counter = slot_counters.get(pkg.slot_atom)
1194 if other_counter is not None:
1195 if other_counter > pkg.counter:
1198 slot_counters[pkg.slot_atom] = pkg.counter
1199 pkg_vardb.cpv_inject(pkg)
1201 real_vardb.flush_cache()
1203 def _pkg(self, cpv):
1204 root_config = self._root_config
1205 real_vardb = root_config.trees["vartree"].dbapi
1206 db_keys = list(real_vardb._aux_cache_keys)
1207 pkg = Package(cpv=cpv, installed=True,
1208 metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
1209 root_config=root_config,
1210 type_name="installed")
def grab_global_updates(portdir):
	"""Collect every package update command from profiles/updates.

	A missing updates directory simply yields an empty list; errors
	reported by parse_updates() are ignored here.
	"""
	from portage.update import grab_updates, parse_updates
	updates_dir = os.path.join(portdir, "profiles", "updates")
	try:
		raw_updates = grab_updates(updates_dir)
	except portage.exception.DirectoryNotFound:
		raw_updates = []
	all_commands = []
	for _key, _stat, content in raw_updates:
		parsed, _errors = parse_updates(content)
		all_commands.extend(parsed)
	return all_commands
def perform_global_updates(mycpv, mydb, mycommands):
	"""Apply package-move commands to one package's dependency
	metadata (DEPEND/RDEPEND/PDEPEND) inside the given dbapi."""
	from portage.update import update_dbentries
	dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	dep_metadata = dict(izip(dep_keys, mydb.aux_get(mycpv, dep_keys)))
	changed = update_dbentries(mycommands, dep_metadata)
	if changed:
		mydb.aux_update(mycpv, changed)
def visible(pkgsettings, pkg):
	"""
	Check if a package is visible. An unparseable LICENSE
	(InvalidDependString) is treated as masked.
	TODO: optionally generate a list of masking reasons
	@rtype: Boolean
	@returns: True if the package is visible, False otherwise.
	"""
	metadata = pkg.metadata
	if not metadata["SLOT"]:
		return False
	if pkg.built and not pkg.installed and "CHOST" in metadata:
		# Reject binaries built for a different CHOST.
		if not pkgsettings._accept_chost(pkg):
			return False
	eapi = metadata["EAPI"]
	if not portage.eapi_is_supported(eapi):
		return False
	if not pkg.installed:
		if portage._eapi_is_deprecated(eapi):
			return False
		if pkgsettings._getMissingKeywords(pkg.cpv, metadata):
			return False
	if pkgsettings._getMaskAtom(pkg.cpv, metadata):
		return False
	if pkgsettings._getProfileMaskAtom(pkg.cpv, metadata):
		return False
	try:
		if pkgsettings._getMissingLicenses(pkg.cpv, metadata):
			return False
	except portage.exception.InvalidDependString:
		return False
	return True
def get_masking_status(pkg, pkgsettings, root_config):
	"""Collect human-readable masking reasons for *pkg*: the reasons from
	portage.getmaskingstatus() plus CHOST and SLOT sanity checks."""
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)

	# CHOST mismatch only matters for built, not-yet-installed packages.
	if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
		if not pkgsettings._accept_chost(pkg):
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])

	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""Return (metadata, mreasons) for *cpv* from *db*. When metadata is
	None, mreasons is ["corruption"]."""
	metadata = dict(izip(db_keys,
		db.aux_get(cpv, db_keys)))
	if metadata and not built:
		# Compute effective USE for unbuilt ebuilds.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
	if metadata is None:
		mreasons = ["corruption"]
	# NOTE(review): presumably only reached when metadata is not None —
	# confirm against the full source.
	pkg = Package(type_name=pkg_type, root_config=root_config,
		cpv=cpv, built=built, installed=installed, metadata=metadata)
	mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""Print mask reasons, package.mask comments and license file locations
	for each (root_config, pkgsettings, cpv, metadata, mreasons) tuple.

	@returns: True if any package was masked due to an unsupported EAPI.
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
		comment, filename = None, None
		if "package.mask" in mreasons:
			# Look up the mask file comment and its location.
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not portage.eapi_is_supported(metadata["EAPI"]):
			have_eapi_mask = True
			missing_licenses = \
				pkgsettings._getMissingLicenses(
		except portage.exception.InvalidDependString:
			# This will have already been reported
			# above via mreasons.
		print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
		if comment and comment not in shown_comments:
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			# Show each license location only once.
			if l in shown_licenses:
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.") % (l, l_path)
			shown_licenses.add(l)
	return have_eapi_mask
class Task(SlotObject):
	"""Hashable task base class. Identity is defined entirely by a tuple
	"hash key" that subclasses supply via _get_hash_key()."""

	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses are responsible for populating self._hash_key.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			raise NotImplementedError(self)

	def __eq__(self, other):
		# Tasks compare equal to anything equal to their hash key tuple.
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

		# Cache the hash of the hash key on first use.
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value

		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

		return str(self._get_hash_key())
class Blocker(Task):
	"""A blocker atom encountered for a particular root."""

	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# cp is the category/package portion of the blocker atom.
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
class Package(Task):
	"""A package (ebuild, binary or installed) with attributes derived from
	its metadata; identity is (type_name, root, cpv, operation)."""

	__hash__ = Task.__hash__
	__slots__ = ("built", "cpv", "depth",
		"installed", "metadata", "onlydeps", "operation",
		"root_config", "type_name",
		"category", "counter", "cp", "cpv_split",
		"inherited", "iuse", "mtime",
		"pf", "pv_split", "root", "slot", "slot_atom", "use")

	# Metadata keys fetched for every Package instance.
		"CHOST", "COUNTER", "DEPEND", "EAPI",
		"INHERITED", "IUSE", "KEYWORDS",
		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.root = self.root_config.root
		# Wrap metadata so updates are synchronized back into attributes.
		self.metadata = _PackageMetadataWrapper(self, self.metadata)
		self.cp = portage.cpv_getkey(self.cpv)
		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]

		# Inner helper holding the set of enabled USE flags.
		__slots__ = ("__weakref__", "enabled")

		def __init__(self, use):
			self.enabled = frozenset(use)

	class _iuse(object):
		# Inner helper representing the package's IUSE token sets.

		__slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

		def __init__(self, tokens, iuse_implicit):
			self.tokens = tuple(tokens)
			self.iuse_implicit = iuse_implicit
				# "+flag" / "-flag" defaults are stripped of their prefix.
				enabled.append(x[1:])
				disabled.append(x[1:])
			self.enabled = frozenset(enabled)
			self.disabled = frozenset(disabled)
			self.all = frozenset(chain(enabled, disabled, other))

		def __getattribute__(self, name):
			# Build and cache the IUSE-matching regex lazily on first access.
				return object.__getattribute__(self, "regex")
			except AttributeError:
				all = object.__getattribute__(self, "all")
				iuse_implicit = object.__getattribute__(self, "iuse_implicit")
				# Escape anything except ".*" which is supposed
				# to pass through from _get_implicit_iuse()
				regex = (re.escape(x) for x in chain(all, iuse_implicit))
				regex = "^(%s)$" % "|".join(regex)
				regex = regex.replace("\\.\\*", ".*")
				self.regex = re.compile(regex)
			return object.__getattribute__(self, name)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# Default operation is "merge"; onlydeps and installed
			# packages are "nomerge".
			if self.operation is None:
				self.operation = "merge"
				if self.onlydeps or self.installed:
					self.operation = "nomerge"
			(self.type_name, self.root, self.cpv, self.operation)
		return self._hash_key

	def __cmp__(self, other):

	# Rich comparisons order packages of the same cp by version.
	def __lt__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

	def __le__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

	def __gt__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

	def __ge__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Every auxdb key except the UNUSED_* placeholders and the legacy
# CDEPEND key, plus everything Package itself requires.
_all_metadata_keys = set(key for key in portage.auxdbkeys
	if not (key.startswith("UNUSED_") or key == "CDEPEND"))
_all_metadata_keys.update(Package.metadata_keys)
from portage.cache.mappings import slot_dict_class
# Slot-optimized dict base class restricted to the known metadata keys.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
	"""
	Detect metadata updates and synchronize Package attributes.
	"""

	__slots__ = ("_pkg",)
	# Keys whose assignment triggers the matching _set_<key>() hook below.
	_wrapped_keys = frozenset(
		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

	def __init__(self, pkg, metadata):
		_PackageMetadataWrapperBase.__init__(self)
		self.update(metadata)

	def __setitem__(self, k, v):
		_PackageMetadataWrapperBase.__setitem__(self, k, v)
		if k in self._wrapped_keys:
			# Dispatch to the matching _set_* synchronizer.
			getattr(self, "_set_" + k.lower())(k, v)

	def _set_inherited(self, k, v):
		# Normalize a whitespace-separated string into a frozenset.
		if isinstance(v, basestring):
			v = frozenset(v.split())
		self._pkg.inherited = v

	def _set_iuse(self, k, v):
		self._pkg.iuse = self._pkg._iuse(
			v.split(), self._pkg.root_config.iuse_implicit)

	def _set_slot(self, k, v):

	def _set_use(self, k, v):
		self._pkg.use = self._pkg._use(v.split())

	def _set_counter(self, k, v):
		if isinstance(v, basestring):
		self._pkg.counter = v

	def _set__mtime_(self, k, v):
		if isinstance(v, basestring):
			v = float(v.strip())
class EbuildFetchonly(SlotObject):
	"""Fetch an ebuild's sources (the "fetch" phase) without building,
	using a private temporary directory for the build dir."""

	__slots__ = ("fetch_all", "pkg", "pretend", "settings")

		# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
		# ensuring sane $PWD (bug #239560) and storing elog
		# messages. Use a private temp directory, in order
		# to avoid locking the main one.
		settings = self.settings
		global_tmpdir = settings["PORTAGE_TMPDIR"]
		from tempfile import mkdtemp
		private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
		settings["PORTAGE_TMPDIR"] = private_tmpdir
		settings.backup_changes("PORTAGE_TMPDIR")
			retval = self._execute()
			# Restore the original tmpdir and remove the private one.
			settings["PORTAGE_TMPDIR"] = global_tmpdir
			settings.backup_changes("PORTAGE_TMPDIR")
			shutil.rmtree(private_tmpdir)

		settings = self.settings
		root_config = pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		settings.setcpv(pkg)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		use_cache = 1 # always true
		# Set up the ebuild environment, then run the fetch phase.
		portage.doebuild_environment(ebuild_path, "fetch",
			root_config.root, settings, debug, use_cache, portdb)
		portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
		retval = portage.doebuild(ebuild_path, "fetch",
			self.settings["ROOT"], self.settings, debug=debug,
			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
			mydbapi=portdb, tree="porttree")
		if retval != os.EX_OK:
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
			eerror(msg, phase="unpack", key=pkg.cpv)
		portage.elog.elog_process(self.pkg.cpv, self.settings)
class AsynchronousTask(SlotObject):
	"""
	Subclasses override _wait() and _poll() so that calls
	to public methods can be wrapped for implementing
	hooks such as exit listener notification.

	Sublasses should call self.wait() to notify exit listeners after
	the task is complete and self.returncode has been set.
	"""

	__slots__ = ("background", "cancelled", "returncode") + \
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")

		"""
		Start an asynchronous task and then return as soon as possible.
		"""

		raise NotImplementedError(self)

		# Alive while no returncode has been recorded yet.
		return self.returncode is None

		return self.returncode

		if self.returncode is None:
		return self.returncode

		return self.returncode

		self.cancelled = True

	def addStartListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._start_listeners is None:
			self._start_listeners = []
		self._start_listeners.append(f)

	def removeStartListener(self, f):
		if self._start_listeners is None:
		self._start_listeners.remove(f)

	def _start_hook(self):
		if self._start_listeners is not None:
			# Detach the listener list before notifying.
			start_listeners = self._start_listeners
			self._start_listeners = None

		for f in start_listeners:

	def addExitListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._exit_listeners is None:
			self._exit_listeners = []
		self._exit_listeners.append(f)

	def removeExitListener(self, f):
		if self._exit_listeners is None:
			if self._exit_listener_stack is not None:
				self._exit_listener_stack.remove(f)
		self._exit_listeners.remove(f)

	def _wait_hook(self):
		"""
		Call this method after the task completes, just before returning
		the returncode from wait() or poll(). This hook is
		used to trigger exit listeners when the returncode first
		becomes available.
		"""
		if self.returncode is not None and \
			self._exit_listeners is not None:

			# This prevents recursion, in case one of the
			# exit handlers triggers this method again by
			# calling wait(). Use a stack that gives
			# removeExitListener() an opportunity to consume
			# listeners from the stack, before they can get
			# called below. This is necessary because a call
			# to one exit listener may result in a call to
			# removeExitListener() for another listener on
			# the stack. That listener needs to be removed
			# from the stack since it would be inconsistent
			# to call it after it has been been passed into
			# removeExitListener().
			self._exit_listener_stack = self._exit_listeners
			self._exit_listeners = None

			self._exit_listener_stack.reverse()
			while self._exit_listener_stack:
				self._exit_listener_stack.pop()(self)
class PipeReader(AsynchronousTask):
	"""
	Reads output from one or more files and saves it in memory,
	for retrieval via the getvalue() method. This is driven by
	the scheduler's poll() loop, so it runs entirely within the
	"""

	__slots__ = ("input_files", "scheduler",) + \
		("pid", "_read_data", "_registered", "_reg_ids")

		self._reg_ids = set()
		self._read_data = []
		for k, f in self.input_files.iteritems():
			# Make each input fd non-blocking and register it with
			# the scheduler for POLLIN events.
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
			self._reg_ids.add(self.scheduler.register(f.fileno(),
				PollConstants.POLLIN, self._output_handler))
		self._registered = True

		return self._registered

		if self.returncode is not None:
			return self.returncode
		if self._registered:
			self.scheduler.schedule(self._reg_ids)
		self.returncode = os.EX_OK
		return self.returncode

		"""Retrieve the entire contents"""
		return "".join(self._read_data)

		"""Free the memory buffer."""
		self._read_data = None

	def _output_handler(self, fd, event):
		# Find the file that became readable and buffer its data.
		files = self.input_files
		for f in files.itervalues():
			if fd == f.fileno():
		buf = array.array('B')
			buf.fromfile(f, self._bufsize)
			self._read_data.append(buf.tostring())
		return self._registered

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""
		self._registered = False

		if self._reg_ids is not None:
			for reg_id in self._reg_ids:
				self.scheduler.unregister(reg_id)
			self._reg_ids = None

		if self.input_files is not None:
			for f in self.input_files.itervalues():
			self.input_files = None
class CompositeTask(AsynchronousTask):
	"""A task implemented as a sequence of subtasks, tracked through the
	_current_task attribute."""

	__slots__ = ("scheduler",) + ("_current_task",)

		return self._current_task is not None

		self.cancelled = True
		if self._current_task is not None:
			self._current_task.cancel()

		"""
		This does a loop calling self._current_task.poll()
		repeatedly as long as the value of self._current_task
		keeps changing. It calls poll() a maximum of one time
		for a given self._current_task instance. This is useful
		since calling poll() on a task can trigger advance to
		the next task could eventually lead to the returncode
		being set in cases when polling only a single task would
		not have the same effect.
		"""
			task = self._current_task
			if task is None or task is prev:
				# don't poll the same task more than once
		return self.returncode

			task = self._current_task
				# don't wait for the same task more than once
				# Before the task.wait() method returned, an exit
				# listener should have set self._current_task to either
				# a different task or None. Something is wrong.
				raise AssertionError("self._current_task has not " + \
					"changed since calling wait", self, task)
		return self.returncode

	def _assert_current(self, task):
		"""
		Raises an AssertionError if the given task is not the
		same one as self._current_task. This can be useful
		"""
		if task is not self._current_task:
			raise AssertionError("Unrecognized task: %s" % (task,))

	def _default_exit(self, task):
		"""
		Calls _assert_current() on the given task and then sets the
		composite returncode attribute if task.returncode != os.EX_OK.
		If the task failed then self._current_task will be set to None.
		Subclasses can use this as a generic task exit callback.

		@returns: The task.returncode attribute.
		"""
		self._assert_current(task)
		if task.returncode != os.EX_OK:
			self.returncode = task.returncode
			self._current_task = None
		return task.returncode

	def _final_exit(self, task):
		"""
		Assumes that task is the final task of this composite task.
		Calls _default_exit() and sets self.returncode to the task's
		returncode and sets self._current_task to None.
		"""
		self._default_exit(task)
		self._current_task = None
		self.returncode = task.returncode
		return self.returncode

	def _default_final_exit(self, task):
		"""
		This calls _final_exit() and then wait().

		Subclasses can use this as a generic final task exit callback.
		"""
		self._final_exit(task)

	def _start_task(self, task, exit_handler):
		"""
		Register exit handler for the given task, set it
		as self._current_task, and call task.start().

		Subclasses can use this as a generic way to start
		"""
		task.addExitListener(exit_handler)
		self._current_task = task
class TaskSequence(CompositeTask):
	"""
	A collection of tasks that executes sequentially. Each task
	must have a addExitListener() method that can be used as
	a means to trigger movement from one task to the next.
	"""

	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		self._task_queue = deque()

	def add(self, task):
		self._task_queue.append(task)

		self._start_next_task()

		# Cancelling drops all pending tasks before delegating.
		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		self._start_task(self._task_queue.popleft(),
			self._task_exit_handler)

	def _task_exit_handler(self, task):
		# Advance through the queue as each task exits successfully.
		if self._default_exit(task) != os.EX_OK:
		elif self._task_queue:
			self._start_next_task()
			self._final_exit(task)
class SubProcess(AsynchronousTask):
	"""Asynchronous task backed by a forked child process, monitored via
	a file descriptor registered with the scheduler."""

	__slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id")

	# A file descriptor is required for the scheduler to monitor changes from
	# inside a poll() loop. When logging is not enabled, create a pipe just to
	# serve this purpose alone.

		if self.returncode is not None:
			return self.returncode
		if self.pid is None:
			return self.returncode
		if self._registered:
			return self.returncode

			# Non-blocking reap of the child.
			retval = os.waitpid(self.pid, os.WNOHANG)
			# ECHILD means the child is already gone; treat as failure.
			if e.errno != errno.ECHILD:
			retval = (self.pid, 1)

		if retval == (0, 0):
		self._set_returncode(retval)
		return self.returncode

			os.kill(self.pid, signal.SIGTERM)
			# ESRCH: process already exited.
			if e.errno != errno.ESRCH:

		self.cancelled = True
		if self.pid is not None:
		return self.returncode

		return self.pid is not None and \
			self.returncode is None

		if self.returncode is not None:
			return self.returncode

		if self._registered:
			self.scheduler.schedule(self._reg_id)

		if self.returncode is not None:
			return self.returncode

			# Blocking wait for the child to exit.
			wait_retval = os.waitpid(self.pid, 0)
			if e.errno != errno.ECHILD:
			self._set_returncode((self.pid, 1))
			self._set_returncode(wait_retval)

		return self.returncode

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""
		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)

		if self._files is not None:
			for f in self._files.itervalues():

	def _set_returncode(self, wait_retval):
		# Normalize the 16-bit os.waitpid() status into a single exit code.
		retval = wait_retval[1]

		if retval != os.EX_OK:
			retval = (retval & 0xff) << 8
			retval = retval >> 8

		self.returncode = retval
class SpawnProcess(SubProcess):
	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	"""

	# Attribute names forwarded as keyword args to spawn().
	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \

	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

		if self.fd_pipes is None:
		fd_pipes = self.fd_pipes
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
			if fd == sys.stderr.fileno():

		logfile = self.logfile
		self._files = self._files_dict()

		master_fd, slave_fd = self._pipe(fd_pipes)
		# Reads from the master end must not block the event loop.
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes_orig = fd_pipes.copy()

			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = open('/dev/null', 'rb')
			fd_pipes[0] = null_input.fileno()
			fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'r')
		if logfile is not None:

			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, "a")
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,

			if not self.background:
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')

			output_handler = self._output_handler

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		retval = self._spawn(self.args, **kwargs)

		if null_input is not None:

		# NOTE(review): a plain int retval appears to indicate spawn
		# failure (no child pid) — confirm against portage.process.spawn.
		if isinstance(retval, int):
			for f in files.values():
			self.returncode = retval

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			PollConstants.POLLIN, output_handler)
		self._registered = True

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""

	def _spawn(self, args, **kwargs):
		return portage.process.spawn(args, **kwargs)

	def _output_handler(self, fd, event):
		# Copy available child output to stdout (unless backgrounded)
		# and to the log file.
		buf = array.array('B')
			buf.fromfile(files.process, self._bufsize)
		if not self.background:
			buf.tofile(files.stdout)
			files.stdout.flush()
		buf.tofile(files.log)
		return self._registered

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""
		buf = array.array('B')
			buf.fromfile(files.process, self._bufsize)
		return self._registered
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	"""

	__slots__ = ("commands", "phase", "pkg", "settings")

		settings = self.settings
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		# Remove any stale exit status file before spawning.
		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		# Run the given commands through the shell with the ebuild env.
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Re-check via the ebuild exit-status helper, which may adjust
		# the returncode.
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
	"""Fetch an ebuild's sources in a child process by spawning the
	`ebuild` helper, using a locked private build directory."""

	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
		self._build_dir.lock()
		self._build_dir.clean()
		portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
		if self.logfile is None:
			self.logfile = settings.get("PORTAGE_LOG_FILE")

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		fetch_env["PORTAGE_NICENESS"] = "0"
			fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
			fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				if self.logfile is not None:
					elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
			features = self._build_dir.settings.features
			# On success, clean the build dir before releasing it.
			if self.returncode == os.EX_OK:
				self._build_dir.clean()
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
class EbuildBuildDir(SlotObject):
	"""Manages locking, creation and cleaning of a package's
	PORTAGE_BUILDDIR (and its parent category directory)."""

	__slots__ = ("dir_path", "pkg", "settings",
		"locked", "_catdir", "_lock_obj")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)

		"""
		This raises an AlreadyLocked exception if lock() is called
		while a lock is already held. In order to avoid this, call
		unlock() or check whether the "locked" attribute is True
		or False before calling lock().
		"""
		if self._lock_obj is not None:
			raise self.AlreadyLocked((self._lock_obj,))

		dir_path = self.dir_path
		if dir_path is None:
			# Derive PORTAGE_BUILDDIR by running doebuild_environment()
			# for the setup phase.
			root_config = self.pkg.root_config
			portdb = root_config.trees["porttree"].dbapi
			ebuild_path = portdb.findname(self.pkg.cpv)
			settings = self.settings
			settings.setcpv(self.pkg)
			debug = settings.get("PORTAGE_DEBUG") == "1"
			use_cache = 1 # always true
			portage.doebuild_environment(ebuild_path, "setup", root_config.root,
				self.settings, debug, use_cache, portdb)
			dir_path = self.settings["PORTAGE_BUILDDIR"]

		catdir = os.path.dirname(dir_path)
		self._catdir = catdir

		portage.util.ensure_dirs(os.path.dirname(catdir),
			gid=portage.portage_gid,
			# Lock the category dir while creating it and the build dir.
			catdir_lock = portage.locks.lockdir(catdir)
			portage.util.ensure_dirs(catdir,
				gid=portage.portage_gid,
			self._lock_obj = portage.locks.lockdir(dir_path)
			self.locked = self._lock_obj is not None
			if catdir_lock is not None:
				portage.locks.unlockdir(catdir_lock)

		"""Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
		by keepwork or keeptemp in FEATURES."""
		settings = self.settings
		features = settings.features
		if not ("keepwork" in features or "keeptemp" in features):
				shutil.rmtree(settings["PORTAGE_BUILDDIR"])
			# A missing build dir is not an error.
			except EnvironmentError, e:
				if e.errno != errno.ENOENT:

		if self._lock_obj is None:

		portage.locks.unlockdir(self._lock_obj)
		self._lock_obj = None
		catdir = self._catdir
			catdir_lock = portage.locks.lockdir(catdir)
				# The category dir may be non-empty or already gone.
				if e.errno not in (errno.ENOENT,
					errno.ENOTEMPTY, errno.EEXIST):
				portage.locks.unlockdir(catdir_lock)

	class AlreadyLocked(portage.exception.PortageException):
2436 class EbuildBuild(CompositeTask):
2438 __slots__ = ("args_set", "config_pool", "find_blockers",
2439 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2440 "prefetcher", "settings", "world_atom") + \
2441 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2445 logger = self.logger
2448 settings = self.settings
2449 world_atom = self.world_atom
2450 root_config = pkg.root_config
2453 portdb = root_config.trees[tree].dbapi
2454 settings["EMERGE_FROM"] = pkg.type_name
2455 settings.backup_changes("EMERGE_FROM")
2457 ebuild_path = portdb.findname(self.pkg.cpv)
2458 self._ebuild_path = ebuild_path
2460 prefetcher = self.prefetcher
2461 if prefetcher is None:
2463 elif not prefetcher.isAlive():
2465 elif prefetcher.poll() is None:
2467 waiting_msg = "Fetching files " + \
2468 "in the background. " + \
2469 "To view fetch progress, run `tail -f " + \
2470 "/var/log/emerge-fetch.log` in another " + \
2472 msg_prefix = colorize("GOOD", " * ")
2473 from textwrap import wrap
2474 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2475 for line in wrap(waiting_msg, 65))
2476 if not self.background:
2477 writemsg(waiting_msg, noiselevel=-1)
2479 self._current_task = prefetcher
2480 prefetcher.addExitListener(self._prefetch_exit)
2483 self._prefetch_exit(prefetcher)
2485 def _prefetch_exit(self, prefetcher):
2489 settings = self.settings
2492 fetcher = EbuildFetchonly(
2493 fetch_all=opts.fetch_all_uri,
2494 pkg=pkg, pretend=opts.pretend,
2496 retval = fetcher.execute()
2497 self.returncode = retval
2501 fetcher = EbuildFetcher(config_pool=self.config_pool,
2502 fetchall=opts.fetch_all_uri,
2503 fetchonly=opts.fetchonly,
2504 background=self.background,
2505 pkg=pkg, scheduler=self.scheduler)
2507 self._start_task(fetcher, self._fetch_exit)
2509 def _fetch_exit(self, fetcher):
2513 fetch_failed = False
2515 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2517 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2519 if fetch_failed and fetcher.logfile is not None and \
2520 os.path.exists(fetcher.logfile):
2521 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2523 if not fetch_failed and fetcher.logfile is not None:
2524 # Fetch was successful, so remove the fetch log.
2526 os.unlink(fetcher.logfile)
2530 if fetch_failed or opts.fetchonly:
# EbuildBuild build-start logic (the enclosing `def` line was elided
# from this dump): locks the build dir, logs the Clean/Compile banner,
# decides whether a binary package must be built, and starts the
# EbuildExecuter with _build_exit as the completion callback.
2534 logger = self.logger
2536 pkg_count = self.pkg_count
2537 scheduler = self.scheduler
2538 settings = self.settings
2539 features = settings.features
2540 ebuild_path = self._ebuild_path
2541 system_set = pkg.root_config.sets["system"]
# Hold the build-directory lock for the duration of the build; it is
# released via _unlock_builddir().
2543 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2544 self._build_dir.lock()
2546 # Cleaning is triggered before the setup
2547 # phase, in portage.doebuild().
2548 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2549 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2550 short_msg = "emerge: (%s of %s) %s Clean" % \
2551 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2552 logger.log(msg, short_msg=short_msg)
2554 #buildsyspkg: Check if we need to _force_ binary package creation
# (third operand of this conjunction elided from the dump)
2555 self._issyspkg = "buildsyspkg" in features and \
2556 system_set.findAtomForPackage(pkg) and \
2559 if opts.buildpkg or self._issyspkg:
2561 self._buildpkg = True
2563 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2564 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2565 short_msg = "emerge: (%s of %s) %s Compile" % \
2566 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2567 logger.log(msg, short_msg=short_msg)
# else-branch banner: compiling/merging without binary packaging.
2570 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2571 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2572 short_msg = "emerge: (%s of %s) %s Compile" % \
2573 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2574 logger.log(msg, short_msg=short_msg)
2576 build = EbuildExecuter(background=self.background, pkg=pkg,
2577 scheduler=scheduler, settings=settings)
2578 self._start_task(build, self._build_exit)
# Flush any queued elog messages for this package, then release the
# build-directory lock taken when the build started.
2580 def _unlock_builddir(self):
2581 portage.elog.elog_process(self.pkg.cpv, self.settings)
2582 self._build_dir.unlock()
# EbuildBuild._build_exit -- after the compile/install phases finish:
# on failure, unlock the build dir; on success, either start the binary
# packager (EbuildBinpkg) or proceed straight to merge.
# NOTE(review): elided dump -- several branches/returns are missing.
2584 def _build_exit(self, build):
2585 if self._default_exit(build) != os.EX_OK:
2586 self._unlock_builddir()
2591 buildpkg = self._buildpkg
2594 self._final_exit(build)
# buildsyspkg path: announce the forced rescue tarball, echoing the
# message to the build log when one exists.
2599 msg = ">>> This is a system package, " + \
2600 "let's pack a rescue tarball.\n"
2602 log_path = self.settings.get("PORTAGE_LOG_FILE")
2603 if log_path is not None:
2604 log_file = open(log_path, 'a')
2610 if not self.background:
2611 portage.writemsg_stdout(msg, noiselevel=-1)
2613 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2614 scheduler=self.scheduler, settings=self.settings)
2616 self._start_task(packager, self._buildpkg_exit)
2618 def _buildpkg_exit(self, packager):
# (docstring fragment from the original follows; its quotes were
# elided from this dump)
2620 Released build dir lock when there is a failure or
2621 when in buildpkgonly mode. Otherwise, the lock will
2622 be released when merge() is called.
2625 if self._default_exit(packager) == os.EX_OK and \
2626 self.opts.buildpkgonly:
2627 # Need to call "clean" phase for buildpkgonly mode
2628 portage.elog.elog_process(self.pkg.cpv, self.settings)
# NOTE(review): the assignment binding `phase` (presumably "clean")
# was elided from this dump -- TODO confirm against upstream.
2630 clean_phase = EbuildPhase(background=self.background,
2631 pkg=self.pkg, phase=phase,
2632 scheduler=self.scheduler, settings=self.settings,
2634 self._start_task(clean_phase, self._clean_exit)
# Failure (or buildpkgonly without the clean path): release the lock.
2637 if self._final_exit(packager) != os.EX_OK or \
2638 self.opts.buildpkgonly:
2639 self._unlock_builddir()
# After the buildpkgonly "clean" phase: release the build-dir lock on
# failure or in buildpkgonly mode (tail of method elided in this dump).
2642 def _clean_exit(self, clean_phase):
2643 if self._final_exit(clean_phase) != os.EX_OK or \
2644 self.opts.buildpkgonly:
2645 self._unlock_builddir()
# EbuildBuild.install (its `def` line was elided from this dump).
# Original docstring text:
2650 Install the package and then clean up and release locks.
2651 Only call this after the build has completed successfully
2652 and neither fetchonly nor buildpkgonly mode are enabled.
2655 find_blockers = self.find_blockers
2656 ldpath_mtimes = self.ldpath_mtimes
2657 logger = self.logger
2659 pkg_count = self.pkg_count
2660 settings = self.settings
2661 world_atom = self.world_atom
2662 ebuild_path = self._ebuild_path
# Delegate the actual livefs merge to EbuildMerge; `tree` binding was
# elided from this dump (presumably "porttree" -- TODO confirm).
2665 merge = EbuildMerge(find_blockers=self.find_blockers,
2666 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2667 pkg_count=pkg_count, pkg_path=ebuild_path,
2668 scheduler=self.scheduler,
2669 settings=settings, tree=tree, world_atom=world_atom)
2671 msg = " === (%s of %s) Merging (%s::%s)" % \
2672 (pkg_count.curval, pkg_count.maxval,
2673 pkg.cpv, ebuild_path)
2674 short_msg = "emerge: (%s of %s) %s Merge" % \
2675 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2676 logger.log(msg, short_msg=short_msg)
# The build-dir lock is always released after the merge attempt
# (surrounding try/finally elided in this dump -- TODO confirm).
2679 rval = merge.execute()
2681 self._unlock_builddir()
# EbuildExecuter -- composite task that runs the ebuild phase sequence:
# clean -> setup -> unpack -> (prepare, configure,) compile, test,
# install, adjusting the phase list by EAPI.
# NOTE(review): elided dump; several branch bodies and returns missing.
2685 class EbuildExecuter(CompositeTask):
2687 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2689 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose ebuilds fetch at unpack time, requiring serialized
# $DISTDIR access (set contents elided from this dump).
2691 _live_eclasses = frozenset([
2701 self._tree = "porttree"
# Start with a "clean" phase (binding of `phase` elided).
2704 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2705 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2706 self._start_task(clean_phase, self._clean_phase_exit)
2708 def _clean_phase_exit(self, clean_phase):
2710 if self._default_exit(clean_phase) != os.EX_OK:
2715 scheduler = self.scheduler
2716 settings = self.settings
2719 # This initializes PORTAGE_LOG_FILE.
2720 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# The setup phase is throttled through the scheduler's dedicated
# setup queue rather than _start_task.
2722 setup_phase = EbuildPhase(background=self.background,
2723 pkg=pkg, phase="setup", scheduler=scheduler,
2724 settings=settings, tree=self._tree)
2726 setup_phase.addExitListener(self._setup_exit)
2727 self._current_task = setup_phase
2728 self.scheduler.scheduleSetup(setup_phase)
2730 def _setup_exit(self, setup_phase):
2732 if self._default_exit(setup_phase) != os.EX_OK:
2736 unpack_phase = EbuildPhase(background=self.background,
2737 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2738 settings=self.settings, tree=self._tree)
2740 if self._live_eclasses.intersection(self.pkg.inherited):
2741 # Serialize $DISTDIR access for live ebuilds since
2742 # otherwise they can interfere with each other.
2744 unpack_phase.addExitListener(self._unpack_exit)
2745 self._current_task = unpack_phase
2746 self.scheduler.scheduleUnpack(unpack_phase)
# Non-live ebuilds: run unpack as an ordinary sub-task.
2749 self._start_task(unpack_phase, self._unpack_exit)
2751 def _unpack_exit(self, unpack_phase):
2753 if self._default_exit(unpack_phase) != os.EX_OK:
2757 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2760 phases = self._phases
2761 eapi = pkg.metadata["EAPI"]
# Older EAPIs have no src_prepare/src_configure (slice expressions
# trimming `phases` were elided from this dump).
2762 if eapi in ("0", "1", "2_pre1"):
2763 # skip src_prepare and src_configure
2765 elif eapi in ("2_pre2",):
2769 for phase in phases:
2770 ebuild_phases.add(EbuildPhase(background=self.background,
2771 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2772 settings=self.settings, tree=self._tree))
2774 self._start_task(ebuild_phases, self._default_final_exit)
2776 class EbuildMetadataPhase(SubProcess):
# (docstring quotes elided from this dump)
2779 Asynchronous interface for the ebuild "depend" phase which is
2780 used to extract metadata from the ebuild.
2783 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2784 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2787 _file_names = ("ebuild",)
2788 _files_dict = slot_dict_class(_file_names, prefix="")
2789 _bufsize = SpawnProcess._bufsize
# _start: spawn `ebuild depend` with a pipe attached to the metadata
# fd and register the read end with the poll-based scheduler.
2793 settings = self.settings
2795 ebuild_path = self.ebuild_path
2796 debug = settings.get("PORTAGE_DEBUG") == "1"
2800 if self.fd_pipes is not None:
2801 fd_pipes = self.fd_pipes.copy()
2805 fd_pipes.setdefault(0, sys.stdin.fileno())
2806 fd_pipes.setdefault(1, sys.stdout.fileno())
2807 fd_pipes.setdefault(2, sys.stderr.fileno())
2809 # flush any pending output
2810 for fd in fd_pipes.itervalues():
2811 if fd == sys.stdout.fileno():
2813 if fd == sys.stderr.fileno():
2816 fd_pipes_orig = fd_pipes.copy()
2817 self._files = self._files_dict()
# Non-blocking read end so the scheduler's poll loop never stalls.
2820 master_fd, slave_fd = os.pipe()
2821 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2822 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2824 fd_pipes[self._metadata_fd] = slave_fd
2826 retval = portage.doebuild(ebuild_path, "depend",
2827 settings["ROOT"], settings, debug,
2828 mydbapi=self.portdb, tree="porttree",
2829 fd_pipes=fd_pipes, returnpid=True)
2833 if isinstance(retval, int):
2834 # doebuild failed before spawning
2836 self.returncode = retval
# On success doebuild returns a pid list; this class reaps the child
# itself, so drop it from portage's global spawned_pids bookkeeping.
2840 self.pid = retval[0]
2841 portage.process.spawned_pids.remove(self.pid)
2843 self._raw_metadata = []
2844 files.ebuild = os.fdopen(master_fd, 'r')
2845 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2846 PollConstants.POLLIN, self._output_handler)
2847 self._registered = True
# Poll callback: accumulate raw metadata; an empty read means EOF, at
# which point the joined output is zipped with portage.auxdbkeys and
# handed to metadata_callback.
2849 def _output_handler(self, fd, event):
2851 self._raw_metadata.append(files.ebuild.read())
2852 if not self._raw_metadata[-1]:
2856 if self.returncode == os.EX_OK:
2857 metadata = izip(portage.auxdbkeys,
2858 "".join(self._raw_metadata).splitlines())
2859 self.metadata_callback(self.cpv, self.ebuild_path,
2860 self.repo_path, metadata, self.ebuild_mtime)
2862 return self._registered
# EbuildProcess -- SpawnProcess subclass that runs a single ebuild
# phase via portage.doebuild in a child process.
2864 class EbuildProcess(SpawnProcess):
2866 __slots__ = ("phase", "pkg", "settings", "tree")
# (_start body; its `def` line was elided from this dump)
2869 # Don't open the log file during the clean phase since the
2870 # open file can result in an nfs lock on $T/build.log which
2871 # prevents the clean phase from removing $T.
2872 if self.phase not in ("clean", "cleanrm"):
2873 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2874 SpawnProcess._start(self)
# Prefer a pty (falling back to a pipe) so child output behaves like
# a terminal; copy_term_size mirrors the size of the stdout fd.
2876 def _pipe(self, fd_pipes):
2877 stdout_pipe = fd_pipes.get(1)
2878 got_pty, master_fd, slave_fd = \
2879 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2880 return (master_fd, slave_fd)
2882 def _spawn(self, args, **kwargs):
2884 root_config = self.pkg.root_config
# NOTE(review): the line binding `tree` (presumably self.tree) was
# elided from this dump.
2886 mydbapi = root_config.trees[tree].dbapi
2887 settings = self.settings
2888 ebuild_path = settings["EBUILD"]
2889 debug = settings.get("PORTAGE_DEBUG") == "1"
2891 rval = portage.doebuild(ebuild_path, self.phase,
2892 root_config.root, settings, debug,
2893 mydbapi=mydbapi, tree=tree, **kwargs)
2897 def _set_returncode(self, wait_retval):
2898 SpawnProcess._set_returncode(self, wait_retval)
# Phases other than clean/cleanrm write an exit-status marker file;
# reconcile the process exit code against it.
2900 if self.phase not in ("clean", "cleanrm"):
2901 self.returncode = portage._doebuild_exit_status_check_and_log(
2902 self.settings, self.phase, self.returncode)
2904 portage._post_phase_userpriv_perms(self.settings)
# EbuildPhase -- composite task wrapping one EbuildProcess plus any
# post-phase commands (e.g. install-time QA checks and fixups).
2906 class EbuildPhase(CompositeTask):
2908 __slots__ = ("background", "pkg", "phase",
2909 "scheduler", "settings", "tree")
2911 _post_phase_cmds = portage._post_phase_cmds
2915 ebuild_process = EbuildProcess(background=self.background,
2916 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
2917 settings=self.settings, tree=self.tree)
2919 self._start_task(ebuild_process, self._ebuild_exit)
2921 def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for QA problems, appending any
# findings to the log itself when running in the background.
2923 if self.phase == "install":
2925 log_path = self.settings.get("PORTAGE_LOG_FILE")
2927 if self.background and log_path is not None:
2928 log_file = open(log_path, 'a')
2931 portage._check_build_log(self.settings, out=out)
2933 if log_file is not None:
2936 if self._default_exit(ebuild_process) != os.EX_OK:
2940 settings = self.settings
2942 if self.phase == "install":
2943 portage._post_src_install_uid_fix(settings)
2945 post_phase_cmds = self._post_phase_cmds.get(self.phase)
2946 if post_phase_cmds is not None:
2947 post_phase = MiscFunctionsProcess(background=self.background,
2948 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
2949 scheduler=self.scheduler, settings=settings)
2950 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: this phase's result is final.
2953 self.returncode = ebuild_process.returncode
2954 self._current_task = None
2957 def _post_phase_exit(self, post_phase):
2958 if self._final_exit(post_phase) != os.EX_OK:
2959 writemsg("!!! post %s failed; exiting.\n" % self.phase,
2961 self._current_task = None
# EbuildBinpkg -- runs the "package" phase to build a .tbz2 into a
# temporary file, injecting it into the binary tree on success.
2965 class EbuildBinpkg(EbuildProcess):
# (docstring line; quotes elided from this dump)
2967 This assumes that src_install() has successfully completed.
2969 __slots__ = ("_binpkg_tmpfile",)
2972 self.phase = "package"
2973 self.tree = "porttree"
2975 root_config = pkg.root_config
2976 portdb = root_config.trees["porttree"].dbapi
2977 bintree = root_config.trees["bintree"]
2978 ebuild_path = portdb.findname(self.pkg.cpv)
2979 settings = self.settings
2980 debug = settings.get("PORTAGE_DEBUG") == "1"
2982 bintree.prevent_collision(pkg.cpv)
# Pid-suffixed temp name so concurrent emerges can't collide.
2983 binpkg_tmpfile = os.path.join(bintree.pkgdir,
2984 pkg.cpv + ".tbz2." + str(os.getpid()))
2985 self._binpkg_tmpfile = binpkg_tmpfile
2986 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
2987 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
2990 EbuildProcess._start(self)
# The variable is removed again once the phase has been started
# (enclosing try/finally elided in this dump -- TODO confirm).
2992 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
2994 def _set_returncode(self, wait_retval):
2995 EbuildProcess._set_returncode(self, wait_retval)
2998 bintree = pkg.root_config.trees["bintree"]
2999 binpkg_tmpfile = self._binpkg_tmpfile
3000 if self.returncode == os.EX_OK:
3001 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# EbuildMerge -- synchronous wrapper around portage.merge() that merges
# an installed image into the live filesystem, updates the world file
# on success, and logs completion.
3003 class EbuildMerge(SlotObject):
3005 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3006 "pkg", "pkg_count", "pkg_path", "pretend",
3007 "scheduler", "settings", "tree", "world_atom")
# (execute(); its `def` line was elided from this dump)
3010 root_config = self.pkg.root_config
3011 settings = self.settings
3012 retval = portage.merge(settings["CATEGORY"],
3013 settings["PF"], settings["D"],
3014 os.path.join(settings["PORTAGE_BUILDDIR"],
3015 "build-info"), root_config.root, settings,
3016 myebuild=settings["EBUILD"],
3017 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3018 vartree=root_config.trees["vartree"],
3019 prev_mtimes=self.ldpath_mtimes,
3020 scheduler=self.scheduler,
3021 blockers=self.find_blockers)
# Record the package in the world file only after a successful merge.
3023 if retval == os.EX_OK:
3024 self.world_atom(self.pkg)
3029 def _log_success(self):
3031 pkg_count = self.pkg_count
3032 pkg_path = self.pkg_path
3033 logger = self.logger
3034 if "noclean" not in self.settings.features:
3035 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3036 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3037 logger.log((" === (%s of %s) " + \
3038 "Post-Build Cleaning (%s::%s)") % \
3039 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3040 short_msg=short_msg)
3041 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3042 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# PackageUninstall -- task wrapping unmerge() for a single installed
# package; UninstallFailure is translated into a returncode.
3044 class PackageUninstall(AsynchronousTask):
3046 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# (_start body; try: and `def` lines elided from this dump)
3050 unmerge(self.pkg.root_config, self.opts, "unmerge",
3051 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3052 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3053 writemsg_level=self._writemsg_level)
3054 except UninstallFailure, e:
3055 self.returncode = e.status
3057 self.returncode = os.EX_OK
# Route unmerge output to the log file and/or the terminal depending
# on background mode and message severity.
3060 def _writemsg_level(self, msg, level=0, noiselevel=0):
3062 log_path = self.settings.get("PORTAGE_LOG_FILE")
3063 background = self.background
3065 if log_path is None:
# Background tasks suppress sub-WARNING chatter when there is no log.
3066 if not (background and level < logging.WARNING):
3067 portage.util.writemsg_level(msg,
3068 level=level, noiselevel=noiselevel)
3071 portage.util.writemsg_level(msg,
3072 level=level, noiselevel=noiselevel)
3074 f = open(log_path, 'a')
# Binpkg -- composite task that installs a binary package: optional
# remote fetch, digest verification, clean/setup phases, extraction
# into an image dir, and finally the livefs merge.
# NOTE(review): elided dump throughout (gaps in embedded line numbers);
# several `def` lines, returns and branch bodies are missing.
3080 class Binpkg(CompositeTask):
3082 __slots__ = ("find_blockers",
3083 "ldpath_mtimes", "logger", "opts",
3084 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3085 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3086 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Mirror messages to the terminal (unless backgrounded) and append
# them to PORTAGE_LOG_FILE when one exists.
3088 def _writemsg_level(self, msg, level=0, noiselevel=0):
3090 if not self.background:
3091 portage.util.writemsg_level(msg,
3092 level=level, noiselevel=noiselevel)
3094 log_path = self.settings.get("PORTAGE_LOG_FILE")
3095 if log_path is not None:
3096 f = open(log_path, 'a')
# (_start body; `def` line elided) -- compute working paths under
# PORTAGE_TMPDIR and decide whether digests must be verified.
3105 settings = self.settings
3106 settings.setcpv(pkg)
3107 self._tree = "bintree"
3108 self._bintree = self.pkg.root_config.trees[self._tree]
3109 self._verify = "strict" in self.settings.features and \
3110 not self.opts.pretend
3112 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3113 "portage", pkg.category, pkg.pf)
3114 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3115 pkg=pkg, settings=settings)
3116 self._image_dir = os.path.join(dir_path, "image")
3117 self._infloc = os.path.join(dir_path, "build-info")
3118 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3120 # The prefetcher has already completed or it
3121 # could be running now. If it's running now,
3122 # wait for it to complete since it holds
3123 # a lock on the file being fetched. The
3124 # portage.locks functions are only designed
3125 # to work between separate processes. Since
3126 # the lock is held by the current process,
3127 # use the scheduler and fetcher methods to
3128 # synchronize with the fetcher.
3129 prefetcher = self.prefetcher
3130 if prefetcher is None:
3132 elif not prefetcher.isAlive():
3134 elif prefetcher.poll() is None:
3136 waiting_msg = ("Fetching '%s' " + \
3137 "in the background. " + \
3138 "To view fetch progress, run `tail -f " + \
3139 "/var/log/emerge-fetch.log` in another " + \
3140 "terminal.") % prefetcher.pkg_path
3141 msg_prefix = colorize("GOOD", " * ")
3142 from textwrap import wrap
3143 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3144 for line in wrap(waiting_msg, 65))
3145 if not self.background:
3146 writemsg(waiting_msg, noiselevel=-1)
3148 self._current_task = prefetcher
3149 prefetcher.addExitListener(self._prefetch_exit)
3152 self._prefetch_exit(prefetcher)
3154 def _prefetch_exit(self, prefetcher):
3157 pkg_count = self.pkg_count
3158 fetcher = BinpkgFetcher(background=self.background,
3159 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3160 scheduler=self.scheduler)
3161 pkg_path = fetcher.pkg_path
3162 self._pkg_path = pkg_path
# Remote package: queue the fetch through the scheduler's fetch queue.
3164 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3166 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3167 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3168 short_msg = "emerge: (%s of %s) %s Fetch" % \
3169 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3170 self.logger.log(msg, short_msg=short_msg)
3173 fetcher.addExitListener(self._fetcher_exit)
3174 self._current_task = fetcher
3175 self.scheduler.fetch.schedule(fetcher)
3177 self._start_task(fetcher, self._fetcher_exit)
# Local package: skip straight to the post-fetch handler.
3180 self._fetcher_exit(fetcher)
3182 def _fetcher_exit(self, fetcher):
3184 # The fetcher only has a returncode when
3185 # --getbinpkg is enabled.
3186 if fetcher.returncode is not None:
3187 self._fetched_pkg = True
3188 if self.opts.fetchonly:
3189 self._final_exit(fetcher)
3192 elif self._default_exit(fetcher) != os.EX_OK:
# Verification (when enabled) also runs in the fetch queue so it is
# serialized with fetches.
3198 verifier = BinpkgVerifier(background=self.background,
3199 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3202 verifier.addExitListener(self._verifier_exit)
3203 self._current_task = verifier
3204 self.scheduler.fetch.schedule(verifier)
3206 self._start_task(verifier, self._verifier_exit)
3209 self._verifier_exit(verifier)
3211 def _verifier_exit(self, verifier):
3212 if verifier is not None and \
3213 self._default_exit(verifier) != os.EX_OK:
3217 logger = self.logger
3219 pkg_count = self.pkg_count
3220 pkg_path = self._pkg_path
# A freshly fetched package is injected into the local bintree.
3222 if self._fetched_pkg:
3223 self._bintree.inject(pkg.cpv, filename=pkg_path)
3225 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3226 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3227 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3228 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3229 logger.log(msg, short_msg=short_msg)
3231 self._build_dir.lock()
# Run a clean phase first (`phase` binding elided from this dump).
3234 settings = self.settings
3235 settings.setcpv(pkg)
3236 settings["EBUILD"] = self._ebuild_path
3237 ebuild_phase = EbuildPhase(background=self.background,
3238 pkg=pkg, phase=phase, scheduler=self.scheduler,
3239 settings=settings, tree=self._tree)
3241 self._start_task(ebuild_phase, self._clean_exit)
3243 def _clean_exit(self, clean_phase):
3244 if self._default_exit(clean_phase) != os.EX_OK:
3245 self._unlock_builddir()
# (_unpack_metadata-style logic follows; `def` line elided) -- rebuild
# the work dirs and extract build-info from the package's xpak.
3249 dir_path = self._build_dir.dir_path
3252 shutil.rmtree(dir_path)
3253 except (IOError, OSError), e:
3254 if e.errno != errno.ENOENT:
3258 infloc = self._infloc
3260 pkg_path = self._pkg_path
3263 for mydir in (dir_path, self._image_dir, infloc):
3264 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3265 gid=portage.data.portage_gid, mode=dir_mode)
3267 # This initializes PORTAGE_LOG_FILE.
3268 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3269 self._writemsg_level(">>> Extracting info\n")
3271 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3272 check_missing_metadata = ("CATEGORY", "PF")
3273 missing_metadata = set()
3274 for k in check_missing_metadata:
3275 v = pkg_xpak.getfile(k)
3277 missing_metadata.add(k)
3279 pkg_xpak.unpackinfo(infloc)
# Reconstruct CATEGORY/PF files that the xpak did not provide
# (per-key handling elided from this dump).
3280 for k in missing_metadata:
3288 f = open(os.path.join(infloc, k), 'wb')
3294 # Store the md5sum in the vdb.
3295 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3297 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3301 # This gives bashrc users an opportunity to do various things
3302 # such as remove binary packages after they're installed.
3303 settings = self.settings
3304 settings.setcpv(self.pkg)
3305 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3306 settings.backup_changes("PORTAGE_BINPKG_FILE")
# Setup phase goes through the scheduler's setup queue
# (`phase` binding elided from this dump).
3309 setup_phase = EbuildPhase(background=self.background,
3310 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3311 settings=settings, tree=self._tree)
3313 setup_phase.addExitListener(self._setup_exit)
3314 self._current_task = setup_phase
3315 self.scheduler.scheduleSetup(setup_phase)
3317 def _setup_exit(self, setup_phase):
3318 if self._default_exit(setup_phase) != os.EX_OK:
3319 self._unlock_builddir()
3323 extractor = BinpkgExtractorAsync(background=self.background,
3324 image_dir=self._image_dir,
3325 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3326 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3327 self._start_task(extractor, self._extractor_exit)
3329 def _extractor_exit(self, extractor):
3330 if self._final_exit(extractor) != os.EX_OK:
3331 self._unlock_builddir()
3332 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3336 def _unlock_builddir(self):
3337 portage.elog.elog_process(self.pkg.cpv, self.settings)
3338 self._build_dir.unlock()
# (install(); `def` line elided) -- merge the extracted image, then
# always drop PORTAGE_BINPKG_FILE and release the build-dir lock.
3342 # This gives bashrc users an opportunity to do various things
3343 # such as remove binary packages after they're installed.
3344 settings = self.settings
3345 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3346 settings.backup_changes("PORTAGE_BINPKG_FILE")
3348 merge = EbuildMerge(find_blockers=self.find_blockers,
3349 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3350 pkg=self.pkg, pkg_count=self.pkg_count,
3351 pkg_path=self._pkg_path, scheduler=self.scheduler,
3352 settings=settings, tree=self._tree, world_atom=self.world_atom)
3355 retval = merge.execute()
3357 settings.pop("PORTAGE_BINPKG_FILE", None)
3358 self._unlock_builddir()
# BinpkgFetcher -- SpawnProcess that downloads one binary package via
# the configured FETCHCOMMAND/RESUMECOMMAND, with optional distlocks.
3361 class BinpkgFetcher(SpawnProcess):
# (first line of __slots__ elided from this dump)
3364 "locked", "pkg_path", "_lock_obj")
3366 def __init__(self, **kwargs):
3367 SpawnProcess.__init__(self, **kwargs)
3369 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# (_start body; `def` line elided) -- build the fetch command line.
3377 bintree = pkg.root_config.trees["bintree"]
3378 settings = bintree.settings
3379 use_locks = "distlocks" in settings.features
3380 pkg_path = self.pkg_path
# A partially downloaded file triggers the resume command instead.
3381 resume = os.path.exists(pkg_path)
3383 # urljoin doesn't work correctly with
3384 # unrecognized protocols like sftp
3385 if bintree._remote_has_index:
3386 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3388 rel_uri = pkg.cpv + ".tbz2"
3389 uri = bintree._remote_base_uri.rstrip("/") + \
3390 "/" + rel_uri.lstrip("/")
3392 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3393 "/" + pkg.pf + ".tbz2"
# Prefer a protocol-specific command (e.g. FETCHCOMMAND_HTTP) and
# fall back to the generic one.
3395 protocol = urlparse.urlparse(uri)[0]
3396 fcmd_prefix = "FETCHCOMMAND"
3398 fcmd_prefix = "RESUMECOMMAND"
3399 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3401 fcmd = settings.get(fcmd_prefix)
# Substitution variables for the command template (URI entry elided
# from this dump).
3404 "DISTDIR" : os.path.dirname(pkg_path),
3406 "FILE" : os.path.basename(pkg_path)
3409 fetch_env = dict(settings.iteritems())
3410 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3411 for x in shlex.split(fcmd)]
3413 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3417 if self.fd_pipes is None:
3419 fd_pipes = self.fd_pipes
3421 # Redirect all output to stdout since some fetchers like
3422 # wget pollute stderr (if portage detects a problem then it
3423 # can send its own message to stderr).
3424 fd_pipes.setdefault(0, sys.stdin.fileno())
3425 fd_pipes.setdefault(1, sys.stdout.fileno())
3426 fd_pipes.setdefault(2, sys.stdout.fileno())
3428 self.args = fetch_args
3429 self.env = fetch_env
3430 SpawnProcess._start(self)
3432 def _set_returncode(self, wait_retval):
3433 SpawnProcess._set_returncode(self, wait_retval)
# (lock(); `def` line and docstring quotes elided from this dump)
3439 This raises an AlreadyLocked exception if lock() is called
3440 while a lock is already held. In order to avoid this, call
3441 unlock() or check whether the "locked" attribute is True
3442 or False before calling lock().
3444 if self._lock_obj is not None:
3445 raise self.AlreadyLocked((self._lock_obj,))
3447 self._lock_obj = portage.locks.lockfile(
3448 self.pkg_path, wantnewlockfile=1)
3451 class AlreadyLocked(portage.exception.PortageException):
# (unlock(); `def` line elided from this dump)
3455 if self._lock_obj is None:
3457 portage.locks.unlockfile(self._lock_obj)
3458 self._lock_obj = None
# BinpkgVerifier -- checks a downloaded binary package against its
# digests, redirecting output to the log file in background mode.
3461 class BinpkgVerifier(AsynchronousTask):
3462 __slots__ = ("logfile", "pkg",)
# (start(); `def` line and docstring quotes elided from this dump)
3466 Note: Unlike a normal AsynchronousTask.start() method,
3467 this one does all work synchronously. The returncode
3468 attribute will be set before it returns.
3472 root_config = pkg.root_config
3473 bintree = root_config.trees["bintree"]
# Temporarily swap sys.stdout/stderr so digestCheck's messages land
# in the log file; originals are restored below.
3475 stdout_orig = sys.stdout
3476 stderr_orig = sys.stderr
3478 if self.background and self.logfile is not None:
3479 log_file = open(self.logfile, 'a')
3481 if log_file is not None:
3482 sys.stdout = log_file
3483 sys.stderr = log_file
3485 bintree.digestCheck(pkg)
3486 except portage.exception.FileNotFound:
3487 writemsg("!!! Fetching Binary failed " + \
3488 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3490 except portage.exception.DigestException, e:
3491 writemsg("\n!!! Digest verification failed:\n",
3493 writemsg("!!! %s\n" % e.value[0],
3495 writemsg("!!! Reason: %s\n" % e.value[1],
3497 writemsg("!!! Got: %s\n" % e.value[2],
3499 writemsg("!!! Expected: %s\n" % e.value[3],
# finally-block (keyword elided in this dump): restore streams.
3503 sys.stdout = stdout_orig
3504 sys.stderr = stderr_orig
3505 if log_file is not None:
3508 self.returncode = rval
# BinpkgExtractorAsync -- spawns `bzip2 -dqc pkg | tar -xp -C image`
# through bash to unpack the binary package into the image directory.
3511 class BinpkgExtractorAsync(SpawnProcess):
3513 __slots__ = ("image_dir", "pkg", "pkg_path")
3515 _shell_binary = portage.const.BASH_BINARY
# (_start body; its `def` line was elided from this dump)
# Paths are shell-quoted since they are interpolated into a bash -c
# command string.
3518 self.args = [self._shell_binary, "-c",
3519 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3520 (portage._shell_quote(self.pkg_path),
3521 portage._shell_quote(self.image_dir))]
3523 self.env = self.pkg.root_config.settings.environ()
3524 SpawnProcess._start(self)
# MergeListItem -- one entry of the merge list; dispatches to
# EbuildBuild, Binpkg, or PackageUninstall depending on pkg type.
3526 class MergeListItem(CompositeTask):
# (docstring; quotes elided from this dump)
3529 TODO: For parallel scheduling, everything here needs asynchronous
3530 execution support (start, poll, and wait methods).
3533 __slots__ = ("args_set",
3534 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3535 "find_blockers", "logger", "mtimedb", "pkg",
3536 "pkg_count", "pkg_to_replace", "prefetcher",
3537 "settings", "statusMessage", "world_atom") + \
# (_start body; `def` line elided from this dump)
3543 build_opts = self.build_opts
# Uninstall operations are deferred to merge() rather than started here.
3546 # uninstall, executed by self.merge()
3547 self.returncode = os.EX_OK
3551 args_set = self.args_set
3552 find_blockers = self.find_blockers
3553 logger = self.logger
3554 mtimedb = self.mtimedb
3555 pkg_count = self.pkg_count
3556 scheduler = self.scheduler
3557 settings = self.settings
3558 world_atom = self.world_atom
3559 ldpath_mtimes = mtimedb["ldpath"]
# Compose the "(N of M) Emerging ..." status line.
3561 action_desc = "Emerging"
3563 if pkg.type_name == "binary":
3564 action_desc += " binary"
3566 if build_opts.fetchonly:
3567 action_desc = "Fetching"
3569 msg = "%s (%s of %s) %s" % \
3571 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3572 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3573 colorize("GOOD", pkg.cpv))
3576 msg += " %s %s" % (preposition, pkg.root)
3578 if not build_opts.pretend:
3579 self.statusMessage(msg)
3580 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3581 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3583 if pkg.type_name == "ebuild":
3585 build = EbuildBuild(args_set=args_set,
3586 background=self.background,
3587 config_pool=self.config_pool,
3588 find_blockers=find_blockers,
3589 ldpath_mtimes=ldpath_mtimes, logger=logger,
3590 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3591 prefetcher=self.prefetcher, scheduler=scheduler,
3592 settings=settings, world_atom=world_atom)
3594 self._install_task = build
3595 self._start_task(build, self._default_final_exit)
3598 elif pkg.type_name == "binary":
3600 binpkg = Binpkg(background=self.background,
3601 find_blockers=find_blockers,
3602 ldpath_mtimes=ldpath_mtimes, logger=logger,
3603 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3604 prefetcher=self.prefetcher, settings=settings,
3605 scheduler=scheduler, world_atom=world_atom)
3607 self._install_task = binpkg
3608 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the underlying install task (their `def`
# lines were elided from this dump).
3612 self._install_task.poll()
3613 return self.returncode
3616 self._install_task.wait()
3617 return self.returncode
# (merge(); `def` line elided) -- run the uninstall step when needed,
# then hand off to the install task's install() method.
3622 build_opts = self.build_opts
3623 find_blockers = self.find_blockers
3624 logger = self.logger
3625 mtimedb = self.mtimedb
3626 pkg_count = self.pkg_count
3627 prefetcher = self.prefetcher
3628 scheduler = self.scheduler
3629 settings = self.settings
3630 world_atom = self.world_atom
3631 ldpath_mtimes = mtimedb["ldpath"]
3634 if not (build_opts.buildpkgonly or \
3635 build_opts.fetchonly or build_opts.pretend):
3637 uninstall = PackageUninstall(background=self.background,
3638 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3639 pkg=pkg, scheduler=scheduler, settings=settings)
3642 retval = uninstall.wait()
3643 if retval != os.EX_OK:
3647 if build_opts.fetchonly or \
3648 build_opts.buildpkgonly:
3649 return self.returncode
3651 retval = self._install_task.install()
3654 class PackageMerge(AsynchronousTask):
# (docstring; quotes elided from this dump)
3656 TODO: Implement asynchronous merge so that the scheduler can
3657 run while a merge is executing.
3660 __slots__ = ("merge",)
# (_start body; `def` line elided) -- print an Installing/Uninstalling
# status line, then run the wrapped merge synchronously.
3664 pkg = self.merge.pkg
3665 pkg_count = self.merge.pkg_count
3668 action_desc = "Uninstalling"
3669 preposition = "from"
3671 action_desc = "Installing"
# NOTE(review): `preposition` for the install branch was elided here.
3674 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3677 msg += " %s %s" % (preposition, pkg.root)
3679 if not self.merge.build_opts.fetchonly and \
3680 not self.merge.build_opts.pretend and \
3681 not self.merge.build_opts.buildpkgonly:
3682 self.merge.statusMessage(msg)
3684 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages,
# sets). NOTE(review): line 3689 (presumably `self.arg = arg`) was
# elided from this dump; subclasses read self.arg.
3687 class DependencyArg(object):
3688 def __init__(self, arg=None, root_config=None):
3690 self.root_config = root_config
# Dependency argument that wraps a single dependency atom; the atom is
# normalized to a portage.dep.Atom instance and exposed as a
# one-element set. NOTE(review): the initial assignment of self.atom
# (line 3698) was elided from this dump.
3695 class AtomArg(DependencyArg):
3696 def __init__(self, atom=None, **kwargs):
3697 DependencyArg.__init__(self, **kwargs)
3699 if not isinstance(self.atom, portage.dep.Atom):
3700 self.atom = portage.dep.Atom(self.atom)
3701 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument wrapping a concrete Package object.

	The package is pinned via an exact-version atom ("=" + cpv), and
	``self.set`` exposes that single atom as a one-element tuple.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom, )
# Dependency argument for a package set ("@world" style); strips the
# SETPREFIX from the raw argument to obtain the set name.
# NOTE(review): line 3713 (presumably `self.set = set`) was elided
# from this dump.
3710 class SetArg(DependencyArg):
3711 def __init__(self, set=None, **kwargs):
3712 DependencyArg.__init__(self, **kwargs)
3714 self.name = self.arg[len(SETPREFIX):]
# Dependency edge in the depgraph; defaults priority to DepPriority()
# when unset. NOTE(review): the body of the final `if` (presumably
# `self.depth = 0`) was elided from this dump.
3719 class Dependency(SlotObject):
3720 __slots__ = ("atom", "blocker", "depth",
3721 "parent", "onlydeps", "priority", "root")
3722 def __init__(self, **kwargs):
3723 SlotObject.__init__(self, **kwargs)
3724 if self.priority is None:
3725 self.priority = DepPriority()
3726 if self.depth is None:
3729 class BlockerCache(DictMixin):
3730 """This caches blockers of installed packages so that dep_check does not
3731 have to be done for every single installed package on every invocation of
3732 emerge. The cache is invalidated whenever it is detected that something
3733 has changed that might alter the results of dep_check() calls:
3734 1) the set of installed packages (including COUNTER) has changed
3735 2) the old-style virtuals have changed
3738 # Number of uncached packages to trigger cache update, since
3739 # it's wasteful to update it for every vdb change.
3740 _cache_threshold = 5
# Per-package cache record: the vdb COUNTER at caching time plus the
# package's blocker atoms. NOTE(review): line 3748 (presumably
# `self.atoms = atoms`) was elided from this dump.
3742 class BlockerData(object):
3744 __slots__ = ("__weakref__", "atoms", "counter")
3746 def __init__(self, counter, atoms):
3747 self.counter = counter
# Load the pickled blocker cache from $ROOT/var/cache/edb, validate
# every entry, and fall back to an empty cache on any corruption.
# NOTE(review): elided dump -- try/except keywords and some branch
# bodies are missing between these statements.
3750 def __init__(self, myroot, vardb):
3752 self._virtuals = vardb.settings.getvirtuals()
3753 self._cache_filename = os.path.join(myroot,
3754 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3755 self._cache_version = "1"
3756 self._cache_data = None
3757 self._modified = set()
3762 f = open(self._cache_filename)
3763 mypickle = pickle.Unpickler(f)
# Forbid instantiating arbitrary classes during unpickling (the
# cache file could be stale or tampered with).
3764 mypickle.find_global = None
3765 self._cache_data = mypickle.load()
3768 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3769 if isinstance(e, pickle.UnpicklingError):
3770 writemsg("!!! Error loading '%s': %s\n" % \
3771 (self._cache_filename, str(e)), noiselevel=-1)
3774 cache_valid = self._cache_data and \
3775 isinstance(self._cache_data, dict) and \
3776 self._cache_data.get("version") == self._cache_version and \
3777 isinstance(self._cache_data.get("blockers"), dict)
3779 # Validate all the atoms and counters so that
3780 # corruption is detected as soon as possible.
3781 invalid_items = set()
3782 for k, v in self._cache_data["blockers"].iteritems():
3783 if not isinstance(k, basestring):
3784 invalid_items.add(k)
3787 if portage.catpkgsplit(k) is None:
3788 invalid_items.add(k)
3790 except portage.exception.InvalidData:
3791 invalid_items.add(k)
# Each value must be a (counter, atoms) pair of the right types.
3793 if not isinstance(v, tuple) or \
3795 invalid_items.add(k)
3798 if not isinstance(counter, (int, long)):
3799 invalid_items.add(k)
3801 if not isinstance(atoms, (list, tuple)):
3802 invalid_items.add(k)
3804 invalid_atom = False
3806 if not isinstance(atom, basestring):
# Every cached atom must be a valid blocker ("!"-prefixed).
3809 if atom[:1] != "!" or \
3810 not portage.isvalidatom(
3811 atom, allow_blockers=True):
3815 invalid_items.add(k)
3818 for k in invalid_items:
3819 del self._cache_data["blockers"][k]
3820 if not self._cache_data["blockers"]:
# Invalid cache: start over with a fresh, empty structure.
3824 self._cache_data = {"version":self._cache_version}
3825 self._cache_data["blockers"] = {}
3826 self._cache_data["virtuals"] = self._virtuals
3827 self._modified.clear()
3830 """If the current user has permission and the internal blocker cache
3831 been updated, save it to disk and mark it unmodified. This is called
3832 by emerge after it has proccessed blockers for all installed packages.
3833 Currently, the cache is only written if the user has superuser
3834 privileges (since that's required to obtain a lock), but all users
3835 have read access and benefit from faster blocker lookups (as long as
3836 the entire cache is still valid). The cache is stored as a pickled
3837 dict object with the following format:
3841 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
3842 "virtuals" : vardb.settings.getvirtuals()
3845 if len(self._modified) >= self._cache_threshold and \
3848 f = portage.util.atomic_ofstream(self._cache_filename)
3849 pickle.dump(self._cache_data, f, -1)
3851 portage.util.apply_secpass_permissions(
3852 self._cache_filename, gid=portage.portage_gid, mode=0644)
3853 except (IOError, OSError), e:
3855 self._modified.clear()
3857 def __setitem__(self, cpv, blocker_data):
3859 Update the cache and mark it as modified for a future call to
3862 @param cpv: Package for which to cache blockers.
3864 @param blocker_data: An object with counter and atoms attributes.
3865 @type blocker_data: BlockerData
3867 self._cache_data["blockers"][cpv] = \
3868 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
3869 self._modified.add(cpv)
3872 return iter(self._cache_data["blockers"])
3874 def __delitem__(self, cpv):
3875 del self._cache_data["blockers"][cpv]
3877 def __getitem__(self, cpv):
3880 @returns: An object with counter and atoms attributes.
3882 return self.BlockerData(*self._cache_data["blockers"][cpv])
3885 """This needs to be implemented so that self.__repr__() doesn't raise
3886 an AttributeError."""
# NOTE(review): numbered listing with missing lines; comments describe only
# what is visible.
3889 class BlockerDB(object):
# Holds per-root trees plus lazily-built FakeVartree/dep_check trees used
# for blocker resolution against the installed-package database.
3891 def __init__(self, root_config):
3892 self._root_config = root_config
3893 self._vartree = root_config.trees["vartree"]
3894 self._portdb = root_config.trees["porttree"].dbapi
3896 self._dep_check_trees = None
3897 self._fake_vartree = None
# Lazily construct (and thereafter sync) the FakeVartree; the dep_check
# trees map both "porttree" and "vartree" to it so dep_check() resolves
# entirely against the faked installed state.
3899 def _get_fake_vartree(self, acquire_lock=0):
3900 fake_vartree = self._fake_vartree
3901 if fake_vartree is None:
3902 fake_vartree = FakeVartree(self._root_config,
3903 acquire_lock=acquire_lock)
3904 self._fake_vartree = fake_vartree
3905 self._dep_check_trees = { self._vartree.root : {
3906 "porttree" : fake_vartree,
3907 "vartree" : fake_vartree,
3910 fake_vartree.sync(acquire_lock=acquire_lock)
# Return the set of installed packages that block new_pkg, or that new_pkg
# blocks. Blocker atoms of each installed package are computed via
# dep_check (cached in BlockerCache keyed by COUNTER), then matched in
# both directions.
3913 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
3914 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
3915 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3916 settings = self._vartree.settings
3917 stale_cache = set(blocker_cache)
3918 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
3919 dep_check_trees = self._dep_check_trees
3920 vardb = fake_vartree.dbapi
3921 installed_pkgs = list(vardb)
3923 for inst_pkg in installed_pkgs:
# Any cpv still in stale_cache afterwards is no longer installed and
# gets purged from the cache below.
3924 stale_cache.discard(inst_pkg.cpv)
3925 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A COUNTER mismatch means the vdb entry changed since it was cached.
3926 if cached_blockers is not None and \
3927 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
3928 cached_blockers = None
3929 if cached_blockers is not None:
3930 blocker_atoms = cached_blockers.atoms
3932 # Use aux_get() to trigger FakeVartree global
3933 # updates on *DEPEND when appropriate.
3934 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
3936 portage.dep._dep_check_strict = False
3937 success, atoms = portage.dep_check(depstr,
3938 vardb, settings, myuse=inst_pkg.use.enabled,
3939 trees=dep_check_trees, myroot=inst_pkg.root)
3941 portage.dep._dep_check_strict = True
3943 pkg_location = os.path.join(inst_pkg.root,
3944 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
3945 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
3946 (pkg_location, atoms), noiselevel=-1)
# Only blocker atoms ("!"-prefixed) are cached; sorted for stable output.
3949 blocker_atoms = [atom for atom in atoms \
3950 if atom.startswith("!")]
3951 blocker_atoms.sort()
3952 counter = long(inst_pkg.metadata["COUNTER"])
3953 blocker_cache[inst_pkg.cpv] = \
3954 blocker_cache.BlockerData(counter, blocker_atoms)
3955 for cpv in stale_cache:
3956 del blocker_cache[cpv]
3957 blocker_cache.flush()
# Direction 1: installed packages whose blocker atoms match new_pkg.
3959 blocker_parents = digraph()
3961 for pkg in installed_pkgs:
3962 for blocker_atom in blocker_cache[pkg.cpv].atoms:
3963 blocker_atom = blocker_atom.lstrip("!")
3964 blocker_atoms.append(blocker_atom)
3965 blocker_parents.add(blocker_atom, pkg)
3967 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3968 blocking_pkgs = set()
3969 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
3970 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
3972 # Check for blockers in the other direction.
3973 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
3975 portage.dep._dep_check_strict = False
3976 success, atoms = portage.dep_check(depstr,
3977 vardb, settings, myuse=new_pkg.use.enabled,
3978 trees=dep_check_trees, myroot=new_pkg.root)
3980 portage.dep._dep_check_strict = True
3982 # We should never get this far with invalid deps.
3983 show_invalid_depstring_notice(new_pkg, depstr, atoms)
3986 blocker_atoms = [atom.lstrip("!") for atom in atoms \
# Direction 2: installed packages matched by new_pkg's own blocker atoms.
3989 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3990 for inst_pkg in installed_pkgs:
# .next() is only probed for a first match; StopIteration means no match.
3992 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
3993 except (portage.exception.InvalidDependString, StopIteration):
3995 blocking_pkgs.add(inst_pkg)
3997 return blocking_pkgs
# Print a fatal-error notice for a corrupt/invalid dependency string.
# parent_node is unpacked as a (type, root, key, status) tuple; "nomerge"
# status means the package is already installed (advise reinstall),
# otherwise the package maintainer should be notified.
# NOTE(review): the "msg = []" initialization is among the lines missing
# from this listing — confirm against upstream.
3999 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4001 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4002 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4003 p_type, p_root, p_key, p_status = parent_node
4005 if p_status == "nomerge":
4006 category, pf = portage.catsplit(p_key)
4007 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4008 msg.append("Portage is unable to process the dependencies of the ")
4009 msg.append("'%s' package. " % p_key)
4010 msg.append("In order to correct this problem, the package ")
4011 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4012 msg.append("As a temporary workaround, the --nodeps option can ")
4013 msg.append("be used to ignore all dependencies. For reference, ")
4014 msg.append("the problematic dependencies can be found in the ")
4015 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4017 msg.append("This package can not be installed. ")
4018 msg.append("Please notify the '%s' package maintainer " % p_key)
4019 msg.append("about this problem.")
# Word-wrap the advice to 72 columns and emit everything at ERROR level.
4021 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4022 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
# NOTE(review): numbered listing with missing lines (method headers such as
# clear()/copy()/__iter__ are among those absent); comments describe only
# what is visible.
4024 class PackageVirtualDbapi(portage.dbapi):
4026 A dbapi-like interface class that represents the state of the installed
4027 package database as new packages are installed, replacing any packages
4028 that previously existed in the same slot. The main difference between
4029 this class and fakedbapi is that this one uses Package instances
4030 internally (passed in via cpv_inject() and cpv_remove() calls).
# _match_cache maps a dependency atom (or cp) to its last match result;
# it is invalidated by _clear_cache() whenever the db contents change.
4032 def __init__(self, settings):
4033 portage.dbapi.__init__(self)
4034 self.settings = settings
4035 self._match_cache = {}
4041 Remove all packages.
4045 self._cp_map.clear()
4046 self._cpv_map.clear()
# Shallow copy with per-cp list copies, so mutating the copy's cp lists
# does not affect the original.
4049 obj = PackageVirtualDbapi(self.settings)
4050 obj._match_cache = self._match_cache.copy()
4051 obj._cp_map = self._cp_map.copy()
4052 for k, v in obj._cp_map.iteritems():
4053 obj._cp_map[k] = v[:]
4054 obj._cpv_map = self._cpv_map.copy()
4058 return self._cpv_map.itervalues()
# Membership is by cpv lookup; the visible comparison is truncated here
# (presumably an identity/equality check on the stored Package — confirm).
4060 def __contains__(self, item):
4061 existing = self._cpv_map.get(item.cpv)
4062 if existing is not None and \
# get() accepts either a Package-like object (with .cpv) or a 4-tuple
# (type_name, root, cpv, operation).
4067 def get(self, item, default=None):
4068 cpv = getattr(item, "cpv", None)
4072 type_name, root, cpv, operation = item
4074 existing = self._cpv_map.get(cpv)
4075 if existing is not None and \
4080 def match_pkgs(self, atom):
4081 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4083 def _clear_cache(self):
4084 if self._categories is not None:
4085 self._categories = None
4086 if self._match_cache:
4087 self._match_cache = {}
# match(): memoized wrapper around the base-class matcher.
4089 def match(self, origdep, use_cache=1):
4090 result = self._match_cache.get(origdep)
4091 if result is not None:
4093 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4094 self._match_cache[origdep] = result
4097 def cpv_exists(self, cpv):
4098 return cpv in self._cpv_map
4100 def cp_list(self, mycp, use_cache=1):
4101 cachelist = self._match_cache.get(mycp)
4102 # cp_list() doesn't expand old-style virtuals
4103 if cachelist and cachelist[0].startswith(mycp):
4105 cpv_list = self._cp_map.get(mycp)
4106 if cpv_list is None:
4109 cpv_list = [pkg.cpv for pkg in cpv_list]
4110 self._cpv_sort_ascending(cpv_list)
# Don't cache an empty result for virtual/* since virtuals may expand.
4111 if not (not cpv_list and mycp.startswith("virtual/")):
4112 self._match_cache[mycp] = cpv_list
4116 return list(self._cp_map)
4119 return list(self._cpv_map)
# cpv_inject(): add pkg, replacing any existing package with the same cpv
# or occupying the same slot (one package per slot invariant).
4121 def cpv_inject(self, pkg):
4122 cp_list = self._cp_map.get(pkg.cp)
4125 self._cp_map[pkg.cp] = cp_list
4126 e_pkg = self._cpv_map.get(pkg.cpv)
4127 if e_pkg is not None:
4130 self.cpv_remove(e_pkg)
4131 for e_pkg in cp_list:
4132 if e_pkg.slot_atom == pkg.slot_atom:
4135 self.cpv_remove(e_pkg)
4138 self._cpv_map[pkg.cpv] = pkg
4141 def cpv_remove(self, pkg):
4142 old_pkg = self._cpv_map.get(pkg.cpv)
4145 self._cp_map[pkg.cp].remove(pkg)
4146 del self._cpv_map[pkg.cpv]
# aux_get/aux_update operate directly on the stored Package's metadata
# mapping; missing keys read as "".
4149 def aux_get(self, cpv, wants):
4150 metadata = self._cpv_map[cpv].metadata
4151 return [metadata.get(x, "") for x in wants]
4153 def aux_update(self, cpv, values):
4154 self._cpv_map[cpv].metadata.update(values)
# NOTE(review): numbered listing with missing lines; the depgraph class
# continues beyond this chunk. Comments describe only what is visible.
4157 class depgraph(object):
4159 pkg_tree_map = RootConfig.pkg_tree_map
4161 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Build per-root fake trees, the fake installed-package db (mydbapi), the
# graph/filtered tree views used by dep_check, and all bookkeeping
# containers used during resolution.
4163 def __init__(self, settings, trees, myopts, myparams, spinner):
4164 self.settings = settings
4165 self.target_root = settings["ROOT"]
4166 self.myopts = myopts
4167 self.myparams = myparams
4169 if settings.get("PORTAGE_DEBUG", "") == "1":
4171 self.spinner = spinner
4172 self._running_root = trees["/"]["root_config"]
4173 self._opts_no_restart = Scheduler._opts_no_restart
4174 self.pkgsettings = {}
4175 # Maps slot atom to package for each Package added to the graph.
4176 self._slot_pkg_map = {}
4177 # Maps nodes to the reasons they were selected for reinstallation.
4178 self._reinstall_nodes = {}
4181 self._trees_orig = trees
4183 # Contains a filtered view of preferred packages that are selected
4184 # from available repositories.
4185 self._filtered_trees = {}
4186 # Contains installed packages and new packages that have been added
4188 self._graph_trees = {}
4189 # All Package instances
4190 self._pkg_cache = self._package_cache(self)
4191 for myroot in trees:
4192 self.trees[myroot] = {}
4193 # Create a RootConfig instance that references
4194 # the FakeVartree instead of the real one.
4195 self.roots[myroot] = RootConfig(
4196 trees[myroot]["vartree"].settings,
4198 trees[myroot]["root_config"].setconfig)
4199 for tree in ("porttree", "bintree"):
4200 self.trees[myroot][tree] = trees[myroot][tree]
4201 self.trees[myroot]["vartree"] = \
4202 FakeVartree(trees[myroot]["root_config"],
4203 pkg_cache=self._pkg_cache)
4204 self.pkgsettings[myroot] = portage.config(
4205 clone=self.trees[myroot]["vartree"].settings)
4206 self._slot_pkg_map[myroot] = {}
4207 vardb = self.trees[myroot]["vartree"].dbapi
4208 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4209 "--buildpkgonly" not in self.myopts
4210 # This fakedbapi instance will model the state that the vdb will
4211 # have after new packages have been installed.
4212 fakedb = PackageVirtualDbapi(vardb.settings)
4213 if preload_installed_pkgs:
4215 self.spinner.update()
4216 # This triggers metadata updates via FakeVartree.
4217 vardb.aux_get(pkg.cpv, [])
4218 fakedb.cpv_inject(pkg)
4220 # Now that the vardb state is cached in our FakeVartree,
4221 # we won't be needing the real vartree cache for awhile.
4222 # To make some room on the heap, clear the vardbapi
4224 trees[myroot]["vartree"].dbapi._clear_cache()
4227 self.mydbapi[myroot] = fakedb
4230 graph_tree.dbapi = fakedb
4231 self._graph_trees[myroot] = {}
4232 self._filtered_trees[myroot] = {}
4233 # Substitute the graph tree for the vartree in dep_check() since we
4234 # want atom selections to be consistent with package selections
4235 # have already been made.
4236 self._graph_trees[myroot]["porttree"] = graph_tree
4237 self._graph_trees[myroot]["vartree"] = graph_tree
4238 def filtered_tree():
4240 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4241 self._filtered_trees[myroot]["porttree"] = filtered_tree
4243 # Passing in graph_tree as the vartree here could lead to better
4244 # atom selections in some cases by causing atoms for packages that
4245 # have been added to the graph to be preferred over other choices.
4246 # However, it can trigger atom selections that result in
4247 # unresolvable direct circular dependencies. For example, this
4248 # happens with gwydion-dylan which depends on either itself or
4249 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4250 # gwydion-dylan-bin needs to be selected in order to avoid a
4251 # an unresolvable direct circular dependency.
4253 # To solve the problem described above, pass in "graph_db" so that
4254 # packages that have been added to the graph are distinguishable
4255 # from other available packages and installed packages. Also, pass
4256 # the parent package into self._select_atoms() calls so that
4257 # unresolvable direct circular dependencies can be detected and
4258 # avoided when possible.
4259 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4260 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
# Candidate databases consulted during package selection, in preference
# order: ebuild repo (unless --usepkgonly), binary packages (--usepkg),
# then the installed db.
4263 portdb = self.trees[myroot]["porttree"].dbapi
4264 bindb = self.trees[myroot]["bintree"].dbapi
4265 vardb = self.trees[myroot]["vartree"].dbapi
4266 # (db, pkg_type, built, installed, db_keys)
4267 if "--usepkgonly" not in self.myopts:
4268 db_keys = list(portdb._aux_cache_keys)
4269 dbs.append((portdb, "ebuild", False, False, db_keys))
4270 if "--usepkg" in self.myopts:
4271 db_keys = list(bindb._aux_cache_keys)
4272 dbs.append((bindb, "binary", True, False, db_keys))
4273 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4274 dbs.append((vardb, "installed", True, True, db_keys))
4275 self._filtered_trees[myroot]["dbs"] = dbs
4276 if "--usepkg" in self.myopts:
4277 self.trees[myroot]["bintree"].populate(
4278 "--getbinpkg" in self.myopts,
4279 "--getbinpkgonly" in self.myopts)
# Graph bookkeeping containers used throughout resolution.
4282 self.digraph=portage.digraph()
4283 # contains all sets added to the graph
4285 # contains atoms given as arguments
4286 self._sets["args"] = InternalPackageSet()
4287 # contains all atoms from all sets added to the graph, including
4288 # atoms given as arguments
4289 self._set_atoms = InternalPackageSet()
4290 self._atom_arg_map = {}
4291 # contains all nodes pulled in by self._set_atoms
4292 self._set_nodes = set()
4293 # Contains only Blocker -> Uninstall edges
4294 self._blocker_uninstalls = digraph()
4295 # Contains only Package -> Blocker edges
4296 self._blocker_parents = digraph()
4297 # Contains only irrelevant Package -> Blocker edges
4298 self._irrelevant_blockers = digraph()
4299 # Contains only unsolvable Package -> Blocker edges
4300 self._unsolvable_blockers = digraph()
4301 self._slot_collision_info = set()
4302 # Slot collision nodes are not allowed to block other packages since
4303 # blocker validation is only able to account for one package per slot.
4304 self._slot_collision_nodes = set()
4305 self._serialized_tasks_cache = None
4306 self._scheduler_graph = None
4307 self._displayed_list = None
4308 self._pprovided_args = []
4309 self._missing_args = []
4310 self._masked_installed = set()
4311 self._unsatisfied_deps_for_display = []
4312 self._unsatisfied_blockers_for_display = None
4313 self._circular_deps_for_display = None
4314 self._dep_stack = []
4315 self._unsatisfied_deps = []
4316 self._initially_unsatisfied_deps = []
4317 self._ignored_deps = []
4318 self._required_set_names = set(["system", "world"])
# Strategy hooks: both selection policies default to "highest available".
4319 self._select_atoms = self._select_atoms_highest_available
4320 self._select_package = self._select_pkg_highest_available
4321 self._highest_pkg_cache = {}
# Render the slot-conflict report: for every colliding slot, list the
# conflicting package instances and (a pruned subset of) their parents,
# then print masking advice unless --quiet.
# NOTE(review): several lines are missing from this listing (e.g. the
# initialization of msg/indent/max_parents and some loop headers).
4323 def _show_slot_collision_notice(self):
4324 """Show an informational message advising the user to mask one of the
4325 the packages. In some cases it may be possible to resolve this
4326 automatically, but support for backtracking (removal nodes that have
4327 already been selected) will be required in order to handle all possible
4330 if not self._slot_collision_info:
4333 self._show_merge_list()
4336 msg.append("\n!!! Multiple package instances within a single " + \
4337 "package slot have been pulled\n")
4338 msg.append("!!! into the dependency graph, resulting" + \
4339 " in a slot conflict:\n\n")
4341 # Max number of parents shown, to avoid flooding the display.
4343 for slot_atom, root in self._slot_collision_info:
4344 msg.append(str(slot_atom))
4347 for node in self._slot_collision_nodes:
4348 if node.slot_atom == slot_atom:
4349 slot_nodes.append(node)
4350 slot_nodes.append(self._slot_pkg_map[root][slot_atom])
4351 for node in slot_nodes:
4353 msg.append(str(node))
4354 parents = self.digraph.parent_nodes(node)
4357 if len(parents) > max_parents:
4359 # When generating the pruned list, prefer instances
4360 # of DependencyArg over instances of Package.
4361 for parent in parents:
4362 if isinstance(parent, DependencyArg):
4363 pruned_list.append(parent)
4364 # Prefer Packages instances that themselves have been
4365 # pulled into collision slots.
4366 for parent in parents:
4367 if isinstance(parent, Package) and \
4368 (parent.slot_atom, parent.root) \
4369 in self._slot_collision_info:
4370 pruned_list.append(parent)
4371 for parent in parents:
4372 if len(pruned_list) >= max_parents:
4374 if not isinstance(parent, DependencyArg) and \
4375 parent not in pruned_list:
4376 pruned_list.append(parent)
4377 omitted_parents = len(parents) - len(pruned_list)
4378 parents = pruned_list
4379 msg.append(" pulled in by\n")
4380 for parent in parents:
4381 msg.append(2*indent)
4382 msg.append(str(parent))
4385 msg.append(2*indent)
4386 msg.append("(and %d more)\n" % omitted_parents)
4388 msg.append(" (no parents)\n")
4391 sys.stderr.write("".join(msg))
4394 if "--quiet" in self.myopts:
# Advice text, word-wrapped to 72 columns via the (now legacy) formatter
# module's AbstractFormatter/DumbWriter.
4398 msg.append("It may be possible to solve this problem ")
4399 msg.append("by using package.mask to prevent one of ")
4400 msg.append("those packages from being selected. ")
4401 msg.append("However, it is also possible that conflicting ")
4402 msg.append("dependencies exist such that they are impossible to ")
4403 msg.append("satisfy simultaneously. If such a conflict exists in ")
4404 msg.append("the dependencies of two different packages, then those ")
4405 msg.append("packages can not be installed simultaneously.")
4407 from formatter import AbstractFormatter, DumbWriter
4408 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4410 f.add_flowing_data(x)
4414 msg.append("For more information, see MASKED PACKAGES ")
4415 msg.append("section in the emerge man page or refer ")
4416 msg.append("to the Gentoo Handbook.")
4418 f.add_flowing_data(x)
# Compare previous vs current USE/IUSE to decide whether a reinstall is
# needed. --newuse also counts IUSE additions/removals (minus forced
# flags); --reinstall=changed-use only counts flags whose enabled state
# changed. NOTE(review): the "return flags"/"return None" lines are among
# those missing from this listing.
4422 def _reinstall_for_flags(self, forced_flags,
4423 orig_use, orig_iuse, cur_use, cur_iuse):
4424 """Return a set of flags that trigger reinstallation, or None if there
4425 are no such flags."""
4426 if "--newuse" in self.myopts:
4427 flags = set(orig_iuse.symmetric_difference(
4428 cur_iuse).difference(forced_flags))
4429 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4430 cur_iuse.intersection(cur_use)))
4433 elif "changed-use" == self.myopts.get("--reinstall"):
4434 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4435 cur_iuse.intersection(cur_use))
# Drain the dependency stack, dispatching Package entries to
# _add_pkg_deps() and Dependency entries to _add_dep(); a False return
# from either aborts graph creation. NOTE(review): the loop header and
# the return statements are among the lines missing from this listing.
4440 def _create_graph(self, allow_unsatisfied=False):
4441 dep_stack = self._dep_stack
4443 self.spinner.update()
4444 dep = dep_stack.pop()
4445 if isinstance(dep, Package):
4446 if not self._add_pkg_deps(dep,
4447 allow_unsatisfied=allow_unsatisfied):
4450 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Resolve a single Dependency: blockers are recorded in _blocker_parents
# (not resolved here); normal atoms are resolved via _select_package and
# the chosen package is handed to _add_pkg(). Unsatisfiable deps are
# either queued (allow_unsatisfied) or recorded for display.
4454 def _add_dep(self, dep, allow_unsatisfied=False):
4455 debug = "--debug" in self.myopts
4456 buildpkgonly = "--buildpkgonly" in self.myopts
4457 nodeps = "--nodeps" in self.myopts
4458 empty = "empty" in self.myparams
4459 deep = "deep" in self.myparams
# --update only forces re-evaluation near the top of the graph.
4460 update = "--update" in self.myopts and dep.depth <= 1
4462 if not buildpkgonly and \
4464 dep.parent not in self._slot_collision_nodes:
4465 if dep.parent.onlydeps:
4466 # It's safe to ignore blockers if the
4467 # parent is an --onlydeps node.
4469 # The blocker applies to the root where
4470 # the parent is or will be installed.
4471 blocker = Blocker(atom=dep.atom,
4472 eapi=dep.parent.metadata["EAPI"],
4473 root=dep.parent.root)
4474 self._blocker_parents.add(blocker, dep.parent)
4476 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4477 onlydeps=dep.onlydeps)
4479 if allow_unsatisfied:
4480 self._unsatisfied_deps.append(dep)
4482 self._unsatisfied_deps_for_display.append(
4483 ((dep.root, dep.atom), {"myparent":dep.parent}))
4485 # In some cases, dep_check will return deps that shouldn't
4486 # be proccessed any further, so they are identified and
4487 # discarded here. Try to discard as few as possible since
4488 # discarded dependencies reduce the amount of information
4489 # available for optimization of merge order.
4490 if dep.priority.satisfied and \
4491 not (existing_node or empty or deep or update):
4493 if dep.root == self.target_root:
4495 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4496 except StopIteration:
4498 except portage.exception.InvalidDependString:
4499 if not dep_pkg.installed:
4500 # This shouldn't happen since the package
4501 # should have been masked.
4504 self._ignored_deps.append(dep)
4507 if not self._add_pkg(dep_pkg, dep):
# Add one resolved package to the graph: detect reusable existing slot
# nodes, record slot collisions, register the node in _slot_pkg_map and
# the fake db, wire parent/argument edges, and finally queue the package
# for dependency traversal. NOTE(review): many lines of this method are
# missing from the listing; comments describe only what is visible.
4511 def _add_pkg(self, pkg, dep):
4518 myparent = dep.parent
4519 priority = dep.priority
4521 if priority is None:
4522 priority = DepPriority()
4524 Fills the digraph with nodes comprised of packages to merge.
4525 mybigkey is the package spec of the package to merge.
4526 myparent is the package depending on mybigkey ( or None )
4527 addme = Should we add this package to the digraph or are we just looking at it's deps?
4528 Think --onlydeps, we need to ignore packages in that case.
4531 #IUSE-aware emerge -> USE DEP aware depgraph
4532 #"no downgrade" emerge
4535 # select the correct /var database that we'll be checking against
4536 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4537 pkgsettings = self.pkgsettings[pkg.root]
4543 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4544 except portage.exception.InvalidDependString, e:
4545 if not pkg.installed:
4546 show_invalid_depstring_notice(
4547 pkg, pkg.metadata["PROVIDE"], str(e))
4551 args = [arg for arg, atom in arg_atoms]
4553 if not pkg.onlydeps:
4554 if not pkg.installed and \
4555 "empty" not in self.myparams and \
4556 vardbapi.match(pkg.slot_atom):
4557 # Increase the priority of dependencies on packages that
4558 # are being rebuilt. This optimizes merge order so that
4559 # dependencies are rebuilt/updated as soon as possible,
4560 # which is needed especially when emerge is called by
4561 # revdep-rebuild since dependencies may be affected by ABI
4562 # breakage that has rendered them useless. Don't adjust
4563 # priority here when in "empty" mode since all packages
4564 # are being merged in that case.
4565 priority.rebuild = True
4567 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4568 slot_collision = False
4570 existing_node_matches = pkg.cpv == existing_node.cpv
4571 if existing_node_matches and \
4572 pkg != existing_node and \
4573 dep.atom is not None:
4574 # Use package set for matching since it will match via
4575 # PROVIDE when necessary, while match_from_list does not.
4576 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4577 if not atom_set.findAtomForPackage(existing_node):
4578 existing_node_matches = False
4579 if existing_node_matches:
4580 # The existing node can be reused.
4583 self.digraph.add(existing_node, arg,
4585 # If a direct circular dependency is not an unsatisfied
4586 # buildtime dependency then drop it here since otherwise
4587 # it can skew the merge order calculation in an unwanted
4589 if existing_node != myparent or \
4590 (priority.buildtime and not priority.satisfied):
4591 self.digraph.addnode(existing_node, myparent,
4596 if pkg.cpv == existing_node.cpv and \
4597 dep.atom is not None and \
4599 # Multiple different instances of the same version
4600 # (typically one installed and another not yet
4601 # installed) have been pulled into the graph due
4602 # to a USE dependency. The "slot collision" display
4603 # is not helpful in a case like this, so display it
4604 # as an unsatisfied dependency.
4605 self._unsatisfied_deps_for_display.append(
4606 ((dep.root, dep.atom), {"myparent":dep.parent}))
4607 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
4608 self._slot_collision_nodes.add(pkg)
4609 self.digraph.addnode(pkg, myparent, priority=priority)
4612 if pkg in self._slot_collision_nodes:
4614 # A slot collision has occurred. Sometimes this coincides
4615 # with unresolvable blockers, so the slot collision will be
4616 # shown later if there are no unresolvable blockers.
4617 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
4618 self._slot_collision_nodes.add(pkg)
4619 slot_collision = True
4622 # Now add this node to the graph so that self.display()
4623 # can show use flags and --tree portage.output. This node is
4624 # only being partially added to the graph. It must not be
4625 # allowed to interfere with the other nodes that have been
4626 # added. Do not overwrite data for existing nodes in
4627 # self.mydbapi since that data will be used for blocker
4629 # Even though the graph is now invalid, continue to process
4630 # dependencies so that things like --fetchonly can still
4631 # function despite collisions.
4634 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4635 self.mydbapi[pkg.root].cpv_inject(pkg)
4637 self.digraph.addnode(pkg, myparent, priority=priority)
4639 if not pkg.installed:
4640 # Allow this package to satisfy old-style virtuals in case it
4641 # doesn't already. Any pre-existing providers will be preferred
4644 pkgsettings.setinst(pkg.cpv, pkg.metadata)
4645 # For consistency, also update the global virtuals.
4646 settings = self.roots[pkg.root].settings
4648 settings.setinst(pkg.cpv, pkg.metadata)
4650 except portage.exception.InvalidDependString, e:
4651 show_invalid_depstring_notice(
4652 pkg, pkg.metadata["PROVIDE"], str(e))
4657 self._set_nodes.add(pkg)
4659 # Do this even when addme is False (--onlydeps) so that the
4660 # parent/child relationship is always known in case
4661 # self._show_slot_collision_notice() needs to be called later.
4663 self.digraph.add(pkg, myparent, priority=priority)
4666 self.digraph.add(pkg, arg, priority=priority)
4668 """ This section determines whether we go deeper into dependencies or not.
4669 We want to go deeper on a few occasions:
4670 Installing package A, we need to make sure package A's deps are met.
4671 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
4672 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
4674 dep_stack = self._dep_stack
4675 if "recurse" not in self.myparams:
4677 elif pkg.installed and \
4678 "deep" not in self.myparams:
# Deps of already-installed packages are only traversed with --deep;
# otherwise they are parked on the ignored-deps list.
4679 dep_stack = self._ignored_deps
4681 self.spinner.update()
4686 dep_stack.append(pkg)
# Parse a package's *DEPEND strings, select atoms with dep_check, and
# push one Dependency per selected atom back through _add_dep(). DEPEND
# is evaluated against "/" (build host), RDEPEND/PDEPEND against the
# package's own root. NOTE(review): several lines (try/except headers,
# returns, edepend/deps initialization) are missing from this listing.
4689 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
4691 mytype = pkg.type_name
4694 metadata = pkg.metadata
4695 myuse = pkg.use.enabled
4697 depth = pkg.depth + 1
4698 removal_action = "remove" in self.myparams
4701 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
4703 edepend[k] = metadata[k]
4705 if not pkg.built and \
4706 "--buildpkgonly" in self.myopts and \
4707 "deep" not in self.myparams and \
4708 "empty" not in self.myparams:
4709 edepend["RDEPEND"] = ""
4710 edepend["PDEPEND"] = ""
4711 bdeps_satisfied = False
4713 if pkg.built and not removal_action:
4714 if self.myopts.get("--with-bdeps", "n") == "y":
4715 # Pull in build time deps as requested, but marked them as
4716 # "satisfied" since they are not strictly required. This allows
4717 # more freedom in the merge order calculation for solving
4718 # circular dependencies. Don't convert to PDEPEND since that
4719 # could make --with-bdeps=y less effective if it is used to
4720 # adjust merge order to prevent built_with_use() calls from
4722 bdeps_satisfied = True
4724 # built packages do not have build time dependencies.
4725 edepend["DEPEND"] = ""
4727 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
4728 edepend["DEPEND"] = ""
4731 ("/", edepend["DEPEND"],
4732 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
4733 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
4734 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
# Installed packages are parsed non-strictly since their metadata may
# predate current validation rules.
4737 debug = "--debug" in self.myopts
4738 strict = mytype != "installed"
4740 for dep_root, dep_string, dep_priority in deps:
4742 # Decrease priority so that --buildpkgonly
4743 # hasallzeros() works correctly.
4744 dep_priority = DepPriority()
4749 print "Parent: ", jbigkey
4750 print "Depstring:", dep_string
4751 print "Priority:", dep_priority
4752 vardb = self.roots[dep_root].trees["vartree"].dbapi
4754 selected_atoms = self._select_atoms(dep_root,
4755 dep_string, myuse=myuse, parent=pkg, strict=strict)
4756 except portage.exception.InvalidDependString, e:
4757 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
4760 print "Candidates:", selected_atoms
4762 for atom in selected_atoms:
4765 atom = portage.dep.Atom(atom)
4767 mypriority = dep_priority.copy()
# An atom already satisfied by an installed package is marked so merge
# ordering can treat it as soft.
4768 if not atom.blocker and vardb.match(atom):
4769 mypriority.satisfied = True
4771 if not self._add_dep(Dependency(atom=atom,
4772 blocker=atom.blocker, depth=depth, parent=pkg,
4773 priority=mypriority, root=dep_root),
4774 allow_unsatisfied=allow_unsatisfied):
4777 except portage.exception.InvalidAtom, e:
4778 show_invalid_depstring_notice(
4779 pkg, dep_string, str(e))
4781 if not pkg.installed:
4785 print "Exiting...", jbigkey
# Unqualified atoms that match multiple packages are fatal; binaries
# cannot be fixed in place, ebuilds point at the maintainer.
4786 except portage.exception.AmbiguousPackageName, e:
4788 portage.writemsg("\n\n!!! An atom in the dependencies " + \
4789 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
4791 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
4792 portage.writemsg("\n", noiselevel=-1)
4793 if mytype == "binary":
4795 "!!! This binary package cannot be installed: '%s'\n" % \
4796 mykey, noiselevel=-1)
4797 elif mytype == "ebuild":
4798 portdb = self.roots[myroot].trees["porttree"].dbapi
4799 myebuild, mylocation = portdb.findname2(mykey)
4800 portage.writemsg("!!! This ebuild cannot be installed: " + \
4801 "'%s'\n" % myebuild, noiselevel=-1)
4802 portage.writemsg("!!! Please notify the package maintainer " + \
4803 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Build a dependency-priority object of the class appropriate for the
# current operation: unmerge runs ("remove" in myparams) use
# UnmergeDepPriority, everything else uses DepPriority. kwargs are
# forwarded unchanged to the chosen constructor.
# NOTE(review): the `else:` line between the two assignments is elided
# in this view — confirm against the full file.
4807 def _priority(self, **kwargs):
4808 if "remove" in self.myparams:
4809 priority_constructor = UnmergeDepPriority
4811 priority_constructor = DepPriority
4812 return priority_constructor(**kwargs)
# Expand a category-less atom (e.g. "foo" or ">=foo-1.0") into the list
# of fully-qualified candidate atoms, by scanning every configured
# package database for category/pn pairs whose pn matches.
4814 def _dep_expand(self, root_config, atom_without_category):
4816 @param root_config: a root config instance
4817 @type root_config: RootConfig
4818 @param atom_without_category: an atom without a category component
4819 @type atom_without_category: String
4821 @returns: a list of atoms containing categories (possibly empty)
# Temporarily insert a "null" category so dep_getkey/catsplit can
# extract the bare package name from the category-less atom.
4823 null_cp = portage.dep_getkey(insert_category_into_atom(
4824 atom_without_category, "null"))
4825 cat, atom_pn = portage.catsplit(null_cp)
# Collect the union of all cp ("category/pn") entries from every
# db (ebuild, binary, installed) for this root.
4828 for db, pkg_type, built, installed, db_keys in \
4829 self._filtered_trees[root_config.root]["dbs"]:
4830 cp_set.update(db.cp_all())
4831 for cp in list(cp_set):
4832 cat, pn = portage.catsplit(cp)
# NOTE(review): the pn-vs-atom_pn filter between these two catsplit
# calls is elided in this view — presumably non-matching cps are
# discarded from cp_set before the rebuild loop; verify in full file.
4837 cat, pn = portage.catsplit(cp)
4838 deps.append(insert_category_into_atom(
4839 atom_without_category, cat))
# Return whether any configured package db for the given root carries
# an actual package under atom_cp — i.e. whether a new-style virtual
# package exists for that category/pn (as opposed to an old-style
# PROVIDE virtual). Used to skip stale old-style matches.
# NOTE(review): the `ret = True` / final return lines are elided here.
4842 def _have_new_virt(self, root, atom_cp):
4844 for db, pkg_type, built, installed, db_keys in \
4845 self._filtered_trees[root]["dbs"]:
4846 if db.cp_list(atom_cp):
# Yield the argument atoms (from self._set_atoms) that match the given
# package, filtering out matches that should not count: old-style
# virtual matches shadowed by a new-style virtual, and atoms whose best
# visible match lives in a higher slot than pkg.
4851 def _iter_atoms_for_pkg(self, pkg):
4852 # TODO: add multiple $ROOT support
4853 if pkg.root != self.target_root:
4855 atom_arg_map = self._atom_arg_map
4856 root_config = self.roots[pkg.root]
4857 for atom in self._set_atoms.iterAtomsForPackage(pkg):
4858 atom_cp = portage.dep_getkey(atom)
# Old-style virtual match, but a new-style virtual package exists
# for the same cp — skip this atom (elided `continue` presumed).
4859 if atom_cp != pkg.cp and \
4860 self._have_new_virt(pkg.root, atom_cp):
4862 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
4863 visible_pkgs.reverse() # descending order
# Look for a visible package in a *different* slot that is newer
# than pkg; if found, pkg is not the preferred match for the atom.
4865 for visible_pkg in visible_pkgs:
4866 if visible_pkg.cp != atom_cp:
4868 if pkg >= visible_pkg:
4869 # This is descending order, and we're not
4870 # interested in any versions <= pkg given.
4872 if pkg.slot_atom != visible_pkg.slot_atom:
4873 higher_slot = visible_pkg
4875 if higher_slot is not None:
# Yield each argument object mapped to this (atom, root) pair.
4877 for arg in atom_arg_map[(atom, pkg.root)]:
4878 if isinstance(arg, PackageArg) and \
# Entry point for resolving the command-line arguments: classifies each
# item in myfiles (.tbz2 binary package, .ebuild file, absolute path
# owned by an installed package, named set, or package atom), converts
# it to an *Arg object, then seeds and builds the dependency graph.
# Returns a (success, myfavorites) pair; success is falsy on any
# resolution failure.
4883 def select_files(self, myfiles):
4884 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
4885 appropriate depgraph and return a favorite list."""
4886 debug = "--debug" in self.myopts
4887 root_config = self.roots[self.target_root]
4888 sets = root_config.sets
4889 getSetAtoms = root_config.setconfig.getSetAtoms
4891 myroot = self.target_root
4892 dbs = self._filtered_trees[myroot]["dbs"]
4893 vardb = self.trees[myroot]["vartree"].dbapi
4894 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
4895 portdb = self.trees[myroot]["porttree"].dbapi
4896 bindb = self.trees[myroot]["bintree"].dbapi
4897 pkgsettings = self.pkgsettings[myroot]
4899 onlydeps = "--onlydeps" in self.myopts
# --- Classification loop over myfiles (per-argument dispatch on
# file extension / path / set name / atom). Several branch headers
# (e.g. the `for x in myfiles:` line and `.tbz2` test) are elided
# in this view.
4902 ext = os.path.splitext(x)[1]
# .tbz2 arguments: locate the file under PKGDIR if a bare name
# was given.
4904 if not os.path.exists(x):
4906 os.path.join(pkgsettings["PKGDIR"], "All", x)):
4907 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
4908 elif os.path.exists(
4909 os.path.join(pkgsettings["PKGDIR"], x)):
4910 x = os.path.join(pkgsettings["PKGDIR"], x)
4912 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
4913 print "!!! Please ensure the tbz2 exists as specified.\n"
4914 return 0, myfavorites
# Derive the cpv key from the tbz2's embedded CATEGORY plus its
# basename, and require the file to be at its canonical PKGDIR
# location so the bintree can find it.
4915 mytbz2=portage.xpak.tbz2(x)
4916 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
4917 if os.path.realpath(x) != \
4918 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
4919 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
4920 return 0, myfavorites
4921 db_keys = list(bindb._aux_cache_keys)
4922 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
4923 pkg = Package(type_name="binary", root_config=root_config,
4924 cpv=mykey, built=True, metadata=metadata,
4926 self._pkg_cache[pkg] = pkg
4927 args.append(PackageArg(arg=x, package=pkg,
4928 root_config=root_config))
# .ebuild arguments: validate the path lies inside a portage tree
# and matches the tree layout, then build an ebuild Package.
4929 elif ext==".ebuild":
4930 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
4931 pkgdir = os.path.dirname(ebuild_path)
4932 tree_root = os.path.dirname(os.path.dirname(pkgdir))
4933 cp = pkgdir[len(tree_root)+1:]
4934 e = portage.exception.PackageNotFound(
4935 ("%s is not in a valid portage tree " + \
4936 "hierarchy or does not exist") % x)
4937 if not portage.isvalidatom(cp):
4939 cat = portage.catsplit(cp)[0]
# [:-7] strips the ".ebuild" suffix from the basename.
4940 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
4941 if not portage.isvalidatom("="+mykey):
4943 ebuild_path = portdb.findname(mykey)
4945 if ebuild_path != os.path.join(os.path.realpath(tree_root),
4946 cp, os.path.basename(ebuild_path)):
4947 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
4948 return 0, myfavorites
# Warn (with a countdown delay) when directly emerging a masked
# ebuild instead of using /etc/portage/package.*.
4949 if mykey not in portdb.xmatch(
4950 "match-visible", portage.dep_getkey(mykey)):
4951 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
4952 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
4953 print colorize("BAD", "*** page for details.")
4954 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
4957 raise portage.exception.PackageNotFound(
4958 "%s is not in a valid portage tree hierarchy or does not exist" % x)
4959 db_keys = list(portdb._aux_cache_keys)
4960 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
4961 pkg = Package(type_name="ebuild", root_config=root_config,
4962 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
4963 pkgsettings.setcpv(pkg)
4964 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
4965 self._pkg_cache[pkg] = pkg
4966 args.append(PackageArg(arg=x, package=pkg,
4967 root_config=root_config))
# Absolute-path arguments: resolved later to owning packages.
4968 elif x.startswith(os.path.sep):
4969 if not x.startswith(myroot):
4970 portage.writemsg(("\n\n!!! '%s' does not start with" + \
4971 " $ROOT.\n") % x, noiselevel=-1)
4973 # Queue these up since it's most efficient to handle
4974 # multiple files in a single iter_owners() call.
4975 lookup_owners.append(x)
# Named sets ("system"/"world" or @set syntax).
4977 if x in ("system", "world"):
4979 if x.startswith(SETPREFIX):
4980 s = x[len(SETPREFIX):]
4982 raise portage.exception.PackageSetNotFound(s)
4985 # Recursively expand sets so that containment tests in
4986 # self._get_parent_sets() properly match atoms in nested
4987 # sets (like if world contains system).
4988 expanded_set = InternalPackageSet(
4989 initial_atoms=getSetAtoms(s))
4990 self._sets[s] = expanded_set
4991 args.append(SetArg(arg=x, set=expanded_set,
4992 root_config=root_config))
4993 myfavorites.append(x)
# Plain atom arguments: validate, then expand a missing category.
4995 if not is_valid_package_atom(x):
4996 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
4998 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
4999 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5001 # Don't expand categories or old-style virtuals here unless
5002 # necessary. Expansion of old-style virtuals here causes at
5003 # least the following problems:
5004 # 1) It's more difficult to determine which set(s) an atom
5005 # came from, if any.
5006 # 2) It takes away freedom from the resolver to choose other
5007 # possible expansions when necessary.
5009 args.append(AtomArg(arg=x, atom=x,
5010 root_config=root_config))
5012 expanded_atoms = self._dep_expand(root_config, x)
5013 installed_cp_set = set()
5014 for atom in expanded_atoms:
5015 atom_cp = portage.dep_getkey(atom)
5016 if vardb.cp_list(atom_cp):
5017 installed_cp_set.add(atom_cp)
# Prefer the already-installed cp when the expansion is otherwise
# ambiguous; a genuinely ambiguous name aborts with a listing.
5018 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5019 installed_cp = iter(installed_cp_set).next()
5020 expanded_atoms = [atom for atom in expanded_atoms \
5021 if portage.dep_getkey(atom) == installed_cp]
5023 if len(expanded_atoms) > 1:
5026 ambiguous_package_name(x, expanded_atoms, root_config,
5027 self.spinner, self.myopts)
5028 return False, myfavorites
5030 atom = expanded_atoms[0]
# No expansion matched: fall back to virtual/ (if a PROVIDE
# mapping exists for the pn) or a null/ placeholder category.
5032 null_atom = insert_category_into_atom(x, "null")
5033 null_cp = portage.dep_getkey(null_atom)
5034 cat, atom_pn = portage.catsplit(null_cp)
5035 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5037 # Allow the depgraph to choose which virtual.
5038 atom = insert_category_into_atom(x, "virtual")
5040 atom = insert_category_into_atom(x, "null")
5042 args.append(AtomArg(arg=x, atom=atom,
5043 root_config=root_config))
# --- Resolve queued absolute paths to owning installed packages
# in one batched iter_owners() pass.
5047 search_for_multiple = False
5048 if len(lookup_owners) > 1:
5049 search_for_multiple = True
5051 for x in lookup_owners:
5052 if not search_for_multiple and os.path.isdir(x):
5053 search_for_multiple = True
5054 relative_paths.append(x[len(myroot):])
5057 for pkg, relative_path in \
5058 real_vardb._owners.iter_owners(relative_paths):
5059 owners.add(pkg.mycpv)
5060 if not search_for_multiple:
5064 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5065 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5069 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5071 # portage now masks packages with missing slot, but it's
5072 # possible that one was installed by an older version
5073 atom = portage.cpv_getkey(cpv)
5075 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5076 args.append(AtomArg(arg=atom, atom=atom,
5077 root_config=root_config))
# --- With --update, expand each atom argument into per-SLOT
# "greedy" atoms covering every installed slot of that cp.
5079 if "--update" in self.myopts:
5080 # Enable greedy SLOT atoms for atoms given as arguments.
5081 # This is currently disabled for sets since greedy SLOT
5082 # atoms could be a property of the set itself.
5085 # In addition to any installed slots, also try to pull
5086 # in the latest new slot that may be available.
5087 greedy_atoms.append(arg)
5088 if not isinstance(arg, (AtomArg, PackageArg)):
5090 atom_cp = portage.dep_getkey(arg.atom)
5092 for cpv in vardb.match(arg.atom):
5093 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5095 greedy_atoms.append(
5096 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5097 root_config=root_config))
5101 # Create the "args" package set from atoms and
5102 # packages given as arguments.
5103 args_set = self._sets["args"]
5105 if not isinstance(arg, (AtomArg, PackageArg)):
5108 if myatom in args_set:
5110 args_set.add(myatom)
5111 myfavorites.append(myatom)
5112 self._set_atoms.update(chain(*self._sets.itervalues()))
# Map every (atom, root) pair back to the argument object(s) that
# introduced it, for later diagnostics and selection logic.
5113 atom_arg_map = self._atom_arg_map
5115 for atom in arg.set:
5116 atom_key = (atom, myroot)
5117 refs = atom_arg_map.get(atom_key)
5120 atom_arg_map[atom_key] = refs
5123 pprovideddict = pkgsettings.pprovideddict
5125 portage.writemsg("\n", noiselevel=-1)
5126 # Order needs to be preserved since a feature of --nodeps
5127 # is to allow the user to force a specific merge order.
# --- Main resolution loop: select a package for each argument
# atom and add it to the graph.
5131 for atom in arg.set:
5132 self.spinner.update()
5133 dep = Dependency(atom=atom, onlydeps=onlydeps,
5134 root=myroot, parent=arg)
5135 atom_cp = portage.dep_getkey(atom)
5137 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5138 if pprovided and portage.match_from_list(atom, pprovided):
5139 # A provided package has been specified on the command line.
5140 self._pprovided_args.append((arg, atom))
5142 if isinstance(arg, PackageArg):
5143 if not self._add_pkg(arg.package, dep) or \
5144 not self._create_graph():
5145 sys.stderr.write(("\n\n!!! Problem resolving " + \
5146 "dependencies for %s\n") % arg.arg)
5147 return 0, myfavorites
5150 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5151 (arg, atom), noiselevel=-1)
5152 pkg, existing_node = self._select_package(
5153 myroot, atom, onlydeps=onlydeps)
# No package found: fatal unless the atom came from the
# system or world set (then it is only recorded as missing).
5155 if not (isinstance(arg, SetArg) and \
5156 arg.name in ("system", "world")):
5157 self._unsatisfied_deps_for_display.append(
5158 ((myroot, atom), {}))
5159 return 0, myfavorites
5160 self._missing_args.append((arg, atom))
5162 if atom_cp != pkg.cp:
5163 # For old-style virtuals, we need to repeat the
5164 # package.provided check against the selected package.
5165 expanded_atom = atom.replace(atom_cp, pkg.cp)
5166 pprovided = pprovideddict.get(pkg.cp)
5168 portage.match_from_list(expanded_atom, pprovided):
5169 # A provided package has been
5170 # specified on the command line.
5171 self._pprovided_args.append((arg, atom))
5173 if pkg.installed and "selective" not in self.myparams:
5174 self._unsatisfied_deps_for_display.append(
5175 ((myroot, atom), {}))
5176 # Previous behavior was to bail out in this case, but
5177 # since the dep is satisfied by the installed package,
5178 # it's more friendly to continue building the graph
5179 # and just show a warning message. Therefore, only bail
5180 # out here if the atom is not from either the system or
5182 if not (isinstance(arg, SetArg) and \
5183 arg.name in ("system", "world")):
5184 return 0, myfavorites
5186 # Add the selected package to the graph as soon as possible
5187 # so that later dep_check() calls can use it as feedback
5188 # for making more consistent atom selections.
5189 if not self._add_pkg(pkg, dep):
5190 if isinstance(arg, SetArg):
5191 sys.stderr.write(("\n\n!!! Problem resolving " + \
5192 "dependencies for %s from %s\n") % \
5195 sys.stderr.write(("\n\n!!! Problem resolving " + \
5196 "dependencies for %s\n") % atom)
5197 return 0, myfavorites
# --- Error reporting for signature and miscellaneous failures
# raised anywhere in the resolution loop.
5199 except portage.exception.MissingSignature, e:
5200 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5201 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5202 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5203 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5204 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5205 return 0, myfavorites
5206 except portage.exception.InvalidSignature, e:
5207 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5208 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5209 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5210 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5211 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5212 return 0, myfavorites
5213 except SystemExit, e:
5214 raise # Needed else can't exit
5215 except Exception, e:
5216 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5217 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5220 # Now that the root packages have been added to the graph,
5221 # process the dependencies.
5222 if not self._create_graph():
5223 return 0, myfavorites
# --usepkgonly sanity pass: flag any graph node scheduled for
# merge that is not backed by a binary package.
5226 if "--usepkgonly" in self.myopts:
5227 for xs in self.digraph.all_nodes():
5228 if not isinstance(xs, Package):
5230 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5234 print "Missing binary for:",xs[2]
5238 except self._unknown_internal_error:
5239 return False, myfavorites
5241 # We're true here unless we are missing binaries.
5242 return (not missing,myfavorites)
# Variant of atom selection used during graph completion: forces the
# lookup trees to self._graph_trees so dep_check only sees packages
# already in the graph (or installed and not being replaced), then
# delegates to the normal highest-available selector.
5244 def _select_atoms_from_graph(self, *pargs, **kwargs):
5246 Prefer atoms matching packages that have already been
5247 added to the graph or those that are installed and have
5248 not been scheduled for replacement.
5250 kwargs["trees"] = self._graph_trees
5251 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check() on a raw dependency string and return the
# atoms it selects. Temporarily installs `parent` into the tree dict
# so dep_check can use it as resolution feedback, and toggles the
# module-global portage.dep._dep_check_strict flag around the call.
5253 def _select_atoms_highest_available(self, root, depstring,
5254 myuse=None, parent=None, strict=True, trees=None):
5255 """This will raise InvalidDependString if necessary. If trees is
5256 None then self._filtered_trees is used."""
5257 pkgsettings = self.pkgsettings[root]
5259 trees = self._filtered_trees
# NOTE(review): the `if not strict:` guard before disabling
# _dep_check_strict, and the try/finally that restores state, are
# elided in this view — confirm against the full file.
5262 if parent is not None:
5263 trees[root]["parent"] = parent
5265 portage.dep._dep_check_strict = False
5266 mycheck = portage.dep_check(depstring, None,
5267 pkgsettings, myuse=myuse,
5268 myroot=root, trees=trees)
# Cleanup: remove the temporary parent entry and restore the
# global strict flag.
5270 if parent is not None:
5271 trees[root].pop("parent")
5272 portage.dep._dep_check_strict = True
# dep_check returns (ok, result); on failure the second element
# carries the error message.
5274 raise portage.exception.InvalidDependString(mycheck[1])
5275 selected_atoms = mycheck[1]
5276 return selected_atoms
# Print a detailed explanation of why an atom could not be satisfied:
# missing USE flags / IUSE, masked candidate packages, or simply no
# matching ebuilds — then walk the digraph parents to show what pulled
# the dependency in. Pure diagnostics; no resolver state is changed.
5278 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5279 atom = portage.dep.Atom(atom)
5280 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-free (and slot-preserving) copy of the atom so we
# can find candidates that differ only in USE deps.
5281 atom_without_use = atom
5283 atom_without_use = portage.dep.remove_slot(atom)
5285 atom_without_use += ":" + atom.slot
5286 atom_without_use = portage.dep.Atom(atom_without_use)
5287 xinfo = '"%s"' % atom
5290 # Discard null/ from failed cpv_expand category expansion.
5291 xinfo = xinfo.replace("null/", "")
5292 masked_packages = []
5294 missing_licenses = []
5295 have_eapi_mask = False
5296 pkgsettings = self.pkgsettings[root]
5297 implicit_iuse = pkgsettings._get_implicit_iuse()
5298 root_config = self.roots[root]
5299 portdb = self.roots[root].trees["porttree"].dbapi
5300 dbs = self._filtered_trees[root]["dbs"]
# Collect every candidate cpv (including masked ones, via
# xmatch "match-all" where available) and classify it as either
# USE-unsatisfied or masked.
5301 for db, pkg_type, built, installed, db_keys in dbs:
5305 if hasattr(db, "xmatch"):
5306 cpv_list = db.xmatch("match-all", atom_without_use)
5308 cpv_list = db.match(atom_without_use)
5311 for cpv in cpv_list:
5312 metadata, mreasons = get_mask_info(root_config, cpv,
5313 pkgsettings, db, pkg_type, built, installed, db_keys)
5314 if metadata is not None:
5315 pkg = Package(built=built, cpv=cpv,
5316 installed=installed, metadata=metadata,
5317 root_config=root_config)
5318 if pkg.cp != atom.cp:
5319 # A cpv can be returned from dbapi.match() as an
5320 # old-style virtual match even in cases when the
5321 # package does not actually PROVIDE the virtual.
5322 # Filter out any such false matches here.
5323 if not atom_set.findAtomForPackage(pkg):
5325 if atom.use and not mreasons:
5326 missing_use.append(pkg)
5328 masked_packages.append(
5329 (root_config, pkgsettings, cpv, metadata, mreasons))
# For each USE-unsatisfied candidate, work out whether the atom
# requires flags missing from IUSE, or flags merely toggled wrong.
5331 missing_use_reasons = []
5332 missing_iuse_reasons = []
5333 for pkg in missing_use:
5334 use = pkg.use.enabled
5335 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5336 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5338 for x in atom.use.required:
5339 if iuse_re.match(x) is None:
5340 missing_iuse.append(x)
5343 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5344 missing_iuse_reasons.append((pkg, mreasons))
5346 need_enable = sorted(atom.use.enabled.difference(use))
5347 need_disable = sorted(atom.use.disabled.intersection(use))
5348 if need_enable or need_disable:
5350 changes.extend(colorize("red", "+" + x) \
5351 for x in need_enable)
5352 changes.extend(colorize("blue", "-" + x) \
5353 for x in need_disable)
5354 mreasons.append("Change USE: %s" % " ".join(changes))
5355 missing_use_reasons.append((pkg, mreasons))
5357 if missing_iuse_reasons and not missing_use_reasons:
5358 missing_use_reasons = missing_iuse_reasons
5359 elif missing_use_reasons:
5360 # Only show the latest version.
5361 del missing_use_reasons[1:]
# Emit the appropriate report: USE problem, mask problem, or
# nothing available at all.
5363 if missing_use_reasons:
5364 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5365 print "!!! One of the following packages is required to complete your request:"
5366 for pkg, mreasons in missing_use_reasons:
5367 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5369 elif masked_packages:
5371 colorize("BAD", "All ebuilds that could satisfy ") + \
5372 colorize("INFORM", xinfo) + \
5373 colorize("BAD", " have been masked.")
5374 print "!!! One of the following masked packages is required to complete your request:"
5375 have_eapi_mask = show_masked_packages(masked_packages)
5378 msg = ("The current version of portage supports " + \
5379 "EAPI '%s'. You must upgrade to a newer version" + \
5380 " of portage before EAPI masked packages can" + \
5381 " be installed.") % portage.const.EAPI
5382 from textwrap import wrap
5383 for line in wrap(msg, 75):
5388 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5390 # Show parent nodes and the argument that pulled them in.
5391 traversed_nodes = set()
5394 while node is not None:
5395 traversed_nodes.add(node)
5396 msg.append('(dependency required by "%s" [%s])' % \
5397 (colorize('INFORM', str(node.cpv)), node.type_name))
5398 # When traversing to parents, prefer arguments over packages
5399 # since arguments are root nodes. Never traverse the same
5400 # package twice, in order to prevent an infinite loop.
5401 selected_parent = None
5402 for parent in self.digraph.parent_nodes(node):
5403 if isinstance(parent, DependencyArg):
5404 msg.append('(dependency required by "%s" [argument])' % \
5405 (colorize('INFORM', str(parent))))
5406 selected_parent = None
5408 if parent not in traversed_nodes:
5409 selected_parent = parent
5410 node = selected_parent
# Memoizing front-end for package selection: results are cached per
# (root, atom, onlydeps) key in self._highest_pkg_cache. A cached hit
# is refreshed if the chosen package has since been added to the graph
# (so `existing` becomes non-None); misses fall through to the real
# implementation and the result is stored.
5416 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5417 cache_key = (root, atom, onlydeps)
5418 ret = self._highest_pkg_cache.get(cache_key)
# NOTE(review): the unpacking of `ret` into (pkg, existing) and the
# early return for cache hits are elided in this view.
5421 if pkg and not existing:
5422 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5423 if existing and existing == pkg:
5424 # Update the cache to reflect that the
5425 # package has been added to the graph.
5427 self._highest_pkg_cache[cache_key] = ret
5429 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5430 self._highest_pkg_cache[cache_key] = ret
# Core package-selection algorithm: scan every configured db (ebuild,
# binary, installed) for candidates matching `atom`, apply visibility,
# KEYWORDS, USE-dependency, --newuse/--reinstall and slot logic, and
# return (best_package, existing_graph_node). matched_packages is
# ordered by db/type preference and the LAST entry wins.
5433 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5434 root_config = self.roots[root]
5435 pkgsettings = self.pkgsettings[root]
5436 dbs = self._filtered_trees[root]["dbs"]
5437 vardb = self.roots[root].trees["vartree"].dbapi
5438 portdb = self.roots[root].trees["porttree"].dbapi
5439 # List of acceptable packages, ordered by type preference.
5440 matched_packages = []
5441 highest_version = None
5442 if not isinstance(atom, portage.dep.Atom):
5443 atom = portage.dep.Atom(atom)
5445 atom_set = InternalPackageSet(initial_atoms=(atom,))
5446 existing_node = None
5448 usepkgonly = "--usepkgonly" in self.myopts
5449 empty = "empty" in self.myparams
5450 selective = "selective" in self.myparams
5452 noreplace = "--noreplace" in self.myopts
5453 # Behavior of the "selective" parameter depends on
5454 # whether or not a package matches an argument atom.
5455 # If an installed package provides an old-style
5456 # virtual that is no longer provided by an available
5457 # package, the installed package may match an argument
5458 # atom even though none of the available packages do.
5459 # Therefore, "selective" logic does not consider
5460 # whether or not an installed package matches an
5461 # argument atom. It only considers whether or not
5462 # available packages match argument atoms, which is
5463 # represented by the found_available_arg flag.
5464 found_available_arg = False
# Two passes: first look for a node already in the graph for the
# slot, then do the ordinary selection.
5465 for find_existing_node in True, False:
5468 for db, pkg_type, built, installed, db_keys in dbs:
5471 if installed and not find_existing_node:
5472 want_reinstall = reinstall or empty or \
5473 (found_available_arg and not selective)
5474 if want_reinstall and matched_packages:
5476 if hasattr(db, "xmatch"):
5477 cpv_list = db.xmatch("match-all", atom)
5479 cpv_list = db.match(atom)
5481 # USE=multislot can make an installed package appear as if
5482 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5483 # won't do any good as long as USE=multislot is enabled since
5484 # the newly built package still won't have the expected slot.
5485 # Therefore, assume that such SLOT dependencies are already
5486 # satisfied rather than forcing a rebuild.
5487 if installed and not cpv_list and atom.slot:
5488 for cpv in db.match(atom.cp):
5489 slot_available = False
5490 for other_db, other_type, other_built, \
5491 other_installed, other_keys in dbs:
5494 other_db.aux_get(cpv, ["SLOT"])[0]:
5495 slot_available = True
5499 if not slot_available:
5501 inst_pkg = self._pkg(cpv, "installed",
5502 root_config, installed=installed)
5503 # Remove the slot from the atom and verify that
5504 # the package matches the resulting atom.
5505 atom_without_slot = portage.dep.remove_slot(atom)
5507 atom_without_slot += str(atom.use)
5508 atom_without_slot = portage.dep.Atom(atom_without_slot)
5509 if portage.match_from_list(
5510 atom_without_slot, [inst_pkg]):
5511 cpv_list = [inst_pkg.cpv]
5516 pkg_status = "merge"
5517 if installed or onlydeps:
5518 pkg_status = "nomerge"
# Candidate loop (cpv_list is ordered; descent direction and
# the surrounding loop header are elided in this view).
5521 for cpv in cpv_list:
5522 # Make --noreplace take precedence over --newuse.
5523 if not installed and noreplace and \
5524 cpv in vardb.match(atom):
5525 # If the installed version is masked, it may
5526 # be necessary to look at lower versions,
5527 # in case there is a visible downgrade.
5529 reinstall_for_flags = None
5530 cache_key = (pkg_type, root, cpv, pkg_status)
5531 calculated_use = True
5532 pkg = self._pkg_cache.get(cache_key)
5534 calculated_use = False
5536 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5539 pkg = Package(built=built, cpv=cpv,
5540 installed=installed, metadata=metadata,
5541 onlydeps=onlydeps, root_config=root_config,
5543 metadata = pkg.metadata
5544 if not built and ("?" in metadata["LICENSE"] or \
5545 "?" in metadata["PROVIDE"]):
5546 # This is avoided whenever possible because
5547 # it's expensive. It only needs to be done here
5548 # if it has an effect on visibility.
5549 pkgsettings.setcpv(pkg)
5550 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5551 calculated_use = True
5552 self._pkg_cache[pkg] = pkg
5554 if not installed or (installed and matched_packages):
5555 # Only enforce visibility on installed packages
5556 # if there is at least one other visible package
5557 # available. By filtering installed masked packages
5558 # here, packages that have been masked since they
5559 # were installed can be automatically downgraded
5560 # to an unmasked version.
5562 if not visible(pkgsettings, pkg):
5564 except portage.exception.InvalidDependString:
5568 # Enable upgrade or downgrade to a version
5569 # with visible KEYWORDS when the installed
5570 # version is masked by KEYWORDS, but never
5571 # reinstall the same exact version only due
5572 # to a KEYWORDS mask.
5573 if installed and matched_packages and \
5574 pkgsettings._getMissingKeywords(
5575 pkg.cpv, pkg.metadata):
5576 different_version = None
5577 for avail_pkg in matched_packages:
5578 if not portage.dep.cpvequal(
5579 pkg.cpv, avail_pkg.cpv):
5580 different_version = avail_pkg
5582 if different_version is not None:
5583 # Only reinstall for KEYWORDS if
5584 # it's not the same version.
5587 if not pkg.built and not calculated_use:
5588 # This is avoided whenever possible because
5590 pkgsettings.setcpv(pkg)
5591 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5593 if pkg.cp != atom.cp:
5594 # A cpv can be returned from dbapi.match() as an
5595 # old-style virtual match even in cases when the
5596 # package does not actually PROVIDE the virtual.
5597 # Filter out any such false matches here.
5598 if not atom_set.findAtomForPackage(pkg):
# Track whether a non-installed candidate matches one of
# the user's argument atoms (feeds `selective` logic above).
5602 if root == self.target_root:
5604 # Ebuild USE must have been calculated prior
5605 # to this point, in case atoms have USE deps.
5606 myarg = self._iter_atoms_for_pkg(pkg).next()
5607 except StopIteration:
5609 except portage.exception.InvalidDependString:
5611 # masked by corruption
5613 if not installed and myarg:
5614 found_available_arg = True
# Reject candidates whose USE state cannot satisfy the
# atom's USE dependencies (unbuilt packages only).
5616 if atom.use and not pkg.built:
5617 use = pkg.use.enabled
5618 if atom.use.enabled.difference(use):
5620 if atom.use.disabled.intersection(use):
5622 if pkg.cp == atom_cp:
5623 if highest_version is None:
5624 highest_version = pkg
5625 elif pkg > highest_version:
5626 highest_version = pkg
5627 # At this point, we've found the highest visible
5628 # match from the current repo. Any lower versions
5629 # from this repo are ignored, so this so the loop
5630 # will always end with a break statement below
5632 if find_existing_node:
5633 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5636 if portage.dep.match_from_list(atom, [e_pkg]):
5637 if highest_version and \
5638 e_pkg.cp == atom_cp and \
5639 e_pkg < highest_version and \
5640 e_pkg.slot_atom != highest_version.slot_atom:
5641 # There is a higher version available in a
5642 # different slot, so this existing node is
5646 matched_packages.append(e_pkg)
5647 existing_node = e_pkg
5649 # Compare built package to current config and
5650 # reject the built package if necessary.
5651 if built and not installed and \
5652 ("--newuse" in self.myopts or \
5653 "--reinstall" in self.myopts):
5654 iuses = pkg.iuse.all
5655 old_use = pkg.use.enabled
5657 pkgsettings.setcpv(myeb)
5659 pkgsettings.setcpv(pkg)
5660 now_use = pkgsettings["PORTAGE_USE"].split()
5661 forced_flags = set()
5662 forced_flags.update(pkgsettings.useforce)
5663 forced_flags.update(pkgsettings.usemask)
5665 if myeb and not usepkgonly:
5666 cur_iuse = myeb.iuse.all
5667 if self._reinstall_for_flags(forced_flags,
5671 # Compare current config to installed package
5672 # and do not reinstall if possible.
5673 if not installed and \
5674 ("--newuse" in self.myopts or \
5675 "--reinstall" in self.myopts) and \
5676 cpv in vardb.match(atom):
5677 pkgsettings.setcpv(pkg)
5678 forced_flags = set()
5679 forced_flags.update(pkgsettings.useforce)
5680 forced_flags.update(pkgsettings.usemask)
5681 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
5682 old_iuse = set(filter_iuse_defaults(
5683 vardb.aux_get(cpv, ["IUSE"])[0].split()))
5684 cur_use = pkgsettings["PORTAGE_USE"].split()
5685 cur_iuse = pkg.iuse.all
5686 reinstall_for_flags = \
5687 self._reinstall_for_flags(
5688 forced_flags, old_use, old_iuse,
5690 if reinstall_for_flags:
5694 matched_packages.append(pkg)
5695 if reinstall_for_flags:
5696 self._reinstall_nodes[pkg] = \
5700 if not matched_packages:
5703 if "--debug" in self.myopts:
5704 for pkg in matched_packages:
5705 portage.writemsg("%s %s\n" % \
5706 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
5708 # Filter out any old-style virtual matches if they are
5709 # mixed with new-style virtual matches.
5710 cp = portage.dep_getkey(atom)
5711 if len(matched_packages) > 1 and \
5712 "virtual" == portage.catsplit(cp)[0]:
5713 for pkg in matched_packages:
5716 # Got a new-style virtual, so filter
5717 # out any old-style virtuals.
5718 matched_packages = [pkg for pkg in matched_packages \
5722 # If the installed version is in a different slot and it is higher than
5723 # the highest available visible package, _iter_atoms_for_pkg() may fail
5724 # to properly match the available package with a corresponding argument
5725 # atom. Detect this case and correct it here.
5726 if not selective and len(matched_packages) > 1 and \
5727 matched_packages[-1].installed and \
5728 matched_packages[-1].slot_atom != \
5729 matched_packages[-2].slot_atom and \
5730 matched_packages[-1] > matched_packages[-2]:
5731 pkg = matched_packages[-2]
5732 if pkg.root == self.target_root and \
5733 self._set_atoms.findAtomForPackage(pkg):
5734 # Select the available package instead
5735 # of the installed package.
5736 matched_packages.pop()
# More than one candidate survived: keep only those equal to the
# best version, then return the most-preferred type among them.
5738 if len(matched_packages) > 1:
5739 bestmatch = portage.best(
5740 [pkg.cpv for pkg in matched_packages])
5741 matched_packages = [pkg for pkg in matched_packages \
5742 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5744 # ordered by type preference ("ebuild" type is the last resort)
5745 return matched_packages[-1], existing_node
# Graph-completion variant of package selection: resolve `atom` only
# against packages already represented in self._graph_trees. Prefers
# the graph node for the matching slot when one exists; otherwise
# returns the cached installed ("nomerge") Package for the highest
# match. Returns a (package, existing_node) pair.
5747 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
5749 Select packages that have already been added to the graph or
5750 those that are installed and have not been scheduled for
5753 graph_db = self._graph_trees[root]["porttree"].dbapi
5754 matches = graph_db.match(atom)
5757 cpv = matches[-1] # highest match
5758 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
5759 graph_db.aux_get(cpv, ["SLOT"])[0])
# NOTE(review): the `if e_pkg is not None: return` branch between
# these lines is elided in this view.
5760 e_pkg = self._slot_pkg_map[root].get(slot_atom)
5763 # Since this cpv exists in the graph_db,
5764 # we must have a cached Package instance.
5765 cache_key = ("installed", root, cpv, "nomerge")
5766 return (self._pkg_cache[cache_key], None)
# NOTE(review): numbered partial dump — interior lines are missing
# throughout (e.g. 5782-5783 early return, 5786-5787, 5806 continue,
# 5813/5815/5817 loop-control lines, 5843/5845 match handling,
# 5852/5854-5856 return paths). Comments below describe only what the
# visible lines establish.
#
# Purpose: pull the deep dependencies of the required sets (args,
# system, world) into the graph so that an upgrade cannot silently
# break an initially-satisfied deep dependency. Gated on the
# "complete" param / --complete-graph because it is slow.
5768 def _complete_graph(self):
5770 Add any deep dependencies of required sets (args, system, world) that
5771 have not been pulled into the graph yet. This ensures that the graph
5772 is consistent such that initially satisfied deep dependencies are not
5773 broken in the new graph. Initially unsatisfied dependencies are
5774 irrelevant since we only want to avoid breaking dependencies that are
5777 Since this method can consume enough time to disturb users, it is
5778 currently only enabled by the --complete-graph option.
5780 if "--buildpkgonly" in self.myopts or \
5781 "recurse" not in self.myparams:
5784 if "complete" not in self.myparams:
5785 # Skip this to avoid consuming enough time to disturb users.
5788 # Put the depgraph into a mode that causes it to only
5789 # select packages that have already been added to the
5790 # graph or those that are installed and have not been
5791 # scheduled for replacement. Also, toggle the "deep"
5792 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selection strategies (see
# _select_pkg_from_graph); "deep" is forced on for the traversal.
5794 self._select_atoms = self._select_atoms_from_graph
5795 self._select_package = self._select_pkg_from_graph
5796 already_deep = "deep" in self.myparams
5797 if not already_deep:
5798 self.myparams.add("deep")
5800 for root in self.roots:
5801 required_set_names = self._required_set_names.copy()
5802 if root == self.target_root and \
5803 (already_deep or "empty" in self.myparams)
5804 required_set_names.difference_update(self._sets)
5805 if not required_set_names and not self._ignored_deps:
5807 root_config = self.roots[root]
5808 setconfig = root_config.setconfig
5810 # Reuse existing SetArg instances when available.
5811 for arg in self.digraph.root_nodes():
5812 if not isinstance(arg, SetArg):
5814 if arg.root_config != root_config:
5816 if arg.name in required_set_names:
5818 required_set_names.remove(arg.name)
5819 # Create new SetArg instances only when necessary.
5820 for s in required_set_names:
5821 expanded_set = InternalPackageSet(
5822 initial_atoms=setconfig.getSetAtoms(s))
5823 atom = SETPREFIX + s
5824 args.append(SetArg(arg=atom, set=expanded_set,
5825 root_config=root_config))
5826 vardb = root_config.trees["vartree"].dbapi
# Queue every atom of each (re)used SetArg for graph traversal.
5828 for atom in arg.set:
5829 self._dep_stack.append(
5830 Dependency(atom=atom, root=root, parent=arg))
5831 if self._ignored_deps:
5832 self._dep_stack.extend(self._ignored_deps)
5833 self._ignored_deps = []
5834 if not self._create_graph(allow_unsatisfied=True):
5836 # Check the unsatisfied deps to see if any initially satisfied deps
5837 # will become unsatisfied due to an upgrade. Initially unsatisfied
5838 # deps are irrelevant since we only want to avoid breaking deps
5839 # that are initially satisfied.
5840 while self._unsatisfied_deps:
5841 dep = self._unsatisfied_deps.pop()
5842 matches = vardb.match_pkgs(dep.atom)
5844 self._initially_unsatisfied_deps.append(dep)
5846 # An scheduled installation broke a deep dependency.
5847 # Add the installed package to the graph so that it
5848 # will be appropriately reported as a slot collision
5849 # (possibly solvable via backtracking).
5850 pkg = matches[-1] # highest match
5851 if not self._add_pkg(pkg, dep):
5853 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): numbered partial dump — missing lines include the
# docstring close (5862-5865), the cache-hit return (5869), and the
# final return of the newly-built pkg (5882-5883). Visible logic only
# is described below.
#
# Purpose: cache-or-create a Package instance for (type_name, root,
# cpv, "nomerge"). On a miss, metadata is read via aux_get from the
# appropriate tree dbapi; for ebuilds, USE is computed through
# settings.setcpv() and stored as PORTAGE_USE.
5857 def _pkg(self, cpv, type_name, root_config, installed=False):
5859 Get a package instance from the cache, or create a new
5860 one if necessary. Raises KeyError from aux_get if it
5861 failures for some reason (package does not exist or is
5866 operation = "nomerge"
5867 pkg = self._pkg_cache.get(
5868 (type_name, root_config.root, cpv, operation))
5870 tree_type = self.pkg_tree_map[type_name]
5871 db = root_config.trees[tree_type].dbapi
# _trees_orig holds the non-fake trees; its _aux_cache_keys lists the
# metadata keys worth fetching in one aux_get round trip.
5872 db_keys = list(self._trees_orig[root_config.root][
5873 tree_type].dbapi._aux_cache_keys)
5874 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5875 pkg = Package(cpv=cpv, metadata=metadata,
5876 root_config=root_config, installed=installed)
5877 if type_name == "ebuild":
5878 settings = self.pkgsettings[root_config.root]
5879 settings.setcpv(pkg)
5880 pkg.metadata["USE"] = settings["PORTAGE_USE"]
# Package instances hash by their cache key, so the pkg doubles as key.
5881 self._pkg_cache[pkg] = pkg
# NOTE(review): numbered partial dump — many interior lines are absent
# (e.g. 5889/5892-5893 early return, 5907/5910-5911 loop headers over
# installed packages, 5924-5928/5930-5936 the blockers-lookup branches,
# 5947-5948/5956-5958/5964/5967-5971 cache-validation branches,
# 5977-5980 try:, 5986/5990/5993-5995 error-path lines, 6052/6054/6058
# atoms-building lines, 6062/6064-6066 blocked_final init, 6086-6087/
# 6093/6098/6105/6110-6111/6116/6119 slot-conflict branch bodies,
# 6123-6125/6131-6132 graph checks, 6158-6160 return). Comments below
# are limited to what the visible lines show.
5884 def validate_blockers(self):
5885 """Remove any blockers from the digraph that do not match any of the
5886 packages within the graph. If necessary, create hard deps to ensure
5887 correct merge order such that mutually blocking packages are never
5888 installed simultaneously."""
5890 if "--buildpkgonly" in self.myopts or \
5891 "--nodeps" in self.myopts:
5894 #if "deep" in self.myparams:
5896 # Pull in blockers from all installed packages that haven't already
5897 # been pulled into the depgraph. This is not enabled by default
5898 # due to the performance penalty that is incurred by all the
5899 # additional dep_check calls that are required.
# --- Phase 1: collect blocker atoms from every installed package,
# --- using BlockerCache keyed by COUNTER to skip redundant dep_checks.
5901 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
5902 for myroot in self.trees:
5903 vardb = self.trees[myroot]["vartree"].dbapi
5904 portdb = self.trees[myroot]["porttree"].dbapi
5905 pkgsettings = self.pkgsettings[myroot]
5906 final_db = self.mydbapi[myroot]
5908 blocker_cache = BlockerCache(myroot, vardb)
5909 stale_cache = set(blocker_cache)
5912 stale_cache.discard(cpv)
5913 pkg_in_graph = self.digraph.contains(pkg)
5915 # Check for masked installed packages. Only warn about
5916 # packages that are in the graph in order to avoid warning
5917 # about those that will be automatically uninstalled during
5918 # the merge process or by --depclean.
5920 if pkg_in_graph and not visible(pkgsettings, pkg):
5921 self._masked_installed.add(pkg)
5923 blocker_atoms = None
5929 self._blocker_parents.child_nodes(pkg))
5934 self._irrelevant_blockers.child_nodes(pkg))
5937 if blockers is not None:
5938 blockers = set(str(blocker.atom) \
5939 for blocker in blockers)
5941 # If this node has any blockers, create a "nomerge"
5942 # node for it so that they can be enforced.
5943 self.spinner.update()
5944 blocker_data = blocker_cache.get(cpv)
# COUNTER mismatch means the vdb entry changed since it was cached;
# the cached blocker data is invalid for this installation.
5945 if blocker_data is not None and \
5946 blocker_data.counter != long(pkg.metadata["COUNTER"]):
5949 # If blocker data from the graph is available, use
5950 # it to validate the cache and update the cache if
5952 if blocker_data is not None and \
5953 blockers is not None:
5954 if not blockers.symmetric_difference(
5955 blocker_data.atoms):
5959 if blocker_data is None and \
5960 blockers is not None:
5961 # Re-use the blockers from the graph.
5962 blocker_atoms = sorted(blockers)
5963 counter = long(pkg.metadata["COUNTER"])
5965 blocker_cache.BlockerData(counter, blocker_atoms)
5966 blocker_cache[pkg.cpv] = blocker_data
5970 blocker_atoms = blocker_data.atoms
5972 # Use aux_get() to trigger FakeVartree global
5973 # updates on *DEPEND when appropriate.
5974 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5975 # It is crucial to pass in final_db here in order to
5976 # optimize dep_check calls by eliminating atoms via
5977 # dep_wordreduce and dep_eval calls.
# Strictness is relaxed around dep_check and restored at line 5996;
# installed packages may carry depstrings invalid by current rules.
5979 portage.dep._dep_check_strict = False
5981 success, atoms = portage.dep_check(depstr,
5982 final_db, pkgsettings, myuse=pkg.use.enabled,
5983 trees=self._graph_trees, myroot=myroot)
5984 except Exception, e:
5985 if isinstance(e, SystemExit):
5987 # This is helpful, for example, if a ValueError
5988 # is thrown from cpv_expand due to multiple
5989 # matches (this can happen if an atom lacks a
5991 show_invalid_depstring_notice(
5992 pkg, depstr, str(e))
5996 portage.dep._dep_check_strict = True
5998 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
5999 if replacement_pkg and \
6000 replacement_pkg[0].operation == "merge":
6001 # This package is being replaced anyway, so
6002 # ignore invalid dependencies so as not to
6003 # annoy the user too much (otherwise they'd be
6004 # forced to manually unmerge it first).
6006 show_invalid_depstring_notice(pkg, depstr, atoms)
# Only negated atoms ("!...") are blockers; keep them sorted so the
# cache comparison via symmetric_difference stays stable.
6008 blocker_atoms = [myatom for myatom in atoms \
6009 if myatom.startswith("!")]
6010 blocker_atoms.sort()
6011 counter = long(pkg.metadata["COUNTER"])
6012 blocker_cache[cpv] = \
6013 blocker_cache.BlockerData(counter, blocker_atoms)
6016 for atom in blocker_atoms:
6017 blocker = Blocker(atom=portage.dep.Atom(atom),
6018 eapi=pkg.metadata["EAPI"], root=myroot)
6019 self._blocker_parents.add(blocker, pkg)
6020 except portage.exception.InvalidAtom, e:
6021 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6022 show_invalid_depstring_notice(
6023 pkg, depstr, "Invalid Atom: %s" % (e,))
# Entries never revisited this pass belong to packages no longer
# installed; drop them and persist the cache.
6025 for cpv in stale_cache:
6026 del blocker_cache[cpv]
6027 blocker_cache.flush()
6030 # Discard any "uninstall" tasks scheduled by previous calls
6031 # to this method, since those tasks may not make sense given
6032 # the current graph state.
6033 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6034 if previous_uninstall_tasks:
6035 self._blocker_uninstalls = digraph()
6036 self.digraph.difference_update(previous_uninstall_tasks)
# --- Phase 2: for each blocker, expand virtuals, find blocked
# --- packages in initial (vdb) and final (post-merge) states, and
# --- either discard the blocker, schedule an uninstall, or mark it
# --- unsolvable.
6038 for blocker in self._blocker_parents.leaf_nodes():
6039 self.spinner.update()
6040 root_config = self.roots[blocker.root]
6041 virtuals = root_config.settings.getvirtuals()
6042 myroot = blocker.root
6043 initial_db = self.trees[myroot]["vartree"].dbapi
6044 final_db = self.mydbapi[myroot]
6046 provider_virtual = False
6047 if blocker.cp in virtuals and \
6048 not self._have_new_virt(blocker.root, blocker.cp):
6049 provider_virtual = True
6051 if provider_virtual:
6053 for provider_entry in virtuals[blocker.cp]:
6055 portage.dep_getkey(provider_entry)
6056 atoms.append(blocker.atom.replace(
6057 blocker.cp, provider_cp))
6059 atoms = [blocker.atom]
6061 blocked_initial = []
6063 blocked_initial.extend(initial_db.match_pkgs(atom))
6067 blocked_final.extend(final_db.match_pkgs(atom))
6069 if not blocked_initial and not blocked_final:
6070 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6071 self._blocker_parents.remove(blocker)
6072 # Discard any parents that don't have any more blockers.
6073 for pkg in parent_pkgs:
6074 self._irrelevant_blockers.add(blocker, pkg)
6075 if not self._blocker_parents.child_nodes(pkg):
6076 self._blocker_parents.remove(pkg)
6078 for parent in self._blocker_parents.parent_nodes(blocker):
6079 unresolved_blocks = False
6080 depends_on_order = set()
6081 for pkg in blocked_initial:
6082 if pkg.slot_atom == parent.slot_atom:
6083 # TODO: Support blocks within slots in cases where it
6084 # might make sense. For example, a new version might
6085 # require that the old version be uninstalled at build
6088 if parent.installed:
6089 # Two currently installed packages conflict with
6090 # eachother. Ignore this case since the damage
6091 # is already done and this would be likely to
6092 # confuse users if displayed like a normal blocker.
6094 if parent.operation == "merge":
6095 # Maybe the blocked package can be replaced or simply
6096 # unmerged to resolve this block.
6097 depends_on_order.add((pkg, parent))
6099 # None of the above blocker resolutions techniques apply,
6100 # so apparently this one is unresolvable.
6101 unresolved_blocks = True
6102 for pkg in blocked_final:
6103 if pkg.slot_atom == parent.slot_atom:
6104 # TODO: Support blocks within slots.
6106 if parent.operation == "nomerge" and \
6107 pkg.operation == "nomerge":
6108 # This blocker will be handled the next time that a
6109 # merge of either package is triggered.
6112 # Maybe the blocking package can be
6113 # unmerged to resolve this block.
6114 if parent.operation == "merge" and pkg.installed:
6115 depends_on_order.add((pkg, parent))
6117 elif parent.operation == "nomerge":
6118 depends_on_order.add((parent, pkg))
6120 # None of the above blocker resolutions techniques apply,
6121 # so apparently this one is unresolvable.
6122 unresolved_blocks = True
6124 # Make sure we don't unmerge any package that have been pulled
6126 if not unresolved_blocks and depends_on_order:
6127 for inst_pkg, inst_task in depends_on_order:
6128 if self.digraph.contains(inst_pkg) and \
6129 self.digraph.parent_nodes(inst_pkg):
6130 unresolved_blocks = True
6133 if not unresolved_blocks and depends_on_order:
6134 for inst_pkg, inst_task in depends_on_order:
# Synthesize an "uninstall" Package mirroring the installed one;
# registered in _pkg_cache like any other node.
6135 uninst_task = Package(built=inst_pkg.built,
6136 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6137 metadata=inst_pkg.metadata,
6138 operation="uninstall",
6139 root_config=inst_pkg.root_config,
6140 type_name=inst_pkg.type_name)
6141 self._pkg_cache[uninst_task] = uninst_task
6142 # Enforce correct merge order with a hard dep.
6143 self.digraph.addnode(uninst_task, inst_task,
6144 priority=BlockerDepPriority.instance)
6145 # Count references to this blocker so that it can be
6146 # invalidated after nodes referencing it have been
6148 self._blocker_uninstalls.addnode(uninst_task, blocker)
6149 if not unresolved_blocks and not depends_on_order:
6150 self._irrelevant_blockers.add(blocker, parent)
6151 self._blocker_parents.remove_edge(blocker, parent)
6152 if not self._blocker_parents.parent_nodes(blocker):
6153 self._blocker_parents.remove(blocker)
6154 if not self._blocker_parents.child_nodes(parent):
6155 self._blocker_parents.remove(parent)
6156 if unresolved_blocks:
6157 self._unsolvable_blockers.add(blocker, parent)
# NOTE(review): numbered partial dump — lines 6162 and 6166-6168 are
# missing, so the accumulator initialization, the loop body's effect,
# and the return statement are not visible. Presumably this returns
# True when any of the listed non-merging options is in self.myopts
# (blocker conflicts are tolerable when nothing will really be
# installed) — TODO confirm against the full source.
6161 def _accept_blocker_conflicts(self):
6163 for x in ("--buildpkgonly", "--fetchonly",
6164 "--fetch-all-uri", "--nodeps", "--pretend"):
6165 if x in self.myopts:
# NOTE(review): numbered partial dump — line 6173 (presumably the
# `node_info = {}` initialization) is missing; the code as shown would
# not run without it.
#
# Purpose: sort mygraph.order in place so nodes with more parents
# (higher reference count) come first, via a Python-2 cmp function
# that subtracts counts (descending order).
6170 def _merge_order_bias(self, mygraph):
6171 """Order nodes from highest to lowest overall reference count for
6172 optimal leaf node selection."""
6174 for node in mygraph.order:
6175 node_info[node] = len(mygraph.parent_nodes(node))
6176 def cmp_merge_preference(node1, node2):
# node2 - node1 yields descending sort by parent count.
6177 return node_info[node2] - node_info[node1]
6178 mygraph.order.sort(cmp_merge_preference)
# NOTE(review): numbered partial dump — missing lines include 6181,
# 6184, the loop-exit after _serialize_tasks succeeds (6188-6189), and
# the `reversed` handling plus final return (6191-6194). Visible logic:
# keep resolving conflicts and retrying serialization until
# _serialized_tasks_cache is populated, then return a copy of it.
# The `reversed` parameter shadows the builtin; presumably it reverses
# the returned list — TODO confirm in the full source.
6180 def altlist(self, reversed=False):
6182 while self._serialized_tasks_cache is None:
6183 self._resolve_conflicts()
6185 self._serialized_tasks_cache, self._scheduler_graph = \
6186 self._serialize_tasks()
# _serialize_tasks_retry signals "graph changed, loop again".
6187 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cache.
6190 retlist = self._serialized_tasks_cache[:]
# NOTE(review): numbered partial dump — lines 6196, 6204, 6208, 6210
# are missing (docstring delimiters and, presumably, the call that
# populates self._scheduler_graph when it is None — TODO confirm).
6195 def schedulerGraph(self):
6197 The scheduler graph is identical to the normal one except that
6198 uninstall edges are reversed in specific cases that require
6199 conflicting packages to be temporarily installed simultaneously.
6200 This is intended for use by the Scheduler in it's parallelization
6201 logic. It ensures that temporary simultaneous installation of
6202 conflicting packages is avoided when appropriate (especially for
6203 !!atom blockers), but allowed in specific cases that require it.
6205 Note that this method calls break_refs() which alters the state of
6206 internal Package instances such that this depgraph instance should
6207 not be used to perform any more calculations.
6209 if self._scheduler_graph is None:
# break_refs detaches nodes from this depgraph; see its docstring.
6211 self.break_refs(self._scheduler_graph.order)
6212 return self._scheduler_graph
# NOTE(review): numbered partial dump — missing lines include 6215/
# 6220 (docstring delimiters) and 6221, which is presumably the
# `for node in nodes:` loop header that the indented body below
# belongs to — TODO confirm.
6214 def break_refs(self, nodes):
6216 Take a mergelist like that returned from self.altlist() and
6217 break any references that lead back to the depgraph. This is
6218 useful if you want to hold references to packages without
6219 also holding the depgraph on the heap.
# hasattr-guard: mergelists can contain non-Package entries (e.g.
# Blocker instances) that carry no root_config.
6222 if hasattr(node, "root_config"):
6223 # The FakeVartree references the _package_cache which
6224 # references the depgraph. So that Package instances don't
6225 # hold the depgraph and FakeVartree on the heap, replace
6226 # the RootConfig that references the FakeVartree with the
6227 # original RootConfig instance which references the actual
6229 node.root_config = \
6230 self._trees_orig[node.root_config.root]["root_config"]
# Complete the graph and re-validate blockers; each step raises the
# internal-error exception on failure so altlist()'s caller surfaces
# it. (Line 6235 of the original dump is absent here — presumably a
# blank separator line.)
6232 def _resolve_conflicts(self):
6233 if not self._complete_graph():
6234 raise self._unknown_internal_error()
6236 if not self.validate_blockers():
6237 raise self._unknown_internal_error()
# NOTE(review): numbered partial dump — interior lines are missing all
# through this very large method (examples: 6247/6252 loop scaffolding,
# 6256 break, 6260/6264 docstring delimiters, 6270-6273 cmp return
# values, 6275 retlist/asap_nodes init, 6282, 6287-6288, 6290/6293/6298
# in get_nodes, 6308-6313/6316/6318/6321/6325/6327/6336/6340 portage
# special-casing, 6349-6350 prefer_asap init, 6358/6361/6363-6366/6369
# commentary and loop entry, and many single lines inside the main
# while loop and the uninstall-selection pass). Comments below describe
# only what the visible lines establish; do not treat gaps as absent
# behavior.
#
# Purpose (from visible code): linearize self.digraph into retlist, a
# merge-ordered task list, alongside scheduler_graph. Repeatedly picks
# leaf nodes under progressively looser edge-priority thresholds,
# prefers "asap" nodes (the portage replacement and its RDEPENDs),
# schedules blocker-driven Uninstall tasks when no leaf is available,
# and raises _serialize_tasks_retry / _unknown_internal_error when the
# graph must be completed first or cannot be serialized.
6239 def _serialize_tasks(self):
6240 scheduler_graph = self.digraph.copy()
6241 mygraph=self.digraph.copy()
6242 # Prune "nomerge" root nodes if nothing depends on them, since
6243 # otherwise they slow down merge order calculation. Don't remove
6244 # non-root nodes since they help optimize merge order in some cases
6245 # such as revdep-rebuild.
6246 removed_nodes = set()
6248 for node in mygraph.root_nodes():
6249 if not isinstance(node, Package) or \
6250 node.installed or node.onlydeps:
6251 removed_nodes.add(node)
6253 self.spinner.update()
6254 mygraph.difference_update(removed_nodes)
6255 if not removed_nodes:
6257 removed_nodes.clear()
6258 self._merge_order_bias(mygraph)
6259 def cmp_circular_bias(n1, n2):
6261 RDEPEND is stronger than PDEPEND and this function
6262 measures such a strength bias within a circular
6263 dependency relationship.
6265 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6266 ignore_priority=DepPriority.MEDIUM_SOFT)
6267 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6268 ignore_priority=DepPriority.MEDIUM_SOFT)
6269 if n1_n2_medium == n2_n1_medium:
6274 myblocker_uninstalls = self._blocker_uninstalls.copy()
6276 # Contains uninstall tasks that have been scheduled to
6277 # occur after overlapping blockers have been installed.
6278 scheduled_uninstalls = set()
6279 # Contains any Uninstall tasks that have been ignored
6280 # in order to avoid the circular deps code path. These
6281 # correspond to blocker conflicts that could not be
6283 ignored_uninstall_tasks = set()
6284 have_uninstall_task = False
6285 complete = "complete" in self.myparams
6286 myblocker_parents = self._blocker_parents.copy()
6289 def get_nodes(**kwargs):
6291 Returns leaf nodes excluding Uninstall instances
6292 since those should be executed as late as possible.
6294 return [node for node in mygraph.leaf_nodes(**kwargs) \
6295 if isinstance(node, Package) and \
6296 (node.operation != "uninstall" or \
6297 node in scheduled_uninstalls)]
6299 # sys-apps/portage needs special treatment if ROOT="/"
6300 running_root = self._running_root.root
6301 from portage.const import PORTAGE_PACKAGE_ATOM
6302 runtime_deps = InternalPackageSet(
6303 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6304 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6305 PORTAGE_PACKAGE_ATOM)
6306 replacement_portage = self.mydbapi[running_root].match_pkgs(
6307 PORTAGE_PACKAGE_ATOM)
6310 running_portage = running_portage[0]
6312 running_portage = None
6314 if replacement_portage:
6315 replacement_portage = replacement_portage[0]
6317 replacement_portage = None
6319 if replacement_portage == running_portage:
6320 replacement_portage = None
6322 if replacement_portage is not None:
6323 # update from running_portage to replacement_portage asap
6324 asap_nodes.append(replacement_portage)
6326 if running_portage is not None:
# Collect portage's own RDEPEND atoms so its runtime deps are never
# uninstalled out from under it (see runtime_deps usage below).
6328 portage_rdepend = self._select_atoms_highest_available(
6329 running_root, running_portage.metadata["RDEPEND"],
6330 myuse=running_portage.use.enabled,
6331 parent=running_portage, strict=False)
6332 except portage.exception.InvalidDependString, e:
6333 portage.writemsg("!!! Invalid RDEPEND in " + \
6334 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6335 (running_root, running_portage.cpv, e), noiselevel=-1)
6337 portage_rdepend = []
6338 runtime_deps.update(atom for atom in portage_rdepend \
6339 if not atom.startswith("!"))
6341 ignore_priority_soft_range = [None]
6342 ignore_priority_soft_range.extend(
6343 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6344 tree_mode = "--tree" in self.myopts
6345 # Tracks whether or not the current iteration should prefer asap_nodes
6346 # if available. This is set to False when the previous iteration
6347 # failed to select any nodes. It is reset whenever nodes are
6348 # successfully selected.
6351 # By default, try to avoid selecting root nodes whenever possible. This
6352 # helps ensure that the maximimum possible number of soft dependencies
6353 # have been removed from the graph before their parent nodes have
6354 # selected. This is especially important when those dependencies are
6355 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6356 # CHOST has been changed (like when building a stage3 from a stage2).
6357 accept_root_node = False
6359 # State of prefer_asap and accept_root_node flags for successive
6360 # iterations that loosen the criteria for node selection.
6362 # iteration prefer_asap accept_root_node
6367 # If no nodes are selected on the 3rd iteration, it is due to
6368 # unresolved blockers or circular dependencies.
# --- Main selection loop: drain mygraph one batch of nodes at a time.
6370 while not mygraph.empty():
6371 self.spinner.update()
6372 selected_nodes = None
6373 ignore_priority = None
6374 if prefer_asap and asap_nodes:
6375 """ASAP nodes are merged before their soft deps."""
6376 asap_nodes = [node for node in asap_nodes \
6377 if mygraph.contains(node)]
6378 for node in asap_nodes:
6379 if not mygraph.child_nodes(node,
6380 ignore_priority=DepPriority.SOFT):
6381 selected_nodes = [node]
6382 asap_nodes.remove(node)
6384 if not selected_nodes and \
6385 not (prefer_asap and asap_nodes):
# Progressively ignore lower-priority edges until leaves appear.
6386 for ignore_priority in ignore_priority_soft_range:
6387 nodes = get_nodes(ignore_priority=ignore_priority)
6391 if ignore_priority is None and not tree_mode:
6392 # Greedily pop all of these nodes since no relationship
6393 # has been ignored. This optimization destroys --tree
6394 # output, so it's disabled in reversed mode. If there
6395 # is a mix of merge and uninstall nodes, save the
6396 # uninstall nodes from later since sometimes a merge
6397 # node will render an install node unnecessary, and
6398 # we want to avoid doing a separate uninstall task in
6400 merge_nodes = [node for node in nodes \
6401 if node.operation == "merge"]
6403 selected_nodes = merge_nodes
6405 selected_nodes = nodes
6407 # For optimal merge order:
6408 # * Only pop one node.
6409 # * Removing a root node (node without a parent)
6410 # will not produce a leaf node, so avoid it.
6412 if mygraph.parent_nodes(node):
6413 # found a non-root node
6414 selected_nodes = [node]
6416 if not selected_nodes and \
6417 (accept_root_node or ignore_priority is None):
6418 # settle for a root node
6419 selected_nodes = [nodes[0]]
6421 if not selected_nodes:
6422 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6424 """Recursively gather a group of nodes that RDEPEND on
6425 eachother. This ensures that they are merged as a group
6426 and get their RDEPENDs satisfied as soon as possible."""
6427 def gather_deps(ignore_priority,
6428 mergeable_nodes, selected_nodes, node):
6429 if node in selected_nodes:
6431 if node not in mergeable_nodes:
6433 if node == replacement_portage and \
6434 mygraph.child_nodes(node,
6435 ignore_priority=DepPriority.MEDIUM_SOFT):
6436 # Make sure that portage always has all of it's
6437 # RDEPENDs installed first.
6439 selected_nodes.add(node)
6440 for child in mygraph.child_nodes(node,
6441 ignore_priority=ignore_priority):
6442 if not gather_deps(ignore_priority,
6443 mergeable_nodes, selected_nodes, child):
6446 mergeable_nodes = set(nodes)
6447 if prefer_asap and asap_nodes:
6449 for ignore_priority in xrange(DepPriority.SOFT,
6450 DepPriority.MEDIUM_SOFT + 1):
6452 if nodes is not asap_nodes and \
6453 not accept_root_node and \
6454 not mygraph.parent_nodes(node):
6456 selected_nodes = set()
6457 if gather_deps(ignore_priority,
6458 mergeable_nodes, selected_nodes, node):
6461 selected_nodes = None
6465 # If any nodes have been selected here, it's always
6466 # possible that anything up to a MEDIUM_SOFT priority
6467 # relationship has been ignored. This state is recorded
6468 # in ignore_priority so that relevant nodes will be
6469 # added to asap_nodes when appropriate.
6471 ignore_priority = DepPriority.MEDIUM_SOFT
6473 if prefer_asap and asap_nodes and not selected_nodes:
6474 # We failed to find any asap nodes to merge, so ignore
6475 # them for the next iteration.
6479 if not selected_nodes and not accept_root_node:
6480 # Maybe there are only root nodes left, so accept them
6481 # for the next iteration.
6482 accept_root_node = True
6485 if selected_nodes and ignore_priority > DepPriority.SOFT:
6486 # Try to merge ignored medium deps as soon as possible.
6487 for node in selected_nodes:
6488 children = set(mygraph.child_nodes(node))
# Partition children into soft / medium-soft priority buckets; only
# the medium-soft remainder is promoted into asap_nodes.
6489 soft = children.difference(
6490 mygraph.child_nodes(node,
6491 ignore_priority=DepPriority.SOFT))
6492 medium_soft = children.difference(
6493 mygraph.child_nodes(node,
6494 ignore_priority=DepPriority.MEDIUM_SOFT))
6495 medium_soft.difference_update(soft)
6496 for child in medium_soft:
6497 if child in selected_nodes:
6499 if child in asap_nodes:
6501 asap_nodes.append(child)
6503 if selected_nodes and len(selected_nodes) > 1:
6504 if not isinstance(selected_nodes, list):
6505 selected_nodes = list(selected_nodes)
6506 selected_nodes.sort(cmp_circular_bias)
# --- No leaf found: try to break the deadlock with an Uninstall task.
6508 if not selected_nodes and not myblocker_uninstalls.is_empty():
6509 # An Uninstall task needs to be executed in order to
6510 # avoid conflict if possible.
6511 min_parent_deps = None
6513 for task in myblocker_uninstalls.leaf_nodes():
6514 # Do some sanity checks so that system or world packages
6515 # don't get uninstalled inappropriately here (only really
6516 # necessary when --complete-graph has not been enabled).
6518 if task in ignored_uninstall_tasks:
6521 if task in scheduled_uninstalls:
6522 # It's been scheduled but it hasn't
6523 # been executed yet due to dependence
6524 # on installation of blocking packages.
6527 root_config = self.roots[task.root]
6528 inst_pkg = self._pkg_cache[
6529 ("installed", task.root, task.cpv, "nomerge")]
6531 if self.digraph.contains(inst_pkg):
6534 forbid_overlap = False
6535 heuristic_overlap = False
6536 for blocker in myblocker_uninstalls.parent_nodes(task):
# EAPI 0/1 have no !!atom syntax, so overlap intent is heuristic.
6537 if blocker.eapi in ("0", "1"):
6538 heuristic_overlap = True
6539 elif blocker.atom.blocker.overlap.forbid:
6540 forbid_overlap = True
6542 if forbid_overlap and running_root == task.root:
6545 if heuristic_overlap and running_root == task.root:
6546 # Never uninstall sys-apps/portage or it's essential
6547 # dependencies, except through replacement.
6549 runtime_dep_atoms = \
6550 list(runtime_deps.iterAtomsForPackage(task))
6551 except portage.exception.InvalidDependString, e:
6552 portage.writemsg("!!! Invalid PROVIDE in " + \
6553 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6554 (task.root, task.cpv, e), noiselevel=-1)
6558 # Don't uninstall a runtime dep if it appears
6559 # to be the only suitable one installed.
6561 vardb = root_config.trees["vartree"].dbapi
6562 for atom in runtime_dep_atoms:
6563 other_version = None
6564 for pkg in vardb.match_pkgs(atom):
6565 if pkg.cpv == task.cpv and \
6566 pkg.metadata["COUNTER"] == \
6567 task.metadata["COUNTER"]:
6571 if other_version is None:
6577 # For packages in the system set, don't take
6578 # any chances. If the conflict can't be resolved
6579 # by a normal replacement operation then abort.
6582 for atom in root_config.sets[
6583 "system"].iterAtomsForPackage(task):
6586 except portage.exception.InvalidDependString, e:
6587 portage.writemsg("!!! Invalid PROVIDE in " + \
6588 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6589 (task.root, task.cpv, e), noiselevel=-1)
6595 # Note that the world check isn't always
6596 # necessary since self._complete_graph() will
6597 # add all packages from the system and world sets to the
6598 # graph. This just allows unresolved conflicts to be
6599 # detected as early as possible, which makes it possible
6600 # to avoid calling self._complete_graph() when it is
6601 # unnecessary due to blockers triggering an abortion.
6603 # For packages in the world set, go ahead an uninstall
6604 # when necessary, as long as the atom will be satisfied
6605 # in the final state.
6606 graph_db = self.mydbapi[task.root]
6609 for atom in root_config.sets[
6610 "world"].iterAtomsForPackage(task):
6612 for pkg in graph_db.match_pkgs(atom):
6620 except portage.exception.InvalidDependString, e:
6621 portage.writemsg("!!! Invalid PROVIDE in " + \
6622 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6623 (task.root, task.cpv, e), noiselevel=-1)
6629 # Check the deps of parent nodes to ensure that
6630 # the chosen task produces a leaf node. Maybe
6631 # this can be optimized some more to make the
6632 # best possible choice, but the current algorithm
6633 # is simple and should be near optimal for most
6636 for parent in mygraph.parent_nodes(task):
6637 parent_deps.update(mygraph.child_nodes(parent,
6638 ignore_priority=DepPriority.MEDIUM_SOFT))
6639 parent_deps.remove(task)
# Keep the candidate whose parents have the fewest other deps, to
# maximize the chance it becomes a leaf after the uninstall.
6640 if min_parent_deps is None or \
6641 len(parent_deps) < min_parent_deps:
6642 min_parent_deps = len(parent_deps)
6645 if uninst_task is not None:
6646 # The uninstall is performed only after blocking
6647 # packages have been merged on top of it. File
6648 # collisions between blocking packages are detected
6649 # and removed from the list of files to be uninstalled.
6650 scheduled_uninstalls.add(uninst_task)
6651 parent_nodes = mygraph.parent_nodes(uninst_task)
6653 # Reverse the parent -> uninstall edges since we want
6654 # to do the uninstall after blocking packages have
6655 # been merged on top of it.
6656 mygraph.remove(uninst_task)
6657 for blocked_pkg in parent_nodes:
6658 mygraph.add(blocked_pkg, uninst_task,
6659 priority=BlockerDepPriority.instance)
6660 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6661 scheduler_graph.add(blocked_pkg, uninst_task,
6662 priority=BlockerDepPriority.instance)
6665 # None of the Uninstall tasks are acceptable, so
6666 # the corresponding blockers are unresolvable.
6667 # We need to drop an Uninstall task here in order
6668 # to avoid the circular deps code path, but the
6669 # blocker will still be counted as an unresolved
6671 for node in myblocker_uninstalls.leaf_nodes():
6673 mygraph.remove(node)
6677 ignored_uninstall_tasks.add(node)
6680 # After dropping an Uninstall task, reset
6681 # the state variables for leaf node selection and
6682 # continue trying to select leaf nodes.
6684 accept_root_node = False
6687 if not selected_nodes:
6688 self._circular_deps_for_display = mygraph
6689 raise self._unknown_internal_error()
6691 # At this point, we've succeeded in selecting one or more nodes, so
6692 # it's now safe to reset the prefer_asap and accept_root_node flags
6693 # to their default states.
6695 accept_root_node = False
6697 mygraph.difference_update(selected_nodes)
# --- Emit the selected nodes, resolving uninstall/blocker bookkeeping.
6699 for node in selected_nodes:
6700 if isinstance(node, Package) and \
6701 node.operation == "nomerge":
6704 # Handle interactions between blockers
6705 # and uninstallation tasks.
6706 solved_blockers = set()
6708 if isinstance(node, Package) and \
6709 "uninstall" == node.operation:
6710 have_uninstall_task = True
6713 vardb = self.trees[node.root]["vartree"].dbapi
6714 previous_cpv = vardb.match(node.slot_atom)
6716 # The package will be replaced by this one, so remove
6717 # the corresponding Uninstall task if necessary.
6718 previous_cpv = previous_cpv[0]
6720 ("installed", node.root, previous_cpv, "uninstall")
6722 mygraph.remove(uninst_task)
6726 if uninst_task is not None and \
6727 uninst_task not in ignored_uninstall_tasks and \
6728 myblocker_uninstalls.contains(uninst_task):
6729 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6730 myblocker_uninstalls.remove(uninst_task)
6731 # Discard any blockers that this Uninstall solves.
6732 for blocker in blocker_nodes:
6733 if not myblocker_uninstalls.child_nodes(blocker):
6734 myblocker_uninstalls.remove(blocker)
6735 solved_blockers.add(blocker)
6737 retlist.append(node)
6739 if (isinstance(node, Package) and \
6740 "uninstall" == node.operation) or \
6741 (uninst_task is not None and \
6742 uninst_task in scheduled_uninstalls):
6743 # Include satisfied blockers in the merge list
6744 # since the user might be interested and also
6745 # it serves as an indicator that blocking packages
6746 # will be temporarily installed simultaneously.
6747 for blocker in solved_blockers:
6748 retlist.append(Blocker(atom=blocker.atom,
6749 root=blocker.root, eapi=blocker.eapi,
# --- Post-loop: surface unsolved blockers and decide whether to retry.
6752 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
6753 for node in myblocker_uninstalls.root_nodes():
6754 unsolvable_blockers.add(node)
6756 for blocker in unsolvable_blockers:
6757 retlist.append(blocker)
6759 # If any Uninstall tasks need to be executed in order
6760 # to avoid a conflict, complete the graph with any
6761 # dependencies that may have been initially
6762 # neglected (to ensure that unsafe Uninstall tasks
6763 # are properly identified and blocked from execution).
6764 if have_uninstall_task and \
6766 not unsolvable_blockers:
6767 self.myparams.add("complete")
6768 raise self._serialize_tasks_retry("")
6770 if unsolvable_blockers and \
6771 not self._accept_blocker_conflicts():
6772 self._unsatisfied_blockers_for_display = unsolvable_blockers
6773 self._serialized_tasks_cache = retlist[:]
6774 self._scheduler_graph = scheduler_graph
6775 raise self._unknown_internal_error()
6777 if self._slot_collision_info and \
6778 not self._accept_blocker_conflicts():
6779 self._serialized_tasks_cache = retlist[:]
6780 self._scheduler_graph = scheduler_graph
6781 raise self._unknown_internal_error()
6783 return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
"""Report a circular-dependency failure: prune acyclic root nodes
from *mygraph*, display the remaining (cyclic) nodes as a tree with
their USE flags, then dump the graph for debugging.

Side effect: mutates self.myopts — forces --tree and drops
--quiet/--verbose so the cycle is always rendered in tree form.
"""
# No leaf nodes are available, so we have a circular
# dependency panic situation. Reduce the noise level to a
# minimum via repeated elimination of root nodes since they
# have no parents and thus can not be part of a cycle.
root_nodes = mygraph.root_nodes(
ignore_priority=DepPriority.MEDIUM_SOFT)
mygraph.difference_update(root_nodes)
# Display the USE flags that are enabled on nodes that are part
# of dependency cycles in case that helps the user decide to
# disable some of them.
tempgraph = mygraph.copy()
# Build an ordering of the remaining nodes (leaves removed first),
# then reverse it so the display runs from roots downward.
while not tempgraph.empty():
nodes = tempgraph.leaf_nodes()
node = tempgraph.order[0]
display_order.append(node)
tempgraph.remove(node)
display_order.reverse()
# Force tree output regardless of the user's verbosity options.
self.myopts.pop("--quiet", None)
self.myopts.pop("--verbose", None)
self.myopts["--tree"] = True
portage.writemsg("\n\n", noiselevel=-1)
self.display(display_order)
prefix = colorize("BAD", " * ")
portage.writemsg("\n", noiselevel=-1)
portage.writemsg(prefix + "Error: circular dependencies:\n",
portage.writemsg("\n", noiselevel=-1)
mygraph.debug_print()
portage.writemsg("\n", noiselevel=-1)
portage.writemsg(prefix + "Note that circular dependencies " + \
"can often be avoided by temporarily\n", noiselevel=-1)
portage.writemsg(prefix + "disabling USE flags that trigger " + \
"optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """Display the cached merge list, unless that exact list (in either
    direction) has already been shown."""
    tasks = self._serialized_tasks_cache
    if tasks is None:
        return
    shown = self._displayed_list
    if shown and (shown == tasks or shown == list(reversed(tasks))):
        # Already on screen in forward or reversed order; nothing to do.
        return
    display_list = list(tasks)
    if "--tree" in self.myopts:
        display_list.reverse()
    self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """Show the merge list, then print an error explaining that it
    contains mutually blocking packages.  A pointer to the blocker
    documentation is added unless --quiet is in effect."""
    self._show_merge_list()
    from textwrap import wrap
    warn_prefix = colorize("BAD", " * ")
    text = ("Error: The above package list contains "
        "packages which cannot be installed "
        "at the same time on the same system.")
    portage.writemsg("\n", noiselevel=-1)
    for chunk in wrap(text, 70):
        portage.writemsg(warn_prefix + chunk + "\n", noiselevel=-1)
    if "--quiet" not in self.myopts:
        show_blocker_docs_link()
def display(self, mylist, favorites=[], verbosity=None):
"""Render the merge list to stdout.

mylist holds Package/Blocker nodes; when --tree is active it is
rebuilt below as (node, depth, ordered) tuples.  verbosity defaults
from the options: 1 for --quiet, 3 for --verbose, otherwise 2.

Side effect: stores mylist in self._displayed_list so that
display_problems() can avoid printing the same list twice.
NOTE(review): `favorites=[]` is a mutable default argument; it is
only read here (wrapped in InternalPackageSet), so it is harmless,
and it is kept for interface compatibility.
"""
# This is used to prevent display_problems() from
# redundantly displaying this exact same merge list
# again via _show_merge_list().
self._displayed_list = mylist
if verbosity is None:
verbosity = ("--quiet" in self.myopts and 1 or \
"--verbose" in self.myopts and 3 or 2)
favorites_set = InternalPackageSet(favorites)
oneshot = "--oneshot" in self.myopts or \
"--onlydeps" in self.myopts
columns = "--columns" in self.myopts
counters = PackageCounters()
# In quiet mode USE strings are suppressed entirely: stub out
# create_use_string; otherwise define the full formatter.
if verbosity == 1 and "--verbose" not in self.myopts:
def create_use_string(*args):
def create_use_string(name, cur_iuse, iuse_forced, cur_use,
is_new, reinst_flags,
all_flags=(verbosity == 3 or "--quiet" in self.myopts),
alphabetical=("--alphabetical" in self.myopts)):
# Compute the flag deltas between the old and new package:
# enabled/disabled now, and flags dropped from IUSE.
cur_iuse = set(cur_iuse)
enabled_flags = cur_iuse.intersection(cur_use)
removed_iuse = set(old_iuse).difference(cur_iuse)
any_iuse = cur_iuse.union(old_iuse)
any_iuse = list(any_iuse)
for flag in any_iuse:
reinst_flag = reinst_flags and flag in reinst_flags
# Color-code each flag: red = newly enabled, yellow%* = new in
# IUSE, green* = toggled, yellow-% = dropped from IUSE;
# forced flags are shown parenthesized.
if flag in enabled_flags:
if is_new or flag in old_use and \
(all_flags or reinst_flag):
flag_str = red(flag)
elif flag not in old_iuse:
flag_str = yellow(flag) + "%*"
elif flag not in old_use:
flag_str = green(flag) + "*"
elif flag in removed_iuse:
if all_flags or reinst_flag:
flag_str = yellow("-" + flag) + "%"
flag_str = "(" + flag_str + ")"
removed.append(flag_str)
if is_new or flag in old_iuse and \
flag not in old_use and \
(all_flags or reinst_flag):
flag_str = blue("-" + flag)
elif flag not in old_iuse:
flag_str = yellow("-" + flag)
if flag not in iuse_forced:
elif flag in old_use:
flag_str = green("-" + flag) + "*"
if flag in iuse_forced:
flag_str = "(" + flag_str + ")"
enabled.append(flag_str)
disabled.append(flag_str)
ret = " ".join(enabled)
ret = " ".join(enabled + disabled + removed)
ret = '%s="%s" ' % (name, ret)
repo_display = RepoDisplay(self.roots)
mygraph = self.digraph.copy()
# If there are any Uninstall instances, add the corresponding
# blockers to the digraph (useful for --tree display).
executed_uninstalls = set(node for node in mylist \
if isinstance(node, Package) and node.operation == "unmerge")
for uninstall in self._blocker_uninstalls.leaf_nodes():
uninstall_parents = \
self._blocker_uninstalls.parent_nodes(uninstall)
if not uninstall_parents:
# Remove the corresponding "nomerge" node and substitute
# the Uninstall node.
inst_pkg = self._pkg_cache[
("installed", uninstall.root, uninstall.cpv, "nomerge")]
mygraph.remove(inst_pkg)
inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
inst_pkg_blockers = []
# Break the Package -> Uninstall edges.
mygraph.remove(uninstall)
# Resolution of a package's blockers
# depends on its own uninstallation.
for blocker in inst_pkg_blockers:
mygraph.add(uninstall, blocker)
# Expand Package -> Uninstall edges into
# Package -> Blocker -> Uninstall edges.
for blocker in uninstall_parents:
mygraph.add(uninstall, blocker)
for parent in self._blocker_parents.parent_nodes(blocker):
if parent != inst_pkg:
mygraph.add(blocker, parent)
# If the uninstall task did not need to be executed because
# of an upgrade, display Blocker -> Upgrade edges since the
# corresponding Blocker -> Uninstall edges will not be shown.
self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
if upgrade_node is not None and \
uninstall not in executed_uninstalls:
for blocker in uninstall_parents:
mygraph.add(upgrade_node, blocker)
# Pre-scan for unsatisfied blockers; they are appended to the
# list at depth 0 after the tree layout is computed.
unsatisfied_blockers = []
if isinstance(x, Blocker) and not x.satisfied:
unsatisfied_blockers.append(x)
# --tree: rebuild mylist as (node, depth, ordered) tuples laid
# out as an indented dependency tree.
if "--tree" in self.myopts:
depth = len(tree_nodes)
while depth and graph_key not in \
mygraph.child_nodes(tree_nodes[depth-1]):
tree_nodes = tree_nodes[:depth]
tree_nodes.append(graph_key)
display_list.append((x, depth, True))
shown_edges.add((graph_key, tree_nodes[depth-1]))
traversed_nodes = set() # prevent endless circles
traversed_nodes.add(graph_key)
def add_parents(current_node, ordered):
# Do not traverse to parents if this node is
# an argument or a direct member of a set that has
# been specified as an argument (system or world).
if current_node not in self._set_nodes:
parent_nodes = mygraph.parent_nodes(current_node)
child_nodes = set(mygraph.child_nodes(current_node))
selected_parent = None
# First, try to avoid a direct cycle.
for node in parent_nodes:
if not isinstance(node, (Blocker, Package)):
if node not in traversed_nodes and \
node not in child_nodes:
edge = (current_node, node)
if edge in shown_edges:
selected_parent = node
if not selected_parent:
# A direct cycle is unavoidable.
for node in parent_nodes:
if not isinstance(node, (Blocker, Package)):
if node not in traversed_nodes:
edge = (current_node, node)
if edge in shown_edges:
selected_parent = node
shown_edges.add((current_node, selected_parent))
traversed_nodes.add(selected_parent)
add_parents(selected_parent, False)
display_list.append((current_node,
len(tree_nodes), ordered))
tree_nodes.append(current_node)
add_parents(graph_key, True)
display_list.append((x, depth, True))
mylist = display_list
for x in unsatisfied_blockers:
mylist.append((x, 0, True))
# Walk the list bottom-up to drop redundant duplicates that the
# tree expansion introduced (unordered copies of adjacent nodes).
last_merge_depth = 0
for i in xrange(len(mylist)-1,-1,-1):
graph_key, depth, ordered = mylist[i]
if not ordered and depth == 0 and i > 0 \
and graph_key == mylist[i-1][0] and \
mylist[i-1][1] == 0:
# An ordered node got a consecutive duplicate when the tree was
if ordered and graph_key[-1] != "nomerge":
last_merge_depth = depth
if depth >= last_merge_depth or \
i < len(mylist) - 1 and \
depth >= mylist[i+1][1]:
from portage import flatten
from portage.dep import use_reduce, paren_reduce
# files to fetch list - avoids counting a same file twice
# in size display (verbose mode)
# Use this set to detect when all the "repoadd" strings are "[0]"
# and disable the entire repo display in this case.
# Main rendering loop over the (node, depth, ordered) entries.
for mylist_index in xrange(len(mylist)):
x, depth, ordered = mylist[mylist_index]
portdb = self.trees[myroot]["porttree"].dbapi
bindb = self.trees[myroot]["bintree"].dbapi
vardb = self.trees[myroot]["vartree"].dbapi
vartree = self.trees[myroot]["vartree"]
pkgsettings = self.pkgsettings[myroot]
indent = " " * depth
# Blocker entries render a "[blocks ...]" line instead of a
# package line.
if isinstance(x, Blocker):
blocker_style = "PKG_BLOCKER_SATISFIED"
addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
blocker_style = "PKG_BLOCKER"
addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
counters.blocks += 1
counters.blocks_satisfied += 1
resolved = portage.key_expand(
str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
if "--columns" in self.myopts and "--quiet" in self.myopts:
addl += " " + colorize(blocker_style, resolved)
addl = "[%s %s] %s%s" % \
(colorize(blocker_style, "blocks"),
addl, indent, colorize(blocker_style, resolved))
block_parents = self._blocker_parents.parent_nodes(x)
block_parents = set([pnode[2] for pnode in block_parents])
block_parents = ", ".join(block_parents)
addl += colorize(blocker_style,
" (\"%s\" is blocking %s)") % \
(str(x.atom).lstrip("!"), block_parents)
addl += colorize(blocker_style,
" (is blocking %s)") % block_parents
if isinstance(x, Blocker) and x.satisfied:
blockers.append(addl)
pkg_merge = ordered and pkg_status == "merge"
if not pkg_merge and pkg_status == "merge":
pkg_status = "nomerge"
built = pkg_type != "ebuild"
installed = pkg_type == "installed"
metadata = pkg.metadata
repo_name = metadata["repository"]
# Locate the repository that this package comes from (the
# ebuild's tree, or the binary package's recorded repo).
if pkg_type == "ebuild":
ebuild_path = portdb.findname(pkg_key)
if not ebuild_path: # shouldn't happen
raise portage.exception.PackageNotFound(pkg_key)
repo_path_real = os.path.dirname(os.path.dirname(
os.path.dirname(ebuild_path)))
repo_path_real = portdb.getRepositoryPath(repo_name)
pkg_use = list(pkg.use.enabled)
restrict = flatten(use_reduce(paren_reduce(
pkg.metadata["RESTRICT"]), uselist=pkg_use))
except portage.exception.InvalidDependString, e:
if not pkg.installed:
show_invalid_depstring_notice(x,
pkg.metadata["RESTRICT"], str(e))
if "ebuild" == pkg_type and x[3] != "nomerge" and \
"fetch" in restrict:
counters.restrict_fetch += 1
if portdb.fetch_check(pkg_key, pkg_use):
counters.restrict_fetch_satisfied += 1
#we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
#param is used for -u, where you still *do* want to see when something is being upgraded.
# Classify the operation: reinstall (R), uninstall, upgrade (U),
# downgrade (UD), new slot (NS) or brand new (N).
installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
if vardb.cpv_exists(pkg_key):
addl=" "+yellow("R")+fetch+" "
counters.reinst += 1
elif pkg_status == "uninstall":
counters.uninst += 1
# filter out old-style virtual matches
elif installed_versions and \
portage.cpv_getkey(installed_versions[0]) == \
portage.cpv_getkey(pkg_key):
myinslotlist = vardb.match(pkg.slot_atom)
# If this is the first install of a new-style virtual, we
# need to filter out old-style virtual matches.
if myinslotlist and \
portage.cpv_getkey(myinslotlist[0]) != \
portage.cpv_getkey(pkg_key):
myoldbest = myinslotlist[:]
if not portage.dep.cpvequal(pkg_key,
portage.best([pkg_key] + myoldbest)):
addl += turquoise("U")+blue("D")
counters.downgrades += 1
addl += turquoise("U") + " "
counters.upgrades += 1
# New slot, mark it new.
addl = " " + green("NS") + fetch + " "
myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
counters.newslot += 1
if "--changelog" in self.myopts:
inst_matches = vardb.match(pkg.slot_atom)
changelogs.extend(self.calc_changelog(
portdb.findname(pkg_key),
inst_matches[0], pkg_key))
addl = " " + green("N") + " " + fetch + " "
# Forced (use.force/use.mask) flags are collected so the USE
# string can show them parenthesized.
forced_flags = set()
pkgsettings.setcpv(pkg) # for package.use.{mask,force}
forced_flags.update(pkgsettings.useforce)
forced_flags.update(pkgsettings.usemask)
cur_use = [flag for flag in pkg.use.enabled \
if flag in pkg.iuse.all]
cur_iuse = sorted(pkg.iuse.all)
if myoldbest and myinslotlist:
previous_cpv = myoldbest[0]
previous_cpv = pkg.cpv
if vardb.cpv_exists(previous_cpv):
old_iuse, old_use = vardb.aux_get(
previous_cpv, ["IUSE", "USE"])
old_iuse = list(set(
filter_iuse_defaults(old_iuse.split())))
old_use = old_use.split()
old_use = [flag for flag in old_use if flag in old_iuse]
use_expand = pkgsettings["USE_EXPAND"].lower().split()
use_expand.reverse()
use_expand_hidden = \
pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
# Split flat flag lists into per-USE_EXPAND buckets so each
# expansion variable (e.g. VIDEO_CARDS) prints separately.
def map_to_use_expand(myvals, forcedFlags=False,
for exp in use_expand:
for val in myvals[:]:
if val.startswith(exp.lower()+"_"):
if val in forced_flags:
forced[exp].add(val[len(exp)+1:])
ret[exp].append(val[len(exp)+1:])
forced["USE"] = [val for val in myvals \
if val in forced_flags]
for exp in use_expand_hidden:
# Prevent USE_EXPAND_HIDDEN flags from being hidden if they
# are the only thing that triggered reinstallation.
reinst_flags_map = {}
reinstall_for_flags = self._reinstall_nodes.get(pkg)
reinst_expand_map = None
if reinstall_for_flags:
reinst_flags_map = map_to_use_expand(
list(reinstall_for_flags), removeHidden=False)
for k in list(reinst_flags_map):
if not reinst_flags_map[k]:
del reinst_flags_map[k]
if not reinst_flags_map.get("USE"):
reinst_expand_map = reinst_flags_map.copy()
reinst_expand_map.pop("USE", None)
if reinst_expand_map and \
not set(reinst_expand_map).difference(
use_expand_hidden = \
set(use_expand_hidden).difference(
cur_iuse_map, iuse_forced = \
map_to_use_expand(cur_iuse, forcedFlags=True)
cur_use_map = map_to_use_expand(cur_use)
old_iuse_map = map_to_use_expand(old_iuse)
old_use_map = map_to_use_expand(old_use)
use_expand.insert(0, "USE")
for key in use_expand:
if key in use_expand_hidden:
verboseadd += create_use_string(key.upper(),
cur_iuse_map[key], iuse_forced[key],
cur_use_map[key], old_iuse_map[key],
old_use_map[key], is_new,
reinst_flags_map.get(key))
# Verbose mode: add the total download size of the ebuild's
# fetchable files (each distfile counted once per run).
if pkg_type == "ebuild" and pkg_merge:
myfilesdict = portdb.getfetchsizes(pkg_key,
useflags=pkg_use, debug=self.edebug)
except portage.exception.InvalidDependString, e:
src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
show_invalid_depstring_notice(x, src_uri, str(e))
if myfilesdict is None:
myfilesdict="[empty/missing/bad digest]"
for myfetchfile in myfilesdict:
if myfetchfile not in myfetchlist:
mysize+=myfilesdict[myfetchfile]
myfetchlist.append(myfetchfile)
counters.totalsize += mysize
verboseadd += format_size(mysize)
# assign index for a previous version in the same slot
has_previous = False
repo_name_prev = None
slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
slot_matches = vardb.match(slot_atom)
repo_name_prev = vardb.aux_get(slot_matches[0],
# now use the data to generate output
if pkg.installed or not has_previous:
repoadd = repo_display.repoStr(repo_path_real)
repo_path_prev = None
repo_path_prev = portdb.getRepositoryPath(
if repo_path_prev == repo_path_real:
repoadd = repo_display.repoStr(repo_path_real)
repoadd = "%s=>%s" % (
repo_display.repoStr(repo_path_prev),
repo_display.repoStr(repo_path_real))
repoadd_set.add(repoadd)
xs = [portage.cpv_getkey(pkg_key)] + \
list(portage.catpkgsplit(pkg_key)[2:])
if "COLUMNWIDTH" in self.settings:
mywidth = int(self.settings["COLUMNWIDTH"])
except ValueError, e:
portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
"!!! Unable to parse COLUMNWIDTH='%s'\n" % \
self.settings["COLUMNWIDTH"], noiselevel=-1)
oldlp = mywidth - 30
# Convert myoldbest from a list to a string.
for pos, key in enumerate(myoldbest):
key = portage.catpkgsplit(key)[2] + \
"-" + portage.catpkgsplit(key)[3]
if key[-3:] == "-r0":
myoldbest[pos] = key
myoldbest = blue("["+", ".join(myoldbest)+"]")
root_config = self.roots[myroot]
system_set = root_config.sets["system"]
world_set = root_config.sets["world"]
pkg_system = system_set.findAtomForPackage(pkg)
pkg_world = world_set.findAtomForPackage(pkg)
if not (oneshot or pkg_world) and \
myroot == self.target_root and \
favorites_set.findAtomForPackage(pkg):
# Maybe it will be added to world now.
if create_world_atom(pkg, favorites_set, root_config):
except portage.exception.InvalidDependString:
# This is reported elsewhere if relevant.
# Colorize a package string according to merge status and
# world/system membership.
def pkgprint(pkg_str):
return colorize("PKG_MERGE_SYSTEM", pkg_str)
return colorize("PKG_MERGE_WORLD", pkg_str)
return colorize("PKG_MERGE", pkg_str)
elif pkg_status == "uninstall":
return colorize("PKG_UNINSTALL", pkg_str)
return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
return colorize("PKG_NOMERGE_WORLD", pkg_str)
return colorize("PKG_NOMERGE", pkg_str)
properties = flatten(use_reduce(paren_reduce(
pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
except portage.exception.InvalidDependString, e:
if not pkg.installed:
show_invalid_depstring_notice(pkg,
pkg.metadata["PROPERTIES"], str(e))
# PROPERTIES="interactive" packages are flagged with "I".
interactive = "interactive" in properties
if interactive and pkg.operation == "merge":
addl = colorize("WARN", "I") + addl[1:]
counters.interactive += 1
# Format the output line; layout differs for --columns,
# --quiet and plain modes.
if "--columns" in self.myopts:
if "--quiet" in self.myopts:
myprint=addl+" "+indent+pkgprint(pkg_cp)
myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
myprint=myprint+myoldbest
myprint=myprint+darkgreen("to "+x[1])
myprint = "[%s] %s%s" % \
(pkgprint(pkg_status.ljust(13)),
indent, pkgprint(pkg.cp))
myprint = "[%s %s] %s%s" % \
(pkgprint(pkg.type_name), addl,
indent, pkgprint(pkg.cp))
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
if (oldlp-nc_len(myprint)) > 0:
myprint=myprint+" "*(oldlp-nc_len(myprint))
myprint=myprint+myoldbest
myprint += darkgreen("to " + pkg.root)
myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
myprint = "[" + pkg_type + " " + addl + "] "
myprint += indent + pkgprint(pkg_key) + " " + \
myoldbest + darkgreen("to " + myroot)
if "--columns" in self.myopts:
if "--quiet" in self.myopts:
myprint=addl+" "+indent+pkgprint(pkg_cp)
myprint=myprint+" "+green(xs[1]+xs[2])+" "
myprint=myprint+myoldbest
myprint = "[%s] %s%s" % \
(pkgprint(pkg_status.ljust(13)),
indent, pkgprint(pkg.cp))
myprint = "[%s %s] %s%s" % \
(pkgprint(pkg.type_name), addl,
indent, pkgprint(pkg.cp))
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
if (oldlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(oldlp-nc_len(myprint)))
myprint += myoldbest
myprint = "[%s] %s%s %s" % \
(pkgprint(pkg_status.ljust(13)),
indent, pkgprint(pkg.cpv),
myprint = "[%s %s] %s%s %s" % \
(pkgprint(pkg_type), addl, indent,
pkgprint(pkg.cpv), myoldbest)
if columns and pkg.operation == "uninstall":
p.append((myprint, verboseadd, repoadd))
# Warn when a new version of portage itself appears mid-list:
# emerge will restart after merging it.
if "--tree" not in self.myopts and \
"--quiet" not in self.myopts and \
not self._opts_no_restart.intersection(self.myopts) and \
pkg.root == self._running_root.root and \
portage.match_from_list(
portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
not vardb.cpv_exists(pkg.cpv) and \
"--quiet" not in self.myopts:
if mylist_index < len(mylist) - 1:
p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
p.append(colorize("WARN", " then resume the merge."))
# Suppress the repo column entirely when every entry is "[0]".
show_repos = repoadd_set and repoadd_set != set(["0"])
if isinstance(x, basestring):
out.write("%s\n" % (x,))
myprint, verboseadd, repoadd = x
myprint += " " + verboseadd
if show_repos and repoadd:
myprint += " " + teal("[%s]" % repoadd)
out.write("%s\n" % (myprint,))
sys.stdout.write(str(repo_display))
# --changelog: print the collected ChangeLog entries last.
if "--changelog" in self.myopts:
for revision,text in changelogs:
print bold('*'+revision)
sys.stdout.write(text)
def display_problems(self):
"""
Display problems with the dependency graph such as slot collisions.
This is called internally by display() to show the problems _after_
the merge list where it is most likely to be seen, but if display()
is not going to be called then this method should be called explicitly
to ensure that the user is notified of problems with the graph.

All output goes to stderr, except for unsatisfied dependencies which
go to stdout for parsing by programs such as autounmask.
"""
# Note that show_masked_packages() sends its output to
# stdout, and some programs such as autounmask parse the
# output in cases when emerge bails out. However, when
# show_masked_packages() is called for installed packages
# here, the message is a warning that is more appropriate
# to send to stderr, so temporarily redirect stdout to
# stderr. TODO: Fix output code so there's a cleaner way
# to redirect everything to stderr.
sys.stdout = sys.stderr
self._display_problems()
# This goes to stdout for parsing by programs like autounmask.
for pargs, kwargs in self._unsatisfied_deps_for_display:
self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
"""Emit warnings and errors for dependency-graph problems: circular
dependencies, unsatisfied blockers (or slot collisions), world-file
problems, missing ebuilds for requested arguments, package.provided
conflicts, and masked installed packages.

Writes to whatever sys.stderr/sys.stdout are currently bound to;
display_problems() redirects stdout to stderr around this call.
"""
if self._circular_deps_for_display is not None:
self._show_circular_deps(
self._circular_deps_for_display)
# The user is only notified of a slot conflict if
# there are no unresolvable blocker conflicts.
if self._unsatisfied_blockers_for_display is not None:
self._show_unsatisfied_blockers(
self._unsatisfied_blockers_for_display)
self._show_slot_collision_notice()
# TODO: Add generic support for "set problem" handlers so that
# the below warnings aren't special cases for world only.
if self._missing_args:
world_problems = False
if "world" in self._sets:
# Filter out indirect members of world (from nested sets)
# since only direct members of world are desired here.
world_set = self.roots[self.target_root].sets["world"]
for arg, atom in self._missing_args:
if arg.name == "world" and atom in world_set:
world_problems = True
sys.stderr.write("\n!!! Problems have been " + \
"detected with your world file\n")
sys.stderr.write("!!! Please run " + \
green("emaint --check world")+"\n\n")
if self._missing_args:
sys.stderr.write("\n" + colorize("BAD", "!!!") + \
" Ebuilds for the following packages are either all\n")
sys.stderr.write(colorize("BAD", "!!!") + \
" masked or don't exist:\n")
sys.stderr.write(" ".join(str(atom) for arg, atom in \
self._missing_args) + "\n")
# Requested atoms that are satisfied only via package.provided
# cannot be merged; explain where each one was pulled in from.
if self._pprovided_args:
for arg, atom in self._pprovided_args:
if isinstance(arg, SetArg):
arg_atom = (atom, atom)
arg_atom = (arg.arg, atom)
refs = arg_refs.setdefault(arg_atom, [])
if parent not in refs:
msg.append(bad("\nWARNING: "))
if len(self._pprovided_args) > 1:
msg.append("Requested packages will not be " + \
"merged because they are listed in\n")
msg.append("A requested package will not be " + \
"merged because it is listed in\n")
msg.append("package.provided:\n\n")
problems_sets = set()
for (arg, atom), refs in arg_refs.iteritems():
problems_sets.update(refs)
ref_string = ", ".join(["'%s'" % name for name in refs])
ref_string = " pulled in by " + ref_string
msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
if "world" in problems_sets:
msg.append("This problem can be solved in one of the following ways:\n\n")
msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
msg.append(" B) Uninstall offending packages (cleans them from world).\n")
msg.append(" C) Remove offending entries from package.provided.\n\n")
msg.append("The best course of action depends on the reason that an offending\n")
msg.append("package.provided entry exists.\n\n")
sys.stderr.write("".join(msg))
# Warn about installed packages that are now masked.
masked_packages = []
for pkg in self._masked_installed:
root_config = pkg.root_config
pkgsettings = self.pkgsettings[pkg.root]
mreasons = get_masking_status(pkg, pkgsettings, root_config)
masked_packages.append((root_config, pkgsettings,
pkg.cpv, pkg.metadata, mreasons))
sys.stderr.write("\n" + colorize("BAD", "!!!") + \
" The following installed packages are masked:\n")
show_masked_packages(masked_packages)
def calc_changelog(self,ebuildpath,current,next):
"""Collect the ChangeLog entries between the installed version
(*current*) and the version about to be merged (*next*).

Versions are normalized (category dropped, trailing "-r0" stripped)
before matching them against the headers that find_changelog_tags()
extracts from the ChangeLog next to *ebuildpath*.
NOTE(review): parameter name `next` shadows the builtin; kept for
interface compatibility.
"""
if ebuildpath == None or not os.path.exists(ebuildpath):
# Normalize the installed version: drop the category, strip "-r0".
current = '-'.join(portage.catpkgsplit(current)[1:])
if current.endswith('-r0'):
current = current[:-3]
# Normalize the target version the same way.
next = '-'.join(portage.catpkgsplit(next)[1:])
if next.endswith('-r0'):
# The ChangeLog file lives in the same directory as the ebuild.
changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
changelog = open(changelogpath).read()
except SystemExit, e:
raise # Needed else can't exit
divisions = self.find_changelog_tags(changelog)
#print 'XX from',current,'to',next
#for div,text in divisions: print 'XX',div
# skip entries for all revisions above the one we are about to emerge
for i in range(len(divisions)):
if divisions[i][0]==next:
divisions = divisions[i:]
# find out how many entries we are going to display
for i in range(len(divisions)):
if divisions[i][0]==current:
divisions = divisions[:i]
# couldn't find the current revision in the list; display nothing
def find_changelog_tags(self,changelog):
"""Split a ChangeLog text into per-release chunks.

Scans for header lines of the form "*<version> ..." and collects
(release, entry-text) pairs; a ".ebuild" suffix and any trailing
"-r0" are stripped from the release token so versions compare
consistently with calc_changelog()'s normalization.
"""
# Header pattern: a line starting with "*", optional space, then the
# version token.
match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# No further headers: the remaining text belongs to the last release.
if release is not None:
divs.append((release,changelog))
# Text up to the next header belongs to the previously seen release.
if release is not None:
divs.append((release,changelog[:match.start()]))
changelog = changelog[match.end():]
release = match.group(1)
if release.endswith('.ebuild'):
# Some entries name the ebuild file; strip the extension.
release = release[:-7]
if release.endswith('-r0'):
release = release[:-3]
def saveNomergeFavorites(self):
"""Find atoms in favorites that are not in the mergelist and add them
to the world file if necessary."""
# Never touch the world file for dry-run / fetch-only style modes.
for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
"--oneshot", "--onlydeps", "--pretend"):
if x in self.myopts:
root_config = self.roots[self.target_root]
world_set = root_config.sets["world"]
world_locked = False
if hasattr(world_set, "lock"):
if hasattr(world_set, "load"):
world_set.load() # maybe it's changed on disk
args_set = self._sets["args"]
portdb = self.trees[self.target_root]["porttree"].dbapi
added_favorites = set()
# Collect world atoms for requested packages that needed no merge.
for x in self._set_nodes:
pkg_type, root, pkg_key, pkg_status = x
if pkg_status != "nomerge":
myfavkey = create_world_atom(x, args_set, root_config)
if myfavkey in added_favorites:
added_favorites.add(myfavkey)
except portage.exception.InvalidDependString, e:
writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
(pkg_key, str(e)), noiselevel=-1)
writemsg("!!! see '%s'\n\n" % os.path.join(
root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Requested sets (other than args/world) are recorded in world as
# set references (SETPREFIX-prefixed names).
for k in self._sets:
if k in ("args", "world") or not root_config.sets[k].world_candidate:
all_added.append(SETPREFIX + k)
all_added.extend(added_favorites)
print ">>> Recording %s in \"world\" favorites file..." % \
colorize("INFORM", str(a))
world_set.update(all_added)
def loadResumeCommand(self, resume_data, skip_masked=False):
    """
    Add a resume command to the graph and validate it in the process.  This
    will raise a PackageNotFound exception if a package is not available.

    @param resume_data: the "resume" entry from the mtimedb, expected to be
        a dict containing "mergelist" and "favorites" entries
    @param skip_masked: presumably allows masked packages to be dropped
        instead of aborting the resume -- TODO confirm against callers
    """
    # Reject malformed resume data early.
    if not isinstance(resume_data, dict):
    mergelist = resume_data.get("mergelist")
    if not isinstance(mergelist, list):
    fakedb = self.mydbapi
    serialized_tasks = []
    # Each mergelist entry is a 4-item list: [pkg_type, root, cpv, action].
    if not (isinstance(x, list) and len(x) == 4):
    pkg_type, myroot, pkg_key, action = x
    if pkg_type not in self.pkg_tree_map:
    if action != "merge":
    tree_type = self.pkg_tree_map[pkg_type]
    mydb = trees[myroot][tree_type].dbapi
    # Use the original (unfiltered) trees' aux cache keys for metadata.
    db_keys = list(self._trees_orig[myroot][
        tree_type].dbapi._aux_cache_keys)
    metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
    # It does not exist or it is corrupt.
    if action == "uninstall":
    raise portage.exception.PackageNotFound(pkg_key)
    installed = action == "uninstall"
    built = pkg_type != "ebuild"
    root_config = self.roots[myroot]
    pkg = Package(built=built, cpv=pkg_key,
        installed=installed, metadata=metadata,
        operation=action, root_config=root_config,
    if pkg_type == "ebuild":
        # Apply current config to compute the effective USE flags.
        pkgsettings = self.pkgsettings[myroot]
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
    self._pkg_cache[pkg] = pkg
    root_config = self.roots[pkg.root]
    if "merge" == pkg.operation and \
        not visible(root_config.settings, pkg):
        # The package has become masked since the resume list was
        # created; record it for later handling/display.
        masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
        self._unsatisfied_deps_for_display.append(
            ((pkg.root, "="+pkg.cpv), {"myparent":None}))
    fakedb[myroot].cpv_inject(pkg)
    serialized_tasks.append(pkg)
    self.spinner.update()
    if self._unsatisfied_deps_for_display:
    if not serialized_tasks or "--nodeps" in self.myopts:
        self._serialized_tasks_cache = serialized_tasks
        self._scheduler_graph = self.digraph
    # Prefer packages already present in the graph while resuming.
    self._select_package = self._select_pkg_from_graph
    self.myparams.add("selective")
    favorites = resume_data.get("favorites")
    args_set = self._sets["args"]
    if isinstance(favorites, list):
        args = self._load_favorites(favorites)
    for task in serialized_tasks:
        if isinstance(task, Package) and \
            task.operation == "merge":
            if not self._add_pkg(task, None):
    # Packages for argument atoms need to be explicitly
    # added via _add_pkg() so that they are included in the
    # digraph (needed at least for --tree display).
    for atom in arg.set:
        pkg, existing_node = self._select_package(
            arg.root_config.root, atom)
        if existing_node is None and \
            if not self._add_pkg(pkg, Dependency(atom=atom,
                root=pkg.root, parent=arg)):
    # Allow unsatisfied deps here to avoid showing a masking
    # message for an unsatisfied dep that isn't necessarily
    # masked.
    if not self._create_graph(allow_unsatisfied=True):
    if masked_tasks or self._unsatisfied_deps:
        # This probably means that a required package
        # was dropped via --skipfirst. It makes the
        # resume list invalid, so convert it to a
        # UnsatisfiedResumeDep exception.
        raise self.UnsatisfiedResumeDep(self,
            masked_tasks + self._unsatisfied_deps)
    self._serialized_tasks_cache = None
    except self._unknown_internal_error:
def _load_favorites(self, favorites):
    """
    Use a list of favorites to resume state from a
    previous select_files() call. This creates similar
    DependencyArg instances to those that would have
    been created by the original select_files() call.
    This allows Package instances to be matched with
    DependencyArg instances during graph creation.

    @param favorites: list of favorite atoms / set names (strings)
    @return: presumably the constructed DependencyArg list -- TODO confirm
    """
    root_config = self.roots[self.target_root]
    getSetAtoms = root_config.setconfig.getSetAtoms
    sets = root_config.sets
    # Skip anything that is not a string (stale/corrupt resume data).
    if not isinstance(x, basestring):
    if x in ("system", "world"):
    if x.startswith(SETPREFIX):
        # A set reference like "@world": strip the prefix to get the name.
        s = x[len(SETPREFIX):]
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
    # Plain atoms: validate and wrap in an AtomArg.
    if not portage.isvalidatom(x):
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    # Create the "args" package set from atoms and
    # packages given as arguments.
    args_set = self._sets["args"]
    if not isinstance(arg, (AtomArg, PackageArg)):
    if myatom in args_set:
    args_set.add(myatom)
    self._set_atoms.update(chain(*self._sets.itervalues()))
    # Index every (atom, root) pair back to the argument(s) that
    # produced it, for later reverse lookups.
    atom_arg_map = self._atom_arg_map
    for atom in arg.set:
        atom_key = (atom, arg.root_config.root)
        refs = atom_arg_map.get(atom_key)
        atom_arg_map[atom_key] = refs
class UnsatisfiedResumeDep(portage.exception.PortageException):
    """
    A dependency of a resume list is not installed. This
    can occur when a required package is dropped from the
    merge list via --skipfirst.
    """
    def __init__(self, depgraph, value):
        # Keep a reference to the depgraph so the caller can inspect
        # the state that produced the unsatisfied dependencies.
        portage.exception.PortageException.__init__(self, value)
        self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
    """Base class for exceptions used internally for control flow."""

    def __init__(self, value=""):
        # Forward the payload to PortageException; defaults to an
        # empty string when no detail is available.
        portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
    """
    Used by the depgraph internally to terminate graph creation.
    The specific reason for the failure should have been dumped
    to stderr, unfortunately, the exact reason for the failure
    may not be known at the point where it is caught.
    """
class _serialize_tasks_retry(_internal_exception):
    """
    This is raised by the _serialize_tasks() method when it needs to
    be called again for some reason. The only case that it's currently
    used for is when neglected dependencies need to be added to the
    graph in order to avoid making a potentially unsafe decision.
    """
class _dep_check_composite_db(portage.dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    via dep_check().
    """
    def __init__(self, depgraph, root):
        portage.dbapi.__init__(self)
        # Back-reference to the owning depgraph; per-atom results are
        # memoized in _match_cache, and matched Package instances are
        # retained in _cpv_pkg_map for aux_get() lookups.
        self._depgraph = depgraph
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def match(self, atom):
        # Check the memoized result first.
        ret = self._match_cache.get(atom)
        atom = self._dep_expand(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # Return the highest available from select_package() as well as
        # any matching slots in the graph db.
        slots.add(pkg.metadata["SLOT"])
        atom_cp = portage.dep_getkey(atom)
        if pkg.cp.startswith("virtual/"):
            # For new-style virtual lookahead that occurs inside
            # dep_check(), examine all slots. This is needed
            # so that newer slots will not unnecessarily be pulled in
            # when a satisfying lower slot is already installed. For
            # example, if virtual/jdk-1.4 is satisfied via kaffe then
            # there's no need to pull in a newer slot to satisfy a
            # virtual/jdk dependency.
            for db, pkg_type, built, installed, db_keys in \
                self._depgraph._filtered_trees[self._root]["dbs"]:
                for cpv in db.match(atom):
                    if portage.cpv_getkey(cpv) != pkg.cp:
                    slots.add(db.aux_get(cpv, ["SLOT"])[0])
        if self._visible(pkg):
            self._cpv_pkg_map[pkg.cpv] = pkg
        # The highest match's slot is already handled; select a package
        # for each remaining slot.
        slots.remove(pkg.metadata["SLOT"])
        slot_atom = "%s:%s" % (atom_cp, slots.pop())
        pkg, existing = self._depgraph._select_package(
            self._root, slot_atom)
        if not self._visible(pkg):
        self._cpv_pkg_map[pkg.cpv] = pkg
        # Cache the sorted result under the original (unexpanded) atom.
        self._cpv_sort_ascending(ret)
        self._match_cache[orig_atom] = ret

    def _visible(self, pkg):
        # Installed packages are only exposed in "selective" mode;
        # otherwise they are hidden from this composite view.
        if pkg.installed and "selective" not in self._depgraph.myparams:
            arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
        except (StopIteration, portage.exception.InvalidDependString):
            self._depgraph.pkgsettings[pkg.root], pkg):
        except portage.exception.InvalidDependString:

    def _dep_expand(self, atom):
        """
        This is only needed for old installed packages that may
        contain atoms that are not fully qualified with a specific
        category. Emulate the cpv_expand() function that's used by
        dbapi.match() in cases like this. If there are multiple
        matches, it's often due to a new-style virtual that has
        been added, so try to filter those out to avoid raising
        an ambiguity error when possible.
        """
        root_config = self._depgraph.roots[self._root]
        expanded_atoms = self._depgraph._dep_expand(root_config, atom)
        if len(expanded_atoms) > 1:
            # Prefer non-virtual matches when expansion is ambiguous.
            non_virtual_atoms = []
            for x in expanded_atoms:
                if not portage.dep_getkey(x).startswith("virtual/"):
                    non_virtual_atoms.append(x)
            if len(non_virtual_atoms) == 1:
                expanded_atoms = non_virtual_atoms
            if len(expanded_atoms) > 1:
                # compatible with portage.cpv_expand()
                raise portage.exception.AmbiguousPackageName(
                    [portage.dep_getkey(x) for x in expanded_atoms])
        atom = expanded_atoms[0]
        # No match: decide between the "virtual" and "null" categories
        # based on the package-name part of the atom.
        null_atom = insert_category_into_atom(atom, "null")
        null_cp = portage.dep_getkey(null_atom)
        cat, atom_pn = portage.catsplit(null_cp)
        virts_p = root_config.settings.get_virts_p().get(atom_pn)
        # Allow the resolver to choose which virtual.
        atom = insert_category_into_atom(atom, "virtual")
        atom = insert_category_into_atom(atom, "null")

    def aux_get(self, cpv, wants):
        # cpv must have been returned by a prior match() call, which
        # populated _cpv_pkg_map. Missing keys yield empty strings.
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]
class _package_cache(dict):
    """Dict of Package instances that also feeds visible_pkgs on insert."""

    def __init__(self, depgraph):
        self._depgraph = depgraph

    def __setitem__(self, k, v):
        dict.__setitem__(self, k, v)
        root_config = self._depgraph.roots[v.root]
        # Inject the package into the visible-packages db unless it is
        # an installed package with missing keywords.
        if visible(root_config.settings, v) and \
            not (v.installed and \
            v.root_config.settings._getMissingKeywords(v.cpv, v.metadata)):
            root_config.visible_pkgs.cpv_inject(v)
        except portage.exception.InvalidDependString:
class RepoDisplay(object):
    """Maps repository paths to short display indices like [0], [1], [?]."""

    def __init__(self, roots):
        # repo_path -> display index, assigned in first-seen order.
        self._shown_repos = {}
        self._unknown_repo = False
        # Collect PORTDIR plus all overlays from every root's settings.
        for root_config in roots.itervalues():
            portdir = root_config.settings.get("PORTDIR")
            repo_paths.add(portdir)
            overlays = root_config.settings.get("PORTDIR_OVERLAY")
            repo_paths.update(overlays.split())
        repo_paths = list(repo_paths)
        self._repo_paths = repo_paths
        self._repo_paths_real = [ os.path.realpath(repo_path) \
            for repo_path in repo_paths ]

        # pre-allocate index for PORTDIR so that it always has index 0.
        for root_config in roots.itervalues():
            portdb = root_config.trees["porttree"].dbapi
            portdir = portdb.porttree_root
            self.repoStr(portdir)

    def repoStr(self, repo_path_real):
        # NOTE(review): list.index() raises ValueError rather than
        # returning -1, so the -1 check below implies guard lines are
        # elided here -- confirm against the full source.
        real_index = self._repo_paths_real.index(repo_path_real)
        if real_index == -1:
            self._unknown_repo = True
        shown_repos = self._shown_repos
        repo_paths = self._repo_paths
        repo_path = repo_paths[real_index]
        # Assign the next display index on first use of this repo.
        index = shown_repos.get(repo_path)
        index = len(shown_repos)
        shown_repos[repo_path] = index

        shown_repos = self._shown_repos
        unknown_repo = self._unknown_repo
        if shown_repos or self._unknown_repo:
            output.append("Portage tree and overlays:\n")
            # Invert the repo_path -> index map into an index-ordered list.
            show_repo_paths = list(shown_repos)
            for repo_path, repo_index in shown_repos.iteritems():
                show_repo_paths[repo_index] = repo_path
            for index, repo_path in enumerate(show_repo_paths):
                output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
            output.append(" "+teal("[?]") + \
                " indicates that the source repository could not be determined\n")
        return "".join(output)
class PackageCounters(object):
    """Tallies of merge-list categories, rendered as the summary line
    shown after a dependency calculation (Total: N packages (...))."""

    # Counter initialization (the __init__ header is elided in this view).
    self.blocks_satisfied = 0
    self.restrict_fetch = 0
    self.restrict_fetch_satisfied = 0
    self.interactive = 0

    # Build the human-readable summary string.
    total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
    myoutput.append("Total: %s package" % total_installs)
    if total_installs != 1:
        myoutput.append("s")
    if total_installs != 0:
        myoutput.append(" (")
    # Each non-zero counter contributes a detail like "3 upgrades".
    if self.upgrades > 0:
        details.append("%s upgrade" % self.upgrades)
        if self.upgrades > 1:
    if self.downgrades > 0:
        details.append("%s downgrade" % self.downgrades)
        if self.downgrades > 1:
    details.append("%s new" % self.new)
    if self.newslot > 0:
        details.append("%s in new slot" % self.newslot)
        if self.newslot > 1:
    details.append("%s reinstall" % self.reinst)
    details.append("%s uninstall" % self.uninst)
    if self.interactive > 0:
        details.append("%s %s" % (self.interactive,
            colorize("WARN", "interactive")))
    myoutput.append(", ".join(details))
    if total_installs != 0:
        myoutput.append(")")
    myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
    # Fetch-restricted packages, with a warning for unsatisfied ones.
    if self.restrict_fetch:
        myoutput.append("\nFetch Restriction: %s package" % \
            self.restrict_fetch)
        if self.restrict_fetch > 1:
            myoutput.append("s")
    if self.restrict_fetch_satisfied < self.restrict_fetch:
        myoutput.append(bad(" (%s unsatisfied)") % \
            (self.restrict_fetch - self.restrict_fetch_satisfied))
    # Blocker conflicts, with a warning for unsatisfied ones.
    myoutput.append("\nConflict: %s block" % \
        myoutput.append("s")
    if self.blocks_satisfied < self.blocks:
        myoutput.append(bad(" (%s unsatisfied)") % \
            (self.blocks - self.blocks_satisfied))
    return "".join(myoutput)
class PollConstants(object):
    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """
    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    # Presumably executed in a loop over `names` (loop header elided
    # here): take the constant from the select module when available,
    # otherwise fall back to a default value v -- TODO confirm.
    locals()[k] = getattr(select, k, v)
class PollSelectAdapter(PollConstants):
    """
    Use select to emulate a poll object, for
    systems that don't support poll().
    """
        # fd -> eventmask for registered descriptors; _select_args caches
        # the argument lists for select() and is invalidated on changes.
        self._registered = {}
        self._select_args = [[], [], []]

    def register(self, fd, *args):
        """
        Only POLLIN is currently supported!
        """
            "register expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        # Default mask when none is given.
        eventmask = PollConstants.POLLIN | \
            PollConstants.POLLPRI | PollConstants.POLLOUT
        self._registered[fd] = eventmask
        # Invalidate the cached select() arguments.
        self._select_args = None

    def unregister(self, fd):
        self._select_args = None
        del self._registered[fd]

    def poll(self, *args):
            "poll expected at most 2 arguments, got " + \
            repr(1 + len(args)))
        # Rebuild the cached select() argument lists if necessary.
        select_args = self._select_args
        if select_args is None:
            select_args = [self._registered.keys(), [], []]
        if timeout is not None:
            # Copy so the cached list is not mutated by appending timeout.
            select_args = select_args[:]
            # Translate poll() timeout args to select() timeout args:
            #
            #          | units        | value(s) for indefinite block
            # ---------|--------------|------------------------------
            # poll     | milliseconds | omitted, negative, or None
            # ---------|--------------|------------------------------
            # select   | seconds      | omitted
            # ---------|--------------|------------------------------
            if timeout is not None and timeout < 0:
            if timeout is not None:
                select_args.append(timeout / 1000)
        select_events = select.select(*select_args)
        # Only readability is reported, as POLLIN.
        for fd in select_events[0]:
            poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
    """FIFO queue of tasks started up to a max_jobs concurrency limit."""

    __slots__ = ("max_jobs", "running_tasks") + \
        ("_dirty", "_scheduling", "_task_queue")

    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        self._task_queue = deque()
        self.running_tasks = set()
        if self.max_jobs is None:

    def add(self, task):
        # Append to the back of the queue.
        self._task_queue.append(task)

    def addFront(self, task):
        # Jump the queue: this task will be started next.
        self._task_queue.appendleft(task)

        if self._scheduling:
            # Ignore any recursive schedule() calls triggered via
            # self._task_exit().
        self._scheduling = True
        task_queue = self._task_queue
        running_tasks = self.running_tasks
        max_jobs = self.max_jobs
        state_changed = False
        # max_jobs is True means "unlimited concurrency".
        while task_queue and \
            (max_jobs is True or len(running_tasks) < max_jobs):
            task = task_queue.popleft()
            cancelled = getattr(task, "cancelled", None)
            running_tasks.add(task)
            # _task_exit() prunes running_tasks when the task finishes.
            task.addExitListener(self._task_exit)
            state_changed = True
        self._scheduling = False
        return state_changed

    def _task_exit(self, task):
        """
        Since we can always rely on exit listeners being called, the set of
        running tasks is always pruned automatically and there is never any need
        to actively prune it.
        """
        self.running_tasks.remove(task)
        if self._task_queue:

        # Clear pending tasks and detach from everything still running.
        self._task_queue.clear()
        running_tasks = self.running_tasks
        while running_tasks:
            task = running_tasks.pop()
            task.removeExitListener(self._task_exit)

    def __nonzero__(self):
        # Truthy while any task is queued or running.
        return bool(self._task_queue or self.running_tasks)

        return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None means "not yet tested".
_can_poll_device = None

def can_poll_device():
    """
    Test if it's possible to use poll() on a device such as a pty. This
    is known to fail on Darwin.
    @returns: True if poll() on a device succeeds, False otherwise.
    """
    global _can_poll_device
    if _can_poll_device is not None:
        # Return the cached result of a previous probe.
        return _can_poll_device

    if not hasattr(select, "poll"):
        _can_poll_device = False
        return _can_poll_device

    # Probe with a real device; /dev/null is always present.
    dev_null = open('/dev/null', 'rb')
        _can_poll_device = False
        return _can_poll_device

    p.register(dev_null.fileno(), PollConstants.POLLIN)

    # POLLNVAL indicates the fd cannot be polled on this platform.
    invalid_request = False
    for f, event in p.poll():
        if event & PollConstants.POLLNVAL:
            invalid_request = True

    _can_poll_device = not invalid_request
    return _can_poll_device
def create_poll_instance():
    """
    Create an instance of select.poll, or an instance of
    PollSelectAdapter there is no poll() implementation or
    it is broken somehow.
    """
    # Fall back to the select()-based emulation when poll() is
    # missing or known-broken on devices (e.g. Darwin ptys).
    if not can_poll_device():
        return PollSelectAdapter()
    return select.poll()
class PollScheduler(object):
    """Base scheduler that multiplexes task I/O through a single poll()."""

    class _sched_iface_class(SlotObject):
        # Narrow interface handed to tasks: register/schedule/unregister.
        __slots__ = ("register", "schedule", "unregister")

        # Instance state (the __init__ header is elided in this view).
        self._max_load = None
        self._poll_event_queue = []
        self._poll_event_handlers = {}
        self._poll_event_handler_ids = {}
        # Increment id for each new handler.
        self._event_handler_id = 0
        self._poll_obj = create_poll_instance()
        self._scheduling = False

    def _schedule(self):
        """
        Calls _schedule_tasks() and automatically returns early from
        any recursive calls to this method that the _schedule_tasks()
        call might trigger. This makes _schedule() safe to call from
        inside exit listeners.
        """
        if self._scheduling:
        self._scheduling = True
        return self._schedule_tasks()
        self._scheduling = False

    def _running_job_count(self):

    def _can_add_job(self):
        # Enforce the job-count limit first (True means unlimited).
        max_jobs = self._max_jobs
        max_load = self._max_load
        if self._max_jobs is not True and \
            self._running_job_count() >= self._max_jobs:
        # Then enforce the load-average limit, but only once at least
        # one job is already running.
        if max_load is not None and \
            (max_jobs is True or max_jobs > 1) and \
            self._running_job_count() >= 1:
            avg1, avg5, avg15 = os.getloadavg()
            except (AttributeError, OSError), e:
            writemsg("!!! getloadavg() failed: %s\n" % (e,),
            if avg1 >= max_load:

    def _poll(self, timeout=None):
        """
        All poll() calls pass through here. The poll events
        are added directly to self._poll_event_queue.
        In order to avoid endless blocking, this raises
        StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_handlers:
        if timeout is None and \
            not self._poll_event_handlers:
            raise StopIteration(
                "timeout is None and there are no poll() event handlers")

        # The following error is known to occur with Linux kernel versions
        #   select.error: (4, 'Interrupted system call')
        # This error has been observed after a SIGSTOP, followed by SIGCONT.
        # Treat it similar to EAGAIN if timeout is None, otherwise just return
        # without any events.
        self._poll_event_queue.extend(self._poll_obj.poll(timeout))
        except select.error, e:
            writemsg_level("\n!!! select error: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            if timeout is not None:

    def _next_poll_event(self, timeout=None):
        """
        Since the _schedule_wait() loop is called by event
        handlers from _poll_loop(), maintain a central event
        queue for both of them to share events from a single
        poll() call. In order to avoid endless blocking, this
        raises StopIteration if timeout is None and there are
        no file descriptors to poll.
        """
        if not self._poll_event_queue:
        return self._poll_event_queue.pop()

    def _poll_loop(self):
        # Dispatch events until no handlers remain registered.
        event_handlers = self._poll_event_handlers
        event_handled = False
        while event_handlers:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            event_handled = True
        except StopIteration:
            event_handled = True
        if not event_handled:
            raise AssertionError("tight loop")

    def _schedule_yield(self):
        """
        Schedule for a short period of time chosen by the scheduler based
        on internal state. Synchronous tasks should call this periodically
        in order to allow the scheduler to service pending poll events. The
        scheduler will call poll() exactly once, without blocking, and any
        resulting poll events will be serviced.
        """
        event_handlers = self._poll_event_handlers
        if not event_handlers:
            return bool(events_handled)
        if not self._poll_event_queue:
        # Drain only the events already queued (non-blocking yield).
        while event_handlers and self._poll_event_queue:
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
        except StopIteration:
        return bool(events_handled)

    def _register(self, f, eventmask, handler):
        """
        Register a handler for poll events on file descriptor f.

        @return: A unique registration id, for use in schedule() or
            unregister() calls.
        """
        if f in self._poll_event_handlers:
            raise AssertionError("fd %d is already registered" % f)
        self._event_handler_id += 1
        reg_id = self._event_handler_id
        self._poll_event_handler_ids[reg_id] = f
        self._poll_event_handlers[f] = (handler, reg_id)
        self._poll_obj.register(f, eventmask)

    def _unregister(self, reg_id):
        # Drop both mappings and detach the fd from the poll object.
        f = self._poll_event_handler_ids[reg_id]
        self._poll_obj.unregister(f)
        del self._poll_event_handlers[f]
        del self._poll_event_handler_ids[reg_id]

    def _schedule_wait(self, wait_ids):
        """
        Schedule until wait_id is no longer registered.

        @param wait_id: a task id (or ids) to wait for
        """
        event_handlers = self._poll_event_handlers
        handler_ids = self._poll_event_handler_ids
        event_handled = False

        # Accept a single id as well as a collection of ids.
        if isinstance(wait_ids, int):
            wait_ids = frozenset([wait_ids])

        while wait_ids.intersection(handler_ids):
            f, event = self._next_poll_event()
            handler, reg_id = event_handlers[f]
            event_handled = True
        except StopIteration:
            event_handled = True

        return event_handled
class QueueScheduler(PollScheduler):
    """
    Add instances of SequentialTaskQueue and then call run(). The
    run() method returns when no tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        PollScheduler.__init__(self)

        if max_jobs is None:

        self._max_jobs = max_jobs
        self._max_load = max_load
        # Interface given to queued tasks for fd registration/waiting.
        self.sched_iface = self._sched_iface_class(
            register=self._register,
            schedule=self._schedule_wait,
            unregister=self._unregister)

        self._schedule_listeners = []
        # add(q): track a SequentialTaskQueue (def header elided here).
        self._queues.append(q)

    def remove(self, q):
        self._queues.remove(q)

        # Keep scheduling until nothing is queued, then wait for the
        # remaining running jobs to finish.
        while self._schedule():
        while self._running_job_count():

    def _schedule_tasks(self):
        """
        Start as many jobs as the limits allow.

        @returns: True if there may be remaining tasks to schedule,
            False otherwise.
        """
        while self._can_add_job():
            n = self._max_jobs - self._running_job_count()
            if not self._start_next_job(n):
        for q in self._queues:

    def _running_job_count(self):
        # Sum the running tasks across all managed queues.
        for q in self._queues:
            job_count += len(q.running_tasks)
        self._jobs = job_count

    def _start_next_job(self, n=1):
        # Ask each queue to start work until n new jobs have launched;
        # detect starts by comparing running-task counts before/after.
        for q in self._queues:
            initial_job_count = len(q.running_tasks)
            final_job_count = len(q.running_tasks)
            if final_job_count > initial_job_count:
                started_count += (final_job_count - initial_job_count)
            if started_count >= n:
        return started_count
class TaskScheduler(object):
    """
    A simple way to handle scheduling of AsynchronousTask instances. Simply
    add tasks and call run(). The run() method returns when no tasks remain.
    """

    def __init__(self, max_jobs=None, max_load=None):
        # Compose a single queue with a QueueScheduler and re-export
        # the scheduler's interface and run() method.
        self._queue = SequentialTaskQueue(max_jobs=max_jobs)
        self._scheduler = QueueScheduler(
            max_jobs=max_jobs, max_load=max_load)
        self.sched_iface = self._scheduler.sched_iface
        self.run = self._scheduler.run
        self._scheduler.add(self._queue)

    def add(self, task):
        self._queue.add(task)

        self._scheduler.schedule()
class JobStatusDisplay(object):
    """One-line terminal status display for parallel job progress
    (Jobs: X of Y complete, Z running, ... Load avg: ...)."""

    # Attributes whose changes mark the display dirty (see __setattr__).
    _bound_properties = ("curval", "failed", "running")
    # Fixed width of the "Jobs: ..." column, for padding.
    _jobs_column_width = 48

    # Don't update the display unless at least this much
    # time has passed, in units of seconds.
    _min_display_latency = 2

    # Fallback terminal control codes used when termcap lookup fails.
    _default_term_codes = {
    # Friendly name -> termcap capability name.
    _termcap_name_map = {
        'carriage_return' : 'cr',

    def __init__(self, out=sys.stdout, quiet=False):
        # object.__setattr__ is used to bypass this class's own
        # __setattr__ hook during initialization.
        object.__setattr__(self, "out", out)
        object.__setattr__(self, "quiet", quiet)
        object.__setattr__(self, "maxval", 0)
        object.__setattr__(self, "merges", 0)
        object.__setattr__(self, "_changed", False)
        object.__setattr__(self, "_displayed", False)
        object.__setattr__(self, "_last_display_time", 0)
        object.__setattr__(self, "width", 80)

        isatty = hasattr(out, "isatty") and out.isatty()
        object.__setattr__(self, "_isatty", isatty)
        if not isatty or not self._init_term():
            # Not a terminal (or termcap failed): use the defaults.
            for k, capname in self._termcap_name_map.iteritems():
                term_codes[k] = self._default_term_codes[capname]
            object.__setattr__(self, "_term_codes", term_codes)

    def _init_term(self):
        """
        Initialize term control codes.

        @returns: True if term codes were successfully initialized,
            False otherwise.
        """
        term_type = os.environ.get("TERM", "vt100")
        curses.setupterm(term_type, self.out.fileno())
        tigetstr = curses.tigetstr
        except curses.error:
        if tigetstr is None:
        # Look up each capability, falling back to the defaults.
        for k, capname in self._termcap_name_map.iteritems():
            code = tigetstr(capname)
            code = self._default_term_codes[capname]
            term_codes[k] = code
        object.__setattr__(self, "_term_codes", term_codes)

    def _format_msg(self, msg):
        return ">>> %s" % msg

        # Erase the current line: carriage return + clear-to-end-of-line.
        self._term_codes['carriage_return'] + \
        self._term_codes['clr_eol'])
        self._displayed = False

    def _display(self, line):
        self.out.write(line)
        self._displayed = True

    def _update(self, msg):
        # Without a terminal, append plain lines instead of rewriting.
        if not self._isatty:
            out.write(self._format_msg(msg) + self._term_codes['newline'])
            self._displayed = True
        self._display(self._format_msg(msg))

    def displayMessage(self, msg):
        was_displayed = self._displayed

        # Erase the status line before printing the message over it.
        if self._isatty and self._displayed:
        self.out.write(self._format_msg(msg) + self._term_codes['newline'])
        self._displayed = False
        self._changed = True

        # Reset all counters and terminate the status line.
        for name in self._bound_properties:
            object.__setattr__(self, name, 0)
        self.out.write(self._term_codes['newline'])
        self._displayed = False

    def __setattr__(self, name, value):
        old_value = getattr(self, name)
        if value == old_value:
        object.__setattr__(self, name, value)
        if name in self._bound_properties:
            self._property_change(name, old_value, value)

    def _property_change(self, name, old_value, new_value):
        # Mark the display dirty so the next display() refreshes it.
        self._changed = True

    def _load_avg_str(self):
        avg = os.getloadavg()
        except (AttributeError, OSError), e:
        return ", ".join(("%%.%df" % digits ) % x for x in avg)

        # Rate-limit terminal updates to _min_display_latency seconds.
        current_time = time.time()
        time_delta = current_time - self._last_display_time
        if self._displayed and \
        if not self._isatty:
        if time_delta < self._min_display_latency:
        self._last_display_time = current_time
        self._changed = False
        self._display_status()

    def _display_status(self):
        # Don't use len(self._completed_tasks) here since that also
        # can include uninstall tasks.
        curval_str = str(self.curval)
        maxval_str = str(self.maxval)
        running_str = str(self.running)
        failed_str = str(self.failed)
        load_avg_str = self._load_avg_str()

        # Render through a style-aware formatter so that a plain
        # (uncolored) copy is captured alongside the colored output.
        color_output = StringIO.StringIO()
        plain_output = StringIO.StringIO()
        style_file = portage.output.ConsoleStyleFile(color_output)
        style_file.write_listener = plain_output
        style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
        style_writer.style_listener = style_file.new_styles
        f = formatter.AbstractFormatter(style_writer)

        number_style = "INFORM"
        f.add_literal_data("Jobs: ")
        f.push_style(number_style)
        f.add_literal_data(curval_str)
        f.add_literal_data(" of ")
        f.push_style(number_style)
        f.add_literal_data(maxval_str)
        f.add_literal_data(" complete")
        f.add_literal_data(", ")
        f.push_style(number_style)
        f.add_literal_data(running_str)
        f.add_literal_data(" running")
        f.add_literal_data(", ")
        f.push_style(number_style)
        f.add_literal_data(failed_str)
        f.add_literal_data(" failed")
        # Pad the jobs column to its fixed width.
        padding = self._jobs_column_width - len(plain_output.getvalue())
        f.add_literal_data(padding * " ")
        f.add_literal_data("Load avg: ")
        f.add_literal_data(load_avg_str)

        # Truncate to fit width, to avoid making the terminal scroll if the
        # line overflows (happens when the load average is large).
        plain_output = plain_output.getvalue()
        if self._isatty and len(plain_output) > self.width:
            # Use plain_output here since it's easier to truncate
            # properly than the color output which contains console
            # escape codes.
            self._update(plain_output[:self.width])
        self._update(color_output.getvalue())
        xtermTitle(" ".join(plain_output.split()))
9060 class Scheduler(PollScheduler):
9062 _opts_ignore_blockers = \
9063 frozenset(["--buildpkgonly",
9064 "--fetchonly", "--fetch-all-uri",
9065 "--nodeps", "--pretend"])
9067 _opts_no_background = \
9068 frozenset(["--pretend",
9069 "--fetchonly", "--fetch-all-uri"])
9071 _opts_no_restart = frozenset(["--buildpkgonly",
9072 "--fetchonly", "--fetch-all-uri", "--pretend"])
9074 _bad_resume_opts = set(["--ask", "--changelog",
9075 "--resume", "--skipfirst"])
9077 _fetch_log = "/var/log/emerge-fetch.log"
9079 class _iface_class(SlotObject):
9080 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9081 "dblinkElog", "fetch", "register", "schedule",
9082 "scheduleSetup", "scheduleUnpack", "scheduleYield",
9085 class _fetch_iface_class(SlotObject):
9086 __slots__ = ("log_file", "schedule")
9088 _task_queues_class = slot_dict_class(
9089 ("merge", "jobs", "fetch", "unpack"), prefix="")
9091 class _build_opts_class(SlotObject):
9092 __slots__ = ("buildpkg", "buildpkgonly",
9093 "fetch_all_uri", "fetchonly", "pretend")
9095 class _binpkg_opts_class(SlotObject):
9096 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9098 class _pkg_count_class(SlotObject):
9099 __slots__ = ("curval", "maxval")
9101 class _emerge_log_class(SlotObject):
9102 __slots__ = ("xterm_titles",)
9104 def log(self, *pargs, **kwargs):
9105 if not self.xterm_titles:
9106 # Avoid interference with the scheduler's status display.
9107 kwargs.pop("short_msg", None)
9108 emergelog(self.xterm_titles, *pargs, **kwargs)
9110 class _failed_pkg(SlotObject):
9111 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9113 class _ConfigPool(object):
9114 """Interface for a task to temporarily allocate a config
9115 instance from a pool. This allows a task to be constructed
9116 long before the config instance actually becomes needed, like
9117 when prefetchers are constructed for the whole merge list."""
9118 __slots__ = ("_root", "_allocate", "_deallocate")
9119 def __init__(self, root, allocate, deallocate):
9121 self._allocate = allocate
9122 self._deallocate = deallocate
9124 return self._allocate(self._root)
9125 def deallocate(self, settings):
9126 self._deallocate(settings)
9128 class _unknown_internal_error(portage.exception.PortageException):
9130 Used internally to terminate scheduling. The specific reason for
9131 the failure should have been dumped to stderr.
9133 def __init__(self, value=""):
9134 portage.exception.PortageException.__init__(self, value)
# NOTE(review): this region is a corrupted listing -- original file line
# numbers are embedded at the start of each line, indentation is lost, and
# some short lines are missing entirely (gaps in the embedded numbering).
# Code is left byte-identical; only comments are added.
# Scheduler constructor: records settings/trees/options and builds the
# helper objects (task queues, status display, per-root blocker DBs,
# prefetch log, dblink callback interface).
9136 def __init__(self, settings, trees, mtimedb, myopts,
9137 spinner, mergelist, favorites, digraph):
9138 PollScheduler.__init__(self)
9139 self.settings = settings
9140 self.target_root = settings["ROOT"]
9142 self.myopts = myopts
9143 self._spinner = spinner
9144 self._mtimedb = mtimedb
9145 self._mergelist = mergelist
9146 self._favorites = favorites
9147 self._args_set = InternalPackageSet(favorites)
# Fill each boolean slot of the option holders from the presence of the
# corresponding "--option" flag in myopts.
9148 self._build_opts = self._build_opts_class()
9149 for k in self._build_opts.__slots__:
9150 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9151 self._binpkg_opts = self._binpkg_opts_class()
9152 for k in self._binpkg_opts.__slots__:
9153 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9156 self._logger = self._emerge_log_class()
# One SequentialTaskQueue per allowed queue name.
9157 self._task_queues = self._task_queues_class()
9158 for k in self._task_queues.allowed_keys:
9159 setattr(self._task_queues, k,
9160 SequentialTaskQueue())
9161 self._status_display = JobStatusDisplay()
9162 self._max_load = myopts.get("--load-average")
9163 max_jobs = myopts.get("--jobs")
9164 if max_jobs is None:
# NOTE(review): a line is missing here; presumably it defaults max_jobs
# (likely to 1) -- TODO confirm against upstream.
9166 self._set_max_jobs(max_jobs)
9168 # The root where the currently running
9169 # portage instance is installed.
9170 self._running_root = trees["/"]["root_config"]
9172 if settings.get("PORTAGE_DEBUG", "") == "1":
# Per-root package settings, config pools, and blocker databases.
9174 self.pkgsettings = {}
9175 self._config_pool = {}
9176 self._blocker_db = {}
9178 self._config_pool[root] = []
9179 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
# Callback interface handed to dblink so merge phases route their
# scheduling and output through this Scheduler.
9181 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9182 schedule=self._schedule_fetch)
9183 self._sched_iface = self._iface_class(
9184 dblinkEbuildPhase=self._dblink_ebuild_phase,
9185 dblinkDisplayMerge=self._dblink_display_merge,
9186 dblinkElog=self._dblink_elog,
9187 fetch=fetch_iface, register=self._register,
9188 schedule=self._schedule_wait,
9189 scheduleSetup=self._schedule_setup,
9190 scheduleUnpack=self._schedule_unpack,
9191 scheduleYield=self._schedule_yield,
9192 unregister=self._unregister)
9194 self._prefetchers = weakref.WeakValueDictionary()
9195 self._pkg_queue = []
9196 self._completed_tasks = set()
9198 self._failed_pkgs = []
9199 self._failed_pkgs_all = []
9200 self._failed_pkgs_die_msgs = []
9201 self._post_mod_echo_msgs = []
9202 self._parallel_fetch = False
# Number of packages actually merged, for "(N of M)" output.
9203 merge_count = len([x for x in mergelist \
9204 if isinstance(x, Package) and x.operation == "merge"])
9205 self._pkg_count = self._pkg_count_class(
9206 curval=0, maxval=merge_count)
9207 self._status_display.maxval = self._pkg_count.maxval
9209 # The load average takes some time to respond when new
9210 # jobs are added, so we need to limit the rate of adding
9212 self._job_delay_max = 10
9213 self._job_delay_factor = 1.0
9214 self._job_delay_exp = 1.5
9215 self._previous_job_start_time = None
9217 self._set_digraph(digraph)
9219 # This is used to memoize the _choose_pkg() result when
9220 # no packages can be chosen until one of the existing
9222 self._choose_pkg_return_early = False
# parallel-fetch requires the distlocks feature; warn and leave it
# disabled otherwise.
9224 features = self.settings.features
9225 if "parallel-fetch" in features and \
9226 not ("--pretend" in self.myopts or \
9227 "--fetch-all-uri" in self.myopts or \
9228 "--fetchonly" in self.myopts):
9229 if "distlocks" not in features:
9230 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9231 portage.writemsg(red("!!!")+" parallel-fetching " + \
9232 "requires the distlocks feature enabled"+"\n",
9234 portage.writemsg(red("!!!")+" you have it disabled, " + \
9235 "thus parallel-fetching is being disabled"+"\n",
9237 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9238 elif len(mergelist) > 1:
9239 self._parallel_fetch = True
9241 if self._parallel_fetch:
9242 # clear out existing fetch log if it exists
9244 open(self._fetch_log, 'w')
9245 except EnvironmentError:
# Identify the currently-installed portage instance, used later to decide
# whether a restart-after-self-upgrade is needed.
9248 self._running_portage = None
9249 portage_match = self._running_root.trees["vartree"].dbapi.match(
9250 portage.const.PORTAGE_PACKAGE_ATOM)
9252 cpv = portage_match.pop()
9253 self._running_portage = self._pkg(cpv, "installed",
9254 self._running_root, installed=True)
# Poll wrapper that delegates to the base class PollScheduler._poll.
# NOTE(review): original line 9257 is missing from this listing, so one
# statement between the def and the delegation is lost -- TODO recover.
9256 def _poll(self, timeout=None):
9258 PollScheduler._poll(self, timeout=timeout)
9260 def _set_max_jobs(self, max_jobs):
9261 self._max_jobs = max_jobs
9262 self._task_queues.jobs.max_jobs = max_jobs
# Decide whether to run in background mode (output to logs instead of the
# terminal) and adjust quiet/title state accordingly.  Interactive packages
# force output to stdio and --jobs=1.  NOTE(review): several lines are
# missing from this listing (the message buffer handling around 9282-9288
# and the final "return background") -- code left byte-identical.
9264 def _background_mode(self):
9266 Check if background mode is enabled and adjust states as necessary.
9269 @returns: True if background mode is enabled, False otherwise.
9271 background = (self._max_jobs is True or \
9272 self._max_jobs > 1 or "--quiet" in self.myopts) and \
9273 not bool(self._opts_no_background.intersection(self.myopts))
9276 interactive_tasks = self._get_interactive_tasks()
9277 if interactive_tasks:
9279 writemsg_level(">>> Sending package output to stdio due " + \
9280 "to interactive package(s):\n",
9281 level=logging.INFO, noiselevel=-1)
9283 for pkg in interactive_tasks:
9284 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
9286 pkg_str += " for " + pkg.root
9289 writemsg_level("".join("%s\n" % (l,) for l in msg),
9290 level=logging.INFO, noiselevel=-1)
# Interactive packages need the terminal, so cap parallelism at one job.
9291 if self._max_jobs is True or self._max_jobs > 1:
9292 self._set_max_jobs(1)
9293 writemsg_level(">>> Setting --jobs=1 due " + \
9294 "to the above interactive package(s)\n",
9295 level=logging.INFO, noiselevel=-1)
9297 self._status_display.quiet = \
9299 ("--quiet" in self.myopts and \
9300 "--verbose" not in self.myopts)
9302 self._logger.xterm_titles = \
9303 "notitles" not in self.settings.features and \
9304 self._status_display.quiet
9308 def _get_interactive_tasks(self):
9309 from portage import flatten
9310 from portage.dep import use_reduce, paren_reduce
9311 interactive_tasks = []
9312 for task in self._mergelist:
9313 if not (isinstance(task, Package) and \
9314 task.operation == "merge"):
9317 properties = flatten(use_reduce(paren_reduce(
9318 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9319 except portage.exception.InvalidDependString, e:
9320 show_invalid_depstring_notice(task,
9321 task.metadata["PROPERTIES"], str(e))
9322 raise self._unknown_internal_error()
9323 if "interactive" in properties:
9324 interactive_tasks.append(task)
9325 return interactive_tasks
9327 def _set_digraph(self, digraph):
9328 if "--nodeps" in self.myopts or \
9329 (self._max_jobs is not True and self._max_jobs < 2):
9331 self._digraph = None
9334 self._digraph = digraph
9335 self._prune_digraph()
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant: non-Package nodes,
    installed "nomerge" nodes, and already-completed tasks.  Repeats
    until no further root nodes can be removed.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    while True:
        for node in graph.root_nodes():
            if not isinstance(node, Package) or \
                (node.installed and node.operation == "nomerge") or \
                node in completed_tasks:
                removed_nodes.add(node)
        if removed_nodes:
            graph.difference_update(removed_nodes)
        # Removing roots may expose new irrelevant roots, so loop until
        # a pass removes nothing.
        if not removed_nodes:
            break
        removed_nodes.clear()
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    # Exit status of the failed uninstall, when provided.
    status = None

    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        if pargs:
            self.status = pargs[0]
9369 def _schedule_fetch(self, fetcher):
9371 Schedule a fetcher on the fetch queue, in order to
9372 serialize access to the fetch log.
9374 self._task_queues.fetch.addFront(fetcher)
9376 def _schedule_setup(self, setup_phase):
9378 Schedule a setup phase on the merge queue, in order to
9379 serialize unsandboxed access to the live filesystem.
9381 self._task_queues.merge.addFront(setup_phase)
9384 def _schedule_unpack(self, unpack_phase):
9386 Schedule an unpack phase on the unpack queue, in order
9387 to serialize $DISTDIR access for live ebuilds.
9389 self._task_queues.unpack.add(unpack_phase)
9391 def _find_blockers(self, new_pkg):
9393 Returns a callable which should be called only when
9394 the vdb lock has been acquired.
9397 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """
    Find installed packages that block new_pkg and return the
    corresponding dblink instances (None when blockers are ignored).
    """
    if self._opts_ignore_blockers.intersection(self.myopts):
        return None

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).
    import gc
    gc.collect()

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # A package never blocks its own slot replacement or itself.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
            continue
        if new_pkg.cpv == blocking_pkg.cpv:
            continue
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    gc.collect()

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Build a Package instance for the package a dblink operates on."""
    cpv = pkg_dblink.mycpv
    type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_config = self.trees[pkg_dblink.myroot]["root_config"]
    installed = type_name == "installed"
    return self._pkg(cpv, type_name, root_config, installed=installed)
9435 def _append_to_log_path(self, log_path, msg):
9436 f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    """
    Route elog messages from a dblink merge: in background mode with a
    log file available, write there instead of stdout.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    log_file = None
    out = sys.stdout
    background = self._background

    if background and log_path is not None:
        log_file = open(log_path, 'a')
        out = log_file

    try:
        for msg in msgs:
            func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    finally:
        if log_file is not None:
            log_file.close()
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """
    Display a merge message from a dblink, suppressing low-priority
    terminal output in background mode and mirroring it to the log file
    when one is configured.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if log_path is None:
        # No log file: show on the terminal unless backgrounded and
        # below warning level.
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    else:
        if not background:
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
        self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")

    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    ebuild_phase.wait()

    return ebuild_phase.returncode
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.  Returns os.EX_OK on success (or when checks
    # are skipped) and 1 on the first digest failure.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
        return os.EX_OK

    shown_verifying_msg = False
    quiet_settings = {}
    # Clone a quiet config per root so digestcheck output stays terse.
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
        del quiet_config

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
            continue

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))

        if not portage.digestcheck([], quiet_config, strict=True):
            return 1

    return os.EX_OK
9531 def _add_prefetchers(self):
9533 if not self._parallel_fetch:
9536 if self._parallel_fetch:
9537 self._status_msg("Starting parallel fetch")
9539 prefetchers = self._prefetchers
9540 getbinpkg = "--getbinpkg" in self.myopts
9542 # In order to avoid "waiting for lock" messages
9543 # at the beginning, which annoy users, never
9544 # spawn a prefetcher for the first package.
9545 for pkg in self._mergelist[1:]:
9546 prefetcher = self._create_prefetcher(pkg)
9547 if prefetcher is not None:
9548 self._task_queues.fetch.add(prefetcher)
9549 prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    prefetcher = None

    if not isinstance(pkg, Package):
        pass

    elif pkg.type_name == "ebuild":
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        # Only remote binary packages need prefetching.
        prefetcher = BinpkgFetcher(background=True,
            logfile=self._fetch_log, pkg=pkg,
            scheduler=self._sched_iface)

    return prefetcher
9578 def _is_restart_scheduled(self):
9580 Check if the merge list contains a replacement
9581 for the current running instance, that will result
9582 in restart after merge.
9584 @returns: True if a restart is scheduled, False otherwise.
9586 if self._opts_no_restart.intersection(self.myopts):
9589 mergelist = self._mergelist
9591 for i, pkg in enumerate(mergelist):
9592 if self._is_restart_necessary(pkg) and \
9593 i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
        requires restart, False otherwise.
    """
    # Figure out if we need a restart: a portage package merged into the
    # running root, with a version different from the running instance.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            return cmp(pkg, self._running_portage) != 0
        return True
    return False
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):
        return

    if not self._is_restart_necessary(pkg):
        return

    # No restart needed when portage is the final package.
    if pkg == self._mergelist[-1]:
        return

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    mtimedb["resume"]["mergelist"].remove(list(pkg))
    mtimedb.commit()
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            if myarg is True:
                mynewargv.append(myopt)
            else:
                mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
# NOTE(review): this is the body of Scheduler.merge(); the "def merge(self):"
# line itself and the surrounding try/finally lines are missing from this
# corrupted listing (line numbers embedded, indentation lost).  Code left
# byte-identical; only comments added.  Overall flow: resume handling,
# background setup, manifest checks, the --keep-going retry loop around
# _merge(), then failure-log and error-summary reporting.
9664 if "--resume" in self.myopts:
9666 portage.writemsg_stdout(
9667 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9668 self._logger.log(" *** Resuming merge...")
9670 self._save_resume_list()
9673 self._background = self._background_mode()
9674 except self._unknown_internal_error:
# Propagate background mode into each root's config.
9677 for root in self.trees:
9678 root_config = self.trees[root]["root_config"]
9679 if self._background:
9680 root_config.settings.unlock()
9681 root_config.settings["PORTAGE_BACKGROUND"] = "1"
9682 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
9683 root_config.settings.lock()
9685 self.pkgsettings[root] = portage.config(
9686 clone=root_config.settings)
9688 rval = self._check_manifests()
9689 if rval != os.EX_OK:
# --keep-going loop: drop failed packages from the resume list and retry.
9692 keep_going = "--keep-going" in self.myopts
9693 fetchonly = self._build_opts.fetchonly
9694 mtimedb = self._mtimedb
9695 failed_pkgs = self._failed_pkgs
9698 rval = self._merge()
9699 if rval == os.EX_OK or fetchonly or not keep_going:
9701 if "resume" not in mtimedb:
9703 mergelist = self._mtimedb["resume"].get("mergelist")
9710 for failed_pkg in failed_pkgs:
9711 mergelist.remove(list(failed_pkg.pkg))
9713 self._failed_pkgs_all.extend(failed_pkgs)
9719 if not self._calc_resume_list():
9722 clear_caches(self.trees)
9723 if not self._mergelist:
9726 self._save_resume_list()
9727 self._pkg_count.curval = 0
9728 self._pkg_count.maxval = len([x for x in self._mergelist \
9729 if isinstance(x, Package) and x.operation == "merge"])
9730 self._status_display.maxval = self._pkg_count.maxval
9732 self._logger.log(" *** Finished. Cleaning up...")
9735 self._failed_pkgs_all.extend(failed_pkgs)
9738 background = self._background
9739 failure_log_shown = False
9740 if background and len(self._failed_pkgs_all) == 1:
9741 # If only one package failed then just show it's
9742 # whole log for easy viewing.
9743 failed_pkg = self._failed_pkgs_all[-1]
9744 build_dir = failed_pkg.build_dir
9747 log_paths = [failed_pkg.build_log]
9749 log_path = self._locate_failure_log(failed_pkg)
9750 if log_path is not None:
9752 log_file = open(log_path, 'rb')
9756 if log_file is not None:
9758 for line in log_file:
9759 writemsg_level(line, noiselevel=-1)
9762 failure_log_shown = True
9764 # Dump mod_echo output now since it tends to flood the terminal.
9765 # This allows us to avoid having more important output, generated
9766 # later, from being swept away by the mod_echo output.
9767 mod_echo_output = _flush_elog_mod_echo()
9769 if background and not failure_log_shown and \
9770 self._failed_pkgs_all and \
9771 self._failed_pkgs_die_msgs and \
9772 not mod_echo_output:
# Replay recorded per-package die messages, grouped by ebuild phase.
9774 printer = portage.output.EOutput()
9775 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
9777 if mysettings["ROOT"] != "/":
9778 root_msg = " merged to %s" % mysettings["ROOT"]
9780 printer.einfo("Error messages for package %s%s:" % \
9781 (colorize("INFORM", key), root_msg))
9783 for phase in portage.const.EBUILD_PHASES:
9784 if phase not in logentries:
9786 for msgtype, msgcontent in logentries[phase]:
9787 if isinstance(msgcontent, basestring):
9788 msgcontent = [msgcontent]
9789 for line in msgcontent:
9790 printer.eerror(line.strip("\n"))
9792 if self._post_mod_echo_msgs:
9793 for msg in self._post_mod_echo_msgs:
# Final summary of all failed packages.
9796 if len(self._failed_pkgs_all) > 1:
9797 msg = "The following packages have " + \
9798 "failed to build or install:"
9800 writemsg(prefix + "\n", noiselevel=-1)
9801 from textwrap import wrap
9802 for line in wrap(msg, 72):
9803 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
9804 writemsg(prefix + "\n", noiselevel=-1)
9805 for failed_pkg in self._failed_pkgs_all:
9806 writemsg("%s\t%s\n" % (prefix,
9807 colorize("INFORM", str(failed_pkg.pkg))),
9809 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """
    elog hook: remember ERROR-level entries so they can be replayed
    in the failure summary after the merge completes.
    """
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    if errors:
        self._failed_pkgs_die_msgs.append(
            (mysettings, key, errors))
9819 def _locate_failure_log(self, failed_pkg):
9821 build_dir = failed_pkg.build_dir
9824 log_paths = [failed_pkg.build_log]
9826 for log_path in log_paths:
9831 log_size = os.stat(log_path).st_size
def _add_packages(self):
    """Populate the package queue from the merge list (Blockers are
    informational only and are not queued)."""
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            pass
9850 def _merge_exit(self, merge):
9851 self._do_merge_exit(merge)
9852 self._deallocate_config(merge.merge.settings)
9853 if merge.returncode == os.EX_OK and \
9854 not merge.merge.pkg.installed:
9855 self._status_display.curval += 1
9856 self._status_display.merges = len(self._task_queues.merge)
9859 def _do_merge_exit(self, merge):
9860 pkg = merge.merge.pkg
9861 if merge.returncode != os.EX_OK:
9862 settings = merge.merge.settings
9863 build_dir = settings.get("PORTAGE_BUILDDIR")
9864 build_log = settings.get("PORTAGE_LOG_FILE")
9866 self._failed_pkgs.append(self._failed_pkg(
9867 build_dir=build_dir, build_log=build_log,
9869 returncode=merge.returncode))
9870 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
9872 self._status_display.failed = len(self._failed_pkgs)
9875 self._task_complete(pkg)
9876 pkg_to_replace = merge.merge.pkg_to_replace
9877 if pkg_to_replace is not None:
9878 # When a package is replaced, mark it's uninstall
9879 # task complete (if any).
9881 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
9882 self._task_complete(uninst_hash_key)
9887 self._restart_if_necessary(pkg)
9889 # Call mtimedb.commit() after each merge so that
9890 # --resume still works after being interrupted
9891 # by reboot, sigkill or similar.
9892 mtimedb = self._mtimedb
9893 mtimedb["resume"]["mergelist"].remove(list(pkg))
9894 if not mtimedb["resume"]["mergelist"]:
9895 del mtimedb["resume"]
# Exit listener for build jobs: on success, queue the corresponding
# PackageMerge; on failure, record a _failed_pkg and update the display.
# NOTE(review): several lines are missing from this corrupted listing
# (including original line 9900 inside the success branch and the else:/
# pkg=/job-count lines) -- code left byte-identical, comments only added.
9898 def _build_exit(self, build):
9899 if build.returncode == os.EX_OK:
9901 merge = PackageMerge(merge=build)
9902 merge.addExitListener(self._merge_exit)
9903 self._task_queues.merge.add(merge)
9904 self._status_display.merges = len(self._task_queues.merge)
# Failure path: capture build dir/log for later diagnosis.
9906 settings = build.settings
9907 build_dir = settings.get("PORTAGE_BUILDDIR")
9908 build_log = settings.get("PORTAGE_LOG_FILE")
9910 self._failed_pkgs.append(self._failed_pkg(
9911 build_dir=build_dir, build_log=build_log,
9913 returncode=build.returncode))
9914 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
9916 self._status_display.failed = len(self._failed_pkgs)
9917 self._deallocate_config(build.settings)
9919 self._status_display.running = self._jobs
9922 def _extract_exit(self, build):
9923 self._build_exit(build)
9925 def _task_complete(self, pkg):
9926 self._completed_tasks.add(pkg)
9927 self._choose_pkg_return_early = False
# NOTE(review): body of Scheduler._merge(); the def line, the try/finally
# around the main loop, and the final return lines are missing from this
# corrupted listing.  Code left byte-identical; comments only added.
# Flow: start prefetchers, queue packages, silence locks/route elog while
# the main loop runs, then restore globals and derive the return code from
# the last failed package.
9931 self._add_prefetchers()
9932 self._add_packages()
9933 pkg_queue = self._pkg_queue
9934 failed_pkgs = self._failed_pkgs
# Global hooks are installed for the duration of the main loop only.
9935 portage.locks._quiet = self._background
9936 portage.elog._emerge_elog_listener = self._elog_listener
9942 self._main_loop_cleanup()
9943 portage.locks._quiet = False
9944 portage.elog._emerge_elog_listener = None
9946 rval = failed_pkgs[-1].returncode
9950 def _main_loop_cleanup(self):
9951 del self._pkg_queue[:]
9952 self._completed_tasks.clear()
9953 self._choose_pkg_return_early = False
9954 self._status_display.reset()
9955 self._digraph = None
9956 self._task_queues.fetch.clear()
9958 def _choose_pkg(self):
9960 Choose a task that has all it's dependencies satisfied.
9963 if self._choose_pkg_return_early:
9966 if self._digraph is None:
9967 if (self._jobs or self._task_queues.merge) and \
9968 not ("--nodeps" in self.myopts and \
9969 (self._max_jobs is True or self._max_jobs > 1)):
9970 self._choose_pkg_return_early = True
9972 return self._pkg_queue.pop(0)
9974 if not (self._jobs or self._task_queues.merge):
9975 return self._pkg_queue.pop(0)
9977 self._prune_digraph()
9980 later = set(self._pkg_queue)
9981 for pkg in self._pkg_queue:
9983 if not self._dependent_on_scheduled_merges(pkg, later):
9987 if chosen_pkg is not None:
9988 self._pkg_queue.remove(chosen_pkg)
9990 if chosen_pkg is None:
9991 # There's no point in searching for a package to
9992 # choose until at least one of the existing jobs
9994 self._choose_pkg_return_early = True
9998 def _dependent_on_scheduled_merges(self, pkg, later):
10000 Traverse the subgraph of the given packages deep dependencies
10001 to see if it contains any scheduled merges.
10002 @param pkg: a package to check dependencies for
10004 @param later: packages for which dependence should be ignored
10005 since they will be merged later than pkg anyway and therefore
10006 delaying the merge of pkg will not result in a more optimal
10010 @returns: True if the package is dependent, False otherwise.
10013 graph = self._digraph
10014 completed_tasks = self._completed_tasks
10017 traversed_nodes = set([pkg])
10018 direct_deps = graph.child_nodes(pkg)
10019 node_stack = direct_deps
10020 direct_deps = frozenset(direct_deps)
10022 node = node_stack.pop()
10023 if node in traversed_nodes:
10025 traversed_nodes.add(node)
10026 if not ((node.installed and node.operation == "nomerge") or \
10027 (node.operation == "uninstall" and \
10028 node not in direct_deps) or \
10029 node in completed_tasks or \
10033 node_stack.extend(graph.child_nodes(node))
10037 def _allocate_config(self, root):
10039 Allocate a unique config instance for a task in order
10040 to prevent interference between parallel tasks.
10042 if self._config_pool[root]:
10043 temp_settings = self._config_pool[root].pop()
10045 temp_settings = portage.config(clone=self.pkgsettings[root])
10046 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10047 # performance reasons, call it here to make sure all settings from the
10048 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10049 temp_settings.reload()
10050 temp_settings.reset()
10051 return temp_settings
10053 def _deallocate_config(self, settings):
10054 self._config_pool[settings["ROOT"]].append(settings)
# Main scheduling loop: schedule tasks until the queue drains, polling for
# I/O events between scheduling passes.  NOTE(review): several lines are
# missing from this corrupted listing (the poll-loop bodies after each
# _poll_event_handlers check and the final loop's break/schedule lines) --
# code left byte-identical, comments only added.
10056 def _main_loop(self):
10058 # Only allow 1 job max if a restart is scheduled
10059 # due to portage update.
10060 if self._is_restart_scheduled() or \
10061 self._opts_no_background.intersection(self.myopts):
10062 self._set_max_jobs(1)
10064 merge_queue = self._task_queues.merge
10066 while self._schedule():
10067 if self._poll_event_handlers:
# Drain remaining jobs/merges after scheduling stops producing work.
10072 if not (self._jobs or merge_queue):
10074 if self._poll_event_handlers:
10077 def _keep_scheduling(self):
10078 return bool(self._pkg_queue and \
10079 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    """
    One scheduling pass: dispatch new tasks, run the queues, cancel
    leftover prefetchers when they are the only thing keeping the poll
    loop alive, and report whether scheduling should continue.
    """
    self._schedule_tasks_imp()
    self._status_display.display()

    state_change = 0
    for q in self._task_queues.values():
        if q.schedule():
            state_change += 1

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        state_change += 1

    if state_change:
        self._schedule_tasks_imp()
        self._status_display.display()

    return self._keep_scheduling()
10104 def _job_delay(self):
10107 @returns: True if job scheduling should be delayed, False otherwise.
10110 if self._jobs and self._max_load is not None:
10112 current_time = time.time()
10114 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10115 if delay > self._job_delay_max:
10116 delay = self._job_delay_max
10117 if (current_time - self._previous_job_start_time) < delay:
# Core dispatch: pick a package via _choose_pkg() and start either a merge
# (for installed/uninstall operations), an extraction job, or a build job.
# NOTE(review): this corrupted listing is missing the state_change
# bookkeeping lines, the _job_delay() term of the early-exit condition,
# the "pkg is None" check, and the branch conditions that select between
# the merge/extract/build paths -- code left byte-identical.
10122 def _schedule_tasks_imp(self):
10125 @returns: True if state changed, False otherwise.
10132 if not self._keep_scheduling():
10133 return bool(state_change)
10135 if self._choose_pkg_return_early or \
10136 not self._can_add_job() or \
10138 return bool(state_change)
10140 pkg = self._choose_pkg()
10142 return bool(state_change)
10146 if not pkg.installed:
10147 self._pkg_count.curval += 1
10149 task = self._task(pkg)
# Merge path (no build needed).
10152 merge = PackageMerge(merge=task)
10153 merge.addExitListener(self._merge_exit)
10154 self._task_queues.merge.add(merge)
# Extraction job path.
10158 self._previous_job_start_time = time.time()
10159 self._status_display.running = self._jobs
10160 task.addExitListener(self._extract_exit)
10161 self._task_queues.jobs.add(task)
# Build job path.
10165 self._previous_job_start_time = time.time()
10166 self._status_display.running = self._jobs
10167 task.addExitListener(self._build_exit)
10168 self._task_queues.jobs.add(task)
10170 return bool(state_change)
def _task(self, pkg):
    """
    Build a MergeListItem for pkg, locating any currently-installed
    package in the same slot that the merge will replace.
    """
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        if previous_cpv:
            previous_cpv = previous_cpv.pop()
            pkg_to_replace = self._pkg(previous_cpv,
                "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
        self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)

    return task
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """
    Report a failed package in the status display, including its log
    file path when one can be located.
    """
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg += " %s %s" % (preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        msg += ", Log file:"
    self._status_msg(msg)

    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10215 def _status_msg(self, msg):
10217 Display a brief status message (no newlines) in the status display.
10218 This is called by tasks to provide feedback to the user. This
10219 delegates the resposibility of generating \r and \n control characters,
10220 to guarantee that lines are created or erased when necessary and
10224 @param msg: a brief status message (no newlines allowed)
10226 if not self._background:
10227 writemsg_level("\n")
10228 self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    # Only actual merges belong in the resume list.
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]

    mtimedb.commit()
# Recompute the resume merge list via resume_depgraph(), dropping packages
# with unsatisfied dependencies, and install the result as the new merge
# list and scheduler graph.  NOTE(review): this corrupted listing is
# missing the try: around resume_depgraph, the failure early-returns, the
# else:/indent setup lines, and the final return -- code left
# byte-identical, comments only added.
10243 def _calc_resume_list(self):
10245 Use the current resume list to calculate a new one,
10246 dropping any packages with unsatisfied deps.
10248 @returns: True if successful, False otherwise.
10250 print colorize("GOOD", "*** Resuming merge...")
10252 if self._show_list():
10253 if "--tree" in self.myopts:
10254 portage.writemsg_stdout("\n" + \
10255 darkgreen("These are the packages that " + \
10256 "would be merged, in reverse order:\n\n"))
10259 portage.writemsg_stdout("\n" + \
10260 darkgreen("These are the packages that " + \
10261 "would be merged, in order:\n\n"))
10263 show_spinner = "--quiet" not in self.myopts and \
10264 "--nodeps" not in self.myopts
10267 print "Calculating dependencies ",
10269 myparams = create_depgraph_params(self.myopts, None)
10273 success, mydepgraph, dropped_tasks = resume_depgraph(
10274 self.settings, self.trees, self._mtimedb, self.myopts,
10275 myparams, self._spinner, skip_unsatisfied=True)
10276 except depgraph.UnsatisfiedResumeDep, e:
10277 mydepgraph = e.depgraph
10278 dropped_tasks = set()
10281 print "\b\b... done!"
# Deferred message used when the resume dep-graph could not be satisfied.
10284 def unsatisfied_resume_dep_msg():
10285 mydepgraph.display_problems()
10286 out = portage.output.EOutput()
10287 out.eerror("One or more packages are either masked or " + \
10288 "have missing dependencies:")
10291 show_parents = set()
10292 for dep in e.value:
10293 if dep.parent in show_parents:
10295 show_parents.add(dep.parent)
10296 if dep.atom is None:
10297 out.eerror(indent + "Masked package:")
10298 out.eerror(2 * indent + str(dep.parent))
10301 out.eerror(indent + str(dep.atom) + " pulled in by:")
10302 out.eerror(2 * indent + str(dep.parent))
10304 msg = "The resume list contains packages " + \
10305 "that are either masked or have " + \
10306 "unsatisfied dependencies. " + \
10307 "Please restart/continue " + \
10308 "the operation manually, or use --skipfirst " + \
10309 "to skip the first package in the list and " + \
10310 "any other packages that may be " + \
10311 "masked or have missing dependencies."
10312 for line in textwrap.wrap(msg, 72):
10314 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10317 if success and self._show_list():
10318 mylist = mydepgraph.altlist()
10320 if "--tree" in self.myopts:
10322 mydepgraph.display(mylist, favorites=self._favorites)
10325 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10327 mydepgraph.display_problems()
# Install the recalculated list and graph on this scheduler.
10329 mylist = mydepgraph.altlist()
10330 mydepgraph.break_refs(mylist)
10331 self._mergelist = mylist
10332 self._set_digraph(mydepgraph.schedulerGraph())
# Report each dropped merge so --keep-going users see what was skipped.
10335 for task in dropped_tasks:
10336 if not (isinstance(task, Package) and task.operation == "merge"):
10339 msg = "emerge --keep-going:" + \
10341 if pkg.root != "/":
10342 msg += " for %s" % (pkg.root,)
10343 msg += " dropped due to unsatisfied dependency."
10344 for line in textwrap.wrap(msg, msg_width):
10345 eerror(line, phase="other", key=pkg.cpv)
10346 settings = self.pkgsettings[pkg.root]
10347 # Ensure that log collection from $T is disabled inside
10348 # elog_process(), since any logs that might exist are
10350 settings.pop("T", None)
10351 portage.elog.elog_process(pkg.cpv, settings)
def _show_list(self):
	"""
	Decide whether the pending merge list should be displayed: only in
	the interactive/verbose modes (--ask, --tree or --verbose) and
	never when --quiet is in effect.
	NOTE(review): the return statements are elided in this excerpt
	(truncated source).
	"""
	myopts = self.myopts
	if "--quiet" not in myopts and \
		("--ask" in myopts or "--tree" in myopts or \
		"--verbose" in myopts):
def _world_atom(self, pkg):
	"""
	Add the package to the world file, but only if
	it's supposed to be added. Otherwise, do nothing.
	NOTE(review): several early-return and lock/unlock lines are
	elided in this excerpt (truncated source).
	"""
	# Dry-run / one-shot style options must never modify the world file.
	if set(("--buildpkgonly", "--fetchonly",
		"--oneshot", "--onlydeps",
		"--pretend")).intersection(self.myopts):
		# (early return elided in this excerpt)
	# Only packages merged into the target root get recorded.
	if pkg.root != self.target_root:
		# (early return elided in this excerpt)
	args_set = self._args_set
	if not args_set.findAtomForPackage(pkg):
		# (early return elided in this excerpt)
	logger = self._logger
	pkg_count = self._pkg_count
	root_config = pkg.root_config
	world_set = root_config.sets["world"]
	world_locked = False
	if hasattr(world_set, "lock"):
		# (world_set.lock() call elided in this excerpt)
		world_locked = True
	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk
	atom = create_world_atom(pkg, args_set, root_config)
	# (guard on atom being non-None elided in this excerpt)
	if hasattr(world_set, "add"):
		self._status_msg(('Recording %s in "world" ' + \
			'favorites file...') % atom)
		logger.log(" === (%s of %s) Updating world file (%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv))
		world_set.add(atom)
	# else: fallback when the world set implementation cannot add()
	writemsg_level('\n!!! Unable to record %s in "world"\n' % \
		(atom,), level=logging.WARN, noiselevel=-1)
	# NOTE(review): the unlock of world_set when world_locked is True
	# is elided in this excerpt.
def _pkg(self, cpv, type_name, root_config, installed=False):
	"""
	Get a package instance from the cache, or create a new
	one if necessary. Raises KeyError from aux_get if it
	failures for some reason (package does not exist or is
	corrupt).
	"""
	operation = "merge"
	# (the condition selecting "nomerge" -- presumably `if installed:`
	# -- is elided in this excerpt)
	operation = "nomerge"
	if self._digraph is not None:
		# Reuse existing instance when available.
		pkg = self._digraph.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is not None:
			# (return of the cached instance elided in this excerpt)
	# Map the package type (e.g. "ebuild") to its tree and fetch only
	# the metadata keys kept in the dbapi's auxiliary cache.
	tree_type = depgraph.pkg_tree_map[type_name]
	db = root_config.trees[tree_type].dbapi
	db_keys = list(self.trees[root_config.root][
		tree_type].dbapi._aux_cache_keys)
	metadata = izip(db_keys, db.aux_get(cpv, db_keys))
	pkg = Package(cpv=cpv, metadata=metadata,
		root_config=root_config, installed=installed)
	if type_name == "ebuild":
		# Apply per-package USE so PORTAGE_USE reflects this cpv.
		settings = self.pkgsettings[root_config.root]
		settings.setcpv(pkg)
		pkg.metadata["USE"] = settings["PORTAGE_USE"]
class MetadataRegen(PollScheduler):
	"""
	PollScheduler subclass that regenerates the ebuild metadata cache
	by spawning metadata processes in parallel via the portdb, then
	prunes cache entries whose ebuilds no longer exist.
	NOTE(review): several control-flow lines (try/while headers,
	continue/return statements) are elided in this excerpt.
	"""

	def __init__(self, portdb, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		self._portdb = portdb
		if max_jobs is None:
			# (default max_jobs assignment elided in this excerpt)
		self._max_jobs = max_jobs
		self._max_load = max_load
		# Scheduler interface handed to each spawned metadata process.
		self._sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)
		# cpvs whose ebuilds were found; used later to keep their
		# cache entries out of the dead-node pruning.
		self._valid_pkgs = set()
		self._process_iter = self._iter_metadata_processes()

	def _iter_metadata_processes(self):
		"""Generator yielding one metadata process per ebuild."""
		portdb = self._portdb
		valid_pkgs = self._valid_pkgs
		every_cp = portdb.cp_all()
		# Reverse-sorted so pop() walks categories in ascending order.
		every_cp.sort(reverse=True)
		# (enclosing "while every_cp:" loop header elided in excerpt)
		cp = every_cp.pop()
		portage.writemsg_stdout("Processing %s\n" % cp)
		cpv_list = portdb.cp_list(cp)
		for cpv in cpv_list:
			valid_pkgs.add(cpv)
			ebuild_path, repo_path = portdb.findname2(cpv)
			metadata_process = portdb._metadata_process(
				cpv, ebuild_path, repo_path)
			if metadata_process is None:
				# (continue elided in this excerpt)
			yield metadata_process

	# NOTE(review): the def line of the following method (the main
	# run() driver) is elided in this excerpt.
		portdb = self._portdb
		from portage.cache.cache_errors import CacheError
		# Collect the existing cache keys per tree so stale entries
		# can be pruned after regeneration.
		for mytree in portdb.porttrees:
			# try: (elided in this excerpt)
			dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
			except CacheError, e:
				portage.writemsg("Error listing cache entries for " + \
					"'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
		# Drive the poll loop until all metadata jobs complete.
		while self._schedule():
		# Any cpv still present in a tree is not dead.
		for y in self._valid_pkgs:
			for mytree in portdb.porttrees:
				if portdb.findname2(y, mytree=mytree)[0]:
					dead_nodes[mytree].discard(y)
		# Delete cache entries for ebuilds that no longer exist.
		for mytree, nodes in dead_nodes.iteritems():
			auxdb = portdb.auxdb[mytree]
			# (per-node deletion loop elided in this excerpt)
			except (KeyError, CacheError):

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			# try: (elided in this excerpt)
			metadata_process = self._process_iter.next()
			except StopIteration:
			# Hand the process our scheduler hooks and start it.
			metadata_process.scheduler = self._sched_iface
			metadata_process.addExitListener(self._metadata_exit)
			metadata_process.start()

	def _metadata_exit(self, metadata_process):
		"""Exit listener: drop failed cpvs so their cache isn't kept."""
		if metadata_process.returncode != os.EX_OK:
			self._valid_pkgs.discard(metadata_process.cpv)
			portage.writemsg("Error processing %s, continuing...\n" % \
				(metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		# Exit status of the failed unmerge, taken from the first
		# positional argument.
		# NOTE(review): an "if pargs:" guard appears to be elided in
		# this excerpt; as shown this raises IndexError with no args.
		self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Uninstall packages for the "unmerge", "prune" and "clean" actions.

	NOTE(review): this excerpt is truncated -- numerous control-flow
	lines (try/except/else/finally headers, continue/return
	statements) are elided; they are marked with comments below.

	@param root_config: RootConfig of the ROOT to operate on
	@param myopts: parsed emerge command line options
	@param unmerge_action: one of "unmerge", "prune" or "clean"
	@param unmerge_files: package arguments from the command line
	@param ldpath_mtimes: mtime data used for env updates after unmerge
	@param raise_on_error: raise UninstallFailure instead of exiting
		when portage.unmerge() fails
	@param scheduler: optional scheduler passed through to
		portage.unmerge()
	"""
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()

	# Metadata keys kept in the vardb auxiliary cache, used below to
	# build Package instances for installed packages.
	db_keys = list(vartree.dbapi._aux_cache_keys)

	# (nested helper "_pkg(cpv)" -- its def line and the cache-miss
	# guard are elided in this excerpt)
		pkg = pkg_cache.get(cpv)
		# cache miss: build an installed Package from vardb metadata
			pkg = Package(cpv=cpv, installed=True,
				metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	# try: (elided in this excerpt)
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
	# Take the vdb lock only when we can actually write there.
	if os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)
	# Expand the system set: for a virtual, keep the concrete provider
	# only when it is unambiguous (exactly one installed provider).
	realsyslist = sets["system"].getAtoms()
	for x in realsyslist:
		mycp = portage.dep_getkey(x)
		if mycp in settings.getvirtuals():
			for provider in settings.getvirtuals()[mycp]:
				if vartree.dbapi.match(provider):
					providers.append(provider)
			if len(providers) == 1:
				syslist.extend(providers)
		# else: (elided in this excerpt)
			syslist.append(mycp)

	mysettings = portage.config(clone=settings)

	if not unmerge_files:
		if unmerge_action == "unmerge":
			print bold("emerge unmerge") + " can only be used with specific package names"
			# (return elided in this excerpt)

	localtree = vartree
	# process all arguments and add all
	# valid db entries to candidate_catpkgs
	# if global_unmerge: (branch header elided in this excerpt)
		if not unmerge_files:
			candidate_catpkgs.extend(vartree.dbapi.cp_all())
	# else: (elided in this excerpt)
		#we've got command-line arguments
		if not unmerge_files:
			print "\nNo packages to unmerge have been provided.\n"
		for x in unmerge_files:
			arg_parts = x.split('/')
			if x[0] not in [".","/"] and \
				arg_parts[-1][-7:] != ".ebuild":
				#possible cat/pkg or dep; treat as such
				candidate_catpkgs.append(x)
			elif unmerge_action in ["prune","clean"]:
				print "\n!!! Prune and clean do not accept individual" + \
					" ebuilds as arguments;\n skipping.\n"
			# else: (elided in this excerpt)
				# it appears that the user is specifying an installed
				# ebuild and we're in "unmerge" mode, so it's ok.
				if not os.path.exists(x):
					print "\n!!! The path '"+x+"' doesn't exist.\n"

				absx = os.path.abspath(x)
				sp_absx = absx.split("/")
				if sp_absx[-1][-7:] == ".ebuild":
					# (removal of the trailing .ebuild component is
					# elided in this excerpt)
					absx = "/".join(sp_absx)

				sp_absx_len = len(sp_absx)

				vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
				vdb_len = len(vdb_path)

				sp_vdb = vdb_path.split("/")
				sp_vdb_len = len(sp_vdb)

				if not os.path.exists(absx+"/CONTENTS"):
					print "!!! Not a valid db dir: "+str(absx)

				if sp_absx_len <= sp_vdb_len:
					# The Path is shorter... so it can't be inside the vdb.
					print "\n!!!",x,"cannot be inside "+ \
						vdb_path+"; aborting.\n"

				for idx in range(0,sp_vdb_len):
					if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
						print "\n!!!", x, "is not inside "+\
							vdb_path+"; aborting.\n"

				print "="+"/".join(sp_absx[sp_vdb_len:])
				candidate_catpkgs.append(
					"="+"/".join(sp_absx[sp_vdb_len:]))

	# (newline setup partially elided in this excerpt)
	if (not "--quiet" in myopts):
	if settings["ROOT"] != "/":
		writemsg_level(darkgreen(newline+ \
			">>> Using system located in ROOT tree %s\n" % \
			# (closing arguments elided in this excerpt)
	if (("--pretend" in myopts) or ("--ask" in myopts)) and \
		not ("--quiet" in myopts):
		writemsg_level(darkgreen(newline+\
			">>> These are the packages that would be unmerged:\n"))

	# Preservation of order is required for --depclean and --prune so
	# that dependencies are respected. Use all_selected to eliminate
	# duplicate packages since the same package may be selected by
	# multiple atoms.
	all_selected = set()
	for x in candidate_catpkgs:
		# cycle through all our candidate deps and determine
		# what will and will not get unmerged
		# try: (elided in this excerpt)
			mymatch = vartree.dbapi.match(x)
		except portage.exception.AmbiguousPackageName, errpkgs:
			print "\n\n!!! The short ebuild name \"" + \
				x + "\" is ambiguous. Please specify"
			print "!!! one of the following fully-qualified " + \
				"ebuild names instead:\n"
			for i in errpkgs[0]:
				print " " + green(i)

		if not mymatch and x[0] not in "<>=~":
			mymatch = localtree.dep_match(x)
		# if not mymatch: (guard elided in this excerpt)
			portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
				(x, unmerge_action), noiselevel=-1)
		# Each candidate arg gets its own protected/selected/omitted
		# bucket.  (the pkgmap.append( opening is elided in excerpt)
			{"protected": set(), "selected": set(), "omitted": set()})
		mykey = len(pkgmap) - 1
		if unmerge_action=="unmerge":
			# (the "for y in mymatch:" loop header is elided)
				if y not in all_selected:
					pkgmap[mykey]["selected"].add(y)
					all_selected.add(y)
		elif unmerge_action == "prune":
			if len(mymatch) == 1:
				# (continue elided -- nothing to prune with one match)
			# Keep only the best version; select the rest for removal.
			best_version = mymatch[0]
			best_slot = vartree.getslot(best_version)
			best_counter = vartree.dbapi.cpv_counter(best_version)
			for mypkg in mymatch[1:]:
				myslot = vartree.getslot(mypkg)
				mycounter = vartree.dbapi.cpv_counter(mypkg)
				if (myslot == best_slot and mycounter > best_counter) or \
					mypkg == portage.best([mypkg, best_version]):
					if myslot == best_slot:
						if mycounter < best_counter:
							# On slot collision, keep the one with the
							# highest counter since it is the most
							# recently installed.
					best_version = mypkg
					best_counter = mycounter
			pkgmap[mykey]["protected"].add(best_version)
			pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
				if mypkg != best_version and mypkg not in all_selected)
			all_selected.update(pkgmap[mykey]["selected"])
		# else:
			# unmerge_action == "clean"
			# Group matches by slot; within each slot the most
			# recently installed (highest counter) is protected.
			for mypkg in mymatch:
				if unmerge_action == "clean":
					myslot = localtree.getslot(mypkg)
				# else: (elided in this excerpt)
					# since we're pruning, we don't care about slots
					# and put all the pkgs in together
				if myslot not in slotmap:
					slotmap[myslot] = {}
				slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

			for myslot in slotmap:
				counterkeys = slotmap[myslot].keys()
				if not counterkeys:
				# (counterkeys.sort() elided in this excerpt)
				pkgmap[mykey]["protected"].add(
					slotmap[myslot][counterkeys[-1]])
				del counterkeys[-1]
				#be pretty and get them in order of merge:
				for ckey in counterkeys:
					mypkg = slotmap[myslot][ckey]
					if mypkg not in all_selected:
						pkgmap[mykey]["selected"].add(mypkg)
						all_selected.add(mypkg)
				# ok, now the last-merged package
				# is protected, and the rest are selected
	numselected = len(all_selected)
	if global_unmerge and not numselected:
		portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")

	if not numselected:
		portage.writemsg_stdout(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
	# finally: release the vdb lock taken above (structure elided)
		vartree.dbapi.flush_cache()
		portage.locks.unlockdir(vdb_lock)

	# Never let emerge unmerge portage itself outside of "clean".
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			# It could have been uninstalled
			# by a concurrent process.
			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
					"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

	numselected = len(all_selected)
	if not numselected:
		# writemsg_level( (opening elided in this excerpt)
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")

	# Unmerge order only matters in some cases
	# When order doesn't matter, merge per-cp buckets together so the
	# display is grouped.  (the "if not ordered:" / per-d loop headers
	# are elided in this excerpt)
		selected = d["selected"]
		cp = portage.cpv_getkey(iter(selected).next())
		cp_dict = unordered.get(cp)
		if cp_dict is None:
			unordered[cp] = cp_dict
		for k, v in d.iteritems():
			cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	# Display phase: show selected/protected/omitted per package.
	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		# if not quiet: (elided in this excerpt)
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		# else:
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			# if not quiet: (elided in this excerpt)
				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(portage.pkgcmp)
				for pn, ver, rev in sorted_pkgs:
					# (the rev == "r0" branch is elided in this excerpt)
						myversion = ver + "-" + rev
					if mytype == "selected":
						# (writemsg_level( opening elided)
							colorize("UNMERGE_WARN", myversion + " "),
						# else branch:
							colorize("GOOD", myversion + " "), noiselevel=-1)
			# else:
				writemsg_level("none ", noiselevel=-1)
			# if not quiet: (elided in this excerpt)
				writemsg_level("\n", noiselevel=-1)
		# if quiet: (elided in this excerpt)
			writemsg_level("\n", noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True

	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")

			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				# if raise_on_error: (elided in this excerpt)
					raise UninstallFailure(retval)
			# else: success path (structure elided in this excerpt)
				if clean_world and hasattr(sets["world"], "cleanPackage"):
					sets["world"].cleanPackage(vartree.dbapi, y)
				emergelog(xterm_titles, " >>> unmerge success: "+y)

	# Also drop nested set references from the world file.
	if clean_world and hasattr(sets["world"], "remove"):
		for s in root_config.setconfig.active:
			sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	"""
	Regenerate the GNU info directory index ("dir" files) for any info
	directory whose mtime changed since the last run, using
	/usr/bin/install-info, and report a per-file error summary.

	NOTE(review): several control-flow lines (loop headers, continue
	statements, try headers) are elided in this excerpt.

	@param root: filesystem root to which infodirs are relative
	@param infodirs: candidate info directory paths (from INFOPATH/INFODIR)
	@param prev_mtimes: mtime cache from the previous run; updated in place
	@param retval: emerge's exit status (unused in the visible lines)
	"""
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		# Collect only the dirs whose mtime changed since last time.
		# (the "for z in infodirs:" loop header is elided in excerpt)
			inforoot=normpath(root+z)
			if os.path.isdir(inforoot):
				infomtime = long(os.stat(inforoot).st_mtime)
				if inforoot not in prev_mtimes or \
					prev_mtimes[inforoot] != infomtime:
					regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
		# else:
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			dir_extensions = ("", ".gz", ".bz2")
			for inforoot in regen_infodirs:
				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):

				file_list = os.listdir(inforoot)
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					# Skip hidden entries and subdirectories.
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
					# Skip the dir index files themselves.
					if x.startswith("dir"):
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
					if processed_count == 0:
						# Move any existing dir index aside so that
						# install-info regenerates it from scratch.
						for ext in dir_extensions:
							# try: (elided in this excerpt)
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError, e:
								# Missing variants are fine; anything
								# else is a real error.
								if e.errno != errno.ENOENT:
					processed_count += 1

					myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
					existsstr="already exists, for file `"
					if re.search(existsstr,myso):
						# Already exists... Don't increment the count for this.
					elif myso[:44]=="install-info: warning: no info dir entry in ":
						# This info file doesn't contain a DIR-header: install-info produces this
						# (harmless) warning (the --quiet switch doesn't seem to work).
						# Don't increment the count for this.
					# else: a genuine install-info failure
						badcount=badcount+1
						errmsg += myso + "\n"

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
						# try: (elided in this excerpt)
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError, e:
							if e.errno != errno.ENOENT:

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
					# try: (elided in this excerpt)
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

			# Summary: error variant when badcount is non-zero.
			# (the "if badcount:" guard is elided in this excerpt)
				out.eerror("Processed %d info files; %d errors." % \
					(icount, badcount))
				writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
			# else:
				out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	"""
	Print a notice for each repository that has unread news items, and
	point the user at 'eselect news' when any were found.
	"""
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Only update the unread-item tracking when not in --pretend mode.
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
		# if unreadItems: (guard elided in this excerpt)
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print colorize("WARN", " * IMPORTANT:"),
			print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.

	@rtype: bool
	@returns: True if messages were shown, False otherwise.
	"""
	messages_shown = False
	# The excerpt's try/except/else structure was broken (headers were
	# missing); restored so the ImportError fallback works as documented.
	try:
		from portage.elog import mod_echo
	except ImportError:
		pass # happens during downgrade to a version without the module
	else:
		messages_shown = bool(mod_echo._items)
		mod_echo.finalize()
	return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	1. Update Config Files
	2. Display preserved libs warnings

	@param root_config: RootConfig whose trees/settings are used
	@param myopts: parsed emerge options
	@param trees: A dictionary mapping each ROOT to it's package databases
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	@returns:
	1. Calls sys.exit(retval)
	"""
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
	# else: (elided in this excerpt)
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	# Flush queued elog mod_echo output before our own notices.
	_flush_elog_mod_echo()

	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if counter_hash is not None and \
		counter_hash == vardbapi._counter_hash():
		# If vdb state has not changed then there's nothing else to do.
		# (early exit elided in this excerpt)

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	# Hold the vdb lock while the info index is refreshed, except in
	# --pretend mode or when the vdb is not writable.
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

	# (try/finally structure elided in this excerpt)
		if "noinfo" not in settings.features:
			chk_updated_info_files(target_root,
				infodirs, info_mtimes, retval)
		# finally:
			portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
def chk_updated_cfg_files(target_root, config_protect):
	"""
	Scan each CONFIG_PROTECT path for pending ._cfg????_* update files
	(via `find`) and tell the user how many config files need updating.

	NOTE(review): several guard lines (continue statements, try/else
	headers) are elided in this excerpt.
	"""
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
			# from `find` commands.  (continue elided in excerpt)
		# try: (elided in this excerpt)
			mymode = os.lstat(x).st_mode

		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
			real_mode = os.stat(x).st_mode
			if stat.S_ISDIR(real_mode):
				# (mymode = real_mode elided in this excerpt)

		if stat.S_ISDIR(mymode):
			# Recursive scan, pruning hidden directories.
			mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
		# else: single protected file -- match just its own name
			mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
				os.path.split(x.rstrip(os.path.sep))
		mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
		# Non-zero find status: report it.  (guard elided in excerpt)
			sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
			# Show the error message alone, sending stdout to /dev/null.
			os.system(mycommand + " 1>/dev/null")
		# else:
			files = a[1].split('\0')
			# split always produces an empty string as the last element
			if files and not files[-1]:
				# (del files[-1] elided in this excerpt)
			# if files: (guard elided in this excerpt)
				print "\n"+colorize("WARN", " * IMPORTANT:"),
				if stat.S_ISDIR(mymode):
					print "%d config files in '%s' need updating." % \
					# (format arguments elided in this excerpt)
				# else:
					print "config file '%s' needs updating." % x

	# Footer shown when at least one dir had pending updates.
	# (the guard is elided in this excerpt)
		print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
			" section of the " + bold("emerge")
		print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	update=False):
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: path to news items relative to the repository root
	@type NEWS_PATH: str
	@param UNREAD_PATH: path where unread-news tracking files live
	@type NEWS_PATH: str
	@param repo_id: repository name to check news for
	@type repo_id: str
	@param update: when True, refresh the unread-item tracking on disk
	@type update: bool
	@rtype: Integer
	@returns:
	1. The number of unread but relevant news items.
	"""
	# The def signature was cut mid-line in the excerpt (missing the
	# "update=False):" continuation); restored here.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""
	Insert "category/" into an atom string directly before the package
	name, preserving any leading operator (e.g. ">=", "~").

	@param atom: a package atom, possibly lacking a category
	@param category: the category name to insert
	@rtype: str or None
	@returns: the atom with the category inserted, or None when no
		word character (i.e. no package name) is found in the atom.
	"""
	alphanum = re.search(r'\w', atom)
	# The excerpt was missing the no-match branch and the return;
	# restored so callers get None for atoms without a package name.
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		ret = None
	return ret
def is_valid_package_atom(x):
	"""
	Return whether x is a valid package atom, tolerating a missing
	category by temporarily inserting a dummy "cat/" prefix before the
	package name (preserving any leading operator) prior to validation.
	"""
	# The excerpt was missing the guard lines; restored: only atoms
	# without an explicit category get the dummy prefix.
	if "/" not in x:
		alphanum = re.search(r'\w', x)
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
def show_blocker_docs_link():
	"""Point the user at the handbook section explaining blockers."""
	# The blank-line prints elided in the excerpt are restored.
	# Parenthesized single-argument print behaves identically as a
	# Python 2 statement and a Python 3 function call.
	print("")
	print("For more information about " + bad("Blocked Packages") + ", please refer to the following")
	print("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):")
	print("")
	print("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked")
	print("")
def show_mask_docs():
	"""Tell the user where to read about masked packages."""
	doc_pointer = (
		"For more information, see the MASKED PACKAGES section in the emerge",
		"man page or refer to the Gentoo Handbook.",
	)
	for doc_line in doc_pointer:
		print(doc_line)
11288 def action_sync(settings, trees, mtimedb, myopts, myaction):
11289 xterm_titles = "notitles" not in settings.features
11290 emergelog(xterm_titles, " === sync")
11291 myportdir = settings.get("PORTDIR", None)
11292 out = portage.output.EOutput()
11294 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
11296 if myportdir[-1]=="/":
11297 myportdir=myportdir[:-1]
11298 if not os.path.exists(myportdir):
11299 print ">>>",myportdir,"not found, creating it."
11300 os.makedirs(myportdir,0755)
11301 syncuri = settings.get("SYNC", "").strip()
11303 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11304 noiselevel=-1, level=logging.ERROR)
11308 updatecache_flg = False
11309 if myaction == "metadata":
11310 print "skipping sync"
11311 updatecache_flg = True
11312 elif syncuri[:8]=="rsync://":
11313 if not os.path.exists("/usr/bin/rsync"):
11314 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11315 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11320 import shlex, StringIO
11321 if settings["PORTAGE_RSYNC_OPTS"] == "":
11322 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11323 rsync_opts.extend([
11324 "--recursive", # Recurse directories
11325 "--links", # Consider symlinks
11326 "--safe-links", # Ignore links outside of tree
11327 "--perms", # Preserve permissions
11328 "--times", # Preserive mod times
11329 "--compress", # Compress the data transmitted
11330 "--force", # Force deletion on non-empty dirs
11331 "--whole-file", # Don't do block transfers, only entire files
11332 "--delete", # Delete files that aren't in the master tree
11333 "--stats", # Show final statistics about what was transfered
11334 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11335 "--exclude=/distfiles", # Exclude distfiles from consideration
11336 "--exclude=/local", # Exclude local from consideration
11337 "--exclude=/packages", # Exclude packages from consideration
11341 # The below validation is not needed when using the above hardcoded
11344 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11345 lexer = shlex.shlex(StringIO.StringIO(
11346 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11347 lexer.whitespace_split = True
11348 rsync_opts.extend(lexer)
11351 for opt in ("--recursive", "--times"):
11352 if opt not in rsync_opts:
11353 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11354 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11355 rsync_opts.append(opt)
11357 for exclude in ("distfiles", "local", "packages"):
11358 opt = "--exclude=/%s" % exclude
11359 if opt not in rsync_opts:
11360 portage.writemsg(yellow("WARNING:") + \
11361 " adding required option %s not included in " % opt + \
11362 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11363 rsync_opts.append(opt)
11365 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11366 def rsync_opt_startswith(opt_prefix):
11367 for x in rsync_opts:
11368 if x.startswith(opt_prefix):
11372 if not rsync_opt_startswith("--timeout="):
11373 rsync_opts.append("--timeout=%d" % mytimeout)
11375 for opt in ("--compress", "--whole-file"):
11376 if opt not in rsync_opts:
11377 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11378 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11379 rsync_opts.append(opt)
11381 if "--quiet" in myopts:
11382 rsync_opts.append("--quiet") # Shut up a lot
11384 rsync_opts.append("--verbose") # Print filelist
11386 if "--verbose" in myopts:
11387 rsync_opts.append("--progress") # Progress meter for each file
11389 if "--debug" in myopts:
11390 rsync_opts.append("--checksum") # Force checksum on all files
11392 # Real local timestamp file.
11393 servertimestampfile = os.path.join(
11394 myportdir, "metadata", "timestamp.chk")
11396 content = portage.util.grabfile(servertimestampfile)
11400 mytimestamp = time.mktime(time.strptime(content[0],
11401 "%a, %d %b %Y %H:%M:%S +0000"))
11402 except (OverflowError, ValueError):
11407 rsync_initial_timeout = \
11408 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11410 rsync_initial_timeout = 15
11413 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11414 except SystemExit, e:
11415 raise # Needed else can't exit
11417 maxretries=3 #default number of retries
11420 user_name, hostname, port = re.split(
11421 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11424 if user_name is None:
11426 updatecache_flg=True
11427 all_rsync_opts = set(rsync_opts)
11428 lexer = shlex.shlex(StringIO.StringIO(
11429 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
11430 lexer.whitespace_split = True
11431 extra_rsync_opts = list(lexer)
11433 all_rsync_opts.update(extra_rsync_opts)
11434 family = socket.AF_INET
11435 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
11436 family = socket.AF_INET
11437 elif socket.has_ipv6 and \
11438 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
11439 family = socket.AF_INET6
11441 SERVER_OUT_OF_DATE = -1
11442 EXCEEDED_MAX_RETRIES = -2
11448 for addrinfo in socket.getaddrinfo(
11449 hostname, None, family, socket.SOCK_STREAM):
11450 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
11451 # IPv6 addresses need to be enclosed in square brackets
11452 ips.append("[%s]" % addrinfo[4][0])
11454 ips.append(addrinfo[4][0])
11455 from random import shuffle
11457 except SystemExit, e:
11458 raise # Needed else can't exit
11459 except Exception, e:
11460 print "Notice:",str(e)
11465 dosyncuri = syncuri.replace(
11466 "//" + user_name + hostname + port + "/",
11467 "//" + user_name + ips[0] + port + "/", 1)
11468 except SystemExit, e:
11469 raise # Needed else can't exit
11470 except Exception, e:
11471 print "Notice:",str(e)
11475 if "--ask" in myopts:
11476 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
11481 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
11482 if "--quiet" not in myopts:
11483 print ">>> Starting rsync with "+dosyncuri+"..."
11485 emergelog(xterm_titles,
11486 ">>> Starting retry %d of %d with %s" % \
11487 (retries,maxretries,dosyncuri))
11488 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
11490 if mytimestamp != 0 and "--quiet" not in myopts:
11491 print ">>> Checking server timestamp ..."
11493 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
11495 if "--debug" in myopts:
11498 exitcode = os.EX_OK
11499 servertimestamp = 0
11500 # Even if there's no timestamp available locally, fetch the
11501 # timestamp anyway as an initial probe to verify that the server is
11502 # responsive. This protects us from hanging indefinitely on a
11503 # connection attempt to an unresponsive server which rsync's
11504 # --timeout option does not prevent.
11506 # Temporary file for remote server timestamp comparison.
11507 from tempfile import mkstemp
11508 fd, tmpservertimestampfile = mkstemp()
11510 mycommand = rsynccommand[:]
11511 mycommand.append(dosyncuri.rstrip("/") + \
11512 "/metadata/timestamp.chk")
11513 mycommand.append(tmpservertimestampfile)
11517 def timeout_handler(signum, frame):
11518 raise portage.exception.PortageException("timed out")
11519 signal.signal(signal.SIGALRM, timeout_handler)
11520 # Timeout here in case the server is unresponsive. The
11521 # --timeout rsync option doesn't apply to the initial
11522 # connection attempt.
11523 if rsync_initial_timeout:
11524 signal.alarm(rsync_initial_timeout)
11526 mypids.extend(portage.process.spawn(
11527 mycommand, env=settings.environ(), returnpid=True))
11528 exitcode = os.waitpid(mypids[0], 0)[1]
11529 content = portage.grabfile(tmpservertimestampfile)
11531 if rsync_initial_timeout:
11534 os.unlink(tmpservertimestampfile)
11537 except portage.exception.PortageException, e:
11541 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
11542 os.kill(mypids[0], signal.SIGTERM)
11543 os.waitpid(mypids[0], 0)
11544 # This is the same code rsync uses for timeout.
11547 if exitcode != os.EX_OK:
11548 if exitcode & 0xff:
11549 exitcode = (exitcode & 0xff) << 8
11551 exitcode = exitcode >> 8
11553 portage.process.spawned_pids.remove(mypids[0])
11556 servertimestamp = time.mktime(time.strptime(
11557 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
11558 except (OverflowError, ValueError):
11560 del mycommand, mypids, content
11561 if exitcode == os.EX_OK:
11562 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
11563 emergelog(xterm_titles,
11564 ">>> Cancelling sync -- Already current.")
11567 print ">>> Timestamps on the server and in the local repository are the same."
11568 print ">>> Cancelling all further sync action. You are already up to date."
11570 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11574 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
11575 emergelog(xterm_titles,
11576 ">>> Server out of date: %s" % dosyncuri)
11579 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
11581 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11584 exitcode = SERVER_OUT_OF_DATE
11585 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
11587 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
11588 exitcode = portage.process.spawn(mycommand,
11589 env=settings.environ())
11590 if exitcode in [0,1,3,4,11,14,20,21]:
11592 elif exitcode in [1,3,4,11,14,20,21]:
11595 # Code 2 indicates protocol incompatibility, which is expected
11596 # for servers with protocol < 29 that don't support
11597 # --prune-empty-directories. Retry for a server that supports
11598 # at least rsync protocol version 29 (>=rsync-2.6.4).
11603 if retries<=maxretries:
11604 print ">>> Retrying..."
11609 updatecache_flg=False
11610 exitcode = EXCEEDED_MAX_RETRIES
11614 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
11615 elif exitcode == SERVER_OUT_OF_DATE:
11617 elif exitcode == EXCEEDED_MAX_RETRIES:
11619 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
11624 msg.append("Rsync has reported that there is a syntax error. Please ensure")
11625 msg.append("that your SYNC statement is proper.")
11626 msg.append("SYNC=" + settings["SYNC"])
11628 msg.append("Rsync has reported that there is a File IO error. Normally")
11629 msg.append("this means your disk is full, but can be caused by corruption")
11630 msg.append("on the filesystem that contains PORTDIR. Please investigate")
11631 msg.append("and try again after the problem has been fixed.")
11632 msg.append("PORTDIR=" + settings["PORTDIR"])
11634 msg.append("Rsync was killed before it finished.")
11636 msg.append("Rsync has not successfully finished. It is recommended that you keep")
11637 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
11638 msg.append("to use rsync due to firewall or other restrictions. This should be a")
11639 msg.append("temporary problem unless complications exist with your network")
11640 msg.append("(and possibly your system's filesystem) configuration.")
11644 elif syncuri[:6]=="cvs://":
11645 if not os.path.exists("/usr/bin/cvs"):
11646 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
11647 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
11649 cvsroot=syncuri[6:]
11650 cvsdir=os.path.dirname(myportdir)
11651 if not os.path.exists(myportdir+"/CVS"):
11653 print ">>> Starting initial cvs checkout with "+syncuri+"..."
11654 if os.path.exists(cvsdir+"/gentoo-x86"):
11655 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
11658 os.rmdir(myportdir)
11660 if e.errno != errno.ENOENT:
11662 "!!! existing '%s' directory; exiting.\n" % myportdir)
11665 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
11666 print "!!! cvs checkout error; exiting."
11668 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
11671 print ">>> Starting cvs update with "+syncuri+"..."
11672 retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
11673 myportdir, settings, free=1)
11674 if retval != os.EX_OK:
11676 dosyncuri = syncuri
11678 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
11679 noiselevel=-1, level=logging.ERROR)
11682 if updatecache_flg and \
11683 myaction != "metadata" and \
11684 "metadata-transfer" not in settings.features:
11685 updatecache_flg = False
11687 # Reload the whole config from scratch.
11688 settings, trees, mtimedb = load_emerge_config(trees=trees)
11689 root_config = trees[settings["ROOT"]]["root_config"]
11690 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11692 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
11693 action_metadata(settings, portdb, myopts)
11695 if portage._global_updates(trees, mtimedb["updates"]):
11697 # Reload the whole config from scratch.
11698 settings, trees, mtimedb = load_emerge_config(trees=trees)
11699 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11700 root_config = trees[settings["ROOT"]]["root_config"]
11702 mybestpv = portdb.xmatch("bestmatch-visible",
11703 portage.const.PORTAGE_PACKAGE_ATOM)
11704 mypvs = portage.best(
11705 trees[settings["ROOT"]]["vartree"].dbapi.match(
11706 portage.const.PORTAGE_PACKAGE_ATOM))
11708 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
11710 if myaction != "metadata":
11711 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
11712 retval = portage.process.spawn(
11713 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
11714 dosyncuri], env=settings.environ())
11715 if retval != os.EX_OK:
11716 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
11718 if(mybestpv != mypvs) and not "--quiet" in myopts:
11720 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
11721 print red(" * ")+"that you update portage now, before any other packages are updated."
11723 print red(" * ")+"To update portage, run 'emerge portage' now."
11726 display_news_notification(root_config, myopts)
# Implements `emerge --metadata`: transfer pregenerated metadata from
# ${PORTDIR}/metadata/cache into portdb's local auxdb cache.
# NOTE(review): this excerpt has elided lines (embedded line numbers jump);
# comments below describe only the code that is visible here.
11729 def action_metadata(settings, portdb, myopts):
11730 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Relax umask (octal 0002) so generated cache files are group-writable;
# restored at the end of the function.
11731 old_umask = os.umask(0002)
11732 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate when PORTAGE_DEPCACHEDIR points at a primary system
# directory — a cache regen there could clobber the system.
11733 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
11734 "/lib", "/opt", "/proc", "/root", "/sbin",
11735 "/sys", "/tmp", "/usr", "/var"]:
11736 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
11737 "ROOT DIRECTORY ON YOUR SYSTEM."
11738 print >> sys.stderr, \
11739 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
# Body elided here in this excerpt — presumably creates cachedir; confirm
# against the full source.
11741 if not os.path.exists(cachedir):
# ec: eclass cache for the tree; cm: the read-only metadata/cache source
# module selected via the portdbapi.metadbmodule setting.
11744 ec = portage.eclass_cache.cache(portdb.porttree_root)
11745 myportdir = os.path.realpath(settings["PORTDIR"])
11746 cm = settings.load_best_module("portdbapi.metadbmodule")(
11747 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
11749 from portage.cache import util
# Progress reporter: wraps quiet_mirroring to print a percentage while
# iterating every cpv in the tree.
11751 class percentage_noise_maker(util.quiet_mirroring):
11752 def __init__(self, dbapi):
11754 self.cp_all = dbapi.cp_all()
11755 l = len(self.cp_all)
11756 self.call_update_min = 100000000
# min_cp_all: number of category/package entries per 1% of progress.
11757 self.min_cp_all = l/100.0
11761 def __iter__(self):
11762 for x in self.cp_all:
11764 if self.count > self.min_cp_all:
11765 self.call_update_min = 0
11767 for y in self.dbapi.cp_list(x):
11769 self.call_update_mine = 0
11771 def update(self, *arg):
# pstr holds the integer percentage shown so far; backspace over the old
# value before writing the new one.
11772 try: self.pstr = int(self.pstr) + 1
11773 except ValueError: self.pstr = 1
11774 sys.stdout.write("%s%i%%" % \
11775 ("\b" * (len(str(self.pstr))+1), self.pstr))
11777 self.call_update_min = 10000000
11779 def finish(self, *arg):
11780 sys.stdout.write("\b\b\b\b100%\n")
# Quiet mode: plain generator over all cpvs, no progress output.
11783 if "--quiet" in myopts:
11784 def quicky_cpv_generator(cp_all_list):
11785 for x in cp_all_list:
11786 for y in portdb.cp_list(x):
11788 source = quicky_cpv_generator(portdb.cp_all())
11789 noise_maker = portage.cache.util.quiet_mirroring()
11791 noise_maker = source = percentage_noise_maker(portdb)
# Perform the actual transfer from the metadata/cache module into auxdb.
11792 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
11793 eclass_cache=ec, verbose_instance=noise_maker)
11796 os.umask(old_umask)
# Implements `emerge --regen`: rebuild metadata cache entries from ebuilds,
# optionally parallelized via max_jobs/max_load.
# NOTE(review): lines are elided in this excerpt (embedded numbering jumps).
11798 def action_regen(settings, portdb, max_jobs, max_load):
11799 xterm_titles = "notitles" not in settings.features
11800 emergelog(xterm_titles, " === regen")
11801 #regenerate cache entries
11802 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block waiting for input.
11804 os.close(sys.stdin.fileno())
11805 except SystemExit, e:
11806 raise # Needed else can't exit
11811 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
11814 portage.writemsg_stdout("done!\n")
# Implements `emerge --config <atom>`: run the pkg_config phase of a single
# installed package's ebuild.
# NOTE(review): lines are elided in this excerpt (embedded numbering jumps).
11816 def action_config(settings, trees, myopts, myfiles):
# Exactly one package atom is accepted.
11817 if len(myfiles) != 1:
11818 print red("!!! config can only take a single package atom at this time\n")
11820 if not is_valid_package_atom(myfiles[0]):
11821 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
11823 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
11824 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
11828 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
11829 except portage.exception.AmbiguousPackageName, e:
11830 # Multiple matches thrown from cpv_expand
11833 print "No packages found.\n"
# More than one installed version matched: interactively select with --ask,
# otherwise list candidates and bail.
11835 elif len(pkgs) > 1:
11836 if "--ask" in myopts:
11838 print "Please select a package to configure:"
11842 options.append(str(idx))
11843 print options[-1]+") "+pkg
# "X" is the abort choice appended after the numbered entries.
11845 options.append("X")
11846 idx = userquery("Selection?", options)
11849 pkg = pkgs[int(idx)-1]
11851 print "The following packages available:"
11854 print "\nPlease use a specific atom or the --ask option."
11860 if "--ask" in myopts:
11861 if userquery("Ready to configure "+pkg+"?") == "No":
11864 print "Configuring pkg..."
11866 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
11867 mysettings = portage.config(clone=settings)
11868 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
11869 debug = mysettings.get("PORTAGE_DEBUG") == "1"
11870 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
# NOTE(review): `== 1` compares a string to an int, so this debug flag is
# always False; line 11869 above uses the correct `== "1"` comparison.
# Looks like it should pass the `debug` local instead — confirm upstream.
11872 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
11873 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the clean phase to remove the temporary build dir.
11874 if retval == os.EX_OK:
11875 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
11876 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Implements `emerge --info`: print system settings (versions of toolchain
# packages, key make.conf variables, USE flags) and, for any atoms given on
# the command line, the per-package build-time settings that differ from the
# current global configuration.
# NOTE(review): lines are elided in this excerpt (embedded numbering jumps);
# the function is also truncated mid-call at the end of this view.
11879 def action_info(settings, trees, myopts, myfiles):
11880 print getportageversion(settings["PORTDIR"], settings["ROOT"],
11881 settings.profile_path, settings["CHOST"],
11882 trees[settings["ROOT"]]["vartree"].dbapi)
11884 header_title = "System Settings"
# Centered banner between two '=' rules.
11886 print header_width * "="
11887 print header_title.rjust(int(header_width/2 + len(header_title)/2))
11888 print header_width * "="
11889 print "System uname: "+platform.platform(aliased=1)
# Tree timestamp comes from metadata/timestamp.chk in PORTDIR.
11891 lastSync = portage.grabfile(os.path.join(
11892 settings["PORTDIR"], "metadata", "timestamp.chk"))
11893 print "Timestamp of tree:",
# Probe distcc/ccache versions via the deprecated `commands` module
# (Python 2 only); trailing commas keep output on one line.
11899 output=commands.getstatusoutput("distcc --version")
11901 print str(output[1].split("\n",1)[0]),
11902 if "distcc" in settings.features:
11907 output=commands.getstatusoutput("ccache -V")
11909 print str(output[1].split("\n",1)[0]),
11910 if "ccache" in settings.features:
# Report installed versions of key toolchain packages, plus any extras the
# profile lists in profiles/info_pkgs.
11915 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
11916 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
11917 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
11918 myvars = portage.util.unique_array(myvars)
11922 if portage.isvalidatom(x):
11923 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
# catpkgsplit()[1:] yields (pn, ver, rev) triples; sort by version.
11924 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
11925 pkg_matches.sort(portage.pkgcmp)
11927 for pn, ver, rev in pkg_matches:
11929 pkgs.append(ver + "-" + rev)
11933 pkgs = ", ".join(pkgs)
11934 print "%-20s %s" % (x+":", pkgs)
11936 print "%-20s %s" % (x+":", "[NOT VALID]")
11938 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# --verbose dumps every settings key; otherwise only the usual make.conf
# variables plus any listed in profiles/info_vars.
11940 if "--verbose" in myopts:
11941 myvars=settings.keys()
11943 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
11944 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
11945 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
11946 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
11948 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
11950 myvars = portage.util.unique_array(myvars)
11956 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND flags (e.g. video_cards_*) stripped out of
# the plain USE line and shown under their own variables instead.
11958 use = set(settings["USE"].split())
11959 use_expand = settings["USE_EXPAND"].split()
11961 for varname in use_expand:
11962 flag_prefix = varname.lower() + "_"
11963 for f in list(use):
11964 if f.startswith(flag_prefix):
11968 print 'USE="%s"' % " ".join(use),
11969 for varname in use_expand:
11970 myval = settings.get(varname)
11972 print '%s="%s"' % (varname, myval),
11975 unset_vars.append(x)
11977 print "Unset: "+", ".join(unset_vars)
# --debug additionally dumps the CVS id string of each portage submodule.
11980 if "--debug" in myopts:
11981 for x in dir(portage):
11982 module = getattr(portage, x)
11983 if "cvs_id_string" in dir(module):
11984 print "%s: %s" % (str(x), str(module.cvs_id_string))
11986 # See if we can find any packages installed matching the strings
11987 # passed on the command line
11989 vardb = trees[settings["ROOT"]]["vartree"].dbapi
11990 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11992 mypkgs.extend(vardb.match(x))
11994 # If some packages were found...
11996 # Get our global settings (we only print stuff if it varies from
11997 # the current config)
11998 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
11999 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12001 pkgsettings = portage.config(clone=settings)
# Snapshot current global values as sets for easy difference tests.
12003 for myvar in mydesiredvars:
12004 global_vals[myvar] = set(settings.get(myvar, "").split())
12006 # Loop through each package
12007 # Only print settings if they differ from global settings
12008 header_title = "Package Settings"
12009 print header_width * "="
12010 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12011 print header_width * "="
12012 from portage.output import EOutput
12015 # Get all package specific variables
# aux_get returns values in the same order as auxkeys; rebuild a dict of
# token sets keyed by variable name.
12016 auxvalues = vardb.aux_get(pkg, auxkeys)
12018 for i in xrange(len(auxkeys)):
12019 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12021 for myvar in mydesiredvars:
12022 # If the package variable doesn't match the
12023 # current global variable, something has changed
12024 # so set diff_found so we know to print
12025 if valuesmap[myvar] != global_vals[myvar]:
12026 diff_values[myvar] = valuesmap[myvar]
# Restrict USE to flags actually declared in IUSE (defaults filtered).
12027 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12028 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12029 pkgsettings.reset()
12030 # If a matching ebuild is no longer available in the tree, maybe it
12031 # would make sense to compare against the flags for the best
12032 # available version with the same slot?
12034 if portdb.cpv_exists(pkg):
12036 pkgsettings.setcpv(pkg, mydb=mydb)
# Compare the recorded build-time USE against what the current config
# would enable for the same IUSE set.
12037 if valuesmap["IUSE"].intersection(
12038 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12039 diff_values["USE"] = valuesmap["USE"]
12040 # If a difference was found, print the info for
12043 # Print package info
12044 print "%s was built with the following:" % pkg
12045 for myvar in mydesiredvars + ["USE"]:
12046 if myvar in diff_values:
12047 mylist = list(diff_values[myvar])
12049 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Also execute the ebuild's pkg_info phase for each matched package.
12051 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12052 ebuildpath = vardb.findname(pkg)
12053 if not ebuildpath or not os.path.exists(ebuildpath):
12054 out.ewarn("No ebuild found for '%s'" % pkg)
12056 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
# NOTE(review): `== 1` compares a string with an int and is always False
# (same defect as in action_config); `== "1"` is presumably intended.
12057 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12058 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Implements `emerge --search`: run each search term through a `search`
# instance and print the accumulated results.
# NOTE(review): lines are elided in this excerpt (embedded numbering jumps).
12061 def action_search(root_config, myopts, myfiles, spinner):
12063 print "emerge: no search terms provided."
# The search object is configured from command-line options: description
# search, quiet output, and whether binary packages are considered.
12065 searchinstance = search(root_config,
12066 spinner, "--searchdesc" in myopts,
12067 "--quiet" not in myopts, "--usepkg" in myopts,
12068 "--usepkgonly" in myopts)
12069 for mysearch in myfiles:
# Terms may be regular expressions; report malformed patterns per-term.
12071 searchinstance.execute(mysearch)
12072 except re.error, comment:
12073 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12075 searchinstance.output()
# Implements `emerge --depclean` and `emerge --prune`: compute the set of
# installed packages not required by the world/system sets (or, with
# arguments, only among the given atoms), then unmerge them in an order
# that respects inter-package dependencies.
# NOTE(review): this excerpt has many elided lines (embedded numbering
# jumps); comments describe only the visible code.
12077 def action_depclean(settings, trees, ldpath_mtimes,
12078 myopts, action, myfiles, spinner):
12079 # Kill packages that aren't explicitly merged or are required as a
12080 # dependency of another package. World file is explicit.
12082 # Global depclean or prune operations are not very safe when there are
12083 # missing dependencies since it's unknown how badly incomplete
12084 # the dependency graph is, and we might accidentally remove packages
12085 # that should have been pulled into the graph. On the other hand, it's
12086 # relatively safe to ignore missing deps when only asked to remove
12087 # specific packages.
12088 allow_missing_deps = len(myfiles) > 0
# Safety-warning text shown before a global depclean.
12091 msg.append("Depclean may break link level dependencies. Thus, it is\n")
12092 msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
12093 msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
12095 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12096 msg.append("mistakes. Packages that are part of the world set will always\n")
12097 msg.append("be kept. They can be manually added to this set with\n")
12098 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12099 msg.append("package.provided (see portage(5)) will be removed by\n")
12100 msg.append("depclean, even if they are part of the world set.\n")
12102 msg.append("As a safety measure, depclean will not remove any packages\n")
12103 msg.append("unless *all* required dependencies have been resolved. As a\n")
12104 msg.append("consequence, it is often necessary to run %s\n" % \
12105 good("`emerge --update"))
12106 msg.append(good("--newuse --deep world`") + \
12107 " prior to depclean.\n")
# Only show the warning for an argumentless, non-quiet depclean.
12109 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12110 portage.writemsg_stdout("\n")
12112 portage.writemsg_stdout(colorize("WARN", " * ") + x)
12114 xterm_titles = "notitles" not in settings.features
12115 myroot = settings["ROOT"]
12116 root_config = trees[myroot]["root_config"]
12117 getSetAtoms = root_config.setconfig.getSetAtoms
12118 vardb = trees[myroot]["vartree"].dbapi
# The system and world sets define what must be kept.
12120 required_set_names = ("system", "world")
12124 for s in required_set_names:
12125 required_sets[s] = InternalPackageSet(
12126 initial_atoms=getSetAtoms(s))
12129 # When removing packages, use a temporary version of world
12130 # which excludes packages that are intended to be eligible for
12132 world_temp_set = required_sets["world"]
12133 system_set = required_sets["system"]
# Empty system/world sets mean proceeding could remove essentials; warn
# and (unless pretending) give the user a countdown to abort.
12135 if not system_set or not world_temp_set:
12138 writemsg_level("!!! You have no system list.\n",
12139 level=logging.ERROR, noiselevel=-1)
12141 if not world_temp_set:
12142 writemsg_level("!!! You have no world file.\n",
12143 level=logging.WARNING, noiselevel=-1)
12145 writemsg_level("!!! Proceeding is likely to " + \
12146 "break your installation.\n",
12147 level=logging.WARNING, noiselevel=-1)
12148 if "--pretend" not in myopts:
12149 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12151 if action == "depclean":
12152 emergelog(xterm_titles, " >>> depclean")
# Validate and expand the command-line atoms into args_set.
12155 args_set = InternalPackageSet()
12158 if not is_valid_package_atom(x):
12159 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12160 level=logging.ERROR, noiselevel=-1)
12161 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12164 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12165 except portage.exception.AmbiguousPackageName, e:
12166 msg = "The short ebuild name \"" + x + \
12167 "\" is ambiguous. Please specify " + \
12168 "one of the following " + \
12169 "fully-qualified ebuild names instead:"
12170 for line in textwrap.wrap(msg, 70):
12171 writemsg_level("!!! %s\n" % (line,),
12172 level=logging.ERROR, noiselevel=-1)
12174 writemsg_level(" %s\n" % colorize("INFORM", i),
12175 level=logging.ERROR, noiselevel=-1)
12176 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Bail out early when no given atom matches anything installed.
12179 matched_packages = False
12182 matched_packages = True
12184 if not matched_packages:
12185 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build a "remove"-mode depgraph over the installed-package tree.
12189 writemsg_level("\nCalculating dependencies ")
12190 resolver_params = create_depgraph_params(myopts, "remove")
12191 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12192 vardb = resolver.trees[myroot]["vartree"].dbapi
12194 if action == "depclean":
12197 # Pull in everything that's installed but not matched
12198 # by an argument atom since we don't want to clean any
12199 # package if something depends on it.
12201 world_temp_set.clear()
12206 if args_set.findAtomForPackage(pkg) is None:
12207 world_temp_set.add("=" + pkg.cpv)
# On a broken PROVIDE depstring, keep the package (add to temp world)
# after showing the notice — err on the side of not removing.
12209 except portage.exception.InvalidDependString, e:
12210 show_invalid_depstring_notice(pkg,
12211 pkg.metadata["PROVIDE"], str(e))
12213 world_temp_set.add("=" + pkg.cpv)
12216 elif action == "prune":
12218 # Pull in everything that's installed since we don't
12219 # want to prune a package if something depends on it.
12220 world_temp_set.clear()
12221 world_temp_set.update(vardb.cp_all())
12225 # Try to prune everything that's slotted.
12226 for cp in vardb.cp_all():
12227 if len(vardb.cp_list(cp)) > 1:
12230 # Remove atoms from world that match installed packages
12231 # that are also matched by argument atoms, but do not remove
12232 # them if they match the highest installed version.
12235 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12236 if not pkgs_for_cp or pkg not in pkgs_for_cp:
12237 raise AssertionError("package expected in matches: " + \
12238 "cp = %s, cpv = %s matches = %s" % \
12239 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs returns matches sorted ascending, so the last entry is the
# highest installed version — TODO confirm against vardbapi docs.
12241 highest_version = pkgs_for_cp[-1]
12242 if pkg == highest_version:
12243 # pkg is the highest version
12244 world_temp_set.add("=" + pkg.cpv)
12247 if len(pkgs_for_cp) <= 1:
12248 raise AssertionError("more packages expected: " + \
12249 "cp = %s, cpv = %s matches = %s" % \
12250 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12253 if args_set.findAtomForPackage(pkg) is None:
12254 world_temp_set.add("=" + pkg.cpv)
12256 except portage.exception.InvalidDependString, e:
12257 show_invalid_depstring_notice(pkg,
12258 pkg.metadata["PROVIDE"], str(e))
12260 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly trimmed) system/world sets and
# let it complete the dependency graph of everything that must stay.
12264 for s, package_set in required_sets.iteritems():
12265 set_atom = SETPREFIX + s
12266 set_arg = SetArg(arg=set_atom, set=package_set,
12267 root_config=resolver.roots[myroot])
12268 set_args[s] = set_arg
12269 for atom in set_arg.set:
12270 resolver._dep_stack.append(
12271 Dependency(atom=atom, root=myroot, parent=set_arg))
12272 resolver.digraph.add(set_arg, None)
12274 success = resolver._complete_graph()
12275 writemsg_level("\b\b... done!\n")
12277 resolver.display_problems()
# Helper: report dependencies the resolver could not satisfy; truthy
# return aborts the clean unless allow_missing_deps is set.
12282 def unresolved_deps():
12284 unresolvable = set()
12285 for dep in resolver._initially_unsatisfied_deps:
# Only hard (stronger-than-SOFT) deps of real packages block cleaning.
12286 if isinstance(dep.parent, Package) and \
12287 (dep.priority > UnmergeDepPriority.SOFT):
12288 unresolvable.add((dep.atom, dep.parent.cpv))
12290 if not unresolvable:
12293 if unresolvable and not allow_missing_deps:
12294 prefix = bad(" * ")
12296 msg.append("Dependencies could not be completely resolved due to")
12297 msg.append("the following required packages not being installed:")
12299 for atom, parent in unresolvable:
12300 msg.append(" %s pulled in by:" % (atom,))
12301 msg.append(" %s" % (parent,))
12303 msg.append("Have you forgotten to run " + \
12304 good("`emerge --update --newuse --deep world`") + " prior to")
12305 msg.append(("%s? It may be necessary to manually " + \
12306 "uninstall packages that no longer") % action)
12307 msg.append("exist in the portage tree since " + \
12308 "it may not be possible to satisfy their")
12309 msg.append("dependencies. Also, be aware of " + \
12310 "the --with-bdeps option that is documented")
12311 msg.append("in " + good("`man emerge`") + ".")
12312 if action == "prune":
12314 msg.append("If you would like to ignore " + \
12315 "dependencies then use %s." % good("--nodeps"))
12316 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12317 level=logging.ERROR, noiselevel=-1)
12321 if unresolved_deps():
# Work on a copy of the resolver's graph; count the packages it keeps.
12324 graph = resolver.digraph.copy()
12325 required_pkgs_total = 0
12327 if isinstance(node, Package):
12328 required_pkgs_total += 1
# Helper: for --verbose, print which parents pull in a kept package.
12330 def show_parents(child_node):
12331 parent_nodes = graph.parent_nodes(child_node)
12332 if not parent_nodes:
12333 # With --prune, the highest version can be pulled in without any
12334 # real parent since all installed packages are pulled in. In that
12335 # case there's nothing to show here.
12338 for node in parent_nodes:
12339 parent_strs.append(str(getattr(node, "cpv", node)))
12342 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
12343 for parent_str in parent_strs:
12344 msg.append(" %s\n" % (parent_str,))
12346 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Helper: packages not reachable in the kept-graph are removable.
12348 def create_cleanlist():
12349 pkgs_to_remove = []
12351 if action == "depclean":
12357 arg_atom = args_set.findAtomForPackage(pkg)
12358 except portage.exception.InvalidDependString:
12359 # this error has already been displayed by now
12363 if pkg not in graph:
12364 pkgs_to_remove.append(pkg)
12365 elif "--verbose" in myopts:
12370 if pkg not in graph:
12371 pkgs_to_remove.append(pkg)
12372 elif "--verbose" in myopts:
12375 elif action == "prune":
12376 # Prune really uses all installed instead of world. It's not
12377 # a real reverse dependency so don't display it as such.
12378 graph.remove(set_args["world"])
12380 for atom in args_set:
12381 for pkg in vardb.match_pkgs(atom):
12382 if pkg not in graph:
12383 pkgs_to_remove.append(pkg)
12384 elif "--verbose" in myopts:
12387 if not pkgs_to_remove:
12389 ">>> No packages selected for removal by %s\n" % action)
12390 if "--verbose" not in myopts:
12392 ">>> To see reverse dependencies, use %s\n" % \
12394 if action == "prune":
12396 ">>> To ignore dependencies, use %s\n" % \
12399 return pkgs_to_remove
12401 cleanlist = create_cleanlist()
12404 clean_set = set(cleanlist)
12406 # Use a topological sort to create an unmerge order such that
12407 # each package is unmerged before its dependencies. This is
12408 # necessary to avoid breaking things that may need to run
12409 # during pkg_prerm or pkg_postrm phases.
12411 # Create a new graph to account for dependencies between the
12412 # packages being unmerged.
# Map each dependency class to an unmerge priority.
12416 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
12417 runtime = UnmergeDepPriority(runtime=True)
12418 runtime_post = UnmergeDepPriority(runtime_post=True)
12419 buildtime = UnmergeDepPriority(buildtime=True)
12421 "RDEPEND": runtime,
12422 "PDEPEND": runtime_post,
12423 "DEPEND": buildtime,
12426 for node in clean_set:
12427 graph.add(node, None)
12429 node_use = node.metadata["USE"].split()
12430 for dep_type in dep_keys:
12431 depstr = node.metadata[dep_type]
# Strict dep-checking is disabled around dep_check because packages
# being removed may have deps that no longer parse; the module-global
# flag is restored immediately afterwards.
12435 portage.dep._dep_check_strict = False
12436 success, atoms = portage.dep_check(depstr, None, settings,
12437 myuse=node_use, trees=resolver._graph_trees,
12440 portage.dep._dep_check_strict = True
12442 # Ignore invalid deps of packages that will
12443 # be uninstalled anyway.
12446 priority = priority_map[dep_type]
12448 if not isinstance(atom, portage.dep.Atom):
12449 # Ignore invalid atoms returned from dep_check().
12453 matches = vardb.match_pkgs(atom)
# Only edges between two packages both being removed matter here.
12456 for child_node in matches:
12457 if child_node in clean_set:
12458 graph.add(child_node, node, priority=priority)
12461 if len(graph.order) == len(graph.root_nodes()):
12462 # If there are no dependencies between packages
12463 # let unmerge() group them by cat/pn.
12465 cleanlist = [pkg.cpv for pkg in graph.order]
12467 # Order nodes from lowest to highest overall reference count for
12468 # optimal root node selection.
12469 node_refcounts = {}
12470 for node in graph.order:
12471 node_refcounts[node] = len(graph.parent_nodes(node))
# Python 2 cmp-style comparator for graph.order.sort().
12472 def cmp_reference_count(node1, node2):
12473 return node_refcounts[node1] - node_refcounts[node2]
12474 graph.order.sort(cmp_reference_count)
# Peel root nodes off the graph, progressively ignoring weaker edge
# priorities to break circular dependencies when necessary.
12476 ignore_priority_range = [None]
12477 ignore_priority_range.extend(
12478 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
12479 while not graph.empty():
12480 for ignore_priority in ignore_priority_range:
12481 nodes = graph.root_nodes(ignore_priority=ignore_priority)
12485 raise AssertionError("no root nodes")
12486 if ignore_priority is not None:
12487 # Some deps have been dropped due to circular dependencies,
12488 # so only pop one node in order to minimize the number that
12493 cleanlist.append(node.cpv)
# Perform the actual unmerge of the ordered clean list.
12495 unmerge(root_config, myopts, "unmerge", cleanlist,
12496 ldpath_mtimes, ordered=ordered)
12498 if action == "prune":
# Summary statistics (skipped when quiet and nothing was cleaned).
12501 if not cleanlist and "--quiet" in myopts:
12504 print "Packages installed: "+str(len(vardb.cpv_all()))
12505 print "Packages in world: " + \
12506 str(len(root_config.sets["world"].getAtoms()))
12507 print "Packages in system: " + \
12508 str(len(root_config.sets["system"].getAtoms()))
12509 print "Required packages: "+str(required_pkgs_total)
12510 if "--pretend" in myopts:
12511 print "Number to remove: "+str(len(cleanlist))
12513 print "Number removed: "+str(len(cleanlist))
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
    skip_masked=False, skip_unsatisfied=False):
    """
    Construct a depgraph for the given resume list. This will raise
    PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.

    @returns: (success, depgraph, dropped_tasks)
    """
    mergelist = mtimedb["resume"]["mergelist"]
    dropped_tasks = set()
    mydepgraph = depgraph(settings, trees,
        myopts, myparams, spinner)
    # NOTE(review): the try: guarding loadResumeCommand() is not visible in
    # this listing; the except clause below handles its failure.
    success = mydepgraph.loadResumeCommand(mtimedb["resume"],
        skip_masked=skip_masked)
    except depgraph.UnsatisfiedResumeDep, e:
        if not skip_unsatisfied:
        graph = mydepgraph.digraph
        # Map each parent of an unsatisfied dep to itself, so the original
        # Package instances are preserved as the dict values.
        unsatisfied_parents = dict((dep.parent, dep.parent) \
            for dep in e.value)
        traversed_nodes = set()
        unsatisfied_stack = list(unsatisfied_parents)
        # Walk up the graph: dropping a package may make its own parents
        # unsatisfied, so collect affected parents transitively.
        while unsatisfied_stack:
            pkg = unsatisfied_stack.pop()
            if pkg in traversed_nodes:
            traversed_nodes.add(pkg)

            # If this package was pulled in by a parent
            # package scheduled for merge, removing this
            # package may cause the the parent package's
            # dependency to become unsatisfied.
            for parent_node in graph.parent_nodes(pkg):
                if not isinstance(parent_node, Package) \
                    or parent_node.operation not in ("merge", "nomerge"):
                graph.child_nodes(parent_node,
                    ignore_priority=DepPriority.SOFT)
                if pkg in unsatisfied:
                    unsatisfied_parents[parent_node] = parent_node
                    unsatisfied_stack.append(parent_node)

        # Drop the unsatisfied parents from the resume mergelist.
        pruned_mergelist = [x for x in mergelist \
            if isinstance(x, list) and \
            tuple(x) not in unsatisfied_parents]

        # If the mergelist doesn't shrink then this loop is infinite.
        if len(pruned_mergelist) == len(mergelist):
            # This happens if a package can't be dropped because
            # it's already installed, but it has unsatisfied PDEPEND.
        mergelist[:] = pruned_mergelist

        # Exclude installed packages that have been removed from the graph due
        # to failure to build/install runtime dependencies after the dependent
        # package has already been installed.
        dropped_tasks.update(pkg for pkg in \
            unsatisfied_parents if pkg.operation != "nomerge")
        # Release references so the dropped Package objects can be collected.
        mydepgraph.break_refs(unsatisfied_parents)
        del e, graph, traversed_nodes, \
            unsatisfied_parents, unsatisfied_stack
    return (success, mydepgraph, dropped_tasks)
def action_build(settings, trees, mtimedb,
    myopts, myaction, myfiles, spinner):
    """
    Handler for merge-type actions: builds a dependency graph (fresh, or
    reconstructed from saved --resume data in mtimedb), optionally displays
    it and prompts the user, then hands the merge list to the Scheduler.
    """

    # validate the state of the resume data
    # so that we can make assumptions later.
    for k in ("resume", "resume_backup"):
        if k not in mtimedb:
        resume_data = mtimedb[k]
        if not isinstance(resume_data, dict):
        mergelist = resume_data.get("mergelist")
        if not isinstance(mergelist, list):
        resume_opts = resume_data.get("myopts")
        if not isinstance(resume_opts, (dict, list)):
        favorites = resume_data.get("favorites")
        if not isinstance(favorites, list):

    if "--resume" in myopts and \
        ("resume" in mtimedb or
        "resume_backup" in mtimedb):
        # Promote the backup to the primary slot if needed.
        if "resume" not in mtimedb:
            mtimedb["resume"] = mtimedb["resume_backup"]
            del mtimedb["resume_backup"]
        # "myopts" is a list for backward compatibility.
        resume_opts = mtimedb["resume"].get("myopts", [])
        if isinstance(resume_opts, list):
            resume_opts = dict((k,True) for k in resume_opts)
        # These options are per-invocation and must not leak in from the
        # saved resume state.
        for opt in ("--skipfirst", "--ask", "--tree"):
            resume_opts.pop(opt, None)
        myopts.update(resume_opts)

        if "--debug" in myopts:
            writemsg_level("myopts %s\n" % (myopts,))

        # Adjust config according to options of the command being resumed.
        for myroot in trees:
            mysettings = trees[myroot]["vartree"].settings
            mysettings.unlock()
            adjust_config(myopts, mysettings)
        del myroot, mysettings

    ldpath_mtimes = mtimedb["ldpath"]
    buildpkgonly = "--buildpkgonly" in myopts
    pretend = "--pretend" in myopts
    fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
    ask = "--ask" in myopts
    nodeps = "--nodeps" in myopts
    oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
    tree = "--tree" in myopts
    if nodeps and tree:
        del myopts["--tree"]
        portage.writemsg(colorize("WARN", " * ") + \
            "--tree is broken with --nodeps. Disabling...\n")
    debug = "--debug" in myopts
    verbose = "--verbose" in myopts
    quiet = "--quiet" in myopts
    if pretend or fetchonly:
        # make the mtimedb readonly
        mtimedb.filename = None
    if "--digest" in myopts:
        msg = "The --digest option can prevent corruption from being" + \
            " noticed. The `repoman manifest` command is the preferred" + \
            " way to generate manifests and it is capable of doing an" + \
            " entire repository or category at once."
        prefix = bad(" * ")
        writemsg(prefix + "\n")
        from textwrap import wrap
        for line in wrap(msg, 72):
            writemsg("%s%s\n" % (prefix, line))
        writemsg(prefix + "\n")

    # Describe what will be done in pretend/ask/tree/verbose modes.
    if "--quiet" not in myopts and \
        ("--pretend" in myopts or "--ask" in myopts or \
        "--tree" in myopts or "--verbose" in myopts):
        if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
        elif "--buildpkgonly" in myopts:
        if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
            print darkgreen("These are the packages that would be %s, in reverse order:") % action
            print darkgreen("These are the packages that would be %s, in order:") % action

    show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
    if not show_spinner:
        spinner.update = spinner.update_quiet

        # Resume branch: rebuild the depgraph from the saved mergelist.
        favorites = mtimedb["resume"].get("favorites")
        if not isinstance(favorites, list):
        print "Calculating dependencies  ",
        myparams = create_depgraph_params(myopts, myaction)
        resume_data = mtimedb["resume"]
        mergelist = resume_data["mergelist"]
        # --skipfirst: locate (and drop, in elided lines) the first pending
        # "merge" task from the saved list.
        if mergelist and "--skipfirst" in myopts:
            for i, task in enumerate(mergelist):
                if isinstance(task, list) and \
                    task and task[-1] == "merge":
        skip_masked = "--skipfirst" in myopts
        skip_unsatisfied = "--skipfirst" in myopts
        success, mydepgraph, dropped_tasks = resume_depgraph(
            settings, trees, mtimedb, myopts, myparams, spinner,
            skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
        except (portage.exception.PackageNotFound,
            depgraph.UnsatisfiedResumeDep), e:
            if isinstance(e, depgraph.UnsatisfiedResumeDep):
                mydepgraph = e.depgraph
            from textwrap import wrap
            from portage.output import EOutput
            resume_data = mtimedb["resume"]
            mergelist = resume_data.get("mergelist")
            if not isinstance(mergelist, list):
            if mergelist and debug or (verbose and not quiet):
                out.eerror("Invalid resume list:")
                for task in mergelist:
                    if isinstance(task, list):
                        out.eerror(indent + str(tuple(task)))
            if isinstance(e, depgraph.UnsatisfiedResumeDep):
                out.eerror("One or more packages are either masked or " + \
                    "have missing dependencies:")
                for dep in e.value:
                    if dep.atom is None:
                        out.eerror(indent + "Masked package:")
                        out.eerror(2 * indent + str(dep.parent))
                        out.eerror(indent + str(dep.atom) + " pulled in by:")
                        out.eerror(2 * indent + str(dep.parent))
                msg = "The resume list contains packages " + \
                    "that are either masked or have " + \
                    "unsatisfied dependencies. " + \
                    "Please restart/continue " + \
                    "the operation manually, or use --skipfirst " + \
                    "to skip the first package in the list and " + \
                    "any other packages that may be " + \
                    "masked or have missing dependencies."
                for line in wrap(msg, 72):
            elif isinstance(e, portage.exception.PackageNotFound):
                out.eerror("An expected package is " + \
                    "not available: %s" % str(e))
                msg = "The resume list contains one or more " + \
                    "packages that are no longer " + \
                    "available. Please restart/continue " + \
                    "the operation manually."
                for line in wrap(msg, 72):
        print "\b\b... done!"

        # Report any tasks that resume_depgraph() had to drop.
            portage.writemsg("!!! One or more packages have been " + \
                "dropped due to\n" + \
                "!!! masking or unsatisfied dependencies:\n\n",
            for task in dropped_tasks:
                portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
            portage.writemsg("\n", noiselevel=-1)

            if mydepgraph is not None:
                mydepgraph.display_problems()
            if not (ask or pretend):
                # delete the current list and also the backup
                # since it's probably stale too.
                for k in ("resume", "resume_backup"):
                    mtimedb.pop(k, None)

        # Non-resume branch: compute a fresh depgraph from myfiles.
        if ("--resume" in myopts):
            print darkgreen("emerge: It seems we have nothing to resume...")
        myparams = create_depgraph_params(myopts, myaction)
        if "--quiet" not in myopts and "--nodeps" not in myopts:
            print "Calculating dependencies  ",
        mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
        retval, favorites = mydepgraph.select_files(myfiles)
        except portage.exception.PackageNotFound, e:
            portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
        except portage.exception.PackageSetNotFound, e:
            root_config = trees[settings["ROOT"]]["root_config"]
            display_missing_pkg_set(root_config, e.value)
        print "\b\b... done!"
        mydepgraph.display_problems()

    # Interactive display + prompt path (--ask/--tree/--verbose w/o --pretend).
    if "--pretend" not in myopts and \
        ("--ask" in myopts or "--tree" in myopts or \
        "--verbose" in myopts) and \
        not ("--quiet" in myopts and "--ask" not in myopts):
        if "--resume" in myopts:
            mymergelist = mydepgraph.altlist()
            if len(mymergelist) == 0:
                print colorize("INFORM", "emerge: It seems we have nothing to resume...")
            favorites = mtimedb["resume"]["favorites"]
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=tree),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
            prompt="Would you like to resume merging these packages?"
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=("--tree" in myopts)),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
            for x in mydepgraph.altlist():
                if isinstance(x, Package) and x.operation == "merge":
            sets = trees[settings["ROOT"]]["root_config"].sets
            world_candidates = None
            if "--noreplace" in myopts and \
                not oneshot and favorites:
                # Sets that are not world candidates are filtered
                # out here since the favorites list needs to be
                # complete for depgraph.loadResumeCommand() to
                # operate correctly.
                world_candidates = [x for x in favorites \
                    if not (x.startswith(SETPREFIX) and \
                    not sets[x[1:]].world_candidate)]
            if "--noreplace" in myopts and \
                not oneshot and world_candidates:
                for x in world_candidates:
                    print " %s %s" % (good("*"), x)
                prompt="Would you like to add these packages to your world favorites?"
            elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
                prompt="Nothing to merge; would you like to auto-clean packages?"
                print "Nothing to merge; quitting."
            elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
                prompt="Would you like to fetch the source files for these packages?"
                prompt="Would you like to merge these packages?"

        if "--ask" in myopts and userquery(prompt) == "No":

        # Don't ask again (e.g. when auto-cleaning packages after merge)
        myopts.pop("--ask", None)

    # Pure --pretend display path.
    if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
        if ("--resume" in myopts):
            mymergelist = mydepgraph.altlist()
            if len(mymergelist) == 0:
                print colorize("INFORM", "emerge: It seems we have nothing to resume...")
            favorites = mtimedb["resume"]["favorites"]
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=tree),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
            retval = mydepgraph.display(
                mydepgraph.altlist(reversed=("--tree" in myopts)),
                favorites=favorites)
            mydepgraph.display_problems()
            if retval != os.EX_OK:
        if "--buildpkgonly" in myopts:
            # --buildpkgonly requires a dependency-free (all-zeros) graph
            # of Package nodes only.
            graph_copy = mydepgraph.digraph.clone()
            for node in list(graph_copy.order):
                if not isinstance(node, Package):
                    graph_copy.remove(node)
            if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
                print "\n!!! --buildpkgonly requires all dependencies to be merged."
                print "!!! You have to merge the dependencies before you can build this package.\n"

        if "--buildpkgonly" in myopts:
            # Same all-zeros check as above, on the real (non-pretend) path.
            graph_copy = mydepgraph.digraph.clone()
            for node in list(graph_copy.order):
                if not isinstance(node, Package):
                    graph_copy.remove(node)
            if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
                print "\n!!! --buildpkgonly requires all dependencies to be merged."
                print "!!! Cannot merge requested packages. Merge deps and try again.\n"

        if ("--resume" in myopts):
            favorites=mtimedb["resume"]["favorites"]
            mymergelist = mydepgraph.altlist()
            # break_refs + del + clear_caches free depgraph memory before
            # the Scheduler starts doing real work.
            mydepgraph.break_refs(mymergelist)
            mergetask = Scheduler(settings, trees, mtimedb, myopts,
                spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
            del mydepgraph, mymergelist
            clear_caches(trees)
            retval = mergetask.merge()
            merge_count = mergetask.curval

            # Keep a backup of a multi-package resume list before replacing it.
            if "resume" in mtimedb and \
                "mergelist" in mtimedb["resume"] and \
                len(mtimedb["resume"]["mergelist"]) > 1:
                mtimedb["resume_backup"] = mtimedb["resume"]
                del mtimedb["resume"]
            mtimedb["resume"]={}
            # Stored as a dict starting with portage-2.1.6_rc1, and supported
            # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
            # a list type for options.
            mtimedb["resume"]["myopts"] = myopts.copy()

            # Convert Atom instances to plain str since the mtimedb loader
            # sets unpickler.find_global = None which causes unpickler.load()
            # to raise the following exception:
            #
            # cPickle.UnpicklingError: Global and instance pickles are not supported.
            #
            # TODO: Maybe stop setting find_global = None, or find some other
            # way to avoid accidental triggering of the above UnpicklingError.
            mtimedb["resume"]["favorites"] = [str(x) for x in favorites]

            if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
                # Regenerate digests for each ebuild about to be merged.
                for pkgline in mydepgraph.altlist():
                    if pkgline[0]=="ebuild" and pkgline[3]=="merge":
                        y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
                        tmpsettings = portage.config(clone=settings)
                        if settings.get("PORTAGE_DEBUG", "") == "1":
                        retval = portage.doebuild(
                            y, "digest", settings["ROOT"], tmpsettings, edebug,
                            ("--pretend" in myopts),
                            mydbapi=trees[pkgline[1]]["porttree"].dbapi,

            pkglist = mydepgraph.altlist()
            mydepgraph.saveNomergeFavorites()
            mydepgraph.break_refs(pkglist)
            mergetask = Scheduler(settings, trees, mtimedb, myopts,
                spinner, pkglist, favorites, mydepgraph.schedulerGraph())
            del mydepgraph, pkglist
            clear_caches(trees)
            retval = mergetask.merge()
            merge_count = mergetask.curval

        if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
            if "yes" == settings.get("AUTOCLEAN"):
                portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
                unmerge(trees[settings["ROOT"]]["root_config"],
                    myopts, "clean", [],
                    ldpath_mtimes, autoclean=1)
                portage.writemsg_stdout(colorize("WARN", "WARNING:")
                    + " AUTOCLEAN is disabled. This can cause serious"
                    + " problems due to overlapping packages.\n")
def multiple_actions(action1, action2):
    # Complain on stderr that two mutually-exclusive actions were requested.
    # NOTE(review): the exit/return that normally follows is not visible in
    # this listing.
    sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
    sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
def insert_optional_args(args):
    """
    Parse optional arguments and insert a value if one has
    not been provided. This is done before feeding the args
    to the optparse parser since that parser does not support
    this feature natively.
    """
    jobs_opts = ("-j", "--jobs")
    # Process the args right-to-left via a stack so lookahead at the next
    # token (a possible job count) is cheap.
    arg_stack = args[:]
    arg_stack.reverse()
    arg = arg_stack.pop()

    # A short -j may be bundled with other short options (e.g. "-vj").
    short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
    if not (short_job_opt or arg in jobs_opts):
        new_args.append(arg)

    # Insert an empty placeholder in order to
    # satisfy the requirements of optparse.

    new_args.append("--jobs")

    if short_job_opt and len(arg) > 2:
        if arg[:2] == "-j":
            job_count = int(arg[2:])
            saved_opts = arg[2:]
            # Bundled form: strip the "j" and keep the other short options.
            saved_opts = arg[1:].replace("j", "")

    if job_count is None and arg_stack:
        # The next token may be the explicit job count.
        job_count = int(arg_stack[-1])
        # Discard the job count from the stack
        # since we're consuming it here.

    if job_count is None:
        # unlimited number of jobs
        new_args.append("True")
        new_args.append(str(job_count))

    if saved_opts is not None:
        # Re-emit any short options that were bundled with -j.
        new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
    """
    Parse the emerge command line into (myaction, myopts, myfiles) with
    optparse, driven by the module-level actions/options/shortmapping
    tables.
    """
    global actions, options, shortmapping

    longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
    # Options that take an argument: option name -> optparse add_option()
    # keyword dict (help text, choices, ...).
    argument_options = {
            "help":"specify the location for portage configuration files",

            "help":"enable or disable color output",
            "choices":("y", "n")

            "help" : "Specifies the number of packages to build " + \

        "--load-average": {
            "help" :"Specifies that no new builds should be started " + \
                "if there are other builds running and the load average " + \
                "is at least LOAD (a floating-point number).",

            "help":"include unnecessary build time dependencies",
            "choices":("y", "n")

            "help":"specify conditions to trigger package reinstallation",
            "choices":["changed-use"]

    from optparse import OptionParser
    parser = OptionParser()
    # Drop optparse's built-in --help so emerge can supply its own.
    if parser.has_option("--help"):
        parser.remove_option("--help")

    for action_opt in actions:
        parser.add_option("--" + action_opt, action="store_true",
            dest=action_opt.replace("-", "_"), default=False)
    for myopt in options:
        parser.add_option(myopt, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for shortopt, longopt in shortmapping.iteritems():
        parser.add_option("-" + shortopt, action="store_true",
            dest=longopt.lstrip("--").replace("-", "_"), default=False)
    for myalias, myopt in longopt_aliases.iteritems():
        parser.add_option(myalias, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)

    for myopt, kwargs in argument_options.iteritems():
        parser.add_option(myopt,
            dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

    # Fill in implicit values (e.g. a bare --jobs) before optparse sees them.
    tmpcmdline = insert_optional_args(tmpcmdline)

    myoptions, myargs = parser.parse_args(args=tmpcmdline)

    # Normalize --jobs: the placeholder "True" means unlimited, otherwise
    # it must parse as a positive integer.
    if myoptions.jobs == "True":
        jobs = int(myoptions.jobs)
    if jobs is not True and \
        writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
            (myoptions.jobs,), noiselevel=-1)
    myoptions.jobs = jobs

    if myoptions.load_average:
        load_average = float(myoptions.load_average)
        if load_average <= 0.0:
            load_average = None
            writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
                (myoptions.load_average,), noiselevel=-1)
        myoptions.load_average = load_average

    # Copy parsed flags back into the myopts dict keyed by option string.
    for myopt in options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
            myopts[myopt] = True

    for myopt in argument_options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)

    for action_opt in actions:
        v = getattr(myoptions, action_opt.replace("-", "_"))
            multiple_actions(myaction, action_opt)
        myaction = action_opt

    return myaction, myopts, myfiles
13198 def validate_ebuild_environment(trees):
13199 for myroot in trees:
13200 settings = trees[myroot]["vartree"].settings
13201 settings.validate()
def clear_caches(trees):
    """Release memory held by the tree dbapi caches and the dircache."""
    for tree_group in trees.itervalues():
        port_dbapi = tree_group["porttree"].dbapi
        bin_dbapi = tree_group["bintree"].dbapi
        # melt() undoes a previous freeze(), discarding memoized results.
        port_dbapi.melt()
        port_dbapi._aux_cache.clear()
        bin_dbapi._aux_cache.clear()
        bin_dbapi._clear_cache()
    # Module-level directory listing cache.
    portage.dircache.clear()
def load_emerge_config(trees=None):
    """
    Create (or refresh) the portage trees, attach a RootConfig to every
    root, and open the mtimedb. Returns (settings, trees, mtimedb).
    """
    # Map environment overrides into create_trees() keyword arguments.
    # NOTE(review): the kwargs initialization/assignment lines are not
    # visible in this listing.
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        v = os.environ.get(envvar, None)
        if v and v.strip():
    trees = portage.create_trees(trees=trees, **kwargs)

    for root, root_trees in trees.iteritems():
        settings = root_trees["vartree"].settings
        setconfig = load_default_config(settings, root_trees)
        root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

    settings = trees["/"]["vartree"].settings

    for myroot in trees:
        settings = trees[myroot]["vartree"].settings

    mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
    mtimedb = portage.MtimeDB(mtimedbfile)

    return settings, trees, mtimedb
def adjust_config(myopts, settings):
    """Make emerge specific adjustments to the config."""

    # To enhance usability, make some vars case insensitive by forcing them to
    for myvar in ("AUTOCLEAN", "NOCOLOR"):
        if myvar in settings:
            settings[myvar] = settings[myvar].lower()
            settings.backup_changes(myvar)

    # Kill noauto as it will break merges otherwise.
    if "noauto" in settings.features:
        while "noauto" in settings.features:
            settings.features.remove("noauto")
        settings["FEATURES"] = " ".join(settings.features)
        settings.backup_changes("FEATURES")

    # Parse CLEAN_DELAY as an integer, falling back to the default and
    # warning on bad values.
        CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
            settings["CLEAN_DELAY"], noiselevel=-1)
    settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
    settings.backup_changes("CLEAN_DELAY")

    # Same integer-parsing treatment for EMERGE_WARNING_DELAY.
    EMERGE_WARNING_DELAY = 10
        EMERGE_WARNING_DELAY = int(settings.get(
            "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
            settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
    settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
    settings.backup_changes("EMERGE_WARNING_DELAY")

    if "--quiet" in myopts:
        settings["PORTAGE_QUIET"]="1"
        settings.backup_changes("PORTAGE_QUIET")

    if "--verbose" in myopts:
        settings["PORTAGE_VERBOSE"] = "1"
        settings.backup_changes("PORTAGE_VERBOSE")

    # Set so that configs will be merged regardless of remembered status
    if ("--noconfmem" in myopts):
        settings["NOCONFMEM"]="1"
        settings.backup_changes("NOCONFMEM")

    # Set various debug markers... They should be merged somehow.
        PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
        if PORTAGE_DEBUG not in (0, 1):
            portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
                PORTAGE_DEBUG, noiselevel=-1)
            portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
            settings["PORTAGE_DEBUG"], noiselevel=-1)
    if "--debug" in myopts:
    settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
    settings.backup_changes("PORTAGE_DEBUG")

    if settings.get("NOCOLOR") not in ("yes","true"):
        portage.output.havecolor = 1

    """The explicit --color < y | n > option overrides the NOCOLOR environment
    variable and stdout auto-detection."""
    if "--color" in myopts:
        if "y" == myopts["--color"]:
            portage.output.havecolor = 1
            settings["NOCOLOR"] = "false"
            portage.output.havecolor = 0
            settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
    elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
        # Not a tty: disable color unless NOCOLOR explicitly says "no".
        portage.output.havecolor = 0
        settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
    # NOTE(review): the body is not visible in this listing; presumably it
    # invokes the nice()/ionice() helpers defined below -- confirm against
    # the full source.
def nice(settings):
    # Apply PORTAGE_NICENESS via os.nice(); report failure without raising.
    # NOTE(review): the try: guarding os.nice() is not visible in this listing.
        os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
    except (OSError, ValueError), e:
        out = portage.output.EOutput()
        out.eerror("Failed to change nice value to '%s'" % \
            settings["PORTAGE_NICENESS"])
        out.eerror("%s\n" % str(e))
def ionice(settings):
    # Run PORTAGE_IONICE_COMMAND (with ${PID} expanded) when configured,
    # reporting a nonzero exit status via EOutput.
    ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
        ionice_cmd = shlex.split(ionice_cmd)

    from portage.util import varexpand
    # Substitute the current process id for ${PID} in the command.
    variables = {"PID" : str(os.getpid())}
    cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        # The OS kernel probably doesn't support ionice,
        # so return silently.

    if rval != os.EX_OK:
        out = portage.output.EOutput()
        out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
        out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
    # Emit an error naming the unknown set and listing every set that does
    # exist for this root.
    msg.append(("emerge: There are no sets to satisfy '%s'. " + \
        "The following sets exist:") % \
        colorize("INFORM", set_name))

    for s in sorted(root_config.sets):
        msg.append(" %s" % s)

    writemsg_level("".join("%s\n" % l for l in msg),
        level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
    """
    Expand set references (SETPREFIX) in the argument list.
    Returns (myfiles, retval) where retval is os.EX_OK on success or 1 on
    error (invalid atom, or conflicting system/world actions).
    """
    if myaction != "search":

    if x[:1] == SETPREFIX:
        msg.append("'%s' is not a valid package atom." % (x,))
        msg.append("Please check ebuild(5) for full details.")
        writemsg_level("".join("!!! %s\n" % line for line in msg),
            level=logging.ERROR, noiselevel=-1)
        return (myfiles, 1)
    elif x == "system":
        # Bare "system"/"world" args act as actions and must not be
        # combined with another action.
        if myaction is not None:
            multiple_actions("system", myaction)
            return (myfiles, 1)
            multiple_actions("world", myaction)
            return (myfiles, 1)

    if system and world:
        multiple_actions("system", "world")
        return (myfiles, 1)

    return (myfiles, os.EX_OK)
def repo_name_check(trees):
    """
    Warn about repositories that lack a profiles/repo_name entry.
    Returns True when at least one repository is missing one.
    """
    missing_repo_names = set()
    for root, root_trees in trees.iteritems():
        if "porttree" in root_trees:
            portdb = root_trees["porttree"].dbapi
            # Start from every configured tree, then discard the paths that
            # a registered repository name resolves to.
            missing_repo_names.update(portdb.porttrees)
            repos = portdb.getRepositories()
            missing_repo_names.discard(portdb.getRepositoryPath(r))

    if missing_repo_names:
        msg.append("WARNING: One or more repositories " + \
            "have missing repo_name entries:")
        for p in missing_repo_names:
            msg.append("\t%s/profiles/repo_name" % (p,))
        msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
            "should be a plain text file containing a unique " + \
            "name for the repository on the first line.", 70))
        writemsg_level("".join("%s\n" % l for l in msg),
            level=logging.WARNING, noiselevel=-1)

    return bool(missing_repo_names)
def config_protect_check(trees):
    # Warn when CONFIG_PROTECT is empty for a root: configuration files
    # would not be protected from being overwritten during merges.
    for root, root_trees in trees.iteritems():
        if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
            msg = "!!! CONFIG_PROTECT is empty"
            msg += " for '%s'" % root
            writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    # In quiet mode just list the matching fully-qualified names; otherwise
    # run a search per match so the user also sees descriptions.
    if "--quiet" in myopts:
        print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
        print "!!! one of the following fully-qualified ebuild names instead:\n"
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            print " " + colorize("INFORM", cp)

    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
    print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
    print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
    """
    Verify that each root has a valid profile. The caller compares the
    result against os.EX_OK, so a non-EX_OK code signals an invalid
    profile (the return statements are not visible in this listing).
    """
    # info/sync actions and --version/--help are permitted even without a
    # valid profile.
    if myaction in ("info", "sync"):
    elif "--version" in myopts or "--help" in myopts:
    for root, root_trees in trees.iteritems():
        if root_trees["root_config"].settings.profiles:
        # generate some profile related warning messages
        validate_ebuild_environment(trees)
        msg = "If you have just changed your profile configuration, you " + \
            "should revert back to the previous configuration. Due to " + \
            "your current profile being invalid, allowed actions are " + \
            "limited to --help, --info, --sync, and --version."
        writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
            level=logging.ERROR, noiselevel=-1)
13489 global portage # NFC why this is necessary now - genone
13490 portage._disable_legacy_globals()
13491 # Disable color until we're sure that it should be enabled (after
13492 # EMERGE_DEFAULT_OPTS has been parsed).
13493 portage.output.havecolor = 0
13494 # This first pass is just for options that need to be known as early as
13495 # possible, such as --config-root. They will be parsed again later,
13496 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
13497 # the value of --config-root).
13498 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
13499 if "--debug" in myopts:
13500 os.environ["PORTAGE_DEBUG"] = "1"
13501 if "--config-root" in myopts:
13502 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
13504 # Portage needs to ensure a sane umask for the files it creates.
13506 settings, trees, mtimedb = load_emerge_config()
13507 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13508 rval = profile_check(trees, myaction, myopts)
13509 if rval != os.EX_OK:
13512 if portage._global_updates(trees, mtimedb["updates"]):
13514 # Reload the whole config from scratch.
13515 settings, trees, mtimedb = load_emerge_config(trees=trees)
13516 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13518 xterm_titles = "notitles" not in settings.features
13521 if "--ignore-default-opts" not in myopts:
13522 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
13523 tmpcmdline.extend(sys.argv[1:])
13524 myaction, myopts, myfiles = parse_opts(tmpcmdline)
13526 if "--digest" in myopts:
13527 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
13528 # Reload the whole config from scratch so that the portdbapi internal
13529 # config is updated with new FEATURES.
13530 settings, trees, mtimedb = load_emerge_config(trees=trees)
13531 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13533 for myroot in trees:
13534 mysettings = trees[myroot]["vartree"].settings
13535 mysettings.unlock()
13536 adjust_config(myopts, mysettings)
13537 mysettings["PORTAGE_COUNTER_HASH"] = \
13538 trees[myroot]["vartree"].dbapi._counter_hash()
13539 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
13541 del myroot, mysettings
13543 apply_priorities(settings)
13545 spinner = stdout_spinner()
13546 if "candy" in settings.features:
13547 spinner.update = spinner.update_scroll
13549 if "--quiet" not in myopts:
13550 portage.deprecated_profile_check()
13551 repo_name_check(trees)
13552 config_protect_check(trees)
13554 eclasses_overridden = {}
13555 for mytrees in trees.itervalues():
13556 mydb = mytrees["porttree"].dbapi
13557 # Freeze the portdbapi for performance (memoize all xmatch results).
13559 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
13562 if eclasses_overridden and \
13563 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
13564 prefix = bad(" * ")
13565 if len(eclasses_overridden) == 1:
13566 writemsg(prefix + "Overlay eclass overrides " + \
13567 "eclass from PORTDIR:\n", noiselevel=-1)
13569 writemsg(prefix + "Overlay eclasses override " + \
13570 "eclasses from PORTDIR:\n", noiselevel=-1)
13571 writemsg(prefix + "\n", noiselevel=-1)
13572 for eclass_name in sorted(eclasses_overridden):
13573 writemsg(prefix + " '%s/%s.eclass'\n" % \
13574 (eclasses_overridden[eclass_name], eclass_name),
13576 writemsg(prefix + "\n", noiselevel=-1)
13577 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
13578 "because it will trigger invalidation of cached ebuild metadata " + \
13579 "that is distributed with the portage tree. If you must " + \
13580 "override eclasses from PORTDIR then you are advised to add " + \
13581 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
13582 "`emerge --regen` after each time that you run `emerge --sync`. " + \
13583 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
13584 "you would like to disable this warning."
13585 from textwrap import wrap
13586 for line in wrap(msg, 72):
13587 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
13589 if "moo" in myfiles:
13592 Larry loves Gentoo (""" + platform.system() + """)
13594 _______________________
13595 < Have you mooed today? >
13596 -----------------------
13606 ext = os.path.splitext(x)[1]
13607 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
13608 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
13611 root_config = trees[settings["ROOT"]]["root_config"]
13612 if myaction == "list-sets":
13613 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
13617 # only expand sets for actions taking package arguments
13618 oldargs = myfiles[:]
13619 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
13620 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
13621 if retval != os.EX_OK:
13624 # Need to handle empty sets specially, otherwise emerge will react
13625 # with the help message for empty argument lists
13626 if oldargs and not myfiles:
13627 print "emerge: no targets left after set expansion"
13630 if ("--tree" in myopts) and ("--columns" in myopts):
13631 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
13634 if ("--quiet" in myopts):
13635 spinner.update = spinner.update_quiet
13636 portage.util.noiselimit = -1
13638 # Always create packages if FEATURES=buildpkg
13639 # Imply --buildpkg if --buildpkgonly
13640 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
13641 if "--buildpkg" not in myopts:
13642 myopts["--buildpkg"] = True
13644 # Also allow -S to invoke search action (-sS)
13645 if ("--searchdesc" in myopts):
13646 if myaction and myaction != "search":
13647 myfiles.append(myaction)
13648 if "--search" not in myopts:
13649 myopts["--search"] = True
13650 myaction = "search"
13652 # Always try and fetch binary packages if FEATURES=getbinpkg
13653 if ("getbinpkg" in settings.features):
13654 myopts["--getbinpkg"] = True
13656 if "--buildpkgonly" in myopts:
13657 # --buildpkgonly will not merge anything, so
13658 # it cancels all binary package options.
13659 for opt in ("--getbinpkg", "--getbinpkgonly",
13660 "--usepkg", "--usepkgonly"):
13661 myopts.pop(opt, None)
13663 if "--fetch-all-uri" in myopts:
13664 myopts["--fetchonly"] = True
13666 if "--skipfirst" in myopts and "--resume" not in myopts:
13667 myopts["--resume"] = True
13669 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
13670 myopts["--usepkgonly"] = True
13672 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
13673 myopts["--getbinpkg"] = True
13675 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
13676 myopts["--usepkg"] = True
13678 # Also allow -K to apply --usepkg/-k
13679 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
13680 myopts["--usepkg"] = True
13682 # Allow -p to remove --ask
13683 if ("--pretend" in myopts) and ("--ask" in myopts):
13684 print ">>> --pretend disables --ask... removing --ask from options."
13685 del myopts["--ask"]
13687 # forbid --ask when not in a terminal
13688 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
13689 if ("--ask" in myopts) and (not sys.stdin.isatty()):
13690 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
13694 if settings.get("PORTAGE_DEBUG", "") == "1":
13695 spinner.update = spinner.update_quiet
13697 if "python-trace" in settings.features:
13698 import portage.debug
13699 portage.debug.set_trace(True)
13701 if not ("--quiet" in myopts):
13702 if not sys.stdout.isatty() or ("--nospinner" in myopts):
13703 spinner.update = spinner.update_basic
13705 if "--version" in myopts:
13706 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13707 settings.profile_path, settings["CHOST"],
13708 trees[settings["ROOT"]]["vartree"].dbapi)
13710 elif "--help" in myopts:
13711 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13714 if "--debug" in myopts:
13715 print "myaction", myaction
13716 print "myopts", myopts
13718 if not myaction and not myfiles and "--resume" not in myopts:
13719 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13722 pretend = "--pretend" in myopts
13723 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13724 buildpkgonly = "--buildpkgonly" in myopts
13726 # check if root user is the current user for the actions where emerge needs this
13727 if portage.secpass < 2:
13728 # We've already allowed "--version" and "--help" above.
13729 if "--pretend" not in myopts and myaction not in ("search","info"):
13730 need_superuser = not \
13732 (buildpkgonly and secpass >= 1) or \
13733 myaction in ("metadata", "regen") or \
13734 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
13735 if portage.secpass < 1 or \
13738 access_desc = "superuser"
13740 access_desc = "portage group"
13741 # Always show portage_group_warning() when only portage group
13742 # access is required but the user is not in the portage group.
13743 from portage.data import portage_group_warning
13744 if "--ask" in myopts:
13745 myopts["--pretend"] = True
13746 del myopts["--ask"]
13747 print ("%s access is required... " + \
13748 "adding --pretend to options.\n") % access_desc
13749 if portage.secpass < 1 and not need_superuser:
13750 portage_group_warning()
13752 sys.stderr.write(("emerge: %s access is " + \
13753 "required.\n\n") % access_desc)
13754 if portage.secpass < 1 and not need_superuser:
13755 portage_group_warning()
13758 disable_emergelog = False
13759 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
13761 disable_emergelog = True
13763 if myaction in ("search", "info"):
13764 disable_emergelog = True
13765 if disable_emergelog:
13766 """ Disable emergelog for everything except build or unmerge
13767 operations. This helps minimize parallel emerge.log entries that can
13768 confuse log parsers. We especially want it disabled during
13769 parallel-fetch, which uses --resume --fetchonly."""
13771 def emergelog(*pargs, **kargs):
13774 if not "--pretend" in myopts:
13775 emergelog(xterm_titles, "Started emerge on: "+\
13776 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
13779 myelogstr=" ".join(myopts)
13781 myelogstr+=" "+myaction
13783 myelogstr += " " + " ".join(oldargs)
13784 emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
	"""Handle SIGINT/SIGTERM: suppress any further termination signals,
	announce which signal ended the run, and exit with status 100 + signum."""
	# Ignore repeat INT/TERM so the shutdown path is not re-entered.
	for _sig in (signal.SIGINT, signal.SIGTERM):
		signal.signal(_sig, signal.SIG_IGN)
	portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
	sys.exit(100+signum)
13792 signal.signal(signal.SIGINT, emergeexitsig)
13793 signal.signal(signal.SIGTERM, emergeexitsig)
13796 """This gets out final log message in before we quit."""
13797 if "--pretend" not in myopts:
13798 emergelog(xterm_titles, " *** terminating.")
13799 if "notitles" not in settings.features:
13801 portage.atexit_register(emergeexit)
13803 if myaction in ("config", "metadata", "regen", "sync"):
13804 if "--pretend" in myopts:
13805 sys.stderr.write(("emerge: The '%s' action does " + \
13806 "not support '--pretend'.\n") % myaction)
13809 if "sync" == myaction:
13810 return action_sync(settings, trees, mtimedb, myopts, myaction)
13811 elif "metadata" == myaction:
13812 action_metadata(settings, portdb, myopts)
13813 elif myaction=="regen":
13814 validate_ebuild_environment(trees)
13815 action_regen(settings, portdb, myopts.get("--jobs"),
13816 myopts.get("--load-average"))
13818 elif "config"==myaction:
13819 validate_ebuild_environment(trees)
13820 action_config(settings, trees, myopts, myfiles)
13823 elif "search"==myaction:
13824 validate_ebuild_environment(trees)
13825 action_search(trees[settings["ROOT"]]["root_config"],
13826 myopts, myfiles, spinner)
13827 elif myaction in ("clean", "unmerge") or \
13828 (myaction == "prune" and "--nodeps" in myopts):
13829 validate_ebuild_environment(trees)
13831 # Ensure atoms are valid before calling unmerge().
13832 # For backward compat, leading '=' is not required.
13834 if is_valid_package_atom(x) or \
13835 is_valid_package_atom("=" + x):
13838 msg.append("'%s' is not a valid package atom." % (x,))
13839 msg.append("Please check ebuild(5) for full details.")
13840 writemsg_level("".join("!!! %s\n" % line for line in msg),
13841 level=logging.ERROR, noiselevel=-1)
13844 # When given a list of atoms, unmerge
13845 # them in the order given.
13846 ordered = myaction == "unmerge"
13847 if 1 == unmerge(root_config, myopts, myaction, myfiles,
13848 mtimedb["ldpath"], ordered=ordered):
13849 if not (buildpkgonly or fetchonly or pretend):
13850 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
13852 elif myaction in ("depclean", "info", "prune"):
13854 # Ensure atoms are valid before calling unmerge().
13855 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13858 if is_valid_package_atom(x):
13860 valid_atoms.append(
13861 portage.dep_expand(x, mydb=vardb, settings=settings))
13862 except portage.exception.AmbiguousPackageName, e:
13863 msg = "The short ebuild name \"" + x + \
13864 "\" is ambiguous. Please specify " + \
13865 "one of the following " + \
13866 "fully-qualified ebuild names instead:"
13867 for line in textwrap.wrap(msg, 70):
13868 writemsg_level("!!! %s\n" % (line,),
13869 level=logging.ERROR, noiselevel=-1)
13871 writemsg_level(" %s\n" % colorize("INFORM", i),
13872 level=logging.ERROR, noiselevel=-1)
13873 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13877 msg.append("'%s' is not a valid package atom." % (x,))
13878 msg.append("Please check ebuild(5) for full details.")
13879 writemsg_level("".join("!!! %s\n" % line for line in msg),
13880 level=logging.ERROR, noiselevel=-1)
13883 if myaction == "info":
13884 return action_info(settings, trees, myopts, valid_atoms)
13886 validate_ebuild_environment(trees)
13887 action_depclean(settings, trees, mtimedb["ldpath"],
13888 myopts, myaction, valid_atoms, spinner)
13889 if not (buildpkgonly or fetchonly or pretend):
13890 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
13891 # "update", "system", or just process files:
13893 validate_ebuild_environment(trees)
13894 if "--pretend" not in myopts:
13895 display_news_notification(root_config, myopts)
13896 retval = action_build(settings, trees, mtimedb,
13897 myopts, myaction, myfiles, spinner)
13898 root_config = trees[settings["ROOT"]]["root_config"]
13899 post_emerge(root_config, myopts, mtimedb, retval)