2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 # This block ensures that ^C interrupts are handled quietly.
# NOTE(review): this excerpt is elided -- the embedded original line numbers
# jump (13 -> 16, 18 -> 20), so the surrounding try: wrapper, the handler's
# exit call, and the except body are not visible here.
# Handler: ignore further SIGINT/SIGTERM while shutting down quietly.
11 def exithandler(signum,frame):
12 signal.signal(signal.SIGINT, signal.SIG_IGN)
13 signal.signal(signal.SIGTERM, signal.SIG_IGN)
# Install the quiet handler and restore default SIGPIPE disposition so
# broken pipes kill the process instead of raising IOError everywhere.
16 signal.signal(signal.SIGINT, exithandler)
17 signal.signal(signal.SIGTERM, exithandler)
18 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
20 except KeyboardInterrupt:
24 from collections import deque
41 from os import path as osp
42 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51 nc_len, red, teal, turquoise, xtermTitle, \
52 xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
61 portage.dep._dep_check_strict = True
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage._sets import load_default_config, SETPREFIX
70 from portage._sets.base import InternalPackageSet
72 from itertools import chain, izip
73 from UserDict import DictMixin
76 import cPickle as pickle
81 import cStringIO as StringIO
# Progress spinner written to stdout while long operations (dep calculation,
# metadata regeneration) run.  Three styles: basic dots, a scrolling message,
# and a twirling cursor; update_quiet suppresses output entirely.
# NOTE(review): elided excerpt -- the scroll_msgs list opener, __init__
# header, and several return statements are not visible here.
85 class stdout_spinner(object):
87 "Gentoo Rocks ("+platform.system()+")",
88 "Thank you for using Gentoo. :)",
89 "Are you actually trying to read this?",
90 "How many times have you stared at this?",
91 "We are generating the cache right now",
92 "You are paying too much attention.",
93 "A theory is better than its explanation.",
94 "Phasers locked on target, Captain.",
95 "Thrashing is just virtual crashing.",
96 "To be is to program.",
97 "Real Users hate Real Programmers.",
98 "When all else fails, read the instructions.",
99 "Functionality breeds Contempt.",
100 "The future lies ahead.",
101 "3.1415926535897932384626433832795028841971694",
102 "Sometimes insanity is the only alternative.",
103 "Inaccuracy saves a world of explanation.",
106 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default style is the twirl; pick a scroll message pseudo-randomly
# from the current time.
110 self.update = self.update_twirl
111 self.scroll_sequence = self.scroll_msgs[
112 int(time.time() * 100) % len(self.scroll_msgs)]
114 self.min_display_latency = 0.05
# Rate limiter shared by all update_* styles.
116 def _return_early(self):
118 Flushing output to the tty too frequently wastes cpu time. Therefore,
119 each update* method should return without doing any output when this
122 cur_time = time.time()
123 if cur_time - self.last_update < self.min_display_latency:
125 self.last_update = cur_time
# Style 1: print a dot every 100th call, with a leading ". " on wrap.
128 def update_basic(self):
129 self.spinpos = (self.spinpos + 1) % 500
130 if self._return_early():
132 if (self.spinpos % 100) == 0:
133 if self.spinpos == 0:
134 sys.stdout.write(". ")
136 sys.stdout.write(".")
# Style 2: scroll the chosen message left then right using backspaces.
139 def update_scroll(self):
140 if self._return_early():
142 if(self.spinpos >= len(self.scroll_sequence)):
143 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
144 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
146 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
148 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Style 3: classic /-\| twirl drawn in place with backspaces.
150 def update_twirl(self):
151 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
152 if self._return_early():
154 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# Style 4: no output at all (--nospinner / --quiet).
157 def update_quiet(self):
# NOTE(review): elided excerpt -- the prompt-printing line, the input loop
# header, and the SystemExit conversion body are not visible here.
160 def userquery(prompt, responses=None, colours=None):
161 """Displays a prompt and a set of responses, then waits for a response
162 which is checked against the responses and the first to match is
163 returned. An empty response will match the first value in responses. The
164 input buffer is *not* cleared prior to the prompt!
167 responses: a List of Strings.
168 colours: a List of Functions taking and returning a String, used to
169 process the responses for display. Typically these will be functions
170 like red() but could be e.g. lambda x: "DisplayString".
171 If responses is omitted, defaults to ["Yes", "No"], [green, red].
172 If only colours is omitted, defaults to [bold, ...].
174 Returns a member of the List responses. (If called without optional
175 arguments, returns "Yes" or "No".)
176 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
178 if responses is None:
179 responses = ["Yes", "No"]
# Default colour scheme for the Yes/No case.
181 create_color_func("PROMPT_CHOICE_DEFAULT"),
182 create_color_func("PROMPT_CHOICE_OTHER")
184 elif colours is None:
# Repeat the colour list so every response has a colour function.
186 colours=(colours*len(responses))[:len(responses)]
190 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
191 for key in responses:
192 # An empty response will match the first value in responses.
193 if response.upper()==key[:len(response)].upper():
195 print "Sorry, response '%s' not understood." % response,
196 except (EOFError, KeyboardInterrupt):
# Recognized emerge actions, long options, and short-to-long option map.
# NOTE(review): elided excerpt -- several entries, the "options = [" opener,
# the "shortmapping = {" opener, and the closing brackets are not visible.
200 actions = frozenset([
201 "clean", "config", "depclean",
202 "info", "list-sets", "metadata",
203 "prune", "regen", "search",
207 "--ask", "--alphabetical",
208 "--buildpkg", "--buildpkgonly",
209 "--changelog", "--columns",
214 "--fetchonly", "--fetch-all-uri",
215 "--getbinpkg", "--getbinpkgonly",
216 "--help", "--ignore-default-opts",
219 "--newuse", "--nocolor",
220 "--nodeps", "--noreplace",
221 "--nospinner", "--oneshot",
222 "--onlydeps", "--pretend",
223 "--quiet", "--resume",
224 "--searchdesc", "--selective",
228 "--usepkg", "--usepkgonly",
229 "--verbose", "--version"
# Single-letter aliases expanded during command-line parsing.
235 "b":"--buildpkg", "B":"--buildpkgonly",
236 "c":"--clean", "C":"--unmerge",
237 "d":"--debug", "D":"--deep",
239 "f":"--fetchonly", "F":"--fetch-all-uri",
240 "g":"--getbinpkg", "G":"--getbinpkgonly",
242 "k":"--usepkg", "K":"--usepkgonly",
244 "n":"--noreplace", "N":"--newuse",
245 "o":"--onlydeps", "O":"--nodeps",
246 "p":"--pretend", "P":"--prune",
248 "s":"--search", "S":"--searchdesc",
251 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log (and mirror a short
# message to the xterm title bar).  Errors are printed, never raised.
# NOTE(review): elided excerpt -- the try:/seek/close lines and parts of the
# locking sequence are not visible here.
254 def emergelog(xterm_titles, mystr, short_msg=None):
255 if xterm_titles and short_msg:
256 if "HOSTNAME" in os.environ:
257 short_msg = os.environ["HOSTNAME"]+": "+short_msg
258 xtermTitle(short_msg)
260 file_path = "/var/log/emerge.log"
261 mylogfile = open(file_path, "a")
# Keep the log owned by the portage user/group regardless of who runs emerge.
262 portage.util.apply_secpass_permissions(file_path,
263 uid=portage.portage_uid, gid=portage.portage_gid,
267 mylock = portage.locks.lockfile(mylogfile)
268 # seek because we may have gotten held up by the lock.
269 # if so, we may not be positioned at the end of the file.
271 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
275 portage.locks.unlockfile(mylock)
# Logging is best-effort: report the failure and carry on.
277 except (IOError,OSError,portage.exception.PortageException), e:
279 print >> sys.stderr, "emergelog():",e
# Print a visible countdown before a dangerous operation so the user can ^C.
# NOTE(review): elided excerpt -- the guard on secs, the loop header and the
# sleep/newline lines are not visible here.
281 def countdown(secs=5, doing="Starting"):
283 print ">>> Waiting",secs,"seconds before starting..."
284 print ">>> (Control-C to abort)...\n"+doing+" in: ",
288 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
293 # formats a size given in bytes nicely
# Returns a human-readable kB string with thousands separators.
# NOTE(review): elided excerpt -- the non-integer early return, the comma
# insertion loop header, and the final return are not visible here.
294 def format_size(mysize):
295 if type(mysize) not in [types.IntType,types.LongType]:
297 if 0 != mysize % 1024:
298 # Always round up to the next kB so that it doesn't show 0 kB when
299 # some small file still needs to be fetched.
300 mysize += 1024 - mysize % 1024
301 mystr=str(mysize/1024)
305 mystr=mystr[:mycount]+","+mystr[mycount:]
# NOTE(review): elided excerpt -- docstring delimiters and blank lines are
# not visible here (original line numbers jump).
309 def getgccversion(chost):
312 return: the current in-use gcc version
315 gcc_ver_command = 'gcc -dumpversion'
316 gcc_ver_prefix = 'gcc-'
318 gcc_not_found_error = red(
319 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
320 "!!! to update the environment of this terminal and possibly\n" +
321 "!!! other terminals also.\n"
# Preferred source: gcc-config, which reports the active profile as
# "${CHOST}-<version>"; strip the CHOST prefix.
324 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
325 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
326 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Fallback 1: ask the CHOST-prefixed compiler directly.
328 mystatus, myoutput = commands.getstatusoutput(
329 chost + "-" + gcc_ver_command)
330 if mystatus == os.EX_OK:
331 return gcc_ver_prefix + myoutput
# Fallback 2: plain "gcc" from PATH.
333 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
334 if mystatus == os.EX_OK:
335 return gcc_ver_prefix + myoutput
337 portage.writemsg(gcc_not_found_error, noiselevel=-1)
338 return "[unavailable]"
# Build the "Portage x.y.z (profile, gcc, libc, kernel arch)" banner string
# shown by emerge --info / --version.
# NOTE(review): elided excerpt -- the try/except wrappers around the profile
# and libc lookups and the loop over libclist are not visible here.
340 def getportageversion(portdir, target_root, profile, chost, vardb):
341 profilever = "unavailable"
# Express the profile as a path relative to ${PORTDIR}/profiles when possible.
343 realpath = os.path.realpath(profile)
344 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
345 if realpath.startswith(basepath):
346 profilever = realpath[1 + len(basepath):]
349 profilever = "!" + os.readlink(profile)
352 del realpath, basepath
# Installed libc: union of virtual/libc and the older virtual/glibc matches.
355 libclist = vardb.match("virtual/libc")
356 libclist += vardb.match("virtual/glibc")
357 libclist = portage.util.unique_array(libclist)
359 xs=portage.catpkgsplit(x)
361 libcver+=","+"-".join(xs[1:])
363 libcver="-".join(xs[1:])
365 libcver="unavailable"
367 gccver = getgccversion(chost)
368 unameout=platform.release()+" "+platform.machine()
370 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate command-line options into the depgraph parameter set.
# NOTE(review): elided excerpt -- the early return for the "remove" action
# branch, the "--deep" add, and the final "return myparams" are not visible.
372 def create_depgraph_params(myopts, myaction):
373 #configure emerge engine parameters
375 # self: include _this_ package regardless of if it is merged.
376 # selective: exclude the package if it is merged
377 # recurse: go into the dependencies
378 # deep: go into the dependencies of already merged packages
379 # empty: pretend nothing is merged
380 # complete: completely account for all known dependencies
381 # remove: build graph for use in removing packages
382 myparams = set(["recurse"])
384 if myaction == "remove":
385 myparams.add("remove")
386 myparams.add("complete")
389 if "--update" in myopts or \
390 "--newuse" in myopts or \
391 "--reinstall" in myopts or \
392 "--noreplace" in myopts:
393 myparams.add("selective")
# --emptytree overrides selective: everything is rebuilt.
394 if "--emptytree" in myopts:
395 myparams.add("empty")
396 myparams.discard("selective")
397 if "--nodeps" in myopts:
398 myparams.discard("recurse")
399 if "--deep" in myopts:
401 if "--complete-graph" in myopts:
402 myparams.add("complete")
405 # search functionality
# emerge --search / --searchdesc implementation.  Wraps the configured
# package databases (porttree/bintree/vartree) behind a fake portdb so the
# same matching code works for ebuilds, binpkgs, and installed packages.
# NOTE(review): heavily elided excerpt -- class constants (VERSION_RELEASE),
# the fake_portdb construction, several loop/return/except lines, and the
# output() method header are not visible here.
406 class search(object):
417 def __init__(self, root_config, spinner, searchdesc,
418 verbose, usepkg, usepkgonly):
419 """Searches the available and installed packages for the supplied search key.
420 The list of available and installed packages is created at object instantiation.
421 This makes successive searches faster."""
422 self.settings = root_config.settings
423 self.vartree = root_config.trees["vartree"]
424 self.spinner = spinner
425 self.verbose = verbose
426 self.searchdesc = searchdesc
427 self.root_config = root_config
428 self.setconfig = root_config.setconfig
# The fake portdb forwards the usual dbapi entry points to our _-prefixed
# multiplexing implementations below.
432 self.portdb = fake_portdb
433 for attrib in ("aux_get", "cp_all",
434 "xmatch", "findname", "getFetchMap"):
435 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
439 portdb = root_config.trees["porttree"].dbapi
440 bindb = root_config.trees["bintree"].dbapi
441 vardb = root_config.trees["vartree"].dbapi
# Database search order: ebuilds (unless --usepkgonly), then binpkgs
# (with --usepkg/--usepkgonly), then installed packages.
443 if not usepkgonly and portdb._have_root_eclass_dir:
444 self._dbs.append(portdb)
446 if (usepkg or usepkgonly) and bindb.cp_all():
447 self._dbs.append(bindb)
449 self._dbs.append(vardb)
450 self._portdb = portdb
# Union of category/package names across all searched databases.
455 cp_all.update(db.cp_all())
456 return list(sorted(cp_all))
# Delegate aux_get to whichever database knows the cpv.
458 def _aux_get(self, *args, **kwargs):
461 return db.aux_get(*args, **kwargs)
466 def _findname(self, *args, **kwargs):
468 if db is not self._portdb:
469 # We don't want findname to return anything
470 # unless it's an ebuild in a portage tree.
471 # Otherwise, it's already built and we don't
474 func = getattr(db, "findname", None)
476 value = func(*args, **kwargs)
481 def _getFetchMap(self, *args, **kwargs):
483 func = getattr(db, "getFetchMap", None)
485 value = func(*args, **kwargs)
# Visibility check shared by the match levels below.
490 def _visible(self, db, cpv, metadata):
491 installed = db is self.vartree.dbapi
492 built = installed or db is not self._portdb
495 pkg_type = "installed"
498 return visible(self.settings,
499 Package(type_name=pkg_type, root_config=self.root_config,
500 cpv=cpv, built=built, installed=installed, metadata=metadata))
502 def _xmatch(self, level, atom):
504 This method does not expand old-style virtuals because it
505 is restricted to returning matches for a single ${CATEGORY}/${PN}
506 and old-style virtual matches are unreliable for that when querying
507 multiple package databases. If necessary, old-style virtuals
508 can be performed on atoms prior to calling this method.
510 cp = portage.dep_getkey(atom)
511 if level == "match-all":
514 if hasattr(db, "xmatch"):
515 matches.update(db.xmatch(level, atom))
517 matches.update(db.match(atom))
# Discard matches from other category/package names, then sort.
518 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
519 db._cpv_sort_ascending(result)
520 elif level == "match-visible":
523 if hasattr(db, "xmatch"):
524 matches.update(db.xmatch(level, atom))
526 db_keys = list(db._aux_cache_keys)
527 for cpv in db.match(atom):
528 metadata = izip(db_keys,
529 db.aux_get(cpv, db_keys))
530 if not self._visible(db, cpv, metadata):
533 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
534 db._cpv_sort_ascending(result)
535 elif level == "bestmatch-visible":
538 if hasattr(db, "xmatch"):
539 cpv = db.xmatch("bestmatch-visible", atom)
540 if not cpv or portage.cpv_getkey(cpv) != cp:
541 if not result or cpv == portage.best([cpv, result]):
545 db_keys = list(db._aux_cache_keys)
546 # break out of this loop with highest visible
547 # match, checked in descending order
548 for cpv in reversed(db.match(atom)):
549 if portage.cpv_getkey(cpv) != cp:
551 metadata = izip(db_keys,
552 db.aux_get(cpv, db_keys))
553 if not self._visible(db, cpv, metadata):
555 if not result or cpv == portage.best([cpv, result]):
559 raise NotImplementedError(level)
562 def execute(self,searchkey):
563 """Performs the search for the supplied search key"""
565 self.searchkey=searchkey
566 self.packagematches = []
569 self.matches = {"pkg":[], "desc":[]}
572 self.matches = {"pkg":[]}
573 print "Searching... ",
# Leading '%' selects regex mode; leading '@' restricts to a category.
576 if self.searchkey.startswith('%'):
578 self.searchkey = self.searchkey[1:]
579 if self.searchkey.startswith('@'):
581 self.searchkey = self.searchkey[1:]
583 self.searchre=re.compile(self.searchkey,re.I)
585 self.searchre=re.compile(re.escape(self.searchkey), re.I)
586 for package in self.portdb.cp_all():
587 self.spinner.update()
590 match_string = package[:]
592 match_string = package.split("/")[-1]
595 if self.searchre.search(match_string):
# No visible version means the whole package is masked.
596 if not self.portdb.xmatch("match-visible", package):
598 self.matches["pkg"].append([package,masked])
599 elif self.searchdesc: # DESCRIPTION searching
600 full_package = self.portdb.xmatch("bestmatch-visible", package)
602 #no match found; we don't want to query description
603 full_package = portage.best(
604 self.portdb.xmatch("match-all", package))
610 full_desc = self.portdb.aux_get(
611 full_package, ["DESCRIPTION"])[0]
613 print "emerge: search: aux_get() failed, skipping"
615 if self.searchre.search(full_desc):
616 self.matches["desc"].append([full_package,masked])
619 for mtype in self.matches:
620 self.matches[mtype].sort()
621 self.mlen += len(self.matches[mtype])
624 """Outputs the results of the search."""
625 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
626 print "[ Applications found : "+white(str(self.mlen))+" ]"
628 vardb = self.vartree.dbapi
629 for mtype in self.matches:
630 for match,masked in self.matches[mtype]:
634 full_package = self.portdb.xmatch(
635 "bestmatch-visible", match)
637 #no match found; we don't want to query description
639 full_package = portage.best(
640 self.portdb.xmatch("match-all",match))
641 elif mtype == "desc":
643 match = portage.cpv_getkey(match)
646 desc, homepage, license = self.portdb.aux_get(
647 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
649 print "emerge: search: aux_get() failed, skipping"
652 print green("*")+" "+white(match)+" "+red("[ Masked ]")
654 print green("*")+" "+white(match)
655 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
# Compute the download size via the Manifest of the matching ebuild.
659 mycat = match.split("/")[0]
660 mypkg = match.split("/")[1]
661 mycpv = match + "-" + myversion
662 myebuild = self.portdb.findname(mycpv)
664 pkgdir = os.path.dirname(myebuild)
665 from portage import manifest
666 mf = manifest.Manifest(
667 pkgdir, self.settings["DISTDIR"])
669 uri_map = self.portdb.getFetchMap(mycpv)
670 except portage.exception.InvalidDependString, e:
671 file_size_str = "Unknown (%s)" % (e,)
675 mysum[0] = mf.getDistfilesSize(uri_map)
677 file_size_str = "Unknown (missing " + \
678 "digest for %s)" % (e,)
# Binary packages: fall back to the binpkg file size on disk.
683 if db is not vardb and \
684 db.cpv_exists(mycpv):
686 if not myebuild and hasattr(db, "bintree"):
687 myebuild = db.bintree.getname(mycpv)
689 mysum[0] = os.stat(myebuild).st_size
# Format the byte count as kB with thousands separators.
694 if myebuild and file_size_str is None:
695 mystr = str(mysum[0] / 1024)
699 mystr = mystr[:mycount] + "," + mystr[mycount:]
700 file_size_str = mystr + " kB"
704 print " ", darkgreen("Latest version available:"),myversion
705 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
708 (darkgreen("Size of files:"), file_size_str)
709 print " ", darkgreen("Homepage:")+" ",homepage
710 print " ", darkgreen("Description:")+" ",desc
711 print " ", darkgreen("License:")+" ",license
717 def getInstallationStatus(self,package):
718 installed_package = self.vartree.dep_bestmatch(package)
720 version = self.getVersion(installed_package,search.VERSION_RELEASE)
722 result = darkgreen("Latest version installed:")+" "+version
724 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract the version (optionally with -rN revision) from a full cpv.
727 def getVersion(self,full_package,detail):
728 if len(full_package) > 1:
729 package_parts = portage.catpkgsplit(full_package)
730 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
731 result = package_parts[2]+ "-" + package_parts[3]
733 result = package_parts[2]
# NOTE(review): elided excerpt -- the docstring close, the reverse map
# comprehension target, and surrounding blank lines are not visible here.
738 class RootConfig(object):
739 """This is used internally by depgraph to track information about a
# Map from Package.type_name to the tree key under trees[root].
743 "ebuild" : "porttree",
744 "binary" : "bintree",
745 "installed" : "vartree"
# Build the inverse (tree -> pkg type) mapping.
749 for k, v in pkg_tree_map.iteritems():
752 def __init__(self, settings, trees, setconfig):
754 self.settings = settings
755 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
756 self.root = self.settings["ROOT"]
757 self.setconfig = setconfig
758 self.sets = self.setconfig.getSets()
759 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# NOTE(review): elided excerpt -- the docstring close, the initial
# new_world_atom assignment, the mydb selection, several loop headers and
# early returns are not visible here.
761 def create_world_atom(pkg, args_set, root_config):
762 """Create a new atom for the world file if one does not exist. If the
763 argument atom is precise enough to identify a specific slot then a slot
764 atom will be returned. Atoms that are in the system set may also be stored
765 in world since system atoms can only match one slot while world atoms can
766 be greedy with respect to slots. Unslotted system packages will not be
769 arg_atom = args_set.findAtomForPackage(pkg)
772 cp = portage.dep_getkey(arg_atom)
774 sets = root_config.sets
775 portdb = root_config.trees["porttree"].dbapi
776 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists, or the single
# SLOT is not the default "0".
777 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
778 for cpv in portdb.match(cp))
779 slotted = len(available_slots) > 1 or \
780 (len(available_slots) == 1 and "0" not in available_slots)
782 # check the vdb in case this is multislot
783 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
784 for cpv in vardb.match(cp))
785 slotted = len(available_slots) > 1 or \
786 (len(available_slots) == 1 and "0" not in available_slots)
787 if slotted and arg_atom != cp:
788 # If the user gave a specific atom, store it as a
789 # slot atom in the world file.
790 slot_atom = pkg.slot_atom
792 # For USE=multislot, there are a couple of cases to
795 # 1) SLOT="0", but the real SLOT spontaneously changed to some
796 # unknown value, so just record an unslotted atom.
798 # 2) SLOT comes from an installed package and there is no
799 # matching SLOT in the portage tree.
801 # Make sure that the slot atom is available in either the
802 # portdb or the vardb, since otherwise the user certainly
803 # doesn't want the SLOT atom recorded in the world file
804 # (case 1 above). If it's only available in the vardb,
805 # the user may be trying to prevent a USE=multislot
806 # package from being removed by --depclean (case 2 above).
809 if not portdb.match(slot_atom):
810 # SLOT seems to come from an installed multislot package
812 # If there is no installed package matching the SLOT atom,
813 # it probably changed SLOT spontaneously due to USE=multislot,
814 # so just record an unslotted atom.
815 if vardb.match(slot_atom):
816 # Now verify that the argument is precise
817 # enough to identify a specific slot.
818 matches = mydb.match(arg_atom)
819 matched_slots = set()
821 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
822 if len(matched_slots) == 1:
823 new_world_atom = slot_atom
825 if new_world_atom == sets["world"].findAtomForPackage(pkg):
826 # Both atoms would be identical, so there's nothing to add.
829 # Unlike world atoms, system atoms are not greedy for slots, so they
830 # can't be safely excluded from world if they are slotted.
831 system_atom = sets["system"].findAtomForPackage(pkg)
833 if not portage.dep_getkey(system_atom).startswith("virtual/"):
835 # System virtuals aren't safe to exclude from world since they can
836 # match multiple old-style virtuals but only one of them will be
837 # pulled in by update or depclean.
838 providers = portdb.mysettings.getvirtuals().get(
839 portage.dep_getkey(system_atom))
840 if providers and len(providers) == 1 and providers[0] == cp:
842 return new_world_atom
# Generator stripping the +/- default markers from IUSE flags.
# NOTE(review): elided excerpt -- the loop header and yield lines are not
# visible here.
844 def filter_iuse_defaults(iuse):
846 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: walks the MRO collecting __slots__ names so
# subclasses get keyword-argument construction and a copy() that duplicates
# every declared slot.
# NOTE(review): elided excerpt -- loop headers, the copy() def line, and the
# return statement are not visible here.
851 class SlotObject(object):
852 __slots__ = ("__weakref__",)
854 def __init__(self, **kwargs):
855 classes = [self.__class__]
860 classes.extend(c.__bases__)
861 slots = getattr(c, "__slots__", None)
# Unspecified slots default to None.
865 myvalue = kwargs.get(myattr, None)
866 setattr(self, myattr, myvalue)
870 Create a new instance and copy all attributes
871 defined from __slots__ (including those from
874 obj = self.__class__()
876 classes = [self.__class__]
881 classes.extend(c.__bases__)
882 slots = getattr(c, "__slots__", None)
886 setattr(obj, myattr, getattr(self, myattr))
# Base priority type: rich comparisons delegate to the subclass __int__()
# so priorities compare like plain integers.
# NOTE(review): elided excerpt -- blank lines and the copy() def header are
# not visible here.
890 class AbstractDepPriority(SlotObject):
891 __slots__ = ("buildtime", "runtime", "runtime_post")
893 def __lt__(self, other):
894 return self.__int__() < other
896 def __le__(self, other):
897 return self.__int__() <= other
899 def __eq__(self, other):
900 return self.__int__() == other
902 def __ne__(self, other):
903 return self.__int__() != other
905 def __gt__(self, other):
906 return self.__int__() > other
908 def __ge__(self, other):
909 return self.__int__() >= other
913 return copy.copy(self)
# NOTE(review): elided excerpt -- the class constants (MEDIUM, MEDIUM_SOFT,
# SOFT, MIN), __int__ branches, and __str__ return lines are not visible.
915 class DepPriority(AbstractDepPriority):
917 This class generates an integer priority level based of various
918 attributes of the dependency relationship. Attributes can be assigned
919 at any time and the new integer value will be generated on calls to the
920 __int__() method. Rich comparison operators are supported.
922 The boolean attributes that affect the integer value are "satisfied",
923 "buildtime", "runtime", and "system". Various combinations of
924 attributes lead to the following priority levels:
926 Combination of properties Priority Category
928 not satisfied and buildtime 0 HARD
929 not satisfied and runtime -1 MEDIUM
930 not satisfied and runtime_post -2 MEDIUM_SOFT
931 satisfied and buildtime and rebuild -3 SOFT
932 satisfied and buildtime -4 SOFT
933 satisfied and runtime -5 SOFT
934 satisfied and runtime_post -6 SOFT
935 (none of the above) -6 SOFT
937 Several integer constants are defined for categorization of priority
940 MEDIUM The upper boundary for medium dependencies.
941 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
942 SOFT The upper boundary for soft dependencies.
943 MIN The lower boundary for soft dependencies.
945 __slots__ = ("satisfied", "rebuild")
# __int__: unsatisfied deps map to 0/-1/-2, satisfied to -3..-6
# (see the table in the docstring above).
952 if not self.satisfied:
957 if self.runtime_post:
965 if self.runtime_post:
# __str__: render the category name by comparing against the boundaries.
970 myvalue = self.__int__()
971 if myvalue > self.MEDIUM:
973 if myvalue > self.MEDIUM_SOFT:
975 if myvalue > self.SOFT:
# Priority subclass used for blocker relationships; a single shared
# instance is kept on the class.
# NOTE(review): elided excerpt -- the class body is not visible here.
979 class BlockerDepPriority(DepPriority):
984 BlockerDepPriority.instance = BlockerDepPriority()
# NOTE(review): elided excerpt -- the priority table rows for satisfied
# cases, the class constants, and most of __int__/__str__ are not visible.
986 class UnmergeDepPriority(AbstractDepPriority):
987 __slots__ = ("satisfied",)
989 Combination of properties Priority Category
994 (none of the above) -2 SOFT
1004 if self.runtime_post:
1011 myvalue = self.__int__()
1012 if myvalue > self.SOFT:
# NOTE(review): heavily elided excerpt -- try/except/finally lines, loop
# headers, the slot_counters initialization, and several return statements
# are not visible here (original line numbers jump throughout).
1016 class FakeVartree(portage.vartree):
1017 """This implements an in-memory copy of a vartree instance that provides
1018 all the interfaces required for use by the depgraph. The vardb is locked
1019 during the constructor call just long enough to read a copy of the
1020 installed package information. This allows the depgraph to do its
1021 dependency calculations without holding a lock on the vardb. It also
1022 allows things like vardb global updates to be done in memory so that the
1023 user doesn't necessarily need write access to the vardb in cases where
1024 global updates are necessary (updates are performed when necessary if there
1025 is not a matching ebuild in the tree)."""
1026 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1027 self._root_config = root_config
1028 if pkg_cache is None:
1030 real_vartree = root_config.trees["vartree"]
1031 portdb = root_config.trees["porttree"].dbapi
1032 self.root = real_vartree.root
1033 self.settings = real_vartree.settings
1034 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1035 self._pkg_cache = pkg_cache
1036 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1037 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1039 # At least the parent needs to exist for the lock file.
1040 portage.util.ensure_dirs(vdb_path)
1041 except portage.exception.PortageException:
# Only lock when we can actually write the lock file.
1045 if acquire_lock and os.access(vdb_path, os.W_OK):
1046 vdb_lock = portage.locks.lockdir(vdb_path)
1047 real_dbapi = real_vartree.dbapi
# Snapshot every installed package into the in-memory dbapi.
1049 for cpv in real_dbapi.cpv_all():
1050 cache_key = ("installed", self.root, cpv, "nomerge")
1051 pkg = self._pkg_cache.get(cache_key)
1053 metadata = pkg.metadata
1055 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1056 myslot = metadata["SLOT"]
1057 mycp = portage.dep_getkey(cpv)
1058 myslot_atom = "%s:%s" % (mycp, myslot)
1060 mycounter = long(metadata["COUNTER"])
1063 metadata["COUNTER"] = str(mycounter)
# Track the highest COUNTER seen per slot atom.
1064 other_counter = slot_counters.get(myslot_atom, None)
1065 if other_counter is not None:
1066 if other_counter > mycounter:
1068 slot_counters[myslot_atom] = mycounter
1070 pkg = Package(built=True, cpv=cpv,
1071 installed=True, metadata=metadata,
1072 root_config=root_config, type_name="installed")
1073 self._pkg_cache[pkg] = pkg
1074 self.dbapi.cpv_inject(pkg)
1075 real_dbapi.flush_cache()
1078 portage.locks.unlockdir(vdb_lock)
1079 # Populate the old-style virtuals using the cached values.
1080 if not self.settings.treeVirtuals:
1081 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1082 portage.getCPFromCPV, self.get_all_provides())
1084 # Initialize variables needed for lazy cache pulls of the live ebuild
1085 # metadata. This ensures that the vardb lock is released ASAP, without
1086 # being delayed in case cache generation is triggered.
1087 self._aux_get = self.dbapi.aux_get
1088 self.dbapi.aux_get = self._aux_get_wrapper
1089 self._match = self.dbapi.match
1090 self.dbapi.match = self._match_wrapper
1091 self._aux_get_history = set()
1092 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1093 self._portdb = portdb
1094 self._global_updates = None
1096 def _match_wrapper(self, cpv, use_cache=1):
1098 Make sure the metadata in Package instances gets updated for any
1099 cpv that is returned from a match() call, since the metadata can
1100 be accessed directly from the Package instance instead of via
1103 matches = self._match(cpv, use_cache=use_cache)
1105 if cpv in self._aux_get_history:
1107 self._aux_get_wrapper(cpv, [])
# Lazily overlay live ebuild metadata (or global updates) the first
# time each package's metadata is requested.
1110 def _aux_get_wrapper(self, pkg, wants):
1111 if pkg in self._aux_get_history:
1112 return self._aux_get(pkg, wants)
1113 self._aux_get_history.add(pkg)
1115 # Use the live ebuild metadata if possible.
1116 live_metadata = dict(izip(self._portdb_keys,
1117 self._portdb.aux_get(pkg, self._portdb_keys)))
1118 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1120 self.dbapi.aux_update(pkg, live_metadata)
1121 except (KeyError, portage.exception.PortageException):
# No usable ebuild: fall back to applying profiles/updates entries.
1122 if self._global_updates is None:
1123 self._global_updates = \
1124 grab_global_updates(self._portdb.porttree_root)
1125 perform_global_updates(
1126 pkg, self.dbapi, self._global_updates)
1127 return self._aux_get(pkg, wants)
1129 def sync(self, acquire_lock=1):
1131 Call this method to synchronize state with the real vardb
1132 after one or more packages may have been installed or
1135 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1137 # At least the parent needs to exist for the lock file.
1138 portage.util.ensure_dirs(vdb_path)
1139 except portage.exception.PortageException:
1143 if acquire_lock and os.access(vdb_path, os.W_OK):
1144 vdb_lock = portage.locks.lockdir(vdb_path)
1148 portage.locks.unlockdir(vdb_lock)
1152 real_vardb = self._root_config.trees["vartree"].dbapi
1153 current_cpv_set = frozenset(real_vardb.cpv_all())
1154 pkg_vardb = self.dbapi
1155 aux_get_history = self._aux_get_history
1157 # Remove any packages that have been uninstalled.
1158 for pkg in list(pkg_vardb):
1159 if pkg.cpv not in current_cpv_set:
1160 pkg_vardb.cpv_remove(pkg)
1161 aux_get_history.discard(pkg.cpv)
1163 # Validate counters and timestamps.
1166 validation_keys = ["COUNTER", "_mtime_"]
1167 for cpv in current_cpv_set:
1169 pkg_hash_key = ("installed", root, cpv, "nomerge")
1170 pkg = pkg_vardb.get(pkg_hash_key)
1172 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
# Stale cache entry (reinstalled or touched): drop and re-read it.
1174 if counter != pkg.metadata["COUNTER"] or \
1176 pkg_vardb.cpv_remove(pkg)
1177 aux_get_history.discard(pkg.cpv)
1181 pkg = self._pkg(cpv)
1183 other_counter = slot_counters.get(pkg.slot_atom)
1184 if other_counter is not None:
1185 if other_counter > pkg.counter:
1188 slot_counters[pkg.slot_atom] = pkg.counter
1189 pkg_vardb.cpv_inject(pkg)
1191 real_vardb.flush_cache()
# Build a Package instance for one installed cpv from the real vardb.
1193 def _pkg(self, cpv):
1194 root_config = self._root_config
1195 real_vardb = root_config.trees["vartree"].dbapi
1196 db_keys = list(real_vardb._aux_cache_keys)
1197 pkg = Package(cpv=cpv, installed=True,
1198 metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
1199 root_config=root_config,
1200 type_name="installed")
# Parse ${PORTDIR}/profiles/updates into a flat list of move/slotmove
# commands.  A missing updates directory yields an empty result.
# NOTE(review): elided excerpt -- the upd_commands initialization, except
# body, and return line are not visible here.
1203 def grab_global_updates(portdir):
1204 from portage.update import grab_updates, parse_updates
1205 updpath = os.path.join(portdir, "profiles", "updates")
1207 rawupdates = grab_updates(updpath)
1208 except portage.exception.DirectoryNotFound:
1211 for mykey, mystat, mycontent in rawupdates:
1212 commands, errors = parse_updates(mycontent)
1213 upd_commands.extend(commands)
# Apply package-move/slot-move updates to one package's dependency
# metadata inside the given (in-memory) dbapi.
# NOTE(review): elided excerpt -- the guard before aux_update is not
# visible here.
1216 def perform_global_updates(mycpv, mydb, mycommands):
1217 from portage.update import update_dbentries
1218 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1219 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1220 updates = update_dbentries(mycommands, aux_dict)
1222 mydb.aux_update(mycpv, updates)
# NOTE(review): elided excerpt -- docstring delimiters, the "return False"
# bodies of most guards, and the final "return True" are not visible here.
1224 def visible(pkgsettings, pkg):
1226 Check if a package is visible. This can raise an InvalidDependString
1227 exception if LICENSE is invalid.
1228 TODO: optionally generate a list of masking reasons
1230 @returns: True if the package is visible, False otherwise.
# Guards, in order: undefined SLOT, unacceptable CHOST on binpkgs,
# unsupported/deprecated EAPI, missing KEYWORDS, package.mask, profile
# mask, and missing licenses.
1232 if not pkg.metadata["SLOT"]:
1234 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1235 if not pkgsettings._accept_chost(pkg):
1237 eapi = pkg.metadata["EAPI"]
1238 if not portage.eapi_is_supported(eapi):
1240 if not pkg.installed:
1241 if portage._eapi_is_deprecated(eapi):
1243 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1245 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1247 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1250 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1252 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for a package, combining
# portage.getmaskingstatus() with CHOST and SLOT sanity checks.
# NOTE(review): sampled excerpt — the final return of mreasons is not
# shown here.
1256 def get_masking_status(pkg, pkgsettings, root_config):
1258 mreasons = portage.getmaskingstatus(
1259 pkg, settings=pkgsettings,
1260 portdb=root_config.trees["porttree"].dbapi)
# Binary packages with an unaccepted CHOST get an extra reason.
1262 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1263 if not pkgsettings._accept_chost(pkg):
1264 mreasons.append("CHOST: %s" % \
1265 pkg.metadata["CHOST"])
1267 if not pkg.metadata["SLOT"]:
1268 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for cpv from the given db and compute its masking
# reasons. Returns (metadata, mreasons); metadata is None-tolerant —
# a failed aux_get (handling not visible in this excerpt) yields
# mreasons == ["corruption"].
1272 def get_mask_info(root_config, cpv, pkgsettings,
1273 db, pkg_type, built, installed, db_keys):
1276 metadata = dict(izip(db_keys,
1277 db.aux_get(cpv, db_keys)))
# For ebuilds (not built), compute USE from the current config.
1280 if metadata and not built:
1281 pkgsettings.setcpv(cpv, mydb=metadata)
1282 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1283 if metadata is None:
1284 mreasons = ["corruption"]
1286 pkg = Package(type_name=pkg_type, root_config=root_config,
1287 cpv=cpv, built=built, installed=installed, metadata=metadata)
1288 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1289 return metadata, mreasons
# Print each masked package with its masking reasons, mask-file comment
# and license locations, de-duplicating repeated comments/licenses.
# Returns True if any package was masked due to an unsupported EAPI.
# NOTE(review): sampled excerpt — some lines (e.g. shown_cpvs setup,
# continue statements, print calls for comment/msg) are not shown.
1291 def show_masked_packages(masked_packages):
1292 shown_licenses = set()
1293 shown_comments = set()
1294 # Maybe there is both an ebuild and a binary. Only
1295 # show one of them to avoid redundant appearance.
1297 have_eapi_mask = False
1298 for (root_config, pkgsettings, cpv,
1299 metadata, mreasons) in masked_packages:
1300 if cpv in shown_cpvs:
1303 comment, filename = None, None
# Look up the package.mask comment and its file location.
1304 if "package.mask" in mreasons:
1305 comment, filename = \
1306 portage.getmaskingreason(
1307 cpv, metadata=metadata,
1308 settings=pkgsettings,
1309 portdb=root_config.trees["porttree"].dbapi,
1310 return_location=True)
1311 missing_licenses = []
1313 if not portage.eapi_is_supported(metadata["EAPI"]):
1314 have_eapi_mask = True
1316 missing_licenses = \
1317 pkgsettings._getMissingLicenses(
1319 except portage.exception.InvalidDependString:
1320 # This will have already been reported
1321 # above via mreasons.
1324 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1325 if comment and comment not in shown_comments:
1328 shown_comments.add(comment)
1329 portdb = root_config.trees["porttree"].dbapi
# Point the user at a local copy of each missing license, once.
1330 for l in missing_licenses:
1331 l_path = portdb.findLicensePath(l)
1332 if l in shown_licenses:
1334 msg = ("A copy of the '%s' license" + \
1335 " is located at '%s'.") % (l, l_path)
1338 shown_licenses.add(l)
1339 return have_eapi_mask
# Base class for hashable tasks. Identity is entirely delegated to
# _get_hash_key(), which subclasses must provide; equality, hashing,
# length, indexing, iteration and containment all forward to it.
# NOTE(review): sampled excerpt — some dunder headers (__hash__,
# __len__, __iter__, __str__) are not visible here.
1341 class Task(SlotObject):
1342 __slots__ = ("_hash_key", "_hash_value")
1344 def _get_hash_key(self):
1345 hash_key = getattr(self, "_hash_key", None)
# Subclasses must populate _hash_key; the base class cannot.
1346 if hash_key is None:
1347 raise NotImplementedError(self)
1350 def __eq__(self, other):
1351 return self._get_hash_key() == other
1353 def __ne__(self, other):
1354 return self._get_hash_key() != other
# The hash value is computed once and cached in _hash_value.
1357 hash_value = getattr(self, "_hash_value", None)
1358 if hash_value is None:
1359 self._hash_value = hash(self._get_hash_key())
1360 return self._hash_value
1363 return len(self._get_hash_key())
1365 def __getitem__(self, key):
1366 return self._get_hash_key()[key]
1369 return iter(self._get_hash_key())
1371 def __contains__(self, key):
1372 return key in self._get_hash_key()
1375 return str(self._get_hash_key())
# Task subclass representing a blocker atom. Its identity tuple is
# ("blocks", root, atom, eapi), cached lazily in _hash_key.
1377 class Blocker(Task):
1379 __hash__ = Task.__hash__
1380 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1382 def __init__(self, **kwargs):
1383 Task.__init__(self, **kwargs)
# Derive the category/package key from the blocker atom.
1384 self.cp = portage.dep_getkey(self.atom)
1386 def _get_hash_key(self):
1387 hash_key = getattr(self, "_hash_key", None)
1388 if hash_key is None:
1390 ("blocks", self.root, self.atom, self.eapi)
1391 return self._hash_key
# Task subclass representing one package (ebuild, binary or installed).
# Derived attributes (cp, slot_atom, category, pf, cpv_split, pv_split)
# are computed from cpv in __init__; metadata is wrapped so writes to
# wrapped keys update attributes (see _PackageMetadataWrapper below).
# Comparison operators order packages of the same cp by version via
# portage.pkgcmp(). NOTE(review): sampled excerpt — several lines
# (metadata_keys header, _use class header, comparison bodies' returns)
# are not visible here.
1393 class Package(Task):
1395 __hash__ = Task.__hash__
1396 __slots__ = ("built", "cpv", "depth",
1397 "installed", "metadata", "onlydeps", "operation",
1398 "root_config", "type_name",
1399 "category", "counter", "cp", "cpv_split",
1400 "inherited", "iuse", "mtime",
1401 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1404 "CHOST", "COUNTER", "DEPEND", "EAPI",
1405 "INHERITED", "IUSE", "KEYWORDS",
1406 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1407 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1409 def __init__(self, **kwargs):
1410 Task.__init__(self, **kwargs)
1411 self.root = self.root_config.root
# Wrap metadata so assignments sync Package attributes.
1412 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1413 self.cp = portage.cpv_getkey(self.cpv)
1414 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1415 self.category, self.pf = portage.catsplit(self.cpv)
1416 self.cpv_split = portage.catpkgsplit(self.cpv)
1417 self.pv_split = self.cpv_split[1:]
# Inner helper holding the enabled USE flags as a frozenset.
1421 __slots__ = ("__weakref__", "enabled")
1423 def __init__(self, use):
1424 self.enabled = frozenset(use)
# Inner helper describing IUSE: enabled (+), disabled (-) and all
# tokens, plus a lazily-built validity regex.
1426 class _iuse(object):
1428 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1430 def __init__(self, tokens, iuse_implicit):
1431 self.tokens = tuple(tokens)
1432 self.iuse_implicit = iuse_implicit
1439 enabled.append(x[1:])
1441 disabled.append(x[1:])
1444 self.enabled = frozenset(enabled)
1445 self.disabled = frozenset(disabled)
1446 self.all = frozenset(chain(enabled, disabled, other))
# Lazily build and cache the regex on first access of "regex".
1448 def __getattribute__(self, name):
1451 return object.__getattribute__(self, "regex")
1452 except AttributeError:
1453 all = object.__getattribute__(self, "all")
1454 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1455 # Escape anything except ".*" which is supposed
1456 # to pass through from _get_implicit_iuse()
1457 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1458 regex = "^(%s)$" % "|".join(regex)
1459 regex = regex.replace("\\.\\*", ".*")
1460 self.regex = re.compile(regex)
1461 return object.__getattribute__(self, name)
# Identity tuple is (type_name, root, cpv, operation); operation
# defaults to "merge", or "nomerge" for onlydeps/installed.
1463 def _get_hash_key(self):
1464 hash_key = getattr(self, "_hash_key", None)
1465 if hash_key is None:
1466 if self.operation is None:
1467 self.operation = "merge"
1468 if self.onlydeps or self.installed:
1469 self.operation = "nomerge"
1471 (self.type_name, self.root, self.cpv, self.operation)
1472 return self._hash_key
1474 def __cmp__(self, other):
1481 def __lt__(self, other):
1482 if other.cp != self.cp:
1484 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1488 def __le__(self, other):
1489 if other.cp != self.cp:
1491 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1495 def __gt__(self, other):
1496 if other.cp != self.cp:
1498 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1502 def __ge__(self, other):
1503 if other.cp != self.cp:
1505 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Build the full set of metadata keys used by the metadata wrapper:
# all portage auxdb keys except placeholders, minus the obsolete
# CDEPEND, plus the keys Package itself requires. slot_dict_class()
# generates a slotted dict-like base class keyed on exactly this set.
1509 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1510 if not x.startswith("UNUSED_"))
1511 _all_metadata_keys.discard("CDEPEND")
1512 _all_metadata_keys.update(Package.metadata_keys)
1514 from portage.cache.mappings import slot_dict_class
1515 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1517 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1519 Detect metadata updates and synchronize Package attributes.
# Keys whose assignment triggers a _set_<key>() hook on this wrapper,
# which in turn updates the corresponding attribute on self._pkg.
1522 __slots__ = ("_pkg",)
1523 _wrapped_keys = frozenset(
1524 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1526 def __init__(self, pkg, metadata):
1527 _PackageMetadataWrapperBase.__init__(self)
# update() funnels every key through __setitem__ below, so the
# wrapped-key hooks fire for the initial metadata too.
1529 self.update(metadata)
1531 def __setitem__(self, k, v):
1532 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1533 if k in self._wrapped_keys:
# Dispatch to _set_counter, _set_iuse, etc. by key name.
1534 getattr(self, "_set_" + k.lower())(k, v)
1536 def _set_inherited(self, k, v):
# INHERITED may arrive as a whitespace-separated string.
1537 if isinstance(v, basestring):
1538 v = frozenset(v.split())
1539 self._pkg.inherited = v
1541 def _set_iuse(self, k, v):
1542 self._pkg.iuse = self._pkg._iuse(
1543 v.split(), self._pkg.root_config.iuse_implicit)
1545 def _set_slot(self, k, v):
1548 def _set_use(self, k, v):
1549 self._pkg.use = self._pkg._use(v.split())
1551 def _set_counter(self, k, v):
# COUNTER may arrive as a string; conversion handling is not fully
# visible in this excerpt.
1552 if isinstance(v, basestring):
1557 self._pkg.counter = v
1559 def _set__mtime_(self, k, v):
1560 if isinstance(v, basestring):
1562 v = float(v.strip())
# Runs the "fetch" phase in listonly/pretend mode for one ebuild,
# inside a private PORTAGE_TMPDIR that is restored and removed when
# done. NOTE(review): sampled excerpt — the execute()/_execute()
# method headers and try/finally lines are not visible here.
1567 class EbuildFetchPretend(SlotObject):
1569 __slots__ = ("fetch_all", "pkg", "settings")
1572 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1573 # ensuring sane $PWD (bug #239560) and storing elog
1574 # messages. Use a private temp directory, in order
1575 # to avoid locking the main one.
1576 settings = self.settings
1577 global_tmpdir = settings["PORTAGE_TMPDIR"]
1578 from tempfile import mkdtemp
1579 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1580 settings["PORTAGE_TMPDIR"] = private_tmpdir
1581 settings.backup_changes("PORTAGE_TMPDIR")
1583 retval = self._execute()
# Restore the original tmpdir and discard the private one.
1585 settings["PORTAGE_TMPDIR"] = global_tmpdir
1586 settings.backup_changes("PORTAGE_TMPDIR")
1587 shutil.rmtree(private_tmpdir)
1591 settings = self.settings
1593 root_config = pkg.root_config
1594 portdb = root_config.trees["porttree"].dbapi
1595 ebuild_path = portdb.findname(pkg.cpv)
1596 settings.setcpv(pkg)
1597 debug = settings.get("PORTAGE_DEBUG") == "1"
1598 use_cache = 1 # always true
1599 portage.doebuild_environment(ebuild_path, "fetch",
1600 root_config.root, settings, debug, use_cache, portdb)
1601 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
# listonly=1 makes doebuild only report what would be fetched.
1603 retval = portage.doebuild(ebuild_path, "fetch",
1604 self.settings["ROOT"], self.settings, debug=debug,
1605 listonly=1, fetchonly=1, fetchall=self.fetch_all,
1606 mydbapi=portdb, tree="porttree")
1608 portage.elog.elog_process(self.pkg.cpv, self.settings)
1611 class AsynchronousTask(SlotObject):
1613 Subclasses override _wait() and _poll() so that calls
1614 to public methods can be wrapped for implementing
1615 hooks such as exit listener notification.
1617 Sublasses should call self.wait() to notify exit listeners after
1618 the task is complete and self.returncode has been set.
1621 __slots__ = ("background", "cancelled", "returncode") + \
1622 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
# NOTE(review): sampled excerpt — the start()/isAlive()/poll()/wait()/
# cancel() method headers are not visible here; the bodies below
# belong to those public methods.
1626 Start an asynchronous task and then return as soon as possible.
1632 raise NotImplementedError(self)
1635 return self.returncode is None
1642 return self.returncode
1645 if self.returncode is None:
1648 return self.returncode
1651 return self.returncode
1654 self.cancelled = True
1657 def addStartListener(self, f):
1659 The function will be called with one argument, a reference to self.
# Listener lists are created lazily on first registration.
1661 if self._start_listeners is None:
1662 self._start_listeners = []
1663 self._start_listeners.append(f)
1665 def removeStartListener(self, f):
1666 if self._start_listeners is None:
1668 self._start_listeners.remove(f)
1670 def _start_hook(self):
# Fire each start listener exactly once; clear the list first so
# re-entrant registration does not see stale entries.
1671 if self._start_listeners is not None:
1672 start_listeners = self._start_listeners
1673 self._start_listeners = None
1675 for f in start_listeners:
1678 def addExitListener(self, f):
1680 The function will be called with one argument, a reference to self.
1682 if self._exit_listeners is None:
1683 self._exit_listeners = []
1684 self._exit_listeners.append(f)
1686 def removeExitListener(self, f):
# While _wait_hook() is running, pending listeners live on
# _exit_listener_stack instead of _exit_listeners.
1687 if self._exit_listeners is None:
1688 if self._exit_listener_stack is not None:
1689 self._exit_listener_stack.remove(f)
1691 self._exit_listeners.remove(f)
1693 def _wait_hook(self):
1695 Call this method after the task completes, just before returning
1696 the returncode from wait() or poll(). This hook is
1697 used to trigger exit listeners when the returncode first
1700 if self.returncode is not None and \
1701 self._exit_listeners is not None:
1703 # This prevents recursion, in case one of the
1704 # exit handlers triggers this method again by
1705 # calling wait(). Use a stack that gives
1706 # removeExitListener() an opportunity to consume
1707 # listeners from the stack, before they can get
1708 # called below. This is necessary because a call
1709 # to one exit listener may result in a call to
1710 # removeExitListener() for another listener on
1711 # the stack. That listener needs to be removed
1712 # from the stack since it would be inconsistent
1713 # to call it after it has been been passed into
1714 # removeExitListener().
1715 self._exit_listener_stack = self._exit_listeners
1716 self._exit_listeners = None
# Reverse so listeners fire in registration order as they pop.
1718 self._exit_listener_stack.reverse()
1719 while self._exit_listener_stack:
1720 self._exit_listener_stack.pop()(self)
1722 class PipeReader(AsynchronousTask):
1725 Reads output from one or more files and saves it in memory,
1726 for retrieval via the getvalue() method. This is driven by
1727 the scheduler's poll() loop, so it runs entirely within the
1731 __slots__ = ("input_files", "scheduler",) + \
1732 ("pid", "_read_data", "_registered", "_reg_ids")
# NOTE(review): sampled excerpt — the _start/isAlive/_wait/getvalue/
# close method headers are not all visible; bodies below belong to
# those methods.
1737 self._reg_ids = set()
1738 self._read_data = []
# Register every input fd, non-blocking, with the scheduler poll loop.
1739 for k, f in self.input_files.iteritems():
1740 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1741 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1742 self._reg_ids.add(self.scheduler.register(f.fileno(),
1743 PollConstants.POLLIN, self._output_handler))
1744 self._registered = True
1747 return self._registered
1750 if self.returncode is not None:
1751 return self.returncode
# Block in the scheduler until all registered fds are drained.
1753 if self._registered:
1754 self.scheduler.schedule(self._reg_ids)
1757 self.returncode = os.EX_OK
1758 return self.returncode
1761 """Retrieve the entire contents"""
1762 return "".join(self._read_data)
1765 """Free the memory buffer."""
1766 self._read_data = None
1768 def _output_handler(self, fd, event):
# Find the file object owning this fd and append its pending bytes.
1769 files = self.input_files
1770 for f in files.itervalues():
1771 if fd == f.fileno():
1774 buf = array.array('B')
1776 buf.fromfile(f, self._bufsize)
1781 self._read_data.append(buf.tostring())
1786 return self._registered
1788 def _unregister(self):
1790 Unregister from the scheduler and close open files.
1793 self._registered = False
1795 if self._reg_ids is not None:
1796 for reg_id in self._reg_ids:
1797 self.scheduler.unregister(reg_id)
1798 self._reg_ids = None
1800 if self.input_files is not None:
1801 for f in self.input_files.itervalues():
1803 self.input_files = None
# AsynchronousTask composed of a chain of sub-tasks; _current_task
# always points at the sub-task now running. NOTE(review): sampled
# excerpt — some method headers (isAlive, cancel, _poll, _wait) are
# not visible here.
1805 class CompositeTask(AsynchronousTask):
1807 __slots__ = ("scheduler",) + ("_current_task",)
1810 return self._current_task is not None
1813 self.cancelled = True
1814 if self._current_task is not None:
1815 self._current_task.cancel()
1819 This does a loop calling self._current_task.poll()
1820 repeatedly as long as the value of self._current_task
1821 keeps changing. It calls poll() a maximum of one time
1822 for a given self._current_task instance. This is useful
1823 since calling poll() on a task can trigger advance to
1824 the next task could eventually lead to the returncode
1825 being set in cases when polling only a single task would
1826 not have the same effect.
1831 task = self._current_task
1832 if task is None or task is prev:
1833 # don't poll the same task more than once
1838 return self.returncode
1844 task = self._current_task
1846 # don't wait for the same task more than once
1849 # Before the task.wait() method returned, an exit
1850 # listener should have set self._current_task to either
1851 # a different task or None. Something is wrong.
1852 raise AssertionError("self._current_task has not " + \
1853 "changed since calling wait", self, task)
1857 return self.returncode
1859 def _assert_current(self, task):
1861 Raises an AssertionError if the given task is not the
1862 same one as self._current_task. This can be useful
1865 if task is not self._current_task:
1866 raise AssertionError("Unrecognized task: %s" % (task,))
1868 def _default_exit(self, task):
1870 Calls _assert_current() on the given task and then sets the
1871 composite returncode attribute if task.returncode != os.EX_OK.
1872 If the task failed then self._current_task will be set to None.
1873 Subclasses can use this as a generic task exit callback.
1876 @returns: The task.returncode attribute.
1878 self._assert_current(task)
1879 if task.returncode != os.EX_OK:
1880 self.returncode = task.returncode
1881 self._current_task = None
1882 return task.returncode
1884 def _final_exit(self, task):
1886 Assumes that task is the final task of this composite task.
1887 Calls _default_exit() and sets self.returncode to the task's
1888 returncode and sets self._current_task to None.
1890 self._default_exit(task)
1891 self._current_task = None
1892 self.returncode = task.returncode
1893 return self.returncode
1895 def _default_final_exit(self, task):
1897 This calls _final_exit() and then wait().
1899 Subclasses can use this as a generic final task exit callback.
1902 self._final_exit(task)
1905 def _start_task(self, task, exit_handler):
1907 Register exit handler for the given task, set it
1908 as self._current_task, and call task.start().
1910 Subclasses can use this as a generic way to start
1914 task.addExitListener(exit_handler)
1915 self._current_task = task
1918 class TaskSequence(CompositeTask):
1920 A collection of tasks that executes sequentially. Each task
1921 must have a addExitListener() method that can be used as
1922 a means to trigger movement from one task to the next.
1925 __slots__ = ("_task_queue",)
1927 def __init__(self, **kwargs):
1928 AsynchronousTask.__init__(self, **kwargs)
# FIFO of queued tasks; deque gives O(1) popleft().
1929 self._task_queue = deque()
1931 def add(self, task):
1932 self._task_queue.append(task)
1935 self._start_next_task()
# Cancelling drops all queued tasks, then cancels the current one.
1938 self._task_queue.clear()
1939 CompositeTask.cancel(self)
1941 def _start_next_task(self):
1942 self._start_task(self._task_queue.popleft(),
1943 self._task_exit_handler)
# Advance through the queue; a failed task stops the sequence.
1945 def _task_exit_handler(self, task):
1946 if self._default_exit(task) != os.EX_OK:
1948 elif self._task_queue:
1949 self._start_next_task()
1951 self._final_exit(task)
# AsynchronousTask wrapping a forked child process identified by pid.
# NOTE(review): sampled excerpt — the _poll/cancel/isAlive/_wait
# method headers and try/except lines are not all visible here.
1954 class SubProcess(AsynchronousTask):
1956 __slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id")
1958 # A file descriptor is required for the scheduler to monitor changes from
1959 # inside a poll() loop. When logging is not enabled, create a pipe just to
1960 # serve this purpose alone.
1964 if self.returncode is not None:
1965 return self.returncode
1966 if self.pid is None:
1967 return self.returncode
1968 if self._registered:
1969 return self.returncode
# Non-blocking reap; ECHILD means the child is already gone, so a
# generic failure status (pid, 1) is synthesized.
1972 retval = os.waitpid(self.pid, os.WNOHANG)
1974 if e.errno != errno.ECHILD:
1977 retval = (self.pid, 1)
# (0, 0) from WNOHANG means the child has not exited yet.
1979 if retval == (0, 0):
1981 self._set_returncode(retval)
1982 return self.returncode
# Cancellation sends SIGTERM; ESRCH (no such process) is tolerated.
1987 os.kill(self.pid, signal.SIGTERM)
1989 if e.errno != errno.ESRCH:
1993 self.cancelled = True
1994 if self.pid is not None:
1996 return self.returncode
1999 return self.pid is not None and \
2000 self.returncode is None
2004 if self.returncode is not None:
2005 return self.returncode
2007 if self._registered:
2008 self.scheduler.schedule(self._reg_id)
2010 if self.returncode is not None:
2011 return self.returncode
# Blocking reap, with the same ECHILD fallback as above.
2014 wait_retval = os.waitpid(self.pid, 0)
2016 if e.errno != errno.ECHILD:
2019 self._set_returncode((self.pid, 1))
2021 self._set_returncode(wait_retval)
2023 return self.returncode
2025 def _unregister(self):
2027 Unregister from the scheduler and close open files.
2030 self._registered = False
2032 if self._reg_id is not None:
2033 self.scheduler.unregister(self._reg_id)
2036 if self._files is not None:
2037 for f in self._files.itervalues():
2041 def _set_returncode(self, wait_retval):
# Decode the 16-bit waitpid status: signal deaths are encoded into
# the high byte, normal exits are shifted down to the exit code.
2043 retval = wait_retval[1]
2045 if retval != os.EX_OK:
2047 retval = (retval & 0xff) << 8
2049 retval = retval >> 8
2051 self.returncode = retval
2053 class SpawnProcess(SubProcess):
2056 Constructor keyword args are passed into portage.process.spawn().
2057 The required "args" keyword argument will be passed as the first
# NOTE(review): sampled excerpt — the _start() method header and
# several try/else lines are not visible here.
2061 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2062 "uid", "gid", "groups", "umask", "logfile",
2063 "path_lookup", "pre_exec")
2065 __slots__ = ("args",) + \
2068 _file_names = ("log", "process", "stdout")
2069 _files_dict = slot_dict_class(_file_names, prefix="")
# Default fd_pipes to the current process's stdio.
2077 if self.fd_pipes is None:
2079 fd_pipes = self.fd_pipes
2080 fd_pipes.setdefault(0, sys.stdin.fileno())
2081 fd_pipes.setdefault(1, sys.stdout.fileno())
2082 fd_pipes.setdefault(2, sys.stderr.fileno())
2084 # flush any pending output
2085 for fd in fd_pipes.itervalues():
2086 if fd == sys.stdout.fileno():
2088 if fd == sys.stderr.fileno():
2091 logfile = self.logfile
2092 self._files = self._files_dict()
# The master end is monitored non-blocking by the scheduler.
2095 master_fd, slave_fd = self._pipe(fd_pipes)
2096 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2097 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2100 fd_pipes_orig = fd_pipes.copy()
2102 # TODO: Use job control functions like tcsetpgrp() to control
2103 # access to stdin. Until then, use /dev/null so that any
2104 # attempts to read from stdin will immediately return EOF
2105 # instead of blocking indefinitely.
2106 null_input = open('/dev/null', 'rb')
2107 fd_pipes[0] = null_input.fileno()
2109 fd_pipes[0] = fd_pipes_orig[0]
2111 files.process = os.fdopen(master_fd, 'r')
# With a logfile: tee child stdout/stderr through the pipe into the
# log (and the original stdout when not in the background).
2112 if logfile is not None:
2114 fd_pipes[1] = slave_fd
2115 fd_pipes[2] = slave_fd
2117 files.log = open(logfile, "a")
2118 portage.util.apply_secpass_permissions(logfile,
2119 uid=portage.portage_uid, gid=portage.portage_gid,
2122 if not self.background:
2123 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2125 output_handler = self._output_handler
2129 # Create a dummy pipe so the scheduler can monitor
2130 # the process from inside a poll() loop.
2131 fd_pipes[self._dummy_pipe_fd] = slave_fd
2133 fd_pipes[1] = slave_fd
2134 fd_pipes[2] = slave_fd
2135 output_handler = self._dummy_handler
# Forward the declared spawn kwargs to portage.process.spawn();
# logfile is handled here, not by spawn itself.
2138 for k in self._spawn_kwarg_names:
2139 v = getattr(self, k)
2143 kwargs["fd_pipes"] = fd_pipes
2144 kwargs["returnpid"] = True
2145 kwargs.pop("logfile", None)
2147 retval = self._spawn(self.args, **kwargs)
2150 if null_input is not None:
# An int return means spawn failed before fork; clean up and record.
2153 if isinstance(retval, int):
2155 for f in files.values():
2157 self.returncode = retval
2161 self.pid = retval[0]
# This class reaps the pid itself, so drop it from spawned_pids.
2162 portage.process.spawned_pids.remove(self.pid)
2164 self._reg_id = self.scheduler.register(files.process.fileno(),
2165 PollConstants.POLLIN, output_handler)
2166 self._registered = True
2168 def _pipe(self, fd_pipes):
2170 @type fd_pipes: dict
2171 @param fd_pipes: pipes from which to copy terminal size if desired.
2175 def _spawn(self, args, **kwargs):
2176 return portage.process.spawn(args, **kwargs)
2178 def _output_handler(self, fd, event):
# Drain the master pipe and tee to stdout (unless background) + log.
2180 buf = array.array('B')
2182 buf.fromfile(files.process, self._bufsize)
2186 if not self.background:
2187 buf.tofile(files.stdout)
2188 files.stdout.flush()
2189 buf.tofile(files.log)
2194 return self._registered
2196 def _dummy_handler(self, fd, event):
2198 This method is mainly interested in detecting EOF, since
2199 the only purpose of the pipe is to allow the scheduler to
2200 monitor the process from inside a poll() loop.
2203 buf = array.array('B')
2205 buf.fromfile(files.process, self._bufsize)
2213 return self._registered
2215 class MiscFunctionsProcess(SpawnProcess):
2217 Spawns misc-functions.sh with an existing ebuild environment.
2220 __slots__ = ("commands", "phase", "pkg", "settings")
# NOTE(review): the _start() header is not visible in this excerpt.
2223 settings = self.settings
# misc-functions.sh runs outside any single ebuild phase.
2224 settings.pop("EBUILD_PHASE", None)
2225 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2226 misc_sh_binary = os.path.join(portage_bin_path,
2227 os.path.basename(portage.const.MISC_SH_BINARY))
2229 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2230 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before spawning.
2232 portage._doebuild_exit_status_unlink(
2233 settings.get("EBUILD_EXIT_STATUS_FILE"))
2235 SpawnProcess._start(self)
2237 def _spawn(self, args, **kwargs):
2238 settings = self.settings
2239 debug = settings.get("PORTAGE_DEBUG") == "1"
# Spawn through portage.spawn() so the ebuild environment applies.
2240 return portage.spawn(" ".join(args), settings,
2241 debug=debug, **kwargs)
2243 def _set_returncode(self, wait_retval):
2244 SpawnProcess._set_returncode(self, wait_retval)
# Cross-check the exit-status file and adjust the returncode.
2245 self.returncode = portage._doebuild_exit_status_check_and_log(
2246 self.settings, self.phase, self.returncode)
# SpawnProcess that runs the `ebuild <file> fetch` helper for one
# package, using a locked private build dir for elog collection.
# NOTE(review): sampled excerpt — the _start() header, phase
# selection and some conditionals are not visible here.
2248 class EbuildFetcher(SpawnProcess):
2250 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2255 root_config = self.pkg.root_config
2256 portdb = root_config.trees["porttree"].dbapi
2257 ebuild_path = portdb.findname(self.pkg.cpv)
# Borrow a config from the pool; returned in _set_returncode().
2258 settings = self.config_pool.allocate()
2259 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2260 self._build_dir.lock()
2261 self._build_dir.clean()
2262 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2263 if self.logfile is None:
2264 self.logfile = settings.get("PORTAGE_LOG_FILE")
2270 # If any incremental variables have been overridden
2271 # via the environment, those values need to be passed
2272 # along here so that they are correctly considered by
2273 # the config instance in the subproccess.
2274 fetch_env = os.environ.copy()
2276 fetch_env["PORTAGE_NICENESS"] = "0"
2278 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2280 ebuild_binary = os.path.join(
2281 settings["PORTAGE_BIN_PATH"], "ebuild")
2283 fetch_args = [ebuild_binary, ebuild_path, phase]
2284 debug = settings.get("PORTAGE_DEBUG") == "1"
2286 fetch_args.append("--debug")
2288 self.args = fetch_args
2289 self.env = fetch_env
2290 SpawnProcess._start(self)
2292 def _pipe(self, fd_pipes):
2293 """When appropriate, use a pty so that fetcher progress bars,
2294 like wget has, will work properly."""
2295 if self.background or not sys.stdout.isatty():
2296 # When the output only goes to a log file,
2297 # there's no point in creating a pty.
2299 stdout_pipe = fd_pipes.get(1)
2300 got_pty, master_fd, slave_fd = \
2301 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2302 return (master_fd, slave_fd)
2304 def _set_returncode(self, wait_retval):
2305 SpawnProcess._set_returncode(self, wait_retval)
2306 # Collect elog messages that might have been
2307 # created by the pkg_nofetch phase.
2308 if self._build_dir is not None:
2309 # Skip elog messages for prefetch, in order to avoid duplicates.
2310 if not self.prefetch and self.returncode != os.EX_OK:
2312 if self.logfile is not None:
2314 elog_out = open(self.logfile, 'a')
2315 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2316 if self.logfile is not None:
2317 msg += ", Log file:"
2318 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2319 if self.logfile is not None:
2320 eerror(" '%s'" % (self.logfile,),
2321 phase="unpack", key=self.pkg.cpv, out=elog_out)
2322 if elog_out is not None:
2324 if not self.prefetch:
2325 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2326 features = self._build_dir.settings.features
# On success: clean the build dir, release the lock and return the
# pooled config instance.
2327 if self.returncode == os.EX_OK:
2328 self._build_dir.clean()
2329 self._build_dir.unlock()
2330 self.config_pool.deallocate(self._build_dir.settings)
2331 self._build_dir = None
# Manages locking and cleanup of one package's PORTAGE_BUILDDIR,
# including the shared category directory. NOTE(review): sampled
# excerpt — the lock()/clean()/unlock() method headers and some
# try/finally lines are not visible here.
2333 class EbuildBuildDir(SlotObject):
2335 __slots__ = ("dir_path", "pkg", "settings",
2336 "locked", "_catdir", "_lock_obj")
2338 def __init__(self, **kwargs):
2339 SlotObject.__init__(self, **kwargs)
2344 This raises an AlreadyLocked exception if lock() is called
2345 while a lock is already held. In order to avoid this, call
2346 unlock() or check whether the "locked" attribute is True
2347 or False before calling lock().
2349 if self._lock_obj is not None:
2350 raise self.AlreadyLocked((self._lock_obj,))
# Derive the build dir path from doebuild_environment() when it was
# not supplied explicitly.
2352 dir_path = self.dir_path
2353 if dir_path is None:
2354 root_config = self.pkg.root_config
2355 portdb = root_config.trees["porttree"].dbapi
2356 ebuild_path = portdb.findname(self.pkg.cpv)
2357 settings = self.settings
2358 settings.setcpv(self.pkg)
2359 debug = settings.get("PORTAGE_DEBUG") == "1"
2360 use_cache = 1 # always true
2361 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2362 self.settings, debug, use_cache, portdb)
2363 dir_path = self.settings["PORTAGE_BUILDDIR"]
2365 catdir = os.path.dirname(dir_path)
2366 self._catdir = catdir
2368 portage.util.ensure_dirs(os.path.dirname(catdir),
2369 gid=portage.portage_gid,
# Hold the category-dir lock only while creating catdir and taking
# the per-package build-dir lock.
2373 catdir_lock = portage.locks.lockdir(catdir)
2374 portage.util.ensure_dirs(catdir,
2375 gid=portage.portage_gid,
2377 self._lock_obj = portage.locks.lockdir(dir_path)
2379 self.locked = self._lock_obj is not None
2380 if catdir_lock is not None:
2381 portage.locks.unlockdir(catdir_lock)
2384 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2385 by keepwork or keeptemp in FEATURES."""
2386 settings = self.settings
2387 features = settings.features
2388 if not ("keepwork" in features or "keeptemp" in features):
# A missing build dir (ENOENT) is not an error.
2390 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2391 except EnvironmentError, e:
2392 if e.errno != errno.ENOENT:
2397 if self._lock_obj is None:
2400 portage.locks.unlockdir(self._lock_obj)
2401 self._lock_obj = None
# Opportunistically remove the category dir; non-empty or already
# removed are tolerated.
2404 catdir = self._catdir
2407 catdir_lock = portage.locks.lockdir(catdir)
2413 if e.errno not in (errno.ENOENT,
2414 errno.ENOTEMPTY, errno.EEXIST):
2417 portage.locks.unlockdir(catdir_lock)
# Raised when lock() is invoked while the lock is already held.
2419 class AlreadyLocked(portage.exception.PortageException):
2422 class EbuildBuild(CompositeTask):
2424 __slots__ = ("args_set", "config_pool", "find_blockers",
2425 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2426 "prefetcher", "settings", "world_atom") + \
2427 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2431 logger = self.logger
2434 settings = self.settings
2435 world_atom = self.world_atom
2436 root_config = pkg.root_config
2439 portdb = root_config.trees[tree].dbapi
2440 settings["EMERGE_FROM"] = pkg.type_name
2441 settings.backup_changes("EMERGE_FROM")
2443 ebuild_path = portdb.findname(self.pkg.cpv)
2444 self._ebuild_path = ebuild_path
2446 prefetcher = self.prefetcher
2447 if prefetcher is None:
2449 elif not prefetcher.isAlive():
2451 elif prefetcher.poll() is None:
2453 waiting_msg = "Fetching files " + \
2454 "in the background. " + \
2455 "To view fetch progress, run `tail -f " + \
2456 "/var/log/emerge-fetch.log` in another " + \
2458 msg_prefix = colorize("GOOD", " * ")
2459 from textwrap import wrap
2460 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2461 for line in wrap(waiting_msg, 65))
2462 if not self.background:
2463 writemsg(waiting_msg, noiselevel=-1)
2465 self._current_task = prefetcher
2466 prefetcher.addExitListener(self._prefetch_exit)
2469 self._prefetch_exit(prefetcher)
2471 def _prefetch_exit(self, prefetcher):
2475 settings = self.settings
2477 if opts.fetchonly and opts.pretend:
2478 fetcher = EbuildFetchPretend(
2479 fetch_all=opts.fetch_all_uri,
2480 pkg=pkg, settings=settings)
2481 retval = fetcher.execute()
2482 self.returncode = retval
2486 fetcher = EbuildFetcher(config_pool=self.config_pool,
2487 fetchall=opts.fetch_all_uri,
2488 fetchonly=opts.fetchonly,
2489 background=self.background,
2490 pkg=pkg, scheduler=self.scheduler)
2492 self._start_task(fetcher, self._fetch_exit)
def _fetch_exit(self, fetcher):
    # Record the fetch result, keep or discard the fetch log accordingly,
    # then lock the build dir and start the compile (and optionally the
    # binary-package) step.
    # NOTE(review): this view of the source is elided; if/else and return
    # lines are missing in several places below (e.g. the two alternative
    # fetch_failed assignments are separate branches in the original).
    fetch_failed = False
    fetch_failed = self._final_exit(fetcher) != os.EX_OK
    fetch_failed = self._default_exit(fetcher) != os.EX_OK
    if fetch_failed and fetcher.logfile is not None and \
        os.path.exists(fetcher.logfile):
        # Keep the log around so the failure can be inspected/reported.
        self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
    if not fetch_failed and fetcher.logfile is not None:
        # Fetch was successful, so remove the fetch log.
        os.unlink(fetcher.logfile)
    if fetch_failed or opts.fetchonly:
    # From here on: prepare the build directory and log the clean step.
    logger = self.logger
    pkg_count = self.pkg_count
    scheduler = self.scheduler
    settings = self.settings
    features = settings.features
    ebuild_path = self._ebuild_path
    system_set = pkg.root_config.sets["system"]
    self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
    self._build_dir.lock()
    # Cleaning is triggered before the setup
    # phase, in portage.doebuild().
    msg = " === (%s of %s) Cleaning (%s::%s)" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
    short_msg = "emerge: (%s of %s) %s Clean" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv)
    logger.log(msg, short_msg=short_msg)
    #buildsyspkg: Check if we need to _force_ binary package creation
    self._issyspkg = "buildsyspkg" in features and \
        system_set.findAtomForPackage(pkg) and \
    if opts.buildpkg or self._issyspkg:
        self._buildpkg = True
        msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
        short_msg = "emerge: (%s of %s) %s Compile" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)
        # NOTE(review): the message below belongs to the elided else
        # branch (no binary package being built).
        msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
        short_msg = "emerge: (%s of %s) %s Compile" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)
    build = EbuildExecuter(background=self.background, pkg=pkg,
        scheduler=scheduler, settings=settings)
    self._start_task(build, self._build_exit)
def _unlock_builddir(self):
    # Flush queued elog messages for this package before releasing the
    # build directory lock.
    portage.elog.elog_process(self.pkg.cpv, self.settings)
    self._build_dir.unlock()
def _build_exit(self, build):
    # Handle completion of the src_* phases and, when requested (or when
    # this is a system package), start building a binary package.
    # NOTE(review): elided view — return/branch lines are missing below.
    if self._default_exit(build) != os.EX_OK:
        self._unlock_builddir()
    buildpkg = self._buildpkg
    self._final_exit(build)
    # System packages get a rescue tarball even without --buildpkg.
    msg = ">>> This is a system package, " + \
        "let's pack a rescue tarball.\n"
    log_path = self.settings.get("PORTAGE_LOG_FILE")
    if log_path is not None:
        log_file = open(log_path, 'a')
    if not self.background:
        portage.writemsg_stdout(msg, noiselevel=-1)
    packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
        scheduler=self.scheduler, settings=self.settings)
    self._start_task(packager, self._buildpkg_exit)
def _buildpkg_exit(self, packager):
    # (docstring; its triple-quote lines are elided from this view)
    Releases the build dir lock when there is a failure or
    when in buildpkgonly mode. Otherwise, the lock will
    be released when merge() is called.
    if self._default_exit(packager) == os.EX_OK and \
        self.opts.buildpkgonly:
        # Need to call "clean" phase for buildpkgonly mode
        portage.elog.elog_process(self.pkg.cpv, self.settings)
        clean_phase = EbuildPhase(background=self.background,
            pkg=self.pkg, phase=phase,
            scheduler=self.scheduler, settings=self.settings,
        self._start_task(clean_phase, self._clean_exit)
    if self._final_exit(packager) != os.EX_OK or \
        self.opts.buildpkgonly:
        self._unlock_builddir()
def _clean_exit(self, clean_phase):
    # Final step for buildpkgonly mode: release the build dir lock after
    # the clean phase finishes (or fails).
    if self._final_exit(clean_phase) != os.EX_OK or \
        self.opts.buildpkgonly:
        self._unlock_builddir()
# EbuildBuild.install() -- NOTE(review): the "def install(self):" line
# and the docstring's quote lines are elided from this view.
    Install the package and then clean up and release locks.
    Only call this after the build has completed successfully
    and neither fetchonly nor buildpkgonly mode are enabled.
    find_blockers = self.find_blockers
    ldpath_mtimes = self.ldpath_mtimes
    logger = self.logger
    pkg_count = self.pkg_count
    settings = self.settings
    world_atom = self.world_atom
    ebuild_path = self._ebuild_path
    merge = EbuildMerge(find_blockers=self.find_blockers,
        ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
        pkg_count=pkg_count, pkg_path=ebuild_path,
        scheduler=self.scheduler,
        settings=settings, tree=tree, world_atom=world_atom)
    msg = " === (%s of %s) Merging (%s::%s)" % \
        (pkg_count.curval, pkg_count.maxval,
        pkg.cpv, ebuild_path)
    short_msg = "emerge: (%s of %s) %s Merge" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv)
    logger.log(msg, short_msg=short_msg)
    rval = merge.execute()
    # Release the build dir lock regardless of the merge result.
    self._unlock_builddir()
class EbuildExecuter(CompositeTask):
    # Runs the source-build phases of an ebuild in sequence: clean,
    # setup, unpack, then the EAPI-dependent subset of _phases.
    # NOTE(review): elided view — several def lines, returns and else
    # branches are missing below.

    __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)

    _phases = ("prepare", "configure", "compile", "test", "install")

    _live_eclasses = frozenset([
        # (eclass names elided from this view)
        self._tree = "porttree"
        clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
            scheduler=self.scheduler, settings=self.settings, tree=self._tree)
        self._start_task(clean_phase, self._clean_phase_exit)

    def _clean_phase_exit(self, clean_phase):
        if self._default_exit(clean_phase) != os.EX_OK:
        scheduler = self.scheduler
        settings = self.settings
        # This initializes PORTAGE_LOG_FILE.
        portage.prepare_build_dirs(pkg.root, settings, cleanup)
        setup_phase = EbuildPhase(background=self.background,
            pkg=pkg, phase="setup", scheduler=scheduler,
            settings=settings, tree=self._tree)
        # Setup phases are serialized through the scheduler.
        setup_phase.addExitListener(self._setup_exit)
        self._current_task = setup_phase
        self.scheduler.scheduleSetup(setup_phase)

    def _setup_exit(self, setup_phase):
        if self._default_exit(setup_phase) != os.EX_OK:
        unpack_phase = EbuildPhase(background=self.background,
            pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
            settings=self.settings, tree=self._tree)
        if self._live_eclasses.intersection(self.pkg.inherited):
            # Serialize $DISTDIR access for live ebuilds since
            # otherwise they can interfere with each other.
            unpack_phase.addExitListener(self._unpack_exit)
            self._current_task = unpack_phase
            self.scheduler.scheduleUnpack(unpack_phase)
            # NOTE(review): the call below is the elided else branch
            # (non-live ebuilds unpack without scheduler serialization).
            self._start_task(unpack_phase, self._unpack_exit)

    def _unpack_exit(self, unpack_phase):
        if self._default_exit(unpack_phase) != os.EX_OK:
        ebuild_phases = TaskSequence(scheduler=self.scheduler)
        phases = self._phases
        eapi = pkg.metadata["EAPI"]
        if eapi in ("0", "1", "2_pre1"):
            # skip src_prepare and src_configure
        elif eapi in ("2_pre2",):
        for phase in phases:
            ebuild_phases.add(EbuildPhase(background=self.background,
                pkg=self.pkg, phase=phase, scheduler=self.scheduler,
                settings=self.settings, tree=self._tree))
        self._start_task(ebuild_phases, self._default_final_exit)
class EbuildMetadataPhase(SubProcess):
    # (docstring; its triple-quote lines are elided from this view)
    Asynchronous interface for the ebuild "depend" phase which is
    used to extract metadata from the ebuild.
    # NOTE(review): elided view — the _start() def line, branch bodies
    # and some returns are missing below.

    __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
        "ebuild_mtime", "portdb", "repo_path", "settings") + \

    _file_names = ("ebuild",)
    _files_dict = slot_dict_class(_file_names, prefix="")
    _bufsize = SpawnProcess._bufsize

        settings = self.settings
        ebuild_path = self.ebuild_path
        debug = settings.get("PORTAGE_DEBUG") == "1"
        if self.fd_pipes is not None:
            # Copy so the caller's dict is not mutated by setdefault below.
            fd_pipes = self.fd_pipes.copy()
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())
        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
            if fd == sys.stderr.fileno():
        fd_pipes_orig = fd_pipes.copy()
        self._files = self._files_dict()
        # Non-blocking read end so the poll loop never stalls on the pipe.
        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        fd_pipes[self._metadata_fd] = slave_fd
        retval = portage.doebuild(ebuild_path, "depend",
            settings["ROOT"], settings, debug,
            mydbapi=self.portdb, tree="porttree",
            fd_pipes=fd_pipes, returnpid=True)
        if isinstance(retval, int):
            # doebuild failed before spawning
            self.returncode = retval
        self.pid = retval[0]
        # This class tracks the pid itself; drop it from the global list.
        portage.process.spawned_pids.remove(self.pid)
        self._raw_metadata = []
        files.ebuild = os.fdopen(master_fd, 'r')
        self._reg_id = self.scheduler.register(files.ebuild.fileno(),
            PollConstants.POLLIN, self._output_handler)
        self._registered = True

    def _output_handler(self, fd, event):
        # Poll callback: accumulate raw metadata; an empty read means EOF.
        self._raw_metadata.append(files.ebuild.read())
        if not self._raw_metadata[-1]:
        if self.returncode == os.EX_OK:
            # One metadata value per line, paired with auxdbkeys in order.
            metadata = izip(portage.auxdbkeys,
                "".join(self._raw_metadata).splitlines())
            self.metadata_callback(self.cpv, self.ebuild_path,
                self.repo_path, metadata, self.ebuild_mtime)
        return self._registered
class EbuildProcess(SpawnProcess):
    # Spawns a single ebuild phase via portage.doebuild() in a child
    # process, logging to PORTAGE_LOG_FILE when one is set.
    # NOTE(review): elided view — the _start() def line and the _spawn
    # return line are missing below.

    __slots__ = ("phase", "pkg", "settings", "tree")

        self.logfile = self.settings.get("PORTAGE_LOG_FILE")
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        # Use a pty (when available) so child output behaves like a
        # terminal; fall back to a plain pipe otherwise.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _spawn(self, args, **kwargs):
        root_config = self.pkg.root_config
        mydbapi = root_config.trees[tree].dbapi
        settings = self.settings
        ebuild_path = settings["EBUILD"]
        debug = settings.get("PORTAGE_DEBUG") == "1"
        rval = portage.doebuild(ebuild_path, self.phase,
            root_config.root, settings, debug,
            mydbapi=mydbapi, tree=tree, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        if self.phase not in ("clean", "cleanrm"):
            # Reconcile the exit status with the die-hook status file.
            self.returncode = portage._doebuild_exit_status_check_and_log(
                self.settings, self.phase, self.returncode)
        portage._post_phase_userpriv_perms(self.settings)
class EbuildPhase(CompositeTask):
    # Runs one ebuild phase as an EbuildProcess, then any registered
    # post-phase commands (via MiscFunctionsProcess).
    # NOTE(review): elided view — the _start() def line, returns and some
    # branch bodies are missing below.

    __slots__ = ("background", "pkg", "phase",
        "scheduler", "settings", "tree")

    _post_phase_cmds = portage._post_phase_cmds

        ebuild_process = EbuildProcess(background=self.background,
            pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
            settings=self.settings, tree=self.tree)
        self._start_task(ebuild_process, self._ebuild_exit)

    def _ebuild_exit(self, ebuild_process):
        if self.phase == "install":
            # Scan the build log for QA problems after src_install.
            log_path = self.settings.get("PORTAGE_LOG_FILE")
            if self.background and log_path is not None:
                log_file = open(log_path, 'a')
            portage._check_build_log(self.settings, out=out)
            if log_file is not None:
        if self._default_exit(ebuild_process) != os.EX_OK:
        settings = self.settings
        if self.phase == "install":
            portage._post_src_install_uid_fix(settings)
        post_phase_cmds = self._post_phase_cmds.get(self.phase)
        if post_phase_cmds is not None:
            post_phase = MiscFunctionsProcess(background=self.background,
                commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
                scheduler=self.scheduler, settings=settings)
            self._start_task(post_phase, self._post_phase_exit)
        # No post-phase commands: finish with the phase's own status.
        self.returncode = ebuild_process.returncode
        self._current_task = None

    def _post_phase_exit(self, post_phase):
        if self._final_exit(post_phase) != os.EX_OK:
            writemsg("!!! post %s failed; exiting.\n" % self.phase,
        self._current_task = None
class EbuildBinpkg(EbuildProcess):
    # Runs the "package" phase to build a .tbz2 binary package and, on
    # success, injects it into the binary package tree.
    # (docstring; its triple-quote lines are elided from this view)
    This assumes that src_install() has successfully completed.
    # NOTE(review): elided view — the _start() def line and some
    # statements are missing below.

    __slots__ = ("_binpkg_tmpfile",)

        self.phase = "package"
        self.tree = "porttree"
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        bintree = root_config.trees["bintree"]
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        bintree.prevent_collision(pkg.cpv)
        # Build into a pid-suffixed temp file so a concurrent emerge
        # cannot clobber the final package path.
        binpkg_tmpfile = os.path.join(bintree.pkgdir,
            pkg.cpv + ".tbz2." + str(os.getpid()))
        self._binpkg_tmpfile = binpkg_tmpfile
        settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
        settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
        EbuildProcess._start(self)
        settings.pop("PORTAGE_BINPKG_TMPFILE", None)

    def _set_returncode(self, wait_retval):
        EbuildProcess._set_returncode(self, wait_retval)
        bintree = pkg.root_config.trees["bintree"]
        binpkg_tmpfile = self._binpkg_tmpfile
        if self.returncode == os.EX_OK:
            # Move the temp file into the bintree and update its index.
            bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
class EbuildMerge(SlotObject):
    # Synchronously merges a successfully built ebuild into the live
    # filesystem via portage.merge(), then updates the world file.
    # NOTE(review): elided view — the execute() def line and some
    # statements are missing below.

    __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
        "pkg", "pkg_count", "pkg_path", "pretend",
        "scheduler", "settings", "tree", "world_atom")

        root_config = self.pkg.root_config
        settings = self.settings
        retval = portage.merge(settings["CATEGORY"],
            settings["PF"], settings["D"],
            os.path.join(settings["PORTAGE_BUILDDIR"],
            "build-info"), root_config.root, settings,
            myebuild=settings["EBUILD"],
            mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
            vartree=root_config.trees["vartree"],
            prev_mtimes=self.ldpath_mtimes,
            scheduler=self.scheduler,
            blockers=self.find_blockers)
        if retval == os.EX_OK:
            # Record the package in the world set if appropriate.
            self.world_atom(self.pkg)

    def _log_success(self):
        # Log the post-build cleaning step (unless FEATURES=noclean) and
        # the completed-merge summary line.
        pkg_count = self.pkg_count
        pkg_path = self.pkg_path
        logger = self.logger
        if "noclean" not in self.settings.features:
            short_msg = "emerge: (%s of %s) %s Clean Post" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            logger.log((" === (%s of %s) " + \
                "Post-Build Cleaning (%s::%s)") % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
                short_msg=short_msg)
        logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
class PackageUninstall(AsynchronousTask):
    # Synchronously unmerges one installed package, translating
    # UninstallFailure into this task's returncode.
    # NOTE(review): elided view — the _start() def line, the try: line
    # and some else/return lines are missing below.

    __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")

        unmerge(self.pkg.root_config, self.opts, "unmerge",
            [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
            clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
            writemsg_level=self._writemsg_level)
        except UninstallFailure, e:
            self.returncode = e.status
        self.returncode = os.EX_OK

    def _writemsg_level(self, msg, level=0, noiselevel=0):
        # Route unmerge output: console when appropriate, log file when
        # one is configured.
        log_path = self.settings.get("PORTAGE_LOG_FILE")
        background = self.background
        if log_path is None:
            # In the background, suppress anything below WARNING.
            if not (background and level < logging.WARNING):
                portage.util.writemsg_level(msg,
                    level=level, noiselevel=noiselevel)
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
        f = open(log_path, 'a')
class Binpkg(CompositeTask):
    # Installs a binary package: optionally fetches it from a binhost,
    # verifies digests, unpacks its metadata and image, runs the
    # clean/setup phases, extracts, and finally merges it.
    # NOTE(review): elided view — several def lines, try/except headers,
    # else branches and returns are missing throughout this class.

    __slots__ = ("find_blockers",
        "ldpath_mtimes", "logger", "opts",
        "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
        ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
        "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")

    def _writemsg_level(self, msg, level=0, noiselevel=0):
        # Write to the console unless backgrounded; mirror to the build
        # log when one is configured.
        if not self.background:
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
        log_path = self.settings.get("PORTAGE_LOG_FILE")
        if log_path is not None:
            f = open(log_path, 'a')

    # Binpkg._start() -- NOTE(review): the def line is elided.
        settings = self.settings
        settings.setcpv(pkg)
        self._tree = "bintree"
        self._bintree = self.pkg.root_config.trees[self._tree]
        # Digest verification only with FEATURES=strict and not --pretend.
        self._verify = "strict" in self.settings.features and \
            not self.opts.pretend
        dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
            "portage", pkg.category, pkg.pf)
        self._build_dir = EbuildBuildDir(dir_path=dir_path,
            pkg=pkg, settings=settings)
        self._image_dir = os.path.join(dir_path, "image")
        self._infloc = os.path.join(dir_path, "build-info")
        self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
        # The prefetcher has already completed or it
        # could be running now. If it's running now,
        # wait for it to complete since it holds
        # a lock on the file being fetched. The
        # portage.locks functions are only designed
        # to work between separate processes. Since
        # the lock is held by the current process,
        # use the scheduler and fetcher methods to
        # synchronize with the fetcher.
        prefetcher = self.prefetcher
        if prefetcher is None:
        elif not prefetcher.isAlive():
        elif prefetcher.poll() is None:
            waiting_msg = ("Fetching '%s' " + \
                "in the background. " + \
                "To view fetch progress, run `tail -f " + \
                "/var/log/emerge-fetch.log` in another " + \
                "terminal.") % prefetcher.pkg_path
            msg_prefix = colorize("GOOD", " * ")
            from textwrap import wrap
            waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
                for line in wrap(waiting_msg, 65))
            if not self.background:
                writemsg(waiting_msg, noiselevel=-1)
            self._current_task = prefetcher
            prefetcher.addExitListener(self._prefetch_exit)
        self._prefetch_exit(prefetcher)

    def _prefetch_exit(self, prefetcher):
        pkg_count = self.pkg_count
        fetcher = BinpkgFetcher(background=self.background,
            logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
            scheduler=self.scheduler)
        pkg_path = fetcher.pkg_path
        self._pkg_path = pkg_path
        if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
            msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
            short_msg = "emerge: (%s of %s) %s Fetch" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv)
            self.logger.log(msg, short_msg=short_msg)
            # Fetches are serialized through the scheduler's fetch queue.
            fetcher.addExitListener(self._fetcher_exit)
            self._current_task = fetcher
            self.scheduler.fetch.schedule(fetcher)
            self._start_task(fetcher, self._fetcher_exit)
        self._fetcher_exit(fetcher)

    def _fetcher_exit(self, fetcher):
        # The fetcher only has a returncode when
        # --getbinpkg is enabled.
        if fetcher.returncode is not None:
            self._fetched_pkg = True
            if self.opts.fetchonly:
                self._final_exit(fetcher)
            elif self._default_exit(fetcher) != os.EX_OK:
        verifier = BinpkgVerifier(background=self.background,
            logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
        verifier.addExitListener(self._verifier_exit)
        self._current_task = verifier
        self.scheduler.fetch.schedule(verifier)
        self._start_task(verifier, self._verifier_exit)
        self._verifier_exit(verifier)

    def _verifier_exit(self, verifier):
        if verifier is not None and \
            self._default_exit(verifier) != os.EX_OK:
        logger = self.logger
        pkg_count = self.pkg_count
        pkg_path = self._pkg_path
        if self._fetched_pkg:
            # Register the freshly fetched file with the local bintree.
            self._bintree.inject(pkg.cpv, filename=pkg_path)
        msg = " === (%s of %s) Merging Binary (%s::%s)" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
        short_msg = "emerge: (%s of %s) %s Merge Binary" % \
            (pkg_count.curval, pkg_count.maxval, pkg.cpv)
        logger.log(msg, short_msg=short_msg)
        self._build_dir.lock()
        settings = self.settings
        settings.setcpv(pkg)
        settings["EBUILD"] = self._ebuild_path
        ebuild_phase = EbuildPhase(background=self.background,
            pkg=pkg, phase=phase, scheduler=self.scheduler,
            settings=settings, tree=self._tree)
        self._start_task(ebuild_phase, self._clean_exit)

    def _clean_exit(self, clean_phase):
        if self._default_exit(clean_phase) != os.EX_OK:
            self._unlock_builddir()
        dir_path = self._build_dir.dir_path
        # Remove any stale build dir contents; ENOENT is fine.
        shutil.rmtree(dir_path)
        except (IOError, OSError), e:
            if e.errno != errno.ENOENT:
        infloc = self._infloc
        pkg_path = self._pkg_path
        for mydir in (dir_path, self._image_dir, infloc):
            portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
                gid=portage.data.portage_gid, mode=dir_mode)
        # This initializes PORTAGE_LOG_FILE.
        portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
        self._writemsg_level(">>> Extracting info\n")
        pkg_xpak = portage.xpak.tbz2(self._pkg_path)
        check_missing_metadata = ("CATEGORY", "PF")
        missing_metadata = set()
        for k in check_missing_metadata:
            v = pkg_xpak.getfile(k)
            missing_metadata.add(k)
        pkg_xpak.unpackinfo(infloc)
        for k in missing_metadata:
            f = open(os.path.join(infloc, k), 'wb')
        # Store the md5sum in the vdb.
        f = open(os.path.join(infloc, "BINPKGMD5"), "w")
        f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        settings = self.settings
        settings.setcpv(self.pkg)
        settings["PORTAGE_BINPKG_FILE"] = pkg_path
        settings.backup_changes("PORTAGE_BINPKG_FILE")
        setup_phase = EbuildPhase(background=self.background,
            pkg=self.pkg, phase=phase, scheduler=self.scheduler,
            settings=settings, tree=self._tree)
        setup_phase.addExitListener(self._setup_exit)
        self._current_task = setup_phase
        self.scheduler.scheduleSetup(setup_phase)

    def _setup_exit(self, setup_phase):
        if self._default_exit(setup_phase) != os.EX_OK:
            self._unlock_builddir()
        extractor = BinpkgExtractorAsync(background=self.background,
            image_dir=self._image_dir,
            pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
        self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
        self._start_task(extractor, self._extractor_exit)

    def _extractor_exit(self, extractor):
        if self._final_exit(extractor) != os.EX_OK:
            self._unlock_builddir()
            writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,

    def _unlock_builddir(self):
        # Flush queued elog messages before releasing the lock.
        portage.elog.elog_process(self.pkg.cpv, self.settings)
        self._build_dir.unlock()

    # Binpkg.install() -- NOTE(review): the def line is elided.
        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        settings = self.settings
        settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
        settings.backup_changes("PORTAGE_BINPKG_FILE")
        merge = EbuildMerge(find_blockers=self.find_blockers,
            ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
            pkg=self.pkg, pkg_count=self.pkg_count,
            pkg_path=self._pkg_path, scheduler=self.scheduler,
            settings=settings, tree=self._tree, world_atom=self.world_atom)
        retval = merge.execute()
        settings.pop("PORTAGE_BINPKG_FILE", None)
        self._unlock_builddir()
class BinpkgFetcher(SpawnProcess):
    # Fetches a .tbz2 from PORTAGE_BINHOST with the user's
    # FETCHCOMMAND/RESUMECOMMAND, with optional distlocks-style locking.
    # NOTE(review): elided view — the first __slots__ line, several def
    # lines, else branches and returns are missing below.
        "locked", "pkg_path", "_lock_obj")

    def __init__(self, **kwargs):
        SpawnProcess.__init__(self, **kwargs)
        self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)

        bintree = pkg.root_config.trees["bintree"]
        settings = bintree.settings
        use_locks = "distlocks" in settings.features
        pkg_path = self.pkg_path
        # A partially downloaded file triggers RESUMECOMMAND below.
        resume = os.path.exists(pkg_path)
        # urljoin doesn't work correctly with
        # unrecognized protocols like sftp
        if bintree._remote_has_index:
            rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
            rel_uri = pkg.cpv + ".tbz2"
            uri = bintree._remote_base_uri.rstrip("/") + \
                "/" + rel_uri.lstrip("/")
        uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
            "/" + pkg.pf + ".tbz2"
        protocol = urlparse.urlparse(uri)[0]
        fcmd_prefix = "FETCHCOMMAND"
        fcmd_prefix = "RESUMECOMMAND"
        # Protocol-specific command wins over the generic one.
        fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
        fcmd = settings.get(fcmd_prefix)
        "DISTDIR" : os.path.dirname(pkg_path),
        "FILE" : os.path.basename(pkg_path)
        fetch_env = dict(settings.iteritems())
        fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
            for x in shlex.split(fcmd)]
        portage.util.ensure_dirs(os.path.dirname(pkg_path))
        if self.fd_pipes is None:
        fd_pipes = self.fd_pipes
        # Redirect all output to stdout since some fetchers like
        # wget pollute stderr (if portage detects a problem then it
        # can send its own message to stderr).
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stdout.fileno())
        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)

        # (docstring of lock(); quote lines elided from this view)
        This raises an AlreadyLocked exception if lock() is called
        while a lock is already held. In order to avoid this, call
        unlock() or check whether the "locked" attribute is True
        or False before calling lock().
        if self._lock_obj is not None:
            raise self.AlreadyLocked((self._lock_obj,))
        self._lock_obj = portage.locks.lockfile(
            self.pkg_path, wantnewlockfile=1)

    class AlreadyLocked(portage.exception.PortageException):

        if self._lock_obj is None:
        portage.locks.unlockfile(self._lock_obj)
        self._lock_obj = None
class BinpkgVerifier(AsynchronousTask):
    # Verifies a binary package against its digests, redirecting
    # stdout/stderr into the log file while backgrounded.
    __slots__ = ("logfile", "pkg",)

        # (docstring; quote lines elided from this view)
        Note: Unlike a normal AsynchronousTask.start() method,
        this one does all of its work synchronously. The returncode
        attribute will be set before it returns.
        root_config = pkg.root_config
        bintree = root_config.trees["bintree"]
        stdout_orig = sys.stdout
        stderr_orig = sys.stderr
        if self.background and self.logfile is not None:
            log_file = open(self.logfile, 'a')
        if log_file is not None:
            # Capture digestCheck's output in the log file.
            sys.stdout = log_file
            sys.stderr = log_file
        bintree.digestCheck(pkg)
        except portage.exception.FileNotFound:
            writemsg("!!! Fetching Binary failed " + \
                "for '%s'\n" % pkg.cpv, noiselevel=-1)
        except portage.exception.DigestException, e:
            writemsg("\n!!! Digest verification failed:\n",
            writemsg("!!! %s\n" % e.value[0],
            writemsg("!!! Reason: %s\n" % e.value[1],
            writemsg("!!! Got: %s\n" % e.value[2],
            writemsg("!!! Expected: %s\n" % e.value[3],
        # Always restore the real streams before returning.
        sys.stdout = stdout_orig
        sys.stderr = stderr_orig
        if log_file is not None:
        self.returncode = rval
class BinpkgExtractorAsync(SpawnProcess):
    # Extracts a .tbz2 image into image_dir via a bash
    # "bzip2 | tar" pipeline, asynchronously.
    # NOTE(review): the _start() def line is elided from this view.

    __slots__ = ("image_dir", "pkg", "pkg_path")

    _shell_binary = portage.const.BASH_BINARY

        self.args = [self._shell_binary, "-c",
            "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
            (portage._shell_quote(self.pkg_path),
            portage._shell_quote(self.image_dir))]
        self.env = self.pkg.root_config.settings.environ()
        SpawnProcess._start(self)
class MergeListItem(CompositeTask):
    # One entry of the merge list: dispatches to EbuildBuild or Binpkg
    # for the build/fetch step, and to install()/uninstall for the merge.
    # (docstring; quote lines elided from this view)
    TODO: For parallel scheduling, everything here needs asynchronous
    execution support (start, poll, and wait methods).
    # NOTE(review): elided view — several def lines, else branches and
    # returns are missing throughout this class.

    __slots__ = ("args_set",
        "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
        "find_blockers", "logger", "mtimedb", "pkg",
        "pkg_count", "pkg_to_replace", "prefetcher",
        "settings", "statusMessage", "world_atom") + \

        build_opts = self.build_opts
        # uninstall, executed by self.merge()
        self.returncode = os.EX_OK
        args_set = self.args_set
        find_blockers = self.find_blockers
        logger = self.logger
        mtimedb = self.mtimedb
        pkg_count = self.pkg_count
        scheduler = self.scheduler
        settings = self.settings
        world_atom = self.world_atom
        ldpath_mtimes = mtimedb["ldpath"]
        action_desc = "Emerging"
        if pkg.type_name == "binary":
            action_desc += " binary"
        if build_opts.fetchonly:
            action_desc = "Fetching"
        msg = "%s (%s of %s) %s" % \
            colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
            colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
            colorize("GOOD", pkg.cpv))
        msg += " %s %s" % (preposition, pkg.root)
        if not build_opts.pretend:
            self.statusMessage(msg)
            logger.log(" >>> emerge (%s of %s) %s to %s" % \
                (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
        if pkg.type_name == "ebuild":
            build = EbuildBuild(args_set=args_set,
                background=self.background,
                config_pool=self.config_pool,
                find_blockers=find_blockers,
                ldpath_mtimes=ldpath_mtimes, logger=logger,
                opts=build_opts, pkg=pkg, pkg_count=pkg_count,
                prefetcher=self.prefetcher, scheduler=scheduler,
                settings=settings, world_atom=world_atom)
            self._install_task = build
            self._start_task(build, self._default_final_exit)
        elif pkg.type_name == "binary":
            binpkg = Binpkg(background=self.background,
                find_blockers=find_blockers,
                ldpath_mtimes=ldpath_mtimes, logger=logger,
                opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
                prefetcher=self.prefetcher, settings=settings,
                scheduler=scheduler, world_atom=world_atom)
            self._install_task = binpkg
            self._start_task(binpkg, self._default_final_exit)
        # NOTE(review): the poll()/wait() delegations below belong to the
        # elided _poll() and _wait() method definitions.
        self._install_task.poll()
        return self.returncode
        self._install_task.wait()
        return self.returncode

        # NOTE(review): the merge() def line is elided here.
        build_opts = self.build_opts
        find_blockers = self.find_blockers
        logger = self.logger
        mtimedb = self.mtimedb
        pkg_count = self.pkg_count
        prefetcher = self.prefetcher
        scheduler = self.scheduler
        settings = self.settings
        world_atom = self.world_atom
        ldpath_mtimes = mtimedb["ldpath"]
        if not (build_opts.buildpkgonly or \
            build_opts.fetchonly or build_opts.pretend):
            uninstall = PackageUninstall(background=self.background,
                ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
                pkg=pkg, scheduler=scheduler, settings=settings)
            retval = uninstall.wait()
            if retval != os.EX_OK:
        if build_opts.fetchonly or \
            build_opts.buildpkgonly:
            # Nothing to merge in these modes.
            return self.returncode
        retval = self._install_task.install()
class PackageMerge(AsynchronousTask):
    # (docstring; quote lines elided from this view)
    TODO: Implement asynchronous merge so that the scheduler can
    run while a merge is executing.
    # NOTE(review): elided view — the _start() def line and the
    # if/else around the action_desc assignments are missing below.

    __slots__ = ("merge",)

        pkg = self.merge.pkg
        pkg_count = self.merge.pkg_count
        action_desc = "Uninstalling"
        preposition = "from"
        action_desc = "Installing"
        msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
        msg += " %s %s" % (preposition, pkg.root)
        if not self.merge.build_opts.fetchonly and \
            not self.merge.build_opts.pretend and \
            not self.merge.build_opts.buildpkgonly:
            self.merge.statusMessage(msg)
        self.returncode = self.merge.merge()
class DependencyArg(object):
    # Base class for the dependency-argument wrappers below
    # (AtomArg, PackageArg, SetArg).
    def __init__(self, arg=None, root_config=None):
        # NOTE(review): the "self.arg = arg" assignment is elided from
        # this view; subclasses such as SetArg read self.arg.
        self.root_config = root_config
class AtomArg(DependencyArg):
    # Wraps a single dependency atom, normalizing it to a
    # portage.dep.Atom instance and exposing it as a one-element set.
    # NOTE(review): the initial "self.atom = atom" line is elided.
    def __init__(self, atom=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        if not isinstance(self.atom, portage.dep.Atom):
            self.atom = portage.dep.Atom(self.atom)
        self.set = (self.atom, )
class PackageArg(DependencyArg):
    """A dependency argument wrapping an already-resolved Package.

    The package is pinned with an exact-version ("=") atom, and that
    single atom constitutes the argument's atom set.
    """
    def __init__(self, package=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.package = package
        exact_atom = portage.dep.Atom("=" + package.cpv)
        self.atom = exact_atom
        self.set = (exact_atom, )
class SetArg(DependencyArg):
    # Wraps a package-set argument; the set name is the argument with
    # the set prefix (SETPREFIX) stripped.
    # NOTE(review): elided view — additional __init__ lines are missing.
    def __init__(self, set=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
    # One edge in the dependency graph, with defaulted priority
    # (DepPriority) and depth.
    __slots__ = ("atom", "blocker", "depth",
        "parent", "onlydeps", "priority", "root")
    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        if self.priority is None:
            self.priority = DepPriority()
        if self.depth is None:
            # NOTE(review): the default-depth assignment is elided from
            # this view.
3710 class BlockerCache(DictMixin):
3711 """This caches blockers of installed packages so that dep_check does not
3712 have to be done for every single installed package on every invocation of
3713 emerge. The cache is invalidated whenever it is detected that something
3714 has changed that might alter the results of dep_check() calls:
3715 1) the set of installed packages (including COUNTER) has changed
3716 2) the old-style virtuals have changed
3719 # Number of uncached packages to trigger cache update, since
3720 # it's wasteful to update it for every vdb change.
3721 _cache_threshold = 5
# Per-package cache record: the vdb COUNTER at caching time plus the
# package's blocker atoms. A counter mismatch later invalidates the entry.
3723 class BlockerData(object):
3725 __slots__ = ("__weakref__", "atoms", "counter")
3727 def __init__(self, counter, atoms):
3728 self.counter = counter
# Load (or initialize) the on-disk pickle cache for the given root.
3731 def __init__(self, myroot, vardb):
3733 self._virtuals = vardb.settings.getvirtuals()
3734 self._cache_filename = os.path.join(myroot,
3735 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3736 self._cache_version = "1"
3737 self._cache_data = None
# cpv keys whose entries changed since the last flush().
3738 self._modified = set()
3743 f = open(self._cache_filename)
3744 mypickle = pickle.Unpickler(f)
# Forbid resolution of global names while unpickling so that a
# corrupt/malicious cache file cannot instantiate arbitrary classes.
3745 mypickle.find_global = None
3746 self._cache_data = mypickle.load()
3749 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3750 if isinstance(e, pickle.UnpicklingError):
3751 writemsg("!!! Error loading '%s': %s\n" % \
3752 (self._cache_filename, str(e)), noiselevel=-1)
# Structural sanity check of the loaded pickle before trusting it.
3755 cache_valid = self._cache_data and \
3756 isinstance(self._cache_data, dict) and \
3757 self._cache_data.get("version") == self._cache_version and \
3758 isinstance(self._cache_data.get("blockers"), dict)
3760 # Validate all the atoms and counters so that
3761 # corruption is detected as soon as possible.
3762 invalid_items = set()
3763 for k, v in self._cache_data["blockers"].iteritems():
3764 if not isinstance(k, basestring):
3765 invalid_items.add(k)
3768 if portage.catpkgsplit(k) is None:
3769 invalid_items.add(k)
3771 except portage.exception.InvalidData:
3772 invalid_items.add(k)
3774 if not isinstance(v, tuple) or \
3776 invalid_items.add(k)
3779 if not isinstance(counter, (int, long)):
3780 invalid_items.add(k)
3782 if not isinstance(atoms, (list, tuple)):
3783 invalid_items.add(k)
3785 invalid_atom = False
3787 if not isinstance(atom, basestring):
# Every cached atom must be a valid blocker atom ("!...").
3790 if atom[:1] != "!" or \
3791 not portage.isvalidatom(
3792 atom, allow_blockers=True):
3796 invalid_items.add(k)
# Drop every entry that failed validation above.
3799 for k in invalid_items:
3800 del self._cache_data["blockers"][k]
3801 if not self._cache_data["blockers"]:
# Cache missing/invalid: start from a fresh, empty structure.
3805 self._cache_data = {"version":self._cache_version}
3806 self._cache_data["blockers"] = {}
3807 self._cache_data["virtuals"] = self._virtuals
3808 self._modified.clear()
3811 """If the current user has permission and the internal blocker cache
3812 been updated, save it to disk and mark it unmodified. This is called
3813 by emerge after it has proccessed blockers for all installed packages.
3814 Currently, the cache is only written if the user has superuser
3815 privileges (since that's required to obtain a lock), but all users
3816 have read access and benefit from faster blocker lookups (as long as
3817 the entire cache is still valid). The cache is stored as a pickled
3818 dict object with the following format:
3822 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
3823 "virtuals" : vardb.settings.getvirtuals()
# Only write when enough entries changed (see _cache_threshold).
3826 if len(self._modified) >= self._cache_threshold and \
3829 f = portage.util.atomic_ofstream(self._cache_filename)
3830 pickle.dump(self._cache_data, f, -1)
# World-readable so non-root users benefit from the cache too.
3832 portage.util.apply_secpass_permissions(
3833 self._cache_filename, gid=portage.portage_gid, mode=0644)
3834 except (IOError, OSError), e:
3836 self._modified.clear()
3838 def __setitem__(self, cpv, blocker_data):
3840 Update the cache and mark it as modified for a future call to
3843 @param cpv: Package for which to cache blockers.
3845 @param blocker_data: An object with counter and atoms attributes.
3846 @type blocker_data: BlockerData
# Store atoms as plain strings so the entry pickles cleanly.
3848 self._cache_data["blockers"][cpv] = \
3849 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
3850 self._modified.add(cpv)
3853 return iter(self._cache_data["blockers"])
3855 def __delitem__(self, cpv):
3856 del self._cache_data["blockers"][cpv]
3858 def __getitem__(self, cpv):
3861 @returns: An object with counter and atoms attributes.
3863 return self.BlockerData(*self._cache_data["blockers"][cpv])
3866 """This needs to be implemented so that self.__repr__() doesn't raise
3867 an AttributeError."""
# BlockerDB: answers "which installed packages block (or are blocked by)
# a given new package?" using BlockerCache plus a FakeVartree snapshot.
3870 class BlockerDB(object):
3872 def __init__(self, root_config):
3873 self._root_config = root_config
3874 self._vartree = root_config.trees["vartree"]
3875 self._portdb = root_config.trees["porttree"].dbapi
# Both created lazily by _get_fake_vartree().
3877 self._dep_check_trees = None
3878 self._fake_vartree = None
# Lazily build (then sync) the FakeVartree and the dep_check() tree
# mapping that points both "porttree" and "vartree" at it.
3880 def _get_fake_vartree(self, acquire_lock=0):
3881 fake_vartree = self._fake_vartree
3882 if fake_vartree is None:
3883 fake_vartree = FakeVartree(self._root_config,
3884 acquire_lock=acquire_lock)
3885 self._fake_vartree = fake_vartree
3886 self._dep_check_trees = { self._vartree.root : {
3887 "porttree" : fake_vartree,
3888 "vartree" : fake_vartree,
3891 fake_vartree.sync(acquire_lock=acquire_lock)
# Return the set of installed packages that conflict with new_pkg,
# checking blockers in both directions (installed -> new and new -> installed).
3894 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
3895 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
3896 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3897 settings = self._vartree.settings
# Entries still in stale_cache at the end belong to packages that are
# no longer installed, and get purged from the cache.
3898 stale_cache = set(blocker_cache)
3899 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
3900 dep_check_trees = self._dep_check_trees
3901 vardb = fake_vartree.dbapi
3902 installed_pkgs = list(vardb)
3904 for inst_pkg in installed_pkgs:
3905 stale_cache.discard(inst_pkg.cpv)
3906 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A COUNTER mismatch means the installed instance changed since the
# entry was cached, so the cached blockers can't be trusted.
3907 if cached_blockers is not None and \
3908 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
3909 cached_blockers = None
3910 if cached_blockers is not None:
3911 blocker_atoms = cached_blockers.atoms
3913 # Use aux_get() to trigger FakeVartree global
3914 # updates on *DEPEND when appropriate.
3915 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Temporarily relax strict checking while evaluating installed deps.
3917 portage.dep._dep_check_strict = False
3918 success, atoms = portage.dep_check(depstr,
3919 vardb, settings, myuse=inst_pkg.use.enabled,
3920 trees=dep_check_trees, myroot=inst_pkg.root)
3922 portage.dep._dep_check_strict = True
3924 pkg_location = os.path.join(inst_pkg.root,
3925 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
3926 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
3927 (pkg_location, atoms), noiselevel=-1)
# Only the "!"-prefixed atoms are blockers.
3930 blocker_atoms = [atom for atom in atoms \
3931 if atom.startswith("!")]
3932 blocker_atoms.sort()
3933 counter = long(inst_pkg.metadata["COUNTER"])
3934 blocker_cache[inst_pkg.cpv] = \
3935 blocker_cache.BlockerData(counter, blocker_atoms)
3936 for cpv in stale_cache:
3937 del blocker_cache[cpv]
3938 blocker_cache.flush()
# Map each blocker atom back to the installed package(s) declaring it.
3940 blocker_parents = digraph()
3942 for pkg in installed_pkgs:
3943 for blocker_atom in blocker_cache[pkg.cpv].atoms:
3944 blocker_atom = blocker_atom.lstrip("!")
3945 blocker_atoms.append(blocker_atom)
3946 blocker_parents.add(blocker_atom, pkg)
3948 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3949 blocking_pkgs = set()
3950 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
3951 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
3953 # Check for blockers in the other direction.
3954 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
3956 portage.dep._dep_check_strict = False
3957 success, atoms = portage.dep_check(depstr,
3958 vardb, settings, myuse=new_pkg.use.enabled,
3959 trees=dep_check_trees, myroot=new_pkg.root)
3961 portage.dep._dep_check_strict = True
3963 # We should never get this far with invalid deps.
3964 show_invalid_depstring_notice(new_pkg, depstr, atoms)
3967 blocker_atoms = [atom.lstrip("!") for atom in atoms \
3970 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3971 for inst_pkg in installed_pkgs:
# EAFP: .next() raises StopIteration when no atom matches inst_pkg.
3973 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
3974 except (portage.exception.InvalidDependString, StopIteration):
3976 blocking_pkgs.add(inst_pkg)
3978 return blocking_pkgs
# Print a formatted error explaining an invalid/corrupt dependency string,
# with remediation advice that differs for installed ("nomerge") packages
# versus packages being newly installed.
# NOTE(review): the "msg = []" initialization and the "else:" line are
# absent from this sampled listing -- confirm against the original.
3980 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
3982 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
3983 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
3984 p_type, p_root, p_key, p_status = parent_node
3986 if p_status == "nomerge":
3987 category, pf = portage.catsplit(p_key)
3988 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
3989 msg.append("Portage is unable to process the dependencies of the ")
3990 msg.append("'%s' package. " % p_key)
3991 msg.append("In order to correct this problem, the package ")
3992 msg.append("should be uninstalled, reinstalled, or upgraded. ")
3993 msg.append("As a temporary workaround, the --nodeps option can ")
3994 msg.append("be used to ignore all dependencies. For reference, ")
3995 msg.append("the problematic dependencies can be found in the ")
3996 msg.append("*DEPEND files located in '%s/'." % pkg_location)
3998 msg.append("This package can not be installed. ")
3999 msg.append("Please notify the '%s' package maintainer " % p_key)
4000 msg.append("about this problem.")
# Wrap the advice text at 72 columns before emitting it as an error.
4002 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4003 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4005 class PackageVirtualDbapi(portage.dbapi):
4007 A dbapi-like interface class that represents the state of the installed
4008 package database as new packages are installed, replacing any packages
4009 that previously existed in the same slot. The main difference between
4010 this class and fakedbapi is that this one uses Package instances
4011 internally (passed in via cpv_inject() and cpv_remove() calls).
4013 def __init__(self, settings):
4014 portage.dbapi.__init__(self)
4015 self.settings = settings
# Caches match() results keyed by the original dependency string.
4016 self._match_cache = {}
4022 Remove all packages.
4026 self._cp_map.clear()
4027 self._cpv_map.clear()
# Shallow-ish copy: cp_map value lists are duplicated so the copy's
# slot lists can be mutated independently.
4030 obj = PackageVirtualDbapi(self.settings)
4031 obj._match_cache = self._match_cache.copy()
4032 obj._cp_map = self._cp_map.copy()
4033 for k, v in obj._cp_map.iteritems():
4034 obj._cp_map[k] = v[:]
4035 obj._cpv_map = self._cpv_map.copy()
4039 return self._cpv_map.itervalues()
4041 def __contains__(self, item):
4042 existing = self._cpv_map.get(item.cpv)
4043 if existing is not None and \
# get() accepts either a Package-like object (with .cpv) or a
# (type_name, root, cpv, operation) tuple key.
4048 def get(self, item, default=None):
4049 cpv = getattr(item, "cpv", None)
4053 type_name, root, cpv, operation = item
4055 existing = self._cpv_map.get(cpv)
4056 if existing is not None and \
4061 def match_pkgs(self, atom):
4062 return [self._cpv_map[cpv] for cpv in self.match(atom)]
# Invalidate derived caches after the package set changes.
4064 def _clear_cache(self):
4065 if self._categories is not None:
4066 self._categories = None
4067 if self._match_cache:
4068 self._match_cache = {}
4070 def match(self, origdep, use_cache=1):
4071 result = self._match_cache.get(origdep)
4072 if result is not None:
4074 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4075 self._match_cache[origdep] = result
4078 def cpv_exists(self, cpv):
4079 return cpv in self._cpv_map
4081 def cp_list(self, mycp, use_cache=1):
4082 cachelist = self._match_cache.get(mycp)
4083 # cp_list() doesn't expand old-style virtuals
4084 if cachelist and cachelist[0].startswith(mycp):
4086 cpv_list = self._cp_map.get(mycp)
4087 if cpv_list is None:
4090 cpv_list = [pkg.cpv for pkg in cpv_list]
4091 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for old-style virtuals (they may be
# satisfied by providers that a later lookup should find).
4092 if not (not cpv_list and mycp.startswith("virtual/")):
4093 self._match_cache[mycp] = cpv_list
4097 return list(self._cp_map)
4100 return list(self._cpv_map)
# Insert pkg, evicting any existing package with the same cpv or the
# same slot atom (models in-place replacement during install).
4102 def cpv_inject(self, pkg):
4103 cp_list = self._cp_map.get(pkg.cp)
4106 self._cp_map[pkg.cp] = cp_list
4107 e_pkg = self._cpv_map.get(pkg.cpv)
4108 if e_pkg is not None:
4111 self.cpv_remove(e_pkg)
4112 for e_pkg in cp_list:
4113 if e_pkg.slot_atom == pkg.slot_atom:
4116 self.cpv_remove(e_pkg)
4119 self._cpv_map[pkg.cpv] = pkg
4122 def cpv_remove(self, pkg):
4123 old_pkg = self._cpv_map.get(pkg.cpv)
4126 self._cp_map[pkg.cp].remove(pkg)
4127 del self._cpv_map[pkg.cpv]
# Missing metadata keys yield "" rather than raising, matching the
# dbapi aux_get contract used elsewhere in this file.
4130 def aux_get(self, cpv, wants):
4131 metadata = self._cpv_map[cpv].metadata
4132 return [metadata.get(x, "") for x in wants]
4134 def aux_update(self, cpv, values):
4135 self._cpv_map[cpv].metadata.update(values)
# depgraph: builds and analyzes the dependency graph for an emerge run.
# (Class continues beyond this listing.)
4138 class depgraph(object):
4140 pkg_tree_map = RootConfig.pkg_tree_map
# Dependency metadata keys consulted when expanding package deps.
4142 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Set up per-root fake vartrees, package caches, dep_check() tree views,
# candidate-db orderings, and all graph/bookkeeping containers.
4144 def __init__(self, settings, trees, myopts, myparams, spinner):
4145 self.settings = settings
4146 self.target_root = settings["ROOT"]
4147 self.myopts = myopts
4148 self.myparams = myparams
4150 if settings.get("PORTAGE_DEBUG", "") == "1":
4152 self.spinner = spinner
4153 self._running_root = trees["/"]["root_config"]
4154 self._opts_no_restart = Scheduler._opts_no_restart
4155 self.pkgsettings = {}
4156 # Maps slot atom to package for each Package added to the graph.
4157 self._slot_pkg_map = {}
4158 # Maps nodes to the reasons they were selected for reinstallation.
4159 self._reinstall_nodes = {}
4162 self._trees_orig = trees
4164 # Contains a filtered view of preferred packages that are selected
4165 # from available repositories.
4166 self._filtered_trees = {}
4167 # Contains installed packages and new packages that have been added
4169 self._graph_trees = {}
4170 # All Package instances
4171 self._pkg_cache = self._package_cache(self)
4172 for myroot in trees:
4173 self.trees[myroot] = {}
4174 # Create a RootConfig instance that references
4175 # the FakeVartree instead of the real one.
4176 self.roots[myroot] = RootConfig(
4177 trees[myroot]["vartree"].settings,
4179 trees[myroot]["root_config"].setconfig)
4180 for tree in ("porttree", "bintree"):
4181 self.trees[myroot][tree] = trees[myroot][tree]
4182 self.trees[myroot]["vartree"] = \
4183 FakeVartree(trees[myroot]["root_config"],
4184 pkg_cache=self._pkg_cache)
4185 self.pkgsettings[myroot] = portage.config(
4186 clone=self.trees[myroot]["vartree"].settings)
4187 self._slot_pkg_map[myroot] = {}
4188 vardb = self.trees[myroot]["vartree"].dbapi
4189 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4190 "--buildpkgonly" not in self.myopts
4191 # This fakedbapi instance will model the state that the vdb will
4192 # have after new packages have been installed.
4193 fakedb = PackageVirtualDbapi(vardb.settings)
4194 if preload_installed_pkgs:
4196 self.spinner.update()
4197 # This triggers metadata updates via FakeVartree.
4198 vardb.aux_get(pkg.cpv, [])
4199 fakedb.cpv_inject(pkg)
4201 # Now that the vardb state is cached in our FakeVartree,
4202 # we won't be needing the real vartree cache for awhile.
4203 # To make some room on the heap, clear the vardbapi
4205 trees[myroot]["vartree"].dbapi._clear_cache()
4208 self.mydbapi[myroot] = fakedb
4211 graph_tree.dbapi = fakedb
4212 self._graph_trees[myroot] = {}
4213 self._filtered_trees[myroot] = {}
4214 # Substitute the graph tree for the vartree in dep_check() since we
4215 # want atom selections to be consistent with package selections
4216 # have already been made.
4217 self._graph_trees[myroot]["porttree"] = graph_tree
4218 self._graph_trees[myroot]["vartree"] = graph_tree
4219 def filtered_tree():
4221 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4222 self._filtered_trees[myroot]["porttree"] = filtered_tree
4224 # Passing in graph_tree as the vartree here could lead to better
4225 # atom selections in some cases by causing atoms for packages that
4226 # have been added to the graph to be preferred over other choices.
4227 # However, it can trigger atom selections that result in
4228 # unresolvable direct circular dependencies. For example, this
4229 # happens with gwydion-dylan which depends on either itself or
4230 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4231 # gwydion-dylan-bin needs to be selected in order to avoid a
4232 # an unresolvable direct circular dependency.
4234 # To solve the problem described above, pass in "graph_db" so that
4235 # packages that have been added to the graph are distinguishable
4236 # from other available packages and installed packages. Also, pass
4237 # the parent package into self._select_atoms() calls so that
4238 # unresolvable direct circular dependencies can be detected and
4239 # avoided when possible.
4240 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4241 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4244 portdb = self.trees[myroot]["porttree"].dbapi
4245 bindb = self.trees[myroot]["bintree"].dbapi
4246 vardb = self.trees[myroot]["vartree"].dbapi
4247 # (db, pkg_type, built, installed, db_keys)
4248 if "--usepkgonly" not in self.myopts:
4249 db_keys = list(portdb._aux_cache_keys)
4250 dbs.append((portdb, "ebuild", False, False, db_keys))
4251 if "--usepkg" in self.myopts:
4252 db_keys = list(bindb._aux_cache_keys)
4253 dbs.append((bindb, "binary", True, False, db_keys))
4254 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4255 dbs.append((vardb, "installed", True, True, db_keys))
4256 self._filtered_trees[myroot]["dbs"] = dbs
# Binary package index must be populated before it can be queried.
4257 if "--usepkg" in self.myopts:
4258 self.trees[myroot]["bintree"].populate(
4259 "--getbinpkg" in self.myopts,
4260 "--getbinpkgonly" in self.myopts)
4263 self.digraph=portage.digraph()
4264 # contains all sets added to the graph
4266 # contains atoms given as arguments
4267 self._sets["args"] = InternalPackageSet()
4268 # contains all atoms from all sets added to the graph, including
4269 # atoms given as arguments
4270 self._set_atoms = InternalPackageSet()
4271 self._atom_arg_map = {}
4272 # contains all nodes pulled in by self._set_atoms
4273 self._set_nodes = set()
4274 # Contains only Blocker -> Uninstall edges
4275 self._blocker_uninstalls = digraph()
4276 # Contains only Package -> Blocker edges
4277 self._blocker_parents = digraph()
4278 # Contains only irrelevant Package -> Blocker edges
4279 self._irrelevant_blockers = digraph()
4280 # Contains only unsolvable Package -> Blocker edges
4281 self._unsolvable_blockers = digraph()
4282 self._slot_collision_info = set()
4283 # Slot collision nodes are not allowed to block other packages since
4284 # blocker validation is only able to account for one package per slot.
4285 self._slot_collision_nodes = set()
4286 self._serialized_tasks_cache = None
4287 self._scheduler_graph = None
4288 self._displayed_list = None
4289 self._pprovided_args = []
4290 self._missing_args = []
4291 self._masked_installed = set()
4292 self._unsatisfied_deps_for_display = []
4293 self._unsatisfied_blockers_for_display = None
4294 self._circular_deps_for_display = None
4295 self._dep_stack = []
4296 self._unsatisfied_deps = []
4297 self._initially_unsatisfied_deps = []
4298 self._ignored_deps = []
4299 self._required_set_names = set(["system", "world"])
# Strategy hooks: default atom/package selection policies.
4300 self._select_atoms = self._select_atoms_highest_available
4301 self._select_package = self._select_pkg_highest_available
4302 self._highest_pkg_cache = {}
4304 def _show_slot_collision_notice(self):
4305 """Show an informational message advising the user to mask one of the
4306 the packages. In some cases it may be possible to resolve this
4307 automatically, but support for backtracking (removal nodes that have
4308 already been selected) will be required in order to handle all possible
# Nothing to report when no slot collisions were recorded.
4311 if not self._slot_collision_info:
4314 self._show_merge_list()
4317 msg.append("\n!!! Multiple package instances within a single " + \
4318 "package slot have been pulled\n")
4319 msg.append("!!! into the dependency graph, resulting" + \
4320 " in a slot conflict:\n\n")
4322 # Max number of parents shown, to avoid flooding the display.
4324 for slot_atom, root in self._slot_collision_info:
4325 msg.append(str(slot_atom))
# Collect every colliding node for this slot, plus the package that
# currently occupies the slot in the graph.
4328 for node in self._slot_collision_nodes:
4329 if node.slot_atom == slot_atom:
4330 slot_nodes.append(node)
4331 slot_nodes.append(self._slot_pkg_map[root][slot_atom])
4332 for node in slot_nodes:
4334 msg.append(str(node))
4335 parents = self.digraph.parent_nodes(node)
4338 if len(parents) > max_parents:
4340 # When generating the pruned list, prefer instances
4341 # of DependencyArg over instances of Package.
4342 for parent in parents:
4343 if isinstance(parent, DependencyArg):
4344 pruned_list.append(parent)
4345 # Prefer Packages instances that themselves have been
4346 # pulled into collision slots.
4347 for parent in parents:
4348 if isinstance(parent, Package) and \
4349 (parent.slot_atom, parent.root) \
4350 in self._slot_collision_info:
4351 pruned_list.append(parent)
4352 for parent in parents:
4353 if len(pruned_list) >= max_parents:
4355 if not isinstance(parent, DependencyArg) and \
4356 parent not in pruned_list:
4357 pruned_list.append(parent)
4358 omitted_parents = len(parents) - len(pruned_list)
4359 parents = pruned_list
4360 msg.append(" pulled in by\n")
4361 for parent in parents:
4362 msg.append(2*indent)
4363 msg.append(str(parent))
4366 msg.append(2*indent)
4367 msg.append("(and %d more)\n" % omitted_parents)
4369 msg.append(" (no parents)\n")
4372 sys.stderr.write("".join(msg))
# Extended advice is suppressed in --quiet mode.
4375 if "--quiet" in self.myopts:
4379 msg.append("It may be possible to solve this problem ")
4380 msg.append("by using package.mask to prevent one of ")
4381 msg.append("those packages from being selected. ")
4382 msg.append("However, it is also possible that conflicting ")
4383 msg.append("dependencies exist such that they are impossible to ")
4384 msg.append("satisfy simultaneously. If such a conflict exists in ")
4385 msg.append("the dependencies of two different packages, then those ")
4386 msg.append("packages can not be installed simultaneously.")
# formatter wraps the advice text to 72 columns on stderr.
4388 from formatter import AbstractFormatter, DumbWriter
4389 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4391 f.add_flowing_data(x)
4395 msg.append("For more information, see MASKED PACKAGES ")
4396 msg.append("section in the emerge man page or refer ")
4397 msg.append("to the Gentoo Handbook.")
4399 f.add_flowing_data(x)
def _reinstall_for_flags(self, forced_flags,
	orig_use, orig_iuse, cur_use, cur_iuse):
	"""Return a set of flags that trigger reinstallation, or None if there
	are no such flags.

	@param forced_flags: flags forced by the profile; changes to these
		never trigger a reinstall
	@param orig_use/orig_iuse: USE and IUSE of the installed instance
	@param cur_use/cur_iuse: USE and IUSE of the candidate instance
	"""
	if "--newuse" in self.myopts:
		# --newuse: any IUSE membership change (that isn't forced) or any
		# change in the enabled state of a flag present in IUSE counts.
		flags = set(orig_iuse.symmetric_difference(
			cur_iuse).difference(forced_flags))
		flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
			cur_iuse.intersection(cur_use)))
		if flags:
			return flags
	elif "changed-use" == self.myopts.get("--reinstall"):
		# --reinstall=changed-use: only enabled-state changes of flags in
		# IUSE count; IUSE additions/removals alone do not.
		flags = orig_iuse.intersection(orig_use).symmetric_difference(
			cur_iuse.intersection(cur_use))
		if flags:
			return flags
	return None
# Drain the dependency stack, expanding each Package's deps or resolving
# each plain Dependency; returns falsy on failure (per the visible
# early-exit branches).
4421 def _create_graph(self, allow_unsatisfied=False):
4422 dep_stack = self._dep_stack
4424 self.spinner.update()
4425 dep = dep_stack.pop()
# Packages get their dependencies expanded; bare Dependency objects
# get resolved to a package and added to the graph.
4426 if isinstance(dep, Package):
4427 if not self._add_pkg_deps(dep,
4428 allow_unsatisfied=allow_unsatisfied):
4431 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Resolve a single Dependency: record blockers, select a matching
# package, and hand it to _add_pkg(); unsatisfied deps are either queued
# (allow_unsatisfied) or recorded for display.
4435 def _add_dep(self, dep, allow_unsatisfied=False):
4436 debug = "--debug" in self.myopts
4437 buildpkgonly = "--buildpkgonly" in self.myopts
4438 nodeps = "--nodeps" in self.myopts
4439 empty = "empty" in self.myparams
4440 deep = "deep" in self.myparams
# --update only applies near the top of the graph (depth <= 1).
4441 update = "--update" in self.myopts and dep.depth <= 1
4443 if not buildpkgonly and \
4445 dep.parent not in self._slot_collision_nodes:
4446 if dep.parent.onlydeps:
4447 # It's safe to ignore blockers if the
4448 # parent is an --onlydeps node.
4450 # The blocker applies to the root where
4451 # the parent is or will be installed.
4452 blocker = Blocker(atom=dep.atom,
4453 eapi=dep.parent.metadata["EAPI"],
4454 root=dep.parent.root)
4455 self._blocker_parents.add(blocker, dep.parent)
4457 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4458 onlydeps=dep.onlydeps)
4460 if allow_unsatisfied:
4461 self._unsatisfied_deps.append(dep)
4463 self._unsatisfied_deps_for_display.append(
4464 ((dep.root, dep.atom), {"myparent":dep.parent}))
4466 # In some cases, dep_check will return deps that shouldn't
4467 # be proccessed any further, so they are identified and
4468 # discarded here. Try to discard as few as possible since
4469 # discarded dependencies reduce the amount of information
4470 # available for optimization of merge order.
4471 if dep.priority.satisfied and \
4472 not (existing_node or empty or deep or update):
4474 if dep.root == self.target_root:
# EAFP: StopIteration means no argument atom matches dep_pkg.
4476 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4477 except StopIteration:
4479 except portage.exception.InvalidDependString:
4480 if not dep_pkg.installed:
4481 # This shouldn't happen since the package
4482 # should have been masked.
4485 self._ignored_deps.append(dep)
4488 if not self._add_pkg(dep_pkg, dep):
# Add a resolved package node to the graph, handling slot collisions,
# priority adjustment, old-style virtual registration, and the decision
# of whether to recurse into its dependencies.
4492 def _add_pkg(self, pkg, dep):
4499 myparent = dep.parent
4500 priority = dep.priority
4502 if priority is None:
4503 priority = DepPriority()
4505 Fills the digraph with nodes comprised of packages to merge.
4506 mybigkey is the package spec of the package to merge.
4507 myparent is the package depending on mybigkey ( or None )
4508 addme = Should we add this package to the digraph or are we just looking at it's deps?
4509 Think --onlydeps, we need to ignore packages in that case.
4512 #IUSE-aware emerge -> USE DEP aware depgraph
4513 #"no downgrade" emerge
4516 # select the correct /var database that we'll be checking against
4517 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4518 pkgsettings = self.pkgsettings[pkg.root]
4524 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4525 except portage.exception.InvalidDependString, e:
# An invalid PROVIDE is only fatal for packages not yet installed.
4526 if not pkg.installed:
4527 show_invalid_depstring_notice(
4528 pkg, pkg.metadata["PROVIDE"], str(e))
4532 args = [arg for arg, atom in arg_atoms]
4534 if not pkg.onlydeps:
4535 if not pkg.installed and \
4536 "empty" not in self.myparams and \
4537 vardbapi.match(pkg.slot_atom):
4538 # Increase the priority of dependencies on packages that
4539 # are being rebuilt. This optimizes merge order so that
4540 # dependencies are rebuilt/updated as soon as possible,
4541 # which is needed especially when emerge is called by
4542 # revdep-rebuild since dependencies may be affected by ABI
4543 # breakage that has rendered them useless. Don't adjust
4544 # priority here when in "empty" mode since all packages
4545 # are being merged in that case.
4546 priority.rebuild = True
4548 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4549 slot_collision = False
4551 existing_node_matches = pkg.cpv == existing_node.cpv
4552 if existing_node_matches and \
4553 pkg != existing_node and \
4554 dep.atom is not None:
4555 # Use package set for matching since it will match via
4556 # PROVIDE when necessary, while match_from_list does not.
4557 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4558 if not atom_set.findAtomForPackage(existing_node):
4559 existing_node_matches = False
4560 if existing_node_matches:
4561 # The existing node can be reused.
4564 self.digraph.add(existing_node, arg,
4566 # If a direct circular dependency is not an unsatisfied
4567 # buildtime dependency then drop it here since otherwise
4568 # it can skew the merge order calculation in an unwanted
4570 if existing_node != myparent or \
4571 (priority.buildtime and not priority.satisfied):
4572 self.digraph.addnode(existing_node, myparent,
4577 if pkg.cpv == existing_node.cpv and \
4578 dep.atom is not None and \
4580 # Multiple different instances of the same version
4581 # (typically one installed and another not yet
4582 # installed) have been pulled into the graph due
4583 # to a USE dependency. The "slot collision" display
4584 # is not helpful in a case like this, so display it
4585 # as an unsatisfied dependency.
4586 self._unsatisfied_deps_for_display.append(
4587 ((dep.root, dep.atom), {"myparent":dep.parent}))
4588 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
4589 self._slot_collision_nodes.add(pkg)
4590 self.digraph.addnode(pkg, myparent, priority=priority)
4593 if pkg in self._slot_collision_nodes:
4595 # A slot collision has occurred. Sometimes this coincides
4596 # with unresolvable blockers, so the slot collision will be
4597 # shown later if there are no unresolvable blockers.
4598 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
4599 self._slot_collision_nodes.add(pkg)
4600 slot_collision = True
4603 # Now add this node to the graph so that self.display()
4604 # can show use flags and --tree portage.output. This node is
4605 # only being partially added to the graph. It must not be
4606 # allowed to interfere with the other nodes that have been
4607 # added. Do not overwrite data for existing nodes in
4608 # self.mydbapi since that data will be used for blocker
4610 # Even though the graph is now invalid, continue to process
4611 # dependencies so that things like --fetchonly can still
4612 # function despite collisions.
4615 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4616 self.mydbapi[pkg.root].cpv_inject(pkg)
4618 self.digraph.addnode(pkg, myparent, priority=priority)
4620 if not pkg.installed:
4621 # Allow this package to satisfy old-style virtuals in case it
4622 # doesn't already. Any pre-existing providers will be preferred
4625 pkgsettings.setinst(pkg.cpv, pkg.metadata)
4626 # For consistency, also update the global virtuals.
4627 settings = self.roots[pkg.root].settings
4629 settings.setinst(pkg.cpv, pkg.metadata)
4631 except portage.exception.InvalidDependString, e:
4632 show_invalid_depstring_notice(
4633 pkg, pkg.metadata["PROVIDE"], str(e))
4638 self._set_nodes.add(pkg)
4640 # Do this even when addme is False (--onlydeps) so that the
4641 # parent/child relationship is always known in case
4642 # self._show_slot_collision_notice() needs to be called later.
4644 self.digraph.add(pkg, myparent, priority=priority)
4647 self.digraph.add(pkg, arg, priority=priority)
4649 """ This section determines whether we go deeper into dependencies or not.
4650 We want to go deeper on a few occasions:
4651 Installing package A, we need to make sure package A's deps are met.
4652 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
4653 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
4655 dep_stack = self._dep_stack
4656 if "recurse" not in self.myparams:
# Installed packages' deps are deferred unless "deep" is requested.
4658 elif pkg.installed and \
4659 "deep" not in self.myparams:
4660 dep_stack = self._ignored_deps
4662 self.spinner.update()
4667 dep_stack.append(pkg)
# Expand a package's *DEPEND strings into Dependency objects and feed
# them to _add_dep(); handles --with-bdeps, removal actions, and the
# per-root priority classes for build/run/post dependencies.
4670 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
4672 mytype = pkg.type_name
4675 metadata = pkg.metadata
4676 myuse = pkg.use.enabled
# Children are one level deeper than this package.
4678 depth = pkg.depth + 1
4679 removal_action = "remove" in self.myparams
4682 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
4684 edepend[k] = metadata[k]
# --buildpkgonly (shallow): runtime deps are irrelevant for building.
4686 if not pkg.built and \
4687 "--buildpkgonly" in self.myopts and \
4688 "deep" not in self.myparams and \
4689 "empty" not in self.myparams:
4690 edepend["RDEPEND"] = ""
4691 edepend["PDEPEND"] = ""
4692 bdeps_satisfied = False
4694 if pkg.built and not removal_action:
4695 if self.myopts.get("--with-bdeps", "n") == "y":
4696 # Pull in build time deps as requested, but marked them as
4697 # "satisfied" since they are not strictly required. This allows
4698 # more freedom in the merge order calculation for solving
4699 # circular dependencies. Don't convert to PDEPEND since that
4700 # could make --with-bdeps=y less effective if it is used to
4701 # adjust merge order to prevent built_with_use() calls from
4703 bdeps_satisfied = True
4705 # built packages do not have build time dependencies.
4706 edepend["DEPEND"] = ""
4708 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
4709 edepend["DEPEND"] = ""
# Build deps resolve against "/", run/post deps against the target root.
4712 ("/", edepend["DEPEND"],
4713 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
4714 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
4715 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
4718 debug = "--debug" in self.myopts
# Installed packages get lenient dep-string parsing.
4719 strict = mytype != "installed"
4721 for dep_root, dep_string, dep_priority in deps:
4723 # Decrease priority so that --buildpkgonly
4724 # hasallzeros() works correctly.
4725 dep_priority = DepPriority()
4730 print "Parent: ", jbigkey
4731 print "Depstring:", dep_string
4732 print "Priority:", dep_priority
4733 vardb = self.roots[dep_root].trees["vartree"].dbapi
4735 selected_atoms = self._select_atoms(dep_root,
4736 dep_string, myuse=myuse, parent=pkg, strict=strict)
4737 except portage.exception.InvalidDependString, e:
4738 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
4741 print "Candidates:", selected_atoms
4743 for atom in selected_atoms:
4746 atom = portage.dep.Atom(atom)
4748 mypriority = dep_priority.copy()
# Non-blocker atoms already matched in the vdb are "satisfied".
4749 if not atom.blocker and vardb.match(atom):
4750 mypriority.satisfied = True
4752 if not self._add_dep(Dependency(atom=atom,
4753 blocker=atom.blocker, depth=depth, parent=pkg,
4754 priority=mypriority, root=dep_root),
4755 allow_unsatisfied=allow_unsatisfied):
4758 except portage.exception.InvalidAtom, e:
4759 show_invalid_depstring_notice(
4760 pkg, dep_string, str(e))
4762 if not pkg.installed:
4766 print "Exiting...", jbigkey
# Ambiguous (not fully-qualified) atoms: report and point the user at
# the offending package/ebuild.
4767 except portage.exception.AmbiguousPackageName, e:
4769 portage.writemsg("\n\n!!! An atom in the dependencies " + \
4770 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
4772 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
4773 portage.writemsg("\n", noiselevel=-1)
4774 if mytype == "binary":
4776 "!!! This binary package cannot be installed: '%s'\n" % \
4777 mykey, noiselevel=-1)
4778 elif mytype == "ebuild":
4779 portdb = self.roots[myroot].trees["porttree"].dbapi
4780 myebuild, mylocation = portdb.findname2(mykey)
4781 portage.writemsg("!!! This ebuild cannot be installed: " + \
4782 "'%s'\n" % myebuild, noiselevel=-1)
4783 portage.writemsg("!!! Please notify the package maintainer " + \
4784 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
	"""Construct a dependency priority for the current operation.

	Uses UnmergeDepPriority when a removal action is in progress
	("remove" in self.myparams) and plain DepPriority otherwise.
	Keyword arguments (e.g. buildtime=True, runtime=True,
	runtime_post=True, satisfied=...) are forwarded to the chosen
	constructor.
	"""
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		# Without this else, the UnmergeDepPriority choice above was
		# unconditionally clobbered by DepPriority.
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
# Expand a category-less atom into fully-qualified candidate atoms by
# scanning every configured package db for matching package names.
# NOTE(review): the excerpt elides the docstring delimiters and several
# body lines (cp_set initialization, the pn-comparison filter, return).
4795 def _dep_expand(self, root_config, atom_without_category):
4797 @param root_config: a root config instance
4798 @type root_config: RootConfig
4799 @param atom_without_category: an atom without a category component
4800 @type atom_without_category: String
4802 @returns: a list of atoms containing categories (possibly empty)
# Prefix with a throwaway "null" category just to extract the package
# name portion (atom_pn) via catsplit.
4804 null_cp = portage.dep_getkey(insert_category_into_atom(
4805 atom_without_category, "null"))
4806 cat, atom_pn = portage.catsplit(null_cp)
# Union of all known cat/pn pairs across every db for this root.
4809 for db, pkg_type, built, installed, db_keys in \
4810 self._filtered_trees[root_config.root]["dbs"]:
4811 cp_set.update(db.cp_all())
4812 for cp in list(cp_set):
4813 cat, pn = portage.catsplit(cp)
# For each surviving cp, rebuild the original atom with the real
# category inserted.
4818 cat, pn = portage.catsplit(cp)
4819 deps.append(insert_category_into_atom(
4820 atom_without_category, cat))
# Report whether any package db for the given root actually provides a
# package under atom_cp — used to detect new-style virtuals.
# NOTE(review): the return statements are elided from this excerpt;
# presumably True on a cp_list() hit, False otherwise — verify upstream.
4823 def _have_new_virt(self, root, atom_cp):
4825 for db, pkg_type, built, installed, db_keys in \
4826 self._filtered_trees[root]["dbs"]:
4827 if db.cp_list(atom_cp):
# Yield the argument atoms (from self._set_atoms) that apply to the given
# package, skipping atoms that were superseded by a new-style virtual and
# atoms for which a higher version exists in a different slot.
# NOTE(review): several lines are elided in this excerpt (early return,
# continue/break statements, the yield itself).
4832 def _iter_atoms_for_pkg(self, pkg):
4833 # TODO: add multiple $ROOT support
4834 if pkg.root != self.target_root:
4836 atom_arg_map = self._atom_arg_map
4837 root_config = self.roots[pkg.root]
4838 for atom in self._set_atoms.iterAtomsForPackage(pkg):
4839 atom_cp = portage.dep_getkey(atom)
# Old-style virtual match shadowed by a new-style virtual: skip.
4840 if atom_cp != pkg.cp and \
4841 self._have_new_virt(pkg.root, atom_cp):
4843 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
4844 visible_pkgs.reverse() # descending order
# Look for a visible, higher version of the same cp in another slot.
4846 for visible_pkg in visible_pkgs:
4847 if visible_pkg.cp != atom_cp:
4849 if pkg >= visible_pkg:
4850 # This is descending order, and we're not
4851 # interested in any versions <= pkg given.
4853 if pkg.slot_atom != visible_pkg.slot_atom:
4854 higher_slot = visible_pkg
4856 if higher_slot is not None:
# PackageArg instances refer to one exact package, so they bypass
# the higher-slot filtering above.
4858 for arg in atom_arg_map[(atom, pkg.root)]:
4859 if isinstance(arg, PackageArg) and \
# NOTE(review): many lines are elided throughout this excerpt (loop
# headers, else branches, try/except scaffolding); comments below annotate
# only what is visible.
4864 def select_files(self, myfiles):
4865 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
4866 appropriate depgraph and return a favorite list."""
4867 debug = "--debug" in self.myopts
4868 root_config = self.roots[self.target_root]
4869 sets = root_config.sets
4870 getSetAtoms = root_config.setconfig.getSetAtoms
4872 myroot = self.target_root
4873 dbs = self._filtered_trees[myroot]["dbs"]
4874 vardb = self.trees[myroot]["vartree"].dbapi
4875 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
4876 portdb = self.trees[myroot]["porttree"].dbapi
4877 bindb = self.trees[myroot]["bintree"].dbapi
4878 pkgsettings = self.pkgsettings[myroot]
4880 onlydeps = "--onlydeps" in self.myopts
# --- Per-argument classification: .tbz2 binary package ---
4883 ext = os.path.splitext(x)[1]
4885 if not os.path.exists(x):
# Fall back to $PKGDIR/All/<x> and then $PKGDIR/<x> lookups.
4887 os.path.join(pkgsettings["PKGDIR"], "All", x)):
4888 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
4889 elif os.path.exists(
4890 os.path.join(pkgsettings["PKGDIR"], x)):
4891 x = os.path.join(pkgsettings["PKGDIR"], x)
4893 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
4894 print "!!! Please ensure the tbz2 exists as specified.\n"
4895 return 0, myfavorites
# Derive the cpv key from the tbz2's embedded CATEGORY plus filename.
4896 mytbz2=portage.xpak.tbz2(x)
4897 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The tbz2 must live at the canonical bintree location for this key.
4898 if os.path.realpath(x) != \
4899 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
4900 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
4901 return 0, myfavorites
4902 db_keys = list(bindb._aux_cache_keys)
4903 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
4904 pkg = Package(type_name="binary", root_config=root_config,
4905 cpv=mykey, built=True, metadata=metadata,
4907 self._pkg_cache[pkg] = pkg
4908 args.append(PackageArg(arg=x, package=pkg,
4909 root_config=root_config))
# --- .ebuild file given directly on the command line ---
4910 elif ext==".ebuild":
4911 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
4912 pkgdir = os.path.dirname(ebuild_path)
4913 tree_root = os.path.dirname(os.path.dirname(pkgdir))
4914 cp = pkgdir[len(tree_root)+1:]
4915 e = portage.exception.PackageNotFound(
4916 ("%s is not in a valid portage tree " + \
4917 "hierarchy or does not exist") % x)
4918 if not portage.isvalidatom(cp):
4920 cat = portage.catsplit(cp)[0]
# Strip the ".ebuild" suffix (7 chars) to form the cpv key.
4921 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
4922 if not portage.isvalidatom("="+mykey):
4924 ebuild_path = portdb.findname(mykey)
# The path must match the canonical location inside PORTDIR or an
# overlay; otherwise the user must adjust their configuration.
4926 if ebuild_path != os.path.join(os.path.realpath(tree_root),
4927 cp, os.path.basename(ebuild_path)):
4928 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
4929 return 0, myfavorites
# Warn (with countdown) when the requested ebuild is masked.
4930 if mykey not in portdb.xmatch(
4931 "match-visible", portage.dep_getkey(mykey)):
4932 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
4933 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
4934 print colorize("BAD", "*** page for details.")
4935 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
4938 raise portage.exception.PackageNotFound(
4939 "%s is not in a valid portage tree hierarchy or does not exist" % x)
4940 db_keys = list(portdb._aux_cache_keys)
4941 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
4942 pkg = Package(type_name="ebuild", root_config=root_config,
4943 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
4944 pkgsettings.setcpv(pkg)
4945 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
4946 self._pkg_cache[pkg] = pkg
4947 args.append(PackageArg(arg=x, package=pkg,
4948 root_config=root_config))
# --- Absolute filesystem path: resolve to the owning package ---
4949 elif x.startswith(os.path.sep):
4950 if not x.startswith(myroot):
4951 portage.writemsg(("\n\n!!! '%s' does not start with" + \
4952 " $ROOT.\n") % x, noiselevel=-1)
4954 # Queue these up since it's most efficient to handle
4955 # multiple files in a single iter_owners() call.
4956 lookup_owners.append(x)
# --- Named package sets (system/world or @set syntax) ---
4958 if x in ("system", "world"):
4960 if x.startswith(SETPREFIX):
4961 s = x[len(SETPREFIX):]
4963 raise portage.exception.PackageSetNotFound(s)
4966 # Recursively expand sets so that containment tests in
4967 # self._get_parent_sets() properly match atoms in nested
4968 # sets (like if world contains system).
4969 expanded_set = InternalPackageSet(
4970 initial_atoms=getSetAtoms(s))
4971 self._sets[s] = expanded_set
4972 args.append(SetArg(arg=x, set=expanded_set,
4973 root_config=root_config))
4974 myfavorites.append(x)
# --- Plain atoms ---
4976 if not is_valid_package_atom(x):
4977 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
4979 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
4980 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
4982 # Don't expand categories or old-style virtuals here unless
4983 # necessary. Expansion of old-style virtuals here causes at
4984 # least the following problems:
4985 # 1) It's more difficult to determine which set(s) an atom
4986 # came from, if any.
4987 # 2) It takes away freedom from the resolver to choose other
4988 # possible expansions when necessary.
4990 args.append(AtomArg(arg=x, atom=x,
4991 root_config=root_config))
# Category-less atom: expand, preferring the single installed cp
# when the expansion is otherwise ambiguous.
4993 expanded_atoms = self._dep_expand(root_config, x)
4994 installed_cp_set = set()
4995 for atom in expanded_atoms:
4996 atom_cp = portage.dep_getkey(atom)
4997 if vardb.cp_list(atom_cp):
4998 installed_cp_set.add(atom_cp)
4999 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5000 installed_cp = iter(installed_cp_set).next()
5001 expanded_atoms = [atom for atom in expanded_atoms \
5002 if portage.dep_getkey(atom) == installed_cp]
# Still ambiguous: list the candidates and bail out.
5004 if len(expanded_atoms) > 1:
5005 print "\n\n!!! The short ebuild name \"" + x + "\" is ambiguous. Please specify"
5006 print "!!! one of the following fully-qualified ebuild names instead:\n"
5007 expanded_atoms = set(portage.dep_getkey(atom) \
5008 for atom in expanded_atoms)
5009 for i in sorted(expanded_atoms):
5010 print " " + green(i)
5012 return False, myfavorites
5014 atom = expanded_atoms[0]
# No expansion matched: fall back to virtual/ when a p-provided
# virtual of that name exists, else a "null/" placeholder.
5016 null_atom = insert_category_into_atom(x, "null")
5017 null_cp = portage.dep_getkey(null_atom)
5018 cat, atom_pn = portage.catsplit(null_cp)
5019 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5021 # Allow the depgraph to choose which virtual.
5022 atom = insert_category_into_atom(x, "virtual")
5024 atom = insert_category_into_atom(x, "null")
5026 args.append(AtomArg(arg=x, atom=atom,
5027 root_config=root_config))
# --- Resolve queued file paths to owning packages in one pass ---
5031 search_for_multiple = False
5032 if len(lookup_owners) > 1:
5033 search_for_multiple = True
5035 for x in lookup_owners:
# A directory can be owned by several packages.
5036 if not search_for_multiple and os.path.isdir(x):
5037 search_for_multiple = True
5038 relative_paths.append(x[len(myroot):])
5041 for pkg, relative_path in \
5042 real_vardb._owners.iter_owners(relative_paths):
5043 owners.add(pkg.mycpv)
5044 if not search_for_multiple:
5048 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5049 "by any package.\n") % lookup_owners[0], noiselevel=-1)
# Build slot-qualified atoms for each owner when SLOT is known.
5053 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5055 # portage now masks packages with missing slot, but it's
5056 # possible that one was installed by an older version
5057 atom = portage.cpv_getkey(cpv)
5059 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5060 args.append(AtomArg(arg=atom, atom=atom,
5061 root_config=root_config))
5063 if "--update" in self.myopts:
5064 # Enable greedy SLOT atoms for atoms given as arguments.
5065 # This is currently disabled for sets since greedy SLOT
5066 # atoms could be a property of the set itself.
5069 # In addition to any installed slots, also try to pull
5070 # in the latest new slot that may be available.
5071 greedy_atoms.append(arg)
5072 if not isinstance(arg, (AtomArg, PackageArg)):
5074 atom_cp = portage.dep_getkey(arg.atom)
# One extra greedy atom per installed SLOT of this cp.
5076 for cpv in vardb.match(arg.atom):
5077 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5079 greedy_atoms.append(
5080 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5081 root_config=root_config))
5085 # Create the "args" package set from atoms and
5086 # packages given as arguments.
5087 args_set = self._sets["args"]
5089 if not isinstance(arg, (AtomArg, PackageArg)):
5092 if myatom in args_set:
5094 args_set.add(myatom)
5095 myfavorites.append(myatom)
5096 self._set_atoms.update(chain(*self._sets.itervalues()))
# Map (atom, root) keys back to the argument(s) that produced them.
5097 atom_arg_map = self._atom_arg_map
5099 for atom in arg.set:
5100 atom_key = (atom, myroot)
5101 refs = atom_arg_map.get(atom_key)
5104 atom_arg_map[atom_key] = refs
5107 pprovideddict = pkgsettings.pprovideddict
5109 portage.writemsg("\n", noiselevel=-1)
5110 # Order needs to be preserved since a feature of --nodeps
5111 # is to allow the user to force a specific merge order.
# --- Seed the graph with one Dependency per argument atom ---
5115 for atom in arg.set:
5116 self.spinner.update()
5117 dep = Dependency(atom=atom, onlydeps=onlydeps,
5118 root=myroot, parent=arg)
5119 atom_cp = portage.dep_getkey(atom)
# package.provided entries satisfy the atom without merging.
5121 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5122 if pprovided and portage.match_from_list(atom, pprovided):
5123 # A provided package has been specified on the command line.
5124 self._pprovided_args.append((arg, atom))
5126 if isinstance(arg, PackageArg):
5127 if not self._add_pkg(arg.package, dep) or \
5128 not self._create_graph():
5129 sys.stderr.write(("\n\n!!! Problem resolving " + \
5130 "dependencies for %s\n") % arg.arg)
5131 return 0, myfavorites
5134 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5135 (arg, atom), noiselevel=-1)
5136 pkg, existing_node = self._select_package(
5137 myroot, atom, onlydeps=onlydeps)
# Unsatisfiable atoms abort unless they came from system/world.
5139 if not (isinstance(arg, SetArg) and \
5140 arg.name in ("system", "world")):
5141 self._unsatisfied_deps_for_display.append(
5142 ((myroot, atom), {}))
5143 return 0, myfavorites
5144 self._missing_args.append((arg, atom))
5146 if atom_cp != pkg.cp:
5147 # For old-style virtuals, we need to repeat the
5148 # package.provided check against the selected package.
5149 expanded_atom = atom.replace(atom_cp, pkg.cp)
5150 pprovided = pprovideddict.get(pkg.cp)
5152 portage.match_from_list(expanded_atom, pprovided):
5153 # A provided package has been
5154 # specified on the command line.
5155 self._pprovided_args.append((arg, atom))
5157 if pkg.installed and "selective" not in self.myparams:
5158 self._unsatisfied_deps_for_display.append(
5159 ((myroot, atom), {}))
5160 # Previous behavior was to bail out in this case, but
5161 # since the dep is satisfied by the installed package,
5162 # it's more friendly to continue building the graph
5163 # and just show a warning message. Therefore, only bail
5164 # out here if the atom is not from either the system or
5166 if not (isinstance(arg, SetArg) and \
5167 arg.name in ("system", "world")):
5168 return 0, myfavorites
5170 # Add the selected package to the graph as soon as possible
5171 # so that later dep_check() calls can use it as feedback
5172 # for making more consistent atom selections.
5173 if not self._add_pkg(pkg, dep):
5174 if isinstance(arg, SetArg):
5175 sys.stderr.write(("\n\n!!! Problem resolving " + \
5176 "dependencies for %s from %s\n") % \
5179 sys.stderr.write(("\n\n!!! Problem resolving " + \
5180 "dependencies for %s\n") % atom)
5181 return 0, myfavorites
# --- Error reporting for signature/dependency failures ---
5183 except portage.exception.MissingSignature, e:
5184 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5185 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5186 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5187 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5188 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5189 return 0, myfavorites
5190 except portage.exception.InvalidSignature, e:
5191 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5192 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5193 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5194 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5195 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5196 return 0, myfavorites
5197 except SystemExit, e:
5198 raise # Needed else can't exit
5199 except Exception, e:
5200 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5201 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5204 # Now that the root packages have been added to the graph,
5205 # process the dependencies.
5206 if not self._create_graph():
5207 return 0, myfavorites
# With --usepkgonly, verify every merge node has a binary available.
5210 if "--usepkgonly" in self.myopts:
5211 for xs in self.digraph.all_nodes():
5212 if not isinstance(xs, Package):
5214 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5218 print "Missing binary for:",xs[2]
5222 except self._unknown_internal_error:
5223 return False, myfavorites
5225 # We're true here unless we are missing binaries.
5226 return (not missing,myfavorites)
# Thin wrapper around _select_atoms_highest_available that swaps in the
# graph-backed trees, biasing atom selection toward packages already in
# the graph or installed-and-not-replaced.
5228 def _select_atoms_from_graph(self, *pargs, **kwargs):
5230 Prefer atoms matching packages that have already been
5231 added to the graph or those that are installed and have
5232 not been scheduled for replacement.
5234 kwargs["trees"] = self._graph_trees
5235 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): the excerpt elides the try/finally scaffolding around
# dep_check (original lines 5248/5253) and the mycheck[0] failure test
# before line 5258 — the structure below must be read with that in mind.
5237 def _select_atoms_highest_available(self, root, depstring,
5238 myuse=None, parent=None, strict=True, trees=None):
5239 """This will raise InvalidDependString if necessary. If trees is
5240 None then self._filtered_trees is used."""
5241 pkgsettings = self.pkgsettings[root]
5243 trees = self._filtered_trees
# The parent package is temporarily stashed in the trees mapping so
# dep_check's choice logic can see it; it is popped again below.
5246 if parent is not None:
5247 trees[root]["parent"] = parent
# Strict checking is toggled off globally for the dep_check call and
# restored afterwards.
5249 portage.dep._dep_check_strict = False
5250 mycheck = portage.dep_check(depstring, None,
5251 pkgsettings, myuse=myuse,
5252 myroot=root, trees=trees)
5254 if parent is not None:
5255 trees[root].pop("parent")
5256 portage.dep._dep_check_strict = True
# On failure, mycheck[1] carries the error message; on success it is
# the list of selected atoms.
5258 raise portage.exception.InvalidDependString(mycheck[1])
5259 selected_atoms = mycheck[1]
5260 return selected_atoms
# Explain to the user why an atom could not be satisfied: collect
# candidate packages, classify the failures (missing/changed USE flags,
# masking, EAPI), and print a readable report plus the parent chain.
# NOTE(review): numerous lines are elided in this excerpt.
5262 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5263 atom = portage.dep.Atom(atom)
5264 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Strip USE deps (and re-append the slot) so that matching below can
# find candidates that fail only on USE flags.
5265 atom_without_use = atom
5267 atom_without_use = portage.dep.remove_slot(atom)
5269 atom_without_use += ":" + atom.slot
5270 atom_without_use = portage.dep.Atom(atom_without_use)
5271 xinfo = '"%s"' % atom
5274 # Discard null/ from failed cpv_expand category expansion.
5275 xinfo = xinfo.replace("null/", "")
5276 masked_packages = []
5278 missing_licenses = []
5279 have_eapi_mask = False
5280 pkgsettings = self.pkgsettings[root]
5281 implicit_iuse = pkgsettings._get_implicit_iuse()
5282 root_config = self.roots[root]
5283 portdb = self.roots[root].trees["porttree"].dbapi
5284 dbs = self._filtered_trees[root]["dbs"]
# Gather every match (including masked ones) from each db.
5285 for db, pkg_type, built, installed, db_keys in dbs:
5289 if hasattr(db, "xmatch"):
5290 cpv_list = db.xmatch("match-all", atom_without_use)
5292 cpv_list = db.match(atom_without_use)
5295 for cpv in cpv_list:
5296 metadata, mreasons = get_mask_info(root_config, cpv,
5297 pkgsettings, db, pkg_type, built, installed, db_keys)
5298 if metadata is not None:
5299 pkg = Package(built=built, cpv=cpv,
5300 installed=installed, metadata=metadata,
5301 root_config=root_config)
5302 if pkg.cp != atom.cp:
5303 # A cpv can be returned from dbapi.match() as an
5304 # old-style virtual match even in cases when the
5305 # package does not actually PROVIDE the virtual.
5306 # Filter out any such false matches here.
5307 if not atom_set.findAtomForPackage(pkg):
# Unmasked packages that still fail must fail on USE deps.
5309 if atom.use and not mreasons:
5310 missing_use.append(pkg)
5312 masked_packages.append(
5313 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE failures: flags absent from IUSE vs. flags needing a
# different enabled/disabled state.
5315 missing_use_reasons = []
5316 missing_iuse_reasons = []
5317 for pkg in missing_use:
5318 use = pkg.use.enabled
5319 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5320 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5322 for x in atom.use.required:
5323 if iuse_re.match(x) is None:
5324 missing_iuse.append(x)
5327 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5328 missing_iuse_reasons.append((pkg, mreasons))
5330 need_enable = sorted(atom.use.enabled.difference(use))
5331 need_disable = sorted(atom.use.disabled.intersection(use))
5332 if need_enable or need_disable:
5334 changes.extend(colorize("red", "+" + x) \
5335 for x in need_enable)
5336 changes.extend(colorize("blue", "-" + x) \
5337 for x in need_disable)
5338 mreasons.append("Change USE: %s" % " ".join(changes))
5339 missing_use_reasons.append((pkg, mreasons))
5341 if missing_iuse_reasons and not missing_use_reasons:
5342 missing_use_reasons = missing_iuse_reasons
5343 elif missing_use_reasons:
5344 # Only show the latest version.
5345 del missing_use_reasons[1:]
# --- Report: USE-flag problems, then masks, then "no ebuilds" ---
5347 if missing_use_reasons:
5348 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5349 print "!!! One of the following packages is required to complete your request:"
5350 for pkg, mreasons in missing_use_reasons:
5351 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5353 elif masked_packages:
5355 colorize("BAD", "All ebuilds that could satisfy ") + \
5356 colorize("INFORM", xinfo) + \
5357 colorize("BAD", " have been masked.")
5358 print "!!! One of the following masked packages is required to complete your request:"
5359 have_eapi_mask = show_masked_packages(masked_packages)
# EAPI masks get an extra hint that upgrading portage may help.
5362 msg = ("The current version of portage supports " + \
5363 "EAPI '%s'. You must upgrade to a newer version" + \
5364 " of portage before EAPI masked packages can" + \
5365 " be installed.") % portage.const.EAPI
5366 from textwrap import wrap
5367 for line in wrap(msg, 75):
5372 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5374 # Show parent nodes and the argument that pulled them in.
5377 while node is not None:
5378 msg.append('(dependency required by "%s" [%s])' % \
5379 (colorize('INFORM', str(node.cpv)), node.type_name))
5381 for parent in self.digraph.parent_nodes(node):
5382 if isinstance(parent, DependencyArg):
5383 msg.append('(dependency required by "%s" [argument])' % \
5384 (colorize('INFORM', str(parent))))
# Memoizing front-end for _select_pkg_highest_available_imp: results are
# cached per (root, atom, onlydeps).  A cached "not yet in graph" result
# is upgraded in place once the package's slot shows up in the graph.
# NOTE(review): the cache-hit unpack/return lines are elided here.
5393 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5394 cache_key = (root, atom, onlydeps)
5395 ret = self._highest_pkg_cache.get(cache_key)
5398 if pkg and not existing:
5399 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5400 if existing and existing == pkg:
5401 # Update the cache to reflect that the
5402 # package has been added to the graph.
5404 self._highest_pkg_cache[cache_key] = ret
# Cache miss: do the real work and memoize.
5406 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5407 self._highest_pkg_cache[cache_key] = ret
# Core package-selection routine: walk every db (binary/installed/ebuild)
# for the root, collect acceptable matches for the atom, and return the
# preferred (package, existing_graph_node) pair.
# NOTE(review): many lines are elided in this excerpt (continue/break
# statements, several condition branches); comments annotate only what is
# visible.
5410 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5411 root_config = self.roots[root]
5412 pkgsettings = self.pkgsettings[root]
5413 dbs = self._filtered_trees[root]["dbs"]
5414 vardb = self.roots[root].trees["vartree"].dbapi
5415 portdb = self.roots[root].trees["porttree"].dbapi
5416 # List of acceptable packages, ordered by type preference.
5417 matched_packages = []
5418 highest_version = None
5419 if not isinstance(atom, portage.dep.Atom):
5420 atom = portage.dep.Atom(atom)
5422 atom_set = InternalPackageSet(initial_atoms=(atom,))
5423 existing_node = None
5425 usepkgonly = "--usepkgonly" in self.myopts
5426 empty = "empty" in self.myparams
5427 selective = "selective" in self.myparams
5429 noreplace = "--noreplace" in self.myopts
5430 # Behavior of the "selective" parameter depends on
5431 # whether or not a package matches an argument atom.
5432 # If an installed package provides an old-style
5433 # virtual that is no longer provided by an available
5434 # package, the installed package may match an argument
5435 # atom even though none of the available packages do.
5436 # Therefore, "selective" logic does not consider
5437 # whether or not an installed package matches an
5438 # argument atom. It only considers whether or not
5439 # available packages match argument atoms, which is
5440 # represented by the found_available_arg flag.
5441 found_available_arg = False
# Two passes: first look for a node already in the graph, then fall
# back to a fresh selection.
5442 for find_existing_node in True, False:
5445 for db, pkg_type, built, installed, db_keys in dbs:
# Skip the installed db when a reinstall is wanted and an
# available (non-installed) match has already been found.
5448 if installed and not find_existing_node:
5449 want_reinstall = reinstall or empty or \
5450 (found_available_arg and not selective)
5451 if want_reinstall and matched_packages:
5453 if hasattr(db, "xmatch"):
5454 cpv_list = db.xmatch("match-all", atom)
5456 cpv_list = db.match(atom)
5458 # USE=multislot can make an installed package appear as if
5459 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5460 # won't do any good as long as USE=multislot is enabled since
5461 # the newly built package still won't have the expected slot.
5462 # Therefore, assume that such SLOT dependencies are already
5463 # satisfied rather than forcing a rebuild.
5464 if installed and not cpv_list and atom.slot:
5465 for cpv in db.match(atom.cp):
5466 slot_available = False
5467 for other_db, other_type, other_built, \
5468 other_installed, other_keys in dbs:
5471 other_db.aux_get(cpv, ["SLOT"])[0]:
5472 slot_available = True
5476 if not slot_available:
5478 inst_pkg = self._pkg(cpv, "installed",
5479 root_config, installed=installed)
5480 # Remove the slot from the atom and verify that
5481 # the package matches the resulting atom.
5482 atom_without_slot = portage.dep.remove_slot(atom)
5484 atom_without_slot += str(atom.use)
5485 atom_without_slot = portage.dep.Atom(atom_without_slot)
5486 if portage.match_from_list(
5487 atom_without_slot, [inst_pkg]):
5488 cpv_list = [inst_pkg.cpv]
5493 pkg_status = "merge"
5494 if installed or onlydeps:
5495 pkg_status = "nomerge"
# Iterate candidates from highest version downward.
5498 for cpv in cpv_list:
5499 # Make --noreplace take precedence over --newuse.
5500 if not installed and noreplace and \
5501 cpv in vardb.match(atom):
5502 # If the installed version is masked, it may
5503 # be necessary to look at lower versions,
5504 # in case there is a visible downgrade.
5506 reinstall_for_flags = None
5507 cache_key = (pkg_type, root, cpv, pkg_status)
5508 calculated_use = True
5509 pkg = self._pkg_cache.get(cache_key)
5511 calculated_use = False
5513 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5516 pkg = Package(built=built, cpv=cpv,
5517 installed=installed, metadata=metadata,
5518 onlydeps=onlydeps, root_config=root_config,
5520 metadata = pkg.metadata
5521 if not built and ("?" in metadata["LICENSE"] or \
5522 "?" in metadata["PROVIDE"]):
5523 # This is avoided whenever possible because
5524 # it's expensive. It only needs to be done here
5525 # if it has an effect on visibility.
5526 pkgsettings.setcpv(pkg)
5527 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5528 calculated_use = True
5529 self._pkg_cache[pkg] = pkg
5531 if not installed or (installed and matched_packages):
5532 # Only enforce visibility on installed packages
5533 # if there is at least one other visible package
5534 # available. By filtering installed masked packages
5535 # here, packages that have been masked since they
5536 # were installed can be automatically downgraded
5537 # to an unmasked version.
5539 if not visible(pkgsettings, pkg):
5541 except portage.exception.InvalidDependString:
5545 # Enable upgrade or downgrade to a version
5546 # with visible KEYWORDS when the installed
5547 # version is masked by KEYWORDS, but never
5548 # reinstall the same exact version only due
5549 # to a KEYWORDS mask.
5550 if installed and matched_packages and \
5551 pkgsettings._getMissingKeywords(
5552 pkg.cpv, pkg.metadata):
5553 different_version = None
5554 for avail_pkg in matched_packages:
5555 if not portage.dep.cpvequal(
5556 pkg.cpv, avail_pkg.cpv):
5557 different_version = avail_pkg
5559 if different_version is not None:
5560 # Only reinstall for KEYWORDS if
5561 # it's not the same version.
5564 if not pkg.built and not calculated_use:
5565 # This is avoided whenever possible because
5567 pkgsettings.setcpv(pkg)
5568 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5570 if pkg.cp != atom.cp:
5571 # A cpv can be returned from dbapi.match() as an
5572 # old-style virtual match even in cases when the
5573 # package does not actually PROVIDE the virtual.
5574 # Filter out any such false matches here.
5575 if not atom_set.findAtomForPackage(pkg):
# Track whether a non-installed candidate matches an argument
# atom (drives the selective/reinstall logic above).
5579 if root == self.target_root:
5581 # Ebuild USE must have been calculated prior
5582 # to this point, in case atoms have USE deps.
5583 myarg = self._iter_atoms_for_pkg(pkg).next()
5584 except StopIteration:
5586 except portage.exception.InvalidDependString:
5588 # masked by corruption
5590 if not installed and myarg:
5591 found_available_arg = True
# Unbuilt candidates must satisfy the atom's USE deps.
5593 if atom.use and not pkg.built:
5594 use = pkg.use.enabled
5595 if atom.use.enabled.difference(use):
5597 if atom.use.disabled.intersection(use):
5599 if pkg.cp == atom_cp:
5600 if highest_version is None:
5601 highest_version = pkg
5602 elif pkg > highest_version:
5603 highest_version = pkg
5604 # At this point, we've found the highest visible
5605 # match from the current repo. Any lower versions
5606 # from this repo are ignored, so this so the loop
5607 # will always end with a break statement below
5609 if find_existing_node:
5610 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5613 if portage.dep.match_from_list(atom, [e_pkg]):
5614 if highest_version and \
5615 e_pkg.cp == atom_cp and \
5616 e_pkg < highest_version and \
5617 e_pkg.slot_atom != highest_version.slot_atom:
5618 # There is a higher version available in a
5619 # different slot, so this existing node is
5623 matched_packages.append(e_pkg)
5624 existing_node = e_pkg
5626 # Compare built package to current config and
5627 # reject the built package if necessary.
5628 if built and not installed and \
5629 ("--newuse" in self.myopts or \
5630 "--reinstall" in self.myopts):
5631 iuses = pkg.iuse.all
5632 old_use = pkg.use.enabled
5634 pkgsettings.setcpv(myeb)
5636 pkgsettings.setcpv(pkg)
5637 now_use = pkgsettings["PORTAGE_USE"].split()
5638 forced_flags = set()
5639 forced_flags.update(pkgsettings.useforce)
5640 forced_flags.update(pkgsettings.usemask)
5642 if myeb and not usepkgonly:
5643 cur_iuse = myeb.iuse.all
5644 if self._reinstall_for_flags(forced_flags,
5648 # Compare current config to installed package
5649 # and do not reinstall if possible.
5650 if not installed and \
5651 ("--newuse" in self.myopts or \
5652 "--reinstall" in self.myopts) and \
5653 cpv in vardb.match(atom):
5654 pkgsettings.setcpv(pkg)
5655 forced_flags = set()
5656 forced_flags.update(pkgsettings.useforce)
5657 forced_flags.update(pkgsettings.usemask)
5658 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
5659 old_iuse = set(filter_iuse_defaults(
5660 vardb.aux_get(cpv, ["IUSE"])[0].split()))
5661 cur_use = pkgsettings["PORTAGE_USE"].split()
5662 cur_iuse = pkg.iuse.all
5663 reinstall_for_flags = \
5664 self._reinstall_for_flags(
5665 forced_flags, old_use, old_iuse,
5667 if reinstall_for_flags:
5671 matched_packages.append(pkg)
5672 if reinstall_for_flags:
5673 self._reinstall_nodes[pkg] = \
5677 if not matched_packages:
5680 if "--debug" in self.myopts:
5681 for pkg in matched_packages:
5682 portage.writemsg("%s %s\n" % \
5683 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
5685 # Filter out any old-style virtual matches if they are
5686 # mixed with new-style virtual matches.
5687 cp = portage.dep_getkey(atom)
5688 if len(matched_packages) > 1 and \
5689 "virtual" == portage.catsplit(cp)[0]:
5690 for pkg in matched_packages:
5693 # Got a new-style virtual, so filter
5694 # out any old-style virtuals.
5695 matched_packages = [pkg for pkg in matched_packages \
5699 # If the installed version is in a different slot and it is higher than
5700 # the highest available visible package, _iter_atoms_for_pkg() may fail
5701 # to properly match the available package with a corresponding argument
5702 # atom. Detect this case and correct it here.
5703 if not selective and len(matched_packages) > 1 and \
5704 matched_packages[-1].installed and \
5705 matched_packages[-1].slot_atom != \
5706 matched_packages[-2].slot_atom and \
5707 matched_packages[-1] > matched_packages[-2]:
5708 pkg = matched_packages[-2]
5709 if pkg.root == self.target_root and \
5710 self._set_atoms.findAtomForPackage(pkg):
5711 # Select the available package instead
5712 # of the installed package.
5713 matched_packages.pop()
# Among remaining candidates, keep only those with the best cpv.
5715 if len(matched_packages) > 1:
5716 bestmatch = portage.best(
5717 [pkg.cpv for pkg in matched_packages])
5718 matched_packages = [pkg for pkg in matched_packages \
5719 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5721 # ordered by type preference ("ebuild" type is the last resort)
5722 return matched_packages[-1], existing_node
# Select a package from the graph-backed db only: take the highest match,
# prefer the Package instance already mapped to that slot in the graph,
# and otherwise fall back to the cached installed-package instance.
# NOTE(review): docstring delimiters and two guard branches are elided
# from this excerpt.
5724 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
5726 Select packages that have already been added to the graph or
5727 those that are installed and have not been scheduled for
5730 graph_db = self._graph_trees[root]["porttree"].dbapi
5731 matches = graph_db.match(atom)
5734 cpv = matches[-1] # highest match
5735 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
5736 graph_db.aux_get(cpv, ["SLOT"])[0])
5737 e_pkg = self._slot_pkg_map[root].get(slot_atom)
5740 # Since this cpv exists in the graph_db,
5741 # we must have a cached Package instance.
5742 cache_key = ("installed", root, cpv, "nomerge")
5743 return (self._pkg_cache[cache_key], None)
5745 def _complete_graph(self):
# Purpose: pull deep dependencies of the required sets (args/system/world)
# into the graph so that initially-satisfied deps are not silently broken
# by the planned upgrades. Only runs under --complete-graph ("complete"
# in self.myparams). Returns a success flag (return lines elided in this
# listing — e.g. original 5759, 5763, 5812, 5829-5832).
5747 Add any deep dependencies of required sets (args, system, world) that
5748 have not been pulled into the graph yet. This ensures that the graph
5749 is consistent such that initially satisfied deep dependencies are not
5750 broken in the new graph. Initially unsatisfied dependencies are
5751 irrelevant since we only want to avoid breaking dependencies that are
5754 Since this method can consume enough time to disturb users, it is
5755 currently only enabled by the --complete-graph option.
# Guard: nothing to do for --buildpkgonly or when recursion is disabled.
5757 if "--buildpkgonly" in self.myopts or \
5758 "recurse" not in self.myparams:
5761 if "complete" not in self.myparams:
5762 # Skip this to avoid consuming enough time to disturb users.
5765 # Put the depgraph into a mode that causes it to only
5766 # select packages that have already been added to the
5767 # graph or those that are installed and have not been
5768 # scheduled for replacement. Also, toggle the "deep"
5769 # parameter so that all dependencies are traversed and
# Redirect the selection hooks to the graph-only variants (see
# _select_pkg_from_graph above).
5771 self._select_atoms = self._select_atoms_from_graph
5772 self._select_package = self._select_pkg_from_graph
5773 already_deep = "deep" in self.myparams
5774 if not already_deep:
5775 self.myparams.add("deep")
5777 for root in self.roots:
5778 required_set_names = self._required_set_names.copy()
# Sets already handled for the target root (deep/empty modes) are
# dropped so their members are not traversed a second time.
5779 if root == self.target_root and \
5780 (already_deep or "empty" in self.myparams):
5781 required_set_names.difference_update(self._sets)
5782 if not required_set_names and not self._ignored_deps:
5784 root_config = self.roots[root]
5785 setconfig = root_config.setconfig
5787 # Reuse existing SetArg instances when available.
5788 for arg in self.digraph.root_nodes():
5789 if not isinstance(arg, SetArg):
5791 if arg.root_config != root_config:
5793 if arg.name in required_set_names:
# NOTE(review): the line appending the reused arg (original 5794)
# is elided here; only the name removal is visible.
5795 required_set_names.remove(arg.name)
5796 # Create new SetArg instances only when necessary.
5797 for s in required_set_names:
5798 expanded_set = InternalPackageSet(
5799 initial_atoms=setconfig.getSetAtoms(s))
5800 atom = SETPREFIX + s
5801 args.append(SetArg(arg=atom, set=expanded_set,
5802 root_config=root_config))
5803 vardb = root_config.trees["vartree"].dbapi
# Push each set member onto the dep stack as a Dependency rooted at
# its SetArg parent so _create_graph() will traverse it.
5805 for atom in arg.set:
5806 self._dep_stack.append(
5807 Dependency(atom=atom, root=root, parent=arg))
5808 if self._ignored_deps:
5809 self._dep_stack.extend(self._ignored_deps)
5810 self._ignored_deps = []
5811 if not self._create_graph(allow_unsatisfied=True):
5813 # Check the unsatisfied deps to see if any initially satisfied deps
5814 # will become unsatisfied due to an upgrade. Initially unsatisfied
5815 # deps are irrelevant since we only want to avoid breaking deps
5816 # that are initially satisfied.
5817 while self._unsatisfied_deps:
5818 dep = self._unsatisfied_deps.pop()
# A vardb match means the dep WAS satisfied by an installed pkg.
5819 matches = vardb.match_pkgs(dep.atom)
5821 self._initially_unsatisfied_deps.append(dep)
5823 # An scheduled installation broke a deep dependency.
5824 # Add the installed package to the graph so that it
5825 # will be appropriately reported as a slot collision
5826 # (possibly solvable via backtracking).
5827 pkg = matches[-1] # highest match
5828 if not self._add_pkg(pkg, dep):
5830 if not self._create_graph(allow_unsatisfied=True):
5834 def _pkg(self, cpv, type_name, root_config, installed=False):
# Purpose: memoized Package factory. Cache key is the 4-tuple
# (type_name, root, cpv, operation); operation is always "nomerge" here.
# NOTE(review): the cache-hit early return (original ~5846) and the final
# `return pkg` (original ~5859) are elided in this listing.
5836 Get a package instance from the cache, or create a new
5837 one if necessary. Raises KeyError from aux_get if it
5838 failures for some reason (package does not exist or is
5843 operation = "nomerge"
5844 pkg = self._pkg_cache.get(
5845 (type_name, root_config.root, cpv, operation))
5847 tree_type = self.pkg_tree_map[type_name]
5848 db = root_config.trees[tree_type].dbapi
# Metadata keys come from the ORIGINAL trees' aux cache, not the
# possibly-fake graph trees, so the key set matches on-disk caches.
5849 db_keys = list(self._trees_orig[root_config.root][
5850 tree_type].dbapi._aux_cache_keys)
5851 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5852 pkg = Package(cpv=cpv, metadata=metadata,
5853 root_config=root_config, installed=installed)
# Ebuilds get USE computed from current settings rather than metadata.
5854 if type_name == "ebuild":
5855 settings = self.pkgsettings[root_config.root]
5856 settings.setcpv(pkg)
5857 pkg.metadata["USE"] = settings["PORTAGE_USE"]
# Package hashes by its identity tuple, so pkg doubles as its own key.
5858 self._pkg_cache[pkg] = pkg
5861 def validate_blockers(self):
# Purpose: prune blockers that match nothing in the graph, and convert
# resolvable blocks into hard ordering deps (uninstall-after-merge tasks).
# Returns a success flag (return lines are elided in this listing).
# NOTE(review): many lines are elided throughout (gaps in the original
# numbering, e.g. 5869-5870, 5887-5888, 5901-5913, 6100-6109); loop
# headers, try:/else:/continue/return statements are missing from view.
5862 """Remove any blockers from the digraph that do not match any of the
5863 packages within the graph. If necessary, create hard deps to ensure
5864 correct merge order such that mutually blocking packages are never
5865 installed simultaneously."""
5867 if "--buildpkgonly" in self.myopts or \
5868 "--nodeps" in self.myopts:
5871 #if "deep" in self.myparams:
5873 # Pull in blockers from all installed packages that haven't already
5874 # been pulled into the depgraph. This is not enabled by default
5875 # due to the performance penalty that is incurred by all the
5876 # additional dep_check calls that are required.
5878 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
# --- Phase 1: (re)compute blocker atoms per installed package, using a
# persistent on-disk BlockerCache keyed by cpv+COUNTER. ---
5879 for myroot in self.trees:
5880 vardb = self.trees[myroot]["vartree"].dbapi
5881 portdb = self.trees[myroot]["porttree"].dbapi
5882 pkgsettings = self.pkgsettings[myroot]
5883 final_db = self.mydbapi[myroot]
5885 blocker_cache = BlockerCache(myroot, vardb)
# Anything left in stale_cache after the loop was uninstalled and
# will be evicted from the cache below.
5886 stale_cache = set(blocker_cache)
5889 stale_cache.discard(cpv)
5890 pkg_in_graph = self.digraph.contains(pkg)
5892 # Check for masked installed packages. Only warn about
5893 # packages that are in the graph in order to avoid warning
5894 # about those that will be automatically uninstalled during
5895 # the merge process or by --depclean.
5897 if pkg_in_graph and not visible(pkgsettings, pkg):
5898 self._masked_installed.add(pkg)
5900 blocker_atoms = None
5906 self._blocker_parents.child_nodes(pkg))
5911 self._irrelevant_blockers.child_nodes(pkg))
5914 if blockers is not None:
# Normalize graph blockers to their atom strings for comparison
# against the string atoms stored in the cache.
5915 blockers = set(str(blocker.atom) \
5916 for blocker in blockers)
5918 # If this node has any blockers, create a "nomerge"
5919 # node for it so that they can be enforced.
5920 self.spinner.update()
5921 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the installed instance changed since
# the cache entry was written, so the entry is unusable.
5922 if blocker_data is not None and \
5923 blocker_data.counter != long(pkg.metadata["COUNTER"]):
5926 # If blocker data from the graph is available, use
5927 # it to validate the cache and update the cache if
5929 if blocker_data is not None and \
5930 blockers is not None:
5931 if not blockers.symmetric_difference(
5932 blocker_data.atoms):
5936 if blocker_data is None and \
5937 blockers is not None:
5938 # Re-use the blockers from the graph.
5939 blocker_atoms = sorted(blockers)
5940 counter = long(pkg.metadata["COUNTER"])
5942 blocker_cache.BlockerData(counter, blocker_atoms)
5943 blocker_cache[pkg.cpv] = blocker_data
5947 blocker_atoms = blocker_data.atoms
# Cache miss: run a full dep_check to extract "!" atoms.
5949 # Use aux_get() to trigger FakeVartree global
5950 # updates on *DEPEND when appropriate.
5951 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5952 # It is crucial to pass in final_db here in order to
5953 # optimize dep_check calls by eliminating atoms via
5954 # dep_wordreduce and dep_eval calls.
# Strict parsing is disabled for installed pkgs (legacy metadata
# may be malformed); restored at original 5973 below.
5956 portage.dep._dep_check_strict = False
5958 success, atoms = portage.dep_check(depstr,
5959 final_db, pkgsettings, myuse=pkg.use.enabled,
5960 trees=self._graph_trees, myroot=myroot)
5961 except Exception, e:
5962 if isinstance(e, SystemExit):
5964 # This is helpful, for example, if a ValueError
5965 # is thrown from cpv_expand due to multiple
5966 # matches (this can happen if an atom lacks a
5968 show_invalid_depstring_notice(
5969 pkg, depstr, str(e))
5973 portage.dep._dep_check_strict = True
# dep_check failed: tolerate it only if this package is about
# to be replaced anyway.
5975 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
5976 if replacement_pkg and \
5977 replacement_pkg[0].operation == "merge":
5978 # This package is being replaced anyway, so
5979 # ignore invalid dependencies so as not to
5980 # annoy the user too much (otherwise they'd be
5981 # forced to manually unmerge it first).
5983 show_invalid_depstring_notice(pkg, depstr, atoms)
5985 blocker_atoms = [myatom for myatom in atoms \
5986 if myatom.startswith("!")]
5987 blocker_atoms.sort()
5988 counter = long(pkg.metadata["COUNTER"])
5989 blocker_cache[cpv] = \
5990 blocker_cache.BlockerData(counter, blocker_atoms)
# Register each blocker atom as a child of this package.
5993 for atom in blocker_atoms:
5994 blocker = Blocker(atom=portage.dep.Atom(atom),
5995 eapi=pkg.metadata["EAPI"], root=myroot)
5996 self._blocker_parents.add(blocker, pkg)
5997 except portage.exception.InvalidAtom, e:
5998 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5999 show_invalid_depstring_notice(
6000 pkg, depstr, "Invalid Atom: %s" % (e,))
6002 for cpv in stale_cache:
6003 del blocker_cache[cpv]
6004 blocker_cache.flush()
6007 # Discard any "uninstall" tasks scheduled by previous calls
6008 # to this method, since those tasks may not make sense given
6009 # the current graph state.
6010 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6011 if previous_uninstall_tasks:
6012 self._blocker_uninstalls = digraph()
6013 self.digraph.difference_update(previous_uninstall_tasks)
# --- Phase 2: classify each blocker as irrelevant, resolvable via an
# ordered uninstall, or unsolvable. ---
6015 for blocker in self._blocker_parents.leaf_nodes():
6016 self.spinner.update()
6017 root_config = self.roots[blocker.root]
6018 virtuals = root_config.settings.getvirtuals()
6019 myroot = blocker.root
6020 initial_db = self.trees[myroot]["vartree"].dbapi
6021 final_db = self.mydbapi[myroot]
# Old-style virtuals: expand the blocker atom to each provider.
6023 provider_virtual = False
6024 if blocker.cp in virtuals and \
6025 not self._have_new_virt(blocker.root, blocker.cp):
6026 provider_virtual = True
6028 if provider_virtual:
6030 for provider_entry in virtuals[blocker.cp]:
6032 portage.dep_getkey(provider_entry)
6033 atoms.append(blocker.atom.replace(
6034 blocker.cp, provider_cp))
6036 atoms = [blocker.atom]
# blocked_initial = matches in the current installed state;
# blocked_final = matches in the post-merge state.
6038 blocked_initial = []
6040 blocked_initial.extend(initial_db.match_pkgs(atom))
6044 blocked_final.extend(final_db.match_pkgs(atom))
6046 if not blocked_initial and not blocked_final:
6047 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6048 self._blocker_parents.remove(blocker)
6049 # Discard any parents that don't have any more blockers.
6050 for pkg in parent_pkgs:
6051 self._irrelevant_blockers.add(blocker, pkg)
6052 if not self._blocker_parents.child_nodes(pkg):
6053 self._blocker_parents.remove(pkg)
6055 for parent in self._blocker_parents.parent_nodes(blocker):
6056 unresolved_blocks = False
# (pkg, task) pairs meaning: pkg must be uninstalled, ordered
# relative to task's merge.
6057 depends_on_order = set()
6058 for pkg in blocked_initial:
6059 if pkg.slot_atom == parent.slot_atom:
6060 # TODO: Support blocks within slots in cases where it
6061 # might make sense. For example, a new version might
6062 # require that the old version be uninstalled at build
6065 if parent.installed:
6066 # Two currently installed packages conflict with
6067 # eachother. Ignore this case since the damage
6068 # is already done and this would be likely to
6069 # confuse users if displayed like a normal blocker.
6071 if parent.operation == "merge":
6072 # Maybe the blocked package can be replaced or simply
6073 # unmerged to resolve this block.
6074 depends_on_order.add((pkg, parent))
6076 # None of the above blocker resolutions techniques apply,
6077 # so apparently this one is unresolvable.
6078 unresolved_blocks = True
6079 for pkg in blocked_final:
6080 if pkg.slot_atom == parent.slot_atom:
6081 # TODO: Support blocks within slots.
6083 if parent.operation == "nomerge" and \
6084 pkg.operation == "nomerge":
6085 # This blocker will be handled the next time that a
6086 # merge of either package is triggered.
6089 # Maybe the blocking package can be
6090 # unmerged to resolve this block.
6091 if parent.operation == "merge" and pkg.installed:
6092 depends_on_order.add((pkg, parent))
6094 elif parent.operation == "nomerge":
6095 depends_on_order.add((parent, pkg))
6097 # None of the above blocker resolutions techniques apply,
6098 # so apparently this one is unresolvable.
6099 unresolved_blocks = True
6101 # Make sure we don't unmerge any package that have been pulled
6103 if not unresolved_blocks and depends_on_order:
6104 for inst_pkg, inst_task in depends_on_order:
# A package something else depends on must not be unmerged.
6105 if self.digraph.contains(inst_pkg) and \
6106 self.digraph.parent_nodes(inst_pkg):
6107 unresolved_blocks = True
6110 if not unresolved_blocks and depends_on_order:
6111 for inst_pkg, inst_task in depends_on_order:
# Clone the installed pkg as an explicit "uninstall" task.
6112 uninst_task = Package(built=inst_pkg.built,
6113 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6114 metadata=inst_pkg.metadata,
6115 operation="uninstall",
6116 root_config=inst_pkg.root_config,
6117 type_name=inst_pkg.type_name)
6118 self._pkg_cache[uninst_task] = uninst_task
6119 # Enforce correct merge order with a hard dep.
6120 self.digraph.addnode(uninst_task, inst_task,
6121 priority=BlockerDepPriority.instance)
6122 # Count references to this blocker so that it can be
6123 # invalidated after nodes referencing it have been
6125 self._blocker_uninstalls.addnode(uninst_task, blocker)
6126 if not unresolved_blocks and not depends_on_order:
6127 self._irrelevant_blockers.add(blocker, parent)
6128 self._blocker_parents.remove_edge(blocker, parent)
6129 if not self._blocker_parents.parent_nodes(blocker):
6130 self._blocker_parents.remove(blocker)
6131 if not self._blocker_parents.child_nodes(parent):
6132 self._blocker_parents.remove(parent)
6133 if unresolved_blocks:
6134 self._unsolvable_blockers.add(blocker, parent)
6138 def _accept_blocker_conflicts(self):
# Purpose: report whether unresolved blocker conflicts are tolerable —
# true for modes that never actually merge (--pretend, --fetchonly, etc.).
# NOTE(review): the accumulator assignment and return statements
# (original 6139, 6143-6146) are elided in this listing.
6140 for x in ("--buildpkgonly", "--fetchonly",
6141 "--fetch-all-uri", "--nodeps", "--pretend"):
6142 if x in self.myopts:
6147 def _merge_order_bias(self, mygraph):
# Purpose: sort mygraph.order so heavily-referenced nodes come first,
# improving subsequent leaf-node selection in _serialize_tasks().
# NOTE(review): the node_info dict initialization (original 6150) is
# elided in this listing.
6148 """Order nodes from highest to lowest overall reference count for
6149 optimal leaf node selection."""
6151 for node in mygraph.order:
6152 node_info[node] = len(mygraph.parent_nodes(node))
# Py2-style cmp function: negative when node1 has MORE parents,
# i.e. descending parent-count order.
6153 def cmp_merge_preference(node1, node2):
6154 return node_info[node2] - node_info[node1]
6155 mygraph.order.sort(cmp_merge_preference)
6157 def altlist(self, reversed=False):
# Purpose: return a copy of the serialized merge list, computing and
# caching it on first call. Loops until _serialize_tasks() succeeds,
# because it may raise _serialize_tasks_retry after enabling "complete"
# mode. NOTE(review): the try: line, the retry handling, and the
# reversed/return lines (original 6158, 6161, 6165-6171) are elided;
# the `reversed` parameter shadows the builtin but is part of the
# public interface, so it must stay.
6159 while self._serialized_tasks_cache is None:
6160 self._resolve_conflicts()
6162 self._serialized_tasks_cache, self._scheduler_graph = \
6163 self._serialize_tasks()
6164 except self._serialize_tasks_retry:
# Copy so callers can mutate the returned list freely.
6167 retlist = self._serialized_tasks_cache[:]
6172 def schedulerGraph(self):
# Purpose: return the scheduler-oriented dependency graph, computing the
# merge list first (via altlist) if needed. After break_refs() this
# depgraph must not be used for further calculations.
# NOTE(review): the altlist() call that populates _scheduler_graph
# (original ~6187) is elided in this listing.
6174 The scheduler graph is identical to the normal one except that
6175 uninstall edges are reversed in specific cases that require
6176 conflicting packages to be temporarily installed simultaneously.
6177 This is intended for use by the Scheduler in it's parallelization
6178 logic. It ensures that temporary simultaneous installation of
6179 conflicting packages is avoided when appropriate (especially for
6180 !!atom blockers), but allowed in specific cases that require it.
6182 Note that this method calls break_refs() which alters the state of
6183 internal Package instances such that this depgraph instance should
6184 not be used to perform any more calculations.
6186 if self._scheduler_graph is None:
# Sever back-references so Package nodes don't pin this depgraph.
6188 self.break_refs(self._scheduler_graph.order)
6189 return self._scheduler_graph
6191 def break_refs(self, nodes):
# Purpose: replace each node's FakeVartree-backed RootConfig with the
# original on-disk RootConfig so the nodes no longer keep the depgraph
# (and its caches) alive on the heap. Mutates nodes in place.
# NOTE(review): the `for node in nodes:` loop header (original ~6198)
# is elided in this listing.
6193 Take a mergelist like that returned from self.altlist() and
6194 break any references that lead back to the depgraph. This is
6195 useful if you want to hold references to packages without
6196 also holding the depgraph on the heap.
# hasattr guard: the list may contain Blocker entries without
# a root_config attribute.
6199 if hasattr(node, "root_config"):
6200 # The FakeVartree references the _package_cache which
6201 # references the depgraph. So that Package instances don't
6202 # hold the depgraph and FakeVartree on the heap, replace
6203 # the RootConfig that references the FakeVartree with the
6204 # original RootConfig instance which references the actual
6206 node.root_config = \
6207 self._trees_orig[node.root_config.root]["root_config"]
6209 def _resolve_conflicts(self):
# Purpose: finalize the graph before serialization — complete deep deps
# and validate blockers, escalating any failure to an internal error
# (self._unknown_internal_error is raised as an exception).
6210 if not self._complete_graph():
6211 raise self._unknown_internal_error()
6213 if not self.validate_blockers():
6214 raise self._unknown_internal_error()
6216 def _serialize_tasks(self):
# Purpose: flatten self.digraph into an ordered merge list (retlist)
# plus a scheduler graph. Repeatedly selects leaf nodes, handling
# ASAP portage updates, circular RDEPEND groups, and blocker-driven
# uninstall tasks. Raises _serialize_tasks_retry to request a rerun
# with "complete" mode, or _unknown_internal_error on unresolvable
# conflicts. Returns (retlist, scheduler_graph).
# NOTE(review): this listing elides many lines (gaps in the original
# numbering, e.g. 6300s-6600s): try:/else:/continue/break statements
# and several assignments (retlist/asap_nodes/prefer_asap inits) are
# not visible here.
6217 scheduler_graph = self.digraph.copy()
6218 mygraph=self.digraph.copy()
6219 # Prune "nomerge" root nodes if nothing depends on them, since
6220 # otherwise they slow down merge order calculation. Don't remove
6221 # non-root nodes since they help optimize merge order in some cases
6222 # such as revdep-rebuild.
6223 removed_nodes = set()
6225 for node in mygraph.root_nodes():
6226 if not isinstance(node, Package) or \
6227 node.installed or node.onlydeps:
6228 removed_nodes.add(node)
6230 self.spinner.update()
6231 mygraph.difference_update(removed_nodes)
6232 if not removed_nodes:
6234 removed_nodes.clear()
6235 self._merge_order_bias(mygraph)
6236 def cmp_circular_bias(n1, n2):
6238 RDEPEND is stronger than PDEPEND and this function
6239 measures such a strength bias within a circular
6240 dependency relationship.
6242 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6243 ignore_priority=DepPriority.MEDIUM_SOFT)
6244 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6245 ignore_priority=DepPriority.MEDIUM_SOFT)
6246 if n1_n2_medium == n2_n1_medium:
6251 myblocker_uninstalls = self._blocker_uninstalls.copy()
6253 # Contains uninstall tasks that have been scheduled to
6254 # occur after overlapping blockers have been installed.
6255 scheduled_uninstalls = set()
6256 # Contains any Uninstall tasks that have been ignored
6257 # in order to avoid the circular deps code path. These
6258 # correspond to blocker conflicts that could not be
6260 ignored_uninstall_tasks = set()
6261 have_uninstall_task = False
6262 complete = "complete" in self.myparams
6263 myblocker_parents = self._blocker_parents.copy()
6266 def get_nodes(**kwargs):
6268 Returns leaf nodes excluding Uninstall instances
6269 since those should be executed as late as possible.
6271 return [node for node in mygraph.leaf_nodes(**kwargs) \
6272 if isinstance(node, Package) and \
6273 (node.operation != "uninstall" or \
6274 node in scheduled_uninstalls)]
6276 # sys-apps/portage needs special treatment if ROOT="/"
6277 running_root = self._running_root.root
6278 from portage.const import PORTAGE_PACKAGE_ATOM
6279 runtime_deps = InternalPackageSet(
6280 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6281 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6282 PORTAGE_PACKAGE_ATOM)
6283 replacement_portage = self.mydbapi[running_root].match_pkgs(
6284 PORTAGE_PACKAGE_ATOM)
6287 running_portage = running_portage[0]
6289 running_portage = None
6291 if replacement_portage:
6292 replacement_portage = replacement_portage[0]
6294 replacement_portage = None
6296 if replacement_portage == running_portage:
6297 replacement_portage = None
6299 if replacement_portage is not None:
6300 # update from running_portage to replacement_portage asap
6301 asap_nodes.append(replacement_portage)
6303 if running_portage is not None:
# Collect portage's own RDEPENDs so they are never uninstalled
# out from under the running instance.
6305 portage_rdepend = self._select_atoms_highest_available(
6306 running_root, running_portage.metadata["RDEPEND"],
6307 myuse=running_portage.use.enabled,
6308 parent=running_portage, strict=False)
6309 except portage.exception.InvalidDependString, e:
6310 portage.writemsg("!!! Invalid RDEPEND in " + \
6311 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6312 (running_root, running_portage.cpv, e), noiselevel=-1)
6314 portage_rdepend = []
6315 runtime_deps.update(atom for atom in portage_rdepend \
6316 if not atom.startswith("!"))
# None first => try without ignoring any priority, then loosen.
6318 ignore_priority_soft_range = [None]
6319 ignore_priority_soft_range.extend(
6320 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6321 tree_mode = "--tree" in self.myopts
6322 # Tracks whether or not the current iteration should prefer asap_nodes
6323 # if available. This is set to False when the previous iteration
6324 # failed to select any nodes. It is reset whenever nodes are
6325 # successfully selected.
6328 # By default, try to avoid selecting root nodes whenever possible. This
6329 # helps ensure that the maximimum possible number of soft dependencies
6330 # have been removed from the graph before their parent nodes have
6331 # selected. This is especially important when those dependencies are
6332 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6333 # CHOST has been changed (like when building a stage3 from a stage2).
6334 accept_root_node = False
6336 # State of prefer_asap and accept_root_node flags for successive
6337 # iterations that loosen the criteria for node selection.
6339 # iteration prefer_asap accept_root_node
6344 # If no nodes are selected on the 3rd iteration, it is due to
6345 # unresolved blockers or circular dependencies.
# --- Main selection loop: peel leaf nodes until the graph is empty ---
6347 while not mygraph.empty():
6348 self.spinner.update()
6349 selected_nodes = None
6350 ignore_priority = None
6351 if prefer_asap and asap_nodes:
6352 """ASAP nodes are merged before their soft deps."""
6353 asap_nodes = [node for node in asap_nodes \
6354 if mygraph.contains(node)]
6355 for node in asap_nodes:
6356 if not mygraph.child_nodes(node,
6357 ignore_priority=DepPriority.SOFT):
6358 selected_nodes = [node]
6359 asap_nodes.remove(node)
6361 if not selected_nodes and \
6362 not (prefer_asap and asap_nodes):
6363 for ignore_priority in ignore_priority_soft_range:
6364 nodes = get_nodes(ignore_priority=ignore_priority)
6368 if ignore_priority is None and not tree_mode:
6369 # Greedily pop all of these nodes since no relationship
6370 # has been ignored. This optimization destroys --tree
6371 # output, so it's disabled in reversed mode. If there
6372 # is a mix of merge and uninstall nodes, save the
6373 # uninstall nodes from later since sometimes a merge
6374 # node will render an install node unnecessary, and
6375 # we want to avoid doing a separate uninstall task in
6377 merge_nodes = [node for node in nodes \
6378 if node.operation == "merge"]
6380 selected_nodes = merge_nodes
6382 selected_nodes = nodes
6384 # For optimal merge order:
6385 # * Only pop one node.
6386 # * Removing a root node (node without a parent)
6387 # will not produce a leaf node, so avoid it.
6389 if mygraph.parent_nodes(node):
6390 # found a non-root node
6391 selected_nodes = [node]
6393 if not selected_nodes and \
6394 (accept_root_node or ignore_priority is None):
6395 # settle for a root node
6396 selected_nodes = [nodes[0]]
6398 if not selected_nodes:
6399 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6401 """Recursively gather a group of nodes that RDEPEND on
6402 eachother. This ensures that they are merged as a group
6403 and get their RDEPENDs satisfied as soon as possible."""
6404 def gather_deps(ignore_priority,
6405 mergeable_nodes, selected_nodes, node):
6406 if node in selected_nodes:
6408 if node not in mergeable_nodes:
6410 if node == replacement_portage and \
6411 mygraph.child_nodes(node,
6412 ignore_priority=DepPriority.MEDIUM_SOFT):
6413 # Make sure that portage always has all of it's
6414 # RDEPENDs installed first.
6416 selected_nodes.add(node)
6417 for child in mygraph.child_nodes(node,
6418 ignore_priority=ignore_priority):
6419 if not gather_deps(ignore_priority,
6420 mergeable_nodes, selected_nodes, child):
6423 mergeable_nodes = set(nodes)
6424 if prefer_asap and asap_nodes:
6426 for ignore_priority in xrange(DepPriority.SOFT,
6427 DepPriority.MEDIUM_SOFT + 1):
6429 if nodes is not asap_nodes and \
6430 not accept_root_node and \
6431 not mygraph.parent_nodes(node):
6433 selected_nodes = set()
6434 if gather_deps(ignore_priority,
6435 mergeable_nodes, selected_nodes, node):
6438 selected_nodes = None
6442 # If any nodes have been selected here, it's always
6443 # possible that anything up to a MEDIUM_SOFT priority
6444 # relationship has been ignored. This state is recorded
6445 # in ignore_priority so that relevant nodes will be
6446 # added to asap_nodes when appropriate.
6448 ignore_priority = DepPriority.MEDIUM_SOFT
6450 if prefer_asap and asap_nodes and not selected_nodes:
6451 # We failed to find any asap nodes to merge, so ignore
6452 # them for the next iteration.
6456 if not selected_nodes and not accept_root_node:
6457 # Maybe there are only root nodes left, so accept them
6458 # for the next iteration.
6459 accept_root_node = True
6462 if selected_nodes and ignore_priority > DepPriority.SOFT:
6463 # Try to merge ignored medium deps as soon as possible.
6464 for node in selected_nodes:
6465 children = set(mygraph.child_nodes(node))
6466 soft = children.difference(
6467 mygraph.child_nodes(node,
6468 ignore_priority=DepPriority.SOFT))
6469 medium_soft = children.difference(
6470 mygraph.child_nodes(node,
6471 ignore_priority=DepPriority.MEDIUM_SOFT))
6472 medium_soft.difference_update(soft)
6473 for child in medium_soft:
6474 if child in selected_nodes:
6476 if child in asap_nodes:
6478 asap_nodes.append(child)
6480 if selected_nodes and len(selected_nodes) > 1:
6481 if not isinstance(selected_nodes, list):
6482 selected_nodes = list(selected_nodes)
6483 selected_nodes.sort(cmp_circular_bias)
# --- No normal leaf found: try executing a blocker Uninstall task ---
6485 if not selected_nodes and not myblocker_uninstalls.is_empty():
6486 # An Uninstall task needs to be executed in order to
6487 # avoid conflict if possible.
6488 min_parent_deps = None
6490 for task in myblocker_uninstalls.leaf_nodes():
6491 # Do some sanity checks so that system or world packages
6492 # don't get uninstalled inappropriately here (only really
6493 # necessary when --complete-graph has not been enabled).
6495 if task in ignored_uninstall_tasks:
6498 if task in scheduled_uninstalls:
6499 # It's been scheduled but it hasn't
6500 # been executed yet due to dependence
6501 # on installation of blocking packages.
6504 root_config = self.roots[task.root]
6505 inst_pkg = self._pkg_cache[
6506 ("installed", task.root, task.cpv, "nomerge")]
6508 if self.digraph.contains(inst_pkg):
# Determine blocker-overlap semantics from the blockers' EAPI:
# EAPI 0/1 blockers get a heuristic, "!!" atoms forbid overlap.
6511 forbid_overlap = False
6512 heuristic_overlap = False
6513 for blocker in myblocker_uninstalls.parent_nodes(task):
6514 if blocker.eapi in ("0", "1"):
6515 heuristic_overlap = True
6516 elif blocker.atom.blocker.overlap.forbid:
6517 forbid_overlap = True
6519 if forbid_overlap and running_root == task.root:
6522 if heuristic_overlap and running_root == task.root:
6523 # Never uninstall sys-apps/portage or it's essential
6524 # dependencies, except through replacement.
6526 runtime_dep_atoms = \
6527 list(runtime_deps.iterAtomsForPackage(task))
6528 except portage.exception.InvalidDependString, e:
6529 portage.writemsg("!!! Invalid PROVIDE in " + \
6530 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6531 (task.root, task.cpv, e), noiselevel=-1)
6535 # Don't uninstall a runtime dep if it appears
6536 # to be the only suitable one installed.
6538 vardb = root_config.trees["vartree"].dbapi
6539 for atom in runtime_dep_atoms:
6540 other_version = None
6541 for pkg in vardb.match_pkgs(atom):
# Same cpv AND same COUNTER means it's this very
# instance, not an alternative provider.
6542 if pkg.cpv == task.cpv and \
6543 pkg.metadata["COUNTER"] == \
6544 task.metadata["COUNTER"]:
6548 if other_version is None:
6554 # For packages in the system set, don't take
6555 # any chances. If the conflict can't be resolved
6556 # by a normal replacement operation then abort.
6559 for atom in root_config.sets[
6560 "system"].iterAtomsForPackage(task):
6563 except portage.exception.InvalidDependString, e:
6564 portage.writemsg("!!! Invalid PROVIDE in " + \
6565 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6566 (task.root, task.cpv, e), noiselevel=-1)
6572 # Note that the world check isn't always
6573 # necessary since self._complete_graph() will
6574 # add all packages from the system and world sets to the
6575 # graph. This just allows unresolved conflicts to be
6576 # detected as early as possible, which makes it possible
6577 # to avoid calling self._complete_graph() when it is
6578 # unnecessary due to blockers triggering an abortion.
6580 # For packages in the world set, go ahead an uninstall
6581 # when necessary, as long as the atom will be satisfied
6582 # in the final state.
6583 graph_db = self.mydbapi[task.root]
6586 for atom in root_config.sets[
6587 "world"].iterAtomsForPackage(task):
6589 for pkg in graph_db.match_pkgs(atom):
6597 except portage.exception.InvalidDependString, e:
6598 portage.writemsg("!!! Invalid PROVIDE in " + \
6599 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6600 (task.root, task.cpv, e), noiselevel=-1)
6606 # Check the deps of parent nodes to ensure that
6607 # the chosen task produces a leaf node. Maybe
6608 # this can be optimized some more to make the
6609 # best possible choice, but the current algorithm
6610 # is simple and should be near optimal for most
6613 for parent in mygraph.parent_nodes(task):
6614 parent_deps.update(mygraph.child_nodes(parent,
6615 ignore_priority=DepPriority.MEDIUM_SOFT))
6616 parent_deps.remove(task)
# Prefer the uninstall whose parents have the fewest
# remaining deps — most likely to free up a leaf.
6617 if min_parent_deps is None or \
6618 len(parent_deps) < min_parent_deps:
6619 min_parent_deps = len(parent_deps)
6622 if uninst_task is not None:
6623 # The uninstall is performed only after blocking
6624 # packages have been merged on top of it. File
6625 # collisions between blocking packages are detected
6626 # and removed from the list of files to be uninstalled.
6627 scheduled_uninstalls.add(uninst_task)
6628 parent_nodes = mygraph.parent_nodes(uninst_task)
6630 # Reverse the parent -> uninstall edges since we want
6631 # to do the uninstall after blocking packages have
6632 # been merged on top of it.
6633 mygraph.remove(uninst_task)
6634 for blocked_pkg in parent_nodes:
6635 mygraph.add(blocked_pkg, uninst_task,
6636 priority=BlockerDepPriority.instance)
6637 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6638 scheduler_graph.add(blocked_pkg, uninst_task,
6639 priority=BlockerDepPriority.instance)
6642 # None of the Uninstall tasks are acceptable, so
6643 # the corresponding blockers are unresolvable.
6644 # We need to drop an Uninstall task here in order
6645 # to avoid the circular deps code path, but the
6646 # blocker will still be counted as an unresolved
6648 for node in myblocker_uninstalls.leaf_nodes():
6650 mygraph.remove(node)
6654 ignored_uninstall_tasks.add(node)
6657 # After dropping an Uninstall task, reset
6658 # the state variables for leaf node selection and
6659 # continue trying to select leaf nodes.
6661 accept_root_node = False
6664 if not selected_nodes:
6665 self._circular_deps_for_display = mygraph
6666 raise self._unknown_internal_error()
6668 # At this point, we've succeeded in selecting one or more nodes, so
6669 # it's now safe to reset the prefer_asap and accept_root_node flags
6670 # to their default states.
6672 accept_root_node = False
6674 mygraph.difference_update(selected_nodes)
6676 for node in selected_nodes:
# "nomerge" nodes are ordering aids only; never emitted.
6677 if isinstance(node, Package) and \
6678 node.operation == "nomerge":
6681 # Handle interactions between blockers
6682 # and uninstallation tasks.
6683 solved_blockers = set()
6685 if isinstance(node, Package) and \
6686 "uninstall" == node.operation:
6687 have_uninstall_task = True
6690 vardb = self.trees[node.root]["vartree"].dbapi
6691 previous_cpv = vardb.match(node.slot_atom)
6693 # The package will be replaced by this one, so remove
6694 # the corresponding Uninstall task if necessary.
6695 previous_cpv = previous_cpv[0]
6697 ("installed", node.root, previous_cpv, "uninstall")
6699 mygraph.remove(uninst_task)
6703 if uninst_task is not None and \
6704 uninst_task not in ignored_uninstall_tasks and \
6705 myblocker_uninstalls.contains(uninst_task):
6706 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6707 myblocker_uninstalls.remove(uninst_task)
6708 # Discard any blockers that this Uninstall solves.
6709 for blocker in blocker_nodes:
6710 if not myblocker_uninstalls.child_nodes(blocker):
6711 myblocker_uninstalls.remove(blocker)
6712 solved_blockers.add(blocker)
6714 retlist.append(node)
6716 if (isinstance(node, Package) and \
6717 "uninstall" == node.operation) or \
6718 (uninst_task is not None and \
6719 uninst_task in scheduled_uninstalls):
6720 # Include satisfied blockers in the merge list
6721 # since the user might be interested and also
6722 # it serves as an indicator that blocking packages
6723 # will be temporarily installed simultaneously.
6724 for blocker in solved_blockers:
6725 retlist.append(Blocker(atom=blocker.atom,
6726 root=blocker.root, eapi=blocker.eapi,
6729 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
6730 for node in myblocker_uninstalls.root_nodes():
6731 unsolvable_blockers.add(node)
6733 for blocker in unsolvable_blockers:
6734 retlist.append(blocker)
6736 # If any Uninstall tasks need to be executed in order
6737 # to avoid a conflict, complete the graph with any
6738 # dependencies that may have been initially
6739 # neglected (to ensure that unsafe Uninstall tasks
6740 # are properly identified and blocked from execution).
6741 if have_uninstall_task and \
6743 not unsolvable_blockers:
# Retry from altlist() with "complete" mode enabled.
6744 self.myparams.add("complete")
6745 raise self._serialize_tasks_retry("")
6747 if unsolvable_blockers and \
6748 not self._accept_blocker_conflicts():
6749 self._unsatisfied_blockers_for_display = unsolvable_blockers
6750 self._serialized_tasks_cache = retlist[:]
6751 self._scheduler_graph = scheduler_graph
6752 raise self._unknown_internal_error()
6754 if self._slot_collision_info and \
6755 not self._accept_blocker_conflicts():
6756 self._serialized_tasks_cache = retlist[:]
6757 self._scheduler_graph = scheduler_graph
6758 raise self._unknown_internal_error()
6760 return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """Report a circular-dependency panic.

    Prunes acyclic root nodes from a copy of the graph, displays the
    remaining (cyclic) nodes as a tree with their USE flags, then dumps
    the graph and advice for breaking the cycle.

    NOTE(review): this chunk is an elided listing -- the original line
    numbering jumps in places, so some statements (loop guards, the
    ``display_order`` initialization, a trailing ``noiselevel``
    argument) are missing below.
    """
    # No leaf nodes are available, so we have a circular
    # dependency panic situation.  Reduce the noise level to a
    # minimum via repeated elimination of root nodes since they
    # have no parents and thus can not be part of a cycle.
    root_nodes = mygraph.root_nodes(
        ignore_priority=DepPriority.MEDIUM_SOFT)
    mygraph.difference_update(root_nodes)
    # Display the USE flags that are enabled on nodes that are part
    # of dependency cycles in case that helps the user decide to
    # disable some of them.
    tempgraph = mygraph.copy()
    while not tempgraph.empty():
        nodes = tempgraph.leaf_nodes()
        node = tempgraph.order[0]
        display_order.append(node)
        tempgraph.remove(node)
    display_order.reverse()
    # Force --tree output at normal verbosity so the cycle is visible.
    self.myopts.pop("--quiet", None)
    self.myopts.pop("--verbose", None)
    self.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(display_order)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
    portage.writemsg("\n", noiselevel=-1)
    mygraph.debug_print()
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Note that circular dependencies " + \
        "can often be avoided by temporarily\n", noiselevel=-1)
    portage.writemsg(prefix + "disabling USE flags that trigger " + \
        "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """Display the cached serialized merge list, unless that exact list
    (or its reverse, which --tree produces) has already been shown."""
    cached = self._serialized_tasks_cache
    if cached is None:
        return
    shown = self._displayed_list
    if shown and \
        (shown == cached or shown == list(reversed(cached))):
        # Already on screen in one orientation or the other.
        return
    display_list = cached[:]
    if "--tree" in self.myopts:
        display_list.reverse()
    self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """Print an error explaining that the displayed merge list contains
    mutually blocking packages, preceded by the merge list itself."""
    from textwrap import wrap
    self._show_merge_list()
    msg = ("Error: The above package list contains packages which cannot be "
        "installed at the same time on the same system.")
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    for line in wrap(msg, 70):
        portage.writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
    if "--quiet" not in self.myopts:
        # Point the user at the blocker-handling documentation.
        show_blocker_docs_link()
def display(self, mylist, favorites=[], verbosity=None):
    """Render the merge list ``mylist`` to stdout in ``emerge -p`` style.

    Emits one line per package or blocker with status letters, colorized
    USE-flag changes, fetch sizes, repository markers and optional
    --tree indentation, then the repository legend and any requested
    changelogs.

    NOTE(review): this chunk is an elided listing -- the original line
    numbering jumps in many places, so initializations, ``try:`` /
    ``else:`` / ``continue`` lines and some branches are missing, and
    the indentation below is reconstructed and approximate.
    """
    # This is used to prevent display_problems() from
    # redundantly displaying this exact same merge list
    # again via _show_merge_list().
    self._displayed_list = mylist
    # Verbosity from option flags: --quiet -> 1, default -> 2, --verbose -> 3.
    if verbosity is None:
        verbosity = ("--quiet" in self.myopts and 1 or \
            "--verbose" in self.myopts and 3 or 2)
    favorites_set = InternalPackageSet(favorites)
    oneshot = "--oneshot" in self.myopts or \
        "--onlydeps" in self.myopts
    columns = "--columns" in self.myopts
    counters = PackageCounters()
    # In quiet mode USE strings are suppressed entirely; otherwise
    # create_use_string() renders per-flag change markup.
    if verbosity == 1 and "--verbose" not in self.myopts:
        def create_use_string(*args):
        # NOTE(review): the quiet-mode body and the `else:` introducing
        # the verbose variant are elided from this listing.
        def create_use_string(name, cur_iuse, iuse_forced, cur_use,
            is_new, reinst_flags,
            all_flags=(verbosity == 3 or "--quiet" in self.myopts),
            alphabetical=("--alphabetical" in self.myopts)):
            # Classify every flag ever seen (current or previous IUSE)
            # into enabled / disabled / removed, with color markup.
            cur_iuse = set(cur_iuse)
            enabled_flags = cur_iuse.intersection(cur_use)
            removed_iuse = set(old_iuse).difference(cur_iuse)
            any_iuse = cur_iuse.union(old_iuse)
            any_iuse = list(any_iuse)
            for flag in any_iuse:
                reinst_flag = reinst_flags and flag in reinst_flags
                if flag in enabled_flags:
                    # red = changed/new-install, yellow+"%*" = flag new
                    # to IUSE, green+"*" = toggled on since last merge.
                    if is_new or flag in old_use and \
                        (all_flags or reinst_flag):
                        flag_str = red(flag)
                    elif flag not in old_iuse:
                        flag_str = yellow(flag) + "%*"
                    elif flag not in old_use:
                        flag_str = green(flag) + "*"
                elif flag in removed_iuse:
                    # Flag dropped from IUSE since the installed version.
                    if all_flags or reinst_flag:
                        flag_str = yellow("-" + flag) + "%"
                        flag_str = "(" + flag_str + ")"
                    removed.append(flag_str)
                    # Disabled-flag markup (blue/yellow/green variants).
                    if is_new or flag in old_iuse and \
                        flag not in old_use and \
                        (all_flags or reinst_flag):
                        flag_str = blue("-" + flag)
                    elif flag not in old_iuse:
                        flag_str = yellow("-" + flag)
                        if flag not in iuse_forced:
                    elif flag in old_use:
                        flag_str = green("-" + flag) + "*"
                    # Forced (use.force/use.mask) flags are parenthesized.
                    if flag in iuse_forced:
                        flag_str = "(" + flag_str + ")"
                    enabled.append(flag_str)
                    disabled.append(flag_str)
            ret = " ".join(enabled)
            ret = " ".join(enabled + disabled + removed)
            ret = '%s="%s" ' % (name, ret)
    repo_display = RepoDisplay(self.roots)
    # Work on a copy of the dep graph so blocker edges can be added
    # purely for display purposes.
    mygraph = self.digraph.copy()
    # If there are any Uninstall instances, add the corresponding
    # blockers to the digraph (useful for --tree display).
    executed_uninstalls = set(node for node in mylist \
        if isinstance(node, Package) and node.operation == "unmerge")
    for uninstall in self._blocker_uninstalls.leaf_nodes():
        uninstall_parents = \
            self._blocker_uninstalls.parent_nodes(uninstall)
        if not uninstall_parents:
        # Remove the corresponding "nomerge" node and substitute
        # the Uninstall node.
        inst_pkg = self._pkg_cache[
            ("installed", uninstall.root, uninstall.cpv, "nomerge")]
        mygraph.remove(inst_pkg)
        inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
        inst_pkg_blockers = []
        # Break the Package -> Uninstall edges.
        mygraph.remove(uninstall)
        # Resolution of a package's blockers
        # depend on it's own uninstallation.
        for blocker in inst_pkg_blockers:
            mygraph.add(uninstall, blocker)
        # Expand Package -> Uninstall edges into
        # Package -> Blocker -> Uninstall edges.
        for blocker in uninstall_parents:
            mygraph.add(uninstall, blocker)
            for parent in self._blocker_parents.parent_nodes(blocker):
                if parent != inst_pkg:
                    mygraph.add(blocker, parent)
        # If the uninstall task did not need to be executed because
        # of an upgrade, display Blocker -> Upgrade edges since the
        # corresponding Blocker -> Uninstall edges will not be shown.
        self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
        if upgrade_node is not None and \
            uninstall not in executed_uninstalls:
            for blocker in uninstall_parents:
                mygraph.add(upgrade_node, blocker)
    unsatisfied_blockers = []
    if isinstance(x, Blocker) and not x.satisfied:
        unsatisfied_blockers.append(x)
    # --tree: compute each node's indentation depth by walking the
    # (display-augmented) graph edges.
    if "--tree" in self.myopts:
        depth = len(tree_nodes)
        while depth and graph_key not in \
            mygraph.child_nodes(tree_nodes[depth-1]):
        tree_nodes = tree_nodes[:depth]
        tree_nodes.append(graph_key)
        display_list.append((x, depth, True))
        shown_edges.add((graph_key, tree_nodes[depth-1]))
        traversed_nodes = set() # prevent endless circles
        traversed_nodes.add(graph_key)
        def add_parents(current_node, ordered):
            # Recursively emit the parents of current_node so the
            # --tree output shows why each package is pulled in.
            # Do not traverse to parents if this node is an
            # an argument or a direct member of a set that has
            # been specified as an argument (system or world).
            if current_node not in self._set_nodes:
                parent_nodes = mygraph.parent_nodes(current_node)
            child_nodes = set(mygraph.child_nodes(current_node))
            selected_parent = None
            # First, try to avoid a direct cycle.
            for node in parent_nodes:
                if not isinstance(node, (Blocker, Package)):
                if node not in traversed_nodes and \
                    node not in child_nodes:
                    edge = (current_node, node)
                    if edge in shown_edges:
                    selected_parent = node
            if not selected_parent:
                # A direct cycle is unavoidable.
                for node in parent_nodes:
                    if not isinstance(node, (Blocker, Package)):
                    if node not in traversed_nodes:
                        edge = (current_node, node)
                        if edge in shown_edges:
                        selected_parent = node
            shown_edges.add((current_node, selected_parent))
            traversed_nodes.add(selected_parent)
            add_parents(selected_parent, False)
            display_list.append((current_node,
                len(tree_nodes), ordered))
            tree_nodes.append(current_node)
        add_parents(graph_key, True)
        display_list.append((x, depth, True))
    mylist = display_list
    for x in unsatisfied_blockers:
        mylist.append((x, 0, True))
    last_merge_depth = 0
    # Walk backwards, dropping consecutive-duplicate / irrelevant
    # "nomerge" entries that --tree expansion produced.
    for i in xrange(len(mylist)-1,-1,-1):
        graph_key, depth, ordered = mylist[i]
        if not ordered and depth == 0 and i > 0 \
            and graph_key == mylist[i-1][0] and \
            mylist[i-1][1] == 0:
            # An ordered node got a consecutive duplicate when the tree was
        if ordered and graph_key[-1] != "nomerge":
            last_merge_depth = depth
        if depth >= last_merge_depth or \
            i < len(mylist) - 1 and \
            depth >= mylist[i+1][1]:
    from portage import flatten
    from portage.dep import use_reduce, paren_reduce
    # files to fetch list - avoids counting a same file twice
    # in size display (verbose mode)
    # Use this set to detect when all the "repoadd" strings are "[0]"
    # and disable the entire repo display in this case.
    # Main rendering loop: one iteration per merge-list entry.
    for mylist_index in xrange(len(mylist)):
        x, depth, ordered = mylist[mylist_index]
        portdb = self.trees[myroot]["porttree"].dbapi
        bindb = self.trees[myroot]["bintree"].dbapi
        vardb = self.trees[myroot]["vartree"].dbapi
        vartree = self.trees[myroot]["vartree"]
        pkgsettings = self.pkgsettings[myroot]
        indent = " " * depth
        if isinstance(x, Blocker):
            # Blocker entry: "b" = satisfied, "B" = unsatisfied.
            blocker_style = "PKG_BLOCKER_SATISFIED"
            addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
            blocker_style = "PKG_BLOCKER"
            addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
            counters.blocks += 1
            counters.blocks_satisfied += 1
            resolved = portage.key_expand(
                str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
            if "--columns" in self.myopts and "--quiet" in self.myopts:
                addl += " " + colorize(blocker_style, resolved)
            addl = "[%s %s] %s%s" % \
                (colorize(blocker_style, "blocks"),
                addl, indent, colorize(blocker_style, resolved))
            block_parents = self._blocker_parents.parent_nodes(x)
            block_parents = set([pnode[2] for pnode in block_parents])
            block_parents = ", ".join(block_parents)
            addl += colorize(blocker_style,
                " (\"%s\" is blocking %s)") % \
                (str(x.atom).lstrip("!"), block_parents)
            addl += colorize(blocker_style,
                " (is blocking %s)") % block_parents
            if isinstance(x, Blocker) and x.satisfied:
            blockers.append(addl)
            # Package entry: derive the status letters (N/NS/U/UD/R/I).
            pkg_merge = ordered and pkg_status == "merge"
            if not pkg_merge and pkg_status == "merge":
                pkg_status = "nomerge"
            built = pkg_type != "ebuild"
            installed = pkg_type == "installed"
            metadata = pkg.metadata
            repo_name = metadata["repository"]
            if pkg_type == "ebuild":
                ebuild_path = portdb.findname(pkg_key)
                if not ebuild_path: # shouldn't happen
                    raise portage.exception.PackageNotFound(pkg_key)
                # Repository root is three directories above the ebuild.
                repo_path_real = os.path.dirname(os.path.dirname(
                    os.path.dirname(ebuild_path)))
            repo_path_real = portdb.getRepositoryPath(repo_name)
            pkg_use = list(pkg.use.enabled)
            restrict = flatten(use_reduce(paren_reduce(
                pkg.metadata["RESTRICT"]), uselist=pkg_use))
            except portage.exception.InvalidDependString, e:
                if not pkg.installed:
                    show_invalid_depstring_notice(x,
                        pkg.metadata["RESTRICT"], str(e))
            # RESTRICT=fetch handling ("F"/"f" markers via counters).
            if "ebuild" == pkg_type and x[3] != "nomerge" and \
                "fetch" in restrict:
                counters.restrict_fetch += 1
                if portdb.fetch_check(pkg_key, pkg_use):
                    counters.restrict_fetch_satisfied += 1
            #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
            #param is used for -u, where you still *do* want to see when something is being upgraded.
            installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
            if vardb.cpv_exists(pkg_key):
                # Exact same version already installed -> reinstall "R".
                addl=" "+yellow("R")+fetch+" "
                counters.reinst += 1
            elif pkg_status == "uninstall":
                counters.uninst += 1
            # filter out old-style virtual matches
            elif installed_versions and \
                portage.cpv_getkey(installed_versions[0]) == \
                portage.cpv_getkey(pkg_key):
                myinslotlist = vardb.match(pkg.slot_atom)
                # If this is the first install of a new-style virtual, we
                # need to filter out old-style virtual matches.
                if myinslotlist and \
                    portage.cpv_getkey(myinslotlist[0]) != \
                    portage.cpv_getkey(pkg_key):
                myoldbest = myinslotlist[:]
                if not portage.dep.cpvequal(pkg_key,
                    portage.best([pkg_key] + myoldbest)):
                    # Downgrade in the same slot -> "UD".
                    addl += turquoise("U")+blue("D")
                    counters.downgrades += 1
                    addl += turquoise("U") + " "
                    counters.upgrades += 1
                # New slot, mark it new.
                addl = " " + green("NS") + fetch + " "
                myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
                counters.newslot += 1
            if "--changelog" in self.myopts:
                inst_matches = vardb.match(pkg.slot_atom)
                changelogs.extend(self.calc_changelog(
                    portdb.findname(pkg_key),
                    inst_matches[0], pkg_key))
            addl = " " + green("N") + " " + fetch + " "
            # Collect forced flags so create_use_string() can
            # parenthesize them.
            forced_flags = set()
            pkgsettings.setcpv(pkg) # for package.use.{mask,force}
            forced_flags.update(pkgsettings.useforce)
            forced_flags.update(pkgsettings.usemask)
            cur_use = [flag for flag in pkg.use.enabled \
                if flag in pkg.iuse.all]
            cur_iuse = sorted(pkg.iuse.all)
            if myoldbest and myinslotlist:
                previous_cpv = myoldbest[0]
                previous_cpv = pkg.cpv
            if vardb.cpv_exists(previous_cpv):
                # A previous version is installed: diff USE against it.
                old_iuse, old_use = vardb.aux_get(
                    previous_cpv, ["IUSE", "USE"])
                old_iuse = list(set(
                    filter_iuse_defaults(old_iuse.split())))
                old_use = old_use.split()
            old_use = [flag for flag in old_use if flag in old_iuse]
            use_expand = pkgsettings["USE_EXPAND"].lower().split()
            use_expand.reverse()
            use_expand_hidden = \
                pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
            def map_to_use_expand(myvals, forcedFlags=False,
                # Split a flat flag list into {USE_EXPAND name: flags};
                # optionally also return which of them are forced.
                for exp in use_expand:
                    for val in myvals[:]:
                        if val.startswith(exp.lower()+"_"):
                            if val in forced_flags:
                                forced[exp].add(val[len(exp)+1:])
                            ret[exp].append(val[len(exp)+1:])
                forced["USE"] = [val for val in myvals \
                    if val in forced_flags]
                for exp in use_expand_hidden:
            # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
            # are the only thing that triggered reinstallation.
            reinst_flags_map = {}
            reinstall_for_flags = self._reinstall_nodes.get(pkg)
            reinst_expand_map = None
            if reinstall_for_flags:
                reinst_flags_map = map_to_use_expand(
                    list(reinstall_for_flags), removeHidden=False)
                for k in list(reinst_flags_map):
                    if not reinst_flags_map[k]:
                        del reinst_flags_map[k]
                if not reinst_flags_map.get("USE"):
                    reinst_expand_map = reinst_flags_map.copy()
                    reinst_expand_map.pop("USE", None)
                if reinst_expand_map and \
                    not set(reinst_expand_map).difference(
                    use_expand_hidden = \
                        set(use_expand_hidden).difference(
            cur_iuse_map, iuse_forced = \
                map_to_use_expand(cur_iuse, forcedFlags=True)
            cur_use_map = map_to_use_expand(cur_use)
            old_iuse_map = map_to_use_expand(old_iuse)
            old_use_map = map_to_use_expand(old_use)
            use_expand.insert(0, "USE")
            for key in use_expand:
                if key in use_expand_hidden:
                verboseadd += create_use_string(key.upper(),
                    cur_iuse_map[key], iuse_forced[key],
                    cur_use_map[key], old_iuse_map[key],
                    old_use_map[key], is_new,
                    reinst_flags_map.get(key))
            if pkg_type == "ebuild" and pkg_merge:
                # Sum download sizes, counting each distfile only once.
                myfilesdict = portdb.getfetchsizes(pkg_key,
                    useflags=pkg_use, debug=self.edebug)
                except portage.exception.InvalidDependString, e:
                    src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
                    show_invalid_depstring_notice(x, src_uri, str(e))
                if myfilesdict is None:
                    myfilesdict="[empty/missing/bad digest]"
                for myfetchfile in myfilesdict:
                    if myfetchfile not in myfetchlist:
                        mysize+=myfilesdict[myfetchfile]
                        myfetchlist.append(myfetchfile)
                counters.totalsize += mysize
                verboseadd += format_size(mysize)
            # assign index for a previous version in the same slot
            has_previous = False
            repo_name_prev = None
            slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
            slot_matches = vardb.match(slot_atom)
            repo_name_prev = vardb.aux_get(slot_matches[0],
            # now use the data to generate output
            if pkg.installed or not has_previous:
                repoadd = repo_display.repoStr(repo_path_real)
                repo_path_prev = None
                repo_path_prev = portdb.getRepositoryPath(
                if repo_path_prev == repo_path_real:
                    repoadd = repo_display.repoStr(repo_path_real)
                # Repository changed since previous install: "old=>new".
                repoadd = "%s=>%s" % (
                    repo_display.repoStr(repo_path_prev),
                    repo_display.repoStr(repo_path_real))
            repoadd_set.add(repoadd)
            xs = [portage.cpv_getkey(pkg_key)] + \
                list(portage.catpkgsplit(pkg_key)[2:])
            if "COLUMNWIDTH" in self.settings:
                mywidth = int(self.settings["COLUMNWIDTH"])
                except ValueError, e:
                    portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
                    "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
                        self.settings["COLUMNWIDTH"], noiselevel=-1)
            oldlp = mywidth - 30
            # Convert myoldbest from a list to a string.
            for pos, key in enumerate(myoldbest):
                key = portage.catpkgsplit(key)[2] + \
                    "-" + portage.catpkgsplit(key)[3]
                if key[-3:] == "-r0":
                myoldbest[pos] = key
            myoldbest = blue("["+", ".join(myoldbest)+"]")
            root_config = self.roots[myroot]
            system_set = root_config.sets["system"]
            world_set = root_config.sets["world"]
            # System/world membership drives the PKG_*_SYSTEM/WORLD
            # color classes used by pkgprint() below.
            pkg_system = system_set.findAtomForPackage(pkg)
            pkg_world = world_set.findAtomForPackage(pkg)
            if not (oneshot or pkg_world) and \
                myroot == self.target_root and \
                favorites_set.findAtomForPackage(pkg):
                # Maybe it will be added to world now.
                if create_world_atom(pkg, favorites_set, root_config):
            except portage.exception.InvalidDependString:
                # This is reported elsewhere if relevant.
            def pkgprint(pkg_str):
                # Colorize according to merge/uninstall/nomerge status
                # and system/world membership.
                return colorize("PKG_MERGE_SYSTEM", pkg_str)
                return colorize("PKG_MERGE_WORLD", pkg_str)
                return colorize("PKG_MERGE", pkg_str)
                elif pkg_status == "uninstall":
                    return colorize("PKG_UNINSTALL", pkg_str)
                return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
                return colorize("PKG_NOMERGE_WORLD", pkg_str)
                return colorize("PKG_NOMERGE", pkg_str)
            properties = flatten(use_reduce(paren_reduce(
                pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
            except portage.exception.InvalidDependString, e:
                if not pkg.installed:
                    show_invalid_depstring_notice(pkg,
                        pkg.metadata["PROPERTIES"], str(e))
            # PROPERTIES=interactive -> "I" marker.
            interactive = "interactive" in properties
            if interactive and pkg.operation == "merge":
                addl = colorize("WARN", "I") + addl[1:]
                counters.interactive += 1
            # Assemble the final line; the layout differs for --columns,
            # --quiet and installed-vs-ebuild entries.
            if "--columns" in self.myopts:
                if "--quiet" in self.myopts:
                    myprint=addl+" "+indent+pkgprint(pkg_cp)
                    myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
                    myprint=myprint+myoldbest
                    myprint=myprint+darkgreen("to "+x[1])
                    myprint = "[%s] %s%s" % \
                        (pkgprint(pkg_status.ljust(13)),
                        indent, pkgprint(pkg.cp))
                    myprint = "[%s %s] %s%s" % \
                        (pkgprint(pkg.type_name), addl,
                        indent, pkgprint(pkg.cp))
                    if (newlp-nc_len(myprint)) > 0:
                        myprint=myprint+(" "*(newlp-nc_len(myprint)))
                    myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
                    if (oldlp-nc_len(myprint)) > 0:
                        myprint=myprint+" "*(oldlp-nc_len(myprint))
                    myprint=myprint+myoldbest
                    myprint += darkgreen("to " + pkg.root)
                myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
                myprint = "[" + pkg_type + " " + addl + "] "
                myprint += indent + pkgprint(pkg_key) + " " + \
                    myoldbest + darkgreen("to " + myroot)
            if "--columns" in self.myopts:
                if "--quiet" in self.myopts:
                    myprint=addl+" "+indent+pkgprint(pkg_cp)
                    myprint=myprint+" "+green(xs[1]+xs[2])+" "
                    myprint=myprint+myoldbest
                    myprint = "[%s] %s%s" % \
                        (pkgprint(pkg_status.ljust(13)),
                        indent, pkgprint(pkg.cp))
                    myprint = "[%s %s] %s%s" % \
                        (pkgprint(pkg.type_name), addl,
                        indent, pkgprint(pkg.cp))
                    if (newlp-nc_len(myprint)) > 0:
                        myprint=myprint+(" "*(newlp-nc_len(myprint)))
                    myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
                    if (oldlp-nc_len(myprint)) > 0:
                        myprint=myprint+(" "*(oldlp-nc_len(myprint)))
                    myprint += myoldbest
                myprint = "[%s] %s%s %s" % \
                    (pkgprint(pkg_status.ljust(13)),
                    indent, pkgprint(pkg.cpv),
                myprint = "[%s %s] %s%s %s" % \
                    (pkgprint(pkg_type), addl, indent,
                    pkgprint(pkg.cpv), myoldbest)
            if columns and pkg.operation == "uninstall":
            p.append((myprint, verboseadd, repoadd))
            # Warn that emerge restarts itself after a portage upgrade.
            if "--tree" not in self.myopts and \
                "--quiet" not in self.myopts and \
                not self._opts_no_restart.intersection(self.myopts) and \
                pkg.root == self._running_root.root and \
                portage.match_from_list(
                portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
                not vardb.cpv_exists(pkg.cpv) and \
                "--quiet" not in self.myopts:
                if mylist_index < len(mylist) - 1:
                    p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
                    p.append(colorize("WARN", " then resume the merge."))
    # Suppress the repo column when every entry comes from repo [0].
    show_repos = repoadd_set and repoadd_set != set(["0"])
    if isinstance(x, basestring):
        out.write("%s\n" % (x,))
        myprint, verboseadd, repoadd = x
        myprint += " " + verboseadd
        if show_repos and repoadd:
            myprint += " " + teal("[%s]" % repoadd)
        out.write("%s\n" % (myprint,))
    sys.stdout.write(str(repo_display))
    if "--changelog" in self.myopts:
        for revision,text in changelogs:
            print bold('*'+revision)
            sys.stdout.write(text)
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.

    All output goes to stderr, except for unsatisfied dependencies which
    go to stdout for parsing by programs such as autounmask.
    """
    # Note that show_masked_packages() sends it's output to
    # stdout, and some programs such as autounmask parse the
    # output in cases when emerge bails out. However, when
    # show_masked_packages() is called for installed packages
    # here, the message is a warning that is more appropriate
    # to send to stderr, so temporarily redirect stdout to
    # stderr. TODO: Fix output code so there's a cleaner way
    # to redirect everything to stderr.
    # NOTE(review): the surrounding try/finally that saves and restores
    # the real sys.stdout is elided from this listing -- stdout does
    # NOT stay redirected by design.
    sys.stdout = sys.stderr
    self._display_problems()
    # This goes to stdout for parsing by programs like autounmask.
    for pargs, kwargs in self._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
    """Emit graph problem reports, in order: circular deps, unsatisfied
    blockers (or slot collisions), world-file problems, missing ebuilds,
    package.provided conflicts, and masked installed packages.

    NOTE(review): elided listing -- accumulator initializations and
    some branch/continue lines are missing where the original line
    numbering jumps.
    """
    if self._circular_deps_for_display is not None:
        self._show_circular_deps(
            self._circular_deps_for_display)
    # The user is only notified of a slot conflict if
    # there are no unresolvable blocker conflicts.
    if self._unsatisfied_blockers_for_display is not None:
        self._show_unsatisfied_blockers(
            self._unsatisfied_blockers_for_display)
        self._show_slot_collision_notice()
    # TODO: Add generic support for "set problem" handlers so that
    # the below warnings aren't special cases for world only.
    if self._missing_args:
        world_problems = False
        if "world" in self._sets:
            # Filter out indirect members of world (from nested sets)
            # since only direct members of world are desired here.
            world_set = self.roots[self.target_root].sets["world"]
            for arg, atom in self._missing_args:
                if arg.name == "world" and atom in world_set:
                    world_problems = True
        sys.stderr.write("\n!!! Problems have been " + \
            "detected with your world file\n")
        sys.stderr.write("!!! Please run " + \
            green("emaint --check world")+"\n\n")
    if self._missing_args:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " Ebuilds for the following packages are either all\n")
        sys.stderr.write(colorize("BAD", "!!!") + \
            " masked or don't exist:\n")
        sys.stderr.write(" ".join(str(atom) for arg, atom in \
            self._missing_args) + "\n")
    if self._pprovided_args:
        # Group offending (argument, atom) pairs and remember which
        # sets pulled each one in.
        for arg, atom in self._pprovided_args:
            if isinstance(arg, SetArg):
                arg_atom = (atom, atom)
                arg_atom = (arg.arg, atom)
            refs = arg_refs.setdefault(arg_atom, [])
            if parent not in refs:
        msg.append(bad("\nWARNING: "))
        if len(self._pprovided_args) > 1:
            msg.append("Requested packages will not be " + \
                "merged because they are listed in\n")
            msg.append("A requested package will not be " + \
                "merged because it is listed in\n")
        msg.append("package.provided:\n\n")
        problems_sets = set()
        for (arg, atom), refs in arg_refs.iteritems():
            problems_sets.update(refs)
            ref_string = ", ".join(["'%s'" % name for name in refs])
            ref_string = " pulled in by " + ref_string
            msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
        if "world" in problems_sets:
            msg.append("This problem can be solved in one of the following ways:\n\n")
            msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
            msg.append(" B) Uninstall offending packages (cleans them from world).\n")
            msg.append(" C) Remove offending entries from package.provided.\n\n")
            msg.append("The best course of action depends on the reason that an offending\n")
            msg.append("package.provided entry exists.\n\n")
        sys.stderr.write("".join(msg))
    masked_packages = []
    for pkg in self._masked_installed:
        root_config = pkg.root_config
        pkgsettings = self.pkgsettings[pkg.root]
        mreasons = get_masking_status(pkg, pkgsettings, root_config)
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.metadata, mreasons))
    sys.stderr.write("\n" + colorize("BAD", "!!!") + \
        " The following installed packages are masked:\n")
    show_masked_packages(masked_packages)
def calc_changelog(self,ebuildpath,current,next):
    """Return (version, text) ChangeLog entries for the versions between
    ``current`` (installed) and ``next`` (about to be merged).

    NOTE(review): elided listing -- several guard/return statements and
    the try: around the file read are missing where the original line
    numbering jumps.
    """
    if ebuildpath == None or not os.path.exists(ebuildpath):
    # Strip the category and any "-r0" suffix so versions match the
    # headers produced by find_changelog_tags().
    current = '-'.join(portage.catpkgsplit(current)[1:])
    if current.endswith('-r0'):
        current = current[:-3]
    next = '-'.join(portage.catpkgsplit(next)[1:])
    if next.endswith('-r0'):
    changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
    changelog = open(changelogpath).read()
    except SystemExit, e:
        raise # Needed else can't exit
    divisions = self.find_changelog_tags(changelog)
    #print 'XX from',current,'to',next
    #for div,text in divisions: print 'XX',div
    # skip entries for all revisions above the one we are about to emerge
    for i in range(len(divisions)):
        if divisions[i][0]==next:
            divisions = divisions[i:]
    # find out how many entries we are going to display
    for i in range(len(divisions)):
        if divisions[i][0]==current:
            divisions = divisions[:i]
    # couldn't find the current revision in the list; display nothing
def find_changelog_tags(self,changelog):
    """Split a ChangeLog text into (version, entry_text) tuples by
    scanning for "*<version>" header lines; each release's text is the
    span up to the next header.

    NOTE(review): elided listing -- the accumulator initialization, the
    loop header and the no-more-matches exit path are missing here.
    """
    match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
    if release is not None:
        divs.append((release,changelog))
    if release is not None:
        divs.append((release,changelog[:match.start()]))
    changelog = changelog[match.end():]
    release = match.group(1)
    # Normalize header tokens: drop ".ebuild" and "-r0" suffixes.
    if release.endswith('.ebuild'):
        release = release[:-7]
    if release.endswith('-r0'):
        release = release[:-3]
def saveNomergeFavorites(self):
    """Find atoms in favorites that are not in the mergelist and add them
    to the world file if necessary.

    NOTE(review): elided listing -- early returns for pretend-style
    options, the world-set lock/unlock calls and several ``continue``
    statements are missing where the original line numbering jumps.
    """
    # Pretend-style options mean nothing should be recorded.
    for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
        "--oneshot", "--onlydeps", "--pretend"):
        if x in self.myopts:
    root_config = self.roots[self.target_root]
    world_set = root_config.sets["world"]
    world_locked = False
    if hasattr(world_set, "lock"):
    if hasattr(world_set, "load"):
        world_set.load() # maybe it's changed on disk
    args_set = self._sets["args"]
    portdb = self.trees[self.target_root]["porttree"].dbapi
    added_favorites = set()
    # Collect world atoms for "nomerge" nodes that were requested.
    for x in self._set_nodes:
        pkg_type, root, pkg_key, pkg_status = x
        if pkg_status != "nomerge":
        myfavkey = create_world_atom(x, args_set, root_config)
        if myfavkey in added_favorites:
        added_favorites.add(myfavkey)
        except portage.exception.InvalidDependString, e:
            writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                (pkg_key, str(e)), noiselevel=-1)
            writemsg("!!! see '%s'\n\n" % os.path.join(
                root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
    # Also record requested sets (other than args/world) as @set entries.
    for k in self._sets:
        if k in ("args", "world") or not root_config.sets[k].world_candidate:
        all_added.append(SETPREFIX + k)
    all_added.extend(added_favorites)
    print ">>> Recording %s in \"world\" favorites file..." % \
        colorize("INFORM", str(a))
    world_set.update(all_added)
def loadResumeCommand(self, resume_data, skip_masked=False):
    """
    Add a resume command to the graph and validate it in the process.  This
    will raise a PackageNotFound exception if a package is not available.
    """
    # NOTE(review): elided listing -- validation returns, ``continue``
    # statements, the mergelist loop header and the ``try:`` lines
    # around aux_get/graph construction are missing where the original
    # line numbering jumps; indentation is reconstructed and approximate.
    if not isinstance(resume_data, dict):
    mergelist = resume_data.get("mergelist")
    if not isinstance(mergelist, list):
    fakedb = self.mydbapi
    serialized_tasks = []
    # Re-create a Package instance for each mergelist entry, pulling
    # metadata from the matching tree's dbapi.
    if not (isinstance(x, list) and len(x) == 4):
    pkg_type, myroot, pkg_key, action = x
    if pkg_type not in self.pkg_tree_map:
    if action != "merge":
    tree_type = self.pkg_tree_map[pkg_type]
    mydb = trees[myroot][tree_type].dbapi
    db_keys = list(self._trees_orig[myroot][
        tree_type].dbapi._aux_cache_keys)
    metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
    # It does no exist or it is corrupt.
    if action == "uninstall":
    raise portage.exception.PackageNotFound(pkg_key)
    installed = action == "uninstall"
    built = pkg_type != "ebuild"
    root_config = self.roots[myroot]
    pkg = Package(built=built, cpv=pkg_key,
        installed=installed, metadata=metadata,
        operation=action, root_config=root_config,
    if pkg_type == "ebuild":
        # Recompute USE for ebuilds from the current configuration.
        pkgsettings = self.pkgsettings[myroot]
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
    self._pkg_cache[pkg] = pkg
    root_config = self.roots[pkg.root]
    if "merge" == pkg.operation and \
        not visible(root_config.settings, pkg):
        # Package became masked since the resume list was saved.
        masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
        self._unsatisfied_deps_for_display.append(
            ((pkg.root, "="+pkg.cpv), {"myparent":None}))
    fakedb[myroot].cpv_inject(pkg)
    serialized_tasks.append(pkg)
    self.spinner.update()
    if self._unsatisfied_deps_for_display:
    if not serialized_tasks or "--nodeps" in self.myopts:
        self._serialized_tasks_cache = serialized_tasks
        self._scheduler_graph = self.digraph
        self._select_package = self._select_pkg_from_graph
        self.myparams.add("selective")
        favorites = resume_data.get("favorites")
        args_set = self._sets["args"]
        if isinstance(favorites, list):
            args = self._load_favorites(favorites)
        for task in serialized_tasks:
            if isinstance(task, Package) and \
                task.operation == "merge":
                if not self._add_pkg(task, None):
        # Packages for argument atoms need to be explicitly
        # added via _add_pkg() so that they are included in the
        # digraph (needed at least for --tree display).
        for atom in arg.set:
            pkg, existing_node = self._select_package(
                arg.root_config.root, atom)
            if existing_node is None and \
                if not self._add_pkg(pkg, Dependency(atom=atom,
                    root=pkg.root, parent=arg)):
        # Allow unsatisfied deps here to avoid showing a masking
        # message for an unsatisfied dep that isn't necessarily
        if not self._create_graph(allow_unsatisfied=True):
        if masked_tasks or self._unsatisfied_deps:
            # This probably means that a required package
            # was dropped via --skipfirst. It makes the
            # resume list invalid, so convert it to a
            # UnsatisfiedResumeDep exception.
            raise self.UnsatisfiedResumeDep(self,
                masked_tasks + self._unsatisfied_deps)
        self._serialized_tasks_cache = None
    except self._unknown_internal_error:
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.

	@param favorites: list of strings; set names are prefixed
		with SETPREFIX, everything else is treated as an atom
	"""
	root_config = self.roots[self.target_root]
	getSetAtoms = root_config.setconfig.getSetAtoms
	sets = root_config.sets
	# Skip anything that is not a plain string.
	if not isinstance(x, basestring):
	if x in ("system", "world"):
	if x.startswith(SETPREFIX):
		s = x[len(SETPREFIX):]
		# Recursively expand sets so that containment tests in
		# self._get_parent_sets() properly match atoms in nested
		# sets (like if world contains system).
		expanded_set = InternalPackageSet(
			initial_atoms=getSetAtoms(s))
		self._sets[s] = expanded_set
		args.append(SetArg(arg=x, set=expanded_set,
			root_config=root_config))
	if not portage.isvalidatom(x):
	args.append(AtomArg(arg=x, atom=x,
		root_config=root_config))
	# Create the "args" package set from atoms and
	# packages given as arguments.
	args_set = self._sets["args"]
	if not isinstance(arg, (AtomArg, PackageArg)):
	if myatom in args_set:
	args_set.add(myatom)
	self._set_atoms.update(chain(*self._sets.itervalues()))
	# Build the reverse mapping from (atom, root) to the argument(s)
	# that pulled the atom in.
	atom_arg_map = self._atom_arg_map
	for atom in arg.set:
		atom_key = (atom, arg.root_config.root)
		refs = atom_arg_map.get(atom_key)
		atom_arg_map[atom_key] = refs
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so the handler can
		# inspect the failed dependencies in context.
		self.depgraph = depgraph

class _internal_exception(portage.exception.PortageException):
	# Base class for exceptions used purely for internal control flow.
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)

class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	is not carried by this exception.
	"""

class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _dep_check_composite_db(portage.dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	in dep_check().
	"""
	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# Memoizes match() results per atom.
		self._match_cache = {}
		# Maps cpv -> Package instance for aux_get() lookups.
		self._cpv_pkg_map = {}

	def match(self, atom):
		# Serve from the cache when this atom has been seen before.
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
		# Resolve one package per remaining slot via slot atoms.
		slot_atom = "%s:%s" % (atom_cp, slots.pop())
		pkg, existing = self._depgraph._select_package(
			self._root, slot_atom)
		if not self._visible(pkg):
		self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# Installed packages are hidden unless "selective" mode is on.
		if pkg.installed and "selective" not in self._depgraph.myparams:
		arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
		except (StopIteration, portage.exception.InvalidDependString):
		self._depgraph.pkgsettings[pkg.root], pkg):
		except portage.exception.InvalidDependString:

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		an ambiguity error.
		"""
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			# Prefer non-virtual matches when the expansion is ambiguous.
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[portage.dep_getkey(x) for x in expanded_atoms])
		atom = expanded_atoms[0]
		# No match at all: decide between a "virtual" and a "null"
		# category placeholder based on PROVIDE data.
		null_atom = insert_category_into_atom(atom, "null")
		null_cp = portage.dep_getkey(null_atom)
		cat, atom_pn = portage.catsplit(null_cp)
		virts_p = root_config.settings.get_virts_p().get(atom_pn)
		# Allow the resolver to choose which virtual.
		atom = insert_category_into_atom(atom, "virtual")
		atom = insert_category_into_atom(atom, "null")

	def aux_get(self, cpv, wants):
		# Serve metadata from packages recorded by match(); missing
		# keys yield empty strings.
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
class _package_cache(dict):
	# Maps Package instances to themselves; insertion also injects
	# visible packages into the owning root's visible_pkgs db.
	def __init__(self, depgraph):
		self._depgraph = depgraph

	def __setitem__(self, k, v):
		dict.__setitem__(self, k, v)
		root_config = self._depgraph.roots[v.root]
		# Only visible packages with no missing keywords (for installed
		# packages) are exposed through visible_pkgs.
		if visible(root_config.settings, v) and \
			not (v.installed and \
			v.root_config.settings._getMissingKeywords(v.cpv, v.metadata)):
			root_config.visible_pkgs.cpv_inject(v)
		except portage.exception.InvalidDependString:
class RepoDisplay(object):
	# Assigns short numeric indices to repository paths so merge-list
	# output can tag each package with its source repo.
	def __init__(self, roots):
		# Maps repo_path -> display index, in order of first use.
		self._shown_repos = {}
		self._unknown_repo = False
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Lookups are done against realpaths since repo locations
		# may be reached through symlinks.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# NOTE(review): list.index() raises ValueError rather than
		# returning -1; the -1 check below presumably pairs with an
		# except clause elided from this view — confirm in full source.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		# First time this repo is displayed: assign the next index.
		index = len(shown_repos)
		shown_repos[repo_path] = index

	shown_repos = self._shown_repos
	unknown_repo = self._unknown_repo
	if shown_repos or self._unknown_repo:
		output.append("Portage tree and overlays:\n")
		# Invert the repo_path -> index mapping for ordered display.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
	return "".join(output)
class PackageCounters(object):
	# Accumulates per-category counts (upgrades, downgrades, new,
	# reinstalls, uninstalls, blocks, fetch restrictions, interactive)
	# and renders them as the summary line shown after a merge list.
	self.blocks_satisfied = 0
	self.restrict_fetch = 0
	self.restrict_fetch_satisfied = 0
	self.interactive = 0
	# Total of all install-type actions (uninstalls excluded).
	total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
	myoutput.append("Total: %s package" % total_installs)
	# Pluralize and open the details parenthesis only when needed.
	if total_installs != 1:
		myoutput.append("s")
	if total_installs != 0:
		myoutput.append(" (")
	if self.upgrades > 0:
		details.append("%s upgrade" % self.upgrades)
		if self.upgrades > 1:
	if self.downgrades > 0:
		details.append("%s downgrade" % self.downgrades)
		if self.downgrades > 1:
	details.append("%s new" % self.new)
	if self.newslot > 0:
		details.append("%s in new slot" % self.newslot)
		if self.newslot > 1:
	details.append("%s reinstall" % self.reinst)
	details.append("%s uninstall" % self.uninst)
	if self.interactive > 0:
		details.append("%s %s" % (self.interactive,
			colorize("WARN", "interactive")))
	myoutput.append(", ".join(details))
	if total_installs != 0:
		myoutput.append(")")
	myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
	if self.restrict_fetch:
		myoutput.append("\nFetch Restriction: %s package" % \
			self.restrict_fetch)
		if self.restrict_fetch > 1:
			myoutput.append("s")
	# Highlight any fetch restrictions that remain unsatisfied.
	if self.restrict_fetch_satisfied < self.restrict_fetch:
		myoutput.append(bad(" (%s unsatisfied)") % \
			(self.restrict_fetch - self.restrict_fetch_satisfied))
	myoutput.append("\nConflict: %s block" % \
	myoutput.append("s")
	if self.blocks_satisfied < self.blocks:
		myoutput.append(bad(" (%s unsatisfied)") % \
			(self.blocks - self.blocks_satisfied))
	return "".join(myoutput)
class PollConstants(object):
	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""
	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# Copy each constant from the select module when available,
	# otherwise fall back to a default value v (assigned in lines
	# elided from this view).
	locals()[k] = getattr(select, k, v)
class PollSelectAdapter(PollConstants):
	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""
	# _registered maps fd -> eventmask; _select_args caches the
	# three argument lists passed to select.select().
	self._registered = {}
	self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
			"register expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		self._registered[fd] = eventmask
		# Invalidate the cached select() argument lists.
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
			"poll expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		select_args = self._select_args
		if select_args is None:
			# Rebuild (and implicitly re-cache) the fd list after a
			# register/unregister invalidated it.
			select_args = [self._registered.keys(), [], []]
		if timeout is not None:
			# Copy before appending a timeout so the cached list
			# is not modified.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			# poll     | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			# select   | seconds      | omitted
			# ---------|--------------|------------------------------
			if timeout is not None and timeout < 0:
			if timeout is not None:
				# NOTE(review): on Python 2 this is integer division
				# when timeout is an int, truncating sub-second
				# timeouts to 0 — confirm callers pass floats or
				# millisecond values >= 1000 where precision matters.
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# Only readability (POLLIN) events are reported.
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	# FIFO queue of asynchronous tasks, started up to max_jobs at a time.

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		# Default for max_jobs is assigned when the caller gave none.
		if self.max_jobs is None:

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Queue the task at the head so it runs next.
		self._task_queue.appendleft(task)

	if self._scheduling:
		# Ignore any recursive schedule() calls triggered via
		# self._task_exit().
	self._scheduling = True
	task_queue = self._task_queue
	running_tasks = self.running_tasks
	max_jobs = self.max_jobs
	state_changed = False
	# Start queued tasks until the queue drains or the job limit is
	# reached; max_jobs is True means unlimited concurrency.
	while task_queue and \
		(max_jobs is True or len(running_tasks) < max_jobs):
		task = task_queue.popleft()
		cancelled = getattr(task, "cancelled", None)
		running_tasks.add(task)
		# The exit listener prunes the running set when a task ends.
		task.addExitListener(self._task_exit)
		state_changed = True
	self._scheduling = False
	return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:

	# Clear pending tasks and detach listeners from running ones.
	self._task_queue.clear()
	running_tasks = self.running_tasks
	while running_tasks:
		task = running_tasks.pop()
		task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		# Truthy while any task is queued or still running.
		return bool(self._task_queue or self.running_tasks)

	return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None until first call.
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	global _can_poll_device
	if _can_poll_device is not None:
		return _can_poll_device

	# No poll() at all on this platform.
	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	dev_null = open('/dev/null', 'rb')
	_can_poll_device = False
	return _can_poll_device

	# Probe by polling /dev/null and checking for POLLNVAL.
	p.register(dev_null.fileno(), PollConstants.POLLIN)
	invalid_request = False
	for f, event in p.poll():
		# POLLNVAL means the fd could not be polled on this platform.
		if event & PollConstants.POLLNVAL:
			invalid_request = True
	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Return a poll-like object for event waiting.

	When the platform's poll() works on devices, the real
	select.poll() is returned; otherwise a PollSelectAdapter
	is used to emulate it on top of select().
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
class PollScheduler(object):

	class _sched_iface_class(SlotObject):
		# Bundles the callbacks that tasks use to interact with the
		# scheduler's poll loop.
		__slots__ = ("register", "schedule", "unregister")

	self._max_load = None
	# Events popped from the poll object are buffered here so that
	# nested scheduling loops can share a single poll() call.
	self._poll_event_queue = []
	self._poll_event_handlers = {}
	self._poll_event_handler_ids = {}
	# Increment id for each new handler.
	self._event_handler_id = 0
	self._poll_obj = create_poll_instance()
	self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
		self._scheduling = True
		return self._schedule_tasks()
		self._scheduling = False

	def _running_job_count(self):

	def _can_add_job(self):
		max_jobs = self._max_jobs
		max_load = self._max_load
		# Respect the hard job limit first (True means unlimited).
		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:
		# Load-average throttling only applies when more than one job
		# is allowed and at least one job is already running.
		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
			avg1, avg5, avg15 = os.getloadavg()
		except (AttributeError, OSError), e:
			writemsg("!!! getloadavg() failed: %s\n" % (e,),
		if avg1 >= max_load:

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
		if timeout is None and \
			not self._poll_event_handlers:
			raise StopIteration(
				"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
		self._poll_event_queue.extend(self._poll_obj.poll(timeout))
		except select.error, e:
			writemsg_level("\n!!! select error: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			if timeout is not None:

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
		return self._poll_event_queue.pop()

	def _poll_loop(self):
		# Dispatch events to handlers until none remain registered.
		event_handlers = self._poll_event_handlers
		event_handled = False
		while event_handlers:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True
		# A pass that handled nothing would spin forever; fail loudly.
		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers
		if not event_handlers:
			return bool(events_handled)
		if not self._poll_event_queue:
		# Drain only the events already queued; do not block.
		while event_handlers and self._poll_event_queue:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
		except StopIteration:
		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		Register a handler for poll events on file descriptor f.

		@return: A unique registration id, for use in schedule() or
			unregister() calls
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)

	def _unregister(self, reg_id):
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_ids is no longer registered.

		@param wait_ids: a task id (or collection of ids) to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False
		# Accept either a single id or a collection of ids.
		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])
		while wait_ids.intersection(handler_ids):
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True
		return event_handled
class QueueScheduler(PollScheduler):
	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	"""
	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		if max_jobs is None:
		self._max_jobs = max_jobs
		self._max_load = max_load
		# Expose the registration/scheduling callbacks as the public
		# scheduler interface.
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)
		self._schedule_listeners = []
	self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

	# Keep scheduling until nothing can be started, then wait for the
	# remaining running jobs to finish.
	while self._schedule():
	while self._running_job_count():

	def _schedule_tasks(self):
		"""
		Start as many jobs as the limits allow.

		@returns: True if there may be remaining tasks to schedule,
			False otherwise
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			if not self._start_next_job(n):
		for q in self._queues:

	def _running_job_count(self):
		# Sum the running tasks across all managed queues.
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count

	def _start_next_job(self, n=1):
		# Ask each queue in turn to start work, counting how many jobs
		# actually started, until n new jobs are running.
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
		return started_count
class TaskScheduler(object):
	"""
	A simple way to handle scheduling of AsynchrousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""
	def __init__(self, max_jobs=None, max_load=None):
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		# Expose the underlying scheduler's interface and run() directly.
		self.sched_iface = self._scheduler.sched_iface
		self.run = self._scheduler.run
		self._scheduler.add(self._queue)

	def add(self, task):
		self._queue.add(task)

	# Delegate scheduling to the wrapped queue scheduler.
	self._scheduler.schedule()
class JobStatusDisplay(object):
	# Renders the one-line "Jobs: x of y complete ... Load avg: ..."
	# status display, using termcap codes when stdout is a tty.

	# Assigning any of these attributes triggers _property_change().
	_bound_properties = ("curval", "failed", "running")
	# Fixed width of the jobs column, used for padding before "Load avg:".
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {
	# Maps our logical code names to termcap capability names.
	_termcap_name_map = {
		'carriage_return' : 'cr',

	def __init__(self, out=sys.stdout, quiet=False):
		# object.__setattr__ is used because __setattr__ is overridden
		# below to intercept bound-property changes.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)
		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		# Fall back to the default codes when output is not a tty or
		# termcap initialization fails.
		if not isatty or not self._init_term():
		for k, capname in self._termcap_name_map.iteritems():
			term_codes[k] = self._default_term_codes[capname]
		object.__setattr__(self, "_term_codes", term_codes)

	def _init_term(self):
		"""
		Initialize term control codes.

		@returns: True if term codes were successfully initialized,
			False otherwise
		"""
		term_type = os.environ.get("TERM", "vt100")
		curses.setupterm(term_type, self.out.fileno())
		tigetstr = curses.tigetstr
		except curses.error:
		if tigetstr is None:
		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			# Use the default code when the terminal lacks this
			# capability.
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)

	def _format_msg(self, msg):
		return ">>> %s" % msg

	# Erase the current line: carriage return plus clear-to-end-of-line.
	self._term_codes['carriage_return'] + \
	self._term_codes['clr_eol'])
	self._displayed = False

	def _display(self, line):
		self.out.write(line)
		self._displayed = True

	def _update(self, msg):
		# Non-tty output just appends full lines.
		if not self._isatty:
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			self._displayed = True
		self._display(self._format_msg(msg))

	def displayMessage(self, msg):
		was_displayed = self._displayed
		# Erase the status line before printing the message on a tty.
		if self._isatty and self._displayed:
		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False
		self._changed = True

	# Reset all bound counters to zero.
	for name in self._bound_properties:
		object.__setattr__(self, name, 0)
	self.out.write(self._term_codes['newline'])
	self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		# Ignore no-op assignments.
		if value == old_value:
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		# Mark the display dirty so the next update refreshes it.
		self._changed = True

	def _load_avg_str(self):
		avg = os.getloadavg()
		except (AttributeError, OSError), e:
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

	"""
	Display status on stdout, but only if something has
	changed since the last call.
	"""
	current_time = time.time()
	time_delta = current_time - self._last_display_time
	# Rate-limit tty refreshes to once per _min_display_latency seconds.
	if self._displayed and \
	if not self._isatty:
	if time_delta < self._min_display_latency:
	self._last_display_time = current_time
	self._changed = False
	self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render both a colorized and a plain copy of the line; the
		# plain copy is used for width measurement and truncation.
		color_output = StringIO.StringIO()
		plain_output = StringIO.StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)
		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		f.add_literal_data(" complete")
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(running_str)
		f.add_literal_data(" running")
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(failed_str)
		f.add_literal_data(" failed")
		# Pad out the jobs column so "Load avg:" aligns.
		padding = self._jobs_column_width - len(plain_output.getvalue())
		f.add_literal_data(padding * " ")
		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			# color codes.
			self._update(plain_output[:self.width])
		self._update(color_output.getvalue())
		# Mirror the status into the xterm title bar.
		xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):

	# With these options, blocker conflicts are not fatal.
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# With these options, jobs are never run in the background.
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that should not be combined with --resume.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	_fetch_log = "/var/log/emerge-fetch.log"

	class _iface_class(SlotObject):
		# Callback interface handed to merge/build tasks.
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",

	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	# One SequentialTaskQueue per task category.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")

	class _build_opts_class(SlotObject):
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")

	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")

	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			self._allocate = allocate
			self._deallocate = deallocate
		# Borrow a config instance for this pool's root.
		return self._allocate(self._root)
		def deallocate(self, settings):
			self._deallocate(settings)

	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
# Scheduler constructor: wires together option parsing, task queues, status
# display, per-root config/blocker state, the fetch/merge scheduler interface,
# parallel-fetch setup, and detection of the currently running portage package.
# NOTE(review): several lines are elided in this view (residue numbering
# jumps, e.g. the `for root in trees:` header before 9150 and the try/except
# around the fetch-log truncation) — confirm against upstream.
9108 def __init__(self, settings, trees, mtimedb, myopts,
9109 spinner, mergelist, favorites, digraph):
9110 PollScheduler.__init__(self)
9111 self.settings = settings
9112 self.target_root = settings["ROOT"]
9114 self.myopts = myopts
9115 self._spinner = spinner
9116 self._mtimedb = mtimedb
9117 self._mergelist = mergelist
9118 self._favorites = favorites
9119 self._args_set = InternalPackageSet(favorites)
# Map each slot of the opts holder classes to True/False depending on
# whether the corresponding "--long-option" is present in myopts.
9120 self._build_opts = self._build_opts_class()
9121 for k in self._build_opts.__slots__:
9122 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9123 self._binpkg_opts = self._binpkg_opts_class()
9124 for k in self._binpkg_opts.__slots__:
9125 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9128 self._logger = self._emerge_log_class()
# One SequentialTaskQueue per allowed queue name (fetch, merge, jobs, ...).
9129 self._task_queues = self._task_queues_class()
9130 for k in self._task_queues.allowed_keys:
9131 setattr(self._task_queues, k,
9132 SequentialTaskQueue())
9133 self._status_display = JobStatusDisplay()
9134 self._max_load = myopts.get("--load-average")
9135 max_jobs = myopts.get("--jobs")
9136 if max_jobs is None:
9138 self._set_max_jobs(max_jobs)
9140 # The root where the currently running
9141 # portage instance is installed.
9142 self._running_root = trees["/"]["root_config"]
9144 if settings.get("PORTAGE_DEBUG", "") == "1":
# Per-root state: cloned config pools and blocker databases.
9146 self.pkgsettings = {}
9147 self._config_pool = {}
9148 self._blocker_db = {}
9150 self._config_pool[root] = []
9151 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
# Build the scheduler interface handed to merge/fetch tasks so they can
# schedule work and log through this Scheduler instance.
9153 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9154 schedule=self._schedule_fetch)
9155 self._sched_iface = self._iface_class(
9156 dblinkEbuildPhase=self._dblink_ebuild_phase,
9157 dblinkDisplayMerge=self._dblink_display_merge,
9158 dblinkElog=self._dblink_elog,
9159 fetch=fetch_iface, register=self._register,
9160 schedule=self._schedule_wait,
9161 scheduleSetup=self._schedule_setup,
9162 scheduleUnpack=self._schedule_unpack,
9163 scheduleYield=self._schedule_yield,
9164 unregister=self._unregister)
# Weak values so finished prefetchers can be garbage collected.
9166 self._prefetchers = weakref.WeakValueDictionary()
9167 self._pkg_queue = []
9168 self._completed_tasks = set()
9170 self._failed_pkgs = []
9171 self._failed_pkgs_all = []
9172 self._failed_pkgs_die_msgs = []
9173 self._post_mod_echo_msgs = []
9174 self._parallel_fetch = False
# Progress counters track only real "merge" operations, not uninstalls.
9175 merge_count = len([x for x in mergelist \
9176 if isinstance(x, Package) and x.operation == "merge"])
9177 self._pkg_count = self._pkg_count_class(
9178 curval=0, maxval=merge_count)
9179 self._status_display.maxval = self._pkg_count.maxval
9181 # The load average takes some time to respond when new
9182 # jobs are added, so we need to limit the rate of adding
# Parameters for the exponential job-start throttle used by _job_delay().
9184 self._job_delay_max = 10
9185 self._job_delay_factor = 1.0
9186 self._job_delay_exp = 1.5
9187 self._previous_job_start_time = None
9189 self._set_digraph(digraph)
9191 # This is used to memoize the _choose_pkg() result when
9192 # no packages can be chosen until one of the existing
9194 self._choose_pkg_return_early = False
# parallel-fetch requires distlocks; warn and stay disabled otherwise.
9196 features = self.settings.features
9197 if "parallel-fetch" in features and \
9198 not ("--pretend" in self.myopts or \
9199 "--fetch-all-uri" in self.myopts or \
9200 "--fetchonly" in self.myopts):
9201 if "distlocks" not in features:
9202 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9203 portage.writemsg(red("!!!")+" parallel-fetching " + \
9204 "requires the distlocks feature enabled"+"\n",
9206 portage.writemsg(red("!!!")+" you have it disabled, " + \
9207 "thus parallel-fetching is being disabled"+"\n",
9209 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9210 elif len(mergelist) > 1:
9211 self._parallel_fetch = True
9213 if self._parallel_fetch:
9214 # clear out existing fetch log if it exists
# Truncate the fetch log; EnvironmentError is tolerated (best effort).
9216 open(self._fetch_log, 'w')
9217 except EnvironmentError:
# Identify the installed sys-apps/portage package so merges of portage
# itself can trigger a restart (see _is_restart_necessary).
9220 self._running_portage = None
9221 portage_match = self._running_root.trees["vartree"].dbapi.match(
9222 portage.const.PORTAGE_PACKAGE_ATOM)
9224 cpv = portage_match.pop()
9225 self._running_portage = self._pkg(cpv, "installed",
9226 self._running_root, installed=True)
# Thin wrapper delegating to the base class poll loop.
# NOTE(review): one line between these two is elided from this view.
9228 def _poll(self, timeout=None):
9230 PollScheduler._poll(self, timeout=timeout)
9232 def _set_max_jobs(self, max_jobs):
9233 self._max_jobs = max_jobs
9234 self._task_queues.jobs.max_jobs = max_jobs
# Decide whether output should run in "background" (quiet) mode, and force
# --jobs=1 when interactive packages are present so they can use the tty.
# NOTE(review): docstring delimiters and a msg-list initialization appear
# elided from this view (residue numbering jumps).
9236 def _background_mode(self):
9238 Check if background mode is enabled and adjust states as necessary.
9241 @returns: True if background mode is enabled, False otherwise.
# Background when parallelism or --quiet is requested, unless an option
# from _opts_no_background (e.g. --ask/--pretend) is present.
9243 background = (self._max_jobs is True or \
9244 self._max_jobs > 1 or "--quiet" in self.myopts) and \
9245 not bool(self._opts_no_background.intersection(self.myopts))
9248 interactive_tasks = self._get_interactive_tasks()
9249 if interactive_tasks:
9251 writemsg_level(">>> Sending package output to stdio due " + \
9252 "to interactive package(s):\n",
9253 level=logging.INFO, noiselevel=-1)
9255 for pkg in interactive_tasks:
9256 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
9258 pkg_str += " for " + pkg.root
9261 writemsg_level("".join("%s\n" % (l,) for l in msg),
9262 level=logging.INFO, noiselevel=-1)
# Interactive packages need the terminal, so serialize jobs.
9263 if self._max_jobs is True or self._max_jobs > 1:
9264 self._set_max_jobs(1)
9265 writemsg_level(">>> Setting --jobs=1 due " + \
9266 "to the above interactive package(s)\n",
9267 level=logging.INFO, noiselevel=-1)
9269 self._status_display.quiet = \
9271 ("--quiet" in self.myopts and \
9272 "--verbose" not in self.myopts)
9274 self._logger.xterm_titles = \
9275 "notitles" not in self.settings.features and \
9276 self._status_display.quiet
# Collect mergelist packages whose PROPERTIES metadata contains
# "interactive" (after USE-conditional reduction).
# NOTE(review): a `continue` and a `try:` line appear elided from this view.
9280 def _get_interactive_tasks(self):
9281 from portage import flatten
9282 from portage.dep import use_reduce, paren_reduce
9283 interactive_tasks = []
9284 for task in self._mergelist:
# Only real merge operations on Package instances are considered.
9285 if not (isinstance(task, Package) and \
9286 task.operation == "merge"):
9289 properties = flatten(use_reduce(paren_reduce(
9290 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
# A malformed PROPERTIES string aborts scheduling entirely.
9291 except portage.exception.InvalidDependString, e:
9292 show_invalid_depstring_notice(task,
9293 task.metadata["PROPERTIES"], str(e))
9294 raise self._unknown_internal_error()
9295 if "interactive" in properties:
9296 interactive_tasks.append(task)
9297 return interactive_tasks
# Keep the dependency graph only when it is useful: with --nodeps or a
# single job the scheduler runs strictly in list order and the graph is
# dropped to save memory.
# NOTE(review): a `return` after the None assignment appears elided.
9299 def _set_digraph(self, digraph):
9300 if "--nodeps" in self.myopts or \
9301 (self._max_jobs is not True and self._max_jobs < 2):
9303 self._digraph = None
9306 self._digraph = digraph
9307 self._prune_digraph()
# Iteratively remove root nodes that no longer matter for scheduling:
# non-Package nodes, installed/nomerge nodes, and completed tasks.
# NOTE(review): the loop header and termination `break` appear elided from
# this view; the visible code suggests repeated pruning until a fixpoint.
9309 def _prune_digraph(self):
9311 Prune any root nodes that are irrelevant.
9314 graph = self._digraph
9315 completed_tasks = self._completed_tasks
9316 removed_nodes = set()
9318 for node in graph.root_nodes():
9319 if not isinstance(node, Package) or \
9320 (node.installed and node.operation == "nomerge") or \
9322 node in completed_tasks:
9323 removed_nodes.add(node)
9325 graph.difference_update(removed_nodes)
9326 if not removed_nodes:
# Reset the working set for the next pruning pass.
9328 removed_nodes.clear()
# Raised by unmerge() on uninstallation failure; the first positional
# argument is kept as the exit status.
# NOTE(review): docstring delimiters and a default-status line appear elided.
9330 class _pkg_failure(portage.exception.PortageException):
9332 An instance of this class is raised by unmerge() when
9333 an uninstallation fails.
9336 def __init__(self, *pargs):
9337 portage.exception.PortageException.__init__(self, pargs)
9339 self.status = pargs[0]
# Fetchers share one log file, so they are serialized on the fetch queue;
# addFront gives priority over background prefetchers.
9341 def _schedule_fetch(self, fetcher):
9343 Schedule a fetcher on the fetch queue, in order to
9344 serialize access to the fetch log.
9346 self._task_queues.fetch.addFront(fetcher)
# Setup phases run unsandboxed, so they are serialized on the merge queue.
9348 def _schedule_setup(self, setup_phase):
9350 Schedule a setup phase on the merge queue, in order to
9351 serialize unsandboxed access to the live filesystem.
9353 self._task_queues.merge.addFront(setup_phase)
# Unpack phases of live ebuilds share $DISTDIR, so they are serialized.
9356 def _schedule_unpack(self, unpack_phase):
9358 Schedule an unpack phase on the unpack queue, in order
9359 to serialize $DISTDIR access for live ebuilds.
9361 self._task_queues.unpack.add(unpack_phase)
# Returns a callable (the vdb lock must already be held when it runs).
# NOTE(review): the lambda/closure wrapper appears elided from this view;
# only the delegated call is visible.
9363 def _find_blockers(self, new_pkg):
9365 Returns a callable which should be called only when
9366 the vdb lock has been acquired.
9369 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
# Build dblink objects for installed packages that block new_pkg, skipping
# same-slot/same-cpv matches (those are replacements, not blockers).
# NOTE(review): early returns and `continue` lines appear elided.
9372 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9373 if self._opts_ignore_blockers.intersection(self.myopts):
9376 # Call gc.collect() here to avoid heap overflow that
9377 # triggers 'Cannot allocate memory' errors (reported
9382 blocker_db = self._blocker_db[new_pkg.root]
9384 blocker_dblinks = []
9385 for blocking_pkg in blocker_db.findInstalledBlockers(
9386 new_pkg, acquire_lock=acquire_lock):
# A package never blocks its own slot/cpv replacement.
9387 if new_pkg.slot_atom == blocking_pkg.slot_atom:
9389 if new_pkg.cpv == blocking_pkg.cpv:
9391 blocker_dblinks.append(portage.dblink(
9392 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9393 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9394 vartree=self.trees[blocking_pkg.root]["vartree"]))
9398 return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""Map a dblink instance back to the corresponding Package object.

	The dblink's tree type determines the package type name, and a type
	of "installed" implies the installed flag.
	"""
	type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	root_config = self.trees[pkg_dblink.myroot]["root_config"]
	return self._pkg(pkg_dblink.mycpv, type_name, root_config,
		installed=(type_name == "installed"))
# Append msg to the file at log_path.
# NOTE(review): the write/close (or try/finally) body is elided from this
# view — only the open() is visible; confirm against upstream.
9407 def _append_to_log_path(self, log_path, msg):
9408 f = open(log_path, 'a')
# elog callback for dblink: in background mode with a log file, route elog
# output into the log file instead of the terminal.
# NOTE(review): several lines (log_file init, msg joining, finally/close)
# are elided from this view.
9414 def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9416 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9419 background = self._background
9421 if background and log_path is not None:
9422 log_file = open(log_path, 'a')
9427 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9429 if log_file is not None:
# Display callback for dblink merge output: write to the terminal unless
# running in background with a log file, in which case append to the log.
# NOTE(review): the branch structure around 9440-9441 is partially elided.
9432 def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9433 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9434 background = self._background
9436 if log_path is None:
# No log file: suppress low-severity output in background mode.
9437 if not (background and level < logging.WARN):
9438 portage.util.writemsg_level(msg,
9439 level=level, noiselevel=noiselevel)
9442 portage.util.writemsg_level(msg,
9443 level=level, noiselevel=noiselevel)
9444 self._append_to_log_path(log_path, msg)
# Run a merge-time ebuild phase through the scheduler so other tasks keep
# running while the phase executes asynchronously; returns the phase's
# exit code.
# NOTE(review): the wait() call before the return appears elided.
9446 def _dblink_ebuild_phase(self,
9447 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9449 Using this callback for merge phases allows the scheduler
9450 to run while these phases execute asynchronously, and allows
9451 the scheduler control output handling.
9454 scheduler = self._sched_iface
9455 settings = pkg_dblink.settings
9456 pkg = self._dblink_pkg(pkg_dblink)
9457 background = self._background
9458 log_path = settings.get("PORTAGE_LOG_FILE")
9460 ebuild_phase = EbuildPhase(background=background,
9461 pkg=pkg, phase=phase, scheduler=scheduler,
9462 settings=settings, tree=pkg_dblink.treetype)
9463 ebuild_phase.start()
9466 return ebuild_phase.returncode
# Verify Manifests of all ebuilds up front (FEATURES=strict only) so the
# user learns about digest failures before any work starts.
# NOTE(review): the early `return os.EX_OK`, the quiet_settings dict
# initialization, and the failure-return path are elided from this view.
9468 def _check_manifests(self):
9469 # Verify all the manifests now so that the user is notified of failure
9470 # as soon as possible.
9471 if "strict" not in self.settings.features or \
9472 "--fetchonly" in self.myopts or \
9473 "--fetch-all-uri" in self.myopts:
9476 shown_verifying_msg = False
# Clone a quieted config per root so digestcheck output stays terse.
9478 for myroot, pkgsettings in self.pkgsettings.iteritems():
9479 quiet_config = portage.config(clone=pkgsettings)
9480 quiet_config["PORTAGE_QUIET"] = "1"
9481 quiet_config.backup_changes("PORTAGE_QUIET")
9482 quiet_settings[myroot] = quiet_config
9485 for x in self._mergelist:
9486 if not isinstance(x, Package) or \
9487 x.type_name != "ebuild":
9490 if not shown_verifying_msg:
9491 shown_verifying_msg = True
9492 self._status_msg("Verifying ebuild manifests")
9494 root_config = x.root_config
9495 portdb = root_config.trees["porttree"].dbapi
9496 quiet_config = quiet_settings[root_config.root]
# "O" tells digestcheck which ebuild directory to verify.
9497 quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9498 if not portage.digestcheck([], quiet_config, strict=True):
# Queue background fetchers for everything in the merge list except the
# first package (avoids "waiting for lock" noise at startup).
# NOTE(review): the early return after the _parallel_fetch check is elided.
9503 def _add_prefetchers(self):
9505 if not self._parallel_fetch:
9508 if self._parallel_fetch:
9509 self._status_msg("Starting parallel fetch")
9511 prefetchers = self._prefetchers
9512 getbinpkg = "--getbinpkg" in self.myopts
9514 # In order to avoid "waiting for lock" messages
9515 # at the beginning, which annoy users, never
9516 # spawn a prefetcher for the first package.
9517 for pkg in self._mergelist[1:]:
9518 prefetcher = self._create_prefetcher(pkg)
9519 if prefetcher is not None:
9520 self._task_queues.fetch.add(prefetcher)
# Keep a (weak) reference so _task() can hand the prefetcher
# to the matching MergeListItem later.
9521 prefetchers[pkg] = prefetcher
# Build a background fetcher for pkg: EbuildFetcher for ebuilds,
# BinpkgFetcher for remote binary packages; None when not applicable.
# NOTE(review): the `prefetcher = None` initialization and the final
# return are elided from this view.
9523 def _create_prefetcher(self, pkg):
9525 @return: a prefetcher, or None if not applicable
9529 if not isinstance(pkg, Package):
9532 elif pkg.type_name == "ebuild":
# Ebuild prefetch borrows a config from the pool lazily via _ConfigPool.
9534 prefetcher = EbuildFetcher(background=True,
9535 config_pool=self._ConfigPool(pkg.root,
9536 self._allocate_config, self._deallocate_config),
9537 fetchonly=1, logfile=self._fetch_log,
9538 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9540 elif pkg.type_name == "binary" and \
9541 "--getbinpkg" in self.myopts and \
9542 pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9544 prefetcher = BinpkgFetcher(background=True,
9545 logfile=self._fetch_log, pkg=pkg,
9546 scheduler=self._sched_iface)
# True when the merge list will upgrade the running portage instance and
# more packages follow it (so emerge will re-exec itself mid-run).
# NOTE(review): the return statements are elided from this view.
9550 def _is_restart_scheduled(self):
9552 Check if the merge list contains a replacement
9553 for the current running instance, that will result
9554 in restart after merge.
9556 @returns: True if a restart is scheduled, False otherwise.
9558 if self._opts_no_restart.intersection(self.myopts):
9561 mergelist = self._mergelist
9563 for i, pkg in enumerate(mergelist):
# A restart only matters if portage is not the last entry.
9564 if self._is_restart_necessary(pkg) and \
9565 i != len(mergelist) - 1:
# True when merging pkg replaces the running portage instance with a
# different version on the running root.
# NOTE(review): the fallback returns are elided from this view.
9570 def _is_restart_necessary(self, pkg):
9572 @return: True if merging the given package
9573 requires restart, False otherwise.
9576 # Figure out if we need a restart.
9577 if pkg.root == self._running_root.root and \
9578 portage.match_from_list(
9579 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
9580 if self._running_portage:
# cmp() != 0 means a different version than the one running.
9581 return cmp(pkg, self._running_portage) != 0
# Re-exec emerge with --resume after portage upgrades itself, so the rest
# of the merge list runs under the new portage code. Does not return when
# the restart actually happens (os.execv replaces the process).
# NOTE(review): several `return` lines, an mtimedb.commit(), and log lines
# are elided from this view.
9585 def _restart_if_necessary(self, pkg):
9587 Use execv() to restart emerge. This happens
9588 if portage upgrades itself and there are
9589 remaining packages in the list.
9592 if self._opts_no_restart.intersection(self.myopts):
9595 if not self._is_restart_necessary(pkg):
# No restart needed if portage was the last package in the list.
9598 if pkg == self._mergelist[-1]:
9601 self._main_loop_cleanup()
9603 logger = self._logger
9604 pkg_count = self._pkg_count
9605 mtimedb = self._mtimedb
9606 bad_resume_opts = self._bad_resume_opts
9608 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
9609 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
9611 logger.log(" *** RESTARTING " + \
9612 "emerge via exec() after change of " + \
# Drop the just-merged portage from the resume list before re-exec.
9615 mtimedb["resume"]["mergelist"].remove(list(pkg))
9617 portage.run_exitfuncs()
9618 mynewargv = [sys.argv[0], "--resume"]
9619 resume_opts = self.myopts.copy()
9620 # For automatic resume, we need to prevent
9621 # any of bad_resume_opts from leaking in
9622 # via EMERGE_DEFAULT_OPTS.
9623 resume_opts["--ignore-default-opts"] = True
9624 for myopt, myarg in resume_opts.iteritems():
9625 if myopt not in bad_resume_opts:
9627 mynewargv.append(myopt)
9629 mynewargv.append(myopt +"="+ str(myarg))
9630 # priority only needs to be adjusted on the first run
9631 os.environ["PORTAGE_NICENESS"] = "0"
9632 os.execv(mynewargv[0], mynewargv)
# Interior of the top-level merge() driver: resume handling, background
# mode setup, per-root config cloning, manifest checks, the --keep-going
# retry loop around _merge(), and final failure reporting.
# NOTE(review): the enclosing `def merge(self):` line and many interior
# lines (returns, try/finally, loop headers) are elided from this view —
# confirm structure against upstream before modifying.
9636 if "--resume" in self.myopts:
9638 portage.writemsg_stdout(
9639 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9640 self._logger.log(" *** Resuming merge...")
9642 self._save_resume_list()
9645 self._background = self._background_mode()
9646 except self._unknown_internal_error:
# Propagate PORTAGE_BACKGROUND into each root's settings (they must be
# unlocked for modification, then re-locked).
9649 for root in self.trees:
9650 root_config = self.trees[root]["root_config"]
9651 if self._background:
9652 root_config.settings.unlock()
9653 root_config.settings["PORTAGE_BACKGROUND"] = "1"
9654 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
9655 root_config.settings.lock()
9657 self.pkgsettings[root] = portage.config(
9658 clone=root_config.settings)
9660 rval = self._check_manifests()
9661 if rval != os.EX_OK:
# --keep-going loop: after a failure, drop failed packages from the
# resume list, recalculate the dependency graph, and try again.
9664 keep_going = "--keep-going" in self.myopts
9665 fetchonly = self._build_opts.fetchonly
9666 mtimedb = self._mtimedb
9667 failed_pkgs = self._failed_pkgs
9670 rval = self._merge()
9671 if rval == os.EX_OK or fetchonly or not keep_going:
9673 if "resume" not in mtimedb:
9675 mergelist = self._mtimedb["resume"].get("mergelist")
9682 for failed_pkg in failed_pkgs:
9683 mergelist.remove(list(failed_pkg.pkg))
9685 self._failed_pkgs_all.extend(failed_pkgs)
9691 if not self._calc_resume_list():
9694 clear_caches(self.trees)
9695 if not self._mergelist:
# Reset progress counters for the recalculated merge list.
9698 self._save_resume_list()
9699 self._pkg_count.curval = 0
9700 self._pkg_count.maxval = len([x for x in self._mergelist \
9701 if isinstance(x, Package) and x.operation == "merge"])
9702 self._status_display.maxval = self._pkg_count.maxval
9704 self._logger.log(" *** Finished. Cleaning up...")
9707 self._failed_pkgs_all.extend(failed_pkgs)
# Failure reporting: with exactly one failure in background mode, dump
# that package's whole build log for convenience.
9710 background = self._background
9711 failure_log_shown = False
9712 if background and len(self._failed_pkgs_all) == 1:
9713 # If only one package failed then just show it's
9714 # whole log for easy viewing.
9715 failed_pkg = self._failed_pkgs_all[-1]
9716 build_dir = failed_pkg.build_dir
9719 log_paths = [failed_pkg.build_log]
9721 log_path = self._locate_failure_log(failed_pkg)
9722 if log_path is not None:
9724 log_file = open(log_path, 'rb')
9728 if log_file is not None:
9730 for line in log_file:
9731 writemsg_level(line, noiselevel=-1)
9734 failure_log_shown = True
9736 # Dump mod_echo output now since it tends to flood the terminal.
9737 # This allows us to avoid having more important output, generated
9738 # later, from being swept away by the mod_echo output.
9739 mod_echo_output = _flush_elog_mod_echo()
9741 if background and not failure_log_shown and \
9742 self._failed_pkgs_all and \
9743 self._failed_pkgs_die_msgs and \
9744 not mod_echo_output:
# Replay collected die messages per package and ebuild phase.
9746 printer = portage.output.EOutput()
9747 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
9749 if mysettings["ROOT"] != "/":
9750 root_msg = " merged to %s" % mysettings["ROOT"]
9752 printer.einfo("Error messages for package %s%s:" % \
9753 (colorize("INFORM", key), root_msg))
9755 for phase in portage.const.EBUILD_PHASES:
9756 if phase not in logentries:
9758 for msgtype, msgcontent in logentries[phase]:
9759 if isinstance(msgcontent, basestring):
9760 msgcontent = [msgcontent]
9761 for line in msgcontent:
9762 printer.eerror(line.strip("\n"))
9764 if self._post_mod_echo_msgs:
9765 for msg in self._post_mod_echo_msgs:
# Summarize all failed packages when more than one failed.
9768 if len(self._failed_pkgs_all) > 1:
9769 msg = "The following packages have " + \
9770 "failed to build or install:"
9772 writemsg(prefix + "\n", noiselevel=-1)
9773 from textwrap import wrap
9774 for line in wrap(msg, 72):
9775 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
9776 writemsg(prefix + "\n", noiselevel=-1)
9777 for failed_pkg in self._failed_pkgs_all:
9778 writemsg("%s\t%s\n" % (prefix,
9779 colorize("INFORM", str(failed_pkg.pkg))),
9781 writemsg(prefix + "\n", noiselevel=-1)
# elog hook: collect ERROR-level entries so they can be replayed in the
# final failure summary.
# NOTE(review): the `if errors:` guard between filter and append appears
# elided from this view.
9785 def _elog_listener(self, mysettings, key, logentries, fulltext):
9786 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
9788 self._failed_pkgs_die_msgs.append(
9789 (mysettings, key, errors))
# Find a usable (existing, non-empty) build log for a failed package.
# NOTE(review): most of this method (stat error handling, return paths)
# is elided from this view — confirm against upstream.
9791 def _locate_failure_log(self, failed_pkg):
9793 build_dir = failed_pkg.build_dir
9796 log_paths = [failed_pkg.build_log]
9798 for log_path in log_paths:
9803 log_size = os.stat(log_path).st_size
# Seed the scheduling queue with Package entries from the merge list.
# NOTE(review): the Blocker-handling branch body is elided from this view.
9814 def _add_packages(self):
9815 pkg_queue = self._pkg_queue
9816 for pkg in self._mergelist:
9817 if isinstance(pkg, Package):
9818 pkg_queue.append(pkg)
9819 elif isinstance(pkg, Blocker):
# Exit listener for PackageMerge tasks: finish bookkeeping, return the
# borrowed config to the pool, and update the status display counters.
9822 def _merge_exit(self, merge):
9823 self._do_merge_exit(merge)
9824 self._deallocate_config(merge.merge.settings)
# Only count successful merges of non-installed packages as progress.
9825 if merge.returncode == os.EX_OK and \
9826 not merge.merge.pkg.installed:
9827 self._status_display.curval += 1
9828 self._status_display.merges = len(self._task_queues.merge)
# Post-merge bookkeeping: record failures, mark tasks complete (including
# the implicit uninstall of a replaced package), trigger a portage
# restart if needed, and keep the on-disk resume list current.
# NOTE(review): a `return` after the failure branch and world-file update
# lines appear elided from this view.
9831 def _do_merge_exit(self, merge):
9832 pkg = merge.merge.pkg
9833 if merge.returncode != os.EX_OK:
9834 settings = merge.merge.settings
9835 build_dir = settings.get("PORTAGE_BUILDDIR")
9836 build_log = settings.get("PORTAGE_LOG_FILE")
9838 self._failed_pkgs.append(self._failed_pkg(
9839 build_dir=build_dir, build_log=build_log,
9841 returncode=merge.returncode))
9842 self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
9844 self._status_display.failed = len(self._failed_pkgs)
9847 self._task_complete(pkg)
9848 pkg_to_replace = merge.merge.pkg_to_replace
9849 if pkg_to_replace is not None:
9850 # When a package is replaced, mark it's uninstall
9851 # task complete (if any).
9853 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
9854 self._task_complete(uninst_hash_key)
9859 self._restart_if_necessary(pkg)
9861 # Call mtimedb.commit() after each merge so that
9862 # --resume still works after being interrupted
9863 # by reboot, sigkill or similar.
9864 mtimedb = self._mtimedb
# Remove the merged package from the resume list; drop the resume
# entry entirely once the list is empty.
9865 mtimedb["resume"]["mergelist"].remove(list(pkg))
9866 if not mtimedb["resume"]["mergelist"]:
9867 del mtimedb["resume"]
# Exit listener for build jobs: on success, chain a PackageMerge onto the
# merge queue; on failure, record the failed package. Either way release
# the job slot and its config.
# NOTE(review): the job-count decrement line appears elided from this view.
9870 def _build_exit(self, build):
9871 if build.returncode == os.EX_OK:
9873 merge = PackageMerge(merge=build)
9874 merge.addExitListener(self._merge_exit)
9875 self._task_queues.merge.add(merge)
9876 self._status_display.merges = len(self._task_queues.merge)
9878 settings = build.settings
9879 build_dir = settings.get("PORTAGE_BUILDDIR")
9880 build_log = settings.get("PORTAGE_LOG_FILE")
9882 self._failed_pkgs.append(self._failed_pkg(
9883 build_dir=build_dir, build_log=build_log,
9885 returncode=build.returncode))
9886 self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
9888 self._status_display.failed = len(self._failed_pkgs)
9889 self._deallocate_config(build.settings)
9891 self._status_display.running = self._jobs
def _extract_exit(self, build):
	"""Exit listener for binpkg extraction; handled identically to builds."""
	self._build_exit(build)
9897 def _task_complete(self, pkg):
9898 self._completed_tasks.add(pkg)
9899 self._choose_pkg_return_early = False
# Interior of _merge(): install the elog listener and quiet locking, run
# the main loop, then restore globals and derive the return code from the
# last failed package (if any).
# NOTE(review): the enclosing `def _merge(self):` line, the try/finally
# structure, and the success return are elided from this view.
9903 self._add_prefetchers()
9904 self._add_packages()
9905 pkg_queue = self._pkg_queue
9906 failed_pkgs = self._failed_pkgs
# Global hooks: suppress lock messages in background mode and capture
# elog ERROR entries for the final summary.
9907 portage.locks._quiet = self._background
9908 portage.elog._emerge_elog_listener = self._elog_listener
9914 self._main_loop_cleanup()
9915 portage.locks._quiet = False
9916 portage.elog._emerge_elog_listener = None
9918 rval = failed_pkgs[-1].returncode
9922 def _main_loop_cleanup(self):
9923 del self._pkg_queue[:]
9924 self._completed_tasks.clear()
9925 self._choose_pkg_return_early = False
9926 self._status_display.reset()
9927 self._digraph = None
9928 self._task_queues.fetch.clear()
# Pick the next package whose dependencies are all satisfied; memoizes a
# negative result (_choose_pkg_return_early) until some job completes.
# NOTE(review): several `return None`/`break` lines and the chosen_pkg
# initialization are elided from this view.
9930 def _choose_pkg(self):
9932 Choose a task that has all it's dependencies satisfied.
9935 if self._choose_pkg_return_early:
# Without a digraph, scheduling is strict list order: the head of the
# queue can only be taken when nothing else is running (unless
# --nodeps with parallel jobs allows free ordering).
9938 if self._digraph is None:
9939 if (self._jobs or self._task_queues.merge) and \
9940 not ("--nodeps" in self.myopts and \
9941 (self._max_jobs is True or self._max_jobs > 1)):
9942 self._choose_pkg_return_early = True
9944 return self._pkg_queue.pop(0)
9946 if not (self._jobs or self._task_queues.merge):
9947 return self._pkg_queue.pop(0)
9949 self._prune_digraph()
# Packages queued after a candidate will merge later anyway, so their
# dependency edges can be ignored when testing the candidate.
9952 later = set(self._pkg_queue)
9953 for pkg in self._pkg_queue:
9955 if not self._dependent_on_scheduled_merges(pkg, later):
9959 if chosen_pkg is not None:
9960 self._pkg_queue.remove(chosen_pkg)
9962 if chosen_pkg is None:
9963 # There's no point in searching for a package to
9964 # choose until at least one of the existing jobs
9966 self._choose_pkg_return_early = True
# Depth-first walk of pkg's dependency subgraph looking for any node that
# still represents a pending (scheduled but incomplete) merge.
# NOTE(review): the `while node_stack:` header, a `continue`, the
# `return True` inside the loop, and the final `return False` appear
# elided from this view.
9970 def _dependent_on_scheduled_merges(self, pkg, later):
9972 Traverse the subgraph of the given packages deep dependencies
9973 to see if it contains any scheduled merges.
9974 @param pkg: a package to check dependencies for
9976 @param later: packages for which dependence should be ignored
9977 since they will be merged later than pkg anyway and therefore
9978 delaying the merge of pkg will not result in a more optimal
9982 @returns: True if the package is dependent, False otherwise.
9985 graph = self._digraph
9986 completed_tasks = self._completed_tasks
9989 traversed_nodes = set([pkg])
# Direct deps are kept separately: an uninstall node only matters when
# it is a direct dependency of pkg.
9990 direct_deps = graph.child_nodes(pkg)
9991 node_stack = direct_deps
9992 direct_deps = frozenset(direct_deps)
9994 node = node_stack.pop()
9995 if node in traversed_nodes:
9997 traversed_nodes.add(node)
9998 if not ((node.installed and node.operation == "nomerge") or \
9999 (node.operation == "uninstall" and \
10000 node not in direct_deps) or \
10001 node in completed_tasks or \
10005 node_stack.extend(graph.child_nodes(node))
# Hand out a config instance for a task: reuse a pooled one when available,
# otherwise clone the per-root pkgsettings. Always reload/reset so state
# from the previous user (e.g. PORTAGE_LOG_FILE) is flushed.
# NOTE(review): docstring delimiters and the `else:` line are elided from
# this view.
10009 def _allocate_config(self, root):
10011 Allocate a unique config instance for a task in order
10012 to prevent interference between parallel tasks.
10014 if self._config_pool[root]:
10015 temp_settings = self._config_pool[root].pop()
10017 temp_settings = portage.config(clone=self.pkgsettings[root])
10018 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10019 # performance reasons, call it here to make sure all settings from the
10020 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10021 temp_settings.reload()
10022 temp_settings.reset()
10023 return temp_settings
10025 def _deallocate_config(self, settings):
10026 self._config_pool[settings["ROOT"]].append(settings)
# Main scheduling loop: keep scheduling tasks and servicing poll events
# until the queues drain (or a failure stops scheduling).
# NOTE(review): the poll calls inside the loop bodies are elided from this
# view (residue numbering jumps).
10028 def _main_loop(self):
10030 # Only allow 1 job max if a restart is scheduled
10031 # due to portage update.
10032 if self._is_restart_scheduled() or \
10033 self._opts_no_background.intersection(self.myopts):
10034 self._set_max_jobs(1)
10036 merge_queue = self._task_queues.merge
10038 while self._schedule():
10039 if self._poll_event_handlers:
# Drain remaining jobs/merges after scheduling stops.
10044 if not (self._jobs or merge_queue):
10046 if self._poll_event_handlers:
10049 def _keep_scheduling(self):
10050 return bool(self._pkg_queue and \
10051 not (self._failed_pkgs and not self._build_opts.fetchonly))
# One scheduling pass: start whatever can start, refresh the display, and
# cancel leftover prefetchers once a failure makes them pointless.
# NOTE(review): the schedule() call on each queue and surrounding lines
# are elided from this view.
10053 def _schedule_tasks(self):
10054 self._schedule_tasks_imp()
10055 self._status_display.display()
10058 for q in self._task_queues.values():
10062 # Cancel prefetchers if they're the only reason
10063 # the main poll loop is still running.
10064 if self._failed_pkgs and not self._build_opts.fetchonly and \
10065 not (self._jobs or self._task_queues.merge) and \
10066 self._task_queues.fetch:
10067 self._task_queues.fetch.clear()
# Second pass picks up work made schedulable by the queue updates.
10071 self._schedule_tasks_imp()
10072 self._status_display.display()
10074 return self._keep_scheduling()
# Throttle job starts when --load-average is active: delay grows
# exponentially with the number of running jobs, capped at
# _job_delay_max seconds since the previous job start.
# NOTE(review): docstring delimiters and the return statements are elided
# from this view.
10076 def _job_delay(self):
10079 @returns: True if job scheduling should be delayed, False otherwise.
10082 if self._jobs and self._max_load is not None:
10084 current_time = time.time()
10086 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10087 if delay > self._job_delay_max:
10088 delay = self._job_delay_max
10089 if (current_time - self._previous_job_start_time) < delay:
# Inner scheduling step: while capacity allows, pick a package via
# _choose_pkg(), wrap it in a task, and dispatch it to the appropriate
# queue (merge queue for install-only, jobs queue for build/extract).
# NOTE(review): the loop header, state_change initialization, and several
# branch lines are elided from this view.
10094 def _schedule_tasks_imp(self):
10097 @returns: True if state changed, False otherwise.
10104 if not self._keep_scheduling():
10105 return bool(state_change)
# Respect the early-return memo, job limits, and load-average delay.
10107 if self._choose_pkg_return_early or \
10108 not self._can_add_job() or \
10110 return bool(state_change)
10112 pkg = self._choose_pkg()
10114 return bool(state_change)
10118 if not pkg.installed:
10119 self._pkg_count.curval += 1
10121 task = self._task(pkg)
# Installed packages go straight to the merge queue.
10124 merge = PackageMerge(merge=task)
10125 merge.addExitListener(self._merge_exit)
10126 self._task_queues.merge.add(merge)
10130 self._previous_job_start_time = time.time()
10131 self._status_display.running = self._jobs
10132 task.addExitListener(self._extract_exit)
10133 self._task_queues.jobs.add(task)
10137 self._previous_job_start_time = time.time()
10138 self._status_display.running = self._jobs
10139 task.addExitListener(self._build_exit)
10140 self._task_queues.jobs.add(task)
10142 return bool(state_change)
# Build the MergeListItem task for a package, resolving the currently
# installed package in the same slot (if any) as pkg_to_replace.
# NOTE(review): the guard around `previous_cpv.pop()` and the return line
# are elided from this view.
10144 def _task(self, pkg):
10146 pkg_to_replace = None
10147 if pkg.operation != "uninstall":
10148 vardb = pkg.root_config.trees["vartree"].dbapi
10149 previous_cpv = vardb.match(pkg.slot_atom)
10151 previous_cpv = previous_cpv.pop()
10152 pkg_to_replace = self._pkg(previous_cpv,
10153 "installed", pkg.root_config, installed=True)
# The task gets its own config (allocated here, released on merge exit)
# plus a _ConfigPool for any helper fetch tasks it spawns.
10155 task = MergeListItem(args_set=self._args_set,
10156 background=self._background, binpkg_opts=self._binpkg_opts,
10157 build_opts=self._build_opts,
10158 config_pool=self._ConfigPool(pkg.root,
10159 self._allocate_config, self._deallocate_config),
10160 emerge_opts=self.myopts,
10161 find_blockers=self._find_blockers(pkg), logger=self._logger,
10162 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10163 pkg_to_replace=pkg_to_replace,
10164 prefetcher=self._prefetchers.get(pkg),
10165 scheduler=self._sched_iface,
10166 settings=self._allocate_config(pkg.root),
10167 statusMessage=self._status_msg,
10168 world_atom=self._world_atom)
# Announce a package failure in the status display, including the root
# (when not "/") and the location of the build log if one was found.
10172 def _failed_pkg_msg(self, failed_pkg, action, preposition):
10173 pkg = failed_pkg.pkg
10174 msg = "%s to %s %s" % \
10175 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
10176 if pkg.root != "/":
10177 msg += " %s %s" % (preposition, pkg.root)
10179 log_path = self._locate_failure_log(failed_pkg)
10180 if log_path is not None:
10181 msg += ", Log file:"
10182 self._status_msg(msg)
# The log path goes on its own line so it is easy to copy.
10184 if log_path is not None:
10185 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
# Show a one-line status message; the JobStatusDisplay handles \r/\n
# control characters so lines are drawn/erased correctly.
# NOTE(review): part of the docstring is elided from this view.
10187 def _status_msg(self, msg):
10189 Display a brief status message (no newlines) in the status display.
10190 This is called by tasks to provide feedback to the user. This
10191 delegates the resposibility of generating \r and \n control characters,
10192 to guarantee that lines are created or erased when necessary and
10196 @param msg: a brief status message (no newlines allowed)
# In foreground mode, separate the message from any task output above.
10198 if not self._background:
10199 writemsg_level("\n")
10200 self._status_display.displayMessage(msg)
# Persist the pending merge operations into mtimedb["resume"] so an
# interrupted run can be continued with --resume.
# NOTE(review): the mtimedb assignment setup and the commit() call appear
# elided from this view.
10202 def _save_resume_list(self):
10204 Do this before verifying the ebuild Manifests since it might
10205 be possible for the user to use --resume --skipfirst get past
10206 a non-essential package with a broken digest.
10208 mtimedb = self._mtimedb
10209 mtimedb["resume"]["mergelist"] = [list(x) \
10210 for x in self._mergelist \
10211 if isinstance(x, Package) and x.operation == "merge"]
# Rebuild the merge list for --keep-going: rerun the resume depgraph with
# skip_unsatisfied, report anything dropped, and install the new list and
# digraph into this scheduler.
# NOTE(review): multiple lines (returns, indent setup, `pkg = task`,
# msg_width, blank `"""` delimiters) are elided from this view.
10215 def _calc_resume_list(self):
10217 Use the current resume list to calculate a new one,
10218 dropping any packages with unsatisfied deps.
10220 @returns: True if successful, False otherwise.
10222 print colorize("GOOD", "*** Resuming merge...")
10224 if self._show_list():
10225 if "--tree" in self.myopts:
10226 portage.writemsg_stdout("\n" + \
10227 darkgreen("These are the packages that " + \
10228 "would be merged, in reverse order:\n\n"))
10231 portage.writemsg_stdout("\n" + \
10232 darkgreen("These are the packages that " + \
10233 "would be merged, in order:\n\n"))
10235 show_spinner = "--quiet" not in self.myopts and \
10236 "--nodeps" not in self.myopts
10239 print "Calculating dependencies ",
10241 myparams = create_depgraph_params(self.myopts, None)
# Recompute the dependency graph from the resume list, skipping
# entries whose dependencies can no longer be satisfied.
10245 success, mydepgraph, dropped_tasks = resume_depgraph(
10246 self.settings, self.trees, self._mtimedb, self.myopts,
10247 myparams, self._spinner, skip_unsatisfied=True)
10248 except depgraph.UnsatisfiedResumeDep, e:
10249 mydepgraph = e.depgraph
10250 dropped_tasks = set()
10253 print "\b\b... done!"
# Deferred error report shown after mod_echo output is flushed.
10256 def unsatisfied_resume_dep_msg():
10257 mydepgraph.display_problems()
10258 out = portage.output.EOutput()
10259 out.eerror("One or more packages are either masked or " + \
10260 "have missing dependencies:")
10263 show_parents = set()
10264 for dep in e.value:
10265 if dep.parent in show_parents:
10267 show_parents.add(dep.parent)
10268 if dep.atom is None:
10269 out.eerror(indent + "Masked package:")
10270 out.eerror(2 * indent + str(dep.parent))
10273 out.eerror(indent + str(dep.atom) + " pulled in by:")
10274 out.eerror(2 * indent + str(dep.parent))
10276 msg = "The resume list contains packages " + \
10277 "that are either masked or have " + \
10278 "unsatisfied dependencies. " + \
10279 "Please restart/continue " + \
10280 "the operation manually, or use --skipfirst " + \
10281 "to skip the first package in the list and " + \
10282 "any other packages that may be " + \
10283 "masked or have missing dependencies."
10284 for line in textwrap.wrap(msg, 72):
10286 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10289 if success and self._show_list():
10290 mylist = mydepgraph.altlist()
10292 if "--tree" in self.myopts:
10294 mydepgraph.display(mylist, favorites=self._favorites)
10297 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10299 mydepgraph.display_problems()
# Install the recalculated list/graph; break_refs avoids keeping the
# whole depgraph alive via package back-references.
10301 mylist = mydepgraph.altlist()
10302 mydepgraph.break_refs(mylist)
10303 self._mergelist = mylist
10304 self._set_digraph(mydepgraph.schedulerGraph())
# Log an eerror for every merge task dropped from the resume list.
10307 for task in dropped_tasks:
10308 if not (isinstance(task, Package) and task.operation == "merge"):
10311 msg = "emerge --keep-going:" + \
10313 if pkg.root != "/":
10314 msg += " for %s" % (pkg.root,)
10315 msg += " dropped due to unsatisfied dependency."
10316 for line in textwrap.wrap(msg, msg_width):
10317 eerror(line, phase="other", key=pkg.cpv)
10318 settings = self.pkgsettings[pkg.root]
10319 # Ensure that log collection from $T is disabled inside
10320 # elog_process(), since any logs that might exist are
10322 settings.pop("T", None)
10323 portage.elog.elog_process(pkg.cpv, settings)
# Whether the merge list should be (re)displayed: yes for --ask/--tree/
# --verbose, unless --quiet suppresses it.
# NOTE(review): the `return True`/`return False` lines are elided from
# this view.
10327 def _show_list(self):
10328 myopts = self.myopts
10329 if "--quiet" not in myopts and \
10330 ("--ask" in myopts or "--tree" in myopts or \
10331 "--verbose" in myopts):
# Record pkg in the world favorites set when appropriate: skipped for
# pretend/oneshot-style options, non-target roots, and packages not
# matched by the user's arguments. Lock/unlock brackets the update.
# NOTE(review): several lines (returns, lock()/unlock() calls, the
# try/finally structure) are elided from this view.
10335 def _world_atom(self, pkg):
10337 Add the package to the world file, but only if
10338 it's supposed to be added. Otherwise, do nothing.
10341 if set(("--buildpkgonly", "--fetchonly",
10343 "--oneshot", "--onlydeps",
10344 "--pretend")).intersection(self.myopts):
10347 if pkg.root != self.target_root:
10350 args_set = self._args_set
10351 if not args_set.findAtomForPackage(pkg):
10354 logger = self._logger
10355 pkg_count = self._pkg_count
10356 root_config = pkg.root_config
10357 world_set = root_config.sets["world"]
10358 world_locked = False
# Not every set implementation supports locking/loading.
10359 if hasattr(world_set, "lock"):
10361 world_locked = True
10364 if hasattr(world_set, "load"):
10365 world_set.load() # maybe it's changed on disk
10367 atom = create_world_atom(pkg, args_set, root_config)
10369 if hasattr(world_set, "add"):
10370 self._status_msg(('Recording %s in "world" ' + \
10371 'favorites file...') % atom)
10372 logger.log(" === (%s of %s) Updating world file (%s)" % \
10373 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10374 world_set.add(atom)
10376 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10377 (atom,), level=logging.WARN, noiselevel=-1)
# Fetch (or lazily construct) a Package instance for cpv.  Instances
# already present in the depgraph are reused; otherwise metadata is read
# from the appropriate tree dbapi and a fresh Package is built.
# NOTE(review): the "return pkg" lines are elided from this extract.
10382 def _pkg(self, cpv, type_name, root_config, installed=False):
10384 Get a package instance from the cache, or create a new
10385 one if necessary. Raises KeyError from aux_get if it
10386 failures for some reason (package does not exist or is
# Installed packages get "nomerge"; everything else is a "merge" op.
10389 operation = "merge"
10391 operation = "nomerge"
10393 if self._digraph is not None:
10394 # Reuse existing instance when available.
10395 pkg = self._digraph.get(
10396 (type_name, root_config.root, cpv, operation))
10397 if pkg is not None:
# Cache miss: build the Package from the dbapi's aux cache keys.
10400 tree_type = depgraph.pkg_tree_map[type_name]
10401 db = root_config.trees[tree_type].dbapi
10402 db_keys = list(self.trees[root_config.root][
10403 tree_type].dbapi._aux_cache_keys)
10404 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10405 pkg = Package(cpv=cpv, metadata=metadata,
10406 root_config=root_config, installed=installed)
# For ebuilds, compute the effective USE via the root's config.
10407 if type_name == "ebuild":
10408 settings = self.pkgsettings[root_config.root]
10409 settings.setcpv(pkg)
10410 pkg.metadata["USE"] = settings["PORTAGE_USE"]
# Scheduler that regenerates the portage metadata cache ("emerge --metadata"
# style regen) by spawning one metadata process per ebuild, bounded by
# max_jobs/max_load, and pruning stale ("dead") cache entries afterwards.
# NOTE(review): several lines (while-loop headers, try/finally bodies,
# return statements) are elided from this extract.
10414 class MetadataRegen(PollScheduler):
10416 def __init__(self, portdb, max_jobs=None, max_load=None):
10417 PollScheduler.__init__(self)
10418 self._portdb = portdb
10420 if max_jobs is None:
10423 self._max_jobs = max_jobs
10424 self._max_load = max_load
# Interface object the child processes use to hook into the poll loop.
10425 self._sched_iface = self._sched_iface_class(
10426 register=self._register,
10427 schedule=self._schedule_wait,
10428 unregister=self._unregister)
10430 self._valid_pkgs = set()
10431 self._process_iter = self._iter_metadata_processes()
# Generator yielding one EbuildMetadataPhase-style process per cpv that
# actually needs (re)generation; cpvs with fresh cache yield nothing.
10433 def _iter_metadata_processes(self):
10434 portdb = self._portdb
10435 valid_pkgs = self._valid_pkgs
10436 every_cp = portdb.cp_all()
# Sorted reverse so pop() walks categories in ascending order.
10437 every_cp.sort(reverse=True)
10440 cp = every_cp.pop()
10441 portage.writemsg_stdout("Processing %s\n" % cp)
10442 cpv_list = portdb.cp_list(cp)
10443 for cpv in cpv_list:
10444 valid_pkgs.add(cpv)
10445 ebuild_path, repo_path = portdb.findname2(cpv)
10446 metadata_process = portdb._metadata_process(
10447 cpv, ebuild_path, repo_path)
# None means the cache entry is already valid -- nothing to spawn.
10448 if metadata_process is None:
10450 yield metadata_process
# run(): snapshot existing cache keys per tree, drive the scheduler until
# all processes finish, then delete entries for cpvs that no longer exist.
10454 portdb = self._portdb
10455 from portage.cache.cache_errors import CacheError
10458 for mytree in portdb.porttrees:
10460 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10461 except CacheError, e:
10462 portage.writemsg("Error listing cache entries for " + \
10463 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10468 while self._schedule():
# Anything still findable in a tree is live; keep its cache entry.
10475 for y in self._valid_pkgs:
10476 for mytree in portdb.porttrees:
10477 if portdb.findname2(y, mytree=mytree)[0]:
10478 dead_nodes[mytree].discard(y)
10480 for mytree, nodes in dead_nodes.iteritems():
10481 auxdb = portdb.auxdb[mytree]
# Best effort: a vanished or broken cache entry is simply skipped.
10485 except (KeyError, CacheError):
10488 def _schedule_tasks(self):
10491 @returns: True if there may be remaining tasks to schedule,
# Start new metadata processes while job/load limits allow it.
10494 while self._can_add_job():
10496 metadata_process = self._process_iter.next()
10497 except StopIteration:
10501 metadata_process.scheduler = self._sched_iface
10502 metadata_process.addExitListener(self._metadata_exit)
10503 metadata_process.start()
# Exit hook: drop failed cpvs from _valid_pkgs so their stale cache
# entries get pruned, and warn but keep going.
10506 def _metadata_exit(self, metadata_process):
10508 if metadata_process.returncode != os.EX_OK:
10509 self._valid_pkgs.discard(metadata_process.cpv)
10510 portage.writemsg("Error processing %s, continuing...\n" % \
10511 (metadata_process.cpv,))
# Exception carrying the exit status of a failed portage.unmerge() call.
# NOTE(review): the original guard line before "self.status = pargs[0]"
# is elided from this extract (presumably "if pargs:") -- confirm before
# assuming pargs may be empty.
10514 class UninstallFailure(portage.exception.PortageException):
10516 An instance of this class is raised by unmerge() when
10517 an uninstallation fails.
10520 def __init__(self, *pargs):
10521 portage.exception.PortageException.__init__(self, pargs)
# First positional argument is the process exit status.
10523 self.status = pargs[0]
# Top-level uninstall driver for the "unmerge", "prune" and "clean"
# actions.  Resolves the command-line arguments (atoms, cat/pkg names, or
# vdb paths) into candidate_catpkgs, builds pkgmap -- one dict per
# argument with "protected" / "selected" / "omitted" cpv sets -- prints a
# preview, optionally prompts (--ask) or stops (--pretend), then calls
# portage.unmerge() for each selected cpv and cleans world-set entries.
# Raises UninstallFailure on a failed unmerge when raise_on_error is set.
# NOTE(review): this extract has many elided lines (try/except partners,
# returns, loop headers); section comments below describe only what the
# visible statements establish.
10525 def unmerge(root_config, myopts, unmerge_action,
10526 unmerge_files, ldpath_mtimes, autoclean=0,
10527 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10528 scheduler=None, writemsg_level=portage.util.writemsg_level):
10530 quiet = "--quiet" in myopts
10531 settings = root_config.settings
10532 sets = root_config.sets
10533 vartree = root_config.trees["vartree"]
10534 candidate_catpkgs=[]
10536 xterm_titles = "notitles" not in settings.features
10537 out = portage.output.EOutput()
10539 db_keys = list(vartree.dbapi._aux_cache_keys)
# Local Package-instance cache keyed by cpv (helper defined in elided
# lines; visible body below builds installed-package instances lazily).
10542 pkg = pkg_cache.get(cpv)
10544 pkg = Package(cpv=cpv, installed=True,
10545 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
10546 root_config=root_config,
10547 type_name="installed")
10548 pkg_cache[cpv] = pkg
# Lock the vdb directory (when writable) so concurrent emerges don't
# race us while we inspect installed packages.
10551 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10553 # At least the parent needs to exist for the lock file.
10554 portage.util.ensure_dirs(vdb_path)
10555 except portage.exception.PortageException:
10559 if os.access(vdb_path, os.W_OK):
10560 vdb_lock = portage.locks.lockdir(vdb_path)
# Expand the system set: a virtual counts only when exactly one
# installed provider satisfies it.
10561 realsyslist = sets["system"].getAtoms()
10563 for x in realsyslist:
10564 mycp = portage.dep_getkey(x)
10565 if mycp in settings.getvirtuals():
10567 for provider in settings.getvirtuals()[mycp]:
10568 if vartree.dbapi.match(provider):
10569 providers.append(provider)
10570 if len(providers) == 1:
10571 syslist.extend(providers)
10573 syslist.append(mycp)
10575 mysettings = portage.config(clone=settings)
10577 if not unmerge_files:
10578 if unmerge_action == "unmerge":
10580 print bold("emerge unmerge") + " can only be used with specific package names"
10586 localtree = vartree
10587 # process all arguments and add all
10588 # valid db entries to candidate_catpkgs
# global_unmerge path: no file arguments means "consider everything"
# for prune/clean.
10590 if not unmerge_files:
10591 candidate_catpkgs.extend(vartree.dbapi.cp_all())
10593 #we've got command-line arguments
10594 if not unmerge_files:
10595 print "\nNo packages to unmerge have been provided.\n"
10597 for x in unmerge_files:
10598 arg_parts = x.split('/')
10599 if x[0] not in [".","/"] and \
10600 arg_parts[-1][-7:] != ".ebuild":
10601 #possible cat/pkg or dep; treat as such
10602 candidate_catpkgs.append(x)
10603 elif unmerge_action in ["prune","clean"]:
10604 print "\n!!! Prune and clean do not accept individual" + \
10605 " ebuilds as arguments;\n skipping.\n"
10608 # it appears that the user is specifying an installed
10609 # ebuild and we're in "unmerge" mode, so it's ok.
10610 if not os.path.exists(x):
10611 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Path argument: verify it points inside the vdb and translate it to
# an "=cat/pkg-ver" atom.
10614 absx = os.path.abspath(x)
10615 sp_absx = absx.split("/")
10616 if sp_absx[-1][-7:] == ".ebuild":
10618 absx = "/".join(sp_absx)
10620 sp_absx_len = len(sp_absx)
10622 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10623 vdb_len = len(vdb_path)
10625 sp_vdb = vdb_path.split("/")
10626 sp_vdb_len = len(sp_vdb)
10628 if not os.path.exists(absx+"/CONTENTS"):
10629 print "!!! Not a valid db dir: "+str(absx)
10632 if sp_absx_len <= sp_vdb_len:
10633 # The Path is shorter... so it can't be inside the vdb.
10636 print "\n!!!",x,"cannot be inside "+ \
10637 vdb_path+"; aborting.\n"
10640 for idx in range(0,sp_vdb_len):
10641 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
10644 print "\n!!!", x, "is not inside "+\
10645 vdb_path+"; aborting.\n"
10648 print "="+"/".join(sp_absx[sp_vdb_len:])
10649 candidate_catpkgs.append(
10650 "="+"/".join(sp_absx[sp_vdb_len:]))
10653 if (not "--quiet" in myopts):
10655 if settings["ROOT"] != "/":
10656 writemsg_level(darkgreen(newline+ \
10657 ">>> Using system located in ROOT tree %s\n" % \
10660 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
10661 not ("--quiet" in myopts):
10662 writemsg_level(darkgreen(newline+\
10663 ">>> These are the packages that would be unmerged:\n"))
10665 # Preservation of order is required for --depclean and --prune so
10666 # that dependencies are respected. Use all_selected to eliminate
10667 # duplicate packages since the same package may be selected by
10670 all_selected = set()
10671 for x in candidate_catpkgs:
10672 # cycle through all our candidate deps and determine
10673 # what will and will not get unmerged
10675 mymatch = vartree.dbapi.match(x)
10676 except portage.exception.AmbiguousPackageName, errpkgs:
10677 print "\n\n!!! The short ebuild name \"" + \
10678 x + "\" is ambiguous. Please specify"
10679 print "!!! one of the following fully-qualified " + \
10680 "ebuild names instead:\n"
10681 for i in errpkgs[0]:
10682 print " " + green(i)
10686 if not mymatch and x[0] not in "<>=~":
10687 mymatch = localtree.dep_match(x)
10689 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
10690 (x, unmerge_action), noiselevel=-1)
# One pkgmap slot per argument, with the three display categories.
10694 {"protected": set(), "selected": set(), "omitted": set()})
10695 mykey = len(pkgmap) - 1
10696 if unmerge_action=="unmerge":
10698 if y not in all_selected:
10699 pkgmap[mykey]["selected"].add(y)
10700 all_selected.add(y)
10701 elif unmerge_action == "prune":
10702 if len(mymatch) == 1:
# prune: keep only the best version (highest counter wins on a slot
# collision); everything else in the match becomes "selected".
10704 best_version = mymatch[0]
10705 best_slot = vartree.getslot(best_version)
10706 best_counter = vartree.dbapi.cpv_counter(best_version)
10707 for mypkg in mymatch[1:]:
10708 myslot = vartree.getslot(mypkg)
10709 mycounter = vartree.dbapi.cpv_counter(mypkg)
10710 if (myslot == best_slot and mycounter > best_counter) or \
10711 mypkg == portage.best([mypkg, best_version]):
10712 if myslot == best_slot:
10713 if mycounter < best_counter:
10714 # On slot collision, keep the one with the
10715 # highest counter since it is the most
10716 # recently installed.
10718 best_version = mypkg
10720 best_counter = mycounter
10721 pkgmap[mykey]["protected"].add(best_version)
10722 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
10723 if mypkg != best_version and mypkg not in all_selected)
10724 all_selected.update(pkgmap[mykey]["selected"])
10726 # unmerge_action == "clean"
# clean: group matches by slot (counter -> cpv), protect the
# last-merged package per slot, select the rest.
10728 for mypkg in mymatch:
10729 if unmerge_action == "clean":
10730 myslot = localtree.getslot(mypkg)
10732 # since we're pruning, we don't care about slots
10733 # and put all the pkgs in together
10735 if myslot not in slotmap:
10736 slotmap[myslot] = {}
10737 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
10739 for myslot in slotmap:
10740 counterkeys = slotmap[myslot].keys()
10741 if not counterkeys:
10744 pkgmap[mykey]["protected"].add(
10745 slotmap[myslot][counterkeys[-1]])
10746 del counterkeys[-1]
10747 #be pretty and get them in order of merge:
10748 for ckey in counterkeys:
10749 mypkg = slotmap[myslot][ckey]
10750 if mypkg not in all_selected:
10751 pkgmap[mykey]["selected"].add(mypkg)
10752 all_selected.add(mypkg)
10753 # ok, now the last-merged package
10754 # is protected, and the rest are selected
10755 numselected = len(all_selected)
10756 if global_unmerge and not numselected:
10757 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
10760 if not numselected:
10761 portage.writemsg_stdout(
10762 "\n>>> No packages selected for removal by " + \
10763 unmerge_action + "\n")
# Done reading the vdb; flush and release the lock before the
# interactive/display phase.
10767 vartree.dbapi.flush_cache()
10768 portage.locks.unlockdir(vdb_lock)
# Never let non-clean actions unmerge portage itself on ROOT=/ --
# reclassify it from "selected" to "protected".
10770 for cp in xrange(len(pkgmap)):
10771 for cpv in pkgmap[cp]["selected"].copy():
10775 # It could have been uninstalled
10776 # by a concurrent process.
10779 if unmerge_action != "clean" and \
10780 root_config.root == "/" and \
10781 portage.match_from_list(
10782 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10783 msg = ("Not unmerging package %s since there is no valid " + \
10784 "reason for portage to unmerge itself.") % (pkg.cpv,)
10785 for line in textwrap.wrap(msg, 75):
10787 # adjust pkgmap so the display output is correct
10788 pkgmap[cp]["selected"].remove(cpv)
10789 all_selected.remove(cpv)
10790 pkgmap[cp]["protected"].add(cpv)
10793 numselected = len(all_selected)
10794 if not numselected:
10796 "\n>>> No packages selected for removal by " + \
10797 unmerge_action + "\n")
10800 # Unmerge order only matters in some cases
# When order doesn't matter, merge per-argument entries into one dict
# per cp and sort alphabetically for display.
10804 selected = d["selected"]
10807 cp = portage.cpv_getkey(iter(selected).next())
10808 cp_dict = unordered.get(cp)
10809 if cp_dict is None:
10811 unordered[cp] = cp_dict
10814 for k, v in d.iteritems():
10815 cp_dict[k].update(v)
10816 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview loop: compute "omitted" versions, warn about system-profile
# packages, then print selected/protected/omitted per cp.
10818 for x in xrange(len(pkgmap)):
10819 selected = pkgmap[x]["selected"]
10822 for mytype, mylist in pkgmap[x].iteritems():
10823 if mytype == "selected":
10825 mylist.difference_update(all_selected)
10826 cp = portage.cpv_getkey(iter(selected).next())
10827 for y in localtree.dep_match(cp):
10828 if y not in pkgmap[x]["omitted"] and \
10829 y not in pkgmap[x]["selected"] and \
10830 y not in pkgmap[x]["protected"] and \
10831 y not in all_selected:
10832 pkgmap[x]["omitted"].add(y)
10833 if global_unmerge and not pkgmap[x]["selected"]:
10834 #avoid cluttering the preview printout with stuff that isn't getting unmerged
10836 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
10837 writemsg_level(colorize("BAD","\a\n\n!!! " + \
10838 "'%s' is part of your system profile.\n" % cp),
10839 level=logging.WARNING, noiselevel=-1)
10840 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
10841 "be damaging to your system.\n\n"),
10842 level=logging.WARNING, noiselevel=-1)
10843 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
10844 countdown(int(settings["EMERGE_WARNING_DELAY"]),
10845 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
10847 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
10849 writemsg_level(bold(cp) + ": ", noiselevel=-1)
10850 for mytype in ["selected","protected","omitted"]:
10852 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
10853 if pkgmap[x][mytype]:
10854 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
10855 sorted_pkgs.sort(portage.pkgcmp)
10856 for pn, ver, rev in sorted_pkgs:
10860 myversion = ver + "-" + rev
10861 if mytype == "selected":
10863 colorize("UNMERGE_WARN", myversion + " "),
10867 colorize("GOOD", myversion + " "), noiselevel=-1)
10869 writemsg_level("none ", noiselevel=-1)
10871 writemsg_level("\n", noiselevel=-1)
10873 writemsg_level("\n", noiselevel=-1)
10875 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
10876 " packages are slated for removal.\n")
10877 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
10878 " and " + colorize("GOOD", "'omitted'") + \
10879 " packages will not be removed.\n\n")
10881 if "--pretend" in myopts:
10882 #we're done... return
10884 if "--ask" in myopts:
10885 if userquery("Would you like to unmerge these packages?")=="No":
10886 # enter pretend mode for correct formatting of results
10887 myopts["--pretend"] = True
10892 #the real unmerging begins, after a short delay....
10893 if clean_delay and not autoclean:
10894 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
10896 for x in xrange(len(pkgmap)):
10897 for y in pkgmap[x]["selected"]:
10898 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
10899 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
10900 mysplit = y.split("/")
10902 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
10903 mysettings, unmerge_action not in ["clean","prune"],
10904 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
10905 scheduler=scheduler)
10907 if retval != os.EX_OK:
10908 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
10910 raise UninstallFailure(retval)
# Success: drop the package (and, for set-driven unmerges, any
# now-empty set references) from the world file.
10913 if clean_world and hasattr(sets["world"], "cleanPackage"):
10914 sets["world"].cleanPackage(vartree.dbapi, y)
10915 emergelog(xterm_titles, " >>> unmerge success: "+y)
10916 if clean_world and hasattr(sets["world"], "remove"):
10917 for s in root_config.setconfig.active:
10918 sets["world"].remove(SETPREFIX+s)
# Regenerate the GNU info "dir" index for every info directory whose
# mtime changed since the last run (tracked in prev_mtimes).  Runs
# /usr/bin/install-info per info file, tolerating the two harmless
# warnings it is known to emit, and reports a final success/error count.
# NOTE(review): several lines (loop/else partners, error accounting
# setup) are elided from this extract.
10921 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
10923 if os.path.exists("/usr/bin/install-info"):
10924 out = portage.output.EOutput()
# Collect info dirs whose mtime differs from the recorded one.
10929 inforoot=normpath(root+z)
10930 if os.path.isdir(inforoot):
10931 infomtime = long(os.stat(inforoot).st_mtime)
10932 if inforoot not in prev_mtimes or \
10933 prev_mtimes[inforoot] != infomtime:
10934 regen_infodirs.append(inforoot)
10936 if not regen_infodirs:
10937 portage.writemsg_stdout("\n")
10938 out.einfo("GNU info directory index is up-to-date.")
10940 portage.writemsg_stdout("\n")
10941 out.einfo("Regenerating GNU info directory index...")
10943 dir_extensions = ("", ".gz", ".bz2")
10947 for inforoot in regen_infodirs:
10951 if not os.path.isdir(inforoot) or \
10952 not os.access(inforoot, os.W_OK):
10955 file_list = os.listdir(inforoot)
10957 dir_file = os.path.join(inforoot, "dir")
10958 moved_old_dir = False
10959 processed_count = 0
10960 for x in file_list:
# Skip hidden entries, subdirectories, and the dir index itself
# (in any of its compressed forms).
10961 if x.startswith(".") or \
10962 os.path.isdir(os.path.join(inforoot, x)):
10964 if x.startswith("dir"):
10966 for ext in dir_extensions:
10967 if x == "dir" + ext or \
10968 x == "dir" + ext + ".old":
# Before processing the first file, move the old dir index aside so
# install-info builds a fresh one.
10973 if processed_count == 0:
10974 for ext in dir_extensions:
10976 os.rename(dir_file + ext, dir_file + ext + ".old")
10977 moved_old_dir = True
10978 except EnvironmentError, e:
10979 if e.errno != errno.ENOENT:
10982 processed_count += 1
10983 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
10984 existsstr="already exists, for file `"
10986 if re.search(existsstr,myso):
10987 # Already exists... Don't increment the count for this.
10989 elif myso[:44]=="install-info: warning: no info dir entry in ":
10990 # This info file doesn't contain a DIR-header: install-info produces this
10991 # (harmless) warning (the --quiet switch doesn't seem to work).
10992 # Don't increment the count for this.
10995 badcount=badcount+1
10996 errmsg += myso + "\n"
10999 if moved_old_dir and not os.path.exists(dir_file):
11000 # We didn't generate a new dir file, so put the old file
11001 # back where it was originally found.
11002 for ext in dir_extensions:
11004 os.rename(dir_file + ext + ".old", dir_file + ext)
11005 except EnvironmentError, e:
11006 if e.errno != errno.ENOENT:
11010 # Clean dir.old cruft so that they don't prevent
11011 # unmerge of otherwise empty directories.
11012 for ext in dir_extensions:
11014 os.unlink(dir_file + ext + ".old")
11015 except EnvironmentError, e:
11016 if e.errno != errno.ENOENT:
11020 #update mtime so we can potentially avoid regenerating.
11021 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
# Final report: errors (with collected messages) or a success count.
11024 out.eerror("Processed %d info files; %d errors." % \
11025 (icount, badcount))
11026 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11029 out.einfo("Processed %d info files." % (icount,))
# Print a warning for each configured repository that has unread GNU-style
# news items, and a final hint to run "eselect news".  Item counting is
# delegated to checkUpdatedNewsItems(); the unread state is only updated
# on disk when not running under --pretend.
11032 def display_news_notification(root_config, myopts):
11033 target_root = root_config.root
11034 trees = root_config.trees
11035 settings = trees["vartree"].settings
11036 portdb = trees["porttree"].dbapi
11037 vardb = trees["vartree"].dbapi
11038 NEWS_PATH = os.path.join("metadata", "news")
11039 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11040 newsReaderDisplay = False
# --pretend must not mutate the unread-items state.
11041 update = "--pretend" not in myopts
11043 for repo in portdb.getRepositories():
11044 unreadItems = checkUpdatedNewsItems(
11045 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11047 if not newsReaderDisplay:
11048 newsReaderDisplay = True
11050 print colorize("WARN", " * IMPORTANT:"),
11051 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
# Only print the usage hint when at least one repo had unread items.
11054 if newsReaderDisplay:
11055 print colorize("WARN", " *"),
11056 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# Flush any queued elog mod_echo messages immediately so later
# notifications (news, config-file warnings) appear last on the terminal.
# Returns True when messages were actually shown.
11059 def _flush_elog_mod_echo():
11061 Dump the mod_echo output now so that our other
11062 notifications are shown last.
11064 @returns: True if messages were shown, False otherwise.
11066 messages_shown = False
11068 from portage.elog import mod_echo
11069 except ImportError:
11070 pass # happens during downgrade to a version without the module
11072 messages_shown = bool(mod_echo._items)
11073 mod_echo.finalize()
11074 return messages_shown
# End-of-merge housekeeping: regenerate settings, log the exit status,
# flush elog echo output, refresh the GNU info index, report pending
# config-file updates and unread news, then exit with retval.
# NOTE(review): the final sys.exit(retval) and several try/finally lines
# are elided from this extract (the docstring at 11095 says it exits).
11076 def post_emerge(root_config, myopts, mtimedb, retval):
11078 Misc. things to run at the end of a merge session.
11081 Update Config Files
11084 Display preserved libs warnings
11087 @param trees: A dictionary mapping each ROOT to it's package databases
11089 @param mtimedb: The mtimeDB to store data needed across merge invocations
11090 @type mtimedb: MtimeDB class instance
11091 @param retval: Emerge's return value
11095 1. Calls sys.exit(retval)
11098 target_root = root_config.root
11099 trees = { target_root : root_config.trees }
11100 vardbapi = trees[target_root]["vartree"].dbapi
11101 settings = vardbapi.settings
11102 info_mtimes = mtimedb["info"]
11104 # Load the most current variables from ${ROOT}/etc/profile.env
11107 settings.regenerate()
11110 config_protect = settings.get("CONFIG_PROTECT","").split()
11111 infodirs = settings.get("INFOPATH","").split(":") + \
11112 settings.get("INFODIR","").split(":")
11116 if retval == os.EX_OK:
11117 exit_msg = " *** exiting successfully."
11119 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11120 emergelog("notitles" not in settings.features, exit_msg)
11122 _flush_elog_mod_echo()
# If the vdb counter hash is unchanged, nothing was merged/unmerged and
# the remaining (expensive) checks can be skipped.
11124 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11125 if counter_hash is not None and \
11126 counter_hash == vardbapi._counter_hash():
11127 # If vdb state has not changed then there's nothing else to do.
11130 vdb_path = os.path.join(target_root, portage.VDB_PATH)
11131 portage.util.ensure_dirs(vdb_path)
11133 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11134 vdb_lock = portage.locks.lockdir(vdb_path)
11138 if "noinfo" not in settings.features:
11139 chk_updated_info_files(target_root,
11140 infodirs, info_mtimes, retval)
11144 portage.locks.unlockdir(vdb_lock)
11146 chk_updated_cfg_files(target_root, config_protect)
11148 display_news_notification(root_config, myopts)
# Scan every CONFIG_PROTECT path under target_root for pending
# "._cfg????_*" update files (via find) and print a summary telling the
# user to consult the CONFIGURATION FILES section of the emerge man page.
# NOTE(review): the counters incremented here and several else/continue
# lines are elided from this extract.
11153 def chk_updated_cfg_files(target_root, config_protect):
11155 #number of directories with some protect files in them
11157 for x in config_protect:
11158 x = os.path.join(target_root, x.lstrip(os.path.sep))
11159 if not os.access(x, os.W_OK):
11160 # Avoid Permission denied errors generated
11164 mymode = os.lstat(x).st_mode
11167 if stat.S_ISLNK(mymode):
11168 # We want to treat it like a directory if it
11169 # is a symlink to an existing directory.
11171 real_mode = os.stat(x).st_mode
11172 if stat.S_ISDIR(real_mode):
# Directory protect entries get a recursive find; single files get a
# -maxdepth 1 find in their parent directory.
11176 if stat.S_ISDIR(mymode):
11177 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11179 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11180 os.path.split(x.rstrip(os.path.sep))
11181 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11182 a = commands.getstatusoutput(mycommand)
11184 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11186 # Show the error message alone, sending stdout to /dev/null.
11187 os.system(mycommand + " 1>/dev/null")
11189 files = a[1].split('\0')
11190 # split always produces an empty string as the last element
11191 if files and not files[-1]:
11195 print "\n"+colorize("WARN", " * IMPORTANT:"),
11196 if stat.S_ISDIR(mymode):
11197 print "%d config files in '%s' need updating." % \
11200 print "config file '%s' needs updating." % x
11203 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11204 " section of the " + bold("emerge")
11205 print " "+yellow("*")+" man page to learn how to update config files."
# Thin wrapper around portage.news.NewsManager: count unread, relevant
# news items for one repository, optionally updating the unread state.
11207 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
11210 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
11211 Returns the number of unread (yet relevent) items.
11213 @param portdb: a portage tree database
11214 @type portdb: pordbapi
11215 @param vardb: an installed package database
11216 @type vardb: vardbapi
11219 @param UNREAD_PATH:
11225 1. The number of unread but relevant news items.
# Imported lazily so startup does not pay for the news machinery.
11228 from portage.news import NewsManager
11229 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
11230 return manager.getUnreadItems( repo_id, update=update )
# Insert "category/" in front of the package-name part of an atom,
# preserving any leading operator characters (e.g. ">=foo-1" with
# category "cat" becomes ">=cat/foo-1").
# NOTE(review): the branch taken when the regex finds no word character
# (presumably returning None) is elided from this extract.
11232 def insert_category_into_atom(atom, category):
11233 alphanum = re.search(r'\w', atom)
11235 ret = atom[:alphanum.start()] + "%s/" % category + \
11236 atom[alphanum.start():]
# Return whether x is a valid package atom, allowing the category to be
# omitted: a dummy "cat/" prefix is spliced in before validation so bare
# package names pass portage.isvalidatom().
11241 def is_valid_package_atom(x):
11243 alphanum = re.search(r'\w', x)
11245 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11246 return portage.isvalidatom(x)
# Print the handbook URL explaining how to resolve blocked packages.
11248 def show_blocker_docs_link():
11250 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11251 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11253 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Point the user at the documentation for masked packages.
11256 def show_mask_docs():
11257 print "For more information, see the MASKED PACKAGES section in the emerge"
11258 print "man page or refer to the Gentoo Handbook."
11260 def action_sync(settings, trees, mtimedb, myopts, myaction):
11261 xterm_titles = "notitles" not in settings.features
11262 emergelog(xterm_titles, " === sync")
11263 myportdir = settings.get("PORTDIR", None)
11264 out = portage.output.EOutput()
11266 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
11268 if myportdir[-1]=="/":
11269 myportdir=myportdir[:-1]
11270 if not os.path.exists(myportdir):
11271 print ">>>",myportdir,"not found, creating it."
11272 os.makedirs(myportdir,0755)
11273 syncuri = settings.get("SYNC", "").strip()
11275 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11276 noiselevel=-1, level=logging.ERROR)
11280 updatecache_flg = False
11281 if myaction == "metadata":
11282 print "skipping sync"
11283 updatecache_flg = True
11284 elif syncuri[:8]=="rsync://":
11285 if not os.path.exists("/usr/bin/rsync"):
11286 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11287 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11292 import shlex, StringIO
11293 if settings["PORTAGE_RSYNC_OPTS"] == "":
11294 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11295 rsync_opts.extend([
11296 "--recursive", # Recurse directories
11297 "--links", # Consider symlinks
11298 "--safe-links", # Ignore links outside of tree
11299 "--perms", # Preserve permissions
11300 "--times", # Preserive mod times
11301 "--compress", # Compress the data transmitted
11302 "--force", # Force deletion on non-empty dirs
11303 "--whole-file", # Don't do block transfers, only entire files
11304 "--delete", # Delete files that aren't in the master tree
11305 "--stats", # Show final statistics about what was transfered
11306 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11307 "--exclude=/distfiles", # Exclude distfiles from consideration
11308 "--exclude=/local", # Exclude local from consideration
11309 "--exclude=/packages", # Exclude packages from consideration
11313 # The below validation is not needed when using the above hardcoded
11316 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11317 lexer = shlex.shlex(StringIO.StringIO(
11318 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11319 lexer.whitespace_split = True
11320 rsync_opts.extend(lexer)
11323 for opt in ("--recursive", "--times"):
11324 if opt not in rsync_opts:
11325 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11326 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11327 rsync_opts.append(opt)
11329 for exclude in ("distfiles", "local", "packages"):
11330 opt = "--exclude=/%s" % exclude
11331 if opt not in rsync_opts:
11332 portage.writemsg(yellow("WARNING:") + \
11333 " adding required option %s not included in " % opt + \
11334 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11335 rsync_opts.append(opt)
11337 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11338 def rsync_opt_startswith(opt_prefix):
11339 for x in rsync_opts:
11340 if x.startswith(opt_prefix):
11344 if not rsync_opt_startswith("--timeout="):
11345 rsync_opts.append("--timeout=%d" % mytimeout)
11347 for opt in ("--compress", "--whole-file"):
11348 if opt not in rsync_opts:
11349 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11350 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11351 rsync_opts.append(opt)
11353 if "--quiet" in myopts:
11354 rsync_opts.append("--quiet") # Shut up a lot
11356 rsync_opts.append("--verbose") # Print filelist
11358 if "--verbose" in myopts:
11359 rsync_opts.append("--progress") # Progress meter for each file
11361 if "--debug" in myopts:
11362 rsync_opts.append("--checksum") # Force checksum on all files
11364 # Real local timestamp file.
11365 servertimestampfile = os.path.join(
11366 myportdir, "metadata", "timestamp.chk")
11368 content = portage.util.grabfile(servertimestampfile)
11372 mytimestamp = time.mktime(time.strptime(content[0],
11373 "%a, %d %b %Y %H:%M:%S +0000"))
11374 except (OverflowError, ValueError):
11379 rsync_initial_timeout = \
11380 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11382 rsync_initial_timeout = 15
11385 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11386 except SystemExit, e:
11387 raise # Needed else can't exit
11389 maxretries=3 #default number of retries
11392 user_name, hostname, port = re.split(
11393 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11396 if user_name is None:
11398 updatecache_flg=True
11399 all_rsync_opts = set(rsync_opts)
11400 lexer = shlex.shlex(StringIO.StringIO(
11401 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
11402 lexer.whitespace_split = True
11403 extra_rsync_opts = list(lexer)
11405 all_rsync_opts.update(extra_rsync_opts)
11406 family = socket.AF_INET
11407 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
11408 family = socket.AF_INET
11409 elif socket.has_ipv6 and \
11410 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
11411 family = socket.AF_INET6
11413 SERVER_OUT_OF_DATE = -1
11414 EXCEEDED_MAX_RETRIES = -2
11420 for addrinfo in socket.getaddrinfo(
11421 hostname, None, family, socket.SOCK_STREAM):
11422 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
11423 # IPv6 addresses need to be enclosed in square brackets
11424 ips.append("[%s]" % addrinfo[4][0])
11426 ips.append(addrinfo[4][0])
11427 from random import shuffle
11429 except SystemExit, e:
11430 raise # Needed else can't exit
11431 except Exception, e:
11432 print "Notice:",str(e)
11437 dosyncuri = syncuri.replace(
11438 "//" + user_name + hostname + port + "/",
11439 "//" + user_name + ips[0] + port + "/", 1)
11440 except SystemExit, e:
11441 raise # Needed else can't exit
11442 except Exception, e:
11443 print "Notice:",str(e)
11447 if "--ask" in myopts:
11448 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
11453 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
11454 if "--quiet" not in myopts:
11455 print ">>> Starting rsync with "+dosyncuri+"..."
11457 emergelog(xterm_titles,
11458 ">>> Starting retry %d of %d with %s" % \
11459 (retries,maxretries,dosyncuri))
11460 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
11462 if mytimestamp != 0 and "--quiet" not in myopts:
11463 print ">>> Checking server timestamp ..."
11465 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
11467 if "--debug" in myopts:
11470 exitcode = os.EX_OK
11471 servertimestamp = 0
11472 # Even if there's no timestamp available locally, fetch the
11473 # timestamp anyway as an initial probe to verify that the server is
11474 # responsive. This protects us from hanging indefinitely on a
11475 # connection attempt to an unresponsive server which rsync's
11476 # --timeout option does not prevent.
11478 # Temporary file for remote server timestamp comparison.
11479 from tempfile import mkstemp
11480 fd, tmpservertimestampfile = mkstemp()
11482 mycommand = rsynccommand[:]
11483 mycommand.append(dosyncuri.rstrip("/") + \
11484 "/metadata/timestamp.chk")
11485 mycommand.append(tmpservertimestampfile)
11489 def timeout_handler(signum, frame):
11490 raise portage.exception.PortageException("timed out")
11491 signal.signal(signal.SIGALRM, timeout_handler)
11492 # Timeout here in case the server is unresponsive. The
11493 # --timeout rsync option doesn't apply to the initial
11494 # connection attempt.
11495 if rsync_initial_timeout:
11496 signal.alarm(rsync_initial_timeout)
11498 mypids.extend(portage.process.spawn(
11499 mycommand, env=settings.environ(), returnpid=True))
11500 exitcode = os.waitpid(mypids[0], 0)[1]
11501 content = portage.grabfile(tmpservertimestampfile)
11503 if rsync_initial_timeout:
11506 os.unlink(tmpservertimestampfile)
11509 except portage.exception.PortageException, e:
11513 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
11514 os.kill(mypids[0], signal.SIGTERM)
11515 os.waitpid(mypids[0], 0)
11516 # This is the same code rsync uses for timeout.
11519 if exitcode != os.EX_OK:
11520 if exitcode & 0xff:
11521 exitcode = (exitcode & 0xff) << 8
11523 exitcode = exitcode >> 8
11525 portage.process.spawned_pids.remove(mypids[0])
11528 servertimestamp = time.mktime(time.strptime(
11529 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
11530 except (OverflowError, ValueError):
11532 del mycommand, mypids, content
11533 if exitcode == os.EX_OK:
11534 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
11535 emergelog(xterm_titles,
11536 ">>> Cancelling sync -- Already current.")
11539 print ">>> Timestamps on the server and in the local repository are the same."
11540 print ">>> Cancelling all further sync action. You are already up to date."
11542 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11546 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
11547 emergelog(xterm_titles,
11548 ">>> Server out of date: %s" % dosyncuri)
11551 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
11553 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11556 exitcode = SERVER_OUT_OF_DATE
11557 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
11559 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
11560 exitcode = portage.process.spawn(mycommand,
11561 env=settings.environ())
11562 if exitcode in [0,1,3,4,11,14,20,21]:
11564 elif exitcode in [1,3,4,11,14,20,21]:
11567 # Code 2 indicates protocol incompatibility, which is expected
11568 # for servers with protocol < 29 that don't support
11569 # --prune-empty-directories. Retry for a server that supports
11570 # at least rsync protocol version 29 (>=rsync-2.6.4).
11575 if retries<=maxretries:
11576 print ">>> Retrying..."
11581 updatecache_flg=False
11582 exitcode = EXCEEDED_MAX_RETRIES
11586 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
11587 elif exitcode == SERVER_OUT_OF_DATE:
11589 elif exitcode == EXCEEDED_MAX_RETRIES:
11591 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
11596 msg.append("Rsync has reported that there is a syntax error. Please ensure")
11597 msg.append("that your SYNC statement is proper.")
11598 msg.append("SYNC=" + settings["SYNC"])
11600 msg.append("Rsync has reported that there is a File IO error. Normally")
11601 msg.append("this means your disk is full, but can be caused by corruption")
11602 msg.append("on the filesystem that contains PORTDIR. Please investigate")
11603 msg.append("and try again after the problem has been fixed.")
11604 msg.append("PORTDIR=" + settings["PORTDIR"])
11606 msg.append("Rsync was killed before it finished.")
11608 msg.append("Rsync has not successfully finished. It is recommended that you keep")
11609 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
11610 msg.append("to use rsync due to firewall or other restrictions. This should be a")
11611 msg.append("temporary problem unless complications exist with your network")
11612 msg.append("(and possibly your system's filesystem) configuration.")
11616 elif syncuri[:6]=="cvs://":
11617 if not os.path.exists("/usr/bin/cvs"):
11618 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
11619 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
11621 cvsroot=syncuri[6:]
11622 cvsdir=os.path.dirname(myportdir)
11623 if not os.path.exists(myportdir+"/CVS"):
11625 print ">>> Starting initial cvs checkout with "+syncuri+"..."
11626 if os.path.exists(cvsdir+"/gentoo-x86"):
11627 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
11630 os.rmdir(myportdir)
11632 if e.errno != errno.ENOENT:
11634 "!!! existing '%s' directory; exiting.\n" % myportdir)
11637 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
11638 print "!!! cvs checkout error; exiting."
11640 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
11643 print ">>> Starting cvs update with "+syncuri+"..."
11644 retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
11645 myportdir, settings, free=1)
11646 if retval != os.EX_OK:
11648 dosyncuri = syncuri
11650 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
11651 noiselevel=-1, level=logging.ERROR)
11654 if updatecache_flg and \
11655 myaction != "metadata" and \
11656 "metadata-transfer" not in settings.features:
11657 updatecache_flg = False
11659 # Reload the whole config from scratch.
11660 settings, trees, mtimedb = load_emerge_config(trees=trees)
11661 root_config = trees[settings["ROOT"]]["root_config"]
11662 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11664 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
11665 action_metadata(settings, portdb, myopts)
11667 if portage._global_updates(trees, mtimedb["updates"]):
11669 # Reload the whole config from scratch.
11670 settings, trees, mtimedb = load_emerge_config(trees=trees)
11671 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11672 root_config = trees[settings["ROOT"]]["root_config"]
11674 mybestpv = portdb.xmatch("bestmatch-visible",
11675 portage.const.PORTAGE_PACKAGE_ATOM)
11676 mypvs = portage.best(
11677 trees[settings["ROOT"]]["vartree"].dbapi.match(
11678 portage.const.PORTAGE_PACKAGE_ATOM))
11680 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
11682 if myaction != "metadata":
11683 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
11684 retval = portage.process.spawn(
11685 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
11686 dosyncuri], env=settings.environ())
11687 if retval != os.EX_OK:
11688 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
11690 if(mybestpv != mypvs) and not "--quiet" in myopts:
11692 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
11693 print red(" * ")+"that you update portage now, before any other packages are updated."
11695 print red(" * ")+"To update portage, run 'emerge portage' now."
11698 display_news_notification(root_config, myopts)
# Transfer/regenerate the Portage metadata cache ("metadata-transfer"):
# mirror the pregenerated entries under $PORTDIR/metadata/cache into the
# local depcachedir, showing a percentage progress indicator unless
# --quiet is given.
#   settings -- portage config (provides depcachedir, PORTDIR, modules)
#   portdb   -- portage tree dbapi used to enumerate cat/pkg entries
#   myopts   -- parsed emerge command-line options
# NOTE(review): this listing is elided; some original lines between the
# statements below are not shown here.
11701 def action_metadata(settings, portdb, myopts):
11702 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Temporarily set umask to 0002 while cache files are written; restored
# at the end of the function.
11703 old_umask = os.umask(0002)
11704 cachedir = os.path.normpath(settings.depcachedir)
# Sanity check: refuse to treat a primary system directory as the
# dependency cache directory.
11705 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
11706 "/lib", "/opt", "/proc", "/root", "/sbin",
11707 "/sys", "/tmp", "/usr", "/var"]:
11708 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
11709 "ROOT DIRECTORY ON YOUR SYSTEM."
11710 print >> sys.stderr, \
11711 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
11713 if not os.path.exists(cachedir):
11716 ec = portage.eclass_cache.cache(portdb.porttree_root)
11717 myportdir = os.path.realpath(settings["PORTDIR"])
# Source cache module: reads the rsync-distributed metadata/cache
# entries read-only; auxdbkeys defines which metadata fields exist.
11718 cm = settings.load_best_module("portdbapi.metadbmodule")(
11719 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
11721 from portage.cache import util
# Progress reporter plugged into mirror_cache(): iterates all cat/pkg
# entries and periodically prints "<n>%" (overwriting the previous value
# with backspaces) via update().
11723 class percentage_noise_maker(util.quiet_mirroring):
11724 def __init__(self, dbapi):
11726 self.cp_all = dbapi.cp_all()
11727 l = len(self.cp_all)
11728 self.call_update_min = 100000000
# One percent of the total cat/pkg count, used as the reporting step.
11729 self.min_cp_all = l/100.0
11733 def __iter__(self):
11734 for x in self.cp_all:
11736 if self.count > self.min_cp_all:
11737 self.call_update_min = 0
11739 for y in self.dbapi.cp_list(x):
# NOTE(review): "call_update_mine" looks like a typo for
# "call_update_min" (the attribute used everywhere else) -- confirm
# against upstream portage history before relying on it.
11741 self.call_update_mine = 0
11743 def update(self, *arg):
# pstr holds the last printed percentage; non-numeric initial value
# falls back to 1 via the ValueError handler.
11744 try: self.pstr = int(self.pstr) + 1
11745 except ValueError: self.pstr = 1
11746 sys.stdout.write("%s%i%%" % \
11747 ("\b" * (len(str(self.pstr))+1), self.pstr))
11749 self.call_update_min = 10000000
11751 def finish(self, *arg):
11752 sys.stdout.write("\b\b\b\b100%\n")
# With --quiet, use a plain generator over all cpvs and a silent
# reporter; otherwise the percentage reporter doubles as the source.
11755 if "--quiet" in myopts:
11756 def quicky_cpv_generator(cp_all_list):
11757 for x in cp_all_list:
11758 for y in portdb.cp_list(x):
11760 source = quicky_cpv_generator(portdb.cp_all())
11761 noise_maker = portage.cache.util.quiet_mirroring()
11763 noise_maker = source = percentage_noise_maker(portdb)
# Perform the actual transfer from the rsync'd cache (cm) into the
# local auxdb for this portdir, validating against the eclass cache.
11764 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
11765 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask.
11768 os.umask(old_umask)
# Regenerate metadata cache entries directly from the ebuilds in the tree
# (emerge --regen), delegating the parallel work to MetadataRegen.
#   settings          -- portage config (features consulted for xterm titles)
#   portdb            -- portage tree dbapi whose cache is regenerated
#   max_jobs/max_load -- parallelism limits passed through to MetadataRegen
# NOTE(review): this listing is elided; some original lines between the
# statements below are not shown here.
11770 def action_regen(settings, portdb, max_jobs, max_load):
11771 xterm_titles = "notitles" not in settings.features
11772 emergelog(xterm_titles, " === regen")
#regenerate cache entries
11774 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin -- the regen run is non-interactive.
11776 os.close(sys.stdin.fileno())
11777 except SystemExit, e:
11778 raise # Needed else can't exit
11783 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
11786 portage.writemsg_stdout("done!\n")
# Run the pkg_config() phase of a single installed package
# (emerge --config <atom>): resolve the atom against the vartree,
# disambiguate multiple matches (interactively with --ask), then invoke
# doebuild "config" followed by "clean" on success.
# NOTE(review): this listing is elided; some original lines between the
# statements below are not shown here.
11788 def action_config(settings, trees, myopts, myfiles):
# Exactly one package atom is accepted.
11789 if len(myfiles) != 1:
11790 print red("!!! config can only take a single package atom at this time\n")
11792 if not is_valid_package_atom(myfiles[0]):
11793 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
11795 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
11796 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages only (vartree).
11800 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
11801 except portage.exception.AmbiguousPackageName, e:
# Multiple matches thrown from cpv_expand
11805 print "No packages found.\n"
11807 elif len(pkgs) > 1:
# More than one installed version matched: with --ask, present a
# numbered menu (plus "X" to abort); otherwise list and bail out.
11808 if "--ask" in myopts:
11810 print "Please select a package to configure:"
11814 options.append(str(idx))
11815 print options[-1]+") "+pkg
11817 options.append("X")
11818 idx = userquery("Selection?", options)
11821 pkg = pkgs[int(idx)-1]
11823 print "The following packages available:"
11826 print "\nPlease use a specific atom or the --ask option."
11832 if "--ask" in myopts:
11833 if userquery("Ready to configure "+pkg+"?") == "No":
11836 print "Configuring pkg..."
11838 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
11839 mysettings = portage.config(clone=settings)
11840 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
11841 debug = mysettings.get("PORTAGE_DEBUG") == "1"
11842 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
# NOTE(review): '== 1' compares a string to an int and is therefore
# always False; the 'debug' local above compares against "1", which
# looks like the intended test -- confirm against upstream before fixing.
11844 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
11845 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the "clean" phase to remove the temporary build dir.
11846 if retval == os.EX_OK:
11847 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
11848 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Print system / configuration information (emerge --info): portage
# version, uname, tree timestamp, distcc/ccache versions, versions of a
# fixed list of toolchain packages, and the relevant configuration
# variables.  If package atoms are given in myfiles, additionally show
# per-package build-time settings that differ from the current config
# and attempt to run each package's pkg_info() phase.
# NOTE(review): this listing is elided; some original lines between the
# statements below are not shown here.
11851 def action_info(settings, trees, myopts, myfiles):
11852 print getportageversion(settings["PORTDIR"], settings["ROOT"],
11853 settings.profile_path, settings["CHOST"],
11854 trees[settings["ROOT"]]["vartree"].dbapi)
11856 header_title = "System Settings"
# Centered banner between two '=' rules.
11858 print header_width * "="
11859 print header_title.rjust(int(header_width/2 + len(header_title)/2))
11860 print header_width * "="
11861 print "System uname: "+platform.platform(aliased=1)
# Tree timestamp as recorded by the last sync.
11863 lastSync = portage.grabfile(os.path.join(
11864 settings["PORTDIR"], "metadata", "timestamp.chk"))
11865 print "Timestamp of tree:",
# Probe distcc/ccache via their command-line tools; only the first line
# of each tool's version output is shown.
11871 output=commands.getstatusoutput("distcc --version")
11873 print str(output[1].split("\n",1)[0]),
11874 if "distcc" in settings.features:
11879 output=commands.getstatusoutput("ccache -V")
11881 print str(output[1].split("\n",1)[0]),
11882 if "ccache" in settings.features:
# Toolchain packages to report, plus any extras the profile lists in
# profiles/info_pkgs; deduplicated before display.
11887 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
11888 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
11889 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
11890 myvars = portage.util.unique_array(myvars)
11894 if portage.isvalidatom(x):
11895 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
# Keep only (pn, ver, rev) from each cpv, sorted by version.
11896 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
11897 pkg_matches.sort(portage.pkgcmp)
11899 for pn, ver, rev in pkg_matches:
11901 pkgs.append(ver + "-" + rev)
11905 pkgs = ", ".join(pkgs)
11906 print "%-20s %s" % (x+":", pkgs)
11908 print "%-20s %s" % (x+":", "[NOT VALID]")
11910 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# With --verbose dump every variable; otherwise a curated list plus any
# extras from profiles/info_vars.
11912 if "--verbose" in myopts:
11913 myvars=settings.keys()
11915 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
11916 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
11917 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
11918 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
11920 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
11922 myvars = portage.util.unique_array(myvars)
11928 print '%s="%s"' % (x, settings[x])
# USE is displayed with USE_EXPAND-derived flags (e.g. video_cards_*)
# split out into their own VAR="..." entries.
11930 use = set(settings["USE"].split())
11931 use_expand = settings["USE_EXPAND"].split()
11933 for varname in use_expand:
11934 flag_prefix = varname.lower() + "_"
11935 for f in list(use):
11936 if f.startswith(flag_prefix):
11940 print 'USE="%s"' % " ".join(use),
11941 for varname in use_expand:
11942 myval = settings.get(varname)
11944 print '%s="%s"' % (varname, myval),
11947 unset_vars.append(x)
11949 print "Unset: "+", ".join(unset_vars)
# With --debug, dump the CVS id string of every portage submodule.
11952 if "--debug" in myopts:
11953 for x in dir(portage):
11954 module = getattr(portage, x)
11955 if "cvs_id_string" in dir(module):
11956 print "%s: %s" % (str(x), str(module.cvs_id_string))
# See if we can find any packages installed matching the strings
# passed on the command line
11961 vardb = trees[settings["ROOT"]]["vartree"].dbapi
11962 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11964 mypkgs.extend(vardb.match(x))
# If some packages were found...
# Get our global settings (we only print stuff if it varies from
# the current config)
11970 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
11971 auxkeys = mydesiredvars + [ "USE", "IUSE"]
11973 pkgsettings = portage.config(clone=settings)
11975 for myvar in mydesiredvars:
11976 global_vals[myvar] = set(settings.get(myvar, "").split())
# Loop through each package
# Only print settings if they differ from global settings
11980 header_title = "Package Settings"
11981 print header_width * "="
11982 print header_title.rjust(int(header_width/2 + len(header_title)/2))
11983 print header_width * "="
11984 from portage.output import EOutput
# Get all package specific variables
11988 auxvalues = vardb.aux_get(pkg, auxkeys)
11990 for i in xrange(len(auxkeys)):
11991 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
11993 for myvar in mydesiredvars:
# If the package variable doesn't match the
# current global variable, something has changed
# so set diff_found so we know to print
11997 if valuesmap[myvar] != global_vals[myvar]:
11998 diff_values[myvar] = valuesmap[myvar]
# Compare USE restricted to the package's IUSE (with defaults filtered).
11999 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12000 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12001 pkgsettings.reset()
# If a matching ebuild is no longer available in the tree, maybe it
# would make sense to compare against the flags for the best
# available version with the same slot?
12006 if portdb.cpv_exists(pkg):
12008 pkgsettings.setcpv(pkg, mydb=mydb)
12009 if valuesmap["IUSE"].intersection(
12010 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12011 diff_values["USE"] = valuesmap["USE"]
# If a difference was found, print the info for
# Print package info
12016 print "%s was built with the following:" % pkg
12017 for myvar in mydesiredvars + ["USE"]:
12018 if myvar in diff_values:
12019 mylist = list(diff_values[myvar])
12021 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12023 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12024 ebuildpath = vardb.findname(pkg)
12025 if not ebuildpath or not os.path.exists(ebuildpath):
12026 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): as in action_config, '== 1' compares a string to an int
# and is always False -- confirm intended behavior before changing.
12028 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12029 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12030 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Search the tree for each term in myfiles (emerge --search), delegating
# to the search class; bad regular expressions in a term are reported
# without aborting the remaining searches.
# NOTE(review): this listing is elided; some original lines between the
# statements below are not shown here.
12033 def action_search(root_config, myopts, myfiles, spinner):
12035 print "emerge: no search terms provided."
# Search behavior is driven by command-line flags: description search,
# quiet output, and binary-package (usepkg/usepkgonly) modes.
12037 searchinstance = search(root_config,
12038 spinner, "--searchdesc" in myopts,
12039 "--quiet" not in myopts, "--usepkg" in myopts,
12040 "--usepkgonly" in myopts)
12041 for mysearch in myfiles:
12043 searchinstance.execute(mysearch)
12044 except re.error, comment:
12045 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12047 searchinstance.output()
# Remove installed packages that are neither explicitly kept (world /
# system sets) nor required as a dependency of a kept package
# (emerge --depclean), or prune redundant versions (emerge --prune).
# Builds a reverse-dependency graph with the resolver, computes the set
# of removable packages, topologically orders them so dependents are
# unmerged before their dependencies, then calls unmerge().
# NOTE(review): this listing is elided; some original lines between the
# statements below are not shown here.
12049 def action_depclean(settings, trees, ldpath_mtimes,
12050 myopts, action, myfiles, spinner):
# Kill packages that aren't explicitly merged or are required as a
# dependency of another package. World file is explicit.
# Global depclean or prune operations are not very safe when there are
# missing dependencies since it's unknown how badly incomplete
# the dependency graph is, and we might accidentally remove packages
# that should have been pulled into the graph. On the other hand, it's
# relatively safe to ignore missing deps when only asked to remove
# specific packages.
12060 allow_missing_deps = len(myfiles) > 0
# Safety warning shown before a global depclean (built into msg below).
12063 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12064 msg.append("mistakes. Packages that are part of the world set will always\n")
12065 msg.append("be kept. They can be manually added to this set with\n")
12066 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12067 msg.append("package.provided (see portage(5)) will be removed by\n")
12068 msg.append("depclean, even if they are part of the world set.\n")
12070 msg.append("As a safety measure, depclean will not remove any packages\n")
12071 msg.append("unless *all* required dependencies have been resolved. As a\n")
12072 msg.append("consequence, it is often necessary to run %s\n" % \
12073 good("`emerge --update"))
12074 msg.append(good("--newuse --deep world`") + \
12075 " prior to depclean.\n")
# Only print the warning for a global (no-argument) depclean.
12077 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12078 portage.writemsg_stdout("\n")
12080 portage.writemsg_stdout(colorize("WARN", " * ") + x)
12082 xterm_titles = "notitles" not in settings.features
12083 myroot = settings["ROOT"]
12084 root_config = trees[myroot]["root_config"]
12085 getSetAtoms = root_config.setconfig.getSetAtoms
12086 vardb = trees[myroot]["vartree"].dbapi
# The system and world sets define what must be kept.
12088 required_set_names = ("system", "world")
12092 for s in required_set_names:
12093 required_sets[s] = InternalPackageSet(
12094 initial_atoms=getSetAtoms(s))
# When removing packages, use a temporary version of world
# which excludes packages that are intended to be eligible for
12100 world_temp_set = required_sets["world"]
12101 system_set = required_sets["system"]
# Empty system or world sets indicate a broken install; warn loudly and
# give the user a countdown to abort unless --pretend.
12103 if not system_set or not world_temp_set:
12106 writemsg_level("!!! You have no system list.\n",
12107 level=logging.ERROR, noiselevel=-1)
12109 if not world_temp_set:
12110 writemsg_level("!!! You have no world file.\n",
12111 level=logging.WARNING, noiselevel=-1)
12113 writemsg_level("!!! Proceeding is likely to " + \
12114 "break your installation.\n",
12115 level=logging.WARNING, noiselevel=-1)
12116 if "--pretend" not in myopts:
12117 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12119 if action == "depclean":
12120 emergelog(xterm_titles, " >>> depclean")
# Validate and expand the command-line atoms into args_set.
12123 args_set = InternalPackageSet()
12126 if not is_valid_package_atom(x):
12127 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12128 level=logging.ERROR, noiselevel=-1)
12129 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12132 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12133 except portage.exception.AmbiguousPackageName, e:
12134 msg = "The short ebuild name \"" + x + \
12135 "\" is ambiguous. Please specify " + \
12136 "one of the following " + \
12137 "fully-qualified ebuild names instead:"
12138 for line in textwrap.wrap(msg, 70):
12139 writemsg_level("!!! %s\n" % (line,),
12140 level=logging.ERROR, noiselevel=-1)
12142 writemsg_level(" %s\n" % colorize("INFORM", i),
12143 level=logging.ERROR, noiselevel=-1)
12144 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Bail out early if no installed package matches any argument atom.
12147 matched_packages = False
12150 matched_packages = True
12152 if not matched_packages:
12153 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the dependency graph with the resolver in "remove" mode.
12157 writemsg_level("\nCalculating dependencies ")
12158 resolver_params = create_depgraph_params(myopts, "remove")
12159 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12160 vardb = resolver.trees[myroot]["vartree"].dbapi
12162 if action == "depclean":
# Pull in everything that's installed but not matched
# by an argument atom since we don't want to clean any
# package if something depends on it.
12169 world_temp_set.clear()
12174 if args_set.findAtomForPackage(pkg) is None:
12175 world_temp_set.add("=" + pkg.cpv)
# On an invalid dependency string, keep the package (conservative).
12177 except portage.exception.InvalidDependString, e:
12178 show_invalid_depstring_notice(pkg,
12179 pkg.metadata["PROVIDE"], str(e))
12181 world_temp_set.add("=" + pkg.cpv)
12184 elif action == "prune":
# Pull in everything that's installed since we don't want
# to prune a package if something depends on it.
12188 world_temp_set.clear()
12189 world_temp_set.update(vardb.cp_all())
# Try to prune everything that's slotted.
12194 for cp in vardb.cp_all():
12195 if len(vardb.cp_list(cp)) > 1:
# Remove atoms from world that match installed packages
# that are also matched by argument atoms, but do not remove
# them if they match the highest installed version.
12203 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12204 if not pkgs_for_cp or pkg not in pkgs_for_cp:
12205 raise AssertionError("package expected in matches: " + \
12206 "cp = %s, cpv = %s matches = %s" % \
12207 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# The highest-versioned match is last in match_pkgs() order.
12209 highest_version = pkgs_for_cp[-1]
12210 if pkg == highest_version:
# pkg is the highest version
12212 world_temp_set.add("=" + pkg.cpv)
12215 if len(pkgs_for_cp) <= 1:
12216 raise AssertionError("more packages expected: " + \
12217 "cp = %s, cpv = %s matches = %s" % \
12218 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12221 if args_set.findAtomForPackage(pkg) is None:
12222 world_temp_set.add("=" + pkg.cpv)
12224 except portage.exception.InvalidDependString, e:
12225 show_invalid_depstring_notice(pkg,
12226 pkg.metadata["PROVIDE"], str(e))
12228 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly trimmed) system/world sets so the
# graph captures everything that must be kept.
12232 for s, package_set in required_sets.iteritems():
12233 set_atom = SETPREFIX + s
12234 set_arg = SetArg(arg=set_atom, set=package_set,
12235 root_config=resolver.roots[myroot])
12236 set_args[s] = set_arg
12237 for atom in set_arg.set:
12238 resolver._dep_stack.append(
12239 Dependency(atom=atom, root=myroot, parent=set_arg))
12240 resolver.digraph.add(set_arg, None)
12242 success = resolver._complete_graph()
12243 writemsg_level("\b\b... done!\n")
12245 resolver.display_problems()
# Report dependencies the resolver could not satisfy; returns truthy
# when the operation should abort (unless allow_missing_deps).
12250 def unresolved_deps():
12252 unresolvable = set()
12253 for dep in resolver._initially_unsatisfied_deps:
12254 if isinstance(dep.parent, Package) and \
12255 (dep.priority > UnmergeDepPriority.SOFT):
12256 unresolvable.add((dep.atom, dep.parent.cpv))
12258 if not unresolvable:
12261 if unresolvable and not allow_missing_deps:
12262 prefix = bad(" * ")
12264 msg.append("Dependencies could not be completely resolved due to")
12265 msg.append("the following required packages not being installed:")
12267 for atom, parent in unresolvable:
12268 msg.append(" %s pulled in by:" % (atom,))
12269 msg.append(" %s" % (parent,))
12271 msg.append("Have you forgotten to run " + \
12272 good("`emerge --update --newuse --deep world`") + " prior to")
12273 msg.append(("%s? It may be necessary to manually " + \
12274 "uninstall packages that no longer") % action)
12275 msg.append("exist in the portage tree since " + \
12276 "it may not be possible to satisfy their")
12277 msg.append("dependencies. Also, be aware of " + \
12278 "the --with-bdeps option that is documented")
12279 msg.append("in " + good("`man emerge`") + ".")
12280 if action == "prune":
12282 msg.append("If you would like to ignore " + \
12283 "dependencies then use %s." % good("--nodeps"))
12284 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12285 level=logging.ERROR, noiselevel=-1)
12289 if unresolved_deps():
# Work on a copy of the resolver's graph; count required packages for
# the summary printed at the end.
12292 graph = resolver.digraph.copy()
12293 required_pkgs_total = 0
12295 if isinstance(node, Package):
12296 required_pkgs_total += 1
# With --verbose, show which packages pull in a kept package.
12298 def show_parents(child_node):
12299 parent_nodes = graph.parent_nodes(child_node)
12300 if not parent_nodes:
# With --prune, the highest version can be pulled in without any
# real parent since all installed packages are pulled in. In that
# case there's nothing to show here.
12306 for node in parent_nodes:
12307 parent_strs.append(str(getattr(node, "cpv", node)))
12310 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
12311 for parent_str in parent_strs:
12312 msg.append(" %s\n" % (parent_str,))
12314 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Anything installed (or argument-matched) that is NOT in the graph is
# removable; with --verbose, explain why the rest are kept.
12316 def create_cleanlist():
12317 pkgs_to_remove = []
12319 if action == "depclean":
12325 arg_atom = args_set.findAtomForPackage(pkg)
12326 except portage.exception.InvalidDependString:
# this error has already been displayed by now
12331 if pkg not in graph:
12332 pkgs_to_remove.append(pkg)
12333 elif "--verbose" in myopts:
12338 if pkg not in graph:
12339 pkgs_to_remove.append(pkg)
12340 elif "--verbose" in myopts:
12343 elif action == "prune":
# Prune really uses all installed instead of world. It's not
# a real reverse dependency so don't display it as such.
12346 graph.remove(set_args["world"])
12348 for atom in args_set:
12349 for pkg in vardb.match_pkgs(atom):
12350 if pkg not in graph:
12351 pkgs_to_remove.append(pkg)
12352 elif "--verbose" in myopts:
12355 if not pkgs_to_remove:
12357 ">>> No packages selected for removal by %s\n" % action)
12358 if "--verbose" not in myopts:
12360 ">>> To see reverse dependencies, use %s\n" % \
12362 if action == "prune":
12364 ">>> To ignore dependencies, use %s\n" % \
12367 return pkgs_to_remove
12369 cleanlist = create_cleanlist()
12372 clean_set = set(cleanlist)
# Use a topological sort to create an unmerge order such that
# each package is unmerged before its dependencies. This is
# necessary to avoid breaking things that may need to run
# during pkg_prerm or pkg_postrm phases.
# Create a new graph to account for dependencies between the
# packages being unmerged.
12384 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
12385 runtime = UnmergeDepPriority(runtime=True)
12386 runtime_post = UnmergeDepPriority(runtime_post=True)
12387 buildtime = UnmergeDepPriority(buildtime=True)
# Map each dependency class to its unmerge priority.
12389 "RDEPEND": runtime,
12390 "PDEPEND": runtime_post,
12391 "DEPEND": buildtime,
# Add an edge child->parent for every dependency between two packages
# that are both scheduled for removal.
12394 for node in clean_set:
12395 graph.add(node, None)
12397 node_use = node.metadata["USE"].split()
12398 for dep_type in dep_keys:
12399 depstr = node.metadata[dep_type]
# dep_check is run non-strict for installed packages; the global
# strict flag is restored immediately afterwards.
12403 portage.dep._dep_check_strict = False
12404 success, atoms = portage.dep_check(depstr, None, settings,
12405 myuse=node_use, trees=resolver._graph_trees,
12408 portage.dep._dep_check_strict = True
12410 show_invalid_depstring_notice(
12411 ("installed", myroot, node, "nomerge"),
12415 priority = priority_map[dep_type]
# Blocker atoms ("!...") are not dependencies for ordering.
12417 if atom.startswith("!"):
12419 matches = vardb.match_pkgs(atom)
12422 for child_node in matches:
12423 if child_node in clean_set:
12424 graph.add(child_node, node, priority=priority)
12427 if len(graph.order) == len(graph.root_nodes()):
# If there are no dependencies between packages
# let unmerge() group them by cat/pn.
12431 cleanlist = [pkg.cpv for pkg in graph.order]
# Order nodes from lowest to highest overall reference count for
# optimal root node selection.
12435 node_refcounts = {}
12436 for node in graph.order:
12437 node_refcounts[node] = len(graph.parent_nodes(node))
12438 def cmp_reference_count(node1, node2):
12439 return node_refcounts[node1] - node_refcounts[node2]
12440 graph.order.sort(cmp_reference_count)
# Pop root nodes repeatedly, progressively ignoring weaker dependency
# priorities so that circular dependencies cannot stall the loop.
12442 ignore_priority_range = [None]
12443 ignore_priority_range.extend(
12444 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
12445 while not graph.empty():
12446 for ignore_priority in ignore_priority_range:
12447 nodes = graph.root_nodes(ignore_priority=ignore_priority)
12451 raise AssertionError("no root nodes")
12452 if ignore_priority is not None:
# Some deps have been dropped due to circular dependencies,
# so only pop one node in order to minimize the number that
12459 cleanlist.append(node.cpv)
# Perform the actual unmerge in the computed order.
12461 unmerge(root_config, myopts, "unmerge", cleanlist,
12462 ldpath_mtimes, ordered=ordered)
12464 if action == "prune":
# Summary statistics (skipped when quiet and nothing was removed).
12467 if not cleanlist and "--quiet" in myopts:
12470 print "Packages installed: "+str(len(vardb.cpv_all()))
12471 print "Packages in world: " + \
12472 str(len(root_config.sets["world"].getAtoms()))
12473 print "Packages in system: " + \
12474 str(len(root_config.sets["system"].getAtoms()))
12475 print "Required packages: "+str(required_pkgs_total)
12476 if "--pretend" in myopts:
12477 print "Number to remove: "+str(len(cleanlist))
12479 print "Number removed: "+str(len(cleanlist))
# Rebuild a depgraph from the saved resume list in mtimedb (for
# emerge --resume).  With skip_unsatisfied, iteratively drops mergelist
# entries whose dependencies can no longer be satisfied (and any parents
# those drops invalidate) until the resume command loads cleanly.
# NOTE(review): this listing is elided; some original lines between the
# statements below are not shown here.
12481 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
12482 skip_masked=False, skip_unsatisfied=False):
Construct a depgraph for the given resume list. This will raise
PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
@returns: (success, depgraph, dropped_tasks)
12489 mergelist = mtimedb["resume"]["mergelist"]
12490 dropped_tasks = set()
# Retry loop (implied): build a fresh depgraph and attempt to load the
# saved resume command against it.
12492 mydepgraph = depgraph(settings, trees,
12493 myopts, myparams, spinner)
12495 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
12496 skip_masked=skip_masked)
12497 except depgraph.UnsatisfiedResumeDep, e:
# Without skip_unsatisfied the exception propagates to the caller.
12498 if not skip_unsatisfied:
12501 graph = mydepgraph.digraph
# Start from the direct parents of each unsatisfied dep, then walk
# upward collecting every parent whose deps become unsatisfied once
# the child is dropped.
12502 unsatisfied_parents = dict((dep.parent, dep.parent) \
12503 for dep in e.value)
12504 traversed_nodes = set()
12505 unsatisfied_stack = list(unsatisfied_parents)
12506 while unsatisfied_stack:
12507 pkg = unsatisfied_stack.pop()
12508 if pkg in traversed_nodes:
12510 traversed_nodes.add(pkg)
# If this package was pulled in by a parent
# package scheduled for merge, removing this
# package may cause the parent package's
# dependency to become unsatisfied.
12516 for parent_node in graph.parent_nodes(pkg):
12517 if not isinstance(parent_node, Package) \
12518 or parent_node.operation not in ("merge", "nomerge"):
12521 graph.child_nodes(parent_node,
12522 ignore_priority=DepPriority.SOFT)
12523 if pkg in unsatisfied:
12524 unsatisfied_parents[parent_node] = parent_node
12525 unsatisfied_stack.append(parent_node)
# Drop every mergelist entry whose package is now unsatisfied.
12527 pruned_mergelist = [x for x in mergelist \
12528 if isinstance(x, list) and \
12529 tuple(x) not in unsatisfied_parents]
# If the mergelist doesn't shrink then this loop is infinite.
12532 if len(pruned_mergelist) == len(mergelist):
# This happens if a package can't be dropped because
# it's already installed, but it has unsatisfied PDEPEND.
12536 mergelist[:] = pruned_mergelist
# Exclude installed packages that have been removed from the graph due
# to failure to build/install runtime dependencies after the dependent
# package has already been installed.
12541 dropped_tasks.update(pkg for pkg in \
12542 unsatisfied_parents if pkg.operation != "nomerge")
# Break references held by the dropped nodes so they can be collected.
12543 mydepgraph.break_refs(unsatisfied_parents)
12545 del e, graph, traversed_nodes, \
12546 unsatisfied_parents, unsatisfied_stack
12550 return (success, mydepgraph, dropped_tasks)
# Top-level driver for a merge run (emerge <targets>, emerge --resume, ...).
# Builds or resumes a dependency graph, optionally displays it and prompts
# the user, then hands the resulting merge list to the Scheduler.
12552 def action_build(settings, trees, mtimedb,
12553 myopts, myaction, myfiles, spinner):
12555 # validate the state of the resume data
12556 # so that we can make assumptions later.
12557 for k in ("resume", "resume_backup"):
12558 if k not in mtimedb:
12560 resume_data = mtimedb[k]
12561 if not isinstance(resume_data, dict):
12564 mergelist = resume_data.get("mergelist")
12565 if not isinstance(mergelist, list):
12568 resume_opts = resume_data.get("myopts")
12569 if not isinstance(resume_opts, (dict, list)):
12572 favorites = resume_data.get("favorites")
12573 if not isinstance(favorites, list):
# --resume: promote "resume_backup" to "resume" if needed and merge the
# saved options into the current ones (minus options that must not be
# inherited across invocations).
12578 if "--resume" in myopts and \
12579 ("resume" in mtimedb or
12580 "resume_backup" in mtimedb):
12582 if "resume" not in mtimedb:
12583 mtimedb["resume"] = mtimedb["resume_backup"]
12584 del mtimedb["resume_backup"]
12586 # "myopts" is a list for backward compatibility.
12587 resume_opts = mtimedb["resume"].get("myopts", [])
12588 if isinstance(resume_opts, list):
12589 resume_opts = dict((k,True) for k in resume_opts)
12590 for opt in ("--skipfirst", "--ask", "--tree"):
12591 resume_opts.pop(opt, None)
12592 myopts.update(resume_opts)
12594 if "--debug" in myopts:
12595 writemsg_level("myopts %s\n" % (myopts,))
12597 # Adjust config according to options of the command being resumed.
12598 for myroot in trees:
12599 mysettings = trees[myroot]["vartree"].settings
12600 mysettings.unlock()
12601 adjust_config(myopts, mysettings)
12603 del myroot, mysettings
# Cache frequently-tested option flags as plain booleans.
12605 ldpath_mtimes = mtimedb["ldpath"]
12608 buildpkgonly = "--buildpkgonly" in myopts
12609 pretend = "--pretend" in myopts
12610 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
12611 ask = "--ask" in myopts
12612 nodeps = "--nodeps" in myopts
12613 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
12614 tree = "--tree" in myopts
12615 if nodeps and tree:
12617 del myopts["--tree"]
12618 portage.writemsg(colorize("WARN", " * ") + \
12619 "--tree is broken with --nodeps. Disabling...\n")
12620 debug = "--debug" in myopts
12621 verbose = "--verbose" in myopts
12622 quiet = "--quiet" in myopts
12623 if pretend or fetchonly:
12624 # make the mtimedb readonly
12625 mtimedb.filename = None
# Warn that --digest can mask corruption; repoman manifest is preferred.
12626 if "--digest" in myopts:
12627 msg = "The --digest option can prevent corruption from being" + \
12628 " noticed. The `repoman manifest` command is the preferred" + \
12629 " way to generate manifests and it is capable of doing an" + \
12630 " entire repository or category at once."
12631 prefix = bad(" * ")
12632 writemsg(prefix + "\n")
12633 from textwrap import wrap
12634 for line in wrap(msg, 72):
12635 writemsg("%s%s\n" % (prefix, line))
12636 writemsg(prefix + "\n")
# Informational banner describing what kind of list will be shown.
12638 if "--quiet" not in myopts and \
12639 ("--pretend" in myopts or "--ask" in myopts or \
12640 "--tree" in myopts or "--verbose" in myopts):
12642 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
12644 elif "--buildpkgonly" in myopts:
12648 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
12650 print darkgreen("These are the packages that would be %s, in reverse order:") % action
12654 print darkgreen("These are the packages that would be %s, in order:") % action
12657 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
12658 if not show_spinner:
12659 spinner.update = spinner.update_quiet
# Resume path: rebuild the depgraph from the saved resume state.
12662 favorites = mtimedb["resume"].get("favorites")
12663 if not isinstance(favorites, list):
12667 print "Calculating dependencies ",
12668 myparams = create_depgraph_params(myopts, myaction)
# --skipfirst: locate the first "merge" task so it can be skipped.
12670 resume_data = mtimedb["resume"]
12671 mergelist = resume_data["mergelist"]
12672 if mergelist and "--skipfirst" in myopts:
12673 for i, task in enumerate(mergelist):
12674 if isinstance(task, list) and \
12675 task and task[-1] == "merge":
12679 skip_masked = "--skipfirst" in myopts
12680 skip_unsatisfied = "--skipfirst" in myopts
12684 success, mydepgraph, dropped_tasks = resume_depgraph(
12685 settings, trees, mtimedb, myopts, myparams, spinner,
12686 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
# Resume failed: explain why (invalid list, masked packages, missing
# packages) and, in non-interactive mode, discard the stale resume data.
12687 except (portage.exception.PackageNotFound,
12688 depgraph.UnsatisfiedResumeDep), e:
12689 if isinstance(e, depgraph.UnsatisfiedResumeDep):
12690 mydepgraph = e.depgraph
12693 from textwrap import wrap
12694 from portage.output import EOutput
12697 resume_data = mtimedb["resume"]
12698 mergelist = resume_data.get("mergelist")
12699 if not isinstance(mergelist, list):
12701 if mergelist and debug or (verbose and not quiet):
12702 out.eerror("Invalid resume list:")
12705 for task in mergelist:
12706 if isinstance(task, list):
12707 out.eerror(indent + str(tuple(task)))
12710 if isinstance(e, depgraph.UnsatisfiedResumeDep):
12711 out.eerror("One or more packages are either masked or " + \
12712 "have missing dependencies:")
12715 for dep in e.value:
12716 if dep.atom is None:
12717 out.eerror(indent + "Masked package:")
12718 out.eerror(2 * indent + str(dep.parent))
12721 out.eerror(indent + str(dep.atom) + " pulled in by:")
12722 out.eerror(2 * indent + str(dep.parent))
12724 msg = "The resume list contains packages " + \
12725 "that are either masked or have " + \
12726 "unsatisfied dependencies. " + \
12727 "Please restart/continue " + \
12728 "the operation manually, or use --skipfirst " + \
12729 "to skip the first package in the list and " + \
12730 "any other packages that may be " + \
12731 "masked or have missing dependencies."
12732 for line in wrap(msg, 72):
12734 elif isinstance(e, portage.exception.PackageNotFound):
12735 out.eerror("An expected package is " + \
12736 "not available: %s" % str(e))
12738 msg = "The resume list contains one or more " + \
12739 "packages that are no longer " + \
12740 "available. Please restart/continue " + \
12741 "the operation manually."
12742 for line in wrap(msg, 72):
12746 print "\b\b... done!"
# Report any tasks that resume_depgraph() had to drop.
12750 portage.writemsg("!!! One or more packages have been " + \
12751 "dropped due to\n" + \
12752 "!!! masking or unsatisfied dependencies:\n\n",
12754 for task in dropped_tasks:
12755 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
12756 portage.writemsg("\n", noiselevel=-1)
12759 if mydepgraph is not None:
12760 mydepgraph.display_problems()
12761 if not (ask or pretend):
12762 # delete the current list and also the backup
12763 # since it's probably stale too.
12764 for k in ("resume", "resume_backup"):
12765 mtimedb.pop(k, None)
12770 if ("--resume" in myopts):
12771 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the command-line targets.
12774 myparams = create_depgraph_params(myopts, myaction)
12775 if "--quiet" not in myopts and "--nodeps" not in myopts:
12776 print "Calculating dependencies ",
12778 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
12780 retval, favorites = mydepgraph.select_files(myfiles)
12781 except portage.exception.PackageNotFound, e:
12782 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
12784 except portage.exception.PackageSetNotFound, e:
12785 root_config = trees[settings["ROOT"]]["root_config"]
12786 display_missing_pkg_set(root_config, e.value)
12789 print "\b\b... done!"
12791 mydepgraph.display_problems()
# Interactive/verbose display phase (--ask/--tree/--verbose without
# --pretend): show the merge list and build the confirmation prompt.
12794 if "--pretend" not in myopts and \
12795 ("--ask" in myopts or "--tree" in myopts or \
12796 "--verbose" in myopts) and \
12797 not ("--quiet" in myopts and "--ask" not in myopts):
12798 if "--resume" in myopts:
12799 mymergelist = mydepgraph.altlist()
12800 if len(mymergelist) == 0:
12801 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12803 favorites = mtimedb["resume"]["favorites"]
12804 retval = mydepgraph.display(
12805 mydepgraph.altlist(reversed=tree),
12806 favorites=favorites)
12807 mydepgraph.display_problems()
12808 if retval != os.EX_OK:
12810 prompt="Would you like to resume merging these packages?"
12812 retval = mydepgraph.display(
12813 mydepgraph.altlist(reversed=("--tree" in myopts)),
12814 favorites=favorites)
12815 mydepgraph.display_problems()
12816 if retval != os.EX_OK:
12819 for x in mydepgraph.altlist():
12820 if isinstance(x, Package) and x.operation == "merge":
# Determine which favorites are eligible to be recorded in the world file.
12824 sets = trees[settings["ROOT"]]["root_config"].sets
12825 world_candidates = None
12826 if "--noreplace" in myopts and \
12827 not oneshot and favorites:
12828 # Sets that are not world candidates are filtered
12829 # out here since the favorites list needs to be
12830 # complete for depgraph.loadResumeCommand() to
12831 # operate correctly.
12832 world_candidates = [x for x in favorites \
12833 if not (x.startswith(SETPREFIX) and \
12834 not sets[x[1:]].world_candidate)]
12835 if "--noreplace" in myopts and \
12836 not oneshot and world_candidates:
12838 for x in world_candidates:
12839 print " %s %s" % (good("*"), x)
12840 prompt="Would you like to add these packages to your world favorites?"
12841 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
12842 prompt="Nothing to merge; would you like to auto-clean packages?"
12845 print "Nothing to merge; quitting."
12848 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
12849 prompt="Would you like to fetch the source files for these packages?"
12851 prompt="Would you like to merge these packages?"
12853 if "--ask" in myopts and userquery(prompt) == "No":
12858 # Don't ask again (e.g. when auto-cleaning packages after merge)
12859 myopts.pop("--ask", None)
# Pretend-only display phase (no confirmation prompt).
12861 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
12862 if ("--resume" in myopts):
12863 mymergelist = mydepgraph.altlist()
12864 if len(mymergelist) == 0:
12865 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12867 favorites = mtimedb["resume"]["favorites"]
12868 retval = mydepgraph.display(
12869 mydepgraph.altlist(reversed=tree),
12870 favorites=favorites)
12871 mydepgraph.display_problems()
12872 if retval != os.EX_OK:
12875 retval = mydepgraph.display(
12876 mydepgraph.altlist(reversed=("--tree" in myopts)),
12877 favorites=favorites)
12878 mydepgraph.display_problems()
12879 if retval != os.EX_OK:
# --buildpkgonly sanity check (pretend branch): every edge below MEDIUM
# priority must already be satisfied, since nothing will be merged.
12881 if "--buildpkgonly" in myopts:
12882 graph_copy = mydepgraph.digraph.clone()
12883 for node in list(graph_copy.order):
12884 if not isinstance(node, Package):
12885 graph_copy.remove(node)
12886 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
12887 print "\n!!! --buildpkgonly requires all dependencies to be merged."
12888 print "!!! You have to merge the dependencies before you can build this package.\n"
# Same --buildpkgonly check for the real (non-pretend) run.
12891 if "--buildpkgonly" in myopts:
12892 graph_copy = mydepgraph.digraph.clone()
12893 for node in list(graph_copy.order):
12894 if not isinstance(node, Package):
12895 graph_copy.remove(node)
12896 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
12897 print "\n!!! --buildpkgonly requires all dependencies to be merged."
12898 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Merge phase, resume flavor: feed the saved list to the Scheduler.
# break_refs()/del/clear_caches() free memory before the long merge run.
12901 if ("--resume" in myopts):
12902 favorites=mtimedb["resume"]["favorites"]
12903 mymergelist = mydepgraph.altlist()
12904 mydepgraph.break_refs(mymergelist)
12905 mergetask = Scheduler(settings, trees, mtimedb, myopts,
12906 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
12907 del mydepgraph, mymergelist
12908 clear_caches(trees)
12910 retval = mergetask.merge()
12911 merge_count = mergetask.curval
# Non-resume merge: rotate any meaningful current resume list into
# "resume_backup", then record fresh resume state for this run.
12913 if "resume" in mtimedb and \
12914 "mergelist" in mtimedb["resume"] and \
12915 len(mtimedb["resume"]["mergelist"]) > 1:
12916 mtimedb["resume_backup"] = mtimedb["resume"]
12917 del mtimedb["resume"]
12919 mtimedb["resume"]={}
12920 # Stored as a dict starting with portage-2.1.6_rc1, and supported
12921 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
12922 # a list type for options.
12923 mtimedb["resume"]["myopts"] = myopts.copy()
12925 # Convert Atom instances to plain str since the mtimedb loader
12926 # sets unpickler.find_global = None which causes unpickler.load()
12927 # to raise the following exception:
12929 # cPickle.UnpicklingError: Global and instance pickles are not supported.
12931 # TODO: Maybe stop setting find_global = None, or find some other
12932 # way to avoid accidental triggering of the above UnpicklingError.
12933 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
# --digest: regenerate manifests for every ebuild about to be merged.
12935 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
12936 for pkgline in mydepgraph.altlist():
12937 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
12938 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
12939 tmpsettings = portage.config(clone=settings)
12941 if settings.get("PORTAGE_DEBUG", "") == "1":
12943 retval = portage.doebuild(
12944 y, "digest", settings["ROOT"], tmpsettings, edebug,
12945 ("--pretend" in myopts),
12946 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
# Normal merge: hand the computed list to the Scheduler, again dropping
# the depgraph and caches first to reduce memory pressure.
12949 pkglist = mydepgraph.altlist()
12950 mydepgraph.saveNomergeFavorites()
12951 mydepgraph.break_refs(pkglist)
12952 mergetask = Scheduler(settings, trees, mtimedb, myopts,
12953 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
12954 del mydepgraph, pkglist
12955 clear_caches(trees)
12957 retval = mergetask.merge()
12958 merge_count = mergetask.curval
# Post-merge: honor AUTOCLEAN (or warn loudly when it is disabled).
12960 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
12961 if "yes" == settings.get("AUTOCLEAN"):
12962 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
12963 unmerge(trees[settings["ROOT"]]["root_config"],
12964 myopts, "clean", [],
12965 ldpath_mtimes, autoclean=1)
12967 portage.writemsg_stdout(colorize("WARN", "WARNING:")
12968 + " AUTOCLEAN is disabled. This can cause serious"
12969 + " problems due to overlapping packages.\n")
# Report that two mutually-exclusive actions were requested on the same
# command line; prints both action names to stderr. (Presumably followed
# by a process exit -- trailing line(s) not visible in this excerpt.)
12973 def multiple_actions(action1, action2):
12974 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
12975 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
12978 def insert_optional_args(args):
12980 Parse optional arguments and insert a value if one has
12981 not been provided. This is done before feeding the args
12982 to the optparse parser since that parser does not support
12983 this feature natively.
# Only the --jobs/-j option takes an optional argument here.
12987 jobs_opts = ("-j", "--jobs")
# Process the arguments as a stack so the next token can be peeked at
# cheaply when looking for a detached job count ("-j 3").
12988 arg_stack = args[:]
12989 arg_stack.reverse()
12991 arg = arg_stack.pop()
# A short-option cluster containing "j" (e.g. "-avj") counts as a job
# option too; "--..." long options are excluded.
12993 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
12994 if not (short_job_opt or arg in jobs_opts):
12995 new_args.append(arg)
12998 # Insert an empty placeholder in order to
12999 # satisfy the requirements of optparse.
13001 new_args.append("--jobs")
# Fused form ("-j3"): the count follows the option letters directly.
# Any other short flags clustered with "j" are kept in saved_opts so
# they can be re-emitted after the job count.
13004 if short_job_opt and len(arg) > 2:
13005 if arg[:2] == "-j":
13007 job_count = int(arg[2:])
13009 saved_opts = arg[2:]
13012 saved_opts = arg[1:].replace("j", "")
# Detached form ("-j 3"): peek at the next token for an integer count.
13014 if job_count is None and arg_stack:
13016 job_count = int(arg_stack[-1])
13020 # Discard the job count from the stack
13021 # since we're consuming it here.
# No count given at all: "True" is the sentinel for unlimited jobs.
13024 if job_count is None:
13025 # unlimited number of jobs
13026 new_args.append("True")
13028 new_args.append(str(job_count))
13030 if saved_opts is not None:
# Re-emit the short options that were clustered with "j".
13031 new_args.append("-" + saved_opts)
# Parse the emerge command line into (myaction, myopts, myfiles) using
# optparse, driven by the module-level actions/options/shortmapping tables.
13035 def parse_opts(tmpcmdline, silent=False):
13040 global actions, options, shortmapping
13042 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, keyed by long-option name; the mapped
# dicts are passed straight to OptionParser.add_option() as kwargs.
13043 argument_options = {
13045 "help":"specify the location for portage configuration files",
13049 "help":"enable or disable color output",
13051 "choices":("y", "n")
13056 "help" : "Specifies the number of packages to build " + \
13062 "--load-average": {
13064 "help" :"Specifies that no new builds should be started " + \
13065 "if there are other builds running and the load average " + \
13066 "is at least LOAD (a floating-point number).",
13072 "help":"include unnecessary build time dependencies",
13074 "choices":("y", "n")
13077 "help":"specify conditions to trigger package reinstallation",
13079 "choices":["changed-use"]
# Build the parser; emerge provides its own help, so drop optparse's.
13083 from optparse import OptionParser
13084 parser = OptionParser()
13085 if parser.has_option("--help"):
13086 parser.remove_option("--help")
# NOTE: lstrip("--") strips leading '-' characters (not a prefix), which
# is equivalent for these option names since none start with '-'.
13088 for action_opt in actions:
13089 parser.add_option("--" + action_opt, action="store_true",
13090 dest=action_opt.replace("-", "_"), default=False)
13091 for myopt in options:
13092 parser.add_option(myopt, action="store_true",
13093 dest=myopt.lstrip("--").replace("-", "_"), default=False)
13094 for shortopt, longopt in shortmapping.iteritems():
13095 parser.add_option("-" + shortopt, action="store_true",
13096 dest=longopt.lstrip("--").replace("-", "_"), default=False)
13097 for myalias, myopt in longopt_aliases.iteritems():
13098 parser.add_option(myalias, action="store_true",
13099 dest=myopt.lstrip("--").replace("-", "_"), default=False)
13101 for myopt, kwargs in argument_options.iteritems():
13102 parser.add_option(myopt,
13103 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# insert_optional_args() fills in implicit --jobs values so optparse
# can cope with the optional argument.
13105 tmpcmdline = insert_optional_args(tmpcmdline)
13107 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Validate --jobs: "True" means unlimited, otherwise an integer.
13111 if myoptions.jobs == "True":
13115 jobs = int(myoptions.jobs)
13119 if jobs is not True and \
13123 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
13124 (myoptions.jobs,), noiselevel=-1)
13126 myoptions.jobs = jobs
# Validate --load-average: must parse as a float; non-positive values
# disable the limit (None).
13128 if myoptions.load_average:
13130 load_average = float(myoptions.load_average)
13134 if load_average <= 0.0:
13135 load_average = None
13137 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
13138 (myoptions.load_average,), noiselevel=-1)
13140 myoptions.load_average = load_average
# Copy the parsed flags into the myopts dict keyed by option string.
13142 for myopt in options:
13143 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
13145 myopts[myopt] = True
13147 for myopt in argument_options:
13148 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# Exactly one action may be requested; a second one is a usage error.
13152 for action_opt in actions:
13153 v = getattr(myoptions, action_opt.replace("-", "_"))
13156 multiple_actions(myaction, action_opt)
13158 myaction = action_opt
13162 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run configuration validation on every root's vartree settings."""
	for root in trees:
		trees[root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Discard cached metadata for every configured root.

	Unfreezes ("melts") each porttree dbapi, clears the porttree and
	bintree auxiliary caches plus the bintree's own cache, and finally
	empties portage's global directory-listing cache.
	"""
	for root_trees in trees.itervalues():
		portdb = root_trees["porttree"].dbapi
		portdb.melt()
		portdb._aux_cache.clear()
		bindb = root_trees["bintree"].dbapi
		bindb._aux_cache.clear()
		bindb._clear_cache()
	portage.dircache.clear()
# Build (settings, trees, mtimedb) for an emerge run, honoring the
# PORTAGE_CONFIGROOT and ROOT environment overrides.
13178 def load_emerge_config(trees=None):
# Seed the create_trees() kwargs from the environment, skipping unset
# or whitespace-only values.
13180 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
13181 v = os.environ.get(envvar, None)
13182 if v and v.strip():
13184 trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (settings + package-set configuration) to each root.
13186 for root, root_trees in trees.iteritems():
13187 settings = root_trees["vartree"].settings
13188 setconfig = load_default_config(settings, root_trees)
13189 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
13191 settings = trees["/"]["vartree"].settings
13193 for myroot in trees:
13195 settings = trees[myroot]["vartree"].settings
# The mtimedb is stored under portage's cache path on the root filesystem.
13198 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
13199 mtimedb = portage.MtimeDB(mtimedbfile)
13201 return settings, trees, mtimedb
13203 def adjust_config(myopts, settings):
13204 """Make emerge specific adjustments to the config."""
13206 # To enhance usability, make some vars case insensitive by forcing them to
13208 for myvar in ("AUTOCLEAN", "NOCOLOR"):
13209 if myvar in settings:
13210 settings[myvar] = settings[myvar].lower()
13211 settings.backup_changes(myvar)
13214 # Kill noauto as it will break merges otherwise.
13215 if "noauto" in settings.features:
13216 while "noauto" in settings.features:
13217 settings.features.remove("noauto")
13218 settings["FEATURES"] = " ".join(settings.features)
13219 settings.backup_changes("FEATURES")
# Normalize CLEAN_DELAY to a valid integer, falling back to the default
# (and warning) when the configured value does not parse.
13223 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
13224 except ValueError, e:
13225 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13226 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
13227 settings["CLEAN_DELAY"], noiselevel=-1)
13228 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
13229 settings.backup_changes("CLEAN_DELAY")
# Same normalization for EMERGE_WARNING_DELAY (default 10 seconds).
13231 EMERGE_WARNING_DELAY = 10
13233 EMERGE_WARNING_DELAY = int(settings.get(
13234 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
13235 except ValueError, e:
13236 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13237 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
13238 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
13239 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
13240 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line verbosity flags into the config so that ebuild
# processes inherit them.
13242 if "--quiet" in myopts:
13243 settings["PORTAGE_QUIET"]="1"
13244 settings.backup_changes("PORTAGE_QUIET")
13246 if "--verbose" in myopts:
13247 settings["PORTAGE_VERBOSE"] = "1"
13248 settings.backup_changes("PORTAGE_VERBOSE")
13250 # Set so that configs will be merged regardless of remembered status
13251 if ("--noconfmem" in myopts):
13252 settings["NOCONFMEM"]="1"
13253 settings.backup_changes("NOCONFMEM")
13255 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; anything else is reported and ignored.
13258 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
13259 if PORTAGE_DEBUG not in (0, 1):
13260 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
13261 PORTAGE_DEBUG, noiselevel=-1)
13262 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
13265 except ValueError, e:
13266 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13267 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
13268 settings["PORTAGE_DEBUG"], noiselevel=-1)
13270 if "--debug" in myopts:
13272 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
13273 settings.backup_changes("PORTAGE_DEBUG")
# Decide on color output: NOCOLOR setting, then the explicit --color
# option, then a non-tty stdout each get a say (in increasing priority).
13275 if settings.get("NOCOLOR") not in ("yes","true"):
13276 portage.output.havecolor = 1
13278 """The explicit --color < y | n > option overrides the NOCOLOR environment
13279 variable and stdout auto-detection."""
13280 if "--color" in myopts:
13281 if "y" == myopts["--color"]:
13282 portage.output.havecolor = 1
13283 settings["NOCOLOR"] = "false"
13285 portage.output.havecolor = 0
13286 settings["NOCOLOR"] = "true"
13287 settings.backup_changes("NOCOLOR")
13288 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
13289 portage.output.havecolor = 0
13290 settings["NOCOLOR"] = "true"
13291 settings.backup_changes("NOCOLOR")
# Apply process-priority adjustments for this emerge run; presumably
# delegates to the nice() and ionice() helpers defined below -- the body
# is not visible in this excerpt, so confirm against the full source.
13293 def apply_priorities(settings):
13297 def nice(settings):
# Renice the current process by PORTAGE_NICENESS (default "0").
13299 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# os.nice() raises OSError (e.g. insufficient permission) and int()
# raises ValueError for a non-integer setting; both are reported
# without aborting the run.
13300 except (OSError, ValueError), e:
13301 out = portage.output.EOutput()
13302 out.eerror("Failed to change nice value to '%s'" % \
13303 settings["PORTAGE_NICENESS"])
13304 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND to set this process'
# I/O priority, substituting ${PID} with the current process id.
13306 def ionice(settings):
13308 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
13310 ionice_cmd = shlex.split(ionice_cmd)
# Expand ${PID} (and any other configured variables) in each argument.
13314 from portage.util import varexpand
13315 variables = {"PID" : str(os.getpid())}
13316 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
13319 rval = portage.process.spawn(cmd, env=os.environ)
13320 except portage.exception.CommandNotFound:
13321 # The OS kernel probably doesn't support ionice,
13322 # so return silently.
# A non-zero exit status is reported but does not abort the run.
13325 if rval != os.EX_OK:
13326 out = portage.output.EOutput()
13327 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
13328 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Tell the user that the requested package set does not exist, and list
# the set names that are available in root_config (sorted).
13330 def display_missing_pkg_set(root_config, set_name):
13333 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
13334 "The following sets exist:") % \
13335 colorize("INFORM", set_name))
13338 for s in sorted(root_config.sets):
13339 msg.append(" %s" % s)
# Emit the whole message at ERROR level in one write.
13342 writemsg_level("".join("%s\n" % l for l in msg),
13343 level=logging.ERROR, noiselevel=-1)
# Expand set arguments (SETPREFIX-prefixed names) in myfiles; the "search"
# action is excluded since it handles its arguments itself. Returns
# (myfiles, retval) with retval == os.EX_OK on success and 1 on error.
# (The expansion logic itself is partly elided in this excerpt.)
13345 def expand_set_arguments(myfiles, myaction, root_config):
13347 if myaction != "search":
13353 if x[:1] == SETPREFIX:
# Argument is neither a set nor a valid atom: report and fail.
13355 msg.append("'%s' is not a valid package atom." % (x,))
13356 msg.append("Please check ebuild(5) for full details.")
13357 writemsg_level("".join("!!! %s\n" % line for line in msg),
13358 level=logging.ERROR, noiselevel=-1)
13359 return (myfiles, 1)
# Bare "system"/"world" arguments conflict with any explicit action,
# and with each other.
13360 elif x == "system":
13365 if myaction is not None:
13367 multiple_actions("system", myaction)
13368 return (myfiles, 1)
13370 multiple_actions("world", myaction)
13371 return (myfiles, 1)
13373 if system and world:
13374 multiple_actions("system", "world")
13375 return (myfiles, 1)
13377 return (myfiles, os.EX_OK)
# Warn about portage tree roots that lack a profiles/repo_name entry.
# Returns True when at least one repository is missing one.
13379 def repo_name_check(trees):
13380 missing_repo_names = set()
13381 for root, root_trees in trees.iteritems():
13382 if "porttree" in root_trees:
13383 portdb = root_trees["porttree"].dbapi
# Start from every configured porttree path and discard those with a
# registered repository name, leaving only the nameless ones.
13384 missing_repo_names.update(portdb.porttrees)
13385 repos = portdb.getRepositories()
13387 missing_repo_names.discard(portdb.getRepositoryPath(r))
13389 if missing_repo_names:
13391 msg.append("WARNING: One or more repositories " + \
13392 "have missing repo_name entries:")
13394 for p in missing_repo_names:
13395 msg.append("\t%s/profiles/repo_name" % (p,))
13397 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
13398 "should be a plain text file containing a unique " + \
13399 "name for the repository on the first line.", 70))
# Emit the accumulated warning in a single WARNING-level write.
13400 writemsg_level("".join("%s\n" % l for l in msg),
13401 level=logging.WARNING, noiselevel=-1)
13403 return bool(missing_repo_names)
13406 global portage # NFC why this is necessary now - genone
13407 portage._disable_legacy_globals()
13408 # Disable color until we're sure that it should be enabled (after
13409 # EMERGE_DEFAULT_OPTS has been parsed).
13410 portage.output.havecolor = 0
13411 # This first pass is just for options that need to be known as early as
13412 # possible, such as --config-root. They will be parsed again later,
13413 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
13414 # the value of --config-root).
13415 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
13416 if "--debug" in myopts:
13417 os.environ["PORTAGE_DEBUG"] = "1"
13418 if "--config-root" in myopts:
13419 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
13421 # Portage needs to ensure a sane umask for the files it creates.
13423 settings, trees, mtimedb = load_emerge_config()
13424 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13426 if portage._global_updates(trees, mtimedb["updates"]):
13428 # Reload the whole config from scratch.
13429 settings, trees, mtimedb = load_emerge_config(trees=trees)
13430 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13432 xterm_titles = "notitles" not in settings.features
13435 if "--ignore-default-opts" not in myopts:
13436 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
13437 tmpcmdline.extend(sys.argv[1:])
13438 myaction, myopts, myfiles = parse_opts(tmpcmdline)
13440 if "--digest" in myopts:
13441 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
13442 # Reload the whole config from scratch so that the portdbapi internal
13443 # config is updated with new FEATURES.
13444 settings, trees, mtimedb = load_emerge_config(trees=trees)
13445 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13447 for myroot in trees:
13448 mysettings = trees[myroot]["vartree"].settings
13449 mysettings.unlock()
13450 adjust_config(myopts, mysettings)
13451 mysettings["PORTAGE_COUNTER_HASH"] = \
13452 trees[myroot]["vartree"].dbapi._counter_hash()
13453 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
13455 del myroot, mysettings
13457 apply_priorities(settings)
13459 spinner = stdout_spinner()
13460 if "candy" in settings.features:
13461 spinner.update = spinner.update_scroll
13463 if "--quiet" not in myopts:
13464 portage.deprecated_profile_check()
13465 repo_name_check(trees)
13467 eclasses_overridden = {}
13468 for mytrees in trees.itervalues():
13469 mydb = mytrees["porttree"].dbapi
13470 # Freeze the portdbapi for performance (memoize all xmatch results).
13472 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
13475 if eclasses_overridden and \
13476 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
13477 prefix = bad(" * ")
13478 if len(eclasses_overridden) == 1:
13479 writemsg(prefix + "Overlay eclass overrides " + \
13480 "eclass from PORTDIR:\n", noiselevel=-1)
13482 writemsg(prefix + "Overlay eclasses override " + \
13483 "eclasses from PORTDIR:\n", noiselevel=-1)
13484 writemsg(prefix + "\n", noiselevel=-1)
13485 for eclass_name in sorted(eclasses_overridden):
13486 writemsg(prefix + " '%s/%s.eclass'\n" % \
13487 (eclasses_overridden[eclass_name], eclass_name),
13489 writemsg(prefix + "\n", noiselevel=-1)
13490 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
13491 "because it will trigger invalidation of cached ebuild metadata " + \
13492 "that is distributed with the portage tree. If you must " + \
13493 "override eclasses from PORTDIR then you are advised to add " + \
13494 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
13495 "`emerge --regen` after each time that you run `emerge --sync`. " + \
13496 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
13497 "you would like to disable this warning."
13498 from textwrap import wrap
13499 for line in wrap(msg, 72):
13500 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
13502 if "moo" in myfiles:
13505 Larry loves Gentoo (""" + platform.system() + """)
13507 _______________________
13508 < Have you mooed today? >
13509 -----------------------
13519 ext = os.path.splitext(x)[1]
13520 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
13521 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
13524 root_config = trees[settings["ROOT"]]["root_config"]
13525 if myaction == "list-sets":
13526 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
13530 # only expand sets for actions taking package arguments
13531 oldargs = myfiles[:]
13532 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
13533 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
13534 if retval != os.EX_OK:
13537 # Need to handle empty sets specially, otherwise emerge will react
13538 # with the help message for empty argument lists
13539 if oldargs and not myfiles:
13540 print "emerge: no targets left after set expansion"
# --tree and --columns are mutually exclusive display formats.
if ("--tree" in myopts) and ("--columns" in myopts):
print "emerge: can't specify both of \"--tree\" and \"--columns\"."
# --quiet silences both the spinner and portage's normal output.
if ("--quiet" in myopts):
spinner.update = spinner.update_quiet
portage.util.noiselimit = -1
# Always create packages if FEATURES=buildpkg
# Imply --buildpkg if --buildpkgonly
if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
if "--buildpkg" not in myopts:
myopts["--buildpkg"] = True
# Also allow -S to invoke search action (-sS)
if ("--searchdesc" in myopts):
if myaction and myaction != "search":
myfiles.append(myaction)
if "--search" not in myopts:
myopts["--search"] = True
myaction = "search"
# Always try and fetch binary packages if FEATURES=getbinpkg
if ("getbinpkg" in settings.features):
myopts["--getbinpkg"] = True
if "--buildpkgonly" in myopts:
# --buildpkgonly will not merge anything, so
# it cancels all binary package options.
for opt in ("--getbinpkg", "--getbinpkgonly",
"--usepkg", "--usepkgonly"):
myopts.pop(opt, None)
# --fetch-all-uri is a superset of --fetchonly.
if "--fetch-all-uri" in myopts:
myopts["--fetchonly"] = True
# --skipfirst only makes sense on a resumed merge list.
if "--skipfirst" in myopts and "--resume" not in myopts:
myopts["--resume"] = True
# Implication chain for binary-package options:
# --getbinpkgonly => --usepkgonly and --getbinpkg;
# --getbinpkg or --usepkgonly => --usepkg.
if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
myopts["--usepkgonly"] = True
if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
myopts["--getbinpkg"] = True
if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
myopts["--usepkg"] = True
# Also allow -K to apply --usepkg/-k
if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
myopts["--usepkg"] = True
# Allow -p to remove --ask
if ("--pretend" in myopts) and ("--ask" in myopts):
print ">>> --pretend disables --ask... removing --ask from options."
del myopts["--ask"]
# forbid --ask when not in a terminal
# note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
if ("--ask" in myopts) and (not sys.stdin.isatty()):
portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
if settings.get("PORTAGE_DEBUG", "") == "1":
spinner.update = spinner.update_quiet
# FEATURES=python-trace enables portage's python-level call tracing.
if "python-trace" in settings.features:
import portage.debug
portage.debug.set_trace(True)
# Fall back to the non-animated spinner when stdout is not a tty
# or the user asked for no spinner.
if not ("--quiet" in myopts):
if not sys.stdout.isatty() or ("--nospinner" in myopts):
spinner.update = spinner.update_basic
# --version / --help short-circuit before any real action is taken.
if "--version" in myopts:
print getportageversion(settings["PORTDIR"], settings["ROOT"],
settings.profile_path, settings["CHOST"],
trees[settings["ROOT"]]["vartree"].dbapi)
elif "--help" in myopts:
_emerge.help.help(myaction, myopts, portage.output.havecolor)
if "--debug" in myopts:
print "myaction", myaction
print "myopts", myopts
# No action, no targets, no resume: nothing to do, so show help.
if not myaction and not myfiles and "--resume" not in myopts:
_emerge.help.help(myaction, myopts, portage.output.havecolor)
# Cache commonly-tested option flags as booleans.
pretend = "--pretend" in myopts
fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
buildpkgonly = "--buildpkgonly" in myopts
# check if root user is the current user for the actions where emerge needs this
if portage.secpass < 2:
# We've already allowed "--version" and "--help" above.
if "--pretend" not in myopts and myaction not in ("search","info"):
# NOTE(review): mixes bare `secpass` (imported from portage.data)
# with portage.secpass; both name the same privilege level.
need_superuser = not \
(buildpkgonly and secpass >= 1) or \
myaction in ("metadata", "regen") or \
(myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
if portage.secpass < 1 or \
access_desc = "superuser"
access_desc = "portage group"
# Always show portage_group_warning() when only portage group
# access is required but the user is not in the portage group.
from portage.data import portage_group_warning
# With --ask we can degrade gracefully: demote to --pretend instead
# of exiting so the user still sees what would have been done.
if "--ask" in myopts:
myopts["--pretend"] = True
del myopts["--ask"]
print ("%s access is required... " + \
"adding --pretend to options.\n") % access_desc
if portage.secpass < 1 and not need_superuser:
portage_group_warning()
sys.stderr.write(("emerge: %s access is " + \
"required.\n\n") % access_desc)
if portage.secpass < 1 and not need_superuser:
portage_group_warning()
# emerge.log is disabled for read-only / no-op invocations so that
# parallel runs do not pollute the log.
disable_emergelog = False
for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
disable_emergelog = True
if myaction in ("search", "info"):
disable_emergelog = True
if disable_emergelog:
""" Disable emergelog for everything except build or unmerge
operations. This helps minimize parallel emerge.log entries that can
confuse log parsers. We especially want it disabled during
parallel-fetch, which uses --resume --fetchonly."""
# Shadow the real logger with a no-op shim.
def emergelog(*pargs, **kargs):
if not "--pretend" in myopts:
emergelog(xterm_titles, "Started emerge on: "+\
time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
# Reconstruct the effective command line (options, action, original
# pre-expansion arguments) for the log record.
myelogstr=" ".join(myopts)
myelogstr+=" "+myaction
myelogstr += " " + " ".join(oldargs)
emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
    """Handle a fatal signal: quiesce further signals, note it, and exit.

    SIGINT and SIGTERM are switched to SIG_IGN so repeated deliveries
    cannot interrupt the shutdown, a short notice naming the signal is
    written, and the process exits with status 100 + signum.
    """
    for caught in (signal.SIGINT, signal.SIGTERM):
        signal.signal(caught, signal.SIG_IGN)
    portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
    sys.exit(100+signum)
# Route SIGINT/SIGTERM through emergeexitsig so a fatal signal is
# reported and mapped to exit status 100+signum.
signal.signal(signal.SIGINT, emergeexitsig)
signal.signal(signal.SIGTERM, emergeexitsig)
# (body of emergeexit; its `def` line falls outside this excerpt)
"""This gets out final log message in before we quit."""
if "--pretend" not in myopts:
emergelog(xterm_titles, " *** terminating.")
if "notitles" not in settings.features:
# Run emergeexit on normal interpreter shutdown.
portage.atexit_register(emergeexit)
# Dispatch for actions that operate on the tree itself rather than on
# package atoms; none of them supports --pretend.
if myaction in ("config", "metadata", "regen", "sync"):
if "--pretend" in myopts:
sys.stderr.write(("emerge: The '%s' action does " + \
"not support '--pretend'.\n") % myaction)
if "sync" == myaction:
return action_sync(settings, trees, mtimedb, myopts, myaction)
elif "metadata" == myaction:
action_metadata(settings, portdb, myopts)
elif myaction=="regen":
validate_ebuild_environment(trees)
action_regen(settings, portdb, myopts.get("--jobs"),
myopts.get("--load-average"))
elif "config"==myaction:
validate_ebuild_environment(trees)
action_config(settings, trees, myopts, myfiles)
elif "search"==myaction:
validate_ebuild_environment(trees)
action_search(trees[settings["ROOT"]]["root_config"],
myopts, myfiles, spinner)
elif myaction in ("clean", "unmerge") or \
(myaction == "prune" and "--nodeps" in myopts):
validate_ebuild_environment(trees)
# Ensure atoms are valid before calling unmerge().
# For backward compat, leading '=' is not required.
if is_valid_package_atom(x) or \
is_valid_package_atom("=" + x):
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
# When given a list of atoms, unmerge
# them in the order given.
ordered = myaction == "unmerge"
# post_emerge runs only when unmerge() returns 1 (treated as success:
# os.EX_OK is passed) and this is a real run, not pretend/fetch-only.
if 1 == unmerge(root_config, myopts, myaction, myfiles,
mtimedb["ldpath"], ordered=ordered):
if not (buildpkgonly or fetchonly or pretend):
post_emerge(root_config, myopts, mtimedb, os.EX_OK)
elif myaction in ("depclean", "info", "prune"):
# Ensure atoms are valid before calling unmerge().
vardb = trees[settings["ROOT"]]["vartree"].dbapi
if is_valid_package_atom(x):
valid_atoms.append(
portage.dep_expand(x, mydb=vardb, settings=settings))
# dep_expand raises when a short ebuild name matches several
# packages; list the fully-qualified candidates for the user.
except portage.exception.AmbiguousPackageName, e:
msg = "The short ebuild name \"" + x + \
"\" is ambiguous. Please specify " + \
"one of the following " + \
"fully-qualified ebuild names instead:"
for line in textwrap.wrap(msg, 70):
writemsg_level("!!! %s\n" % (line,),
level=logging.ERROR, noiselevel=-1)
writemsg_level(" %s\n" % colorize("INFORM", i),
level=logging.ERROR, noiselevel=-1)
writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
msg.append("'%s' is not a valid package atom." % (x,))
msg.append("Please check ebuild(5) for full details.")
writemsg_level("".join("!!! %s\n" % line for line in msg),
level=logging.ERROR, noiselevel=-1)
# "info" only reports; depclean/prune fall through to the real work.
if myaction == "info":
return action_info(settings, trees, myopts, valid_atoms)
validate_ebuild_environment(trees)
action_depclean(settings, trees, mtimedb["ldpath"],
myopts, myaction, valid_atoms, spinner)
if not (buildpkgonly or fetchonly or pretend):
post_emerge(root_config, myopts, mtimedb, os.EX_OK)
# "update", "system", or just process files:
validate_ebuild_environment(trees)
if "--pretend" not in myopts:
display_news_notification(root_config, myopts)
retval = action_build(settings, trees, mtimedb,
myopts, myaction, myfiles, spinner)
# NOTE(review): root_config is re-read from trees after action_build —
# presumably the trees can be reconstructed during the build; confirm.
root_config = trees[settings["ROOT"]]["root_config"]
post_emerge(root_config, myopts, mtimedb, retval)