2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
# Install quiet SIGINT/SIGTERM handlers as early as possible so that an
# interrupt during the heavy portage imports below exits cleanly instead
# of dumping a traceback.
# NOTE(review): this excerpt is gapped (embedded original line numbers,
# missing lines) -- the try/except frame around this block is only
# partially visible; handler body after the two signal.signal() calls is
# not shown.
7 # This block ensures that ^C interrupts are handled quietly.
11 def exithandler(signum,frame):
12 signal.signal(signal.SIGINT, signal.SIG_IGN)
13 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16 signal.signal(signal.SIGINT, exithandler)
17 signal.signal(signal.SIGTERM, exithandler)
# SIGPIPE gets the default action so a write to a closed pipe terminates
# the process instead of raising IOError at every print site.
18 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
20 except KeyboardInterrupt:
24 from collections import deque
41 from os import path as osp
42 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51 nc_len, red, teal, turquoise, xtermTitle, \
52 xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
61 portage.dep._dep_check_strict = True
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage.sets import load_default_config, SETPREFIX
70 from portage.sets.base import InternalPackageSet
72 from itertools import chain, izip
73 from UserDict import DictMixin
76 import cPickle as pickle
81 import cStringIO as StringIO
# Progress spinner written to stdout while long operations (e.g. dependency
# calculation, cache regeneration) run.  Three display modes are visible:
# dots (update_basic), a scrolling message (update_scroll), a twirling bar
# (update_twirl), plus a no-op quiet mode.  self.update is rebound to the
# selected mode's method.
# NOTE(review): excerpt is gapped -- __init__'s def line, several returns
# and else branches are missing from this view.
85 class stdout_spinner(object):
87 "Gentoo Rocks ("+platform.system()+")",
88 "Thank you for using Gentoo. :)",
89 "Are you actually trying to read this?",
90 "How many times have you stared at this?",
91 "We are generating the cache right now",
92 "You are paying too much attention.",
93 "A theory is better than its explanation.",
94 "Phasers locked on target, Captain.",
95 "Thrashing is just virtual crashing.",
96 "To be is to program.",
97 "Real Users hate Real Programmers.",
98 "When all else fails, read the instructions.",
99 "Functionality breeds Contempt.",
100 "The future lies ahead.",
101 "3.1415926535897932384626433832795028841971694",
102 "Sometimes insanity is the only alternative.",
103 "Inaccuracy saves a world of explanation.",
106 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default mode is the twirl spinner; a scroll message is picked
# pseudo-randomly from scroll_msgs using the current time.
110 self.update = self.update_twirl
111 self.scroll_sequence = self.scroll_msgs[
112 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (rate limit for tty writes).
114 self.min_display_latency = 0.05
116 def _return_early(self):
# Rate limiter: True means the caller should skip this update.
118 Flushing output to the tty too frequently wastes cpu time. Therefore,
119 each update* method should return without doing any output when this
122 cur_time = time.time()
123 if cur_time - self.last_update < self.min_display_latency:
125 self.last_update = cur_time
128 def update_basic(self):
# Emit a dot roughly every 100 position ticks, with an extra space
# at each wrap of the 500-tick cycle.
129 self.spinpos = (self.spinpos + 1) % 500
130 if self._return_early():
132 if (self.spinpos % 100) == 0:
133 if self.spinpos == 0:
134 sys.stdout.write(". ")
136 sys.stdout.write(".")
139 def update_scroll(self):
# Scroll the chosen message forward then backward; spinpos cycles
# over twice the message length to cover both directions.
140 if self._return_early():
142 if(self.spinpos >= len(self.scroll_sequence)):
143 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
144 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
146 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
148 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
150 def update_twirl(self):
# Classic /-\| twirl; backspaces over the previous character.
151 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
152 if self._return_early():
154 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used with --quiet / --nospinner.
157 def update_quiet(self):
# Interactive yes/no (or custom-choice) prompt.  Matching is by
# case-insensitive prefix; an empty answer selects the first response.
# NOTE(review): excerpt is gapped -- the loop/try structure around
# raw_input and the colours default branches are only partially visible.
160 def userquery(prompt, responses=None, colours=None):
161 """Displays a prompt and a set of responses, then waits for a response
162 which is checked against the responses and the first to match is
163 returned. An empty response will match the first value in responses. The
164 input buffer is *not* cleared prior to the prompt!
167 responses: a List of Strings.
168 colours: a List of Functions taking and returning a String, used to
169 process the responses for display. Typically these will be functions
170 like red() but could be e.g. lambda x: "DisplayString".
171 If responses is omitted, defaults to ["Yes", "No"], [green, red].
172 If only colours is omitted, defaults to [bold, ...].
174 Returns a member of the List responses. (If called without optional
175 arguments, returns "Yes" or "No".)
176 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
178 if responses is None:
179 responses = ["Yes", "No"]
181 create_color_func("PROMPT_CHOICE_DEFAULT"),
182 create_color_func("PROMPT_CHOICE_OTHER")
184 elif colours is None:
# Cycle the colour list so it is at least as long as responses.
186 colours=(colours*len(responses))[:len(responses)]
190 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
191 for key in responses:
192 # An empty response will match the first value in responses.
193 if response.upper()==key[:len(response)].upper():
195 print "Sorry, response '%s' not understood." % response,
196 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary for emerge:
#   actions      -- the non-option action words (emerge <action> ...)
#   (options)    -- the long --flags accepted (frozenset literal is
#                   partially visible in this excerpt)
#   shortmapping -- single-letter short option -> long option.
# NOTE(review): excerpt is gapped; opening/closing brackets of some of
# these literals are not visible here.
200 actions = frozenset([
201 "clean", "config", "depclean",
202 "info", "list-sets", "metadata",
203 "prune", "regen", "search",
207 "--ask", "--alphabetical",
208 "--buildpkg", "--buildpkgonly",
209 "--changelog", "--columns",
214 "--fetchonly", "--fetch-all-uri",
215 "--getbinpkg", "--getbinpkgonly",
216 "--help", "--ignore-default-opts",
219 "--newuse", "--nocolor",
220 "--nodeps", "--noreplace",
221 "--nospinner", "--oneshot",
222 "--onlydeps", "--pretend",
223 "--quiet", "--resume",
224 "--searchdesc", "--selective",
228 "--usepkg", "--usepkgonly",
229 "--verbose", "--version"
235 "b":"--buildpkg", "B":"--buildpkgonly",
236 "c":"--clean", "C":"--unmerge",
237 "d":"--debug", "D":"--deep",
239 "f":"--fetchonly", "F":"--fetch-all-uri",
240 "g":"--getbinpkg", "G":"--getbinpkgonly",
242 "k":"--usepkg", "K":"--usepkgonly",
244 "n":"--noreplace", "N":"--newuse",
245 "o":"--onlydeps", "O":"--nodeps",
246 "p":"--pretend", "P":"--prune",
248 "s":"--search", "S":"--searchdesc",
251 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log, optionally echoing a
# short message into the xterm title bar.  The log file is opened in
# append mode, given portage-user permissions, and written under a
# portage file lock; all I/O errors are reported to stderr rather than
# raised (best-effort logging).
254 def emergelog(xterm_titles, mystr, short_msg=None):
255 if xterm_titles and short_msg:
256 if "HOSTNAME" in os.environ:
257 short_msg = os.environ["HOSTNAME"]+": "+short_msg
258 xtermTitle(short_msg)
260 file_path = "/var/log/emerge.log"
261 mylogfile = open(file_path, "a")
262 portage.util.apply_secpass_permissions(file_path,
263 uid=portage.portage_uid, gid=portage.portage_gid,
267 mylock = portage.locks.lockfile(mylogfile)
268 # seek because we may have gotten held up by the lock.
269 # if so, we may not be positioned at the end of the file.
# Timestamp is the integer part of time.time() (first 10 chars).
271 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
275 portage.locks.unlockfile(mylock)
277 except (IOError,OSError,portage.exception.PortageException), e:
279 print >> sys.stderr, "emergelog():",e
# Print a visible countdown of `secs` seconds before a destructive action,
# giving the user a chance to hit Control-C.
# NOTE(review): the loop over `sec` and the sleep are not visible in this
# gapped excerpt.
281 def countdown(secs=5, doing="Starting"):
283 print ">>> Waiting",secs,"seconds before starting..."
284 print ">>> (Control-C to abort)...\n"+doing+" in: ",
288 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
293 # formats a size given in bytes nicely
# Converts a byte count to a human-readable kB string with thousands
# separators; non-integer inputs are handled by the (unseen) early branch.
# NOTE(review): gapped excerpt -- the early return, the comma-grouping
# loop header and the final return are missing from this view.
294 def format_size(mysize):
295 if type(mysize) not in [types.IntType,types.LongType]:
297 if 0 != mysize % 1024:
298 # Always round up to the next kB so that it doesn't show 0 kB when
299 # some small file still needs to be fetched.
300 mysize += 1024 - mysize % 1024
301 mystr=str(mysize/1024)
# Insert a comma every three digits (thousands separator).
305 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version for `chost`, trying in order:
#   1. `gcc-config -c` (Gentoo's selector), accepting output that starts
#      with "<chost>-",
#   2. `<chost>-gcc -dumpversion`,
#   3. plain `gcc -dumpversion`.
# Returns a "gcc-<version>" string, or "[unavailable]" after printing a
# hint that the shell environment may be stale.
309 def getgccversion(chost):
312 return: the current in-use gcc version
315 gcc_ver_command = 'gcc -dumpversion'
316 gcc_ver_prefix = 'gcc-'
318 gcc_not_found_error = red(
319 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
320 "!!! to update the environment of this terminal and possibly\n" +
321 "!!! other terminals also.\n"
324 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
325 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
326 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
328 mystatus, myoutput = commands.getstatusoutput(
329 chost + "-" + gcc_ver_command)
330 if mystatus == os.EX_OK:
331 return gcc_ver_prefix + myoutput
333 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
334 if mystatus == os.EX_OK:
335 return gcc_ver_prefix + myoutput
337 portage.writemsg(gcc_not_found_error, noiselevel=-1)
338 return "[unavailable]"
# Build the version banner shown by `emerge --version` / --info:
# "Portage <ver> (<profile>, <gccver>, <libcver>, <kernel release+arch>)".
# Profile is reported relative to $PORTDIR/profiles when possible,
# otherwise as the raw symlink target prefixed with "!".
# NOTE(review): gapped excerpt -- the try/except frames and the loop over
# libclist entries are only partially visible.
340 def getportageversion(portdir, target_root, profile, chost, vardb):
341 profilever = "unavailable"
343 realpath = os.path.realpath(profile)
344 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
345 if realpath.startswith(basepath):
346 profilever = realpath[1 + len(basepath):]
349 profilever = "!" + os.readlink(profile)
352 del realpath, basepath
# libc version: union of installed virtual/libc and virtual/glibc
# providers, deduplicated, joined with commas.
355 libclist = vardb.match("virtual/libc")
356 libclist += vardb.match("virtual/glibc")
357 libclist = portage.util.unique_array(libclist)
359 xs=portage.catpkgsplit(x)
361 libcver+=","+"-".join(xs[1:])
363 libcver="-".join(xs[1:])
365 libcver="unavailable"
367 gccver = getgccversion(chost)
368 unameout=platform.release()+" "+platform.machine()
370 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge options/action into the set of depgraph parameter flags
# consumed by the dependency resolver.  Returns a set of strings.
372 def create_depgraph_params(myopts, myaction):
373 #configure emerge engine parameters
375 # self: include _this_ package regardless of if it is merged.
376 # selective: exclude the package if it is merged
377 # recurse: go into the dependencies
378 # deep: go into the dependencies of already merged packages
379 # empty: pretend nothing is merged
380 # complete: completely account for all known dependencies
381 # remove: build graph for use in removing packages
382 myparams = set(["recurse"])
# "remove" (depclean/prune) needs a complete graph so nothing needed
# by the remaining packages is removed.
384 if myaction == "remove":
385 myparams.add("remove")
386 myparams.add("complete")
389 if "--update" in myopts or \
390 "--newuse" in myopts or \
391 "--reinstall" in myopts or \
392 "--noreplace" in myopts:
393 myparams.add("selective")
# --emptytree overrides selective: everything is treated as unmerged.
394 if "--emptytree" in myopts:
395 myparams.add("empty")
396 myparams.discard("selective")
397 if "--nodeps" in myopts:
398 myparams.discard("recurse")
399 if "--deep" in myopts:
401 if "--complete-graph" in myopts:
402 myparams.add("complete")
405 # search functionality
# Package search engine behind `emerge --search` / `--searchdesc`.
# Aggregates matches from the portage tree, binary packages and installed
# packages through a fake portdb facade whose methods dispatch over the
# selected list of databases (self._dbs).
# NOTE(review): excerpt is gapped (embedded original line numbers, lines
# missing inside every method) -- several loop headers, try/except frames
# and returns are not visible here.
406 class search(object):
417 def __init__(self, root_config, spinner, searchdesc,
418 verbose, usepkg, usepkgonly):
419 """Searches the available and installed packages for the supplied search key.
420 The list of available and installed packages is created at object instantiation.
421 This makes successive searches faster."""
422 self.settings = root_config.settings
423 self.vartree = root_config.trees["vartree"]
424 self.spinner = spinner
425 self.verbose = verbose
426 self.searchdesc = searchdesc
427 self.root_config = root_config
428 self.setconfig = root_config.setconfig
429 self.matches = {"pkg" : []}
# The fake portdb exposes this object's _-prefixed wrappers under the
# standard dbapi method names so other code can treat it as a portdb.
434 self.portdb = fake_portdb
435 for attrib in ("aux_get", "cp_all",
436 "xmatch", "findname", "getFetchMap"):
437 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
441 portdb = root_config.trees["porttree"].dbapi
442 bindb = root_config.trees["bintree"].dbapi
443 vardb = root_config.trees["vartree"].dbapi
# Which databases participate depends on --usepkg/--usepkgonly.
445 if not usepkgonly and portdb._have_root_eclass_dir:
446 self._dbs.append(portdb)
448 if (usepkg or usepkgonly) and bindb.cp_all():
449 self._dbs.append(bindb)
451 self._dbs.append(vardb)
452 self._portdb = portdb
# _cp_all: union of category/package names across all databases.
457 cp_all.update(db.cp_all())
458 return list(sorted(cp_all))
460 def _aux_get(self, *args, **kwargs):
463 return db.aux_get(*args, **kwargs)
468 def _findname(self, *args, **kwargs):
470 if db is not self._portdb:
471 # We don't want findname to return anything
472 # unless it's an ebuild in a portage tree.
473 # Otherwise, it's already built and we don't
476 func = getattr(db, "findname", None)
478 value = func(*args, **kwargs)
483 def _getFetchMap(self, *args, **kwargs):
485 func = getattr(db, "getFetchMap", None)
487 value = func(*args, **kwargs)
492 def _visible(self, db, cpv, metadata):
# Wrap the module-level visible() check in a Package instance;
# installed packages come from the vartree dbapi, anything not from
# the portdb counts as built.
493 installed = db is self.vartree.dbapi
494 built = installed or db is not self._portdb
497 pkg_type = "installed"
500 return visible(self.settings,
501 Package(type_name=pkg_type, root_config=self.root_config,
502 cpv=cpv, built=built, installed=installed, metadata=metadata))
504 def _xmatch(self, level, atom):
506 This method does not expand old-style virtuals because it
507 is restricted to returning matches for a single ${CATEGORY}/${PN}
508 and old-style virtual matches unreliable for that when querying
509 multiple package databases. If necessary, old-style virtuals
510 can be performed on atoms prior to calling this method.
512 cp = portage.dep_getkey(atom)
# match-all: union over all dbs, restricted to the atom's cp.
513 if level == "match-all":
516 if hasattr(db, "xmatch"):
517 matches.update(db.xmatch(level, atom))
519 matches.update(db.match(atom))
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
# match-visible: same, filtering by _visible() for dbs without xmatch.
522 elif level == "match-visible":
525 if hasattr(db, "xmatch"):
526 matches.update(db.xmatch(level, atom))
528 db_keys = list(db._aux_cache_keys)
529 for cpv in db.match(atom):
530 metadata = izip(db_keys,
531 db.aux_get(cpv, db_keys))
532 if not self._visible(db, cpv, metadata):
535 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
536 db._cpv_sort_ascending(result)
# bestmatch-visible: keep the single best visible cpv across dbs.
537 elif level == "bestmatch-visible":
540 if hasattr(db, "xmatch"):
541 cpv = db.xmatch("bestmatch-visible", atom)
542 if not cpv or portage.cpv_getkey(cpv) != cp:
544 if not result or cpv == portage.best([cpv, result]):
547 db_keys = Package.metadata_keys
548 # break out of this loop with highest visible
549 # match, checked in descending order
550 for cpv in reversed(db.match(atom)):
551 if portage.cpv_getkey(cpv) != cp:
553 metadata = izip(db_keys,
554 db.aux_get(cpv, db_keys))
555 if not self._visible(db, cpv, metadata):
557 if not result or cpv == portage.best([cpv, result]):
561 raise NotImplementedError(level)
564 def execute(self,searchkey):
565 """Performs the search for the supplied search key"""
567 self.searchkey=searchkey
568 self.packagematches = []
571 self.matches = {"pkg":[], "desc":[], "set":[]}
574 self.matches = {"pkg":[], "set":[]}
575 print "Searching... ",
# Leading '%' makes the key a raw regex; leading '@' restricts the
# match to the full category/name (presumably -- TODO confirm against
# the missing flag assignments in the gap).
578 if self.searchkey.startswith('%'):
580 self.searchkey = self.searchkey[1:]
581 if self.searchkey.startswith('@'):
583 self.searchkey = self.searchkey[1:]
585 self.searchre=re.compile(self.searchkey,re.I)
587 self.searchre=re.compile(re.escape(self.searchkey), re.I)
588 for package in self.portdb.cp_all():
589 self.spinner.update()
592 match_string = package[:]
594 match_string = package.split("/")[-1]
597 if self.searchre.search(match_string):
# Packages with no visible match are reported as masked.
598 if not self.portdb.xmatch("match-visible", package):
600 self.matches["pkg"].append([package,masked])
601 elif self.searchdesc: # DESCRIPTION searching
602 full_package = self.portdb.xmatch("bestmatch-visible", package)
604 #no match found; we don't want to query description
605 full_package = portage.best(
606 self.portdb.xmatch("match-all", package))
612 full_desc = self.portdb.aux_get(
613 full_package, ["DESCRIPTION"])[0]
615 print "emerge: search: aux_get() failed, skipping"
617 if self.searchre.search(full_desc):
618 self.matches["desc"].append([full_package,masked])
# Package sets (e.g. @world-style sets) are searched by name and,
# with --searchdesc, by their DESCRIPTION metadata.
620 self.sdict = self.setconfig.getSets()
621 for setname in self.sdict:
622 self.spinner.update()
624 match_string = setname
626 match_string = setname.split("/")[-1]
628 if self.searchre.search(match_string):
629 self.matches["set"].append([setname, False])
630 elif self.searchdesc:
631 if self.searchre.search(
632 self.sdict[setname].getMetadata("DESCRIPTION")):
633 self.matches["set"].append([setname, False])
636 for mtype in self.matches:
637 self.matches[mtype].sort()
638 self.mlen += len(self.matches[mtype])
641 if not self.portdb.xmatch("match-all", cp):
644 if not self.portdb.xmatch("bestmatch-visible", cp):
646 self.matches["pkg"].append([cp, masked])
# output(): pretty-print the collected matches, including version,
# installation status, distfile size, homepage, description, license.
650 """Outputs the results of the search."""
651 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
652 print "[ Applications found : "+white(str(self.mlen))+" ]"
654 vardb = self.vartree.dbapi
655 for mtype in self.matches:
656 for match,masked in self.matches[mtype]:
660 full_package = self.portdb.xmatch(
661 "bestmatch-visible", match)
663 #no match found; we don't want to query description
665 full_package = portage.best(
666 self.portdb.xmatch("match-all",match))
667 elif mtype == "desc":
669 match = portage.cpv_getkey(match)
671 print green("*")+" "+white(match)
672 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
676 desc, homepage, license = self.portdb.aux_get(
677 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
679 print "emerge: search: aux_get() failed, skipping"
682 print green("*")+" "+white(match)+" "+red("[ Masked ]")
684 print green("*")+" "+white(match)
685 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
689 mycat = match.split("/")[0]
690 mypkg = match.split("/")[1]
691 mycpv = match + "-" + myversion
692 myebuild = self.portdb.findname(mycpv)
# Ebuild in a tree: size is the sum of its distfiles (from the
# Manifest); binary package: size of the package file itself.
694 pkgdir = os.path.dirname(myebuild)
695 from portage import manifest
696 mf = manifest.Manifest(
697 pkgdir, self.settings["DISTDIR"])
699 uri_map = self.portdb.getFetchMap(mycpv)
700 except portage.exception.InvalidDependString, e:
701 file_size_str = "Unknown (%s)" % (e,)
705 mysum[0] = mf.getDistfilesSize(uri_map)
707 file_size_str = "Unknown (missing " + \
708 "digest for %s)" % (e,)
713 if db is not vardb and \
714 db.cpv_exists(mycpv):
716 if not myebuild and hasattr(db, "bintree"):
717 myebuild = db.bintree.getname(mycpv)
719 mysum[0] = os.stat(myebuild).st_size
724 if myebuild and file_size_str is None:
# Same comma-grouping-as-kB formatting as format_size().
725 mystr = str(mysum[0] / 1024)
729 mystr = mystr[:mycount] + "," + mystr[mycount:]
730 file_size_str = mystr + " kB"
734 print " ", darkgreen("Latest version available:"),myversion
735 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
738 (darkgreen("Size of files:"), file_size_str)
739 print " ", darkgreen("Homepage:")+" ",homepage
740 print " ", darkgreen("Description:")+" ",desc
741 print " ", darkgreen("License:")+" ",license
# Installed-version line for a category/package, or "[ Not Installed ]".
746 def getInstallationStatus(self,package):
747 installed_package = self.vartree.dep_bestmatch(package)
749 version = self.getVersion(installed_package,search.VERSION_RELEASE)
751 result = darkgreen("Latest version installed:")+" "+version
753 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract version (optionally with -rN revision) from a full cpv.
756 def getVersion(self,full_package,detail):
757 if len(full_package) > 1:
758 package_parts = portage.catpkgsplit(full_package)
759 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
760 result = package_parts[2]+ "-" + package_parts[3]
762 result = package_parts[2]
# Per-ROOT bundle of configuration used by depgraph: settings, trees,
# package sets, and a virtual dbapi of visible packages.  pkg_tree_map
# maps package type names to their tree names.
# NOTE(review): gapped excerpt -- the class docstring tail, the dict
# assignment header, and part of the iteritems loop body are missing.
767 class RootConfig(object):
768 """This is used internally by depgraph to track information about a
772 "ebuild" : "porttree",
773 "binary" : "bintree",
774 "installed" : "vartree"
778 for k, v in pkg_tree_map.iteritems():
781 def __init__(self, settings, trees, setconfig):
783 self.settings = settings
# Implicit IUSE flags, sorted for deterministic ordering.
784 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
785 self.root = self.settings["ROOT"]
786 self.setconfig = setconfig
787 self.sets = self.setconfig.getSets()
788 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Compute the atom to record in the world file for `pkg`, preferring a
# slot atom when the argument unambiguously identifies a slot, and
# declining to add anything when the package is already covered by world
# or by a non-virtual system atom.
# NOTE(review): gapped excerpt -- early returns, the mydb selection, and
# several loop/condition frames are missing from this view.
790 def create_world_atom(pkg, args_set, root_config):
791 """Create a new atom for the world file if one does not exist. If the
792 argument atom is precise enough to identify a specific slot then a slot
793 atom will be returned. Atoms that are in the system set may also be stored
794 in world since system atoms can only match one slot while world atoms can
795 be greedy with respect to slots. Unslotted system packages will not be
798 arg_atom = args_set.findAtomForPackage(pkg)
801 cp = portage.dep_getkey(arg_atom)
803 sets = root_config.sets
804 portdb = root_config.trees["porttree"].dbapi
805 vardb = root_config.trees["vartree"].dbapi
# A cp is "slotted" when it has more than one SLOT available, or a
# single SLOT that is not "0".
806 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
807 for cpv in portdb.match(cp))
808 slotted = len(available_slots) > 1 or \
809 (len(available_slots) == 1 and "0" not in available_slots)
811 # check the vdb in case this is multislot
812 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
813 for cpv in vardb.match(cp))
814 slotted = len(available_slots) > 1 or \
815 (len(available_slots) == 1 and "0" not in available_slots)
816 if slotted and arg_atom != cp:
817 # If the user gave a specific atom, store it as a
818 # slot atom in the world file.
819 slot_atom = pkg.slot_atom
821 # For USE=multislot, there are a couple of cases to
824 # 1) SLOT="0", but the real SLOT spontaneously changed to some
825 # unknown value, so just record an unslotted atom.
827 # 2) SLOT comes from an installed package and there is no
828 # matching SLOT in the portage tree.
830 # Make sure that the slot atom is available in either the
831 # portdb or the vardb, since otherwise the user certainly
832 # doesn't want the SLOT atom recorded in the world file
833 # (case 1 above). If it's only available in the vardb,
834 # the user may be trying to prevent a USE=multislot
835 # package from being removed by --depclean (case 2 above).
838 if not portdb.match(slot_atom):
839 # SLOT seems to come from an installed multislot package
841 # If there is no installed package matching the SLOT atom,
842 # it probably changed SLOT spontaneously due to USE=multislot,
843 # so just record an unslotted atom.
844 if vardb.match(slot_atom):
845 # Now verify that the argument is precise
846 # enough to identify a specific slot.
847 matches = mydb.match(arg_atom)
848 matched_slots = set()
850 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
851 if len(matched_slots) == 1:
852 new_world_atom = slot_atom
854 if new_world_atom == sets["world"].findAtomForPackage(pkg):
855 # Both atoms would be identical, so there's nothing to add.
858 # Unlike world atoms, system atoms are not greedy for slots, so they
859 # can't be safely excluded from world if they are slotted.
860 system_atom = sets["system"].findAtomForPackage(pkg)
862 if not portage.dep_getkey(system_atom).startswith("virtual/"):
864 # System virtuals aren't safe to exclude from world since they can
865 # match multiple old-style virtuals but only one of them will be
866 # pulled in by update or depclean.
867 providers = portdb.mysettings.getvirtuals().get(
868 portage.dep_getkey(system_atom))
869 if providers and len(providers) == 1 and providers[0] == cp:
871 return new_world_atom
# Strip the EAPI-1 "+"/"-" default prefixes from IUSE flags.
# NOTE(review): gapped excerpt -- the loop header and yield/append lines
# are not visible here.
873 def filter_iuse_defaults(iuse):
875 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: __init__ assigns keyword arguments onto the
# __slots__ declared anywhere in the class hierarchy (walking __bases__),
# and copy() duplicates those slot attributes onto a fresh instance.
# NOTE(review): gapped excerpt -- the while-loop headers over `classes`
# and the per-slot for-loops are not visible.
880 class SlotObject(object):
881 __slots__ = ("__weakref__",)
883 def __init__(self, **kwargs):
884 classes = [self.__class__]
889 classes.extend(c.__bases__)
890 slots = getattr(c, "__slots__", None)
# Unspecified slots default to None.
894 myvalue = kwargs.get(myattr, None)
895 setattr(self, myattr, myvalue)
899 Create a new instance and copy all attributes
900 defined from __slots__ (including those from
903 obj = self.__class__()
905 classes = [self.__class__]
910 classes.extend(c.__bases__)
911 slots = getattr(c, "__slots__", None)
915 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities: rich comparisons all delegate to
# the subclass-provided __int__() so priorities compare as integers.
919 class AbstractDepPriority(SlotObject):
920 __slots__ = ("buildtime", "runtime", "runtime_post")
922 def __lt__(self, other):
923 return self.__int__() < other
925 def __le__(self, other):
926 return self.__int__() <= other
928 def __eq__(self, other):
929 return self.__int__() == other
931 def __ne__(self, other):
932 return self.__int__() != other
934 def __gt__(self, other):
935 return self.__int__() > other
937 def __ge__(self, other):
938 return self.__int__() >= other
# Shallow copy is sufficient: all slots hold immutable values.
942 return copy.copy(self)
# Concrete dependency priority; __int__() (partially visible) maps the
# satisfied/buildtime/runtime/runtime_post/rebuild flags to the integer
# levels documented in the class docstring, and the string form
# (partially visible) maps the integer back to hard/medium/medium-soft/soft.
944 class DepPriority(AbstractDepPriority):
946 This class generates an integer priority level based of various
947 attributes of the dependency relationship. Attributes can be assigned
948 at any time and the new integer value will be generated on calls to the
949 __int__() method. Rich comparison operators are supported.
951 The boolean attributes that affect the integer value are "satisfied",
952 "buildtime", "runtime", and "system". Various combinations of
953 attributes lead to the following priority levels:
955 Combination of properties Priority Category
957 not satisfied and buildtime 0 HARD
958 not satisfied and runtime -1 MEDIUM
959 not satisfied and runtime_post -2 MEDIUM_SOFT
960 satisfied and buildtime and rebuild -3 SOFT
961 satisfied and buildtime -4 SOFT
962 satisfied and runtime -5 SOFT
963 satisfied and runtime_post -6 SOFT
964 (none of the above) -6 SOFT
966 Several integer constants are defined for categorization of priority
969 MEDIUM The upper boundary for medium dependencies.
970 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
971 SOFT The upper boundary for soft dependencies.
972 MIN The lower boundary for soft dependencies.
974 __slots__ = ("satisfied", "rebuild")
981 if not self.satisfied:
986 if self.runtime_post:
994 if self.runtime_post:
# String form compares against the category boundary constants.
999 myvalue = self.__int__()
1000 if myvalue > self.MEDIUM:
1002 if myvalue > self.MEDIUM_SOFT:
1004 if myvalue > self.SOFT:
1005 return "medium-soft"
# Priority subtype for blocker dependencies; a shared singleton instance
# is attached to the class since blockers need no per-edge state.
1008 class BlockerDepPriority(DepPriority):
1013 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when ordering unmerge operations; only "satisfied" is
# added to the inherited slots, and the string form maps the integer to
# hard/soft via the SOFT boundary.
# NOTE(review): gapped excerpt -- __int__'s buildtime/runtime branches
# and the category constants are not visible.
1015 class UnmergeDepPriority(AbstractDepPriority):
1016 __slots__ = ("satisfied",)
1018 Combination of properties Priority Category
1021 runtime_post -1 HARD
1023 (none of the above) -2 SOFT
1033 if self.runtime_post:
1040 myvalue = self.__int__()
1041 if myvalue > self.SOFT:
# In-memory snapshot of the installed-package tree (vartree) used by
# depgraph so dependency calculation does not hold the vdb lock.
# NOTE(review): gapped excerpt -- lock/finally frames, several loop
# headers and else branches are not visible in this view.
1045 class FakeVartree(portage.vartree):
1046 """This implements an in-memory copy of a vartree instance that provides
1047 all the interfaces required for use by the depgraph. The vardb is locked
1048 during the constructor call just long enough to read a copy of the
1049 installed package information. This allows the depgraph to do its
1050 dependency calculations without holding a lock on the vardb. It also
1051 allows things like vardb global updates to be done in memory so that the
1052 user doesn't necessarily need write access to the vardb in cases where
1053 global updates are necessary (updates are performed when necessary if there
1054 is not a matching ebuild in the tree)."""
1055 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1056 self._root_config = root_config
1057 if pkg_cache is None:
1059 real_vartree = root_config.trees["vartree"]
1060 portdb = root_config.trees["porttree"].dbapi
1061 self.root = real_vartree.root
1062 self.settings = real_vartree.settings
1063 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1064 self._pkg_cache = pkg_cache
1065 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1066 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1068 # At least the parent needs to exist for the lock file.
1069 portage.util.ensure_dirs(vdb_path)
1070 except portage.exception.PortageException:
# Only take the vdb lock when we can write it; read-only users
# proceed without locking.
1074 if acquire_lock and os.access(vdb_path, os.W_OK):
1075 vdb_lock = portage.locks.lockdir(vdb_path)
1076 real_dbapi = real_vartree.dbapi
1078 for cpv in real_dbapi.cpv_all():
1079 cache_key = ("installed", self.root, cpv, "nomerge")
1080 pkg = self._pkg_cache.get(cache_key)
1082 metadata = pkg.metadata
1084 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1085 myslot = metadata["SLOT"]
1086 mycp = portage.dep_getkey(cpv)
1087 myslot_atom = "%s:%s" % (mycp, myslot)
1089 mycounter = long(metadata["COUNTER"])
1092 metadata["COUNTER"] = str(mycounter)
# Track the highest COUNTER per slot atom so only the newest
# installed instance per slot wins (presumably -- the skip
# branch under this comparison is in a gap; TODO confirm).
1093 other_counter = slot_counters.get(myslot_atom, None)
1094 if other_counter is not None:
1095 if other_counter > mycounter:
1097 slot_counters[myslot_atom] = mycounter
1099 pkg = Package(built=True, cpv=cpv,
1100 installed=True, metadata=metadata,
1101 root_config=root_config, type_name="installed")
1102 self._pkg_cache[pkg] = pkg
1103 self.dbapi.cpv_inject(pkg)
1104 real_dbapi.flush_cache()
1107 portage.locks.unlockdir(vdb_lock)
1108 # Populate the old-style virtuals using the cached values.
1109 if not self.settings.treeVirtuals:
1110 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1111 portage.getCPFromCPV, self.get_all_provides())
1113 # Initialize variables needed for lazy cache pulls of the live ebuild
1114 # metadata. This ensures that the vardb lock is released ASAP, without
1115 # being delayed in case cache generation is triggered.
# aux_get/match are wrapped so live ebuild metadata is pulled in
# lazily on first access per cpv.
1116 self._aux_get = self.dbapi.aux_get
1117 self.dbapi.aux_get = self._aux_get_wrapper
1118 self._match = self.dbapi.match
1119 self.dbapi.match = self._match_wrapper
1120 self._aux_get_history = set()
1121 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1122 self._portdb = portdb
1123 self._global_updates = None
1125 def _match_wrapper(self, cpv, use_cache=1):
1127 Make sure the metadata in Package instances gets updated for any
1128 cpv that is returned from a match() call, since the metadata can
1129 be accessed directly from the Package instance instead of via
1132 matches = self._match(cpv, use_cache=use_cache)
1134 if cpv in self._aux_get_history:
1136 self._aux_get_wrapper(cpv, [])
1139 def _aux_get_wrapper(self, pkg, wants):
# First access for this cpv: try to overlay live ebuild metadata
# from the portdb; on failure fall back to applying global updates
# to the stored metadata.
1140 if pkg in self._aux_get_history:
1141 return self._aux_get(pkg, wants)
1142 self._aux_get_history.add(pkg)
1144 # Use the live ebuild metadata if possible.
1145 live_metadata = dict(izip(self._portdb_keys,
1146 self._portdb.aux_get(pkg, self._portdb_keys)))
1147 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1149 self.dbapi.aux_update(pkg, live_metadata)
1150 except (KeyError, portage.exception.PortageException):
1151 if self._global_updates is None:
1152 self._global_updates = \
1153 grab_global_updates(self._portdb.porttree_root)
1154 perform_global_updates(
1155 pkg, self.dbapi, self._global_updates)
1156 return self._aux_get(pkg, wants)
1158 def sync(self, acquire_lock=1):
1160 Call this method to synchronize state with the real vardb
1161 after one or more packages may have been installed or
1164 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1166 # At least the parent needs to exist for the lock file.
1167 portage.util.ensure_dirs(vdb_path)
1168 except portage.exception.PortageException:
1172 if acquire_lock and os.access(vdb_path, os.W_OK):
1173 vdb_lock = portage.locks.lockdir(vdb_path)
1177 portage.locks.unlockdir(vdb_lock)
# _sync body (helper split is in a gap): drop uninstalled packages,
# re-validate COUNTER/mtime, then inject newly appeared packages.
1181 real_vardb = self._root_config.trees["vartree"].dbapi
1182 current_cpv_set = frozenset(real_vardb.cpv_all())
1183 pkg_vardb = self.dbapi
1184 aux_get_history = self._aux_get_history
1186 # Remove any packages that have been uninstalled.
1187 for pkg in list(pkg_vardb):
1188 if pkg.cpv not in current_cpv_set:
1189 pkg_vardb.cpv_remove(pkg)
1190 aux_get_history.discard(pkg.cpv)
1192 # Validate counters and timestamps.
1195 validation_keys = ["COUNTER", "_mtime_"]
1196 for cpv in current_cpv_set:
1198 pkg_hash_key = ("installed", root, cpv, "nomerge")
1199 pkg = pkg_vardb.get(pkg_hash_key)
1201 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
# A changed COUNTER (or mtime) invalidates the cached Package.
1203 if counter != pkg.metadata["COUNTER"] or \
1205 pkg_vardb.cpv_remove(pkg)
1206 aux_get_history.discard(pkg.cpv)
1210 pkg = self._pkg(cpv)
1212 other_counter = slot_counters.get(pkg.slot_atom)
1213 if other_counter is not None:
1214 if other_counter > pkg.counter:
1217 slot_counters[pkg.slot_atom] = pkg.counter
1218 pkg_vardb.cpv_inject(pkg)
1220 real_vardb.flush_cache()
1222 def _pkg(self, cpv):
# Build a fresh installed-Package from the real vardb's metadata.
1223 root_config = self._root_config
1224 real_vardb = root_config.trees["vartree"].dbapi
1225 db_keys = list(real_vardb._aux_cache_keys)
1226 pkg = Package(cpv=cpv, installed=True,
1227 metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
1228 root_config=root_config,
1229 type_name="installed")
# Read and parse all update commands from $PORTDIR/profiles/updates,
# returning the accumulated command list (a missing directory is handled
# via the DirectoryNotFound branch).
1232 def grab_global_updates(portdir):
1233 from portage.update import grab_updates, parse_updates
1234 updpath = os.path.join(portdir, "profiles", "updates")
1236 rawupdates = grab_updates(updpath)
1237 except portage.exception.DirectoryNotFound:
1240 for mykey, mystat, mycontent in rawupdates:
1241 commands, errors = parse_updates(mycontent)
1242 upd_commands.extend(commands)
# Apply global update commands to a single package's dependency
# metadata in the given dbapi.
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246 from portage.update import update_dbentries
# Only the dependency variables are rewritten by atom updates here.
1247 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249 updates = update_dbentries(mycommands, aux_dict)
# NOTE(review): a guard line (presumably "if updates:") between these
# two statements is absent from this excerpt.
1251 mydb.aux_update(mycpv, updates)
# Visibility filter: a package is visible only if it passes every mask
# check below. NOTE(review): the "return False"/"return True" lines
# are missing from this excerpt; each visible "if" is a rejection test.
1253 def visible(pkgsettings, pkg):
1255 Check if a package is visible. This can raise an InvalidDependString
1256 exception if LICENSE is invalid.
1257 TODO: optionally generate a list of masking reasons
1259 @returns: True if the package is visible, False otherwise.
# A package with an empty SLOT is invalid metadata.
1261 if not pkg.metadata["SLOT"]:
# CHOST acceptance only matters for binary packages not yet installed.
1263 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264 if not pkgsettings._accept_chost(pkg):
1266 eapi = pkg.metadata["EAPI"]
1267 if not portage.eapi_is_supported(eapi):
# Deprecated EAPI only masks packages that are not already installed.
1269 if not pkg.installed:
1270 if portage._eapi_is_deprecated(eapi):
1272 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1274 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1276 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1279 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1281 except portage.exception.InvalidDependString:
# Collect human-readable mask reasons for a package, augmenting
# portage.getmaskingstatus() with CHOST and SLOT sanity checks.
# NOTE(review): the trailing return (presumably "return mreasons") is
# not visible in this excerpt.
1286 def get_masking_status(pkg, pkgsettings, root_config):
1287 mreasons = portage.getmaskingstatus(
1288 pkg, settings=pkgsettings,
1289 portdb=root_config.trees["porttree"].dbapi)
# Binary packages built for an unaccepted CHOST get an extra reason.
1291 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292 if not pkgsettings._accept_chost(pkg):
1293 mreasons.append("CHOST: %s" % \
1294 pkg.metadata["CHOST"])
1296 if not pkg.metadata["SLOT"]:
1297 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for cpv from the given db and compute its mask
# reasons. Returns (metadata, mreasons); metadata is None (and
# mreasons is ["corruption"]) when aux_get failed.
# NOTE(review): the try/except wrapping aux_get is missing from this
# excerpt (lines between the signature and the dict() call).
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302 db, pkg_type, built, installed, db_keys):
1305 metadata = dict(izip(db_keys,
1306 db.aux_get(cpv, db_keys)))
# For ebuilds (not built packages), compute USE from current settings.
1309 if metadata and not built:
1310 pkgsettings.setcpv(cpv, mydb=metadata)
1311 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312 if metadata is None:
1313 mreasons = ["corruption"]
1315 pkg = Package(type_name=pkg_type, root_config=root_config,
1316 cpv=cpv, built=built, installed=installed, metadata=metadata)
1317 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318 return metadata, mreasons
# Print the list of masked packages with their mask reasons, the
# package.mask comment that applies, and license file locations.
# Returns True if any package was masked due to unsupported EAPI.
# NOTE(review): excerpt is subsampled — shown_cpvs initialization,
# several print statements and "continue" lines are missing.
1320 def show_masked_packages(masked_packages):
# Dedup sets so each license/comment is only printed once.
1321 shown_licenses = set()
1322 shown_comments = set()
1323 # Maybe there is both an ebuild and a binary. Only
1324 # show one of them to avoid redundant appearance.
1326 have_eapi_mask = False
1327 for (root_config, pkgsettings, cpv,
1328 metadata, mreasons) in masked_packages:
1329 if cpv in shown_cpvs:
1332 comment, filename = None, None
# Only look up the package.mask comment when that is the mask reason.
1333 if "package.mask" in mreasons:
1334 comment, filename = \
1335 portage.getmaskingreason(
1336 cpv, metadata=metadata,
1337 settings=pkgsettings,
1338 portdb=root_config.trees["porttree"].dbapi,
1339 return_location=True)
1340 missing_licenses = []
1342 if not portage.eapi_is_supported(metadata["EAPI"]):
1343 have_eapi_mask = True
1345 missing_licenses = \
1346 pkgsettings._getMissingLicenses(
1348 except portage.exception.InvalidDependString:
1349 # This will have already been reported
1350 # above via mreasons.
# Python 2 print statement; lists every mask reason for the cpv.
1353 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354 if comment and comment not in shown_comments:
1357 shown_comments.add(comment)
1358 portdb = root_config.trees["porttree"].dbapi
# Tell the user where to read each unaccepted license.
1359 for l in missing_licenses:
1360 l_path = portdb.findLicensePath(l)
1361 if l in shown_licenses:
1363 msg = ("A copy of the '%s' license" + \
1364 " is located at '%s'.") % (l, l_path)
1367 shown_licenses.add(l)
1368 return have_eapi_mask
# Base class for hashable task objects. Subclasses supply _hash_key
# (a tuple) and the container/compare dunders all delegate to it, so a
# Task compares equal to its own hash-key tuple.
# NOTE(review): several "def" lines (__hash__, __len__, __iter__,
# __str__) are missing from this subsampled excerpt.
1370 class Task(SlotObject):
1371 __slots__ = ("_hash_key", "_hash_value")
1373 def _get_hash_key(self):
1374 hash_key = getattr(self, "_hash_key", None)
# Subclasses must populate _hash_key before this is called.
1375 if hash_key is None:
1376 raise NotImplementedError(self)
1379 def __eq__(self, other):
1380 return self._get_hash_key() == other
1382 def __ne__(self, other):
1383 return self._get_hash_key() != other
# Hash of the key tuple is computed once and cached in _hash_value.
1386 hash_value = getattr(self, "_hash_value", None)
1387 if hash_value is None:
1388 self._hash_value = hash(self._get_hash_key())
1389 return self._hash_value
1392 return len(self._get_hash_key())
1394 def __getitem__(self, key):
1395 return self._get_hash_key()[key]
1398 return iter(self._get_hash_key())
1400 def __contains__(self, key):
1401 return key in self._get_hash_key()
1404 return str(self._get_hash_key())
# Task representing a blocker atom (!category/pkg). The hash key is
# the ("blocks", root, atom, eapi) tuple.
# NOTE(review): the assignment line binding _hash_key (before the
# tuple literal) is missing from this subsampled excerpt.
1406 class Blocker(Task):
# Defining __eq__ in Task would otherwise suppress inherited __hash__.
1408 __hash__ = Task.__hash__
1409 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1411 def __init__(self, **kwargs):
1412 Task.__init__(self, **kwargs)
# cp is the category/package portion of the blocker atom.
1413 self.cp = portage.dep_getkey(self.atom)
1415 def _get_hash_key(self):
1416 hash_key = getattr(self, "_hash_key", None)
1417 if hash_key is None:
1419 ("blocks", self.root, self.atom, self.eapi)
1420 return self._hash_key
# Task representing one package instance (ebuild, binary or installed).
# Derived attributes (cp, slot_atom, category, cpv_split, ...) are
# computed once in __init__; metadata is wrapped so that updates to
# wrapped keys keep those attributes in sync.
# NOTE(review): subsampled excerpt — the "metadata_keys = [" opening
# line, the "class _use(object):" header, parts of _iuse.__init__ and
# the comparison-method bodies' return lines are missing.
1422 class Package(Task):
1424 __hash__ = Task.__hash__
1425 __slots__ = ("built", "cpv", "depth",
1426 "installed", "metadata", "onlydeps", "operation",
1427 "root_config", "type_name",
1428 "category", "counter", "cp", "cpv_split",
1429 "inherited", "iuse", "mtime",
1430 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1433 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434 "INHERITED", "IUSE", "KEYWORDS",
1435 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1438 def __init__(self, **kwargs):
1439 Task.__init__(self, **kwargs)
1440 self.root = self.root_config.root
# Wrap metadata so wrapped-key writes update Package attributes.
1441 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442 self.cp = portage.cpv_getkey(self.cpv)
1443 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444 self.category, self.pf = portage.catsplit(self.cpv)
1445 self.cpv_split = portage.catpkgsplit(self.cpv)
1446 self.pv_split = self.cpv_split[1:]
# Nested helper holding the enabled USE flag set for this package.
1450 __slots__ = ("__weakref__", "enabled")
1452 def __init__(self, use):
1453 self.enabled = frozenset(use)
# Nested helper describing IUSE: enabled (+flag), disabled (-flag),
# plus a lazily compiled validity regex.
1455 class _iuse(object):
1457 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1459 def __init__(self, tokens, iuse_implicit):
1460 self.tokens = tuple(tokens)
1461 self.iuse_implicit = iuse_implicit
1468 enabled.append(x[1:])
1470 disabled.append(x[1:])
1473 self.enabled = frozenset(enabled)
1474 self.disabled = frozenset(disabled)
1475 self.all = frozenset(chain(enabled, disabled, other))
# Lazy attribute: "regex" is compiled on first access, then cached.
1477 def __getattribute__(self, name):
1480 return object.__getattribute__(self, "regex")
1481 except AttributeError:
1482 all = object.__getattribute__(self, "all")
1483 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484 # Escape anything except ".*" which is supposed
1485 # to pass through from _get_implicit_iuse()
1486 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487 regex = "^(%s)$" % "|".join(regex)
1488 regex = regex.replace("\\.\\*", ".*")
1489 self.regex = re.compile(regex)
1490 return object.__getattribute__(self, name)
1492 def _get_hash_key(self):
1493 hash_key = getattr(self, "_hash_key", None)
1494 if hash_key is None:
# Operation defaults to "merge", but installed/onlydeps packages
# are "nomerge" regardless.
1495 if self.operation is None:
1496 self.operation = "merge"
1497 if self.onlydeps or self.installed:
1498 self.operation = "nomerge"
1500 (self.type_name, self.root, self.cpv, self.operation)
1501 return self._hash_key
1503 def __cmp__(self, other):
# Rich comparisons order same-cp packages by version via pkgcmp;
# packages of a different cp compare False (see missing returns).
1510 def __lt__(self, other):
1511 if other.cp != self.cp:
1513 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1517 def __le__(self, other):
1518 if other.cp != self.cp:
1520 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1524 def __gt__(self, other):
1525 if other.cp != self.cp:
1527 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1531 def __ge__(self, other):
1532 if other.cp != self.cp:
1534 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level: the full metadata key set used to build the slotted
# metadata-wrapper base class (auxdb keys minus unused ones, plus
# Package.metadata_keys).
1538 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1539 if not x.startswith("UNUSED_"))
1540 _all_metadata_keys.discard("CDEPEND")
1541 _all_metadata_keys.update(Package.metadata_keys)
1543 from portage.cache.mappings import slot_dict_class
1544 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Dict-like metadata wrapper: writes to keys in _wrapped_keys are
# mirrored onto the owning Package via _set_<key>() hooks.
# NOTE(review): subsampled excerpt — the "self._pkg = pkg" line, the
# _set_slot body, and the int()/float() conversion try/except lines
# are missing.
1546 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1548 Detect metadata updates and synchronize Package attributes.
1551 __slots__ = ("_pkg",)
1552 _wrapped_keys = frozenset(
1553 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1555 def __init__(self, pkg, metadata):
1556 _PackageMetadataWrapperBase.__init__(self)
# update() goes through __setitem__, triggering the sync hooks.
1558 self.update(metadata)
1560 def __setitem__(self, k, v):
1561 _PackageMetadataWrapperBase.__setitem__(self, k, v)
# Dispatch to _set_counter, _set_iuse, ... by lowercased key name.
1562 if k in self._wrapped_keys:
1563 getattr(self, "_set_" + k.lower())(k, v)
1565 def _set_inherited(self, k, v):
1566 if isinstance(v, basestring):
1567 v = frozenset(v.split())
1568 self._pkg.inherited = v
1570 def _set_iuse(self, k, v):
1571 self._pkg.iuse = self._pkg._iuse(
1572 v.split(), self._pkg.root_config.iuse_implicit)
1574 def _set_slot(self, k, v):
1577 def _set_use(self, k, v):
1578 self._pkg.use = self._pkg._use(v.split())
1580 def _set_counter(self, k, v):
1581 if isinstance(v, basestring):
1586 self._pkg.counter = v
1588 def _set__mtime_(self, k, v):
1589 if isinstance(v, basestring):
1591 v = float(v.strip())
# Runs the "fetch" ebuild phase synchronously for --fetchonly /
# --fetch-all-uri, inside a private PORTAGE_TMPDIR that is swapped in
# around _execute() and removed afterwards.
# NOTE(review): subsampled excerpt — the execute()/_execute() def
# lines, try/except/finally scaffolding and return statements are
# missing.
1596 class EbuildFetchonly(SlotObject):
1598 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1601 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1602 # ensuring sane $PWD (bug #239560) and storing elog
1603 # messages. Use a private temp directory, in order
1604 # to avoid locking the main one.
1605 settings = self.settings
1606 global_tmpdir = settings["PORTAGE_TMPDIR"]
1607 from tempfile import mkdtemp
1609 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
# Only permission errors are translated; others propagate.
1611 if e.errno != portage.exception.PermissionDenied.errno:
1613 raise portage.exception.PermissionDenied(global_tmpdir)
1614 settings["PORTAGE_TMPDIR"] = private_tmpdir
1615 settings.backup_changes("PORTAGE_TMPDIR")
1617 retval = self._execute()
# Restore the original tmpdir and clean up the private one.
1619 settings["PORTAGE_TMPDIR"] = global_tmpdir
1620 settings.backup_changes("PORTAGE_TMPDIR")
1621 shutil.rmtree(private_tmpdir)
1625 settings = self.settings
1627 root_config = pkg.root_config
1628 portdb = root_config.trees["porttree"].dbapi
1629 ebuild_path = portdb.findname(pkg.cpv)
1630 settings.setcpv(pkg)
1631 debug = settings.get("PORTAGE_DEBUG") == "1"
1632 use_cache = 1 # always true
1633 portage.doebuild_environment(ebuild_path, "fetch",
1634 root_config.root, settings, debug, use_cache, portdb)
1635 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
# listonly when pretending; fetchall mirrors --fetch-all-uri.
1637 retval = portage.doebuild(ebuild_path, "fetch",
1638 self.settings["ROOT"], self.settings, debug=debug,
1639 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1640 mydbapi=portdb, tree="porttree")
1642 if retval != os.EX_OK:
1643 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1644 eerror(msg, phase="unpack", key=pkg.cpv)
1646 portage.elog.elog_process(self.pkg.cpv, self.settings)
# Base class for asynchronous tasks with start/poll/wait/cancel and
# start/exit listener callbacks.
# NOTE(review): subsampled excerpt — the def lines for start(),
# isAlive(), poll(), wait(), cancel() and several return/docstring
# delimiter lines are missing.
1649 class AsynchronousTask(SlotObject):
1651 Subclasses override _wait() and _poll() so that calls
1652 to public methods can be wrapped for implementing
1653 hooks such as exit listener notification.
1655 Subclasses should call self.wait() to notify exit listeners after
1656 the task is complete and self.returncode has been set.
1659 __slots__ = ("background", "cancelled", "returncode") + \
1660 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1664 Start an asynchronous task and then return as soon as possible.
1670 raise NotImplementedError(self)
# Alive means "no returncode yet".
1673 return self.returncode is None
1680 return self.returncode
1683 if self.returncode is None:
1686 return self.returncode
1689 return self.returncode
1692 self.cancelled = True
1695 def addStartListener(self, f):
1697 The function will be called with one argument, a reference to self.
1699 if self._start_listeners is None:
1700 self._start_listeners = []
1701 self._start_listeners.append(f)
1703 def removeStartListener(self, f):
1704 if self._start_listeners is None:
1706 self._start_listeners.remove(f)
1708 def _start_hook(self):
# Listener list is cleared before invoking, so each fires once.
1709 if self._start_listeners is not None:
1710 start_listeners = self._start_listeners
1711 self._start_listeners = None
1713 for f in start_listeners:
1716 def addExitListener(self, f):
1718 The function will be called with one argument, a reference to self.
1720 if self._exit_listeners is None:
1721 self._exit_listeners = []
1722 self._exit_listeners.append(f)
1724 def removeExitListener(self, f):
# A listener may be pending on the stack while _wait_hook runs.
1725 if self._exit_listeners is None:
1726 if self._exit_listener_stack is not None:
1727 self._exit_listener_stack.remove(f)
1729 self._exit_listeners.remove(f)
1731 def _wait_hook(self):
1733 Call this method after the task completes, just before returning
1734 the returncode from wait() or poll(). This hook is
1735 used to trigger exit listeners when the returncode first
1738 if self.returncode is not None and \
1739 self._exit_listeners is not None:
1741 # This prevents recursion, in case one of the
1742 # exit handlers triggers this method again by
1743 # calling wait(). Use a stack that gives
1744 # removeExitListener() an opportunity to consume
1745 # listeners from the stack, before they can get
1746 # called below. This is necessary because a call
1747 # to one exit listener may result in a call to
1748 # removeExitListener() for another listener on
1749 # the stack. That listener needs to be removed
1750 # from the stack since it would be inconsistent
1751 # to call it after it has been passed into
1752 # removeExitListener().
1753 self._exit_listener_stack = self._exit_listeners
1754 self._exit_listeners = None
# Reverse so listeners fire in registration order when popped.
1756 self._exit_listener_stack.reverse()
1757 while self._exit_listener_stack:
1758 self._exit_listener_stack.pop()(self)
# Non-blocking reader that drains one or more pipes into memory via
# the scheduler's poll loop; contents retrieved with getvalue().
# NOTE(review): subsampled excerpt — the def lines for _start(),
# isAlive(), _wait(), getvalue(), close(), plus the EOF/EAGAIN
# handling inside _output_handler, are missing.
1760 class PipeReader(AsynchronousTask):
1763 Reads output from one or more files and saves it in memory,
1764 for retrieval via the getvalue() method. This is driven by
1765 the scheduler's poll() loop, so it runs entirely within the
1769 __slots__ = ("input_files", "scheduler",) + \
1770 ("pid", "_read_data", "_registered", "_reg_ids")
1775 self._reg_ids = set()
1776 self._read_data = []
# Switch every input fd to non-blocking and register it for POLLIN.
1777 for k, f in self.input_files.iteritems():
1778 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1779 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1780 self._reg_ids.add(self.scheduler.register(f.fileno(),
1781 PollConstants.POLLIN, self._output_handler))
1782 self._registered = True
1785 return self._registered
1788 if self.returncode is not None:
1789 return self.returncode
# Block in the scheduler until all registered fds are drained.
1791 if self._registered:
1792 self.scheduler.schedule(self._reg_ids)
1795 self.returncode = os.EX_OK
1796 return self.returncode
1799 """Retrieve the entire contents"""
1800 return "".join(self._read_data)
1803 """Free the memory buffer."""
1804 self._read_data = None
1806 def _output_handler(self, fd, event):
# Find which registered file this event belongs to.
1807 files = self.input_files
1808 for f in files.itervalues():
1809 if fd == f.fileno():
1812 buf = array.array('B')
1814 buf.fromfile(f, self._bufsize)
1819 self._read_data.append(buf.tostring())
1824 return self._registered
1826 def _unregister(self):
1828 Unregister from the scheduler and close open files.
1831 self._registered = False
1833 if self._reg_ids is not None:
1834 for reg_id in self._reg_ids:
1835 self.scheduler.unregister(reg_id)
1836 self._reg_ids = None
1838 if self.input_files is not None:
1839 for f in self.input_files.itervalues():
1841 self.input_files = None
# A task composed of sub-tasks executed one at a time; _current_task
# tracks the active sub-task and the *_exit helpers are generic exit
# callbacks for subclasses.
# NOTE(review): subsampled excerpt — the def lines for isAlive(),
# cancel(), _poll(), _wait() and several loop/return lines are
# missing.
1843 class CompositeTask(AsynchronousTask):
1845 __slots__ = ("scheduler",) + ("_current_task",)
1848 return self._current_task is not None
1851 self.cancelled = True
# Cancellation propagates to the active sub-task.
1852 if self._current_task is not None:
1853 self._current_task.cancel()
1857 This does a loop calling self._current_task.poll()
1858 repeatedly as long as the value of self._current_task
1859 keeps changing. It calls poll() a maximum of one time
1860 for a given self._current_task instance. This is useful
1861 since calling poll() on a task can trigger advance to
1862 the next task could eventually lead to the returncode
1863 being set in cases when polling only a single task would
1864 not have the same effect.
1869 task = self._current_task
1870 if task is None or task is prev:
1871 # don't poll the same task more than once
1876 return self.returncode
1882 task = self._current_task
1884 # don't wait for the same task more than once
1887 # Before the task.wait() method returned, an exit
1888 # listener should have set self._current_task to either
1889 # a different task or None. Something is wrong.
1890 raise AssertionError("self._current_task has not " + \
1891 "changed since calling wait", self, task)
1895 return self.returncode
1897 def _assert_current(self, task):
1899 Raises an AssertionError if the given task is not the
1900 same one as self._current_task. This can be useful
1903 if task is not self._current_task:
1904 raise AssertionError("Unrecognized task: %s" % (task,))
1906 def _default_exit(self, task):
1908 Calls _assert_current() on the given task and then sets the
1909 composite returncode attribute if task.returncode != os.EX_OK.
1910 If the task failed then self._current_task will be set to None.
1911 Subclasses can use this as a generic task exit callback.
1914 @returns: The task.returncode attribute.
1916 self._assert_current(task)
1917 if task.returncode != os.EX_OK:
1918 self.returncode = task.returncode
1919 self._current_task = None
1920 return task.returncode
1922 def _final_exit(self, task):
1924 Assumes that task is the final task of this composite task.
1925 Calls _default_exit() and sets self.returncode to the task's
1926 returncode and sets self._current_task to None.
1928 self._default_exit(task)
1929 self._current_task = None
1930 self.returncode = task.returncode
1931 return self.returncode
1933 def _default_final_exit(self, task):
1935 This calls _final_exit() and then wait().
1937 Subclasses can use this as a generic final task exit callback.
1940 self._final_exit(task)
1943 def _start_task(self, task, exit_handler):
1945 Register exit handler for the given task, set it
1946 as self._current_task, and call task.start().
1948 Subclasses can use this as a generic way to start
1952 task.addExitListener(exit_handler)
1953 self._current_task = task
# A FIFO of tasks executed strictly one after another; the sequence
# stops early if any task exits non-zero.
# NOTE(review): subsampled excerpt — the def lines for start() and
# cancel(), and the early-exit branch in _task_exit_handler, are
# missing.
1956 class TaskSequence(CompositeTask):
1958 A collection of tasks that executes sequentially. Each task
1959 must have an addExitListener() method that can be used as
1960 a means to trigger movement from one task to the next.
1963 __slots__ = ("_task_queue",)
1965 def __init__(self, **kwargs):
1966 AsynchronousTask.__init__(self, **kwargs)
# deque gives O(1) popleft for the FIFO ordering.
1967 self._task_queue = deque()
1969 def add(self, task):
1970 self._task_queue.append(task)
1973 self._start_next_task()
1976 self._task_queue.clear()
1977 CompositeTask.cancel(self)
1979 def _start_next_task(self):
1980 self._start_task(self._task_queue.popleft(),
1981 self._task_exit_handler)
1983 def _task_exit_handler(self, task):
# Failure short-circuits; success advances or finalizes.
1984 if self._default_exit(task) != os.EX_OK:
1986 elif self._task_queue:
1987 self._start_next_task()
1989 self._final_exit(task)
# Asynchronous task wrapping a forked child process; tracks the pid,
# any open files, and the scheduler registration for its pipe.
# NOTE(review): subsampled excerpt — def lines for _poll(), cancel(),
# isAlive(), _wait(), plus try/except scaffolding, are missing.
1992 class SubProcess(AsynchronousTask):
1994 __slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id")
1996 # A file descriptor is required for the scheduler to monitor changes from
1997 # inside a poll() loop. When logging is not enabled, create a pipe just to
1998 # serve this purpose alone.
2002 if self.returncode is not None:
2003 return self.returncode
2004 if self.pid is None:
2005 return self.returncode
2006 if self._registered:
2007 return self.returncode
# Non-blocking reap; ECHILD means the child is already gone, so a
# failure code is fabricated.
2010 retval = os.waitpid(self.pid, os.WNOHANG)
2012 if e.errno != errno.ECHILD:
2015 retval = (self.pid, 1)
2017 if retval == (0, 0):
2019 self._set_returncode(retval)
2020 return self.returncode
# Cancel sends SIGTERM; ESRCH (no such process) is tolerated.
2025 os.kill(self.pid, signal.SIGTERM)
2027 if e.errno != errno.ESRCH:
2031 self.cancelled = True
2032 if self.pid is not None:
2034 return self.returncode
2037 return self.pid is not None and \
2038 self.returncode is None
2042 if self.returncode is not None:
2043 return self.returncode
# Let the scheduler drain our pipe before blocking in waitpid.
2045 if self._registered:
2046 self.scheduler.schedule(self._reg_id)
2048 if self.returncode is not None:
2049 return self.returncode
2052 wait_retval = os.waitpid(self.pid, 0)
2054 if e.errno != errno.ECHILD:
2057 self._set_returncode((self.pid, 1))
2059 self._set_returncode(wait_retval)
2061 return self.returncode
2063 def _unregister(self):
2065 Unregister from the scheduler and close open files.
2068 self._registered = False
2070 if self._reg_id is not None:
2071 self.scheduler.unregister(self._reg_id)
2074 if self._files is not None:
2075 for f in self._files.itervalues():
2079 def _set_returncode(self, wait_retval):
# Convert the 16-bit waitpid status into a shell-style exit code:
# signal deaths become 128+signum style values via the shifts below.
2081 retval = wait_retval[1]
2083 if retval != os.EX_OK:
2085 retval = (retval & 0xff) << 8
2087 retval = retval >> 8
2089 self.returncode = retval
# SubProcess that spawns via portage.process.spawn(), wiring stdout/
# stderr through a pipe so output can be teed to a log file and/or
# the terminal from the scheduler's poll loop.
# NOTE(review): subsampled excerpt — def _start(self), several
# try/else branches, flush calls, and EOF handling in the output
# handlers are missing.
2091 class SpawnProcess(SubProcess):
2094 Constructor keyword args are passed into portage.process.spawn().
2095 The required "args" keyword argument will be passed as the first
# Names forwarded verbatim from attributes into spawn() kwargs.
2099 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2100 "uid", "gid", "groups", "umask", "logfile",
2101 "path_lookup", "pre_exec")
2103 __slots__ = ("args",) + \
2106 _file_names = ("log", "process", "stdout")
2107 _files_dict = slot_dict_class(_file_names, prefix="")
2115 if self.fd_pipes is None:
2117 fd_pipes = self.fd_pipes
# Default stdio mapping when the caller did not specify one.
2118 fd_pipes.setdefault(0, sys.stdin.fileno())
2119 fd_pipes.setdefault(1, sys.stdout.fileno())
2120 fd_pipes.setdefault(2, sys.stderr.fileno())
2122 # flush any pending output
2123 for fd in fd_pipes.itervalues():
2124 if fd == sys.stdout.fileno():
2126 if fd == sys.stderr.fileno():
2129 logfile = self.logfile
2130 self._files = self._files_dict()
# master stays in this process (non-blocking); slave goes to the
# child as its stdout/stderr.
2133 master_fd, slave_fd = self._pipe(fd_pipes)
2134 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2135 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2138 fd_pipes_orig = fd_pipes.copy()
2140 # TODO: Use job control functions like tcsetpgrp() to control
2141 # access to stdin. Until then, use /dev/null so that any
2142 # attempts to read from stdin will immediately return EOF
2143 # instead of blocking indefinitely.
2144 null_input = open('/dev/null', 'rb')
2145 fd_pipes[0] = null_input.fileno()
2147 fd_pipes[0] = fd_pipes_orig[0]
2149 files.process = os.fdopen(master_fd, 'r')
2150 if logfile is not None:
2152 fd_pipes[1] = slave_fd
2153 fd_pipes[2] = slave_fd
2155 files.log = open(logfile, "a")
2156 portage.util.apply_secpass_permissions(logfile,
2157 uid=portage.portage_uid, gid=portage.portage_gid,
# In the foreground, also tee output to the original stdout.
2160 if not self.background:
2161 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2163 output_handler = self._output_handler
2167 # Create a dummy pipe so the scheduler can monitor
2168 # the process from inside a poll() loop.
2169 fd_pipes[self._dummy_pipe_fd] = slave_fd
2171 fd_pipes[1] = slave_fd
2172 fd_pipes[2] = slave_fd
2173 output_handler = self._dummy_handler
2176 for k in self._spawn_kwarg_names:
2177 v = getattr(self, k)
2181 kwargs["fd_pipes"] = fd_pipes
2182 kwargs["returnpid"] = True
# logfile is handled here, not by spawn() itself.
2183 kwargs.pop("logfile", None)
2185 retval = self._spawn(self.args, **kwargs)
2188 if null_input is not None:
# An int return from spawn means the fork itself failed.
2191 if isinstance(retval, int):
2193 for f in files.values():
2195 self.returncode = retval
2199 self.pid = retval[0]
# This class reaps the child itself; remove it from the global list.
2200 portage.process.spawned_pids.remove(self.pid)
2202 self._reg_id = self.scheduler.register(files.process.fileno(),
2203 PollConstants.POLLIN, output_handler)
2204 self._registered = True
2206 def _pipe(self, fd_pipes):
2208 @type fd_pipes: dict
2209 @param fd_pipes: pipes from which to copy terminal size if desired.
2213 def _spawn(self, args, **kwargs):
2214 return portage.process.spawn(args, **kwargs)
2216 def _output_handler(self, fd, event):
# Copy available child output to the tee targets (stdout and log).
2218 buf = array.array('B')
2220 buf.fromfile(files.process, self._bufsize)
2224 if not self.background:
2225 buf.tofile(files.stdout)
2226 files.stdout.flush()
2227 buf.tofile(files.log)
2232 return self._registered
2234 def _dummy_handler(self, fd, event):
2236 This method is mainly interested in detecting EOF, since
2237 the only purpose of the pipe is to allow the scheduler to
2238 monitor the process from inside a poll() loop.
2241 buf = array.array('B')
2243 buf.fromfile(files.process, self._bufsize)
2251 return self._registered
# SpawnProcess that runs misc-functions.sh phase commands using an
# already-prepared ebuild environment.
# NOTE(review): subsampled excerpt — the def _start(self) line and an
# interior conditional are missing.
2253 class MiscFunctionsProcess(SpawnProcess):
2255 Spawns misc-functions.sh with an existing ebuild environment.
2258 __slots__ = ("commands", "phase", "pkg", "settings")
2261 settings = self.settings
# Clear EBUILD_PHASE so misc-functions.sh runs outside a phase.
2262 settings.pop("EBUILD_PHASE", None)
2263 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2264 misc_sh_binary = os.path.join(portage_bin_path,
2265 os.path.basename(portage.const.MISC_SH_BINARY))
2267 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2268 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before starting.
2270 portage._doebuild_exit_status_unlink(
2271 settings.get("EBUILD_EXIT_STATUS_FILE"))
2273 SpawnProcess._start(self)
2275 def _spawn(self, args, **kwargs):
2276 settings = self.settings
2277 debug = settings.get("PORTAGE_DEBUG") == "1"
# Spawn through portage.spawn so the ebuild environment applies.
2278 return portage.spawn(" ".join(args), settings,
2279 debug=debug, **kwargs)
2281 def _set_returncode(self, wait_retval):
2282 SpawnProcess._set_returncode(self, wait_retval)
# The exit-status file is authoritative over the process status.
2283 self.returncode = portage._doebuild_exit_status_check_and_log(
2284 self.settings, self.phase, self.returncode)
# SpawnProcess that runs the ebuild "fetch" phase in a subprocess,
# inside a locked build directory allocated from config_pool.
# NOTE(review): subsampled excerpt — def _start(self), the phase
# selection, nice(1)/parallel-fetch conditionals, the early return in
# _pipe() and elog_out close lines are missing.
2286 class EbuildFetcher(SpawnProcess):
2288 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2293 root_config = self.pkg.root_config
2294 portdb = root_config.trees["porttree"].dbapi
2295 ebuild_path = portdb.findname(self.pkg.cpv)
# Settings come from a shared pool and are returned in
# _set_returncode via deallocate().
2296 settings = self.config_pool.allocate()
2297 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2298 self._build_dir.lock()
2299 self._build_dir.clean()
2300 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2301 if self.logfile is None:
2302 self.logfile = settings.get("PORTAGE_LOG_FILE")
2308 # If any incremental variables have been overridden
2309 # via the environment, those values need to be passed
2310 # along here so that they are correctly considered by
2311 # the config instance in the subprocess.
2312 fetch_env = os.environ.copy()
2314 fetch_env["PORTAGE_NICENESS"] = "0"
2316 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
# The fetch runs via the external "ebuild" command.
2318 ebuild_binary = os.path.join(
2319 settings["PORTAGE_BIN_PATH"], "ebuild")
2321 fetch_args = [ebuild_binary, ebuild_path, phase]
2322 debug = settings.get("PORTAGE_DEBUG") == "1"
2324 fetch_args.append("--debug")
2326 self.args = fetch_args
2327 self.env = fetch_env
2328 SpawnProcess._start(self)
2330 def _pipe(self, fd_pipes):
2331 """When appropriate, use a pty so that fetcher progress bars,
2332 like wget has, will work properly."""
2333 if self.background or not sys.stdout.isatty():
2334 # When the output only goes to a log file,
2335 # there's no point in creating a pty.
2337 stdout_pipe = fd_pipes.get(1)
2338 got_pty, master_fd, slave_fd = \
2339 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2340 return (master_fd, slave_fd)
2342 def _set_returncode(self, wait_retval):
2343 SpawnProcess._set_returncode(self, wait_retval)
2344 # Collect elog messages that might have been
2345 # created by the pkg_nofetch phase.
2346 if self._build_dir is not None:
2347 # Skip elog messages for prefetch, in order to avoid duplicates.
2348 if not self.prefetch and self.returncode != os.EX_OK:
2350 if self.logfile is not None:
2352 elog_out = open(self.logfile, 'a')
2353 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2354 if self.logfile is not None:
2355 msg += ", Log file:"
2356 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2357 if self.logfile is not None:
2358 eerror(" '%s'" % (self.logfile,),
2359 phase="unpack", key=self.pkg.cpv, out=elog_out)
2360 if elog_out is not None:
2362 if not self.prefetch:
2363 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2364 features = self._build_dir.settings.features
# On success, clean the build dir; always unlock and return the
# settings instance to the pool.
2365 if self.returncode == os.EX_OK:
2366 self._build_dir.clean()
2367 self._build_dir.unlock()
2368 self.config_pool.deallocate(self._build_dir.settings)
2369 self._build_dir = None
# Manages locking/cleaning of PORTAGE_BUILDDIR. The category dir is
# briefly locked while creating the package dir to avoid races with
# concurrent removal.
# NOTE(review): subsampled excerpt — def lines for lock(), clean()
# and unlock(), try/finally scaffolding, and the catdir rmdir call
# are missing.
2371 class EbuildBuildDir(SlotObject):
2373 __slots__ = ("dir_path", "pkg", "settings",
2374 "locked", "_catdir", "_lock_obj")
2376 def __init__(self, **kwargs):
2377 SlotObject.__init__(self, **kwargs)
2382 This raises an AlreadyLocked exception if lock() is called
2383 while a lock is already held. In order to avoid this, call
2384 unlock() or check whether the "locked" attribute is True
2385 or False before calling lock().
# Re-entrant locking is an error by design.
2387 if self._lock_obj is not None:
2388 raise self.AlreadyLocked((self._lock_obj,))
2390 dir_path = self.dir_path
# Without an explicit dir_path, derive PORTAGE_BUILDDIR by running
# doebuild_environment for the "setup" phase.
2391 if dir_path is None:
2392 root_config = self.pkg.root_config
2393 portdb = root_config.trees["porttree"].dbapi
2394 ebuild_path = portdb.findname(self.pkg.cpv)
2395 settings = self.settings
2396 settings.setcpv(self.pkg)
2397 debug = settings.get("PORTAGE_DEBUG") == "1"
2398 use_cache = 1 # always true
2399 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2400 self.settings, debug, use_cache, portdb)
2401 dir_path = self.settings["PORTAGE_BUILDDIR"]
2403 catdir = os.path.dirname(dir_path)
2404 self._catdir = catdir
2406 portage.util.ensure_dirs(os.path.dirname(catdir),
2407 gid=portage.portage_gid,
# Hold the category-dir lock while creating the package dir.
2411 catdir_lock = portage.locks.lockdir(catdir)
2412 portage.util.ensure_dirs(catdir,
2413 gid=portage.portage_gid,
2415 self._lock_obj = portage.locks.lockdir(dir_path)
2417 self.locked = self._lock_obj is not None
2418 if catdir_lock is not None:
2419 portage.locks.unlockdir(catdir_lock)
2422 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2423 by keepwork or keeptemp in FEATURES."""
2424 settings = self.settings
2425 features = settings.features
2426 if not ("keepwork" in features or "keeptemp" in features):
2428 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
# An already-missing build dir is fine; other errors propagate.
2429 except EnvironmentError, e:
2430 if e.errno != errno.ENOENT:
2435 if self._lock_obj is None:
2438 portage.locks.unlockdir(self._lock_obj)
2439 self._lock_obj = None
2442 catdir = self._catdir
# Attempt to remove the (possibly now-empty) category dir; a
# non-empty or concurrently re-created dir is not an error.
2445 catdir_lock = portage.locks.lockdir(catdir)
2451 if e.errno not in (errno.ENOENT,
2452 errno.ENOTEMPTY, errno.EEXIST):
2455 portage.locks.unlockdir(catdir_lock)
# Raised by lock() when a lock is already held.
2457 class AlreadyLocked(portage.exception.PortageException):
# Composite task that drives a full from-source ebuild build: optional
# background prefetch, source fetch, clean/compile (EbuildExecuter),
# optional binary packaging (EbuildBinpkg), and finally merge.
# NOTE(review): intermediate source lines are elided in this listing
# (gaps in the embedded numbering); method headers such as _start() are
# missing — comments below are hedged accordingly.
2460 class EbuildBuild(CompositeTask):
2462 __slots__ = ("args_set", "config_pool", "find_blockers",
2463 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2464 "prefetcher", "settings", "world_atom") + \
2465 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# (elided def — presumably _start) record where this package is emerged
# from and locate its ebuild in the portage tree.
2469 logger = self.logger
2472 settings = self.settings
2473 world_atom = self.world_atom
2474 root_config = pkg.root_config
2477 portdb = root_config.trees[tree].dbapi
2478 settings["EMERGE_FROM"] = pkg.type_name
2479 settings.backup_changes("EMERGE_FROM")
2481 ebuild_path = portdb.findname(self.pkg.cpv)
2482 self._ebuild_path = ebuild_path
# If a background prefetcher exists and is still running, wait for it
# (it holds the fetch lock); otherwise fall through to fetching now.
2484 prefetcher = self.prefetcher
2485 if prefetcher is None:
2487 elif not prefetcher.isAlive():
2489 elif prefetcher.poll() is None:
2491 waiting_msg = "Fetching files " + \
2492 "in the background. " + \
2493 "To view fetch progress, run `tail -f " + \
2494 "/var/log/emerge-fetch.log` in another " + \
2496 msg_prefix = colorize("GOOD", " * ")
2497 from textwrap import wrap
2498 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2499 for line in wrap(waiting_msg, 65))
2500 if not self.background:
2501 writemsg(waiting_msg, noiselevel=-1)
2503 self._current_task = prefetcher
2504 prefetcher.addExitListener(self._prefetch_exit)
2507 self._prefetch_exit(prefetcher)
# After prefetch: either do a synchronous pretend/fetchonly fetch
# (EbuildFetchonly) or start an asynchronous EbuildFetcher.
2509 def _prefetch_exit(self, prefetcher):
2513 settings = self.settings
2516 fetcher = EbuildFetchonly(
2517 fetch_all=opts.fetch_all_uri,
2518 pkg=pkg, pretend=opts.pretend,
2520 retval = fetcher.execute()
2521 self.returncode = retval
2525 fetcher = EbuildFetcher(config_pool=self.config_pool,
2526 fetchall=opts.fetch_all_uri,
2527 fetchonly=opts.fetchonly,
2528 background=self.background,
2529 pkg=pkg, scheduler=self.scheduler)
2531 self._start_task(fetcher, self._fetch_exit)
# On fetch completion: record the fetch log on failure, remove it on
# success, then lock the build dir and start compiling.
2533 def _fetch_exit(self, fetcher):
2537 fetch_failed = False
2539 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2541 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2543 if fetch_failed and fetcher.logfile is not None and \
2544 os.path.exists(fetcher.logfile):
2545 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2547 if not fetch_failed and fetcher.logfile is not None:
2548 # Fetch was successful, so remove the fetch log.
2550 os.unlink(fetcher.logfile)
2554 if fetch_failed or opts.fetchonly:
2558 logger = self.logger
2560 pkg_count = self.pkg_count
2561 scheduler = self.scheduler
2562 settings = self.settings
2563 features = settings.features
2564 ebuild_path = self._ebuild_path
2565 system_set = pkg.root_config.sets["system"]
2567 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2568 self._build_dir.lock()
2570 # Cleaning is triggered before the setup
2571 # phase, in portage.doebuild().
2572 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2573 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2574 short_msg = "emerge: (%s of %s) %s Clean" % \
2575 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2576 logger.log(msg, short_msg=short_msg)
2578 #buildsyspkg: Check if we need to _force_ binary package creation
2579 self._issyspkg = "buildsyspkg" in features and \
2580 system_set.findAtomForPackage(pkg) and \
2583 if opts.buildpkg or self._issyspkg:
2585 self._buildpkg = True
2587 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2588 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2589 short_msg = "emerge: (%s of %s) %s Compile" % \
2590 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2591 logger.log(msg, short_msg=short_msg)
2594 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2595 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2596 short_msg = "emerge: (%s of %s) %s Compile" % \
2597 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2598 logger.log(msg, short_msg=short_msg)
2600 build = EbuildExecuter(background=self.background, pkg=pkg,
2601 scheduler=scheduler, settings=settings)
2602 self._start_task(build, self._build_exit)
# Flush elog messages and release the build-dir lock.
2604 def _unlock_builddir(self):
2605 portage.elog.elog_process(self.pkg.cpv, self.settings)
2606 self._build_dir.unlock()
# After compile: on failure unlock and bail; otherwise optionally start
# binary packaging (with a rescue-tarball notice for system packages).
2608 def _build_exit(self, build):
2609 if self._default_exit(build) != os.EX_OK:
2610 self._unlock_builddir()
2615 buildpkg = self._buildpkg
2618 self._final_exit(build)
2623 msg = ">>> This is a system package, " + \
2624 "let's pack a rescue tarball.\n"
2626 log_path = self.settings.get("PORTAGE_LOG_FILE")
2627 if log_path is not None:
2628 log_file = open(log_path, 'a')
2634 if not self.background:
2635 portage.writemsg_stdout(msg, noiselevel=-1)
2637 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2638 scheduler=self.scheduler, settings=self.settings)
2640 self._start_task(packager, self._buildpkg_exit)
2642 def _buildpkg_exit(self, packager):
2644 Released build dir lock when there is a failure or
2645 when in buildpkgonly mode. Otherwise, the lock will
2646 be released when merge() is called.
2649 if self._default_exit(packager) == os.EX_OK and \
2650 self.opts.buildpkgonly:
2651 # Need to call "clean" phase for buildpkgonly mode
2652 portage.elog.elog_process(self.pkg.cpv, self.settings)
2654 clean_phase = EbuildPhase(background=self.background,
2655 pkg=self.pkg, phase=phase,
2656 scheduler=self.scheduler, settings=self.settings,
2658 self._start_task(clean_phase, self._clean_exit)
2661 if self._final_exit(packager) != os.EX_OK or \
2662 self.opts.buildpkgonly:
2663 self._unlock_builddir()
2666 def _clean_exit(self, clean_phase):
2667 if self._final_exit(clean_phase) != os.EX_OK or \
2668 self.opts.buildpkgonly:
2669 self._unlock_builddir()
# (elided def — presumably install())
2674 Install the package and then clean up and release locks.
2675 Only call this after the build has completed successfully
2676 and neither fetchonly nor buildpkgonly mode are enabled.
2679 find_blockers = self.find_blockers
2680 ldpath_mtimes = self.ldpath_mtimes
2681 logger = self.logger
2683 pkg_count = self.pkg_count
2684 settings = self.settings
2685 world_atom = self.world_atom
2686 ebuild_path = self._ebuild_path
2689 merge = EbuildMerge(find_blockers=self.find_blockers,
2690 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2691 pkg_count=pkg_count, pkg_path=ebuild_path,
2692 scheduler=self.scheduler,
2693 settings=settings, tree=tree, world_atom=world_atom)
2695 msg = " === (%s of %s) Merging (%s::%s)" % \
2696 (pkg_count.curval, pkg_count.maxval,
2697 pkg.cpv, ebuild_path)
2698 short_msg = "emerge: (%s of %s) %s Merge" % \
2699 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2700 logger.log(msg, short_msg=short_msg)
2703 rval = merge.execute()
2705 self._unlock_builddir()
# Composite task that runs the ebuild phase sequence for a from-source
# build: clean, setup, unpack, then prepare/configure/compile/test/
# install (subject to EAPI).
# NOTE(review): intermediate source lines are elided in this listing
# (e.g. the _live_eclasses members and some def headers).
2709 class EbuildExecuter(CompositeTask):
2711 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2713 _phases = ("prepare", "configure", "compile", "test", "install")
2715 _live_eclasses = frozenset([
# (elided def — presumably _start) begin with a "clean" phase.
2725 self._tree = "porttree"
2728 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2729 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2730 self._start_task(clean_phase, self._clean_phase_exit)
2732 def _clean_phase_exit(self, clean_phase):
2734 if self._default_exit(clean_phase) != os.EX_OK:
2739 scheduler = self.scheduler
2740 settings = self.settings
2743 # This initializes PORTAGE_LOG_FILE.
2744 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# Setup phases are funneled through scheduleSetup() so they can be
# serialized by the scheduler.
2746 setup_phase = EbuildPhase(background=self.background,
2747 pkg=pkg, phase="setup", scheduler=scheduler,
2748 settings=settings, tree=self._tree)
2750 setup_phase.addExitListener(self._setup_exit)
2751 self._current_task = setup_phase
2752 self.scheduler.scheduleSetup(setup_phase)
2754 def _setup_exit(self, setup_phase):
2756 if self._default_exit(setup_phase) != os.EX_OK:
2760 unpack_phase = EbuildPhase(background=self.background,
2761 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2762 settings=self.settings, tree=self._tree)
2764 if self._live_eclasses.intersection(self.pkg.inherited):
2765 # Serialize $DISTDIR access for live ebuilds since
2766 # otherwise they can interfere with eachother.
2768 unpack_phase.addExitListener(self._unpack_exit)
2769 self._current_task = unpack_phase
2770 self.scheduler.scheduleUnpack(unpack_phase)
2773 self._start_task(unpack_phase, self._unpack_exit)
# After unpack, queue the remaining phases; older EAPIs skip
# src_prepare/src_configure.
2775 def _unpack_exit(self, unpack_phase):
2777 if self._default_exit(unpack_phase) != os.EX_OK:
2781 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2784 phases = self._phases
2785 eapi = pkg.metadata["EAPI"]
2786 if eapi in ("0", "1", "2_pre1"):
2787 # skip src_prepare and src_configure
2789 elif eapi in ("2_pre2",):
2793 for phase in phases:
2794 ebuild_phases.add(EbuildPhase(background=self.background,
2795 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2796 settings=self.settings, tree=self._tree))
2798 self._start_task(ebuild_phases, self._default_final_exit)
# NOTE(review): intermediate source lines are elided in this listing
# (gaps in the embedded numbering).
2800 class EbuildMetadataPhase(SubProcess):
2803 Asynchronous interface for the ebuild "depend" phase which is
2804 used to extract metadata from the ebuild.
2807 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2808 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2811 _file_names = ("ebuild",)
2812 _files_dict = slot_dict_class(_file_names, prefix="")
2813 _bufsize = SpawnProcess._bufsize
# (elided def — presumably _start) spawn doebuild("depend") with its
# output routed through a non-blocking pipe.
2817 settings = self.settings
2819 ebuild_path = self.ebuild_path
2820 debug = settings.get("PORTAGE_DEBUG") == "1"
2824 if self.fd_pipes is not None:
2825 fd_pipes = self.fd_pipes.copy()
2829 fd_pipes.setdefault(0, sys.stdin.fileno())
2830 fd_pipes.setdefault(1, sys.stdout.fileno())
2831 fd_pipes.setdefault(2, sys.stderr.fileno())
2833 # flush any pending output
2834 for fd in fd_pipes.itervalues():
2835 if fd == sys.stdout.fileno():
2837 if fd == sys.stderr.fileno():
2840 fd_pipes_orig = fd_pipes.copy()
2841 self._files = self._files_dict()
# Metadata is read back over this pipe; O_NONBLOCK so the poll loop
# never stalls on read().
2844 master_fd, slave_fd = os.pipe()
2845 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2846 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2848 fd_pipes[self._metadata_fd] = slave_fd
2850 retval = portage.doebuild(ebuild_path, "depend",
2851 settings["ROOT"], settings, debug,
2852 mydbapi=self.portdb, tree="porttree",
2853 fd_pipes=fd_pipes, returnpid=True)
2857 if isinstance(retval, int):
2858 # doebuild failed before spawning
2860 self.returncode = retval
# returnpid=True yields a pid list; this class manages the child
# itself, so drop it from portage's global spawned_pids bookkeeping.
2864 self.pid = retval[0]
2865 portage.process.spawned_pids.remove(self.pid)
2867 self._raw_metadata = []
2868 files.ebuild = os.fdopen(master_fd, 'r')
2869 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2870 PollConstants.POLLIN, self._output_handler)
2871 self._registered = True
# Poll callback: accumulate pipe output; on EOF (empty read) parse the
# lines as auxdb metadata and hand them to metadata_callback.
2873 def _output_handler(self, fd, event):
2875 self._raw_metadata.append(files.ebuild.read())
2876 if not self._raw_metadata[-1]:
2880 if self.returncode == os.EX_OK:
2881 metadata = izip(portage.auxdbkeys,
2882 "".join(self._raw_metadata).splitlines())
2883 self.metadata_callback(self.cpv, self.ebuild_path,
2884 self.repo_path, metadata, self.ebuild_mtime)
2886 return self._registered
# SpawnProcess subclass that runs a single ebuild phase via
# portage.doebuild().
# NOTE(review): some source lines are elided in this listing.
2888 class EbuildProcess(SpawnProcess):
2890 __slots__ = ("phase", "pkg", "settings", "tree")
# (elided def — presumably _start)
2893 # Don't open the log file during the clean phase since the
2894 # open file can result in an nfs lock on $T/build.log which
2895 # prevents the clean phase from removing $T.
2896 if self.phase not in ("clean", "cleanrm"):
2897 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2898 SpawnProcess._start(self)
# Allocate a pty when stdout is a terminal so child output keeps the
# terminal size; fall back to a plain pipe otherwise.
2900 def _pipe(self, fd_pipes):
2901 stdout_pipe = fd_pipes.get(1)
2902 got_pty, master_fd, slave_fd = \
2903 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2904 return (master_fd, slave_fd)
# Delegate the actual phase execution to portage.doebuild().
2906 def _spawn(self, args, **kwargs):
2908 root_config = self.pkg.root_config
2910 mydbapi = root_config.trees[tree].dbapi
2911 settings = self.settings
2912 ebuild_path = settings["EBUILD"]
2913 debug = settings.get("PORTAGE_DEBUG") == "1"
2915 rval = portage.doebuild(ebuild_path, self.phase,
2916 root_config.root, settings, debug,
2917 mydbapi=mydbapi, tree=tree, **kwargs)
# Post-exit: reconcile the exit-status file with the process status
# (skipped for clean phases) and fix userpriv permissions.
2921 def _set_returncode(self, wait_retval):
2922 SpawnProcess._set_returncode(self, wait_retval)
2924 if self.phase not in ("clean", "cleanrm"):
2925 self.returncode = portage._doebuild_exit_status_check_and_log(
2926 self.settings, self.phase, self.returncode)
2928 portage._post_phase_userpriv_perms(self.settings)
# Composite task wrapping one EbuildProcess plus any post-phase misc
# commands (e.g. install-time checks).
# NOTE(review): some source lines are elided in this listing.
2930 class EbuildPhase(CompositeTask):
2932 __slots__ = ("background", "pkg", "phase",
2933 "scheduler", "settings", "tree")
2935 _post_phase_cmds = portage._post_phase_cmds
# (elided def — presumably _start)
2939 ebuild_process = EbuildProcess(background=self.background,
2940 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
2941 settings=self.settings, tree=self.tree)
2943 self._start_task(ebuild_process, self._ebuild_exit)
# On phase exit: for "install", scan the build log for QA problems,
# then run post-phase commands if any are registered for this phase.
2945 def _ebuild_exit(self, ebuild_process):
2947 if self.phase == "install":
2949 log_path = self.settings.get("PORTAGE_LOG_FILE")
2951 if self.background and log_path is not None:
2952 log_file = open(log_path, 'a')
2955 portage._check_build_log(self.settings, out=out)
2957 if log_file is not None:
2960 if self._default_exit(ebuild_process) != os.EX_OK:
2964 settings = self.settings
2966 if self.phase == "install":
2967 portage._post_src_install_uid_fix(settings)
2969 post_phase_cmds = self._post_phase_cmds.get(self.phase)
2970 if post_phase_cmds is not None:
2971 post_phase = MiscFunctionsProcess(background=self.background,
2972 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
2973 scheduler=self.scheduler, settings=settings)
2974 self._start_task(post_phase, self._post_phase_exit)
2977 self.returncode = ebuild_process.returncode
2978 self._current_task = None
2981 def _post_phase_exit(self, post_phase):
2982 if self._final_exit(post_phase) != os.EX_OK:
2983 writemsg("!!! post %s failed; exiting.\n" % self.phase,
2985 self._current_task = None
# Runs the "package" phase to build a .tbz2 binary package into a
# temporary file, injecting it into the bintree on success.
# NOTE(review): some source lines are elided in this listing.
2989 class EbuildBinpkg(EbuildProcess):
2991 This assumes that src_install() has successfully completed.
2993 __slots__ = ("_binpkg_tmpfile",)
# (elided def — presumably _start)
2996 self.phase = "package"
2997 self.tree = "porttree"
2999 root_config = pkg.root_config
3000 portdb = root_config.trees["porttree"].dbapi
3001 bintree = root_config.trees["bintree"]
3002 ebuild_path = portdb.findname(self.pkg.cpv)
3003 settings = self.settings
3004 debug = settings.get("PORTAGE_DEBUG") == "1"
# Write to a pid-suffixed temp file so concurrent emerges don't
# collide on the final package path.
3006 bintree.prevent_collision(pkg.cpv)
3007 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3008 pkg.cpv + ".tbz2." + str(os.getpid()))
3009 self._binpkg_tmpfile = binpkg_tmpfile
3010 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3011 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3014 EbuildProcess._start(self)
3016 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
# Inject the finished temp file into the binary package tree only if
# the package phase succeeded.
3018 def _set_returncode(self, wait_retval):
3019 EbuildProcess._set_returncode(self, wait_retval)
3022 bintree = pkg.root_config.trees["bintree"]
3023 binpkg_tmpfile = self._binpkg_tmpfile
3024 if self.returncode == os.EX_OK:
3025 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Synchronous merge of a built image into the live filesystem via
# portage.merge(); updates the world file on success.
# NOTE(review): some source lines are elided in this listing.
3027 class EbuildMerge(SlotObject):
3029 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3030 "pkg", "pkg_count", "pkg_path", "pretend",
3031 "scheduler", "settings", "tree", "world_atom")
# (elided def — presumably execute)
3034 root_config = self.pkg.root_config
3035 settings = self.settings
3036 retval = portage.merge(settings["CATEGORY"],
3037 settings["PF"], settings["D"],
3038 os.path.join(settings["PORTAGE_BUILDDIR"],
3039 "build-info"), root_config.root, settings,
3040 myebuild=settings["EBUILD"],
3041 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3042 vartree=root_config.trees["vartree"],
3043 prev_mtimes=self.ldpath_mtimes,
3044 scheduler=self.scheduler,
3045 blockers=self.find_blockers)
3047 if retval == os.EX_OK:
3048 self.world_atom(self.pkg)
# Log completion (and post-build cleaning unless FEATURES=noclean).
3053 def _log_success(self):
3055 pkg_count = self.pkg_count
3056 pkg_path = self.pkg_path
3057 logger = self.logger
3058 if "noclean" not in self.settings.features:
3059 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3060 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3061 logger.log((" === (%s of %s) " + \
3062 "Post-Build Cleaning (%s::%s)") % \
3063 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3064 short_msg=short_msg)
3065 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3066 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Task that unmerges an installed package, translating an
# UninstallFailure into a returncode.
# NOTE(review): some source lines are elided in this listing.
3068 class PackageUninstall(AsynchronousTask):
3070 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# (elided def — presumably _start) run unmerge() synchronously;
# raise_on_error=1 turns failures into the except clause below.
3074 unmerge(self.pkg.root_config, self.opts, "unmerge",
3075 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3076 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3077 writemsg_level=self._writemsg_level)
3078 except UninstallFailure, e:
3079 self.returncode = e.status
3081 self.returncode = os.EX_OK
# Route messages to stderr and/or the log file, suppressing
# low-priority output while running in the background.
3084 def _writemsg_level(self, msg, level=0, noiselevel=0):
3086 log_path = self.settings.get("PORTAGE_LOG_FILE")
3087 background = self.background
3089 if log_path is None:
3090 if not (background and level < logging.WARNING):
3091 portage.util.writemsg_level(msg,
3092 level=level, noiselevel=noiselevel)
3095 portage.util.writemsg_level(msg,
3096 level=level, noiselevel=noiselevel)
3098 f = open(log_path, 'a')
# Composite task that installs a binary package: optional prefetch,
# fetch (BinpkgFetcher), digest verification (BinpkgVerifier), clean,
# metadata extraction from the xpak, setup, image extraction
# (BinpkgExtractorAsync), and finally merge (EbuildMerge).
# NOTE(review): intermediate source lines are elided in this listing
# (gaps in the embedded numbering); several def headers are missing.
3104 class Binpkg(CompositeTask):
3106 __slots__ = ("find_blockers",
3107 "ldpath_mtimes", "logger", "opts",
3108 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3109 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3110 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Route messages to the console (foreground) and/or the log file.
3112 def _writemsg_level(self, msg, level=0, noiselevel=0):
3114 if not self.background:
3115 portage.util.writemsg_level(msg,
3116 level=level, noiselevel=noiselevel)
3118 log_path = self.settings.get("PORTAGE_LOG_FILE")
3119 if log_path is not None:
3120 f = open(log_path, 'a')
# (elided def — presumably _start) compute build-dir paths and decide
# whether digest verification is required.
3129 settings = self.settings
3130 settings.setcpv(pkg)
3131 self._tree = "bintree"
3132 self._bintree = self.pkg.root_config.trees[self._tree]
3133 self._verify = "strict" in self.settings.features and \
3134 not self.opts.pretend
3136 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3137 "portage", pkg.category, pkg.pf)
3138 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3139 pkg=pkg, settings=settings)
3140 self._image_dir = os.path.join(dir_path, "image")
3141 self._infloc = os.path.join(dir_path, "build-info")
3142 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3144 # The prefetcher has already completed or it
3145 # could be running now. If it's running now,
3146 # wait for it to complete since it holds
3147 # a lock on the file being fetched. The
3148 # portage.locks functions are only designed
3149 # to work between separate processes. Since
3150 # the lock is held by the current process,
3151 # use the scheduler and fetcher methods to
3152 # synchronize with the fetcher.
3153 prefetcher = self.prefetcher
3154 if prefetcher is None:
3156 elif not prefetcher.isAlive():
3158 elif prefetcher.poll() is None:
3160 waiting_msg = ("Fetching '%s' " + \
3161 "in the background. " + \
3162 "To view fetch progress, run `tail -f " + \
3163 "/var/log/emerge-fetch.log` in another " + \
3164 "terminal.") % prefetcher.pkg_path
3165 msg_prefix = colorize("GOOD", " * ")
3166 from textwrap import wrap
3167 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3168 for line in wrap(waiting_msg, 65))
3169 if not self.background:
3170 writemsg(waiting_msg, noiselevel=-1)
3172 self._current_task = prefetcher
3173 prefetcher.addExitListener(self._prefetch_exit)
3176 self._prefetch_exit(prefetcher)
# After prefetch: fetch the remote package through the scheduler's
# fetch queue when --getbinpkg applies; otherwise skip to verification.
3178 def _prefetch_exit(self, prefetcher):
3181 pkg_count = self.pkg_count
3182 fetcher = BinpkgFetcher(background=self.background,
3183 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3184 scheduler=self.scheduler)
3185 pkg_path = fetcher.pkg_path
3186 self._pkg_path = pkg_path
3188 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3190 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3191 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3192 short_msg = "emerge: (%s of %s) %s Fetch" % \
3193 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3194 self.logger.log(msg, short_msg=short_msg)
3197 fetcher.addExitListener(self._fetcher_exit)
3198 self._current_task = fetcher
3199 self.scheduler.fetch.schedule(fetcher)
3201 self._start_task(fetcher, self._fetcher_exit)
3204 self._fetcher_exit(fetcher)
3206 def _fetcher_exit(self, fetcher):
3208 # The fetcher only has a returncode when
3209 # --getbinpkg is enabled.
3210 if fetcher.returncode is not None:
3211 self._fetched_pkg = True
3212 if self.opts.fetchonly:
3213 self._final_exit(fetcher)
3216 elif self._default_exit(fetcher) != os.EX_OK:
3222 verifier = BinpkgVerifier(background=self.background,
3223 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3226 verifier.addExitListener(self._verifier_exit)
3227 self._current_task = verifier
3228 self.scheduler.fetch.schedule(verifier)
3230 self._start_task(verifier, self._verifier_exit)
3233 self._verifier_exit(verifier)
# After verification: inject a freshly fetched package into the local
# bintree, lock the build dir and run a clean phase.
3235 def _verifier_exit(self, verifier):
3236 if verifier is not None and \
3237 self._default_exit(verifier) != os.EX_OK:
3241 logger = self.logger
3243 pkg_count = self.pkg_count
3244 pkg_path = self._pkg_path
3246 if self._fetched_pkg:
3247 self._bintree.inject(pkg.cpv, filename=pkg_path)
3249 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3250 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3251 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3252 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3253 logger.log(msg, short_msg=short_msg)
3255 self._build_dir.lock()
3258 settings = self.settings
3259 settings.setcpv(pkg)
3260 settings["EBUILD"] = self._ebuild_path
3261 ebuild_phase = EbuildPhase(background=self.background,
3262 pkg=pkg, phase=phase, scheduler=self.scheduler,
3263 settings=settings, tree=self._tree)
3265 self._start_task(ebuild_phase, self._clean_exit)
# After clean: recreate the build dirs, unpack build-info metadata from
# the .tbz2 xpak, record the md5, then run the setup phase.
3267 def _clean_exit(self, clean_phase):
3268 if self._default_exit(clean_phase) != os.EX_OK:
3269 self._unlock_builddir()
3273 dir_path = self._build_dir.dir_path
3276 shutil.rmtree(dir_path)
3277 except (IOError, OSError), e:
3278 if e.errno != errno.ENOENT:
3282 infloc = self._infloc
3284 pkg_path = self._pkg_path
3287 for mydir in (dir_path, self._image_dir, infloc):
3288 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3289 gid=portage.data.portage_gid, mode=dir_mode)
3291 # This initializes PORTAGE_LOG_FILE.
3292 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3293 self._writemsg_level(">>> Extracting info\n")
3295 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3296 check_missing_metadata = ("CATEGORY", "PF")
3297 missing_metadata = set()
3298 for k in check_missing_metadata:
3299 v = pkg_xpak.getfile(k)
3301 missing_metadata.add(k)
3303 pkg_xpak.unpackinfo(infloc)
3304 for k in missing_metadata:
3312 f = open(os.path.join(infloc, k), 'wb')
3318 # Store the md5sum in the vdb.
3319 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3321 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3325 # This gives bashrc users an opportunity to do various things
3326 # such as remove binary packages after they're installed.
3327 settings = self.settings
3328 settings.setcpv(self.pkg)
3329 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3330 settings.backup_changes("PORTAGE_BINPKG_FILE")
3333 setup_phase = EbuildPhase(background=self.background,
3334 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3335 settings=settings, tree=self._tree)
3337 setup_phase.addExitListener(self._setup_exit)
3338 self._current_task = setup_phase
3339 self.scheduler.scheduleSetup(setup_phase)
3341 def _setup_exit(self, setup_phase):
3342 if self._default_exit(setup_phase) != os.EX_OK:
3343 self._unlock_builddir()
3347 extractor = BinpkgExtractorAsync(background=self.background,
3348 image_dir=self._image_dir,
3349 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3350 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3351 self._start_task(extractor, self._extractor_exit)
3353 def _extractor_exit(self, extractor):
3354 if self._final_exit(extractor) != os.EX_OK:
3355 self._unlock_builddir()
3356 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# Flush elog messages and release the build-dir lock.
3360 def _unlock_builddir(self):
3361 portage.elog.elog_process(self.pkg.cpv, self.settings)
3362 self._build_dir.unlock()
# (elided def — presumably install) merge the extracted image, then
# drop the bashrc hook variable and unlock.
3366 # This gives bashrc users an opportunity to do various things
3367 # such as remove binary packages after they're installed.
3368 settings = self.settings
3369 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3370 settings.backup_changes("PORTAGE_BINPKG_FILE")
3372 merge = EbuildMerge(find_blockers=self.find_blockers,
3373 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3374 pkg=self.pkg, pkg_count=self.pkg_count,
3375 pkg_path=self._pkg_path, scheduler=self.scheduler,
3376 settings=settings, tree=self._tree, world_atom=self.world_atom)
3379 retval = merge.execute()
3381 settings.pop("PORTAGE_BINPKG_FILE", None)
3382 self._unlock_builddir()
# SpawnProcess that downloads one binary package with the configured
# FETCHCOMMAND/RESUMECOMMAND, with optional file locking.
# NOTE(review): intermediate source lines are elided in this listing
# (e.g. the start of __slots__ and some def headers).
3385 class BinpkgFetcher(SpawnProcess):
3388 "locked", "pkg_path", "_lock_obj")
3390 def __init__(self, **kwargs):
3391 SpawnProcess.__init__(self, **kwargs)
3393 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# (elided def — presumably _start) build the fetch URI and command.
3401 bintree = pkg.root_config.trees["bintree"]
3402 settings = bintree.settings
3403 use_locks = "distlocks" in settings.features
3404 pkg_path = self.pkg_path
# A pre-existing partial file selects RESUMECOMMAND below.
3405 resume = os.path.exists(pkg_path)
3407 # urljoin doesn't work correctly with
3408 # unrecognized protocols like sftp
3409 if bintree._remote_has_index:
3410 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3412 rel_uri = pkg.cpv + ".tbz2"
3413 uri = bintree._remote_base_uri.rstrip("/") + \
3414 "/" + rel_uri.lstrip("/")
3416 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3417 "/" + pkg.pf + ".tbz2"
# Prefer a protocol-specific command (e.g. FETCHCOMMAND_HTTP), falling
# back to the generic one.
3419 protocol = urlparse.urlparse(uri)[0]
3420 fcmd_prefix = "FETCHCOMMAND"
3422 fcmd_prefix = "RESUMECOMMAND"
3423 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3425 fcmd = settings.get(fcmd_prefix)
3428 "DISTDIR" : os.path.dirname(pkg_path),
3430 "FILE" : os.path.basename(pkg_path)
3433 fetch_env = dict(settings.iteritems())
3434 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3435 for x in shlex.split(fcmd)]
3437 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3441 if self.fd_pipes is None:
3443 fd_pipes = self.fd_pipes
3445 # Redirect all output to stdout since some fetchers like
3446 # wget pollute stderr (if portage detects a problem then it
3447 # can send it's own message to stderr).
3448 fd_pipes.setdefault(0, sys.stdin.fileno())
3449 fd_pipes.setdefault(1, sys.stdout.fileno())
3450 fd_pipes.setdefault(2, sys.stdout.fileno())
3452 self.args = fetch_args
3453 self.env = fetch_env
3454 SpawnProcess._start(self)
3456 def _set_returncode(self, wait_retval):
3457 SpawnProcess._set_returncode(self, wait_retval)
# (elided def — presumably lock)
3463 This raises an AlreadyLocked exception if lock() is called
3464 while a lock is already held. In order to avoid this, call
3465 unlock() or check whether the "locked" attribute is True
3466 or False before calling lock().
3468 if self._lock_obj is not None:
3469 raise self.AlreadyLocked((self._lock_obj,))
3471 self._lock_obj = portage.locks.lockfile(
3472 self.pkg_path, wantnewlockfile=1)
# Nested exception type; raised by lock() above.
3475 class AlreadyLocked(portage.exception.PortageException):
# (elided def — presumably unlock) release the file lock if held.
3479 if self._lock_obj is None:
3481 portage.locks.unlockfile(self._lock_obj)
3482 self._lock_obj = None
# Synchronous digest verification of a fetched binary package; output
# is temporarily redirected to the log file when in background mode.
# NOTE(review): some source lines are elided in this listing.
3485 class BinpkgVerifier(AsynchronousTask):
3486 __slots__ = ("logfile", "pkg",)
# (elided def — presumably _start)
3490 Note: Unlike a normal AsynchronousTask.start() method,
3491 this one does all work is synchronously. The returncode
3492 attribute will be set before it returns.
3496 root_config = pkg.root_config
3497 bintree = root_config.trees["bintree"]
# Save and later restore the real stdout/stderr around the redirect.
3499 stdout_orig = sys.stdout
3500 stderr_orig = sys.stderr
3502 if self.background and self.logfile is not None:
3503 log_file = open(self.logfile, 'a')
3505 if log_file is not None:
3506 sys.stdout = log_file
3507 sys.stderr = log_file
3509 bintree.digestCheck(pkg)
3510 except portage.exception.FileNotFound:
3511 writemsg("!!! Fetching Binary failed " + \
3512 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3514 except portage.exception.DigestException, e:
3515 writemsg("\n!!! Digest verification failed:\n",
3517 writemsg("!!! %s\n" % e.value[0],
3519 writemsg("!!! Reason: %s\n" % e.value[1],
3521 writemsg("!!! Got: %s\n" % e.value[2],
3523 writemsg("!!! Expected: %s\n" % e.value[3],
3527 sys.stdout = stdout_orig
3528 sys.stderr = stderr_orig
3529 if log_file is not None:
3532 self.returncode = rval
# Extracts a .tbz2 binary package image into image_dir by piping
# bzip2 -dqc through tar inside a bash -c command line.
# NOTE(review): some source lines are elided in this listing
# (e.g. the def header before the args assignment).
3535 class BinpkgExtractorAsync(SpawnProcess):
3537 __slots__ = ("image_dir", "pkg", "pkg_path")
3539 _shell_binary = portage.const.BASH_BINARY
# Both paths are shell-quoted before interpolation into the command.
3542 self.args = [self._shell_binary, "-c",
3543 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3544 (portage._shell_quote(self.pkg_path),
3545 portage._shell_quote(self.image_dir))]
3547 self.env = self.pkg.root_config.settings.environ()
3548 SpawnProcess._start(self)
# One entry in the merge list: dispatches to EbuildBuild for source
# packages or Binpkg for binary packages, and handles uninstalls.
# NOTE(review): intermediate source lines are elided in this listing
# (gaps in the embedded numbering); several def headers are missing.
3550 class MergeListItem(CompositeTask):
3553 TODO: For parallel scheduling, everything here needs asynchronous
3554 execution support (start, poll, and wait methods).
3557 __slots__ = ("args_set",
3558 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3559 "find_blockers", "logger", "mtimedb", "pkg",
3560 "pkg_count", "pkg_to_replace", "prefetcher",
3561 "settings", "statusMessage", "world_atom") + \
# (elided def — presumably _start) announce the action and start the
# appropriate install task.
3567 build_opts = self.build_opts
3570 # uninstall, executed by self.merge()
3571 self.returncode = os.EX_OK
3575 args_set = self.args_set
3576 find_blockers = self.find_blockers
3577 logger = self.logger
3578 mtimedb = self.mtimedb
3579 pkg_count = self.pkg_count
3580 scheduler = self.scheduler
3581 settings = self.settings
3582 world_atom = self.world_atom
3583 ldpath_mtimes = mtimedb["ldpath"]
3585 action_desc = "Emerging"
3587 if pkg.type_name == "binary":
3588 action_desc += " binary"
3590 if build_opts.fetchonly:
3591 action_desc = "Fetching"
3593 msg = "%s (%s of %s) %s" % \
3595 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3596 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3597 colorize("GOOD", pkg.cpv))
3600 msg += " %s %s" % (preposition, pkg.root)
3602 if not build_opts.pretend:
3603 self.statusMessage(msg)
3604 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3605 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Source package: hand off to EbuildBuild.
3607 if pkg.type_name == "ebuild":
3609 build = EbuildBuild(args_set=args_set,
3610 background=self.background,
3611 config_pool=self.config_pool,
3612 find_blockers=find_blockers,
3613 ldpath_mtimes=ldpath_mtimes, logger=logger,
3614 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3615 prefetcher=self.prefetcher, scheduler=scheduler,
3616 settings=settings, world_atom=world_atom)
3618 self._install_task = build
3619 self._start_task(build, self._default_final_exit)
# Binary package: hand off to Binpkg.
3622 elif pkg.type_name == "binary":
3624 binpkg = Binpkg(background=self.background,
3625 find_blockers=find_blockers,
3626 ldpath_mtimes=ldpath_mtimes, logger=logger,
3627 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3628 prefetcher=self.prefetcher, settings=settings,
3629 scheduler=scheduler, world_atom=world_atom)
3631 self._install_task = binpkg
3632 self._start_task(binpkg, self._default_final_exit)
# (elided defs — presumably poll/wait) delegate to the install task.
3636 self._install_task.poll()
3637 return self.returncode
3640 self._install_task.wait()
3641 return self.returncode
# (elided def — presumably merge) handle the uninstall case, then
# install via the previously started task.
3646 build_opts = self.build_opts
3647 find_blockers = self.find_blockers
3648 logger = self.logger
3649 mtimedb = self.mtimedb
3650 pkg_count = self.pkg_count
3651 prefetcher = self.prefetcher
3652 scheduler = self.scheduler
3653 settings = self.settings
3654 world_atom = self.world_atom
3655 ldpath_mtimes = mtimedb["ldpath"]
3658 if not (build_opts.buildpkgonly or \
3659 build_opts.fetchonly or build_opts.pretend):
3661 uninstall = PackageUninstall(background=self.background,
3662 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3663 pkg=pkg, scheduler=scheduler, settings=settings)
3666 retval = uninstall.wait()
3667 if retval != os.EX_OK:
3671 if build_opts.fetchonly or \
3672 build_opts.buildpkgonly:
3673 return self.returncode
3675 retval = self._install_task.install()
# Thin task wrapper that announces and performs a MergeListItem's
# merge() synchronously.
# NOTE(review): some source lines are elided in this listing.
3678 class PackageMerge(AsynchronousTask):
3680 TODO: Implement asynchronous merge so that the scheduler can
3681 run while a merge is executing.
3684 __slots__ = ("merge",)
# (elided def — presumably _start) build the status message; the
# uninstall vs install branch condition is elided above line 3692.
3688 pkg = self.merge.pkg
3689 pkg_count = self.merge.pkg_count
3692 action_desc = "Uninstalling"
3693 preposition = "from"
3695 action_desc = "Installing"
3698 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3701 msg += " %s %s" % (preposition, pkg.root)
3703 if not self.merge.build_opts.fetchonly and \
3704 not self.merge.build_opts.pretend and \
3705 not self.merge.build_opts.buildpkgonly:
3706 self.merge.statusMessage(msg)
3708 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages,
# sets).  NOTE(review): line 3713 is elided here — presumably
# `self.arg = arg` (SetArg below reads self.arg); confirm.
3711 class DependencyArg(object):
3712 def __init__(self, arg=None, root_config=None):
3714 self.root_config = root_config
# Dependency argument wrapping a single dependency atom; normalizes it
# to a portage.dep.Atom instance.
# NOTE(review): line 3722 is elided here — presumably the initial
# `self.atom = atom` assignment; confirm against the full source.
3719 class AtomArg(DependencyArg):
3720 def __init__(self, atom=None, **kwargs):
3721 DependencyArg.__init__(self, **kwargs)
3723 if not isinstance(self.atom, portage.dep.Atom):
3724 self.atom = portage.dep.Atom(self.atom)
# Single-atom tuple, mirroring the set interface used elsewhere.
3725 self.set = (self.atom, )
# Dependency argument wrapping a concrete Package instance; its atom is
# the exact-version "=" atom for that package's cpv.
3727 class PackageArg(DependencyArg):
3728 def __init__(self, package=None, **kwargs):
3729 DependencyArg.__init__(self, **kwargs)
3730 self.package = package
3731 self.atom = portage.dep.Atom("=" + package.cpv)
# Single-atom tuple, mirroring the set interface used elsewhere.
3732 self.set = (self.atom, )
# Dependency argument for a package set (e.g. "@world"); strips the
# SETPREFIX from the raw argument to get the set name.
# NOTE(review): line 3737 is elided here — presumably
# `self.set = set`; confirm against the full source.
3734 class SetArg(DependencyArg):
3735 def __init__(self, set=None, **kwargs):
3736 DependencyArg.__init__(self, **kwargs)
3738 self.name = self.arg[len(SETPREFIX):]
# Slot object describing one dependency edge in the depgraph; fills in
# a default priority (and, per the truncated branch, presumably a
# default depth — the assignment on line 3748 is elided; confirm).
3740 class Dependency(SlotObject):
3741 __slots__ = ("atom", "blocker", "depth",
3742 "parent", "onlydeps", "priority", "root")
3743 def __init__(self, **kwargs):
3744 SlotObject.__init__(self, **kwargs)
3745 if self.priority is None:
3746 self.priority = DepPriority()
3747 if self.depth is None:
3750 class BlockerCache(DictMixin):
3751 """This caches blockers of installed packages so that dep_check does not
3752 have to be done for every single installed package on every invocation of
3753 emerge. The cache is invalidated whenever it is detected that something
3754 has changed that might alter the results of dep_check() calls:
3755 1) the set of installed packages (including COUNTER) has changed
3756 2) the old-style virtuals have changed
3759 # Number of uncached packages to trigger cache update, since
3760 # it's wasteful to update it for every vdb change.
3761 _cache_threshold = 5
3763 class BlockerData(object):
3765 __slots__ = ("__weakref__", "atoms", "counter")
3767 def __init__(self, counter, atoms):
3768 self.counter = counter
3771 def __init__(self, myroot, vardb):
3773 self._virtuals = vardb.settings.getvirtuals()
3774 self._cache_filename = os.path.join(myroot,
3775 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3776 self._cache_version = "1"
3777 self._cache_data = None
3778 self._modified = set()
3783 f = open(self._cache_filename)
3784 mypickle = pickle.Unpickler(f)
3785 mypickle.find_global = None
3786 self._cache_data = mypickle.load()
3789 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3790 if isinstance(e, pickle.UnpicklingError):
3791 writemsg("!!! Error loading '%s': %s\n" % \
3792 (self._cache_filename, str(e)), noiselevel=-1)
3795 cache_valid = self._cache_data and \
3796 isinstance(self._cache_data, dict) and \
3797 self._cache_data.get("version") == self._cache_version and \
3798 isinstance(self._cache_data.get("blockers"), dict)
3800 # Validate all the atoms and counters so that
3801 # corruption is detected as soon as possible.
3802 invalid_items = set()
3803 for k, v in self._cache_data["blockers"].iteritems():
3804 if not isinstance(k, basestring):
3805 invalid_items.add(k)
3808 if portage.catpkgsplit(k) is None:
3809 invalid_items.add(k)
3811 except portage.exception.InvalidData:
3812 invalid_items.add(k)
3814 if not isinstance(v, tuple) or \
3816 invalid_items.add(k)
3819 if not isinstance(counter, (int, long)):
3820 invalid_items.add(k)
3822 if not isinstance(atoms, (list, tuple)):
3823 invalid_items.add(k)
3825 invalid_atom = False
3827 if not isinstance(atom, basestring):
3830 if atom[:1] != "!" or \
3831 not portage.isvalidatom(
3832 atom, allow_blockers=True):
3836 invalid_items.add(k)
3839 for k in invalid_items:
3840 del self._cache_data["blockers"][k]
3841 if not self._cache_data["blockers"]:
3845 self._cache_data = {"version":self._cache_version}
3846 self._cache_data["blockers"] = {}
3847 self._cache_data["virtuals"] = self._virtuals
3848 self._modified.clear()
3851 """If the current user has permission and the internal blocker cache
3852 has been updated, save it to disk and mark it unmodified. This is called
3853 by emerge after it has processed blockers for all installed packages.
3854 Currently, the cache is only written if the user has superuser
3855 privileges (since that's required to obtain a lock), but all users
3856 have read access and benefit from faster blocker lookups (as long as
3857 the entire cache is still valid). The cache is stored as a pickled
3858 dict object with the following format:
3862 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
3863 "virtuals" : vardb.settings.getvirtuals()
3866 if len(self._modified) >= self._cache_threshold and \
3869 f = portage.util.atomic_ofstream(self._cache_filename)
3870 pickle.dump(self._cache_data, f, -1)
3872 portage.util.apply_secpass_permissions(
3873 self._cache_filename, gid=portage.portage_gid, mode=0644)
3874 except (IOError, OSError), e:
3876 self._modified.clear()
3878 def __setitem__(self, cpv, blocker_data):
3880 Update the cache and mark it as modified for a future call to
3883 @param cpv: Package for which to cache blockers.
3885 @param blocker_data: An object with counter and atoms attributes.
3886 @type blocker_data: BlockerData
3888 self._cache_data["blockers"][cpv] = \
3889 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
3890 self._modified.add(cpv)
3893 return iter(self._cache_data["blockers"])
3895 def __delitem__(self, cpv):
3896 del self._cache_data["blockers"][cpv]
3898 def __getitem__(self, cpv):
3901 @returns: An object with counter and atoms attributes.
3903 return self.BlockerData(*self._cache_data["blockers"][cpv])
3906 """This needs to be implemented so that self.__repr__() doesn't raise
3907 an AttributeError."""
3910 class BlockerDB(object):
3912 def __init__(self, root_config):
3913 self._root_config = root_config
3914 self._vartree = root_config.trees["vartree"]
3915 self._portdb = root_config.trees["porttree"].dbapi
3917 self._dep_check_trees = None
3918 self._fake_vartree = None
3920 def _get_fake_vartree(self, acquire_lock=0):
3921 fake_vartree = self._fake_vartree
3922 if fake_vartree is None:
3923 fake_vartree = FakeVartree(self._root_config,
3924 acquire_lock=acquire_lock)
3925 self._fake_vartree = fake_vartree
3926 self._dep_check_trees = { self._vartree.root : {
3927 "porttree" : fake_vartree,
3928 "vartree" : fake_vartree,
3931 fake_vartree.sync(acquire_lock=acquire_lock)
3934 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
3935 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
3936 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3937 settings = self._vartree.settings
3938 stale_cache = set(blocker_cache)
3939 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
3940 dep_check_trees = self._dep_check_trees
3941 vardb = fake_vartree.dbapi
3942 installed_pkgs = list(vardb)
3944 for inst_pkg in installed_pkgs:
3945 stale_cache.discard(inst_pkg.cpv)
3946 cached_blockers = blocker_cache.get(inst_pkg.cpv)
3947 if cached_blockers is not None and \
3948 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
3949 cached_blockers = None
3950 if cached_blockers is not None:
3951 blocker_atoms = cached_blockers.atoms
3953 # Use aux_get() to trigger FakeVartree global
3954 # updates on *DEPEND when appropriate.
3955 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
3957 portage.dep._dep_check_strict = False
3958 success, atoms = portage.dep_check(depstr,
3959 vardb, settings, myuse=inst_pkg.use.enabled,
3960 trees=dep_check_trees, myroot=inst_pkg.root)
3962 portage.dep._dep_check_strict = True
3964 pkg_location = os.path.join(inst_pkg.root,
3965 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
3966 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
3967 (pkg_location, atoms), noiselevel=-1)
3970 blocker_atoms = [atom for atom in atoms \
3971 if atom.startswith("!")]
3972 blocker_atoms.sort()
3973 counter = long(inst_pkg.metadata["COUNTER"])
3974 blocker_cache[inst_pkg.cpv] = \
3975 blocker_cache.BlockerData(counter, blocker_atoms)
3976 for cpv in stale_cache:
3977 del blocker_cache[cpv]
3978 blocker_cache.flush()
3980 blocker_parents = digraph()
3982 for pkg in installed_pkgs:
3983 for blocker_atom in blocker_cache[pkg.cpv].atoms:
3984 blocker_atom = blocker_atom.lstrip("!")
3985 blocker_atoms.append(blocker_atom)
3986 blocker_parents.add(blocker_atom, pkg)
3988 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3989 blocking_pkgs = set()
3990 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
3991 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
3993 # Check for blockers in the other direction.
3994 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
3996 portage.dep._dep_check_strict = False
3997 success, atoms = portage.dep_check(depstr,
3998 vardb, settings, myuse=new_pkg.use.enabled,
3999 trees=dep_check_trees, myroot=new_pkg.root)
4001 portage.dep._dep_check_strict = True
4003 # We should never get this far with invalid deps.
4004 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4007 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4010 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4011 for inst_pkg in installed_pkgs:
4013 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4014 except (portage.exception.InvalidDependString, StopIteration):
4016 blocking_pkgs.add(inst_pkg)
4018 return blocking_pkgs
4020 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4022 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4023 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4024 p_type, p_root, p_key, p_status = parent_node
4026 if p_status == "nomerge":
4027 category, pf = portage.catsplit(p_key)
4028 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4029 msg.append("Portage is unable to process the dependencies of the ")
4030 msg.append("'%s' package. " % p_key)
4031 msg.append("In order to correct this problem, the package ")
4032 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4033 msg.append("As a temporary workaround, the --nodeps option can ")
4034 msg.append("be used to ignore all dependencies. For reference, ")
4035 msg.append("the problematic dependencies can be found in the ")
4036 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4038 msg.append("This package can not be installed. ")
4039 msg.append("Please notify the '%s' package maintainer " % p_key)
4040 msg.append("about this problem.")
4042 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4043 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4045 class PackageVirtualDbapi(portage.dbapi):
4047 A dbapi-like interface class that represents the state of the installed
4048 package database as new packages are installed, replacing any packages
4049 that previously existed in the same slot. The main difference between
4050 this class and fakedbapi is that this one uses Package instances
4051 internally (passed in via cpv_inject() and cpv_remove() calls).
4053 def __init__(self, settings):
4054 portage.dbapi.__init__(self)
4055 self.settings = settings
4056 self._match_cache = {}
4062 Remove all packages.
4066 self._cp_map.clear()
4067 self._cpv_map.clear()
4070 obj = PackageVirtualDbapi(self.settings)
4071 obj._match_cache = self._match_cache.copy()
4072 obj._cp_map = self._cp_map.copy()
4073 for k, v in obj._cp_map.iteritems():
4074 obj._cp_map[k] = v[:]
4075 obj._cpv_map = self._cpv_map.copy()
4079 return self._cpv_map.itervalues()
4081 def __contains__(self, item):
4082 existing = self._cpv_map.get(item.cpv)
4083 if existing is not None and \
4088 def get(self, item, default=None):
4089 cpv = getattr(item, "cpv", None)
4093 type_name, root, cpv, operation = item
4095 existing = self._cpv_map.get(cpv)
4096 if existing is not None and \
def match_pkgs(self, atom):
    """Like match(), but return the Package instances themselves
    instead of their cpv strings."""
    matches = []
    for matched_cpv in self.match(atom):
        matches.append(self._cpv_map[matched_cpv])
    return matches
def _clear_cache(self):
    """Drop any memoized category list and match() results so that
    subsequent lookups recompute from the current package maps."""
    if self._match_cache:
        self._match_cache = {}
    if self._categories is not None:
        self._categories = None
4110 def match(self, origdep, use_cache=1):
4111 result = self._match_cache.get(origdep)
4112 if result is not None:
4114 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4115 self._match_cache[origdep] = result
def cpv_exists(self, cpv):
    """Return True if a package with exactly this cpv has been
    injected into this virtual database."""
    known_cpvs = self._cpv_map
    return cpv in known_cpvs
4121 def cp_list(self, mycp, use_cache=1):
4122 cachelist = self._match_cache.get(mycp)
4123 # cp_list() doesn't expand old-style virtuals
4124 if cachelist and cachelist[0].startswith(mycp):
4126 cpv_list = self._cp_map.get(mycp)
4127 if cpv_list is None:
4130 cpv_list = [pkg.cpv for pkg in cpv_list]
4131 self._cpv_sort_ascending(cpv_list)
4132 if not (not cpv_list and mycp.startswith("virtual/")):
4133 self._match_cache[mycp] = cpv_list
4137 return list(self._cp_map)
4140 return list(self._cpv_map)
4142 def cpv_inject(self, pkg):
4143 cp_list = self._cp_map.get(pkg.cp)
4146 self._cp_map[pkg.cp] = cp_list
4147 e_pkg = self._cpv_map.get(pkg.cpv)
4148 if e_pkg is not None:
4151 self.cpv_remove(e_pkg)
4152 for e_pkg in cp_list:
4153 if e_pkg.slot_atom == pkg.slot_atom:
4156 self.cpv_remove(e_pkg)
4159 self._cpv_map[pkg.cpv] = pkg
4162 def cpv_remove(self, pkg):
4163 old_pkg = self._cpv_map.get(pkg.cpv)
4166 self._cp_map[pkg.cp].remove(pkg)
4167 del self._cpv_map[pkg.cpv]
def aux_get(self, cpv, wants):
    """Return one metadata value per key in *wants*, in order.

    Keys absent from the package's metadata yield "".  Raises
    KeyError if *cpv* is not in the database.
    """
    metadata = self._cpv_map[cpv].metadata
    values = []
    for key in wants:
        values.append(metadata.get(key, ""))
    return values
def aux_update(self, cpv, values):
    """Merge the given key/value pairs into the metadata mapping of
    the package registered under *cpv* (KeyError if absent)."""
    pkg_metadata = self._cpv_map[cpv].metadata
    pkg_metadata.update(values)
4178 class depgraph(object):
4180 pkg_tree_map = RootConfig.pkg_tree_map
4182 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4184 def __init__(self, settings, trees, myopts, myparams, spinner):
4185 self.settings = settings
4186 self.target_root = settings["ROOT"]
4187 self.myopts = myopts
4188 self.myparams = myparams
4190 if settings.get("PORTAGE_DEBUG", "") == "1":
4192 self.spinner = spinner
4193 self._running_root = trees["/"]["root_config"]
4194 self._opts_no_restart = Scheduler._opts_no_restart
4195 self.pkgsettings = {}
4196 # Maps slot atom to package for each Package added to the graph.
4197 self._slot_pkg_map = {}
4198 # Maps nodes to the reasons they were selected for reinstallation.
4199 self._reinstall_nodes = {}
4202 self._trees_orig = trees
4204 # Contains a filtered view of preferred packages that are selected
4205 # from available repositories.
4206 self._filtered_trees = {}
4207 # Contains installed packages and new packages that have been added
4209 self._graph_trees = {}
4210 # All Package instances
4211 self._pkg_cache = self._package_cache(self)
4212 for myroot in trees:
4213 self.trees[myroot] = {}
4214 # Create a RootConfig instance that references
4215 # the FakeVartree instead of the real one.
4216 self.roots[myroot] = RootConfig(
4217 trees[myroot]["vartree"].settings,
4219 trees[myroot]["root_config"].setconfig)
4220 for tree in ("porttree", "bintree"):
4221 self.trees[myroot][tree] = trees[myroot][tree]
4222 self.trees[myroot]["vartree"] = \
4223 FakeVartree(trees[myroot]["root_config"],
4224 pkg_cache=self._pkg_cache)
4225 self.pkgsettings[myroot] = portage.config(
4226 clone=self.trees[myroot]["vartree"].settings)
4227 self._slot_pkg_map[myroot] = {}
4228 vardb = self.trees[myroot]["vartree"].dbapi
4229 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4230 "--buildpkgonly" not in self.myopts
4231 # This fakedbapi instance will model the state that the vdb will
4232 # have after new packages have been installed.
4233 fakedb = PackageVirtualDbapi(vardb.settings)
4234 if preload_installed_pkgs:
4236 self.spinner.update()
4237 # This triggers metadata updates via FakeVartree.
4238 vardb.aux_get(pkg.cpv, [])
4239 fakedb.cpv_inject(pkg)
4241 # Now that the vardb state is cached in our FakeVartree,
4242 # we won't be needing the real vartree cache for awhile.
4243 # To make some room on the heap, clear the vardbapi
4245 trees[myroot]["vartree"].dbapi._clear_cache()
4248 self.mydbapi[myroot] = fakedb
4251 graph_tree.dbapi = fakedb
4252 self._graph_trees[myroot] = {}
4253 self._filtered_trees[myroot] = {}
4254 # Substitute the graph tree for the vartree in dep_check() since we
4255 # want atom selections to be consistent with package selections
4256 # have already been made.
4257 self._graph_trees[myroot]["porttree"] = graph_tree
4258 self._graph_trees[myroot]["vartree"] = graph_tree
4259 def filtered_tree():
4261 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4262 self._filtered_trees[myroot]["porttree"] = filtered_tree
4264 # Passing in graph_tree as the vartree here could lead to better
4265 # atom selections in some cases by causing atoms for packages that
4266 # have been added to the graph to be preferred over other choices.
4267 # However, it can trigger atom selections that result in
4268 # unresolvable direct circular dependencies. For example, this
4269 # happens with gwydion-dylan which depends on either itself or
4270 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4271 # gwydion-dylan-bin needs to be selected in order to avoid
4272 # an unresolvable direct circular dependency.
4274 # To solve the problem described above, pass in "graph_db" so that
4275 # packages that have been added to the graph are distinguishable
4276 # from other available packages and installed packages. Also, pass
4277 # the parent package into self._select_atoms() calls so that
4278 # unresolvable direct circular dependencies can be detected and
4279 # avoided when possible.
4280 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4281 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4284 portdb = self.trees[myroot]["porttree"].dbapi
4285 bindb = self.trees[myroot]["bintree"].dbapi
4286 vardb = self.trees[myroot]["vartree"].dbapi
4287 # (db, pkg_type, built, installed, db_keys)
4288 if "--usepkgonly" not in self.myopts:
4289 db_keys = list(portdb._aux_cache_keys)
4290 dbs.append((portdb, "ebuild", False, False, db_keys))
4291 if "--usepkg" in self.myopts:
4292 db_keys = list(bindb._aux_cache_keys)
4293 dbs.append((bindb, "binary", True, False, db_keys))
4294 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4295 dbs.append((vardb, "installed", True, True, db_keys))
4296 self._filtered_trees[myroot]["dbs"] = dbs
4297 if "--usepkg" in self.myopts:
4298 self.trees[myroot]["bintree"].populate(
4299 "--getbinpkg" in self.myopts,
4300 "--getbinpkgonly" in self.myopts)
4303 self.digraph=portage.digraph()
4304 # contains all sets added to the graph
4306 # contains atoms given as arguments
4307 self._sets["args"] = InternalPackageSet()
4308 # contains all atoms from all sets added to the graph, including
4309 # atoms given as arguments
4310 self._set_atoms = InternalPackageSet()
4311 self._atom_arg_map = {}
4312 # contains all nodes pulled in by self._set_atoms
4313 self._set_nodes = set()
4314 # Contains only Blocker -> Uninstall edges
4315 self._blocker_uninstalls = digraph()
4316 # Contains only Package -> Blocker edges
4317 self._blocker_parents = digraph()
4318 # Contains only irrelevant Package -> Blocker edges
4319 self._irrelevant_blockers = digraph()
4320 # Contains only unsolvable Package -> Blocker edges
4321 self._unsolvable_blockers = digraph()
4322 self._slot_collision_info = {}
4323 # Slot collision nodes are not allowed to block other packages since
4324 # blocker validation is only able to account for one package per slot.
4325 self._slot_collision_nodes = set()
4326 self._parent_atoms = {}
4327 self._slot_conflict_parent_atoms = set()
4328 self._serialized_tasks_cache = None
4329 self._scheduler_graph = None
4330 self._displayed_list = None
4331 self._pprovided_args = []
4332 self._missing_args = []
4333 self._masked_installed = set()
4334 self._unsatisfied_deps_for_display = []
4335 self._unsatisfied_blockers_for_display = None
4336 self._circular_deps_for_display = None
4337 self._dep_stack = []
4338 self._unsatisfied_deps = []
4339 self._initially_unsatisfied_deps = []
4340 self._ignored_deps = []
4341 self._required_set_names = set(["system", "world"])
4342 self._select_atoms = self._select_atoms_highest_available
4343 self._select_package = self._select_pkg_highest_available
4344 self._highest_pkg_cache = {}
4346 def _show_slot_collision_notice(self):
4347 """Show an informational message advising the user to mask one of the
4348 the packages. In some cases it may be possible to resolve this
4349 automatically, but support for backtracking (removal nodes that have
4350 already been selected) will be required in order to handle all possible
4354 if not self._slot_collision_info:
4357 self._show_merge_list()
4360 msg.append("\n!!! Multiple package instances within a single " + \
4361 "package slot have been pulled\n")
4362 msg.append("!!! into the dependency graph, resulting" + \
4363 " in a slot conflict:\n\n")
4365 # Max number of parents shown, to avoid flooding the display.
4367 explanation_columns = 70
4369 for (slot_atom, root), slot_nodes \
4370 in self._slot_collision_info.iteritems():
4371 msg.append(str(slot_atom))
4374 for node in slot_nodes:
4376 msg.append(str(node))
4377 parent_atoms = self._parent_atoms.get(node)
4380 # Prefer conflict atoms over others.
4381 for parent_atom in parent_atoms:
4382 if len(pruned_list) >= max_parents:
4384 if parent_atom in self._slot_conflict_parent_atoms:
4385 pruned_list.add(parent_atom)
4387 # If this package was pulled in by conflict atoms then
4388 # show those alone since those are the most interesting.
4390 # When generating the pruned list, prefer instances
4391 # of DependencyArg over instances of Package.
4392 for parent_atom in parent_atoms:
4393 if len(pruned_list) >= max_parents:
4395 parent, atom = parent_atom
4396 if isinstance(parent, DependencyArg):
4397 pruned_list.add(parent_atom)
4398 # Prefer Packages instances that themselves have been
4399 # pulled into collision slots.
4400 for parent_atom in parent_atoms:
4401 if len(pruned_list) >= max_parents:
4403 parent, atom = parent_atom
4404 if isinstance(parent, Package) and \
4405 (parent.slot_atom, parent.root) \
4406 in self._slot_collision_info:
4407 pruned_list.add(parent_atom)
4408 for parent_atom in parent_atoms:
4409 if len(pruned_list) >= max_parents:
4411 pruned_list.add(parent_atom)
4412 omitted_parents = len(parent_atoms) - len(pruned_list)
4413 parent_atoms = pruned_list
4414 msg.append(" pulled in by\n")
4415 for parent_atom in parent_atoms:
4416 parent, atom = parent_atom
4417 msg.append(2*indent)
4418 if isinstance(parent,
4419 (PackageArg, AtomArg)):
4420 # For PackageArg and AtomArg types, it's
4421 # redundant to display the atom attribute.
4422 msg.append(str(parent))
4424 # Display the specific atom from SetArg or
4426 msg.append("%s required by %s" % (atom, parent))
4429 msg.append(2*indent)
4430 msg.append("(and %d more)\n" % omitted_parents)
4432 msg.append(" (no parents)\n")
4434 explanation = self._slot_conflict_explanation(slot_nodes)
4437 msg.append(indent + "Explanation:\n\n")
4438 for line in textwrap.wrap(explanation, explanation_columns):
4439 msg.append(2*indent + line + "\n")
4442 sys.stderr.write("".join(msg))
4445 explanations_for_all = explanations == len(self._slot_collision_info)
4447 if explanations_for_all or "--quiet" in self.myopts:
4451 msg.append("It may be possible to solve this problem ")
4452 msg.append("by using package.mask to prevent one of ")
4453 msg.append("those packages from being selected. ")
4454 msg.append("However, it is also possible that conflicting ")
4455 msg.append("dependencies exist such that they are impossible to ")
4456 msg.append("satisfy simultaneously. If such a conflict exists in ")
4457 msg.append("the dependencies of two different packages, then those ")
4458 msg.append("packages can not be installed simultaneously.")
4460 from formatter import AbstractFormatter, DumbWriter
4461 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4463 f.add_flowing_data(x)
4467 msg.append("For more information, see MASKED PACKAGES ")
4468 msg.append("section in the emerge man page or refer ")
4469 msg.append("to the Gentoo Handbook.")
4471 f.add_flowing_data(x)
4475 def _slot_conflict_explanation(self, slot_nodes):
4477 When a slot conflict occurs due to USE deps, there are a few
4478 different cases to consider:
4480 1) New USE are correctly set but --newuse wasn't requested so an
4481 installed package with incorrect USE happened to get pulled
4482 into graph before the new one.
4484 2) New USE are incorrectly set but an installed package has correct
4485 USE so it got pulled into the graph, and a new instance also got
4486 pulled in due to --newuse or an upgrade.
4488 3) Multiple USE deps exist that can't be satisfied simultaneously,
4489 and multiple package instances got pulled into the same slot to
4490 satisfy the conflicting deps.
4492 Currently, explanations and suggested courses of action are generated
4493 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4496 if len(slot_nodes) != 2:
4497 # Suggestions are only implemented for
4498 # conflicts between two packages.
4501 all_conflict_atoms = self._slot_conflict_parent_atoms
4503 matched_atoms = None
4504 unmatched_node = None
4505 for node in slot_nodes:
4506 parent_atoms = self._parent_atoms.get(node)
4507 if not parent_atoms:
4508 # Normally, there are always parent atoms. If there are
4509 # none then something unexpected is happening and there's
4510 # currently no suggestion for this case.
4512 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4513 for parent_atom in conflict_atoms:
4514 parent, atom = parent_atom
4516 # Suggestions are currently only implemented for cases
4517 # in which all conflict atoms have USE deps.
4520 if matched_node is not None:
4521 # If conflict atoms match multiple nodes
4522 # then there's no suggestion.
4525 matched_atoms = conflict_atoms
4527 if unmatched_node is not None:
4528 # Neither node is matched by conflict atoms, and
4529 # there is no suggestion for this case.
4531 unmatched_node = node
4533 if matched_node is None or unmatched_node is None:
4534 # This shouldn't happen.
4537 if unmatched_node.installed and not matched_node.installed:
4538 return "New USE are correctly set, but --newuse wasn't" + \
4539 " requested, so an installed package with incorrect USE " + \
4540 "happened to get pulled into the dependency graph. " + \
4541 "In order to solve " + \
4542 "this, either specify the --newuse option or explicitly " + \
4543 " reinstall '%s'." % matched_node.slot_atom
4545 if matched_node.installed and not unmatched_node.installed:
4546 atoms = sorted(set(atom for parent, atom in matched_atoms))
4547 explanation = ("New USE for '%s' are incorrectly set. " + \
4548 "In order to solve this, adjust USE to satisfy '%s'") % \
4549 (matched_node.slot_atom, atoms[0])
4551 for atom in atoms[1:-1]:
4552 explanation += ", '%s'" % (atom,)
4555 explanation += " and '%s'" % (atoms[-1],)
4561 def _process_slot_conflicts(self):
4563 Process slot conflict data to identify specific atoms which
4564 lead to conflict. These atoms only match a subset of the
4565 packages that have been pulled into a given slot.
4567 for (slot_atom, root), slot_nodes \
4568 in self._slot_collision_info.iteritems():
4570 all_parent_atoms = set()
4571 for pkg in slot_nodes:
4572 parent_atoms = self._parent_atoms.get(pkg)
4573 if not parent_atoms:
4575 all_parent_atoms.update(parent_atoms)
4577 for pkg in slot_nodes:
4578 parent_atoms = self._parent_atoms.get(pkg)
4579 if parent_atoms is None:
4580 parent_atoms = set()
4581 self._parent_atoms[pkg] = parent_atoms
4582 for parent_atom in all_parent_atoms:
4583 if parent_atom in parent_atoms:
4585 # Use package set for matching since it will match via
4586 # PROVIDE when necessary, while match_from_list does not.
4587 parent, atom = parent_atom
4588 atom_set = InternalPackageSet(
4589 initial_atoms=(atom,))
4590 if atom_set.findAtomForPackage(pkg):
4591 parent_atoms.add(parent_atom)
4593 self._slot_conflict_parent_atoms.add(parent_atom)
4595 def _reinstall_for_flags(self, forced_flags,
4596 orig_use, orig_iuse, cur_use, cur_iuse):
4597 """Return a set of flags that trigger reinstallation, or None if there
4598 are no such flags."""
4599 if "--newuse" in self.myopts:
4600 flags = set(orig_iuse.symmetric_difference(
4601 cur_iuse).difference(forced_flags))
4602 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4603 cur_iuse.intersection(cur_use)))
4606 elif "changed-use" == self.myopts.get("--reinstall"):
4607 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4608 cur_iuse.intersection(cur_use))
4613 def _create_graph(self, allow_unsatisfied=False):
4614 dep_stack = self._dep_stack
4616 self.spinner.update()
4617 dep = dep_stack.pop()
4618 if isinstance(dep, Package):
4619 if not self._add_pkg_deps(dep,
4620 allow_unsatisfied=allow_unsatisfied):
4623 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4627 def _add_dep(self, dep, allow_unsatisfied=False):
4628 debug = "--debug" in self.myopts
4629 buildpkgonly = "--buildpkgonly" in self.myopts
4630 nodeps = "--nodeps" in self.myopts
4631 empty = "empty" in self.myparams
4632 deep = "deep" in self.myparams
4633 update = "--update" in self.myopts and dep.depth <= 1
4635 if not buildpkgonly and \
4637 dep.parent not in self._slot_collision_nodes:
4638 if dep.parent.onlydeps:
4639 # It's safe to ignore blockers if the
4640 # parent is an --onlydeps node.
4642 # The blocker applies to the root where
4643 # the parent is or will be installed.
4644 blocker = Blocker(atom=dep.atom,
4645 eapi=dep.parent.metadata["EAPI"],
4646 root=dep.parent.root)
4647 self._blocker_parents.add(blocker, dep.parent)
4649 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4650 onlydeps=dep.onlydeps)
4652 if allow_unsatisfied:
4653 self._unsatisfied_deps.append(dep)
4655 self._unsatisfied_deps_for_display.append(
4656 ((dep.root, dep.atom), {"myparent":dep.parent}))
4658 # In some cases, dep_check will return deps that shouldn't
4659 # be processed any further, so they are identified and
4660 # discarded here. Try to discard as few as possible since
4661 # discarded dependencies reduce the amount of information
4662 # available for optimization of merge order.
4663 if dep.priority.satisfied and \
4664 not (existing_node or empty or deep or update):
4666 if dep.root == self.target_root:
4668 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4669 except StopIteration:
4671 except portage.exception.InvalidDependString:
4672 if not dep_pkg.installed:
4673 # This shouldn't happen since the package
4674 # should have been masked.
4677 self._ignored_deps.append(dep)
4680 if not self._add_pkg(dep_pkg, dep):
4684 def _add_pkg(self, pkg, dep):
4691 myparent = dep.parent
4692 priority = dep.priority
4694 if priority is None:
4695 priority = DepPriority()
4697 Fills the digraph with nodes comprised of packages to merge.
4698 mybigkey is the package spec of the package to merge.
4699 myparent is the package depending on mybigkey ( or None )
4700 addme = Should we add this package to the digraph or are we just looking at it's deps?
4701 Think --onlydeps, we need to ignore packages in that case.
4704 #IUSE-aware emerge -> USE DEP aware depgraph
4705 #"no downgrade" emerge
4707 # Ensure that the dependencies of the same package
4708 # are never processed more than once.
4709 previously_added = pkg in self.digraph
4711 # select the correct /var database that we'll be checking against
4712 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4713 pkgsettings = self.pkgsettings[pkg.root]
4718 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4719 except portage.exception.InvalidDependString, e:
4720 if not pkg.installed:
4721 show_invalid_depstring_notice(
4722 pkg, pkg.metadata["PROVIDE"], str(e))
4726 if not pkg.onlydeps:
4727 if not pkg.installed and \
4728 "empty" not in self.myparams and \
4729 vardbapi.match(pkg.slot_atom):
4730 # Increase the priority of dependencies on packages that
4731 # are being rebuilt. This optimizes merge order so that
4732 # dependencies are rebuilt/updated as soon as possible,
4733 # which is needed especially when emerge is called by
4734 # revdep-rebuild since dependencies may be affected by ABI
4735 # breakage that has rendered them useless. Don't adjust
4736 # priority here when in "empty" mode since all packages
4737 # are being merged in that case.
4738 priority.rebuild = True
4740 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4741 slot_collision = False
4743 existing_node_matches = pkg.cpv == existing_node.cpv
4744 if existing_node_matches and \
4745 pkg != existing_node and \
4746 dep.atom is not None:
4747 # Use package set for matching since it will match via
4748 # PROVIDE when necessary, while match_from_list does not.
4749 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4750 if not atom_set.findAtomForPackage(existing_node):
4751 existing_node_matches = False
4752 if existing_node_matches:
4753 # The existing node can be reused.
4755 for parent_atom in arg_atoms:
4756 parent, atom = parent_atom
4757 self.digraph.add(existing_node, parent,
4759 self._add_parent_atom(existing_node, parent_atom)
4760 # If a direct circular dependency is not an unsatisfied
4761 # buildtime dependency then drop it here since otherwise
4762 # it can skew the merge order calculation in an unwanted
4764 if existing_node != myparent or \
4765 (priority.buildtime and not priority.satisfied):
4766 self.digraph.addnode(existing_node, myparent,
4768 if dep.atom is not None and dep.parent is not None:
4769 self._add_parent_atom(existing_node,
4770 (dep.parent, dep.atom))
4774 # A slot collision has occurred. Sometimes this coincides
4775 # with unresolvable blockers, so the slot collision will be
4776 # shown later if there are no unresolvable blockers.
4777 self._add_slot_conflict(pkg)
4778 slot_collision = True
4781 # Now add this node to the graph so that self.display()
4782 # can show use flags and --tree portage.output. This node is
4783 # only being partially added to the graph. It must not be
4784 # allowed to interfere with the other nodes that have been
4785 # added. Do not overwrite data for existing nodes in
4786 # self.mydbapi since that data will be used for blocker
4788 # Even though the graph is now invalid, continue to process
4789 # dependencies so that things like --fetchonly can still
4790 # function despite collisions.
4793 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4794 self.mydbapi[pkg.root].cpv_inject(pkg)
4796 if not pkg.installed:
4797 # Allow this package to satisfy old-style virtuals in case it
4798 # doesn't already. Any pre-existing providers will be preferred
4801 pkgsettings.setinst(pkg.cpv, pkg.metadata)
4802 # For consistency, also update the global virtuals.
4803 settings = self.roots[pkg.root].settings
4805 settings.setinst(pkg.cpv, pkg.metadata)
4807 except portage.exception.InvalidDependString, e:
4808 show_invalid_depstring_notice(
4809 pkg, pkg.metadata["PROVIDE"], str(e))
4814 self._set_nodes.add(pkg)
4816 # Do this even when addme is False (--onlydeps) so that the
4817 # parent/child relationship is always known in case
4818 # self._show_slot_collision_notice() needs to be called later.
4819 self.digraph.add(pkg, myparent, priority=priority)
4820 if dep.atom is not None and dep.parent is not None:
4821 self._add_parent_atom(pkg, (dep.parent, dep.atom))
4824 for parent_atom in arg_atoms:
4825 parent, atom = parent_atom
4826 self.digraph.add(pkg, parent, priority=priority)
4827 self._add_parent_atom(pkg, parent_atom)
4829 """ This section determines whether we go deeper into dependencies or not.
4830 We want to go deeper on a few occasions:
4831 Installing package A, we need to make sure package A's deps are met.
4832 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
4833 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
4835 dep_stack = self._dep_stack
4836 if "recurse" not in self.myparams:
4838 elif pkg.installed and \
4839 "deep" not in self.myparams:
4840 dep_stack = self._ignored_deps
4842 self.spinner.update()
4847 if not previously_added:
4848 dep_stack.append(pkg)
4851 def _add_parent_atom(self, pkg, parent_atom):
4852 parent_atoms = self._parent_atoms.get(pkg)
4853 if parent_atoms is None:
4854 parent_atoms = set()
4855 self._parent_atoms[pkg] = parent_atoms
4856 parent_atoms.add(parent_atom)
# NOTE(review): this listing is line-number-prefixed and at least one
# original line is missing (between 4862 and 4864, presumably the
# initializer that creates a fresh set); comments describe only what
# is visible -- confirm against the full source.
# Record pkg as a participant in a slot collision, keyed by
# (slot atom, root), so the conflict can be reported later.
4858 def _add_slot_conflict(self, pkg):
4859 self._slot_collision_nodes.add(pkg)
4860 slot_key = (pkg.slot_atom, pkg.root)
4861 slot_nodes = self._slot_collision_info.get(slot_key)
4862 if slot_nodes is None:
# The package already occupying this slot is added alongside pkg so
# both colliding packages appear in the conflict info.
4864 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
4865 self._slot_collision_info[slot_key] = slot_nodes
# NOTE(review): several original lines are missing from this listing
# (e.g. the edepend dict initializer before 4882, the myroot binding,
# the deps tuple opener before 4910, and parts of the exception
# handling near the end); comments describe only the visible code.
# Queue pkg's DEPEND/RDEPEND/PDEPEND atoms as Dependency objects via
# self._add_dep, with priorities reflecting build/run/post-run roles.
4868 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
4870 mytype = pkg.type_name
4873 metadata = pkg.metadata
4874 myuse = pkg.use.enabled
4876 depth = pkg.depth + 1
4877 removal_action = "remove" in self.myparams
4880 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
# Copy the raw dependency strings out of the package metadata
# (the loop header over depkeys is not visible in this listing).
4882 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant
# because nothing gets merged, so drop RDEPEND/PDEPEND.
4884 if not pkg.built and \
4885 "--buildpkgonly" in self.myopts and \
4886 "deep" not in self.myparams and \
4887 "empty" not in self.myparams:
4888 edepend["RDEPEND"] = ""
4889 edepend["PDEPEND"] = ""
4890 bdeps_satisfied = False
4892 if pkg.built and not removal_action:
4893 if self.myopts.get("--with-bdeps", "n") == "y":
4894 # Pull in build time deps as requested, but marked them as
4895 # "satisfied" since they are not strictly required. This allows
4896 # more freedom in the merge order calculation for solving
4897 # circular dependencies. Don't convert to PDEPEND since that
4898 # could make --with-bdeps=y less effective if it is used to
4899 # adjust merge order to prevent built_with_use() calls from
4901 bdeps_satisfied = True
4903 # built packages do not have build time dependencies.
4904 edepend["DEPEND"] = ""
4906 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
4907 edepend["DEPEND"] = ""
# Build-time deps resolve against "/" while run-time deps resolve
# against the target root (myroot is bound outside this view).
4910 ("/", edepend["DEPEND"],
4911 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
4912 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
4913 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
4916 debug = "--debug" in self.myopts
# Installed packages get lenient dep-string parsing.
4917 strict = mytype != "installed"
4919 for dep_root, dep_string, dep_priority in deps:
4921 # Decrease priority so that --buildpkgonly
4922 # hasallzeros() works correctly.
4923 dep_priority = DepPriority()
4928 print "Parent: ", jbigkey
4929 print "Depstring:", dep_string
4930 print "Priority:", dep_priority
4931 vardb = self.roots[dep_root].trees["vartree"].dbapi
4933 selected_atoms = self._select_atoms(dep_root,
4934 dep_string, myuse=myuse, parent=pkg, strict=strict)
4935 except portage.exception.InvalidDependString, e:
4936 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
4939 print "Candidates:", selected_atoms
4941 for atom in selected_atoms:
4944 atom = portage.dep.Atom(atom)
4946 mypriority = dep_priority.copy()
# If an installed package already satisfies the (non-blocker) atom,
# mark the priority satisfied so merge ordering can relax it.
4947 if not atom.blocker and vardb.match(atom):
4948 mypriority.satisfied = True
4950 if not self._add_dep(Dependency(atom=atom,
4951 blocker=atom.blocker, depth=depth, parent=pkg,
4952 priority=mypriority, root=dep_root),
4953 allow_unsatisfied=allow_unsatisfied):
4956 except portage.exception.InvalidAtom, e:
4957 show_invalid_depstring_notice(
4958 pkg, dep_string, str(e))
4960 if not pkg.installed:
4964 print "Exiting...", jbigkey
# An unqualified atom matched several categories; report all matches
# and explain why the package cannot be installed.
4965 except portage.exception.AmbiguousPackageName, e:
4967 portage.writemsg("\n\n!!! An atom in the dependencies " + \
4968 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
4970 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
4971 portage.writemsg("\n", noiselevel=-1)
4972 if mytype == "binary":
4974 "!!! This binary package cannot be installed: '%s'\n" % \
4975 mykey, noiselevel=-1)
4976 elif mytype == "ebuild":
4977 portdb = self.roots[myroot].trees["porttree"].dbapi
4978 myebuild, mylocation = portdb.findname2(mykey)
4979 portage.writemsg("!!! This ebuild cannot be installed: " + \
4980 "'%s'\n" % myebuild, noiselevel=-1)
4981 portage.writemsg("!!! Please notify the package maintainer " + \
4982 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
	"""Construct a dependency priority object for the current mode.

	Uses UnmergeDepPriority when running a removal operation
	("remove" in self.myparams), otherwise a regular DepPriority.
	Keyword arguments are forwarded to the chosen constructor.
	"""
	# NOTE(review): the listing was missing the else: line between the
	# two assignments; restored here so the first assignment is not
	# unconditionally overwritten.
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	else:
		priority_constructor = DepPriority
	return priority_constructor(**kwargs)
# NOTE(review): lines are missing from this listing (the docstring
# delimiters, the cp_set/deps initializers, the matching condition
# between 5011 and 5016, and the final return); comments describe
# only the visible code.
# Expand a category-less atom into candidate category/pn atoms by
# scanning every configured package database for matching names.
4993 def _dep_expand(self, root_config, atom_without_category):
4995 @param root_config: a root config instance
4996 @type root_config: RootConfig
4997 @param atom_without_category: an atom without a category component
4998 @type atom_without_category: String
5000 @returns: a list of atoms containing categories (possibly empty)
# Insert a dummy "null" category just to extract the package name
# part via dep_getkey/catsplit.
5002 null_cp = portage.dep_getkey(insert_category_into_atom(
5003 atom_without_category, "null"))
5004 cat, atom_pn = portage.catsplit(null_cp)
5007 for db, pkg_type, built, installed, db_keys in \
5008 self._filtered_trees[root_config.root]["dbs"]:
5009 cp_set.update(db.cp_all())
5010 for cp in list(cp_set):
5011 cat, pn = portage.catsplit(cp)
5016 cat, pn = portage.catsplit(cp)
# For each surviving category, rebuild the atom with that category.
5017 deps.append(insert_category_into_atom(
5018 atom_without_category, cat))
# Check whether any configured package database for the given root
# carries packages under atom_cp.  The return statements are not
# visible in this listing -- presumably True inside the if and False
# after the loop; confirm against the full source.
5021 def _have_new_virt(self, root, atom_cp):
5023 for db, pkg_type, built, installed, db_keys in \
5024 self._filtered_trees[root]["dbs"]:
5025 if db.cp_list(atom_cp):
# NOTE(review): this listing omits several lines (the early-return
# for non-target roots, loop continue/break statements, and the
# yield at the end); comments describe only the visible code.
# Iterate over the argument atoms (from command-line args/sets) that
# match pkg, filtering out atoms better served by another package.
5030 def _iter_atoms_for_pkg(self, pkg):
5031 # TODO: add multiple $ROOT support
5032 if pkg.root != self.target_root:
5034 atom_arg_map = self._atom_arg_map
5035 root_config = self.roots[pkg.root]
5036 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5037 atom_cp = portage.dep_getkey(atom)
# Skip old-style virtual matches when a new-style virtual package
# for the same cp is available.
5038 if atom_cp != pkg.cp and \
5039 self._have_new_virt(pkg.root, atom_cp):
5041 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5042 visible_pkgs.reverse() # descending order
# Look for a visible, higher version in a different slot.
5044 for visible_pkg in visible_pkgs:
5045 if visible_pkg.cp != atom_cp:
5047 if pkg >= visible_pkg:
5048 # This is descending order, and we're not
5049 # interested in any versions <= pkg given.
5051 if pkg.slot_atom != visible_pkg.slot_atom:
5052 higher_slot = visible_pkg
5054 if higher_slot is not None:
5056 for arg in atom_arg_map[(atom, pkg.root)]:
5057 if isinstance(arg, PackageArg) and \
# NOTE(review): this very large method is incomplete in this listing
# (many original lines are missing, including the main for-x loop
# header, several try/except openers, and list initializers such as
# args/myfavorites/lookup_owners); comments below annotate only the
# visible code and should be checked against the full source.
5062 def select_files(self, myfiles):
5063 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5064 appropriate depgraph and return a favorite list."""
5065 debug = "--debug" in self.myopts
5066 root_config = self.roots[self.target_root]
5067 sets = root_config.sets
5068 getSetAtoms = root_config.setconfig.getSetAtoms
5070 myroot = self.target_root
5071 dbs = self._filtered_trees[myroot]["dbs"]
5072 vardb = self.trees[myroot]["vartree"].dbapi
5073 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5074 portdb = self.trees[myroot]["porttree"].dbapi
5075 bindb = self.trees[myroot]["bintree"].dbapi
5076 pkgsettings = self.pkgsettings[myroot]
5078 onlydeps = "--onlydeps" in self.myopts
# --- Case 1: argument is a binary package (.tbz2) ---
# Resolve the file against PKGDIR (directly or under All/).
5081 ext = os.path.splitext(x)[1]
5083 if not os.path.exists(x):
5085 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5086 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5087 elif os.path.exists(
5088 os.path.join(pkgsettings["PKGDIR"], x)):
5089 x = os.path.join(pkgsettings["PKGDIR"], x)
5091 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5092 print "!!! Please ensure the tbz2 exists as specified.\n"
5093 return 0, myfavorites
# Derive the cpv key from the tbz2's embedded CATEGORY plus filename.
5094 mytbz2=portage.xpak.tbz2(x)
5095 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The given file must be the same one the bintree would use for this
# key, otherwise PKGDIR is pointing somewhere else.
5096 if os.path.realpath(x) != \
5097 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5098 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5099 return 0, myfavorites
5100 db_keys = list(bindb._aux_cache_keys)
5101 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5102 pkg = Package(type_name="binary", root_config=root_config,
5103 cpv=mykey, built=True, metadata=metadata,
5105 self._pkg_cache[pkg] = pkg
5106 args.append(PackageArg(arg=x, package=pkg,
5107 root_config=root_config))
# --- Case 2: argument is an ebuild file path ---
5108 elif ext==".ebuild":
5109 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5110 pkgdir = os.path.dirname(ebuild_path)
5111 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5112 cp = pkgdir[len(tree_root)+1:]
5113 e = portage.exception.PackageNotFound(
5114 ("%s is not in a valid portage tree " + \
5115 "hierarchy or does not exist") % x)
5116 if not portage.isvalidatom(cp):
5118 cat = portage.catsplit(cp)[0]
# Strip the ".ebuild" suffix (7 chars) to form the cpv key.
5119 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5120 if not portage.isvalidatom("="+mykey):
5122 ebuild_path = portdb.findname(mykey)
# The path the user gave must match the one the porttree resolves,
# otherwise PORTDIR/PORTDIR_OVERLAY is misconfigured.
5124 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5125 cp, os.path.basename(ebuild_path)):
5126 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5127 return 0, myfavorites
5128 if mykey not in portdb.xmatch(
5129 "match-visible", portage.dep_getkey(mykey)):
5130 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5131 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5132 print colorize("BAD", "*** page for details.")
5133 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5136 raise portage.exception.PackageNotFound(
5137 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5138 db_keys = list(portdb._aux_cache_keys)
5139 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5140 pkg = Package(type_name="ebuild", root_config=root_config,
5141 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5142 pkgsettings.setcpv(pkg)
5143 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5144 self._pkg_cache[pkg] = pkg
5145 args.append(PackageArg(arg=x, package=pkg,
5146 root_config=root_config))
# --- Case 3: argument is an absolute filesystem path ---
5147 elif x.startswith(os.path.sep):
5148 if not x.startswith(myroot):
5149 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5150 " $ROOT.\n") % x, noiselevel=-1)
5152 # Queue these up since it's most efficient to handle
5153 # multiple files in a single iter_owners() call.
5154 lookup_owners.append(x)
# --- Case 4: argument is a set name or a package atom ---
5156 if x in ("system", "world"):
5158 if x.startswith(SETPREFIX):
5159 s = x[len(SETPREFIX):]
5161 raise portage.exception.PackageSetNotFound(s)
5164 # Recursively expand sets so that containment tests in
5165 # self._get_parent_sets() properly match atoms in nested
5166 # sets (like if world contains system).
5167 expanded_set = InternalPackageSet(
5168 initial_atoms=getSetAtoms(s))
5169 self._sets[s] = expanded_set
5170 args.append(SetArg(arg=x, set=expanded_set,
5171 root_config=root_config))
5172 myfavorites.append(x)
5174 if not is_valid_package_atom(x):
5175 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5177 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5178 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5180 # Don't expand categories or old-style virtuals here unless
5181 # necessary. Expansion of old-style virtuals here causes at
5182 # least the following problems:
5183 # 1) It's more difficult to determine which set(s) an atom
5184 # came from, if any.
5185 # 2) It takes away freedom from the resolver to choose other
5186 # possible expansions when necessary.
5188 args.append(AtomArg(arg=x, atom=x,
5189 root_config=root_config))
# Category-less atom: expand it against all dbs, then prefer the
# expansion matching an installed package when that disambiguates.
5191 expanded_atoms = self._dep_expand(root_config, x)
5192 installed_cp_set = set()
5193 for atom in expanded_atoms:
5194 atom_cp = portage.dep_getkey(atom)
5195 if vardb.cp_list(atom_cp):
5196 installed_cp_set.add(atom_cp)
5197 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5198 installed_cp = iter(installed_cp_set).next()
5199 expanded_atoms = [atom for atom in expanded_atoms \
5200 if portage.dep_getkey(atom) == installed_cp]
5202 if len(expanded_atoms) > 1:
5205 ambiguous_package_name(x, expanded_atoms, root_config,
5206 self.spinner, self.myopts)
5207 return False, myfavorites
5209 atom = expanded_atoms[0]
5211 null_atom = insert_category_into_atom(x, "null")
5212 null_cp = portage.dep_getkey(null_atom)
5213 cat, atom_pn = portage.catsplit(null_cp)
5214 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5216 # Allow the depgraph to choose which virtual.
5217 atom = insert_category_into_atom(x, "virtual")
5219 atom = insert_category_into_atom(x, "null")
5221 args.append(AtomArg(arg=x, atom=atom,
5222 root_config=root_config))
# Resolve queued absolute paths to owning packages in one pass.
5226 search_for_multiple = False
5227 if len(lookup_owners) > 1:
5228 search_for_multiple = True
5230 for x in lookup_owners:
5231 if not search_for_multiple and os.path.isdir(x):
5232 search_for_multiple = True
5233 relative_paths.append(x[len(myroot):])
5236 for pkg, relative_path in \
5237 real_vardb._owners.iter_owners(relative_paths):
5238 owners.add(pkg.mycpv)
5239 if not search_for_multiple:
5243 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5244 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5248 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5250 # portage now masks packages with missing slot, but it's
5251 # possible that one was installed by an older version
5252 atom = portage.cpv_getkey(cpv)
5254 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5255 args.append(AtomArg(arg=atom, atom=atom,
5256 root_config=root_config))
5258 if "--update" in self.myopts:
5259 # Enable greedy SLOT atoms for atoms given as arguments.
5260 # This is currently disabled for sets since greedy SLOT
5261 # atoms could be a property of the set itself.
5264 # In addition to any installed slots, also try to pull
5265 # in the latest new slot that may be available.
5266 greedy_atoms.append(arg)
5267 if not isinstance(arg, (AtomArg, PackageArg)):
5269 atom_cp = portage.dep_getkey(arg.atom)
5271 for cpv in vardb.match(arg.atom):
5272 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5274 greedy_atoms.append(
5275 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5276 root_config=root_config))
5280 # Create the "args" package set from atoms and
5281 # packages given as arguments.
5282 args_set = self._sets["args"]
5284 if not isinstance(arg, (AtomArg, PackageArg)):
5287 if myatom in args_set:
5289 args_set.add(myatom)
5290 myfavorites.append(myatom)
5291 self._set_atoms.update(chain(*self._sets.itervalues()))
# Map every (atom, root) to the argument object(s) that supplied it.
5292 atom_arg_map = self._atom_arg_map
5294 for atom in arg.set:
5295 atom_key = (atom, myroot)
5296 refs = atom_arg_map.get(atom_key)
5299 atom_arg_map[atom_key] = refs
5302 pprovideddict = pkgsettings.pprovideddict
5304 portage.writemsg("\n", noiselevel=-1)
5305 # Order needs to be preserved since a feature of --nodeps
5306 # is to allow the user to force a specific merge order.
# Main resolution loop: add each argument's atoms to the graph.
5310 for atom in arg.set:
5311 self.spinner.update()
5312 dep = Dependency(atom=atom, onlydeps=onlydeps,
5313 root=myroot, parent=arg)
5314 atom_cp = portage.dep_getkey(atom)
5316 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5317 if pprovided and portage.match_from_list(atom, pprovided):
5318 # A provided package has been specified on the command line.
5319 self._pprovided_args.append((arg, atom))
5321 if isinstance(arg, PackageArg):
5322 if not self._add_pkg(arg.package, dep) or \
5323 not self._create_graph():
5324 sys.stderr.write(("\n\n!!! Problem resolving " + \
5325 "dependencies for %s\n") % arg.arg)
5326 return 0, myfavorites
5329 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5330 (arg, atom), noiselevel=-1)
5331 pkg, existing_node = self._select_package(
5332 myroot, atom, onlydeps=onlydeps)
5334 if not (isinstance(arg, SetArg) and \
5335 arg.name in ("system", "world")):
5336 self._unsatisfied_deps_for_display.append(
5337 ((myroot, atom), {}))
5338 return 0, myfavorites
5339 self._missing_args.append((arg, atom))
5341 if atom_cp != pkg.cp:
5342 # For old-style virtuals, we need to repeat the
5343 # package.provided check against the selected package.
5344 expanded_atom = atom.replace(atom_cp, pkg.cp)
5345 pprovided = pprovideddict.get(pkg.cp)
5347 portage.match_from_list(expanded_atom, pprovided):
5348 # A provided package has been
5349 # specified on the command line.
5350 self._pprovided_args.append((arg, atom))
5352 if pkg.installed and "selective" not in self.myparams:
5353 self._unsatisfied_deps_for_display.append(
5354 ((myroot, atom), {}))
5355 # Previous behavior was to bail out in this case, but
5356 # since the dep is satisfied by the installed package,
5357 # it's more friendly to continue building the graph
5358 # and just show a warning message. Therefore, only bail
5359 # out here if the atom is not from either the system or
5361 if not (isinstance(arg, SetArg) and \
5362 arg.name in ("system", "world")):
5363 return 0, myfavorites
5365 # Add the selected package to the graph as soon as possible
5366 # so that later dep_check() calls can use it as feedback
5367 # for making more consistent atom selections.
5368 if not self._add_pkg(pkg, dep):
5369 if isinstance(arg, SetArg):
5370 sys.stderr.write(("\n\n!!! Problem resolving " + \
5371 "dependencies for %s from %s\n") % \
5374 sys.stderr.write(("\n\n!!! Problem resolving " + \
5375 "dependencies for %s\n") % atom)
5376 return 0, myfavorites
5378 except portage.exception.MissingSignature, e:
5379 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5380 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5381 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5382 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5383 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5384 return 0, myfavorites
5385 except portage.exception.InvalidSignature, e:
5386 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5387 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5388 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5389 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5390 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5391 return 0, myfavorites
5392 except SystemExit, e:
5393 raise # Needed else can't exit
5394 except Exception, e:
5395 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5396 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5399 # Now that the root packages have been added to the graph,
5400 # process the dependencies.
5401 if not self._create_graph():
5402 return 0, myfavorites
# --usepkgonly sanity check: every merge node must be a binary.
5405 if "--usepkgonly" in self.myopts:
5406 for xs in self.digraph.all_nodes():
5407 if not isinstance(xs, Package):
5409 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5413 print "Missing binary for:",xs[2]
5417 except self._unknown_internal_error:
5418 return False, myfavorites
5420 # We're true here unless we are missing binaries.
5421 return (not missing,myfavorites)
5423 def _select_atoms_from_graph(self, *pargs, **kwargs):
5425 Prefer atoms matching packages that have already been
5426 added to the graph or those that are installed and have
5427 not been scheduled for replacement.
5429 kwargs["trees"] = self._graph_trees
5430 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): lines are missing from this listing (the "if trees is
# None" test before 5438, the try/finally openers around the
# dep_check call, and the success-check before 5453); comments
# describe only the visible code.
5432 def _select_atoms_highest_available(self, root, depstring,
5433 myuse=None, parent=None, strict=True, trees=None):
5434 """This will raise InvalidDependString if necessary. If trees is
5435 None then self._filtered_trees is used."""
5436 pkgsettings = self.pkgsettings[root]
5438 trees = self._filtered_trees
# Stash the parent in the tree dict so dep_check callbacks can see
# which package's dependencies are being resolved.
5441 if parent is not None:
5442 trees[root]["parent"] = parent
# Temporarily relax the module-global strict-parse flag for the call;
# it is restored to True afterwards (see line 5451).
5444 portage.dep._dep_check_strict = False
5445 mycheck = portage.dep_check(depstring, None,
5446 pkgsettings, myuse=myuse,
5447 myroot=root, trees=trees)
5449 if parent is not None:
5450 trees[root].pop("parent")
5451 portage.dep._dep_check_strict = True
# dep_check reports failure via its return tuple; mycheck[1] is the
# error message on failure, the selected atom list on success.
5453 raise portage.exception.InvalidDependString(mycheck[1])
5454 selected_atoms = mycheck[1]
5455 return selected_atoms
# NOTE(review): this listing omits many lines (the conditionals
# guarding the atom_without_use rebuild, list initializers such as
# missing_use/masked_packages/changes, the have_eapi_mask printout
# body, and the initial node binding for the parent traversal);
# comments annotate only the visible code.
# Print a human-readable explanation of why no package satisfies
# `atom`: missing USE/IUSE flags, masked packages, or nothing at all,
# followed by the dependency chain that requested it.
5457 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5458 atom = portage.dep.Atom(atom)
5459 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-less variant of the atom so matching can find packages
# that exist but fail only the USE-dependency part.
5460 atom_without_use = atom
5462 atom_without_use = portage.dep.remove_slot(atom)
5464 atom_without_use += ":" + atom.slot
5465 atom_without_use = portage.dep.Atom(atom_without_use)
5466 xinfo = '"%s"' % atom
5469 # Discard null/ from failed cpv_expand category expansion.
5470 xinfo = xinfo.replace("null/", "")
5471 masked_packages = []
5473 missing_licenses = []
5474 have_eapi_mask = False
5475 pkgsettings = self.pkgsettings[root]
5476 implicit_iuse = pkgsettings._get_implicit_iuse()
5477 root_config = self.roots[root]
5478 portdb = self.roots[root].trees["porttree"].dbapi
5479 dbs = self._filtered_trees[root]["dbs"]
# Collect every candidate cpv from every db along with mask reasons.
5480 for db, pkg_type, built, installed, db_keys in dbs:
5484 if hasattr(db, "xmatch"):
5485 cpv_list = db.xmatch("match-all", atom_without_use)
5487 cpv_list = db.match(atom_without_use)
5490 for cpv in cpv_list:
5491 metadata, mreasons = get_mask_info(root_config, cpv,
5492 pkgsettings, db, pkg_type, built, installed, db_keys)
5493 if metadata is not None:
5494 pkg = Package(built=built, cpv=cpv,
5495 installed=installed, metadata=metadata,
5496 root_config=root_config)
5497 if pkg.cp != atom.cp:
5498 # A cpv can be returned from dbapi.match() as an
5499 # old-style virtual match even in cases when the
5500 # package does not actually PROVIDE the virtual.
5501 # Filter out any such false matches here.
5502 if not atom_set.findAtomForPackage(pkg):
# Unmasked packages that only fail the USE dep go to missing_use;
# everything else is reported as masked.
5504 if atom.use and not mreasons:
5505 missing_use.append(pkg)
5507 masked_packages.append(
5508 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE failures: flags absent from IUSE vs. flags that merely
# need to be toggled.
5510 missing_use_reasons = []
5511 missing_iuse_reasons = []
5512 for pkg in missing_use:
5513 use = pkg.use.enabled
5514 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5515 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5517 for x in atom.use.required:
5518 if iuse_re.match(x) is None:
5519 missing_iuse.append(x)
5522 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5523 missing_iuse_reasons.append((pkg, mreasons))
5525 need_enable = sorted(atom.use.enabled.difference(use))
5526 need_disable = sorted(atom.use.disabled.intersection(use))
5527 if need_enable or need_disable:
5529 changes.extend(colorize("red", "+" + x) \
5530 for x in need_enable)
5531 changes.extend(colorize("blue", "-" + x) \
5532 for x in need_disable)
5533 mreasons.append("Change USE: %s" % " ".join(changes))
5534 missing_use_reasons.append((pkg, mreasons))
5536 if missing_iuse_reasons and not missing_use_reasons:
5537 missing_use_reasons = missing_iuse_reasons
5538 elif missing_use_reasons:
5539 # Only show the latest version.
5540 del missing_use_reasons[1:]
5542 if missing_use_reasons:
5543 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5544 print "!!! One of the following packages is required to complete your request:"
5545 for pkg, mreasons in missing_use_reasons:
5546 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5548 elif masked_packages:
5550 colorize("BAD", "All ebuilds that could satisfy ") + \
5551 colorize("INFORM", xinfo) + \
5552 colorize("BAD", " have been masked.")
5553 print "!!! One of the following masked packages is required to complete your request:"
5554 have_eapi_mask = show_masked_packages(masked_packages)
5557 msg = ("The current version of portage supports " + \
5558 "EAPI '%s'. You must upgrade to a newer version" + \
5559 " of portage before EAPI masked packages can" + \
5560 " be installed.") % portage.const.EAPI
5561 from textwrap import wrap
5562 for line in wrap(msg, 75):
5567 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5569 # Show parent nodes and the argument that pulled them in.
5570 traversed_nodes = set()
5573 while node is not None:
5574 traversed_nodes.add(node)
5575 msg.append('(dependency required by "%s" [%s])' % \
5576 (colorize('INFORM', str(node.cpv)), node.type_name))
5577 # When traversing to parents, prefer arguments over packages
5578 # since arguments are root nodes. Never traverse the same
5579 # package twice, in order to prevent an infinite loop.
5580 selected_parent = None
5581 for parent in self.digraph.parent_nodes(node):
5582 if isinstance(parent, DependencyArg):
5583 msg.append('(dependency required by "%s" [argument])' % \
5584 (colorize('INFORM', str(parent))))
5585 selected_parent = None
5587 if parent not in traversed_nodes:
5588 selected_parent = parent
5589 node = selected_parent
# NOTE(review): lines are missing from this listing (the cache-hit
# test after 5597, the tuple unpacking that binds pkg/existing, and
# the return statements); comments describe only the visible code.
# Memoizing wrapper: look up (root, atom, onlydeps) in
# self._highest_pkg_cache before delegating to the _imp worker, and
# refresh cached entries once their package lands in the graph.
5595 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5596 cache_key = (root, atom, onlydeps)
5597 ret = self._highest_pkg_cache.get(cache_key)
5600 if pkg and not existing:
5601 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5602 if existing and existing == pkg:
5603 # Update the cache to reflect that the
5604 # package has been added to the graph.
5606 self._highest_pkg_cache[cache_key] = ret
5608 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5609 self._highest_pkg_cache[cache_key] = ret
5612 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5613 root_config = self.roots[root]
5614 pkgsettings = self.pkgsettings[root]
5615 dbs = self._filtered_trees[root]["dbs"]
5616 vardb = self.roots[root].trees["vartree"].dbapi
5617 portdb = self.roots[root].trees["porttree"].dbapi
5618 # List of acceptable packages, ordered by type preference.
5619 matched_packages = []
5620 highest_version = None
5621 if not isinstance(atom, portage.dep.Atom):
5622 atom = portage.dep.Atom(atom)
5624 atom_set = InternalPackageSet(initial_atoms=(atom,))
5625 existing_node = None
5627 usepkgonly = "--usepkgonly" in self.myopts
5628 empty = "empty" in self.myparams
5629 selective = "selective" in self.myparams
5631 noreplace = "--noreplace" in self.myopts
5632 # Behavior of the "selective" parameter depends on
5633 # whether or not a package matches an argument atom.
5634 # If an installed package provides an old-style
5635 # virtual that is no longer provided by an available
5636 # package, the installed package may match an argument
5637 # atom even though none of the available packages do.
5638 # Therefore, "selective" logic does not consider
5639 # whether or not an installed package matches an
5640 # argument atom. It only considers whether or not
5641 # available packages match argument atoms, which is
5642 # represented by the found_available_arg flag.
5643 found_available_arg = False
5644 for find_existing_node in True, False:
5647 for db, pkg_type, built, installed, db_keys in dbs:
5650 if installed and not find_existing_node:
5651 want_reinstall = reinstall or empty or \
5652 (found_available_arg and not selective)
5653 if want_reinstall and matched_packages:
5655 if hasattr(db, "xmatch"):
5656 cpv_list = db.xmatch("match-all", atom)
5658 cpv_list = db.match(atom)
5660 # USE=multislot can make an installed package appear as if
5661 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5662 # won't do any good as long as USE=multislot is enabled since
5663 # the newly built package still won't have the expected slot.
5664 # Therefore, assume that such SLOT dependencies are already
5665 # satisfied rather than forcing a rebuild.
5666 if installed and not cpv_list and atom.slot:
5667 for cpv in db.match(atom.cp):
5668 slot_available = False
5669 for other_db, other_type, other_built, \
5670 other_installed, other_keys in dbs:
5673 other_db.aux_get(cpv, ["SLOT"])[0]:
5674 slot_available = True
5678 if not slot_available:
5680 inst_pkg = self._pkg(cpv, "installed",
5681 root_config, installed=installed)
5682 # Remove the slot from the atom and verify that
5683 # the package matches the resulting atom.
5684 atom_without_slot = portage.dep.remove_slot(atom)
5686 atom_without_slot += str(atom.use)
5687 atom_without_slot = portage.dep.Atom(atom_without_slot)
5688 if portage.match_from_list(
5689 atom_without_slot, [inst_pkg]):
5690 cpv_list = [inst_pkg.cpv]
5695 pkg_status = "merge"
5696 if installed or onlydeps:
5697 pkg_status = "nomerge"
5700 for cpv in cpv_list:
5701 # Make --noreplace take precedence over --newuse.
5702 if not installed and noreplace and \
5703 cpv in vardb.match(atom):
5704 # If the installed version is masked, it may
5705 # be necessary to look at lower versions,
5706 # in case there is a visible downgrade.
5708 reinstall_for_flags = None
5709 cache_key = (pkg_type, root, cpv, pkg_status)
5710 calculated_use = True
5711 pkg = self._pkg_cache.get(cache_key)
5713 calculated_use = False
5715 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5718 pkg = Package(built=built, cpv=cpv,
5719 installed=installed, metadata=metadata,
5720 onlydeps=onlydeps, root_config=root_config,
5722 metadata = pkg.metadata
5723 if not built and ("?" in metadata["LICENSE"] or \
5724 "?" in metadata["PROVIDE"]):
5725 # This is avoided whenever possible because
5726 # it's expensive. It only needs to be done here
5727 # if it has an effect on visibility.
5728 pkgsettings.setcpv(pkg)
5729 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5730 calculated_use = True
5731 self._pkg_cache[pkg] = pkg
5733 if not installed or (installed and matched_packages):
5734 # Only enforce visibility on installed packages
5735 # if there is at least one other visible package
5736 # available. By filtering installed masked packages
5737 # here, packages that have been masked since they
5738 # were installed can be automatically downgraded
5739 # to an unmasked version.
5741 if not visible(pkgsettings, pkg):
5743 except portage.exception.InvalidDependString:
5747 # Enable upgrade or downgrade to a version
5748 # with visible KEYWORDS when the installed
5749 # version is masked by KEYWORDS, but never
5750 # reinstall the same exact version only due
5751 # to a KEYWORDS mask.
5752 if installed and matched_packages and \
5753 pkgsettings._getMissingKeywords(
5754 pkg.cpv, pkg.metadata):
5755 different_version = None
5756 for avail_pkg in matched_packages:
5757 if not portage.dep.cpvequal(
5758 pkg.cpv, avail_pkg.cpv):
5759 different_version = avail_pkg
5761 if different_version is not None:
5762 # Only reinstall for KEYWORDS if
5763 # it's not the same version.
5766 if not pkg.built and not calculated_use:
5767 # This is avoided whenever possible because
5769 pkgsettings.setcpv(pkg)
5770 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5772 if pkg.cp != atom.cp:
5773 # A cpv can be returned from dbapi.match() as an
5774 # old-style virtual match even in cases when the
5775 # package does not actually PROVIDE the virtual.
5776 # Filter out any such false matches here.
5777 if not atom_set.findAtomForPackage(pkg):
5781 if root == self.target_root:
5783 # Ebuild USE must have been calculated prior
5784 # to this point, in case atoms have USE deps.
5785 myarg = self._iter_atoms_for_pkg(pkg).next()
5786 except StopIteration:
5788 except portage.exception.InvalidDependString:
5790 # masked by corruption
5792 if not installed and myarg:
5793 found_available_arg = True
5795 if atom.use and not pkg.built:
5796 use = pkg.use.enabled
5797 if atom.use.enabled.difference(use):
5799 if atom.use.disabled.intersection(use):
5801 if pkg.cp == atom_cp:
5802 if highest_version is None:
5803 highest_version = pkg
5804 elif pkg > highest_version:
5805 highest_version = pkg
5806 # At this point, we've found the highest visible
5807 # match from the current repo. Any lower versions
5808 # from this repo are ignored, so this so the loop
5809 # will always end with a break statement below
5811 if find_existing_node:
5812 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5815 if portage.dep.match_from_list(atom, [e_pkg]):
5816 if highest_version and \
5817 e_pkg.cp == atom_cp and \
5818 e_pkg < highest_version and \
5819 e_pkg.slot_atom != highest_version.slot_atom:
5820 # There is a higher version available in a
5821 # different slot, so this existing node is
5825 matched_packages.append(e_pkg)
5826 existing_node = e_pkg
5828 # Compare built package to current config and
5829 # reject the built package if necessary.
5830 if built and not installed and \
5831 ("--newuse" in self.myopts or \
5832 "--reinstall" in self.myopts):
5833 iuses = pkg.iuse.all
5834 old_use = pkg.use.enabled
5836 pkgsettings.setcpv(myeb)
5838 pkgsettings.setcpv(pkg)
5839 now_use = pkgsettings["PORTAGE_USE"].split()
5840 forced_flags = set()
5841 forced_flags.update(pkgsettings.useforce)
5842 forced_flags.update(pkgsettings.usemask)
5844 if myeb and not usepkgonly:
5845 cur_iuse = myeb.iuse.all
5846 if self._reinstall_for_flags(forced_flags,
5850 # Compare current config to installed package
5851 # and do not reinstall if possible.
5852 if not installed and \
5853 ("--newuse" in self.myopts or \
5854 "--reinstall" in self.myopts) and \
5855 cpv in vardb.match(atom):
5856 pkgsettings.setcpv(pkg)
5857 forced_flags = set()
5858 forced_flags.update(pkgsettings.useforce)
5859 forced_flags.update(pkgsettings.usemask)
5860 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
5861 old_iuse = set(filter_iuse_defaults(
5862 vardb.aux_get(cpv, ["IUSE"])[0].split()))
5863 cur_use = pkgsettings["PORTAGE_USE"].split()
5864 cur_iuse = pkg.iuse.all
5865 reinstall_for_flags = \
5866 self._reinstall_for_flags(
5867 forced_flags, old_use, old_iuse,
5869 if reinstall_for_flags:
5873 matched_packages.append(pkg)
5874 if reinstall_for_flags:
5875 self._reinstall_nodes[pkg] = \
5879 if not matched_packages:
5882 if "--debug" in self.myopts:
5883 for pkg in matched_packages:
5884 portage.writemsg("%s %s\n" % \
5885 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
5887 # Filter out any old-style virtual matches if they are
5888 # mixed with new-style virtual matches.
5889 cp = portage.dep_getkey(atom)
5890 if len(matched_packages) > 1 and \
5891 "virtual" == portage.catsplit(cp)[0]:
5892 for pkg in matched_packages:
5895 # Got a new-style virtual, so filter
5896 # out any old-style virtuals.
5897 matched_packages = [pkg for pkg in matched_packages \
5901 # If the installed version is in a different slot and it is higher than
5902 # the highest available visible package, _iter_atoms_for_pkg() may fail
5903 # to properly match the available package with a corresponding argument
5904 # atom. Detect this case and correct it here.
5905 if not selective and len(matched_packages) > 1 and \
5906 matched_packages[-1].installed and \
5907 matched_packages[-1].slot_atom != \
5908 matched_packages[-2].slot_atom and \
5909 matched_packages[-1] > matched_packages[-2]:
5910 pkg = matched_packages[-2]
5911 if pkg.root == self.target_root and \
5912 self._set_atoms.findAtomForPackage(pkg):
5913 # Select the available package instead
5914 # of the installed package.
5915 matched_packages.pop()
5917 if len(matched_packages) > 1:
5918 bestmatch = portage.best(
5919 [pkg.cpv for pkg in matched_packages])
5920 matched_packages = [pkg for pkg in matched_packages \
5921 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5923 # ordered by type preference ("ebuild" type is the last resort)
5924 return matched_packages[-1], existing_node
def _select_pkg_from_graph(self, root, atom, onlydeps=False):
    """
    Select packages that have already been added to the graph or
    those that are installed and have not been scheduled for
    replacement.
    """
    graph_db = self._graph_trees[root]["porttree"].dbapi
    matches = graph_db.match(atom)
    # NOTE(review): an empty-match guard appears to be elided here
    # (matches[-1] below would raise IndexError on no match) -- confirm
    # against the upstream source.
    cpv = matches[-1] # highest match
    slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
        graph_db.aux_get(cpv, ["SLOT"])[0])
    e_pkg = self._slot_pkg_map[root].get(slot_atom)
    # NOTE(review): an early return of e_pkg when a graph package already
    # occupies this slot appears to be elided here -- confirm upstream.
    # Since this cpv exists in the graph_db,
    # we must have a cached Package instance.
    cache_key = ("installed", root, cpv, "nomerge")
    return (self._pkg_cache[cache_key], None)
def _complete_graph(self):
    """
    Add any deep dependencies of required sets (args, system, world) that
    have not been pulled into the graph yet. This ensures that the graph
    is consistent such that initially satisfied deep dependencies are not
    broken in the new graph. Initially unsatisfied dependencies are
    irrelevant since we only want to avoid breaking dependencies that are
    initially satisfied.

    Since this method can consume enough time to disturb users, it is
    currently only enabled by the --complete-graph option.
    """
    # NOTE(review): the inner line numbering of this chunk is
    # discontinuous -- elided statements (early returns, loop headers,
    # "continue"s) are flagged below rather than reconstructed.
    if "--buildpkgonly" in self.myopts or \
        "recurse" not in self.myparams:
        # (elided: early successful return)

    if "complete" not in self.myparams:
        # Skip this to avoid consuming enough time to disturb users.
        # (elided: early successful return)

    # Put the depgraph into a mode that causes it to only
    # select packages that have already been added to the
    # graph or those that are installed and have not been
    # scheduled for replacement. Also, toggle the "deep"
    # parameter so that all dependencies are traversed and
    # accounted for.
    self._select_atoms = self._select_atoms_from_graph
    self._select_package = self._select_pkg_from_graph
    already_deep = "deep" in self.myparams
    if not already_deep:
        self.myparams.add("deep")

    for root in self.roots:
        required_set_names = self._required_set_names.copy()
        if root == self.target_root and \
            (already_deep or "empty" in self.myparams):
            required_set_names.difference_update(self._sets)
        if not required_set_names and not self._ignored_deps:
            # (elided: continue to next root)
        root_config = self.roots[root]
        setconfig = root_config.setconfig
        # (elided: initialization of the local "args" list used below)
        # Reuse existing SetArg instances when available.
        for arg in self.digraph.root_nodes():
            if not isinstance(arg, SetArg):
                # (elided: continue)
            if arg.root_config != root_config:
                # (elided: continue)
            if arg.name in required_set_names:
                # (elided: args.append(arg))
                required_set_names.remove(arg.name)
        # Create new SetArg instances only when necessary.
        for s in required_set_names:
            expanded_set = InternalPackageSet(
                initial_atoms=setconfig.getSetAtoms(s))
            atom = SETPREFIX + s
            args.append(SetArg(arg=atom, set=expanded_set,
                root_config=root_config))
        vardb = root_config.trees["vartree"].dbapi
        # (elided: "for arg in args:" loop header)
            for atom in arg.set:
                self._dep_stack.append(
                    Dependency(atom=atom, root=root, parent=arg))
        if self._ignored_deps:
            self._dep_stack.extend(self._ignored_deps)
            self._ignored_deps = []
        if not self._create_graph(allow_unsatisfied=True):
            # (elided: failure return)
        # Check the unsatisfied deps to see if any initially satisfied deps
        # will become unsatisfied due to an upgrade. Initially unsatisfied
        # deps are irrelevant since we only want to avoid breaking deps
        # that are initially satisfied.
        while self._unsatisfied_deps:
            dep = self._unsatisfied_deps.pop()
            matches = vardb.match_pkgs(dep.atom)
            # (elided: empty-match guard)
                self._initially_unsatisfied_deps.append(dep)
                # (elided: continue)
            # An scheduled installation broke a deep dependency.
            # Add the installed package to the graph so that it
            # will be appropriately reported as a slot collision
            # (possibly solvable via backtracking).
            pkg = matches[-1] # highest match
            if not self._add_pkg(pkg, dep):
                # (elided: failure return)
            if not self._create_graph(allow_unsatisfied=True):
                # (elided: failure return; final success return follows)
def _pkg(self, cpv, type_name, root_config, installed=False):
    """
    Get a package instance from the cache, or create a new
    one if necessary. Raises KeyError from aux_get if it
    fails for some reason (package does not exist or is
    corrupt).
    """
    operation = "nomerge"
    pkg = self._pkg_cache.get(
        (type_name, root_config.root, cpv, operation))
    # NOTE(review): an "if pkg is None:" guard around the construction
    # below appears to be elided here -- confirm against upstream.
    tree_type = self.pkg_tree_map[type_name]
    db = root_config.trees[tree_type].dbapi
    db_keys = list(self._trees_orig[root_config.root][
        tree_type].dbapi._aux_cache_keys)
    # aux_get may raise KeyError; izip pairs keys with values lazily.
    metadata = izip(db_keys, db.aux_get(cpv, db_keys))
    pkg = Package(cpv=cpv, metadata=metadata,
        root_config=root_config, installed=installed)
    if type_name == "ebuild":
        # Ebuild USE must be calculated against the current config.
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(pkg)
        pkg.metadata["USE"] = settings["PORTAGE_USE"]
    self._pkg_cache[pkg] = pkg
    # (elided: "return pkg")
def validate_blockers(self):
    """Remove any blockers from the digraph that do not match any of the
    packages within the graph. If necessary, create hard deps to ensure
    correct merge order such that mutually blocking packages are never
    installed simultaneously."""
    # NOTE(review): this chunk has many elided lines (discontinuous inner
    # numbering). Missing control-flow statements are flagged inline and
    # should be confirmed against the upstream source.

    if "--buildpkgonly" in self.myopts or \
        "--nodeps" in self.myopts:
        # (elided: early successful return)

    #if "deep" in self.myparams:

    # Pull in blockers from all installed packages that haven't already
    # been pulled into the depgraph. This is not enabled by default
    # due to the performance penalty that is incurred by all the
    # additional dep_check calls that are required.

    dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
    for myroot in self.trees:
        vardb = self.trees[myroot]["vartree"].dbapi
        portdb = self.trees[myroot]["porttree"].dbapi
        pkgsettings = self.pkgsettings[myroot]
        final_db = self.mydbapi[myroot]

        blocker_cache = BlockerCache(myroot, vardb)
        stale_cache = set(blocker_cache)
        # (elided: loop over installed packages, presumably
        #  "for pkg in vardb:" with "cpv = pkg.cpv" -- confirm upstream)
            stale_cache.discard(cpv)
            pkg_in_graph = self.digraph.contains(pkg)

            # Check for masked installed packages. Only warn about
            # packages that are in the graph in order to avoid warning
            # about those that will be automatically uninstalled during
            # the merge process or by --depclean.
            if pkg_in_graph and not visible(pkgsettings, pkg):
                self._masked_installed.add(pkg)

            blocker_atoms = None
            # (elided: gathering of this package's blockers from the
            #  graph; only the continuation lines survive)
                self._blocker_parents.child_nodes(pkg))
                self._irrelevant_blockers.child_nodes(pkg))
            if blockers is not None:
                blockers = set(str(blocker.atom) \
                    for blocker in blockers)

            # If this node has any blockers, create a "nomerge"
            # node for it so that they can be enforced.
            self.spinner.update()
            blocker_data = blocker_cache.get(cpv)
            if blocker_data is not None and \
                blocker_data.counter != long(pkg.metadata["COUNTER"]):
                # (elided: stale cache entry discarded, e.g.
                #  "blocker_data = None")

            # If blocker data from the graph is available, use
            # it to validate the cache and update the cache if
            # it seems invalid.
            if blocker_data is not None and \
                blockers is not None:
                if not blockers.symmetric_difference(
                    blocker_data.atoms):
                    # (elided: cache is valid -- continue)

            if blocker_data is None and \
                blockers is not None:
                # Re-use the blockers from the graph.
                blocker_atoms = sorted(blockers)
                counter = long(pkg.metadata["COUNTER"])
                # (elided: "blocker_data = \" assignment head)
                    blocker_cache.BlockerData(counter, blocker_atoms)
                blocker_cache[pkg.cpv] = blocker_data
                # (elided: continue)

            # (elided: "if blocker_data:" branch header)
                blocker_atoms = blocker_data.atoms
            # (elided: "else:" branch -- compute blockers via dep_check)
                # Use aux_get() to trigger FakeVartree global
                # updates on *DEPEND when appropriate.
                depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                # It is crucial to pass in final_db here in order to
                # optimize dep_check calls by eliminating atoms via
                # dep_wordreduce and dep_eval calls.
                # (elided: outer "try:" that pairs with the strict-flag
                #  restore below)
                portage.dep._dep_check_strict = False
                # (elided: inner "try:" that pairs with the except below)
                success, atoms = portage.dep_check(depstr,
                    final_db, pkgsettings, myuse=pkg.use.enabled,
                    trees=self._graph_trees, myroot=myroot)
                except Exception, e:
                    if isinstance(e, SystemExit):
                        # (elided: raise)
                    # This is helpful, for example, if a ValueError
                    # is thrown from cpv_expand due to multiple
                    # matches (this can happen if an atom lacks a
                    # category).
                    show_invalid_depstring_notice(
                        pkg, depstr, str(e))
                    # (elided: del e; return False)
                # (elided: "finally:" restoring the strict flag)
                portage.dep._dep_check_strict = True
                # (elided: "if not success:" guard around the next lines)
                replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
                if replacement_pkg and \
                    replacement_pkg[0].operation == "merge":
                    # This package is being replaced anyway, so
                    # ignore invalid dependencies so as not to
                    # annoy the user too much (otherwise they'd be
                    # forced to manually unmerge it first).
                    # (elided: continue)
                show_invalid_depstring_notice(pkg, depstr, atoms)
                # (elided: return False)
                blocker_atoms = [myatom for myatom in atoms \
                    if myatom.startswith("!")]
                blocker_atoms.sort()
                counter = long(pkg.metadata["COUNTER"])
                blocker_cache[cpv] = \
                    blocker_cache.BlockerData(counter, blocker_atoms)
            # (elided: "if blocker_atoms:" guard and "try:" pairing with
            #  the InvalidAtom handler below)
            for atom in blocker_atoms:
                blocker = Blocker(atom=portage.dep.Atom(atom),
                    eapi=pkg.metadata["EAPI"], root=myroot)
                self._blocker_parents.add(blocker, pkg)
            except portage.exception.InvalidAtom, e:
                depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                show_invalid_depstring_notice(
                    pkg, depstr, "Invalid Atom: %s" % (e,))
                # (elided: return False)
        # Drop cache entries for packages no longer installed.
        for cpv in stale_cache:
            del blocker_cache[cpv]
        blocker_cache.flush()

    # Discard any "uninstall" tasks scheduled by previous calls
    # to this method, since those tasks may not make sense given
    # the current graph state.
    previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
    if previous_uninstall_tasks:
        self._blocker_uninstalls = digraph()
        self.digraph.difference_update(previous_uninstall_tasks)

    for blocker in self._blocker_parents.leaf_nodes():
        self.spinner.update()
        root_config = self.roots[blocker.root]
        virtuals = root_config.settings.getvirtuals()
        myroot = blocker.root
        initial_db = self.trees[myroot]["vartree"].dbapi
        final_db = self.mydbapi[myroot]

        provider_virtual = False
        if blocker.cp in virtuals and \
            not self._have_new_virt(blocker.root, blocker.cp):
            provider_virtual = True

        if provider_virtual:
            # (elided: "atoms = []")
            for provider_entry in virtuals[blocker.cp]:
                # (elided: "provider_cp = \" assignment head)
                    portage.dep_getkey(provider_entry)
                atoms.append(blocker.atom.replace(
                    blocker.cp, provider_cp))
        # (elided: "else:")
            atoms = [blocker.atom]

        blocked_initial = []
        # (elided: "for atom in atoms:")
            blocked_initial.extend(initial_db.match_pkgs(atom))
        # (elided: "blocked_final = []" and "for atom in atoms:")
            blocked_final.extend(final_db.match_pkgs(atom))

        if not blocked_initial and not blocked_final:
            parent_pkgs = self._blocker_parents.parent_nodes(blocker)
            self._blocker_parents.remove(blocker)
            # Discard any parents that don't have any more blockers.
            for pkg in parent_pkgs:
                self._irrelevant_blockers.add(blocker, pkg)
                if not self._blocker_parents.child_nodes(pkg):
                    self._blocker_parents.remove(pkg)
            # (elided: continue)
        for parent in self._blocker_parents.parent_nodes(blocker):
            unresolved_blocks = False
            depends_on_order = set()
            for pkg in blocked_initial:
                if pkg.slot_atom == parent.slot_atom:
                    # TODO: Support blocks within slots in cases where it
                    # might make sense. For example, a new version might
                    # require that the old version be uninstalled at build
                    # time.
                    # (elided: continue)
                if parent.installed:
                    # Two currently installed packages conflict with
                    # eachother. Ignore this case since the damage
                    # is already done and this would be likely to
                    # confuse users if displayed like a normal blocker.
                    # (elided: continue)
                if parent.operation == "merge":
                    # Maybe the blocked package can be replaced or simply
                    # unmerged to resolve this block.
                    depends_on_order.add((pkg, parent))
                    # (elided: continue)
                # None of the above blocker resolutions techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True
            for pkg in blocked_final:
                if pkg.slot_atom == parent.slot_atom:
                    # TODO: Support blocks within slots.
                    # (elided: continue)
                if parent.operation == "nomerge" and \
                    pkg.operation == "nomerge":
                    # This blocker will be handled the next time that a
                    # merge of either package is triggered.
                    # (elided: continue)

                # Maybe the blocking package can be
                # unmerged to resolve this block.
                if parent.operation == "merge" and pkg.installed:
                    depends_on_order.add((pkg, parent))
                    # (elided: continue)
                elif parent.operation == "nomerge":
                    depends_on_order.add((parent, pkg))
                    # (elided: continue)
                # None of the above blocker resolutions techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True

        # Make sure we don't unmerge any package that have been pulled
        # into the graph.
            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    if self.digraph.contains(inst_pkg) and \
                        self.digraph.parent_nodes(inst_pkg):
                        unresolved_blocks = True
                        # (elided: break)

            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    uninst_task = Package(built=inst_pkg.built,
                        cpv=inst_pkg.cpv, installed=inst_pkg.installed,
                        metadata=inst_pkg.metadata,
                        operation="uninstall",
                        root_config=inst_pkg.root_config,
                        type_name=inst_pkg.type_name)
                    self._pkg_cache[uninst_task] = uninst_task
                    # Enforce correct merge order with a hard dep.
                    self.digraph.addnode(uninst_task, inst_task,
                        priority=BlockerDepPriority.instance)
                    # Count references to this blocker so that it can be
                    # invalidated after nodes referencing it have been
                    # merged.
                    self._blocker_uninstalls.addnode(uninst_task, blocker)
            if not unresolved_blocks and not depends_on_order:
                self._irrelevant_blockers.add(blocker, parent)
                self._blocker_parents.remove_edge(blocker, parent)
                if not self._blocker_parents.parent_nodes(blocker):
                    self._blocker_parents.remove(blocker)
                if not self._blocker_parents.child_nodes(parent):
                    self._blocker_parents.remove(parent)
            if unresolved_blocks:
                self._unsolvable_blockers.add(blocker, parent)
    # (elided: final successful return)
def _accept_blocker_conflicts(self):
    """Return whether unresolved blocker conflicts are tolerable for the
    current option set (dry-run / fetch-only style modes)."""
    # (elided: initialization of the result flag, e.g. "acceptable = False")
    for x in ("--buildpkgonly", "--fetchonly",
        "--fetch-all-uri", "--nodeps", "--pretend"):
        if x in self.myopts:
            # (elided: set flag and break; the method's return follows)
6349 def _merge_order_bias(self, mygraph):
6350 """Order nodes from highest to lowest overall reference count for
6351 optimal leaf node selection."""
6353 for node in mygraph.order:
6354 node_info[node] = len(mygraph.parent_nodes(node))
6355 def cmp_merge_preference(node1, node2):
6356 return node_info[node2] - node_info[node1]
6357 mygraph.order.sort(cmp_merge_preference)
def altlist(self, reversed=False):
    """Return a copy of the serialized merge list, computing and caching
    it on first use.  NOTE: the "reversed" parameter shadows the builtin;
    kept as-is for interface compatibility."""
    while self._serialized_tasks_cache is None:
        self._resolve_conflicts()
        # (elided: "try:" pairing with the except clause below)
        self._serialized_tasks_cache, self._scheduler_graph = \
            self._serialize_tasks()
        except self._serialize_tasks_retry:
            # (elided: pass -- loop again after a retry request)
    retlist = self._serialized_tasks_cache[:]
    # (elided: reversal of retlist when requested, then "return retlist")
def schedulerGraph(self):
    """
    The scheduler graph is identical to the normal one except that
    uninstall edges are reversed in specific cases that require
    conflicting packages to be temporarily installed simultaneously.
    This is intended for use by the Scheduler in it's parallelization
    logic. It ensures that temporary simultaneous installation of
    conflicting packages is avoided when appropriate (especially for
    !!atom blockers), but allowed in specific cases that require it.

    Note that this method calls break_refs() which alters the state of
    internal Package instances such that this depgraph instance should
    not be used to perform any more calculations.
    """
    if self._scheduler_graph is None:
        # (elided: computation of the graph, presumably via altlist())
    self.break_refs(self._scheduler_graph.order)
    return self._scheduler_graph
def break_refs(self, nodes):
    """
    Take a mergelist like that returned from self.altlist() and
    break any references that lead back to the depgraph. This is
    useful if you want to hold references to packages without
    also holding the depgraph on the heap.
    """
    # (elided: "for node in nodes:" loop header)
        if hasattr(node, "root_config"):
            # The FakeVartree references the _package_cache which
            # references the depgraph. So that Package instances don't
            # hold the depgraph and FakeVartree on the heap, replace
            # the RootConfig that references the FakeVartree with the
            # original RootConfig instance which references the actual
            # vartree.
            node.root_config = \
                self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
    """Finalize the graph and validate blockers, then resolve any slot
    collisions that were discovered along the way.

    Raises _unknown_internal_error if either the graph completion or the
    blocker validation step reports failure.
    """
    # Order matters: the graph must be completed before blockers can be
    # validated against it.
    for step in (self._complete_graph, self.validate_blockers):
        if not step():
            raise self._unknown_internal_error()

    if self._slot_collision_info:
        self._process_slot_conflicts()
6421 def _serialize_tasks(self):
6422 scheduler_graph = self.digraph.copy()
6423 mygraph=self.digraph.copy()
6424 # Prune "nomerge" root nodes if nothing depends on them, since
6425 # otherwise they slow down merge order calculation. Don't remove
6426 # non-root nodes since they help optimize merge order in some cases
6427 # such as revdep-rebuild.
6428 removed_nodes = set()
6430 for node in mygraph.root_nodes():
6431 if not isinstance(node, Package) or \
6432 node.installed or node.onlydeps:
6433 removed_nodes.add(node)
6435 self.spinner.update()
6436 mygraph.difference_update(removed_nodes)
6437 if not removed_nodes:
6439 removed_nodes.clear()
6440 self._merge_order_bias(mygraph)
6441 def cmp_circular_bias(n1, n2):
6443 RDEPEND is stronger than PDEPEND and this function
6444 measures such a strength bias within a circular
6445 dependency relationship.
6447 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6448 ignore_priority=DepPriority.MEDIUM_SOFT)
6449 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6450 ignore_priority=DepPriority.MEDIUM_SOFT)
6451 if n1_n2_medium == n2_n1_medium:
6456 myblocker_uninstalls = self._blocker_uninstalls.copy()
6458 # Contains uninstall tasks that have been scheduled to
6459 # occur after overlapping blockers have been installed.
6460 scheduled_uninstalls = set()
6461 # Contains any Uninstall tasks that have been ignored
6462 # in order to avoid the circular deps code path. These
6463 # correspond to blocker conflicts that could not be
6465 ignored_uninstall_tasks = set()
6466 have_uninstall_task = False
6467 complete = "complete" in self.myparams
6468 myblocker_parents = self._blocker_parents.copy()
6471 def get_nodes(**kwargs):
6473 Returns leaf nodes excluding Uninstall instances
6474 since those should be executed as late as possible.
6476 return [node for node in mygraph.leaf_nodes(**kwargs) \
6477 if isinstance(node, Package) and \
6478 (node.operation != "uninstall" or \
6479 node in scheduled_uninstalls)]
6481 # sys-apps/portage needs special treatment if ROOT="/"
6482 running_root = self._running_root.root
6483 from portage.const import PORTAGE_PACKAGE_ATOM
6484 runtime_deps = InternalPackageSet(
6485 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6486 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6487 PORTAGE_PACKAGE_ATOM)
6488 replacement_portage = self.mydbapi[running_root].match_pkgs(
6489 PORTAGE_PACKAGE_ATOM)
6492 running_portage = running_portage[0]
6494 running_portage = None
6496 if replacement_portage:
6497 replacement_portage = replacement_portage[0]
6499 replacement_portage = None
6501 if replacement_portage == running_portage:
6502 replacement_portage = None
6504 if replacement_portage is not None:
6505 # update from running_portage to replacement_portage asap
6506 asap_nodes.append(replacement_portage)
6508 if running_portage is not None:
6510 portage_rdepend = self._select_atoms_highest_available(
6511 running_root, running_portage.metadata["RDEPEND"],
6512 myuse=running_portage.use.enabled,
6513 parent=running_portage, strict=False)
6514 except portage.exception.InvalidDependString, e:
6515 portage.writemsg("!!! Invalid RDEPEND in " + \
6516 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6517 (running_root, running_portage.cpv, e), noiselevel=-1)
6519 portage_rdepend = []
6520 runtime_deps.update(atom for atom in portage_rdepend \
6521 if not atom.startswith("!"))
6523 ignore_priority_soft_range = [None]
6524 ignore_priority_soft_range.extend(
6525 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6526 tree_mode = "--tree" in self.myopts
6527 # Tracks whether or not the current iteration should prefer asap_nodes
6528 # if available. This is set to False when the previous iteration
6529 # failed to select any nodes. It is reset whenever nodes are
6530 # successfully selected.
6533 # By default, try to avoid selecting root nodes whenever possible. This
6534 # helps ensure that the maximimum possible number of soft dependencies
6535 # have been removed from the graph before their parent nodes have
6536 # selected. This is especially important when those dependencies are
6537 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6538 # CHOST has been changed (like when building a stage3 from a stage2).
6539 accept_root_node = False
6541 # State of prefer_asap and accept_root_node flags for successive
6542 # iterations that loosen the criteria for node selection.
6544 # iteration prefer_asap accept_root_node
6549 # If no nodes are selected on the 3rd iteration, it is due to
6550 # unresolved blockers or circular dependencies.
6552 while not mygraph.empty():
6553 self.spinner.update()
6554 selected_nodes = None
6555 ignore_priority = None
6556 if prefer_asap and asap_nodes:
6557 """ASAP nodes are merged before their soft deps."""
6558 asap_nodes = [node for node in asap_nodes \
6559 if mygraph.contains(node)]
6560 for node in asap_nodes:
6561 if not mygraph.child_nodes(node,
6562 ignore_priority=DepPriority.SOFT):
6563 selected_nodes = [node]
6564 asap_nodes.remove(node)
6566 if not selected_nodes and \
6567 not (prefer_asap and asap_nodes):
6568 for ignore_priority in ignore_priority_soft_range:
6569 nodes = get_nodes(ignore_priority=ignore_priority)
6573 if ignore_priority is None and not tree_mode:
6574 # Greedily pop all of these nodes since no relationship
6575 # has been ignored. This optimization destroys --tree
6576 # output, so it's disabled in reversed mode. If there
6577 # is a mix of merge and uninstall nodes, save the
6578 # uninstall nodes from later since sometimes a merge
6579 # node will render an install node unnecessary, and
6580 # we want to avoid doing a separate uninstall task in
6582 merge_nodes = [node for node in nodes \
6583 if node.operation == "merge"]
6585 selected_nodes = merge_nodes
6587 selected_nodes = nodes
6589 # For optimal merge order:
6590 # * Only pop one node.
6591 # * Removing a root node (node without a parent)
6592 # will not produce a leaf node, so avoid it.
6594 if mygraph.parent_nodes(node):
6595 # found a non-root node
6596 selected_nodes = [node]
6598 if not selected_nodes and \
6599 (accept_root_node or ignore_priority is None):
6600 # settle for a root node
6601 selected_nodes = [nodes[0]]
6603 if not selected_nodes:
6604 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6606 """Recursively gather a group of nodes that RDEPEND on
6607 eachother. This ensures that they are merged as a group
6608 and get their RDEPENDs satisfied as soon as possible."""
6609 def gather_deps(ignore_priority,
6610 mergeable_nodes, selected_nodes, node):
6611 if node in selected_nodes:
6613 if node not in mergeable_nodes:
6615 if node == replacement_portage and \
6616 mygraph.child_nodes(node,
6617 ignore_priority=DepPriority.MEDIUM_SOFT):
6618 # Make sure that portage always has all of it's
6619 # RDEPENDs installed first.
6621 selected_nodes.add(node)
6622 for child in mygraph.child_nodes(node,
6623 ignore_priority=ignore_priority):
6624 if not gather_deps(ignore_priority,
6625 mergeable_nodes, selected_nodes, child):
6628 mergeable_nodes = set(nodes)
6629 if prefer_asap and asap_nodes:
6631 for ignore_priority in xrange(DepPriority.SOFT,
6632 DepPriority.MEDIUM_SOFT + 1):
6634 if nodes is not asap_nodes and \
6635 not accept_root_node and \
6636 not mygraph.parent_nodes(node):
6638 selected_nodes = set()
6639 if gather_deps(ignore_priority,
6640 mergeable_nodes, selected_nodes, node):
6643 selected_nodes = None
6647 # If any nodes have been selected here, it's always
6648 # possible that anything up to a MEDIUM_SOFT priority
6649 # relationship has been ignored. This state is recorded
6650 # in ignore_priority so that relevant nodes will be
6651 # added to asap_nodes when appropriate.
6653 ignore_priority = DepPriority.MEDIUM_SOFT
6655 if prefer_asap and asap_nodes and not selected_nodes:
6656 # We failed to find any asap nodes to merge, so ignore
6657 # them for the next iteration.
6661 if not selected_nodes and not accept_root_node:
6662 # Maybe there are only root nodes left, so accept them
6663 # for the next iteration.
6664 accept_root_node = True
6667 if selected_nodes and ignore_priority > DepPriority.SOFT:
6668 # Try to merge ignored medium deps as soon as possible.
6669 for node in selected_nodes:
6670 children = set(mygraph.child_nodes(node))
6671 soft = children.difference(
6672 mygraph.child_nodes(node,
6673 ignore_priority=DepPriority.SOFT))
6674 medium_soft = children.difference(
6675 mygraph.child_nodes(node,
6676 ignore_priority=DepPriority.MEDIUM_SOFT))
6677 medium_soft.difference_update(soft)
6678 for child in medium_soft:
6679 if child in selected_nodes:
6681 if child in asap_nodes:
6683 asap_nodes.append(child)
6685 if selected_nodes and len(selected_nodes) > 1:
6686 if not isinstance(selected_nodes, list):
6687 selected_nodes = list(selected_nodes)
6688 selected_nodes.sort(cmp_circular_bias)
6690 if not selected_nodes and not myblocker_uninstalls.is_empty():
6691 # An Uninstall task needs to be executed in order to
6692 # avoid conflict if possible.
6693 min_parent_deps = None
6695 for task in myblocker_uninstalls.leaf_nodes():
6696 # Do some sanity checks so that system or world packages
6697 # don't get uninstalled inappropriately here (only really
6698 # necessary when --complete-graph has not been enabled).
6700 if task in ignored_uninstall_tasks:
6703 if task in scheduled_uninstalls:
6704 # It's been scheduled but it hasn't
6705 # been executed yet due to dependence
6706 # on installation of blocking packages.
6709 root_config = self.roots[task.root]
6710 inst_pkg = self._pkg_cache[
6711 ("installed", task.root, task.cpv, "nomerge")]
6713 if self.digraph.contains(inst_pkg):
6716 forbid_overlap = False
6717 heuristic_overlap = False
6718 for blocker in myblocker_uninstalls.parent_nodes(task):
6719 if blocker.eapi in ("0", "1"):
6720 heuristic_overlap = True
6721 elif blocker.atom.blocker.overlap.forbid:
6722 forbid_overlap = True
6724 if forbid_overlap and running_root == task.root:
6727 if heuristic_overlap and running_root == task.root:
6728 # Never uninstall sys-apps/portage or it's essential
6729 # dependencies, except through replacement.
6731 runtime_dep_atoms = \
6732 list(runtime_deps.iterAtomsForPackage(task))
6733 except portage.exception.InvalidDependString, e:
6734 portage.writemsg("!!! Invalid PROVIDE in " + \
6735 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6736 (task.root, task.cpv, e), noiselevel=-1)
6740 # Don't uninstall a runtime dep if it appears
6741 # to be the only suitable one installed.
6743 vardb = root_config.trees["vartree"].dbapi
6744 for atom in runtime_dep_atoms:
6745 other_version = None
6746 for pkg in vardb.match_pkgs(atom):
6747 if pkg.cpv == task.cpv and \
6748 pkg.metadata["COUNTER"] == \
6749 task.metadata["COUNTER"]:
6753 if other_version is None:
6759 # For packages in the system set, don't take
6760 # any chances. If the conflict can't be resolved
6761 # by a normal replacement operation then abort.
6764 for atom in root_config.sets[
6765 "system"].iterAtomsForPackage(task):
6768 except portage.exception.InvalidDependString, e:
6769 portage.writemsg("!!! Invalid PROVIDE in " + \
6770 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6771 (task.root, task.cpv, e), noiselevel=-1)
6777 # Note that the world check isn't always
6778 # necessary since self._complete_graph() will
6779 # add all packages from the system and world sets to the
6780 # graph. This just allows unresolved conflicts to be
6781 # detected as early as possible, which makes it possible
6782 # to avoid calling self._complete_graph() when it is
6783 # unnecessary due to blockers triggering an abortion.
6785 # For packages in the world set, go ahead an uninstall
6786 # when necessary, as long as the atom will be satisfied
6787 # in the final state.
6788 graph_db = self.mydbapi[task.root]
6791 for atom in root_config.sets[
6792 "world"].iterAtomsForPackage(task):
6794 for pkg in graph_db.match_pkgs(atom):
6802 except portage.exception.InvalidDependString, e:
6803 portage.writemsg("!!! Invalid PROVIDE in " + \
6804 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6805 (task.root, task.cpv, e), noiselevel=-1)
6811 # Check the deps of parent nodes to ensure that
6812 # the chosen task produces a leaf node. Maybe
6813 # this can be optimized some more to make the
6814 # best possible choice, but the current algorithm
6815 # is simple and should be near optimal for most
6818 for parent in mygraph.parent_nodes(task):
6819 parent_deps.update(mygraph.child_nodes(parent,
6820 ignore_priority=DepPriority.MEDIUM_SOFT))
6821 parent_deps.remove(task)
6822 if min_parent_deps is None or \
6823 len(parent_deps) < min_parent_deps:
6824 min_parent_deps = len(parent_deps)
6827 if uninst_task is not None:
6828 # The uninstall is performed only after blocking
6829 # packages have been merged on top of it. File
6830 # collisions between blocking packages are detected
6831 # and removed from the list of files to be uninstalled.
6832 scheduled_uninstalls.add(uninst_task)
6833 parent_nodes = mygraph.parent_nodes(uninst_task)
6835 # Reverse the parent -> uninstall edges since we want
6836 # to do the uninstall after blocking packages have
6837 # been merged on top of it.
6838 mygraph.remove(uninst_task)
6839 for blocked_pkg in parent_nodes:
6840 mygraph.add(blocked_pkg, uninst_task,
6841 priority=BlockerDepPriority.instance)
6842 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6843 scheduler_graph.add(blocked_pkg, uninst_task,
6844 priority=BlockerDepPriority.instance)
6847 # None of the Uninstall tasks are acceptable, so
6848 # the corresponding blockers are unresolvable.
6849 # We need to drop an Uninstall task here in order
6850 # to avoid the circular deps code path, but the
6851 # blocker will still be counted as an unresolved
6853 for node in myblocker_uninstalls.leaf_nodes():
6855 mygraph.remove(node)
6859 ignored_uninstall_tasks.add(node)
6862 # After dropping an Uninstall task, reset
6863 # the state variables for leaf node selection and
6864 # continue trying to select leaf nodes.
6866 accept_root_node = False
6869 if not selected_nodes:
6870 self._circular_deps_for_display = mygraph
6871 raise self._unknown_internal_error()
6873 # At this point, we've succeeded in selecting one or more nodes, so
6874 # it's now safe to reset the prefer_asap and accept_root_node flags
6875 # to their default states.
6877 accept_root_node = False
6879 mygraph.difference_update(selected_nodes)
6881 for node in selected_nodes:
6882 if isinstance(node, Package) and \
6883 node.operation == "nomerge":
6886 # Handle interactions between blockers
6887 # and uninstallation tasks.
6888 solved_blockers = set()
6890 if isinstance(node, Package) and \
6891 "uninstall" == node.operation:
6892 have_uninstall_task = True
6895 vardb = self.trees[node.root]["vartree"].dbapi
6896 previous_cpv = vardb.match(node.slot_atom)
6898 # The package will be replaced by this one, so remove
6899 # the corresponding Uninstall task if necessary.
6900 previous_cpv = previous_cpv[0]
6902 ("installed", node.root, previous_cpv, "uninstall")
6904 mygraph.remove(uninst_task)
6908 if uninst_task is not None and \
6909 uninst_task not in ignored_uninstall_tasks and \
6910 myblocker_uninstalls.contains(uninst_task):
6911 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6912 myblocker_uninstalls.remove(uninst_task)
6913 # Discard any blockers that this Uninstall solves.
6914 for blocker in blocker_nodes:
6915 if not myblocker_uninstalls.child_nodes(blocker):
6916 myblocker_uninstalls.remove(blocker)
6917 solved_blockers.add(blocker)
6919 retlist.append(node)
6921 if (isinstance(node, Package) and \
6922 "uninstall" == node.operation) or \
6923 (uninst_task is not None and \
6924 uninst_task in scheduled_uninstalls):
6925 # Include satisfied blockers in the merge list
6926 # since the user might be interested and also
6927 # it serves as an indicator that blocking packages
6928 # will be temporarily installed simultaneously.
6929 for blocker in solved_blockers:
6930 retlist.append(Blocker(atom=blocker.atom,
6931 root=blocker.root, eapi=blocker.eapi,
6934 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
6935 for node in myblocker_uninstalls.root_nodes():
6936 unsolvable_blockers.add(node)
6938 for blocker in unsolvable_blockers:
6939 retlist.append(blocker)
6941 # If any Uninstall tasks need to be executed in order
6942 # to avoid a conflict, complete the graph with any
6943 # dependencies that may have been initially
6944 # neglected (to ensure that unsafe Uninstall tasks
6945 # are properly identified and blocked from execution).
6946 if have_uninstall_task and \
6948 not unsolvable_blockers:
6949 self.myparams.add("complete")
6950 raise self._serialize_tasks_retry("")
6952 if unsolvable_blockers and \
6953 not self._accept_blocker_conflicts():
6954 self._unsatisfied_blockers_for_display = unsolvable_blockers
6955 self._serialized_tasks_cache = retlist[:]
6956 self._scheduler_graph = scheduler_graph
6957 raise self._unknown_internal_error()
6959 if self._slot_collision_info and \
6960 not self._accept_blocker_conflicts():
6961 self._serialized_tasks_cache = retlist[:]
6962 self._scheduler_graph = scheduler_graph
6963 raise self._unknown_internal_error()
6965 return retlist, scheduler_graph
6967 def _show_circular_deps(self, mygraph):
# Report a circular-dependency panic: prune acyclic root nodes from the
# graph, display the remaining (cyclic) nodes with --tree and USE info,
# then dump the raw graph for debugging.
# NOTE(review): this listing is elided -- original lines 6972, 6975-6976,
# 6981, 6985, 6987-6988 and 7000 are missing, so the pruning loop header,
# the display_order initialization, and parts of the leaf-node selection
# are not visible here. Comments below describe only the visible code.
6968 # No leaf nodes are available, so we have a circular
6969 # dependency panic situation. Reduce the noise level to a
6970 # minimum via repeated elimination of root nodes since they
6971 # have no parents and thus can not be part of a cycle.
6973 root_nodes = mygraph.root_nodes(
6974 ignore_priority=DepPriority.MEDIUM_SOFT)
# Presumably executed inside a loop until no prunable roots remain --
# the loop header is among the elided lines. TODO confirm.
6977 mygraph.difference_update(root_nodes)
6978 # Display the USE flags that are enabled on nodes that are part
6979 # of dependency cycles in case that helps the user decide to
6980 # disable some of them.
6982 tempgraph = mygraph.copy()
# Peel nodes off a scratch copy to compute a display order without
# mutating the (already pruned) real graph.
6983 while not tempgraph.empty():
6984 nodes = tempgraph.leaf_nodes()
# Fallback when no leaf exists (pure cycle): take the first node in
# insertion order. The guarding conditional (orig. 6985) is elided.
6986 node = tempgraph.order[0]
6989 display_order.append(node)
6990 tempgraph.remove(node)
6991 display_order.reverse()
# Force a full tree display regardless of the user's verbosity options;
# this mutates self.myopts in place for the rest of the run.
6992 self.myopts.pop("--quiet", None)
6993 self.myopts.pop("--verbose", None)
6994 self.myopts["--tree"] = True
6995 portage.writemsg("\n\n", noiselevel=-1)
6996 self.display(display_order)
6997 prefix = colorize("BAD", " * ")
6998 portage.writemsg("\n", noiselevel=-1)
# The closing argument line of this call (orig. 7000, presumably
# noiselevel=-1) is elided.
6999 portage.writemsg(prefix + "Error: circular dependencies:\n",
7001 portage.writemsg("\n", noiselevel=-1)
7002 mygraph.debug_print()
7003 portage.writemsg("\n", noiselevel=-1)
7004 portage.writemsg(prefix + "Note that circular dependencies " + \
7005 "can often be avoided by temporarily\n", noiselevel=-1)
7006 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7007 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """Display the cached serialized task list, unless that exact list
    (or its reverse, which --tree produces) has already been shown."""
    tasks = self._serialized_tasks_cache
    if tasks is None:
        return
    shown = self._displayed_list
    if shown and (shown == tasks or shown == list(reversed(tasks))):
        # Already on screen in one orientation or the other.
        return
    display_list = tasks[:]
    if "--tree" in self.myopts:
        # Tree mode prints dependencies above their dependents.
        display_list.reverse()
    self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """Show the merge list, then explain that it contains packages that
    block each other and therefore cannot be co-installed."""
    self._show_merge_list()
    from textwrap import wrap
    text = ("Error: The above package list contains "
        "packages which cannot be installed "
        "at the same time on the same system.")
    marker = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    # Wrap the message at 70 columns, prefixing each line with the
    # highlighted " * " marker.
    for chunk in wrap(text, 70):
        portage.writemsg("%s%s\n" % (marker, chunk), noiselevel=-1)
    if "--quiet" not in self.myopts:
        show_blocker_docs_link()
7033 def display(self, mylist, favorites=[], verbosity=None):
# Render the merge list 'mylist' (packages, blockers, and --tree tuples)
# to stdout, updating per-category counters and changelog/repo displays.
# NOTE(review): mutable default argument 'favorites=[]' is shared across
# calls -- a None sentinel would be safer (cannot be changed here without
# a full rewrite of this elided listing).
# NOTE(review): this listing is heavily elided (e.g. original lines 7034,
# 7047-7050, 7062-7068, 7124-7126, 7180-7189, 7323-7328, 7354-7356 are
# missing), so several branches below appear without their headers.
# Comments describe only what the visible code shows.
7035 # This is used to prevent display_problems() from
7036 # redundantly displaying this exact same merge list
7037 # again via _show_merge_list().
7038 self._displayed_list = mylist
# Derive verbosity from options via a Python 2 'and/or' chain:
# --quiet -> 1, --verbose -> 3, default -> 2.
7040 if verbosity is None:
7041 verbosity = ("--quiet" in self.myopts and 1 or \
7042 "--verbose" in self.myopts and 3 or 2)
7043 favorites_set = InternalPackageSet(favorites)
7044 oneshot = "--oneshot" in self.myopts or \
7045 "--onlydeps" in self.myopts
7046 columns = "--columns" in self.myopts
7051 counters = PackageCounters()
# Two alternative create_use_string definitions: a no-op stub for quiet
# mode, and the full USE-flag formatter otherwise.
7053 if verbosity == 1 and "--verbose" not in self.myopts:
7054 def create_use_string(*args):
7057 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7059 is_new, reinst_flags,
7060 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7061 alphabetical=("--alphabetical" in self.myopts)):
7069 cur_iuse = set(cur_iuse)
7070 enabled_flags = cur_iuse.intersection(cur_use)
7071 removed_iuse = set(old_iuse).difference(cur_iuse)
7072 any_iuse = cur_iuse.union(old_iuse)
7073 any_iuse = list(any_iuse)
7075 for flag in any_iuse:
7078 reinst_flag = reinst_flags and flag in reinst_flags
# Color coding: red = newly enabled, yellow+% = new/removed IUSE,
# green+* = toggled since the installed version.
7079 if flag in enabled_flags:
7081 if is_new or flag in old_use and \
7082 (all_flags or reinst_flag):
7083 flag_str = red(flag)
7084 elif flag not in old_iuse:
7085 flag_str = yellow(flag) + "%*"
7086 elif flag not in old_use:
7087 flag_str = green(flag) + "*"
7088 elif flag in removed_iuse:
7089 if all_flags or reinst_flag:
7090 flag_str = yellow("-" + flag) + "%"
# Parenthesize flags the user cannot change (use.force/use.mask).
7093 flag_str = "(" + flag_str + ")"
7094 removed.append(flag_str)
7097 if is_new or flag in old_iuse and \
7098 flag not in old_use and \
7099 (all_flags or reinst_flag):
7100 flag_str = blue("-" + flag)
7101 elif flag not in old_iuse:
7102 flag_str = yellow("-" + flag)
7103 if flag not in iuse_forced:
7105 elif flag in old_use:
7106 flag_str = green("-" + flag) + "*"
7108 if flag in iuse_forced:
7109 flag_str = "(" + flag_str + ")"
7111 enabled.append(flag_str)
7113 disabled.append(flag_str)
7116 ret = " ".join(enabled)
7118 ret = " ".join(enabled + disabled + removed)
7120 ret = '%s="%s" ' % (name, ret)
7123 repo_display = RepoDisplay(self.roots)
# Work on a copy of the dep graph so the --tree rendering below can
# freely add/remove edges.
7127 mygraph = self.digraph.copy()
7129 # If there are any Uninstall instances, add the corresponding
7130 # blockers to the digraph (useful for --tree display).
7132 executed_uninstalls = set(node for node in mylist \
7133 if isinstance(node, Package) and node.operation == "unmerge")
7135 for uninstall in self._blocker_uninstalls.leaf_nodes():
7136 uninstall_parents = \
7137 self._blocker_uninstalls.parent_nodes(uninstall)
7138 if not uninstall_parents:
7141 # Remove the corresponding "nomerge" node and substitute
7142 # the Uninstall node.
7143 inst_pkg = self._pkg_cache[
7144 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7146 mygraph.remove(inst_pkg)
7151 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7153 inst_pkg_blockers = []
7155 # Break the Package -> Uninstall edges.
7156 mygraph.remove(uninstall)
7158 # Resolution of a package's blockers
7159 # depend on it's own uninstallation.
7160 for blocker in inst_pkg_blockers:
7161 mygraph.add(uninstall, blocker)
7163 # Expand Package -> Uninstall edges into
7164 # Package -> Blocker -> Uninstall edges.
7165 for blocker in uninstall_parents:
7166 mygraph.add(uninstall, blocker)
7167 for parent in self._blocker_parents.parent_nodes(blocker):
7168 if parent != inst_pkg:
7169 mygraph.add(blocker, parent)
7171 # If the uninstall task did not need to be executed because
7172 # of an upgrade, display Blocker -> Upgrade edges since the
7173 # corresponding Blocker -> Uninstall edges will not be shown.
# The assignment target (presumably 'upgrade_node =') is on an elided
# line (orig. 7174) -- TODO confirm.
7175 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7176 if upgrade_node is not None and \
7177 uninstall not in executed_uninstalls:
7178 for blocker in uninstall_parents:
7179 mygraph.add(upgrade_node, blocker)
7181 unsatisfied_blockers = []
# The surrounding loop over mylist and the tree bookkeeping
# initializations (tree_nodes, display_list, shown_edges) are elided
# (orig. 7182-7185, 7188-7189).
7186 if isinstance(x, Blocker) and not x.satisfied:
7187 unsatisfied_blockers.append(x)
7190 if "--tree" in self.myopts:
# Walk back up tree_nodes until a parent of this node is found,
# establishing the display depth.
7191 depth = len(tree_nodes)
7192 while depth and graph_key not in \
7193 mygraph.child_nodes(tree_nodes[depth-1]):
7196 tree_nodes = tree_nodes[:depth]
7197 tree_nodes.append(graph_key)
7198 display_list.append((x, depth, True))
7199 shown_edges.add((graph_key, tree_nodes[depth-1]))
7201 traversed_nodes = set() # prevent endless circles
7202 traversed_nodes.add(graph_key)
# Recursive helper: walk parent edges (avoiding cycles and already
# shown edges) and emit unordered ancestor rows above this node.
7203 def add_parents(current_node, ordered):
7205 # Do not traverse to parents if this node is an
7206 # an argument or a direct member of a set that has
7207 # been specified as an argument (system or world).
7208 if current_node not in self._set_nodes:
7209 parent_nodes = mygraph.parent_nodes(current_node)
7211 child_nodes = set(mygraph.child_nodes(current_node))
7212 selected_parent = None
7213 # First, try to avoid a direct cycle.
7214 for node in parent_nodes:
7215 if not isinstance(node, (Blocker, Package)):
7217 if node not in traversed_nodes and \
7218 node not in child_nodes:
7219 edge = (current_node, node)
7220 if edge in shown_edges:
7222 selected_parent = node
7224 if not selected_parent:
7225 # A direct cycle is unavoidable.
7226 for node in parent_nodes:
7227 if not isinstance(node, (Blocker, Package)):
7229 if node not in traversed_nodes:
7230 edge = (current_node, node)
7231 if edge in shown_edges:
7233 selected_parent = node
7236 shown_edges.add((current_node, selected_parent))
7237 traversed_nodes.add(selected_parent)
7238 add_parents(selected_parent, False)
7239 display_list.append((current_node,
7240 len(tree_nodes), ordered))
7241 tree_nodes.append(current_node)
7243 add_parents(graph_key, True)
7245 display_list.append((x, depth, True))
7246 mylist = display_list
7247 for x in unsatisfied_blockers:
7248 mylist.append((x, 0, True))
# Walk the list backwards pruning consecutive duplicate rows produced
# by the tree traversal above.
7250 last_merge_depth = 0
7251 for i in xrange(len(mylist)-1,-1,-1):
7252 graph_key, depth, ordered = mylist[i]
7253 if not ordered and depth == 0 and i > 0 \
7254 and graph_key == mylist[i-1][0] and \
7255 mylist[i-1][1] == 0:
7256 # An ordered node got a consecutive duplicate when the tree was
7260 if ordered and graph_key[-1] != "nomerge":
7261 last_merge_depth = depth
7263 if depth >= last_merge_depth or \
7264 i < len(mylist) - 1 and \
7265 depth >= mylist[i+1][1]:
7268 from portage import flatten
7269 from portage.dep import use_reduce, paren_reduce
7270 # files to fetch list - avoids counting a same file twice
7271 # in size display (verbose mode)
7274 # Use this set to detect when all the "repoadd" strings are "[0]"
7275 # and disable the entire repo display in this case.
# Main rendering loop: one iteration per display row.
7278 for mylist_index in xrange(len(mylist)):
7279 x, depth, ordered = mylist[mylist_index]
7283 portdb = self.trees[myroot]["porttree"].dbapi
7284 bindb = self.trees[myroot]["bintree"].dbapi
7285 vardb = self.trees[myroot]["vartree"].dbapi
7286 vartree = self.trees[myroot]["vartree"]
7287 pkgsettings = self.pkgsettings[myroot]
7290 indent = " " * depth
# Blocker rows: "b" (satisfied) or "B" (unsatisfied), with the blocking
# atom expanded against the installed-package db.
7292 if isinstance(x, Blocker):
7294 blocker_style = "PKG_BLOCKER_SATISFIED"
7295 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7297 blocker_style = "PKG_BLOCKER"
7298 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7300 counters.blocks += 1
7302 counters.blocks_satisfied += 1
7303 resolved = portage.key_expand(
7304 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7305 if "--columns" in self.myopts and "--quiet" in self.myopts:
7306 addl += " " + colorize(blocker_style, resolved)
7308 addl = "[%s %s] %s%s" % \
7309 (colorize(blocker_style, "blocks"),
7310 addl, indent, colorize(blocker_style, resolved))
7311 block_parents = self._blocker_parents.parent_nodes(x)
7312 block_parents = set([pnode[2] for pnode in block_parents])
7313 block_parents = ", ".join(block_parents)
7315 addl += colorize(blocker_style,
7316 " (\"%s\" is blocking %s)") % \
7317 (str(x.atom).lstrip("!"), block_parents)
7319 addl += colorize(blocker_style,
7320 " (is blocking %s)") % block_parents
7321 if isinstance(x, Blocker) and x.satisfied:
7326 blockers.append(addl)
# Package rows from here on. The unpacking of x into pkg_type/pkg_key/
# pkg_status/pkg is on elided lines (orig. 7327-7328).
7329 pkg_merge = ordered and pkg_status == "merge"
7330 if not pkg_merge and pkg_status == "merge":
7331 pkg_status = "nomerge"
7332 built = pkg_type != "ebuild"
7333 installed = pkg_type == "installed"
7335 metadata = pkg.metadata
7337 repo_name = metadata["repository"]
7338 if pkg_type == "ebuild":
7339 ebuild_path = portdb.findname(pkg_key)
7340 if not ebuild_path: # shouldn't happen
7341 raise portage.exception.PackageNotFound(pkg_key)
# .../category/package/pkg.ebuild -> repository root.
7342 repo_path_real = os.path.dirname(os.path.dirname(
7343 os.path.dirname(ebuild_path)))
7345 repo_path_real = portdb.getRepositoryPath(repo_name)
7346 pkg_use = list(pkg.use.enabled)
7348 restrict = flatten(use_reduce(paren_reduce(
7349 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7350 except portage.exception.InvalidDependString, e:
7351 if not pkg.installed:
7352 show_invalid_depstring_notice(x,
7353 pkg.metadata["RESTRICT"], str(e))
# RESTRICT=fetch handling: mark the row and check whether the distfiles
# are already present.
7357 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7358 "fetch" in restrict:
7361 counters.restrict_fetch += 1
7362 if portdb.fetch_check(pkg_key, pkg_use):
7365 counters.restrict_fetch_satisfied += 1
7367 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7368 #param is used for -u, where you still *do* want to see when something is being upgraded.
# Classify the row: R (reinstall), U/UD (upgrade/downgrade in slot),
# NS (new slot), N (brand new), updating counters accordingly.
7371 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7372 if vardb.cpv_exists(pkg_key):
7373 addl=" "+yellow("R")+fetch+" "
7376 counters.reinst += 1
7377 elif pkg_status == "uninstall":
7378 counters.uninst += 1
7379 # filter out old-style virtual matches
7380 elif installed_versions and \
7381 portage.cpv_getkey(installed_versions[0]) == \
7382 portage.cpv_getkey(pkg_key):
7383 myinslotlist = vardb.match(pkg.slot_atom)
7384 # If this is the first install of a new-style virtual, we
7385 # need to filter out old-style virtual matches.
7386 if myinslotlist and \
7387 portage.cpv_getkey(myinslotlist[0]) != \
7388 portage.cpv_getkey(pkg_key):
7391 myoldbest = myinslotlist[:]
7393 if not portage.dep.cpvequal(pkg_key,
7394 portage.best([pkg_key] + myoldbest)):
7396 addl += turquoise("U")+blue("D")
7398 counters.downgrades += 1
7401 addl += turquoise("U") + " "
7403 counters.upgrades += 1
7405 # New slot, mark it new.
7406 addl = " " + green("NS") + fetch + " "
7407 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7409 counters.newslot += 1
7411 if "--changelog" in self.myopts:
7412 inst_matches = vardb.match(pkg.slot_atom)
7414 changelogs.extend(self.calc_changelog(
7415 portdb.findname(pkg_key),
7416 inst_matches[0], pkg_key))
7418 addl = " " + green("N") + " " + fetch + " "
# Compute current/old USE and IUSE for the verbose USE display.
7427 forced_flags = set()
7428 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7429 forced_flags.update(pkgsettings.useforce)
7430 forced_flags.update(pkgsettings.usemask)
7432 cur_use = [flag for flag in pkg.use.enabled \
7433 if flag in pkg.iuse.all]
7434 cur_iuse = sorted(pkg.iuse.all)
7436 if myoldbest and myinslotlist:
7437 previous_cpv = myoldbest[0]
7439 previous_cpv = pkg.cpv
7440 if vardb.cpv_exists(previous_cpv):
7441 old_iuse, old_use = vardb.aux_get(
7442 previous_cpv, ["IUSE", "USE"])
7443 old_iuse = list(set(
7444 filter_iuse_defaults(old_iuse.split())))
7446 old_use = old_use.split()
7453 old_use = [flag for flag in old_use if flag in old_iuse]
7455 use_expand = pkgsettings["USE_EXPAND"].lower().split()
7457 use_expand.reverse()
7458 use_expand_hidden = \
7459 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
# Helper: bucket flat USE flags into their USE_EXPAND categories
# (e.g. "video_cards_radeon" -> VIDEO_CARDS: "radeon"); the remainder
# stays under the "USE" key. Parts of its body are elided.
7461 def map_to_use_expand(myvals, forcedFlags=False,
7465 for exp in use_expand:
7468 for val in myvals[:]:
7469 if val.startswith(exp.lower()+"_"):
7470 if val in forced_flags:
7471 forced[exp].add(val[len(exp)+1:])
7472 ret[exp].append(val[len(exp)+1:])
7475 forced["USE"] = [val for val in myvals \
7476 if val in forced_flags]
7478 for exp in use_expand_hidden:
7484 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7485 # are the only thing that triggered reinstallation.
7486 reinst_flags_map = {}
7487 reinstall_for_flags = self._reinstall_nodes.get(pkg)
7488 reinst_expand_map = None
7489 if reinstall_for_flags:
7490 reinst_flags_map = map_to_use_expand(
7491 list(reinstall_for_flags), removeHidden=False)
7492 for k in list(reinst_flags_map):
7493 if not reinst_flags_map[k]:
7494 del reinst_flags_map[k]
7495 if not reinst_flags_map.get("USE"):
7496 reinst_expand_map = reinst_flags_map.copy()
7497 reinst_expand_map.pop("USE", None)
7498 if reinst_expand_map and \
7499 not set(reinst_expand_map).difference(
7501 use_expand_hidden = \
7502 set(use_expand_hidden).difference(
7505 cur_iuse_map, iuse_forced = \
7506 map_to_use_expand(cur_iuse, forcedFlags=True)
7507 cur_use_map = map_to_use_expand(cur_use)
7508 old_iuse_map = map_to_use_expand(old_iuse)
7509 old_use_map = map_to_use_expand(old_use)
7512 use_expand.insert(0, "USE")
7514 for key in use_expand:
7515 if key in use_expand_hidden:
7517 verboseadd += create_use_string(key.upper(),
7518 cur_iuse_map[key], iuse_forced[key],
7519 cur_use_map[key], old_iuse_map[key],
7520 old_use_map[key], is_new,
7521 reinst_flags_map.get(key))
# Verbose mode: sum download sizes, counting each distfile once.
7526 if pkg_type == "ebuild" and pkg_merge:
7528 myfilesdict = portdb.getfetchsizes(pkg_key,
7529 useflags=pkg_use, debug=self.edebug)
7530 except portage.exception.InvalidDependString, e:
7531 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7532 show_invalid_depstring_notice(x, src_uri, str(e))
7535 if myfilesdict is None:
7536 myfilesdict="[empty/missing/bad digest]"
7538 for myfetchfile in myfilesdict:
7539 if myfetchfile not in myfetchlist:
7540 mysize+=myfilesdict[myfetchfile]
7541 myfetchlist.append(myfetchfile)
7543 counters.totalsize += mysize
7544 verboseadd += format_size(mysize)
7547 # assign index for a previous version in the same slot
7548 has_previous = False
7549 repo_name_prev = None
7550 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7552 slot_matches = vardb.match(slot_atom)
7555 repo_name_prev = vardb.aux_get(slot_matches[0],
7558 # now use the data to generate output
7559 if pkg.installed or not has_previous:
7560 repoadd = repo_display.repoStr(repo_path_real)
7562 repo_path_prev = None
7564 repo_path_prev = portdb.getRepositoryPath(
7566 if repo_path_prev == repo_path_real:
7567 repoadd = repo_display.repoStr(repo_path_real)
7569 repoadd = "%s=>%s" % (
7570 repo_display.repoStr(repo_path_prev),
7571 repo_display.repoStr(repo_path_real))
7573 repoadd_set.add(repoadd)
# xs = [cat/pkg, version, revision-suffix] for column formatting.
7575 xs = [portage.cpv_getkey(pkg_key)] + \
7576 list(portage.catpkgsplit(pkg_key)[2:])
7583 if "COLUMNWIDTH" in self.settings:
7585 mywidth = int(self.settings["COLUMNWIDTH"])
7586 except ValueError, e:
7587 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7589 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7590 self.settings["COLUMNWIDTH"], noiselevel=-1)
7592 oldlp = mywidth - 30
7595 # Convert myoldbest from a list to a string.
7599 for pos, key in enumerate(myoldbest):
7600 key = portage.catpkgsplit(key)[2] + \
7601 "-" + portage.catpkgsplit(key)[3]
7602 if key[-3:] == "-r0":
7604 myoldbest[pos] = key
7605 myoldbest = blue("["+", ".join(myoldbest)+"]")
7608 root_config = self.roots[myroot]
7609 system_set = root_config.sets["system"]
7610 world_set = root_config.sets["world"]
7615 pkg_system = system_set.findAtomForPackage(pkg)
7616 pkg_world = world_set.findAtomForPackage(pkg)
# Predict world-file membership for coloring: a favorite on the target
# root may be added to world by this merge.
7617 if not (oneshot or pkg_world) and \
7618 myroot == self.target_root and \
7619 favorites_set.findAtomForPackage(pkg):
7620 # Maybe it will be added to world now.
7621 if create_world_atom(pkg, favorites_set, root_config):
7623 except portage.exception.InvalidDependString:
7624 # This is reported elsewhere if relevant.
# Color the package name by merge status and system/world membership.
7627 def pkgprint(pkg_str):
7630 return colorize("PKG_MERGE_SYSTEM", pkg_str)
7632 return colorize("PKG_MERGE_WORLD", pkg_str)
7634 return colorize("PKG_MERGE", pkg_str)
7635 elif pkg_status == "uninstall":
7636 return colorize("PKG_UNINSTALL", pkg_str)
7639 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7641 return colorize("PKG_NOMERGE_WORLD", pkg_str)
7643 return colorize("PKG_NOMERGE", pkg_str)
7646 properties = flatten(use_reduce(paren_reduce(
7647 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7648 except portage.exception.InvalidDependString, e:
7649 if not pkg.installed:
7650 show_invalid_depstring_notice(pkg,
7651 pkg.metadata["PROPERTIES"], str(e))
# PROPERTIES=interactive: flag with a warning-colored "I".
7655 interactive = "interactive" in properties
7656 if interactive and pkg.operation == "merge":
7657 addl = colorize("WARN", "I") + addl[1:]
7659 counters.interactive += 1
# Final row formatting: two near-duplicate branches (the dividing
# condition is on an elided line), each handling --columns/--quiet
# combinations and padding to oldlp/newlp column widths.
7664 if "--columns" in self.myopts:
7665 if "--quiet" in self.myopts:
7666 myprint=addl+" "+indent+pkgprint(pkg_cp)
7667 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7668 myprint=myprint+myoldbest
7669 myprint=myprint+darkgreen("to "+x[1])
7673 myprint = "[%s] %s%s" % \
7674 (pkgprint(pkg_status.ljust(13)),
7675 indent, pkgprint(pkg.cp))
7677 myprint = "[%s %s] %s%s" % \
7678 (pkgprint(pkg.type_name), addl,
7679 indent, pkgprint(pkg.cp))
7680 if (newlp-nc_len(myprint)) > 0:
7681 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7682 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7683 if (oldlp-nc_len(myprint)) > 0:
7684 myprint=myprint+" "*(oldlp-nc_len(myprint))
7685 myprint=myprint+myoldbest
7686 myprint += darkgreen("to " + pkg.root)
7689 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7691 myprint = "[" + pkg_type + " " + addl + "] "
7692 myprint += indent + pkgprint(pkg_key) + " " + \
7693 myoldbest + darkgreen("to " + myroot)
7695 if "--columns" in self.myopts:
7696 if "--quiet" in self.myopts:
7697 myprint=addl+" "+indent+pkgprint(pkg_cp)
7698 myprint=myprint+" "+green(xs[1]+xs[2])+" "
7699 myprint=myprint+myoldbest
7703 myprint = "[%s] %s%s" % \
7704 (pkgprint(pkg_status.ljust(13)),
7705 indent, pkgprint(pkg.cp))
7707 myprint = "[%s %s] %s%s" % \
7708 (pkgprint(pkg.type_name), addl,
7709 indent, pkgprint(pkg.cp))
7710 if (newlp-nc_len(myprint)) > 0:
7711 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7712 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7713 if (oldlp-nc_len(myprint)) > 0:
7714 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7715 myprint += myoldbest
7718 myprint = "[%s] %s%s %s" % \
7719 (pkgprint(pkg_status.ljust(13)),
7720 indent, pkgprint(pkg.cpv),
7723 myprint = "[%s %s] %s%s %s" % \
7724 (pkgprint(pkg_type), addl, indent,
7725 pkgprint(pkg.cpv), myoldbest)
7727 if columns and pkg.operation == "uninstall":
7729 p.append((myprint, verboseadd, repoadd))
# Warn when merging sys-apps/portage itself mid-list: emerge restarts
# after installing a new portage version.
7731 if "--tree" not in self.myopts and \
7732 "--quiet" not in self.myopts and \
7733 not self._opts_no_restart.intersection(self.myopts) and \
7734 pkg.root == self._running_root.root and \
7735 portage.match_from_list(
7736 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7737 not vardb.cpv_exists(pkg.cpv) and \
7738 "--quiet" not in self.myopts:
7739 if mylist_index < len(mylist) - 1:
7740 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
7741 p.append(colorize("WARN", " then resume the merge."))
# Emit the accumulated rows; suppress the repo column when every entry
# is "[0]" (single repository).
7744 show_repos = repoadd_set and repoadd_set != set(["0"])
7747 if isinstance(x, basestring):
7748 out.write("%s\n" % (x,))
7751 myprint, verboseadd, repoadd = x
7754 myprint += " " + verboseadd
7756 if show_repos and repoadd:
7757 myprint += " " + teal("[%s]" % repoadd)
7759 out.write("%s\n" % (myprint,))
7768 sys.stdout.write(str(repo_display))
# Python 2 print statement; changelogs were collected during the loop.
7770 if "--changelog" in self.myopts:
7772 for revision,text in changelogs:
7773 print bold('*'+revision)
7774 sys.stdout.write(text)
7779 def display_problems(self):
# NOTE(review): the docstring delimiters (orig. 7780, 7789-7790) and the
# try/finally that presumably restores sys.stdout after the redirect
# below (orig. 7799-7802, 7805-7809) are elided from this listing --
# TODO confirm against the full source.
7781 Display problems with the dependency graph such as slot collisions.
7782 This is called internally by display() to show the problems _after_
7783 the merge list where it is most likely to be seen, but if display()
7784 is not going to be called then this method should be called explicitly
7785 to ensure that the user is notified of problems with the graph.
7787 All output goes to stderr, except for unsatisfied dependencies which
7788 go to stdout for parsing by programs such as autounmask.
7791 # Note that show_masked_packages() sends it's output to
7792 # stdout, and some programs such as autounmask parse the
7793 # output in cases when emerge bails out. However, when
7794 # show_masked_packages() is called for installed packages
7795 # here, the message is a warning that is more appropriate
7796 # to send to stderr, so temporarily redirect stdout to
7797 # stderr. TODO: Fix output code so there's a cleaner way
7798 # to redirect everything to stderr.
# Temporary redirect so _display_problems() warnings go to stderr.
7803 sys.stdout = sys.stderr
7804 self._display_problems()
7810 # This goes to stdout for parsing by programs like autounmask.
7811 for pargs, kwargs in self._unsatisfied_deps_for_display:
7812 self._show_unsatisfied_dep(*pargs, **kwargs)
7814 def _display_problems(self):
# Emit all collected graph problems (circular deps, unresolved blockers
# or slot collisions, world-file issues, package.provided conflicts,
# masked installed packages) to the current stdout/stderr.
# NOTE(review): this listing is elided (orig. 7818, 7824, 7826, 7829,
# 7839-7841, 7846, 7854-7867 partially, 7878-7885 partially, 7894,
# 7902 missing); some branch headers below are therefore not visible.
7815 if self._circular_deps_for_display is not None:
7816 self._show_circular_deps(
7817 self._circular_deps_for_display)
7819 # The user is only notified of a slot conflict if
7820 # there are no unresolvable blocker conflicts.
7821 if self._unsatisfied_blockers_for_display is not None:
7822 self._show_unsatisfied_blockers(
7823 self._unsatisfied_blockers_for_display)
# Presumably in an elided 'else:' branch (orig. 7824) -- TODO confirm.
7825 self._show_slot_collision_notice()
7827 # TODO: Add generic support for "set problem" handlers so that
7828 # the below warnings aren't special cases for world only.
7830 if self._missing_args:
7831 world_problems = False
7832 if "world" in self._sets:
7833 # Filter out indirect members of world (from nested sets)
7834 # since only direct members of world are desired here.
7835 world_set = self.roots[self.target_root].sets["world"]
7836 for arg, atom in self._missing_args:
7837 if arg.name == "world" and atom in world_set:
7838 world_problems = True
7842 sys.stderr.write("\n!!! Problems have been " + \
7843 "detected with your world file\n")
7844 sys.stderr.write("!!! Please run " + \
7845 green("emaint --check world")+"\n\n")
7847 if self._missing_args:
7848 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7849 " Ebuilds for the following packages are either all\n")
7850 sys.stderr.write(colorize("BAD", "!!!") + \
7851 " masked or don't exist:\n")
7852 sys.stderr.write(" ".join(str(atom) for arg, atom in \
7853 self._missing_args) + "\n")
# Group package.provided conflicts by (arg, atom) and remember which
# sets pulled each one in, so the advice below can mention "world".
7855 if self._pprovided_args:
7857 for arg, atom in self._pprovided_args:
7858 if isinstance(arg, SetArg):
7860 arg_atom = (atom, atom)
7863 arg_atom = (arg.arg, atom)
7864 refs = arg_refs.setdefault(arg_atom, [])
7865 if parent not in refs:
7868 msg.append(bad("\nWARNING: "))
7869 if len(self._pprovided_args) > 1:
7870 msg.append("Requested packages will not be " + \
7871 "merged because they are listed in\n")
7873 msg.append("A requested package will not be " + \
7874 "merged because it is listed in\n")
7875 msg.append("package.provided:\n\n")
7876 problems_sets = set()
7877 for (arg, atom), refs in arg_refs.iteritems():
7880 problems_sets.update(refs)
7882 ref_string = ", ".join(["'%s'" % name for name in refs])
7883 ref_string = " pulled in by " + ref_string
7884 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7886 if "world" in problems_sets:
7887 msg.append("This problem can be solved in one of the following ways:\n\n")
7888 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
7889 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
7890 msg.append(" C) Remove offending entries from package.provided.\n\n")
7891 msg.append("The best course of action depends on the reason that an offending\n")
7892 msg.append("package.provided entry exists.\n\n")
7893 sys.stderr.write("".join(msg))
# Installed packages that are now masked: warn (output redirected to
# stderr by the caller, display_problems()).
7895 masked_packages = []
7896 for pkg in self._masked_installed:
7897 root_config = pkg.root_config
7898 pkgsettings = self.pkgsettings[pkg.root]
7899 mreasons = get_masking_status(pkg, pkgsettings, root_config)
7900 masked_packages.append((root_config, pkgsettings,
7901 pkg.cpv, pkg.metadata, mreasons))
7903 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7904 " The following installed packages are masked:\n")
7905 show_masked_packages(masked_packages)
# NOTE(review): elided extraction; code left byte-identical, comments only.
# Purpose: return the ChangeLog entries between versions `current` and `next`
# of the package whose ebuild lives at `ebuildpath`. Versions are normalized
# by stripping the category and any trailing '-r0'.
# NOTE(review): `== None` should idiomatically be `is None`; left unchanged here.
7909 def calc_changelog(self,ebuildpath,current,next):
7910 if ebuildpath == None or not os.path.exists(ebuildpath):
7912 current = '-'.join(portage.catpkgsplit(current)[1:])
7913 if current.endswith('-r0'):
7914 current = current[:-3]
7915 next = '-'.join(portage.catpkgsplit(next)[1:])
7916 if next.endswith('-r0'):
7918 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
# read is wrapped in a try whose opening/fallback lines were elided — TODO confirm
7920 changelog = open(changelogpath).read()
7921 except SystemExit, e:
7922 raise # Needed else can't exit
7925 divisions = self.find_changelog_tags(changelog)
7926 #print 'XX from',current,'to',next
7927 #for div,text in divisions: print 'XX',div
7928 # skip entries for all revisions above the one we are about to emerge
7929 for i in range(len(divisions)):
7930 if divisions[i][0]==next:
7931 divisions = divisions[i:]
7933 # find out how many entries we are going to display
7934 for i in range(len(divisions)):
7935 if divisions[i][0]==current:
7936 divisions = divisions[:i]
7939 # couldnt find the current revision in the list. display nothing
# NOTE(review): elided extraction; code left byte-identical, comments only.
# Purpose: split a ChangeLog text into (release, entry-text) pairs by scanning
# for '*<version>' headers. Trailing '.ebuild' and '-r0' are stripped from the
# release name. The enclosing loop and return statement were elided — TODO confirm.
7943 def find_changelog_tags(self,changelog):
7947 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# no further header found: flush the remainder under the last release seen
7949 if release is not None:
7950 divs.append((release,changelog))
7952 if release is not None:
7953 divs.append((release,changelog[:match.start()]))
7954 changelog = changelog[match.end():]
7955 release = match.group(1)
7956 if release.endswith('.ebuild'):
7957 release = release[:-7]
7958 if release.endswith('-r0'):
7959 release = release[:-3]
# NOTE(review): elided extraction; code left byte-identical, comments only.
7961 def saveNomergeFavorites(self):
7962 """Find atoms in favorites that are not in the mergelist and add them
7963 to the world file if necessary."""
# bail out early for modes that must not modify the world file
7964 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
7965 "--oneshot", "--onlydeps", "--pretend"):
7966 if x in self.myopts:
7968 root_config = self.roots[self.target_root]
7969 world_set = root_config.sets["world"]
7971 world_locked = False
7972 if hasattr(world_set, "lock"):
7976 if hasattr(world_set, "load"):
7977 world_set.load() # maybe it's changed on disk
7979 args_set = self._sets["args"]
7980 portdb = self.trees[self.target_root]["porttree"].dbapi
7981 added_favorites = set()
7982 for x in self._set_nodes:
7983 pkg_type, root, pkg_key, pkg_status = x
7984 if pkg_status != "nomerge":
# create_world_atom may raise InvalidDependString for broken PROVIDE metadata
7988 myfavkey = create_world_atom(x, args_set, root_config)
7990 if myfavkey in added_favorites:
7992 added_favorites.add(myfavkey)
7993 except portage.exception.InvalidDependString, e:
7994 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
7995 (pkg_key, str(e)), noiselevel=-1)
7996 writemsg("!!! see '%s'\n\n" % os.path.join(
7997 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# also record nested sets (as SETPREFIX names) that qualify as world candidates
8000 for k in self._sets:
8001 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8006 all_added.append(SETPREFIX + k)
8007 all_added.extend(added_favorites)
8010 print ">>> Recording %s in \"world\" favorites file..." % \
8011 colorize("INFORM", str(a))
8013 world_set.update(all_added)
# presumably an unlock of world_set in a finally clause was elided — TODO confirm
# NOTE(review): elided extraction; code left byte-identical, comments only.
8018 def loadResumeCommand(self, resume_data, skip_masked=False):
# docstring delimiters elided in this extraction
8020 Add a resume command to the graph and validate it in the process. This
8021 will raise a PackageNotFound exception if a package is not available.
8024 if not isinstance(resume_data, dict):
8027 mergelist = resume_data.get("mergelist")
8028 if not isinstance(mergelist, list):
8031 fakedb = self.mydbapi
8033 serialized_tasks = []
# validate each 4-tuple mergelist entry and rebuild a Package instance for it
8036 if not (isinstance(x, list) and len(x) == 4):
8038 pkg_type, myroot, pkg_key, action = x
8039 if pkg_type not in self.pkg_tree_map:
8041 if action != "merge":
8043 tree_type = self.pkg_tree_map[pkg_type]
8044 mydb = trees[myroot][tree_type].dbapi
8045 db_keys = list(self._trees_orig[myroot][
8046 tree_type].dbapi._aux_cache_keys)
8048 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8050 # It does no exist or it is corrupt.
8051 if action == "uninstall":
8053 raise portage.exception.PackageNotFound(pkg_key)
8054 installed = action == "uninstall"
8055 built = pkg_type != "ebuild"
8056 root_config = self.roots[myroot]
8057 pkg = Package(built=built, cpv=pkg_key,
8058 installed=installed, metadata=metadata,
8059 operation=action, root_config=root_config,
8061 if pkg_type == "ebuild":
8062 pkgsettings = self.pkgsettings[myroot]
8063 pkgsettings.setcpv(pkg)
8064 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8065 self._pkg_cache[pkg] = pkg
8067 root_config = self.roots[pkg.root]
# a merge of a package that is no longer visible is either skipped
# (skip_masked, presumably) or reported as unsatisfied — TODO confirm elided guard
8068 if "merge" == pkg.operation and \
8069 not visible(root_config.settings, pkg):
8071 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8073 self._unsatisfied_deps_for_display.append(
8074 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8076 fakedb[myroot].cpv_inject(pkg)
8077 serialized_tasks.append(pkg)
8078 self.spinner.update()
8080 if self._unsatisfied_deps_for_display:
8083 if not serialized_tasks or "--nodeps" in self.myopts:
8084 self._serialized_tasks_cache = serialized_tasks
8085 self._scheduler_graph = self.digraph
# with deps enabled, re-run graph creation in selective mode
8087 self._select_package = self._select_pkg_from_graph
8088 self.myparams.add("selective")
8090 favorites = resume_data.get("favorites")
8091 args_set = self._sets["args"]
8092 if isinstance(favorites, list):
8093 args = self._load_favorites(favorites)
8097 for task in serialized_tasks:
8098 if isinstance(task, Package) and \
8099 task.operation == "merge":
8100 if not self._add_pkg(task, None):
8103 # Packages for argument atoms need to be explicitly
8104 # added via _add_pkg() so that they are included in the
8105 # digraph (needed at least for --tree display).
8107 for atom in arg.set:
8108 pkg, existing_node = self._select_package(
8109 arg.root_config.root, atom)
8110 if existing_node is None and \
8112 if not self._add_pkg(pkg, Dependency(atom=atom,
8113 root=pkg.root, parent=arg)):
8116 # Allow unsatisfied deps here to avoid showing a masking
8117 # message for an unsatisfied dep that isn't necessarily
8119 if not self._create_graph(allow_unsatisfied=True):
8121 if masked_tasks or self._unsatisfied_deps:
8122 # This probably means that a required package
8123 # was dropped via --skipfirst. It makes the
8124 # resume list invalid, so convert it to a
8125 # UnsatisfiedResumeDep exception.
8126 raise self.UnsatisfiedResumeDep(self,
8127 masked_tasks + self._unsatisfied_deps)
8128 self._serialized_tasks_cache = None
8131 except self._unknown_internal_error:
# NOTE(review): elided extraction; code left byte-identical, comments only.
8136 def _load_favorites(self, favorites):
# docstring delimiters elided in this extraction
8138 Use a list of favorites to resume state from a
8139 previous select_files() call. This creates similar
8140 DependencyArg instances to those that would have
8141 been created by the original select_files() call.
8142 This allows Package instances to be matched with
8143 DependencyArg instances during graph creation.
8145 root_config = self.roots[self.target_root]
8146 getSetAtoms = root_config.setconfig.getSetAtoms
8147 sets = root_config.sets
# each favorite is either a set name (SETPREFIX-prefixed) or a plain atom
8150 if not isinstance(x, basestring):
8152 if x in ("system", "world"):
8154 if x.startswith(SETPREFIX):
8155 s = x[len(SETPREFIX):]
8160 # Recursively expand sets so that containment tests in
8161 # self._get_parent_sets() properly match atoms in nested
8162 # sets (like if world contains system).
8163 expanded_set = InternalPackageSet(
8164 initial_atoms=getSetAtoms(s))
8165 self._sets[s] = expanded_set
8166 args.append(SetArg(arg=x, set=expanded_set,
8167 root_config=root_config))
8169 if not portage.isvalidatom(x):
8171 args.append(AtomArg(arg=x, atom=x,
8172 root_config=root_config))
8174 # Create the "args" package set from atoms and
8175 # packages given as arguments.
8176 args_set = self._sets["args"]
8178 if not isinstance(arg, (AtomArg, PackageArg)):
8181 if myatom in args_set:
8183 args_set.add(myatom)
8184 self._set_atoms.update(chain(*self._sets.itervalues()))
8185 atom_arg_map = self._atom_arg_map
# map (atom, root) -> list of DependencyArg refs, avoiding duplicates
8187 for atom in arg.set:
8188 atom_key = (atom, arg.root_config.root)
8189 refs = atom_arg_map.get(atom_key)
8192 atom_arg_map[atom_key] = refs
# presumably `return args` was elided — TODO confirm
# NOTE(review): docstring delimiters elided in this extraction; code unchanged.
8197 class UnsatisfiedResumeDep(portage.exception.PortageException):
8199 A dependency of a resume list is not installed. This
8200 can occur when a required package is dropped from the
8201 merge list via --skipfirst.
8203 def __init__(self, depgraph, value):
8204 portage.exception.PortageException.__init__(self, value)
# keep a reference to the depgraph so callers can inspect the failed graph
8205 self.depgraph = depgraph
# Base class for depgraph-internal control-flow exceptions; the value
# defaults to an empty string since these are not user-facing messages.
8207 class _internal_exception(portage.exception.PortageException):
8208 def __init__(self, value=""):
8209 portage.exception.PortageException.__init__(self, value)
# NOTE(review): docstring truncated/delimiters elided in this extraction.
8211 class _unknown_internal_error(_internal_exception):
8213 Used by the depgraph internally to terminate graph creation.
8214 The specific reason for the failure should have been dumped
8215 to stderr, unfortunately, the exact reason for the failure
# NOTE(review): docstring delimiters elided in this extraction.
8219 class _serialize_tasks_retry(_internal_exception):
8221 This is raised by the _serialize_tasks() method when it needs to
8222 be called again for some reason. The only case that it's currently
8223 used for is when neglected dependencies need to be added to the
8224 graph in order to avoid making a potentially unsafe decision.
# NOTE(review): elided extraction; code left byte-identical, comments only.
8227 class _dep_check_composite_db(portage.dbapi):
# docstring delimiters elided in this extraction
8229 A dbapi-like interface that is optimized for use in dep_check() calls.
8230 This is built on top of the existing depgraph package selection logic.
8231 Some packages that have been added to the graph may be masked from this
8232 view in order to influence the atom preference selection that occurs
8235 def __init__(self, depgraph, root):
8236 portage.dbapi.__init__(self)
8237 self._depgraph = depgraph
# per-atom match cache and cpv -> Package map for aux_get()
8239 self._match_cache = {}
8240 self._cpv_pkg_map = {}
8242 def match(self, atom):
8243 ret = self._match_cache.get(atom)
# cache miss path follows; the early-return on hit was elided — TODO confirm
8248 atom = self._dep_expand(atom)
8249 pkg, existing = self._depgraph._select_package(self._root, atom)
8253 # Return the highest available from select_package() as well as
8254 # any matching slots in the graph db.
8256 slots.add(pkg.metadata["SLOT"])
8257 atom_cp = portage.dep_getkey(atom)
8258 if pkg.cp.startswith("virtual/"):
8259 # For new-style virtual lookahead that occurs inside
8260 # dep_check(), examine all slots. This is needed
8261 # so that newer slots will not unnecessarily be pulled in
8262 # when a satisfying lower slot is already installed. For
8263 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8264 # there's no need to pull in a newer slot to satisfy a
8265 # virtual/jdk dependency.
8266 for db, pkg_type, built, installed, db_keys in \
8267 self._depgraph._filtered_trees[self._root]["dbs"]:
8268 for cpv in db.match(atom):
8269 if portage.cpv_getkey(cpv) != pkg.cp:
8271 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8273 if self._visible(pkg):
8274 self._cpv_pkg_map[pkg.cpv] = pkg
8276 slots.remove(pkg.metadata["SLOT"])
# resolve one candidate per remaining slot via a slot-specific atom
8278 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8279 pkg, existing = self._depgraph._select_package(
8280 self._root, slot_atom)
8283 if not self._visible(pkg):
8285 self._cpv_pkg_map[pkg.cpv] = pkg
8288 self._cpv_sort_ascending(ret)
8289 self._match_cache[orig_atom] = ret
8292 def _visible(self, pkg):
# installed packages are hidden unless running in selective mode
8293 if pkg.installed and "selective" not in self._depgraph.myparams:
8295 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8296 except (StopIteration, portage.exception.InvalidDependString):
8303 self._depgraph.pkgsettings[pkg.root], pkg):
8305 except portage.exception.InvalidDependString:
8309 def _dep_expand(self, atom):
# docstring delimiters elided in this extraction
8311 This is only needed for old installed packages that may
8312 contain atoms that are not fully qualified with a specific
8313 category. Emulate the cpv_expand() function that's used by
8314 dbapi.match() in cases like this. If there are multiple
8315 matches, it's often due to a new-style virtual that has
8316 been added, so try to filter those out to avoid raising
8319 root_config = self._depgraph.roots[self._root]
8321 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8322 if len(expanded_atoms) > 1:
8323 non_virtual_atoms = []
8324 for x in expanded_atoms:
8325 if not portage.dep_getkey(x).startswith("virtual/"):
8326 non_virtual_atoms.append(x)
8327 if len(non_virtual_atoms) == 1:
8328 expanded_atoms = non_virtual_atoms
8329 if len(expanded_atoms) > 1:
8330 # compatible with portage.cpv_expand()
8331 raise portage.exception.AmbiguousPackageName(
8332 [portage.dep_getkey(x) for x in expanded_atoms])
8334 atom = expanded_atoms[0]
# no match: decide between the "virtual" and "null" pseudo-categories
8336 null_atom = insert_category_into_atom(atom, "null")
8337 null_cp = portage.dep_getkey(null_atom)
8338 cat, atom_pn = portage.catsplit(null_cp)
8339 virts_p = root_config.settings.get_virts_p().get(atom_pn)
8341 # Allow the resolver to choose which virtual.
8342 atom = insert_category_into_atom(atom, "virtual")
8344 atom = insert_category_into_atom(atom, "null")
8347 def aux_get(self, cpv, wants):
# serve metadata from the Package objects recorded during match()
8348 metadata = self._cpv_pkg_map[cpv].metadata
8349 return [metadata.get(x, "") for x in wants]
# NOTE(review): elided extraction; code left byte-identical, comments only.
# A dict of Package instances that also feeds visible packages into the
# per-root visible_pkgs database as they are inserted.
8351 class _package_cache(dict):
8352 def __init__(self, depgraph):
8354 self._depgraph = depgraph
8356 def __setitem__(self, k, v):
8357 dict.__setitem__(self, k, v)
8358 root_config = self._depgraph.roots[v.root]
# only inject packages that are visible and, when installed, not
# keyword-masked; InvalidDependString metadata is tolerated (elided handler)
8360 if visible(root_config.settings, v) and \
8361 not (v.installed and \
8362 v.root_config.settings._getMissingKeywords(v.cpv, v.metadata)):
8363 root_config.visible_pkgs.cpv_inject(v)
8364 except portage.exception.InvalidDependString:
# NOTE(review): elided extraction; code left byte-identical, comments only.
# Maps repository paths (PORTDIR + overlays) to short display indices like
# "[0]", "[1]", with "[?]" for repositories that cannot be determined.
8367 class RepoDisplay(object):
8368 def __init__(self, roots):
8369 self._shown_repos = {}
8370 self._unknown_repo = False
# collect PORTDIR and every overlay path across all configured roots
8372 for root_config in roots.itervalues():
8373 portdir = root_config.settings.get("PORTDIR")
8375 repo_paths.add(portdir)
8376 overlays = root_config.settings.get("PORTDIR_OVERLAY")
8378 repo_paths.update(overlays.split())
8379 repo_paths = list(repo_paths)
8380 self._repo_paths = repo_paths
8381 self._repo_paths_real = [ os.path.realpath(repo_path) \
8382 for repo_path in repo_paths ]
8384 # pre-allocate index for PORTDIR so that it always has index 0.
8385 for root_config in roots.itervalues():
8386 portdb = root_config.trees["porttree"].dbapi
8387 portdir = portdb.porttree_root
8389 self.repoStr(portdir)
8391 def repoStr(self, repo_path_real):
# look up by realpath; a miss marks the repo as unknown ("?")
8394 real_index = self._repo_paths_real.index(repo_path_real)
8395 if real_index == -1:
8397 self._unknown_repo = True
8399 shown_repos = self._shown_repos
8400 repo_paths = self._repo_paths
8401 repo_path = repo_paths[real_index]
8402 index = shown_repos.get(repo_path)
8404 index = len(shown_repos)
8405 shown_repos[repo_path] = index
# __str__ rendering of the repo legend follows (def line elided)
8411 shown_repos = self._shown_repos
8412 unknown_repo = self._unknown_repo
8413 if shown_repos or self._unknown_repo:
8414 output.append("Portage tree and overlays:\n")
8415 show_repo_paths = list(shown_repos)
8416 for repo_path, repo_index in shown_repos.iteritems():
8417 show_repo_paths[repo_index] = repo_path
8419 for index, repo_path in enumerate(show_repo_paths):
8420 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
8422 output.append(" "+teal("[?]") + \
8423 " indicates that the source repository could not be determined\n")
8424 return "".join(output)
# NOTE(review): elided extraction; code left byte-identical, comments only.
# Accumulates merge-list statistics (upgrades, downgrades, new, reinstalls,
# uninstalls, blocks, fetch restrictions, download size) and renders them as
# the familiar "Total: N packages (...)" summary line.
8426 class PackageCounters(object):
# remainder of __init__ counter initialization was elided
8436 self.blocks_satisfied = 0
8438 self.restrict_fetch = 0
8439 self.restrict_fetch_satisfied = 0
8440 self.interactive = 0
# __str__ starts here (def line elided) — builds the summary string
8443 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
8446 myoutput.append("Total: %s package" % total_installs)
8447 if total_installs != 1:
8448 myoutput.append("s")
8449 if total_installs != 0:
8450 myoutput.append(" (")
8451 if self.upgrades > 0:
8452 details.append("%s upgrade" % self.upgrades)
8453 if self.upgrades > 1:
8455 if self.downgrades > 0:
8456 details.append("%s downgrade" % self.downgrades)
8457 if self.downgrades > 1:
8460 details.append("%s new" % self.new)
8461 if self.newslot > 0:
8462 details.append("%s in new slot" % self.newslot)
8463 if self.newslot > 1:
8466 details.append("%s reinstall" % self.reinst)
8470 details.append("%s uninstall" % self.uninst)
8473 if self.interactive > 0:
8474 details.append("%s %s" % (self.interactive,
8475 colorize("WARN", "interactive")))
8476 myoutput.append(", ".join(details))
8477 if total_installs != 0:
8478 myoutput.append(")")
8479 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8480 if self.restrict_fetch:
8481 myoutput.append("\nFetch Restriction: %s package" % \
8482 self.restrict_fetch)
8483 if self.restrict_fetch > 1:
8484 myoutput.append("s")
8485 if self.restrict_fetch_satisfied < self.restrict_fetch:
8486 myoutput.append(bad(" (%s unsatisfied)") % \
8487 (self.restrict_fetch - self.restrict_fetch_satisfied))
8489 myoutput.append("\nConflict: %s block" % \
8492 myoutput.append("s")
8493 if self.blocks_satisfied < self.blocks:
8494 myoutput.append(bad(" (%s unsatisfied)") % \
8495 (self.blocks - self.blocks_satisfied))
8496 return "".join(myoutput)
# NOTE(review): elided extraction (docstring delimiters and the enumerate
# loop that assigns fallback values were elided); code unchanged.
8498 class PollConstants(object):
8501 Provides POLL* constants that are equivalent to those from the
8502 select module, for use by PollSelectAdapter.
8505 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
# fall back to a locally chosen value when select lacks the constant
8508 locals()[k] = getattr(select, k, v)
# NOTE(review): elided extraction; code left byte-identical, comments only.
8512 class PollSelectAdapter(PollConstants):
# docstring delimiters elided in this extraction
8515 Use select to emulate a poll object, for
8516 systems that don't support poll().
# __init__ (def line elided)
8520 self._registered = {}
8521 self._select_args = [[], [], []]
8523 def register(self, fd, *args):
# docstring delimiters elided in this extraction
8525 Only POLLIN is currently supported!
8529 "register expected at most 2 arguments, got " + \
8530 repr(1 + len(args)))
8532 eventmask = PollConstants.POLLIN | \
8533 PollConstants.POLLPRI | PollConstants.POLLOUT
8537 self._registered[fd] = eventmask
# invalidate the cached select() argument lists
8538 self._select_args = None
8540 def unregister(self, fd):
8541 self._select_args = None
8542 del self._registered[fd]
8544 def poll(self, *args):
8547 "poll expected at most 2 arguments, got " + \
8548 repr(1 + len(args)))
8554 select_args = self._select_args
8555 if select_args is None:
8556 select_args = [self._registered.keys(), [], []]
8558 if timeout is not None:
8559 select_args = select_args[:]
8560 # Translate poll() timeout args to select() timeout args:
8562 # | units | value(s) for indefinite block
8563 # ---------|--------------|------------------------------
8564 # poll | milliseconds | omitted, negative, or None
8565 # ---------|--------------|------------------------------
8566 # select | seconds | omitted
8567 # ---------|--------------|------------------------------
8569 if timeout is not None and timeout < 0:
8571 if timeout is not None:
8572 select_args.append(timeout / 1000)
8574 select_events = select.select(*select_args)
# only readability (POLLIN) events are reported by this emulation
8576 for fd in select_events[0]:
8577 poll_events.append((fd, PollConstants.POLLIN))
# NOTE(review): elided extraction; code left byte-identical, comments only.
# A FIFO queue of asynchronous tasks that starts up to max_jobs of them
# concurrently; exit listeners prune running_tasks automatically.
8580 class SequentialTaskQueue(SlotObject):
8582 __slots__ = ("max_jobs", "running_tasks") + \
8583 ("_dirty", "_scheduling", "_task_queue")
8585 def __init__(self, **kwargs):
8586 SlotObject.__init__(self, **kwargs)
8587 self._task_queue = deque()
8588 self.running_tasks = set()
8589 if self.max_jobs is None:
8593 def add(self, task):
8594 self._task_queue.append(task)
8597 def addFront(self, task):
8598 self._task_queue.appendleft(task)
# schedule() (def line elided) — start queued tasks while capacity remains
8609 if self._scheduling:
8610 # Ignore any recursive schedule() calls triggered via
8611 # self._task_exit().
8614 self._scheduling = True
8616 task_queue = self._task_queue
8617 running_tasks = self.running_tasks
8618 max_jobs = self.max_jobs
8619 state_changed = False
# max_jobs is True means unlimited concurrency
8621 while task_queue and \
8622 (max_jobs is True or len(running_tasks) < max_jobs):
8623 task = task_queue.popleft()
8624 cancelled = getattr(task, "cancelled", None)
8626 running_tasks.add(task)
8627 task.addExitListener(self._task_exit)
8629 state_changed = True
8632 self._scheduling = False
8634 return state_changed
8636 def _task_exit(self, task):
# docstring delimiters elided in this extraction
8638 Since we can always rely on exit listeners being called, the set of
8639 running tasks is always pruned automatically and there is never any need
8640 to actively prune it.
8642 self.running_tasks.remove(task)
8643 if self._task_queue:
# clear() (def line elided) — drop queued tasks and detach running ones
8647 self._task_queue.clear()
8648 running_tasks = self.running_tasks
8649 while running_tasks:
8650 task = running_tasks.pop()
8651 task.removeExitListener(self._task_exit)
8655 def __nonzero__(self):
8656 return bool(self._task_queue or self.running_tasks)
# __len__ (def line elided)
8659 return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None until first call.
8661 _can_poll_device = None
8663 def can_poll_device():
# docstring delimiters elided in this extraction
8665 Test if it's possible to use poll() on a device such as a pty. This
8666 is known to fail on Darwin.
8668 @returns: True if poll() on a device succeeds, False otherwise.
8671 global _can_poll_device
8672 if _can_poll_device is not None:
8673 return _can_poll_device
8675 if not hasattr(select, "poll"):
8676 _can_poll_device = False
8677 return _can_poll_device
# open attempt is wrapped in an elided try/except — failure means no poll support
8680 dev_null = open('/dev/null', 'rb')
8682 _can_poll_device = False
8683 return _can_poll_device
8686 p.register(dev_null.fileno(), PollConstants.POLLIN)
8688 invalid_request = False
8689 for f, event in p.poll():
8690 if event & PollConstants.POLLNVAL:
8691 invalid_request = True
# a POLLNVAL event means poll() rejected the device fd (e.g. on Darwin)
8695 _can_poll_device = not invalid_request
8696 return _can_poll_device
def create_poll_instance():
	"""
	Return a polling object: a native select.poll() instance when
	poll() is usable on devices, otherwise a PollSelectAdapter that
	emulates poll() on top of select().
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
# NOTE(review): elided extraction; code left byte-identical, comments only.
# Base class implementing a poll()-driven event loop: handlers are registered
# per file descriptor and dispatched from a shared event queue.
8708 class PollScheduler(object):
8710 class _sched_iface_class(SlotObject):
8711 __slots__ = ("register", "schedule", "unregister")
# __init__ (def line elided)
8715 self._max_load = None
8717 self._poll_event_queue = []
8718 self._poll_event_handlers = {}
8719 self._poll_event_handler_ids = {}
8720 # Increment id for each new handler.
8721 self._event_handler_id = 0
8722 self._poll_obj = create_poll_instance()
8723 self._scheduling = False
8725 def _schedule(self):
# docstring delimiters elided in this extraction
8727 Calls _schedule_tasks() and automatically returns early from
8728 any recursive calls to this method that the _schedule_tasks()
8729 call might trigger. This makes _schedule() safe to call from
8730 inside exit listeners.
8732 if self._scheduling:
8734 self._scheduling = True
# presumably wrapped in try/finally so _scheduling is always reset — TODO confirm
8736 return self._schedule_tasks()
8738 self._scheduling = False
8740 def _running_job_count(self):
8743 def _can_add_job(self):
8744 max_jobs = self._max_jobs
8745 max_load = self._max_load
8747 if self._max_jobs is not True and \
8748 self._running_job_count() >= self._max_jobs:
# load-average throttling only applies once at least one job is running
8751 if max_load is not None and \
8752 (max_jobs is True or max_jobs > 1) and \
8753 self._running_job_count() >= 1:
8755 avg1, avg5, avg15 = os.getloadavg()
8756 except (AttributeError, OSError), e:
8757 writemsg("!!! getloadavg() failed: %s\n" % (e,),
8762 if avg1 >= max_load:
8767 def _poll(self, timeout=None):
# docstring delimiters elided in this extraction
8769 All poll() calls pass through here. The poll events
8770 are added directly to self._poll_event_queue.
8771 In order to avoid endless blocking, this raises
8772 StopIteration if timeout is None and there are
8773 no file descriptors to poll.
8775 if not self._poll_event_handlers:
8777 if timeout is None and \
8778 not self._poll_event_handlers:
8779 raise StopIteration(
8780 "timeout is None and there are no poll() event handlers")
8782 # The following error is known to occur with Linux kernel versions
8785 # select.error: (4, 'Interrupted system call')
8787 # This error has been observed after a SIGSTOP, followed by SIGCONT.
8788 # Treat it similar to EAGAIN if timeout is None, otherwise just return
8789 # without any events.
8792 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
8794 except select.error, e:
8795 writemsg_level("\n!!! select error: %s\n" % (e,),
8796 level=logging.ERROR, noiselevel=-1)
8798 if timeout is not None:
8801 def _next_poll_event(self, timeout=None):
# docstring delimiters elided in this extraction
8803 Since the _schedule_wait() loop is called by event
8804 handlers from _poll_loop(), maintain a central event
8805 queue for both of them to share events from a single
8806 poll() call. In order to avoid endless blocking, this
8807 raises StopIteration if timeout is None and there are
8808 no file descriptors to poll.
8810 if not self._poll_event_queue:
8812 return self._poll_event_queue.pop()
8814 def _poll_loop(self):
8816 event_handlers = self._poll_event_handlers
8817 event_handled = False
# dispatch events until every handler has unregistered itself
8820 while event_handlers:
8821 f, event = self._next_poll_event()
8822 handler, reg_id = event_handlers[f]
8824 event_handled = True
8825 except StopIteration:
8826 event_handled = True
8828 if not event_handled:
8829 raise AssertionError("tight loop")
8831 def _schedule_yield(self):
# docstring delimiters elided in this extraction
8833 Schedule for a short period of time chosen by the scheduler based
8834 on internal state. Synchronous tasks should call this periodically
8835 in order to allow the scheduler to service pending poll events. The
8836 scheduler will call poll() exactly once, without blocking, and any
8837 resulting poll events will be serviced.
8839 event_handlers = self._poll_event_handlers
8842 if not event_handlers:
8843 return bool(events_handled)
# non-blocking poll (elided call with timeout=0 presumably precedes this) — TODO confirm
8845 if not self._poll_event_queue:
8849 while event_handlers and self._poll_event_queue:
8850 f, event = self._next_poll_event()
8851 handler, reg_id = event_handlers[f]
8854 except StopIteration:
8857 return bool(events_handled)
8859 def _register(self, f, eventmask, handler):
# docstring delimiters elided in this extraction
8862 @return: A unique registration id, for use in schedule() or
8865 if f in self._poll_event_handlers:
8866 raise AssertionError("fd %d is already registered" % f)
8867 self._event_handler_id += 1
8868 reg_id = self._event_handler_id
8869 self._poll_event_handler_ids[reg_id] = f
8870 self._poll_event_handlers[f] = (handler, reg_id)
8871 self._poll_obj.register(f, eventmask)
8874 def _unregister(self, reg_id):
8875 f = self._poll_event_handler_ids[reg_id]
8876 self._poll_obj.unregister(f)
8877 del self._poll_event_handlers[f]
8878 del self._poll_event_handler_ids[reg_id]
8880 def _schedule_wait(self, wait_ids):
# docstring delimiters elided in this extraction
8882 Schedule until wait_id is not longer registered
8885 @param wait_id: a task id to wait for
8887 event_handlers = self._poll_event_handlers
8888 handler_ids = self._poll_event_handler_ids
8889 event_handled = False
# accept a single id for convenience; normalize to a frozenset
8891 if isinstance(wait_ids, int):
8892 wait_ids = frozenset([wait_ids])
8895 while wait_ids.intersection(handler_ids):
8896 f, event = self._next_poll_event()
8897 handler, reg_id = event_handlers[f]
8899 event_handled = True
8900 except StopIteration:
8901 event_handled = True
8903 return event_handled
# NOTE(review): elided extraction; code left byte-identical, comments only.
8905 class QueueScheduler(PollScheduler):
# docstring delimiters elided in this extraction
8908 Add instances of SequentialTaskQueue and then call run(). The
8909 run() method returns when no tasks remain.
8912 def __init__(self, max_jobs=None, max_load=None):
8913 PollScheduler.__init__(self)
8915 if max_jobs is None:
8918 self._max_jobs = max_jobs
8919 self._max_load = max_load
# expose register/schedule/unregister to tasks via the scheduler interface
8920 self.sched_iface = self._sched_iface_class(
8921 register=self._register,
8922 schedule=self._schedule_wait,
8923 unregister=self._unregister)
8926 self._schedule_listeners = []
# add() (def line elided)
8929 self._queues.append(q)
8931 def remove(self, q):
8932 self._queues.remove(q)
# run() (def line elided) — drive the poll loop until all queues drain
8936 while self._schedule():
8939 while self._running_job_count():
8942 def _schedule_tasks(self):
# docstring delimiters elided in this extraction
8945 @returns: True if there may be remaining tasks to schedule,
8948 while self._can_add_job():
8949 n = self._max_jobs - self._running_job_count()
8953 if not self._start_next_job(n):
8956 for q in self._queues:
8961 def _running_job_count(self):
8963 for q in self._queues:
8964 job_count += len(q.running_tasks)
8965 self._jobs = job_count
8968 def _start_next_job(self, n=1):
# try each queue in order until n additional jobs have been started
8970 for q in self._queues:
8971 initial_job_count = len(q.running_tasks)
8973 final_job_count = len(q.running_tasks)
8974 if final_job_count > initial_job_count:
8975 started_count += (final_job_count - initial_job_count)
8976 if started_count >= n:
8978 return started_count
# NOTE(review): elided extraction; code left byte-identical, comments only.
8980 class TaskScheduler(object):
# docstring delimiters elided in this extraction
8983 A simple way to handle scheduling of AsynchrousTask instances. Simply
8984 add tasks and call run(). The run() method returns when no tasks remain.
8987 def __init__(self, max_jobs=None, max_load=None):
# compose a single SequentialTaskQueue with a QueueScheduler and
# delegate run() directly to the scheduler
8988 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
8989 self._scheduler = QueueScheduler(
8990 max_jobs=max_jobs, max_load=max_load)
8991 self.sched_iface = self._scheduler.sched_iface
8992 self.run = self._scheduler.run
8993 self._scheduler.add(self._queue)
8995 def add(self, task):
8996 self._queue.add(task)
# schedule() delegation (def line elided)
8999 self._scheduler.schedule()
# NOTE(review): elided extraction; code left byte-identical, comments only.
# Renders a one-line job status display; uses object.__setattr__ throughout
# because __setattr__ is overridden to trigger redisplay on property changes.
9001 class JobStatusDisplay(object):
9003 _bound_properties = ("curval", "failed", "running")
9004 _jobs_column_width = 48
9006 # Don't update the display unless at least this much
9007 # time has passed, in units of seconds.
9008 _min_display_latency = 2
9010 _default_term_codes = {
# entries elided in this extraction
9016 _termcap_name_map = {
9017 'carriage_return' : 'cr',
# remaining entries elided in this extraction
9022 def __init__(self, out=sys.stdout, quiet=False):
9023 object.__setattr__(self, "out", out)
9024 object.__setattr__(self, "quiet", quiet)
9025 object.__setattr__(self, "maxval", 0)
9026 object.__setattr__(self, "merges", 0)
9027 object.__setattr__(self, "_changed", False)
9028 object.__setattr__(self, "_displayed", False)
9029 object.__setattr__(self, "_last_display_time", 0)
9030 object.__setattr__(self, "width", 80)
9033 isatty = hasattr(out, "isatty") and out.isatty()
9034 object.__setattr__(self, "_isatty", isatty)
# fall back to hard-coded codes when termcap initialization fails
9035 if not isatty or not self._init_term():
9037 for k, capname in self._termcap_name_map.iteritems():
9038 term_codes[k] = self._default_term_codes[capname]
9039 object.__setattr__(self, "_term_codes", term_codes)
# NOTE(review): elided extraction; code left byte-identical, comments only.
9041 def _init_term(self):
# docstring delimiters elided in this extraction
9043 Initialize term control codes.
9045 @returns: True if term codes were successfully initialized,
9049 term_type = os.environ.get("TERM", "vt100")
# curses import/setup is wrapped in elided try blocks
9055 curses.setupterm(term_type, self.out.fileno())
9056 tigetstr = curses.tigetstr
9057 except curses.error:
9062 if tigetstr is None:
9066 for k, capname in self._termcap_name_map.iteritems():
9067 code = tigetstr(capname)
# missing capability: substitute the hard-coded default
9069 code = self._default_term_codes[capname]
9070 term_codes[k] = code
9071 object.__setattr__(self, "_term_codes", term_codes)
# NOTE(review): elided extraction; code left byte-identical, comments only.
# Prefix a status message with the '>>> ' marker.
9074 def _format_msg(self, msg):
9075 return ">>> %s" % msg
# _erase (def line elided): write CR + clear-to-end-of-line to wipe the status line
9079 self._term_codes['carriage_return'] + \
9080 self._term_codes['clr_eol'])
9082 self._displayed = False
9084 def _display(self, line):
9085 self.out.write(line)
9087 self._displayed = True
9089 def _update(self, msg):
# non-tty output falls back to plain newline-terminated lines
9092 if not self._isatty:
9093 out.write(self._format_msg(msg) + self._term_codes['newline'])
9095 self._displayed = True
9101 self._display(self._format_msg(msg))
9103 def displayMessage(self, msg):
9105 was_displayed = self._displayed
# erase an on-screen status line before printing the message (elided call) — TODO confirm
9107 if self._isatty and self._displayed:
9110 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9112 self._displayed = False
9115 self._changed = True
9121 for name in self._bound_properties:
9122 object.__setattr__(self, name, 0)
9125 self.out.write(self._term_codes['newline'])
9127 self._displayed = False
9129 def __setattr__(self, name, value):
9130 old_value = getattr(self, name)
9131 if value == old_value:
9133 object.__setattr__(self, name, value)
9134 if name in self._bound_properties:
9135 self._property_change(name, old_value, value)
def _property_change(self, name, old_value, new_value):
    # A bound property changed: mark the display dirty so the next
    # display() call repaints.  The arguments are unused here.
    self._changed = True

def _load_avg_str(self):
    # Render the system load average as a comma-separated string.
    # NOTE(review): the "try:" line, the fallback return for platforms
    # without getloadavg(), and the computation of "digits" are missing
    # from this excerpt -- verify against upstream emerge.
    avg = os.getloadavg()
    except (AttributeError, OSError), e:
    return ", ".join(("%%.%df" % digits ) % x for x in avg)
# display() fragment.  NOTE(review): the "def display(self):" line and
# the docstring delimiters are missing from this excerpt.
    Display status on stdout, but only if something has
    changed since the last call.
# Rate-limit tty repaints using _min_display_latency.
current_time = time.time()
time_delta = current_time - self._last_display_time
if self._displayed and \
    # NOTE(review): the rest of this condition is missing from this
    # excerpt.
    if not self._isatty:
        # NOTE(review): branch body missing here.
    if time_delta < self._min_display_latency:
        # NOTE(review): early return missing here.
self._last_display_time = current_time
self._changed = False
self._display_status()
def _display_status(self):
    """Compose and paint the one-line "Jobs: ..." status summary."""
    # Don't use len(self._completed_tasks) here since that also
    # can include uninstall tasks.
    curval_str = str(self.curval)
    maxval_str = str(self.maxval)
    running_str = str(self.running)
    failed_str = str(self.failed)
    load_avg_str = self._load_avg_str()

    # Render once into a styled (color) stream while mirroring a plain
    # copy via the write listener.
    color_output = StringIO.StringIO()
    plain_output = StringIO.StringIO()
    style_file = portage.output.ConsoleStyleFile(color_output)
    style_file.write_listener = plain_output
    style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
    style_writer.style_listener = style_file.new_styles
    f = formatter.AbstractFormatter(style_writer)

    number_style = "INFORM"
    f.add_literal_data("Jobs: ")
    f.push_style(number_style)
    f.add_literal_data(curval_str)
    # NOTE(review): matching f.pop_style() calls and the guards around
    # the optional "running"/"failed" segments appear to be missing
    # throughout this excerpt -- verify against upstream emerge.
    f.add_literal_data(" of ")
    f.push_style(number_style)
    f.add_literal_data(maxval_str)
    f.add_literal_data(" complete")

    f.add_literal_data(", ")
    f.push_style(number_style)
    f.add_literal_data(running_str)
    f.add_literal_data(" running")

    f.add_literal_data(", ")
    f.push_style(number_style)
    f.add_literal_data(failed_str)
    f.add_literal_data(" failed")

    # Pad so the load-average column lines up between repaints.
    padding = self._jobs_column_width - len(plain_output.getvalue())
    # NOTE(review): a "padding > 0" guard appears to be missing here.
    f.add_literal_data(padding * " ")

    f.add_literal_data("Load avg: ")
    f.add_literal_data(load_avg_str)

    # Truncate to fit width, to avoid making the terminal scroll if the
    # line overflows (happens when the load average is large).
    plain_output = plain_output.getvalue()
    if self._isatty and len(plain_output) > self.width:
        # Use plain_output here since it's easier to truncate
        # properly than the color output which contains console
        # escape sequences.
        self._update(plain_output[:self.width])
    # NOTE(review): "else:" missing here.
        self._update(color_output.getvalue())

    # NOTE(review): an xterm-titles guard appears to be missing here.
    xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):
    # Parallel-build scheduler for a resolved merge list.
    # NOTE(review): this excerpt of the class is sampled; missing lines
    # are marked where relevant.

    # Options under which blocker checks are skipped entirely.
    _opts_ignore_blockers = \
        frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri",
        "--nodeps", "--pretend"])

    # Options that force foreground (non-background) operation.
    _opts_no_background = \
        frozenset(["--pretend",
        "--fetchonly", "--fetch-all-uri"])

    # Options under which emerge never re-execs itself after a
    # portage upgrade.
    _opts_no_restart = frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri", "--pretend"])

    # Options that must not leak into an automatic --resume invocation.
    _bad_resume_opts = set(["--ask", "--changelog",
        "--resume", "--skipfirst"])

    # All parallel-fetch output is serialized into this log file.
    _fetch_log = "/var/log/emerge-fetch.log"

    class _iface_class(SlotObject):
        # Callbacks handed to running tasks so they can talk back to
        # the scheduler.
        __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
            "dblinkElog", "fetch", "register", "schedule",
            "scheduleSetup", "scheduleUnpack", "scheduleYield",
        # NOTE(review): the tail of this __slots__ tuple is missing
        # from this excerpt.

    class _fetch_iface_class(SlotObject):
        # Interface passed to fetch tasks: shared log plus a scheduler
        # hook.
        __slots__ = ("log_file", "schedule")

    # One task queue per category of work (see __init__).
    _task_queues_class = slot_dict_class(
        ("merge", "jobs", "fetch", "unpack"), prefix="")

    class _build_opts_class(SlotObject):
        # Boolean snapshots of the build-related command-line options.
        __slots__ = ("buildpkg", "buildpkgonly",
            "fetch_all_uri", "fetchonly", "pretend")

    class _binpkg_opts_class(SlotObject):
        # Boolean snapshots of the binary-package options.
        __slots__ = ("fetchonly", "getbinpkg", "pretend")

    class _pkg_count_class(SlotObject):
        # Progress counters displayed as "N of M".
        __slots__ = ("curval", "maxval")

    class _emerge_log_class(SlotObject):
        # Thin wrapper that routes log calls through emergelog().
        __slots__ = ("xterm_titles",)
def log(self, *pargs, **kwargs):
    """Proxy to emergelog(), suppressing short_msg when titles are off."""
    titles = self.xterm_titles
    if not titles:
        # A short_msg would interfere with the scheduler's status display.
        kwargs.pop("short_msg", None)
    emergelog(titles, *pargs, **kwargs)
class _failed_pkg(SlotObject):
    # Record of a failed build/install, kept for the final summary.
    __slots__ = ("build_dir", "build_log", "pkg", "returncode")

class _ConfigPool(object):
    """Interface for a task to temporarily allocate a config
    instance from a pool. This allows a task to be constructed
    long before the config instance actually becomes needed, like
    when prefetchers are constructed for the whole merge list."""
    __slots__ = ("_root", "_allocate", "_deallocate")
    def __init__(self, root, allocate, deallocate):
        # NOTE(review): "self._root = root" appears to be missing from
        # this excerpt -- verify against upstream emerge.
        self._allocate = allocate
        self._deallocate = deallocate
    # NOTE(review): the "def allocate(self):" line is missing from this
    # excerpt.
        return self._allocate(self._root)
    def deallocate(self, settings):
        self._deallocate(settings)

class _unknown_internal_error(portage.exception.PortageException):
    """
    Used internally to terminate scheduling. The specific reason for
    the failure should have been dumped to stderr.
    """
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
    spinner, mergelist, favorites, digraph):
    """Wire up all scheduler state for one merge-list run.

    NOTE(review): this excerpt is missing interleaved source lines
    (marked below where it matters) -- verify against upstream emerge.
    """
    PollScheduler.__init__(self)
    self.settings = settings
    self.target_root = settings["ROOT"]
    # NOTE(review): "self.trees = trees" appears to be missing here.
    self.myopts = myopts
    self._spinner = spinner
    self._mtimedb = mtimedb
    self._mergelist = mergelist
    self._favorites = favorites
    self._args_set = InternalPackageSet(favorites)

    # Snapshot relevant command-line flags as booleans, mapping each
    # slot name to its "--dashed" option form.
    self._build_opts = self._build_opts_class()
    for k in self._build_opts.__slots__:
        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
    self._binpkg_opts = self._binpkg_opts_class()
    for k in self._binpkg_opts.__slots__:
        setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

    self._logger = self._emerge_log_class()
    self._task_queues = self._task_queues_class()
    for k in self._task_queues.allowed_keys:
        setattr(self._task_queues, k,
            SequentialTaskQueue())
    self._status_display = JobStatusDisplay()
    self._max_load = myopts.get("--load-average")
    max_jobs = myopts.get("--jobs")
    if max_jobs is None:
        # NOTE(review): the default assignment (presumably
        # "max_jobs = 1") is missing from this excerpt.
    self._set_max_jobs(max_jobs)

    # The root where the currently running
    # portage instance is installed.
    self._running_root = trees["/"]["root_config"]
    # NOTE(review): debug-flag handling lines missing around here.
    if settings.get("PORTAGE_DEBUG", "") == "1":
    self.pkgsettings = {}
    self._config_pool = {}
    self._blocker_db = {}
    # NOTE(review): the "for root in trees:" header is missing here.
        self._config_pool[root] = []
        self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

    fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
        schedule=self._schedule_fetch)
    self._sched_iface = self._iface_class(
        dblinkEbuildPhase=self._dblink_ebuild_phase,
        dblinkDisplayMerge=self._dblink_display_merge,
        dblinkElog=self._dblink_elog,
        fetch=fetch_iface, register=self._register,
        schedule=self._schedule_wait,
        scheduleSetup=self._schedule_setup,
        scheduleUnpack=self._schedule_unpack,
        scheduleYield=self._schedule_yield,
        unregister=self._unregister)

    # Weak values: a prefetcher can be collected once nothing else
    # references it.
    self._prefetchers = weakref.WeakValueDictionary()
    self._pkg_queue = []
    self._completed_tasks = set()

    self._failed_pkgs = []
    self._failed_pkgs_all = []
    self._failed_pkgs_die_msgs = []
    self._post_mod_echo_msgs = []
    self._parallel_fetch = False
    merge_count = len([x for x in mergelist \
        if isinstance(x, Package) and x.operation == "merge"])
    self._pkg_count = self._pkg_count_class(
        curval=0, maxval=merge_count)
    self._status_display.maxval = self._pkg_count.maxval

    # The load average takes some time to respond when new
    # jobs are added, so we need to limit the rate of adding
    # new jobs (see _job_delay()).
    self._job_delay_max = 10
    self._job_delay_factor = 1.0
    self._job_delay_exp = 1.5
    self._previous_job_start_time = None

    self._set_digraph(digraph)

    # This is used to memoize the _choose_pkg() result when
    # no packages can be chosen until one of the existing
    # jobs completes.
    self._choose_pkg_return_early = False

    features = self.settings.features
    if "parallel-fetch" in features and \
        not ("--pretend" in self.myopts or \
        "--fetch-all-uri" in self.myopts or \
        "--fetchonly" in self.myopts):
        if "distlocks" not in features:
            # parallel-fetch needs distlocks; warn and leave it off.
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            portage.writemsg(red("!!!")+" parallel-fetching " + \
                "requires the distlocks feature enabled"+"\n",
            # NOTE(review): "noiselevel=-1)" continuation missing here.
            portage.writemsg(red("!!!")+" you have it disabled, " + \
                "thus parallel-fetching is being disabled"+"\n",
            # NOTE(review): "noiselevel=-1)" continuation missing here.
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
        elif len(mergelist) > 1:
            self._parallel_fetch = True

    if self._parallel_fetch:
        # clear out existing fetch log if it exists
        # NOTE(review): "try:" missing here.
        open(self._fetch_log, 'w')
        except EnvironmentError:

    # Identify the installed portage instance (if any) so we can detect
    # when a merge replaces it and a restart is needed.
    self._running_portage = None
    portage_match = self._running_root.trees["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM)
    # NOTE(review): an "if portage_match:" guard is missing here.
    cpv = portage_match.pop()
    self._running_portage = self._pkg(cpv, "installed",
        self._running_root, installed=True)
def _poll(self, timeout=None):
    # Delegate to the base class poll implementation.
    # NOTE(review): one line between the def and this call is missing
    # from this excerpt -- verify against upstream emerge.
    PollScheduler._poll(self, timeout=timeout)
def _set_max_jobs(self, max_jobs):
    """Record the job limit and propagate it to the jobs queue."""
    self._task_queues.jobs.max_jobs = max_jobs
    self._max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @returns: True if background mode is enabled, False otherwise.
    """
    # NOTE(review): this excerpt is missing interleaved lines; gaps are
    # marked below -- verify against upstream emerge.
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))

    # NOTE(review): a guard (likely "if background:") is missing here.
    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        # Interactive packages need the terminal; drop to foreground
        # and serial operation.
        # NOTE(review): "background = False" and message-list setup
        # appear to be missing here.
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            # NOTE(review): a root != "/" guard is missing here.
            pkg_str += " for " + pkg.root
            # NOTE(review): a "msg.append(pkg_str)" line is missing here.
        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        if self._max_jobs is True or self._max_jobs > 1:
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)

    self._status_display.quiet = \
        # NOTE(review): a continuation line of this expression is
        # missing from this excerpt.
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)

    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet
    # NOTE(review): "return background" is missing from this excerpt.
def _get_interactive_tasks(self):
    """Return merge tasks whose PROPERTIES contain "interactive"."""
    from portage import flatten
    from portage.dep import use_reduce, paren_reduce
    interactive_tasks = []
    for task in self._mergelist:
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
            # NOTE(review): "continue" and the following "try:" are
            # missing from this excerpt.
        properties = flatten(use_reduce(paren_reduce(
            task.metadata["PROPERTIES"]), uselist=task.use.enabled))
        except portage.exception.InvalidDependString, e:
            # An unparsable PROPERTIES string aborts scheduling.
            show_invalid_depstring_notice(task,
                task.metadata["PROPERTIES"], str(e))
            raise self._unknown_internal_error()
        if "interactive" in properties:
            interactive_tasks.append(task)
    return interactive_tasks
def _set_digraph(self, digraph):
    # Keep the dependency graph only when it's needed for parallel
    # scheduling decisions.
    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
        # NOTE(review): lines missing from this excerpt here.
        self._digraph = None
        # NOTE(review): an early "return" appears to be missing here.
    self._digraph = digraph
    self._prune_digraph()

def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    # NOTE(review): the enclosing loop header (likely "while True:") is
    # missing from this excerpt.
    for node in graph.root_nodes():
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            # NOTE(review): one clause of this condition is missing here.
            node in completed_tasks:
            removed_nodes.add(node)
    # NOTE(review): a guard around this update appears to be missing.
    graph.difference_update(removed_nodes)
    if not removed_nodes:
        # NOTE(review): loop-exit ("break") missing here.
    removed_nodes.clear()

class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # NOTE(review): a default/guard line is missing before this
        # assignment -- as excerpted it would raise IndexError when
        # pargs is empty; verify against upstream emerge.
        self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    self._task_queues.fetch.addFront(fetcher)

def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.addFront(setup_phase)

def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)

def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    """
    # NOTE(review): the inner function's "def" line and the final
    # return of that callable are missing from this excerpt.
        return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    # Build dblink objects for installed packages that block new_pkg.
    if self._opts_ignore_blockers.intersection(self.myopts):
        # NOTE(review): the early return is missing from this excerpt.

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # NOTE(review): the rest of this comment and the gc.collect() call
    # are missing from this excerpt.

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
            # NOTE(review): "continue" missing here -- presumably
            # same-slot packages are skipped; verify upstream.
        if new_pkg.cpv == blocking_pkg.cpv:
            # NOTE(review): "continue" missing here.
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Translate a portage.dblink instance into this scheduler's
    Package representation."""
    tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_cfg = self.trees[pkg_dblink.myroot]["root_config"]
    return self._pkg(pkg_dblink.mycpv, tree_type, root_cfg,
        installed=(tree_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    # Append msg to the given build log file.
    f = open(log_path, 'a')
    # NOTE(review): the write/close lines (likely try/finally) are
    # missing from this excerpt -- verify against upstream emerge.

def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    # Route elog messages to the build log in background mode, or to
    # the normal output stream otherwise.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    # NOTE(review): default "out"/"log_file" bindings are missing here.
    background = self._background

    if background and log_path is not None:
        log_file = open(log_path, 'a')
        # NOTE(review): "out = log_file" and "try:" missing here.

    # NOTE(review): the "for msg in msgs:" header is missing here.
    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    # NOTE(review): "finally:" missing here.
    if log_file is not None:
        # NOTE(review): "log_file.close()" missing here.

def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    # Display a merge-phase message, honoring background mode and the
    # per-package log file.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if log_path is None:
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    # NOTE(review): the "else:" branch header and its foreground-echo
    # guard are missing from this excerpt.
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
        self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")

    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    # NOTE(review): a wait for phase completion appears to be missing
    # here (source excerpt is sampled) -- verify against upstream.
    return ebuild_phase.returncode
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
        # NOTE(review): the early success return is missing from this
        # excerpt.

    shown_verifying_msg = False
    # NOTE(review): "quiet_settings = {}" initializer missing here.
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        # Clone a quiet config per root so digestcheck output is terse.
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
    # NOTE(review): cleanup line(s) missing here.

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
            # NOTE(review): "continue" missing here.

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
            # NOTE(review): failure return missing here.
    # NOTE(review): final success return missing from this excerpt.
def _add_prefetchers(self):
    # Spawn background fetch tasks for the merge list.
    if not self._parallel_fetch:
        # NOTE(review): an early "return" is missing from this excerpt.

    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")

        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts

        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher

def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # NOTE(review): the "prefetcher = None" initializer is missing from
    # this excerpt -- verify against upstream emerge.
    if not isinstance(pkg, Package):
        # NOTE(review): branch body ("pass") missing here.
    elif pkg.type_name == "ebuild":
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)
    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        prefetcher = BinpkgFetcher(background=True,
            logfile=self._fetch_log, pkg=pkg,
            scheduler=self._sched_iface)
    # NOTE(review): "return prefetcher" missing from this excerpt.
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.
    @returns: True if a restart is scheduled, False otherwise.
    """
    if self._opts_no_restart.intersection(self.myopts):
        # NOTE(review): "return False" missing from this excerpt.

    mergelist = self._mergelist

    for i, pkg in enumerate(mergelist):
        # A restart only matters when packages remain after the
        # portage replacement itself.
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
            # NOTE(review): "return True" missing here.

    # NOTE(review): final "return False" missing from this excerpt.

def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            # Restart only when the version actually changes.
            return cmp(pkg, self._running_portage) != 0
        # NOTE(review): the no-running-portage return is missing here.
    # NOTE(review): final "return False" missing from this excerpt.
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):
        # NOTE(review): early "return" missing from this excerpt.

    if not self._is_restart_necessary(pkg):
        # NOTE(review): early "return" missing here.

    if pkg == self._mergelist[-1]:
        # Nothing is left to merge, so there is no reason to re-exec.
        # NOTE(review): early "return" missing here.

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        # NOTE(review): the final string continuation is missing from
        # this excerpt.

    mtimedb["resume"]["mergelist"].remove(list(pkg))
    # NOTE(review): an mtimedb.commit() call appears to be missing
    # here -- verify against upstream emerge.
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            # NOTE(review): a flag-vs-value branch ("if myarg is True:"
            # / "else:") appears to be missing around these two
            # appends -- as excerpted both would run; verify upstream.
            mynewargv.append(myopt)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
# NOTE(review): fragment of the merge() method; its "def" line and many
# interleaved lines are missing from this excerpt (source appears
# sampled) -- verify against upstream emerge.
if "--resume" in self.myopts:
    # We're resuming a previously interrupted merge.
    portage.writemsg_stdout(
        colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
    self._logger.log(" *** Resuming merge...")

self._save_resume_list()

# NOTE(review): "try:" missing here.
self._background = self._background_mode()
except self._unknown_internal_error:
    # NOTE(review): failure return missing here.

for root in self.trees:
    root_config = self.trees[root]["root_config"]

    # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
    # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
    # for ensuring sane $PWD (bug #239560) and storing elog messages.
    tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
    if not tmpdir or not os.path.isdir(tmpdir):
        msg = "The directory specified in your " + \
            "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
            "does not exist. Please create this " + \
            "directory or correct your PORTAGE_TMPDIR setting."
        msg = textwrap.wrap(msg, 70)
        out = portage.output.EOutput()
        # NOTE(review): the error-print loop and failure return are
        # missing here.

    if self._background:
        root_config.settings.unlock()
        root_config.settings["PORTAGE_BACKGROUND"] = "1"
        root_config.settings.backup_changes("PORTAGE_BACKGROUND")
        root_config.settings.lock()

    self.pkgsettings[root] = portage.config(
        clone=root_config.settings)

rval = self._check_manifests()
if rval != os.EX_OK:
    # NOTE(review): "return rval" missing here.

keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs

# Retry loop: with --keep-going, drop failed packages from the resume
# list and try the remainder again.
# NOTE(review): the loop header (likely "while True:") is missing here.
rval = self._merge()
if rval == os.EX_OK or fetchonly or not keep_going:
    # NOTE(review): "break" missing here.
if "resume" not in mtimedb:
    # NOTE(review): "break" missing here.
mergelist = self._mtimedb["resume"].get("mergelist")
# NOTE(review): empty-mergelist handling missing here.

for failed_pkg in failed_pkgs:
    mergelist.remove(list(failed_pkg.pkg))

self._failed_pkgs_all.extend(failed_pkgs)
# NOTE(review): "del failed_pkgs[:]" and further checks missing here.

if not self._calc_resume_list():
    # NOTE(review): "break" missing here.

clear_caches(self.trees)
if not self._mergelist:
    # NOTE(review): "break" missing here.

self._save_resume_list()
self._pkg_count.curval = 0
self._pkg_count.maxval = len([x for x in self._mergelist \
    if isinstance(x, Package) and x.operation == "merge"])
self._status_display.maxval = self._pkg_count.maxval

self._logger.log(" *** Finished. Cleaning up...")

# NOTE(review): a guard is likely missing before this extend.
self._failed_pkgs_all.extend(failed_pkgs)

background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:

    # If only one package failed then just show it's
    # whole log for easy viewing.
    failed_pkg = self._failed_pkgs_all[-1]
    build_dir = failed_pkg.build_dir
    # NOTE(review): "log_file = None" default missing here.

    log_paths = [failed_pkg.build_log]

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        # NOTE(review): "try:" missing here.
        log_file = open(log_path, 'rb')
        # NOTE(review): the IOError/EnvironmentError handler is
        # missing here.

    if log_file is not None:
        # NOTE(review): "try:" missing here.
        for line in log_file:
            writemsg_level(line, noiselevel=-1)
        # NOTE(review): "finally: log_file.close()" missing here.
        failure_log_shown = True

# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()

if background and not failure_log_shown and \
    self._failed_pkgs_all and \
    self._failed_pkgs_die_msgs and \
    not mod_echo_output:

    printer = portage.output.EOutput()
    for mysettings, key, logentries in self._failed_pkgs_die_msgs:
        # NOTE(review): the root_msg default ("") is missing here.
        if mysettings["ROOT"] != "/":
            root_msg = " merged to %s" % mysettings["ROOT"]
        # NOTE(review): the "else:" branch is missing here.
        printer.einfo("Error messages for package %s%s:" % \
            (colorize("INFORM", key), root_msg))

        for phase in portage.const.EBUILD_PHASES:
            if phase not in logentries:
                # NOTE(review): "continue" missing here.
            for msgtype, msgcontent in logentries[phase]:
                if isinstance(msgcontent, basestring):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    printer.eerror(line.strip("\n"))

if self._post_mod_echo_msgs:
    for msg in self._post_mod_echo_msgs:
        # NOTE(review): the call invoking each queued msg is missing
        # from this excerpt.

if len(self._failed_pkgs_all) > 1:
    msg = "The following packages have " + \
        "failed to build or install:"
    # NOTE(review): the "prefix" setup line is missing here.
    writemsg(prefix + "\n", noiselevel=-1)
    from textwrap import wrap
    for line in wrap(msg, 72):
        writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
    writemsg(prefix + "\n", noiselevel=-1)
    for failed_pkg in self._failed_pkgs_all:
        writemsg("%s\t%s\n" % (prefix,
            colorize("INFORM", str(failed_pkg.pkg))),
            # NOTE(review): "noiselevel=-1)" continuation missing here.
    writemsg(prefix + "\n", noiselevel=-1)

# NOTE(review): the final "return rval" is missing from this excerpt.
def _elog_listener(self, mysettings, key, logentries, fulltext):
    # Collect ERROR-level elog entries for the post-run failure summary.
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    # NOTE(review): an "if errors:" guard appears to be missing here.
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))

def _locate_failure_log(self, failed_pkg):
    # Locate the log file associated with a failed package.
    # NOTE(review): several lines are missing from this excerpt --
    # verify against upstream emerge.
    build_dir = failed_pkg.build_dir
    log_paths = [failed_pkg.build_log]

    for log_path in log_paths:
        # NOTE(review): existence/None checks missing here.
        log_size = os.stat(log_path).st_size
        # NOTE(review): the size check, "return log_path", and the
        # final "return None" are missing from this excerpt.

def _add_packages(self):
    # Seed the pending-package queue from the merge list.
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            # NOTE(review): branch body missing -- presumably blockers
            # are ignored here; verify against upstream.
def _merge_exit(self, merge):
    # Exit hook for PackageMerge tasks: update bookkeeping, release the
    # task's config instance and refresh the status display counters.
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    # NOTE(review): one or more trailing lines are missing from this
    # excerpt -- verify against upstream emerge.
def _do_merge_exit(self, merge):
    # Record failure or completion state for a finished merge task.
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            # NOTE(review): the "pkg=pkg," argument line is missing
            # from this excerpt.
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

        self._status_display.failed = len(self._failed_pkgs)
        # NOTE(review): an early "return" appears to be missing here.

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)

    # NOTE(review): lines missing here (source appears sampled).
    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    # NOTE(review): the mtimedb.commit() call referenced by the comment
    # above is missing from this excerpt.
def _build_exit(self, build):
    # Exit hook for build jobs: queue the merge on success, record the
    # failure otherwise.
    if build.returncode == os.EX_OK:
        # NOTE(review): one line is missing here (source is sampled).
        merge = PackageMerge(merge=build)
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)
        self._status_display.merges = len(self._task_queues.merge)
    # NOTE(review): the "else:" branch header is missing from this
    # excerpt.
        settings = build.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            # NOTE(review): the "pkg=..." argument line is missing here.
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

        self._status_display.failed = len(self._failed_pkgs)
        self._deallocate_config(build.settings)
    # NOTE(review): job-count bookkeeping appears to be missing here.
    self._status_display.running = self._jobs
    # NOTE(review): a trailing line is missing from this excerpt.

def _extract_exit(self, build):
    # Binary-package extraction finishes the same way a build does.
    self._build_exit(build)
def _task_complete(self, pkg):
    """Record *pkg* as finished and let _choose_pkg() search again."""
    self._choose_pkg_return_early = False
    self._completed_tasks.add(pkg)
# NOTE(review): fragment of the _merge() method; its "def" line and
# several interleaved lines are missing from this excerpt.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Quiet the lock messages and install the elog listener for the
# duration of the main loop.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
# NOTE(review): the "try:" and the main-loop invocation are missing
# here -- verify against upstream emerge.
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# NOTE(review): an "if failed_pkgs:" guard is missing here.
rval = failed_pkgs[-1].returncode
# NOTE(review): cleanup and the final "return rval" are missing from
# this excerpt.
def _main_loop_cleanup(self):
    """Drop per-run scheduling state once the main loop has finished."""
    self._task_queues.fetch.clear()
    self._digraph = None
    self._status_display.reset()
    self._choose_pkg_return_early = False
    self._completed_tasks.clear()
    self._pkg_queue[:] = []
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    """
    if self._choose_pkg_return_early:
        # NOTE(review): the memoized early return is missing from this
        # excerpt.

    if self._digraph is None:
        # No graph: pop in list order, unless jobs are still running in
        # a mode where ordering matters.
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            # NOTE(review): "return None" missing here.
        return self._pkg_queue.pop(0)

    if not (self._jobs or self._task_queues.merge):
        # Nothing is running, so the first queued package is safe.
        return self._pkg_queue.pop(0)

    self._prune_digraph()

    # NOTE(review): "chosen_pkg = None" initializer missing here.
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        # NOTE(review): a line removing pkg from "later" appears to be
        # missing here.
        if not self._dependent_on_scheduled_merges(pkg, later):
            # NOTE(review): selection/break lines missing here.

    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)

    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True

    # NOTE(review): "return chosen_pkg" missing from this excerpt.
10196 def _dependent_on_scheduled_merges(self, pkg, later):
10198 Traverse the subgraph of the given packages deep dependencies
10199 to see if it contains any scheduled merges.
10200 @param pkg: a package to check dependencies for
10202 @param later: packages for which dependence should be ignored
10203 since they will be merged later than pkg anyway and therefore
10204 delaying the merge of pkg will not result in a more optimal
10208 @returns: True if the package is dependent, False otherwise.
10211 graph = self._digraph
10212 completed_tasks = self._completed_tasks
10215 traversed_nodes = set([pkg])
10216 direct_deps = graph.child_nodes(pkg)
10217 node_stack = direct_deps
10218 direct_deps = frozenset(direct_deps)
10220 node = node_stack.pop()
10221 if node in traversed_nodes:
10223 traversed_nodes.add(node)
10224 if not ((node.installed and node.operation == "nomerge") or \
10225 (node.operation == "uninstall" and \
10226 node not in direct_deps) or \
10227 node in completed_tasks or \
10231 node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
    """
    Allocate a unique config instance for a task in order
    to prevent interference between parallel tasks.

    @param root: the ROOT whose config pool to draw from
    @return: a portage.config instance private to the caller
    """
    # Reuse a pooled instance when one is available; otherwise clone a
    # fresh one from the per-root baseline settings.  (As previously
    # written the clone unconditionally overwrote the pooled instance,
    # defeating the pool.)
    if self._config_pool[root]:
        temp_settings = self._config_pool[root].pop()
    else:
        temp_settings = portage.config(clone=self.pkgsettings[root])
    # Since config.setcpv() isn't guaranteed to call config.reset() due to
    # performance reasons, call it here to make sure all settings from the
    # previous package get flushed out (such as PORTAGE_LOG_FILE).
    temp_settings.reload()
    temp_settings.reset()
    return temp_settings
def _deallocate_config(self, settings):
    """Return a temporary config instance to the pool for its ROOT."""
    pool = self._config_pool[settings["ROOT"]]
    pool.append(settings)
def _main_loop(self):
    # Drive scheduling and event polling until the work queues drain.
    # NOTE(review): interleaved lines are missing from this excerpt --
    # verify against upstream emerge.

    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    merge_queue = self._task_queues.merge

    while self._schedule():
        if self._poll_event_handlers:
            # NOTE(review): the poll call is missing from this excerpt.

    # Drain phase: wait for remaining jobs and merges to finish.
    # NOTE(review): the drain loop header is missing here.
    if not (self._jobs or merge_queue):
        # NOTE(review): "break" missing here.
    if self._poll_event_handlers:
        # NOTE(review): the poll call is missing here.
def _keep_scheduling(self):
    """Return True while packages remain and no failure (outside
    fetchonly mode) has been recorded."""
    if not self._pkg_queue:
        return False
    if self._failed_pkgs and not self._build_opts.fetchonly:
        return False
    return True
def _schedule_tasks(self):
    # One scheduling pass: start whatever can start, then repaint the
    # status line.
    self._schedule_tasks_imp()
    self._status_display.display()

    # NOTE(review): lines are missing from this excerpt; the loop below
    # appears to scan queue state but its body is absent.
    for q in self._task_queues.values():

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        # NOTE(review): state-change bookkeeping missing here.

    # NOTE(review): a guard around this second pass appears to be
    # missing from this excerpt.
    self._schedule_tasks_imp()
    self._status_display.display()

    return self._keep_scheduling()
10302 def _job_delay(self):
10305 @returns: True if job scheduling should be delayed, False otherwise.
10308 if self._jobs and self._max_load is not None:
10310 current_time = time.time()
10312 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10313 if delay > self._job_delay_max:
10314 delay = self._job_delay_max
10315 if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
	"""
	@rtype: bool
	@returns: True if state changed, False otherwise.
	"""
	# NOTE(review): the scheduling loop header, the state_change
	# accumulator/initializer and several branch headers are missing
	# from this dump; visible statements are preserved unchanged.
	if not self._keep_scheduling():
		return bool(state_change)

	if self._choose_pkg_return_early or \
		not self._can_add_job() or \
		return bool(state_change)

	pkg = self._choose_pkg()
	return bool(state_change)

	if not pkg.installed:
		# Only real merges advance the (curval of maxval) counter.
		self._pkg_count.curval += 1

	task = self._task(pkg)

	# Already-built/installed packages go straight to the merge queue.
	merge = PackageMerge(merge=task)
	merge.addExitListener(self._merge_exit)
	self._task_queues.merge.add(merge)

	# Binary packages are extracted in the jobs queue ...
	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._extract_exit)
	self._task_queues.jobs.add(task)

	# ... while ebuilds are built in the jobs queue.
	self._previous_job_start_time = time.time()
	self._status_display.running = self._jobs
	task.addExitListener(self._build_exit)
	self._task_queues.jobs.add(task)

	return bool(state_change)
def _task(self, pkg):
	"""
	Build a MergeListItem task for the given package, first locating
	any installed package in the same slot that it will replace.

	NOTE(review): lines are missing from this dump (a guard on
	previous_cpv being non-empty, and the final `return task`).
	"""
	pkg_to_replace = None
	if pkg.operation != "uninstall":
		vardb = pkg.root_config.trees["vartree"].dbapi
		previous_cpv = vardb.match(pkg.slot_atom)
		# Take the highest installed cpv matching the slot atom.
		previous_cpv = previous_cpv.pop()
		pkg_to_replace = self._pkg(previous_cpv,
			"installed", pkg.root_config, installed=True)

	# Each task gets a private config instance via the pool, to avoid
	# interference between parallel tasks.
	task = MergeListItem(args_set=self._args_set,
		background=self._background, binpkg_opts=self._binpkg_opts,
		build_opts=self._build_opts,
		config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
		emerge_opts=self.myopts,
		find_blockers=self._find_blockers(pkg), logger=self._logger,
		mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
		pkg_to_replace=pkg_to_replace,
		prefetcher=self._prefetchers.get(pkg),
		scheduler=self._sched_iface,
		settings=self._allocate_config(pkg.root),
		statusMessage=self._status_msg,
		world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""Report a failed package on the status display, appending the
	log file location on a second line when one can be located."""
	pkg = failed_pkg.pkg
	log_path = self._locate_failure_log(failed_pkg)

	parts = ["%s to %s %s" % \
		(bad("Failed"), action, colorize("INFORM", pkg.cpv))]
	if pkg.root != "/":
		parts.append(" %s %s" % (preposition, pkg.root))
	if log_path is not None:
		parts.append(", Log file:")
	self._status_msg("".join(parts))

	if log_path is not None:
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
	"""
	Display a brief status message (no newlines) in the status display.
	This is called by tasks to provide feedback to the user. This
	delegates the responsibility of generating \r and \n control characters,
	to guarantee that lines are created or erased when necessary and
	appropriately.

	@type msg: str
	@param msg: a brief status message (no newlines allowed)
	"""
	# In foreground mode, start on a fresh line so the message is not
	# appended to whatever is currently on the terminal line.
	if not self._background:
		writemsg_level("\n")
	self._status_display.displayMessage(msg)
def _save_resume_list(self):
	"""
	Do this before verifying the ebuild Manifests since it might
	be possible for the user to use --resume --skipfirst get past
	a non-essential package with a broken digest.
	"""
	mtimedb = self._mtimedb
	# Only "merge" operations belong in the resume list; each Package
	# is stored as a plain list so the mtime db can serialize it.
	mtimedb["resume"]["mergelist"] = [list(x) \
		for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"]
	# NOTE(review): upstream commits the mtime db right after this
	# assignment; confirm mtimedb.commit() still happens (the line
	# appears to be missing from this dump).
def _calc_resume_list(self):
	"""
	Use the current resume list to calculate a new one,
	dropping any packages with unsatisfied deps.

	@rtype: bool
	@returns: True if successful, False otherwise.

	NOTE(review): many lines of this method are missing from this dump
	(try/else headers, return statements, spinner guards); the visible
	statements are preserved unchanged and gaps are marked below.
	"""
	print colorize("GOOD", "*** Resuming merge...")

	if self._show_list():
		if "--tree" in self.myopts:
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be merged, in reverse order:\n\n"))
		# NOTE(review): an else branch header appears missing here.
		portage.writemsg_stdout("\n" + \
			darkgreen("These are the packages that " + \
			"would be merged, in order:\n\n"))

	show_spinner = "--quiet" not in self.myopts and \
		"--nodeps" not in self.myopts

	# NOTE(review): presumably guarded by `if show_spinner:` upstream.
	print "Calculating dependencies ",

	myparams = create_depgraph_params(self.myopts, None)
	# NOTE(review): the try: header for the handler below is missing.
	success, mydepgraph, dropped_tasks = resume_depgraph(
		self.settings, self.trees, self._mtimedb, self.myopts,
		myparams, self._spinner, skip_unsatisfied=True)
	except depgraph.UnsatisfiedResumeDep, e:
		mydepgraph = e.depgraph
		dropped_tasks = set()

	print "\b\b... done!"

	# Deferred error report, shown after mod_echo output is flushed.
	def unsatisfied_resume_dep_msg():
		mydepgraph.display_problems()
		out = portage.output.EOutput()
		out.eerror("One or more packages are either masked or " + \
			"have missing dependencies:")
		show_parents = set()
		for dep in e.value:
			if dep.parent in show_parents:
			show_parents.add(dep.parent)
			if dep.atom is None:
				out.eerror(indent + "Masked package:")
				out.eerror(2 * indent + str(dep.parent))
			out.eerror(indent + str(dep.atom) + " pulled in by:")
			out.eerror(2 * indent + str(dep.parent))
		msg = "The resume list contains packages " + \
			"that are either masked or have " + \
			"unsatisfied dependencies. " + \
			"Please restart/continue " + \
			"the operation manually, or use --skipfirst " + \
			"to skip the first package in the list and " + \
			"any other packages that may be " + \
			"masked or have missing dependencies."
		for line in textwrap.wrap(msg, 72):
	self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

	if success and self._show_list():
		mylist = mydepgraph.altlist()
		if "--tree" in self.myopts:
			mydepgraph.display(mylist, favorites=self._favorites)

	self._post_mod_echo_msgs.append(mydepgraph.display_problems)
	mydepgraph.display_problems()

	# Adopt the recalculated merge list and scheduler graph.
	mylist = mydepgraph.altlist()
	mydepgraph.break_refs(mylist)
	mydepgraph.break_refs(dropped_tasks)
	self._mergelist = mylist
	self._set_digraph(mydepgraph.schedulerGraph())

	# Report each dropped merge task via elog and record the failure.
	for task in dropped_tasks:
		if not (isinstance(task, Package) and task.operation == "merge"):
		msg = "emerge --keep-going:" + \
		if pkg.root != "/":
			msg += " for %s" % (pkg.root,)
		msg += " dropped due to unsatisfied dependency."
		for line in textwrap.wrap(msg, msg_width):
			eerror(line, phase="other", key=pkg.cpv)
		settings = self.pkgsettings[pkg.root]
		# Ensure that log collection from $T is disabled inside
		# elog_process(), since any logs that might exist are
		# not relevant here.
		settings.pop("T", None)
		portage.elog.elog_process(pkg.cpv, settings)
		self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
10555 def _show_list(self):
10556 myopts = self.myopts
10557 if "--quiet" not in myopts and \
10558 ("--ask" in myopts or "--tree" in myopts or \
10559 "--verbose" in myopts):
def _world_atom(self, pkg):
	"""
	Add the package to the world file, but only if
	it's supposed to be added. Otherwise, do nothing.

	NOTE(review): several lines are missing from this dump (the early
	`return` statements, the world_set.lock()/unlock() calls with
	their try/finally, and an else branch before the final warning);
	visible statements are preserved unchanged.
	"""
	# Skip recording for modes that are not real, persistent merges.
	if set(("--buildpkgonly", "--fetchonly",
		"--oneshot", "--onlydeps",
		"--pretend")).intersection(self.myopts):

	# Only record packages merged to the target root.
	if pkg.root != self.target_root:

	args_set = self._args_set
	if not args_set.findAtomForPackage(pkg):

	logger = self._logger
	pkg_count = self._pkg_count
	root_config = pkg.root_config
	world_set = root_config.sets["world"]
	world_locked = False
	if hasattr(world_set, "lock"):
		world_locked = True

	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk

	atom = create_world_atom(pkg, args_set, root_config)
	if hasattr(world_set, "add"):
		self._status_msg(('Recording %s in "world" ' + \
			'favorites file...') % atom)
		logger.log(" === (%s of %s) Updating world file (%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv))
		world_set.add(atom)
	# NOTE(review): presumably an else branch upstream — warn when the
	# world set does not support add().
	writemsg_level('\n!!! Unable to record %s in "world"\n' % \
		(atom,), level=logging.WARN, noiselevel=-1)
def _pkg(self, cpv, type_name, root_config, installed=False):
	"""
	Get a package instance from the cache, or create a new
	one if necessary. Raises KeyError from aux_get if it
	fails for some reason (package does not exist or is
	corrupt).

	NOTE(review): lines are missing from this dump (an `if installed:`
	guard before the "nomerge" assignment and the `return pkg`
	statements); visible statements are preserved unchanged.
	"""
	operation = "merge"
	# NOTE(review): presumably guarded by `if installed:` upstream; as
	# dumped this unconditionally overwrites "merge".
	operation = "nomerge"

	if self._digraph is not None:
		# Reuse existing instance when available.
		pkg = self._digraph.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is not None:

	# Build a fresh Package from the db's cached aux metadata.
	tree_type = depgraph.pkg_tree_map[type_name]
	db = root_config.trees[tree_type].dbapi
	db_keys = list(self.trees[root_config.root][
		tree_type].dbapi._aux_cache_keys)
	metadata = izip(db_keys, db.aux_get(cpv, db_keys))
	pkg = Package(cpv=cpv, metadata=metadata,
		root_config=root_config, installed=installed)
	if type_name == "ebuild":
		# For ebuilds, take USE from the config's PORTAGE_USE after
		# setcpv() has been applied for this package.
		settings = self.pkgsettings[root_config.root]
		settings.setcpv(pkg)
		pkg.metadata["USE"] = settings["PORTAGE_USE"]
class MetadataRegen(PollScheduler):
	"""
	Regenerate the ebuild metadata cache: spawn one metadata process
	per ebuild, scheduled through the PollScheduler polling core, and
	prune dead cache entries afterwards.

	NOTE(review): lines are missing from this dump (the default
	max_jobs assignment, the `def run(self):` header and parts of its
	body, several loop/try headers); visible statements are preserved
	unchanged and gaps are marked below.
	"""

	def __init__(self, portdb, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		self._portdb = portdb
		# NOTE(review): the default assignment for max_jobs appears to
		# be missing between these two lines.
		if max_jobs is None:
		self._max_jobs = max_jobs
		self._max_load = max_load
		# Scheduler interface handed to each metadata process.
		self._sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._valid_pkgs = set()
		self._process_iter = self._iter_metadata_processes()

	def _iter_metadata_processes(self):
		"""Yield one metadata process per ebuild in the tree."""
		portdb = self._portdb
		valid_pkgs = self._valid_pkgs
		every_cp = portdb.cp_all()
		every_cp.sort(reverse=True)

		# NOTE(review): the `while every_cp:` loop header is missing.
		cp = every_cp.pop()
		portage.writemsg_stdout("Processing %s\n" % cp)
		cpv_list = portdb.cp_list(cp)
		for cpv in cpv_list:
			valid_pkgs.add(cpv)
			ebuild_path, repo_path = portdb.findname2(cpv)
			metadata_process = portdb._metadata_process(
				cpv, ebuild_path, repo_path)
			if metadata_process is None:
			yield metadata_process

	# NOTE(review): the `def run(self):` header for the following body
	# appears to be missing from this dump.
		portdb = self._portdb
		from portage.cache.cache_errors import CacheError

		for mytree in portdb.porttrees:
			# NOTE(review): the try: header for this handler is missing.
			dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
			except CacheError, e:
				portage.writemsg("Error listing cache entries for " + \
					"'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)

		while self._schedule():

		# Discard cache nodes that still correspond to real ebuilds;
		# whatever remains in dead_nodes is stale and gets deleted.
		for y in self._valid_pkgs:
			for mytree in portdb.porttrees:
				if portdb.findname2(y, mytree=mytree)[0]:
					dead_nodes[mytree].discard(y)

		for mytree, nodes in dead_nodes.iteritems():
			auxdb = portdb.auxdb[mytree]
			except (KeyError, CacheError):

	def _schedule_tasks(self):
		"""
		@rtype: bool
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			# NOTE(review): the try: header for this handler is missing.
			metadata_process = self._process_iter.next()
			except StopIteration:

			metadata_process.scheduler = self._sched_iface
			metadata_process.addExitListener(self._metadata_exit)
			metadata_process.start()

	def _metadata_exit(self, metadata_process):
		# Drop the cpv from the valid set when its process failed, so
		# its stale cache entry gets pruned later.
		if metadata_process.returncode != os.EX_OK:
			self._valid_pkgs.discard(metadata_process.cpv)
			portage.writemsg("Error processing %s, continuing...\n" % \
				(metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.

	The `status` attribute carries the exit status of the failed
	unmerge; it defaults to 1 when no status is supplied.
	"""
	# Default exit status; overridden by the first positional argument.
	status = 1
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		# Guard the subscript so UninstallFailure() with no arguments
		# does not raise IndexError (the dump showed it unguarded).
		if pargs:
			self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Unmerge installed packages for the "unmerge"/"prune"/"clean"-style
	actions: resolve the command-line arguments to installed packages,
	partition them into selected/protected/omitted groups per atom,
	display the plan, and finally invoke portage.unmerge() for each
	selected package, cleaning the world file afterwards.

	NOTE(review): a large number of lines are missing from this dump
	(loop/try headers, return statements, else branches); the visible
	statements are preserved unchanged and the larger gaps are marked
	with comments below.
	"""
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()
	db_keys = list(vartree.dbapi._aux_cache_keys)

	# NOTE(review): the following lines belong to a local `_pkg(cpv)`
	# caching helper whose def header and `if pkg is None:` guard are
	# missing from this dump.
	pkg = pkg_cache.get(cpv)
	pkg = Package(cpv=cpv, installed=True,
		metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
		root_config=root_config,
		type_name="installed")
	pkg_cache[cpv] = pkg

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	# At least the parent needs to exist for the lock file.
	portage.util.ensure_dirs(vdb_path)
	# NOTE(review): the try: header for this handler is missing.
	except portage.exception.PortageException:
	if os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)

	# Expand virtuals in the system set so system packages can be
	# protected below.
	realsyslist = sets["system"].getAtoms()
	for x in realsyslist:
		mycp = portage.dep_getkey(x)
		if mycp in settings.getvirtuals():
			for provider in settings.getvirtuals()[mycp]:
				if vartree.dbapi.match(provider):
					providers.append(provider)
			if len(providers) == 1:
				syslist.extend(providers)
		# NOTE(review): else branch header missing here.
		syslist.append(mycp)

	mysettings = portage.config(clone=settings)

	if not unmerge_files:
		if unmerge_action == "unmerge":
			print bold("emerge unmerge") + " can only be used with specific package names"

	localtree = vartree
	# process all arguments and add all
	# valid db entries to candidate_catpkgs
	if not unmerge_files:
		candidate_catpkgs.extend(vartree.dbapi.cp_all())
	#we've got command-line arguments
	if not unmerge_files:
		print "\nNo packages to unmerge have been provided.\n"
	for x in unmerge_files:
		arg_parts = x.split('/')
		if x[0] not in [".","/"] and \
			arg_parts[-1][-7:] != ".ebuild":
			#possible cat/pkg or dep; treat as such
			candidate_catpkgs.append(x)
		elif unmerge_action in ["prune","clean"]:
			print "\n!!! Prune and clean do not accept individual" + \
				" ebuilds as arguments;\n skipping.\n"
		# it appears that the user is specifying an installed
		# ebuild and we're in "unmerge" mode, so it's ok.
		if not os.path.exists(x):
			print "\n!!! The path '"+x+"' doesn't exist.\n"

		# Translate a path inside the vdb into a "=cat/pkg-ver" atom.
		absx = os.path.abspath(x)
		sp_absx = absx.split("/")
		if sp_absx[-1][-7:] == ".ebuild":
			absx = "/".join(sp_absx)

		sp_absx_len = len(sp_absx)

		vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
		vdb_len = len(vdb_path)

		sp_vdb = vdb_path.split("/")
		sp_vdb_len = len(sp_vdb)

		if not os.path.exists(absx+"/CONTENTS"):
			print "!!! Not a valid db dir: "+str(absx)

		if sp_absx_len <= sp_vdb_len:
			# The Path is shorter... so it can't be inside the vdb.
			print "\n!!!",x,"cannot be inside "+ \
				vdb_path+"; aborting.\n"

		for idx in range(0,sp_vdb_len):
			if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
				print "\n!!!", x, "is not inside "+\
					vdb_path+"; aborting.\n"

		print "="+"/".join(sp_absx[sp_vdb_len:])
		candidate_catpkgs.append(
			"="+"/".join(sp_absx[sp_vdb_len:]))

	# NOTE(review): the newline variable initialization is missing here.
	if (not "--quiet" in myopts):
	if settings["ROOT"] != "/":
		# NOTE(review): the continuation line closing this call is
		# missing from this dump.
		writemsg_level(darkgreen(newline+ \
			">>> Using system located in ROOT tree %s\n" % \

	if (("--pretend" in myopts) or ("--ask" in myopts)) and \
		not ("--quiet" in myopts):
		writemsg_level(darkgreen(newline+\
			">>> These are the packages that would be unmerged:\n"))

	# Preservation of order is required for --depclean and --prune so
	# that dependencies are respected. Use all_selected to eliminate
	# duplicate packages since the same package may be selected by
	all_selected = set()
	for x in candidate_catpkgs:
		# cycle through all our candidate deps and determine
		# what will and will not get unmerged
		# NOTE(review): the try: header for the handler below is missing.
		mymatch = vartree.dbapi.match(x)
		except portage.exception.AmbiguousPackageName, errpkgs:
			print "\n\n!!! The short ebuild name \"" + \
				x + "\" is ambiguous. Please specify"
			print "!!! one of the following fully-qualified " + \
				"ebuild names instead:\n"
			for i in errpkgs[0]:
				print " " + green(i)

		if not mymatch and x[0] not in "<>=~":
			mymatch = localtree.dep_match(x)
			portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
				(x, unmerge_action), noiselevel=-1)

		# NOTE(review): the pkgmap.append( header for this literal is
		# missing from this dump.
			{"protected": set(), "selected": set(), "omitted": set()})
		mykey = len(pkgmap) - 1
		if unmerge_action=="unmerge":
			if y not in all_selected:
				pkgmap[mykey]["selected"].add(y)
				all_selected.add(y)
		elif unmerge_action == "prune":
			if len(mymatch) == 1:
			# Keep only the single best version; select the rest.
			best_version = mymatch[0]
			best_slot = vartree.getslot(best_version)
			best_counter = vartree.dbapi.cpv_counter(best_version)
			for mypkg in mymatch[1:]:
				myslot = vartree.getslot(mypkg)
				mycounter = vartree.dbapi.cpv_counter(mypkg)
				if (myslot == best_slot and mycounter > best_counter) or \
					mypkg == portage.best([mypkg, best_version]):
					if myslot == best_slot:
						if mycounter < best_counter:
							# On slot collision, keep the one with the
							# highest counter since it is the most
							# recently installed.
					best_version = mypkg
					best_counter = mycounter
			pkgmap[mykey]["protected"].add(best_version)
			pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
				if mypkg != best_version and mypkg not in all_selected)
			all_selected.update(pkgmap[mykey]["selected"])
		# unmerge_action == "clean"
		for mypkg in mymatch:
			if unmerge_action == "clean":
				myslot = localtree.getslot(mypkg)
			# since we're pruning, we don't care about slots
			# and put all the pkgs in together
			if myslot not in slotmap:
				slotmap[myslot] = {}
			slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

		for myslot in slotmap:
			counterkeys = slotmap[myslot].keys()
			if not counterkeys:
			pkgmap[mykey]["protected"].add(
				slotmap[myslot][counterkeys[-1]])
			del counterkeys[-1]
			#be pretty and get them in order of merge:
			for ckey in counterkeys:
				mypkg = slotmap[myslot][ckey]
				if mypkg not in all_selected:
					pkgmap[mykey]["selected"].add(mypkg)
					all_selected.add(mypkg)
			# ok, now the last-merged package
			# is protected, and the rest are selected
	numselected = len(all_selected)
	if global_unmerge and not numselected:
		portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")

	if not numselected:
		portage.writemsg_stdout(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
	# NOTE(review): upstream runs these in a finally: block.
	vartree.dbapi.flush_cache()
	portage.locks.unlockdir(vdb_lock)

	from portage.sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "world",
	# as there is no persistent list of "installed" sets
	installed_sets = ["world"]
	pos = len(installed_sets)
	for s in installed_sets[pos - 1:]:
		candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
		installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			# It could have been uninstalled
			# by a concurrent process.
			# Never let portage select itself for unmerging.
			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
					"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# unless we're unmerging a package set (as the package would be
				# removed from "world" later on)
				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):

				if s in unknown_sets:
				unknown_sets.add(s)
				out = portage.output.EOutput()
				out.eerror(("Unknown set '@%s' in " + \
					"%svar/lib/portage/world_sets") % \
					(s, root_config.root))

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):
					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						for inst_cpv in inst_matches:
							inst_pkg = _pkg(inst_cpv)
							# It could have been uninstalled
							# by a concurrent process.
							if inst_pkg.cp != atom.cp:
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
						if higher_slot is None:

			#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
			#print colorize("WARN", "but still listed in the following package sets:")
			#print " %s\n" % ", ".join(parents)
			print colorize("WARN", "Not unmerging package %s as it is" % cpv)
			print colorize("WARN", "still referenced by the following package sets:")
			print " %s\n" % ", ".join(parents)
			# adjust pkgmap so the display output is correct
			pkgmap[cp]["selected"].remove(cpv)
			all_selected.remove(cpv)
			pkgmap[cp]["protected"].add(cpv)

	numselected = len(all_selected)
	if not numselected:
		# NOTE(review): the writemsg_level( header for these
		# continuation lines is missing from this dump.
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")

	# Unmerge order only matters in some cases
	# NOTE(review): the `if not ordered:` / `for d in pkgmap:` headers
	# for the regrouping below appear to be missing.
	selected = d["selected"]
	cp = portage.cpv_getkey(iter(selected).next())
	cp_dict = unordered.get(cp)
	if cp_dict is None:
		unordered[cp] = cp_dict
	for k, v in d.iteritems():
		cp_dict[k].update(v)
	pkgmap = [unordered[cp] for cp in sorted(unordered)]

	# Display the selected/protected/omitted groups per cp.
	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(portage.pkgcmp)
				for pn, ver, rev in sorted_pkgs:
					myversion = ver + "-" + rev
					if mytype == "selected":
						# NOTE(review): the writemsg_level( headers for
						# these continuation lines are missing.
						colorize("UNMERGE_WARN", myversion + " "),
						colorize("GOOD", myversion + " "), noiselevel=-1)
			writemsg_level("none ", noiselevel=-1)
			writemsg_level("\n", noiselevel=-1)
		writemsg_level("\n", noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True

	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				raise UninstallFailure(retval)
			# On success, drop the package from the world file.
			if clean_world and hasattr(sets["world"], "cleanPackage"):
				sets["world"].cleanPackage(vartree.dbapi, y)
			emergelog(xterm_titles, " >>> unmerge success: "+y)
	if clean_world and hasattr(sets["world"], "remove"):
		for s in root_config.setconfig.active:
			sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	"""
	Regenerate the GNU info directory index (via install-info) for any
	info dir whose mtime changed since the previous run, then record
	the new mtimes in prev_mtimes.

	NOTE(review): many lines are missing from this dump (the infodirs
	loop header, counter initialization, try/else headers, `continue`
	statements); visible statements are preserved unchanged.
	"""
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		# NOTE(review): `regen_infodirs = []` and the `for z in
		# infodirs:` loop header appear to be missing here.
		inforoot=normpath(root+z)
		if os.path.isdir(inforoot):
			infomtime = long(os.stat(inforoot).st_mtime)
			if inforoot not in prev_mtimes or \
				prev_mtimes[inforoot] != infomtime:
				regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
		# NOTE(review): else branch header missing here.
		portage.writemsg_stdout("\n")
		out.einfo("Regenerating GNU info directory index...")

		dir_extensions = ("", ".gz", ".bz2")
		for inforoot in regen_infodirs:
			if not os.path.isdir(inforoot) or \
				not os.access(inforoot, os.W_OK):

			file_list = os.listdir(inforoot)
			dir_file = os.path.join(inforoot, "dir")
			moved_old_dir = False
			processed_count = 0
			for x in file_list:
				if x.startswith(".") or \
					os.path.isdir(os.path.join(inforoot, x)):
				if x.startswith("dir"):
					for ext in dir_extensions:
						if x == "dir" + ext or \
							x == "dir" + ext + ".old":
				if processed_count == 0:
					# Move any existing dir index out of the way so
					# install-info regenerates it from scratch.
					for ext in dir_extensions:
						os.rename(dir_file + ext, dir_file + ext + ".old")
						moved_old_dir = True
						except EnvironmentError, e:
							if e.errno != errno.ENOENT:
				processed_count += 1
				myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
				existsstr="already exists, for file `"
				if re.search(existsstr,myso):
					# Already exists... Don't increment the count for this.
				elif myso[:44]=="install-info: warning: no info dir entry in ":
					# This info file doesn't contain a DIR-header: install-info produces this
					# (harmless) warning (the --quiet switch doesn't seem to work).
					# Don't increment the count for this.
				badcount=badcount+1
				errmsg += myso + "\n"

			if moved_old_dir and not os.path.exists(dir_file):
				# We didn't generate a new dir file, so put the old file
				# back where it was originally found.
				for ext in dir_extensions:
					os.rename(dir_file + ext + ".old", dir_file + ext)
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:

			# Clean dir.old cruft so that they don't prevent
			# unmerge of otherwise empty directories.
			for ext in dir_extensions:
				os.unlink(dir_file + ext + ".old")
				except EnvironmentError, e:
					if e.errno != errno.ENOENT:

			#update mtime so we can potentially avoid regenerating.
			prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

		# NOTE(review): presumably guarded by `if badcount:` upstream.
		out.eerror("Processed %d info files; %d errors." % \
			(icount, badcount))
		writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
		out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	"""
	Print a notice for each repository that has unread news items,
	followed by a pointer to `eselect news`.

	NOTE(review): a few lines appear to be missing from this dump
	(the `if unreadItems:` guard inside the repository loop); visible
	statements are preserved unchanged.
	"""
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Only mark items as read on a real (non --pretend) run.
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
		if not newsReaderDisplay:
			newsReaderDisplay = True
		print colorize("WARN", " * IMPORTANT:"),
		print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	"""
	Print a report of preserved libraries recorded in the plib
	registry, grouping identical files together and listing the
	consumers of each library plus the packages owning them.

	NOTE(review): lines are missing from this dump (the MAX_DISPLAY
	constant, the linkmap.rebuild() try header, several map
	initializations and `continue`/`break` statements); visible
	statements are preserved unchanged.
	"""
	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		linkmap_broken = False

		# NOTE(review): the try: header (around linkmap.rebuild()) for
		# this handler is missing from this dump.
		except portage.exception.CommandNotFound, e:
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			linkmap_broken = True

		# Collect consumers of each preserved lib, and the set of files
		# whose owners we will need to look up.
		search_for_owners = set()
		for cpv in plibdata:
			for f in plibdata[cpv]:
				if f in consumer_map:
				consumers = list(linkmap.findConsumers(f))
				consumer_map[f] = consumers
				search_for_owners.update(consumers[:MAX_DISPLAY+1])

		owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			# Group paths that refer to the same underlying object.
			for f in plibdata[cpv]:
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					samefile_map[obj_key] = alt_paths

			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
				for p in alt_paths:
					print colorize("WARN", " * ") + " - %s" % (p,)
				consumers = consumer_map.get(f, [])
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				if len(consumers) == MAX_DISPLAY + 1:
					print colorize("WARN", " * ") + " used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11438 def _flush_elog_mod_echo():
# Flush any buffered elog "echo" messages immediately so that the
# notifications printed afterwards (news, config files, preserved libs)
# appear last on the terminal.
# NOTE(review): the docstring quotes and the try: line around the import
# are on elided lines of this listing.
11440 Dump the mod_echo output now so that our other
11441 notifications are shown last.
11443 @returns: True if messages were shown, False otherwise.
11445 messages_shown = False
11447 from portage.elog import mod_echo
11448 except ImportError:
11449 pass # happens during downgrade to a version without the module
# mod_echo buffers pending messages in _items; finalize() prints them
# and clears the buffer, so check _items before finalizing.
11451 messages_shown = bool(mod_echo._items)
11452 mod_echo.finalize()
11453 return messages_shown
11455 def post_emerge(root_config, myopts, mtimedb, retval):
# End-of-merge housekeeping: regenerate settings, log the exit status,
# flush elog output, update info dirs and config-file notices, then show
# news and preserved-libs warnings.  The docstring below is partially
# elided in this listing (its quotes and several lines are missing).
11457 Misc. things to run at the end of a merge session.
11460 Update Config Files
11463 Display preserved libs warnings
11466 @param trees: A dictionary mapping each ROOT to it's package databases
11468 @param mtimedb: The mtimeDB to store data needed across merge invocations
11469 @type mtimedb: MtimeDB class instance
11470 @param retval: Emerge's return value
11474 1. Calls sys.exit(retval)
11477 target_root = root_config.root
11478 trees = { target_root : root_config.trees }
11479 vardbapi = trees[target_root]["vartree"].dbapi
11480 settings = vardbapi.settings
11481 info_mtimes = mtimedb["info"]
11483 # Load the most current variables from ${ROOT}/etc/profile.env
11486 settings.regenerate()
11489 config_protect = settings.get("CONFIG_PROTECT","").split()
11490 infodirs = settings.get("INFOPATH","").split(":") + \
11491 settings.get("INFODIR","").split(":")
11495 if retval == os.EX_OK:
11496 exit_msg = " *** exiting successfully."
11498 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11499 emergelog("notitles" not in settings.features, exit_msg)
11501 _flush_elog_mod_echo()
# If the vdb counter hash is unchanged, no packages were merged or
# unmerged, so the remaining notifications can be skipped (the early
# return is on an elided line).
11503 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11504 if counter_hash is not None and \
11505 counter_hash == vardbapi._counter_hash():
11506 # If vdb state has not changed then there's nothing else to do.
11509 vdb_path = os.path.join(target_root, portage.VDB_PATH)
11510 portage.util.ensure_dirs(vdb_path)
# Lock the vdb while updating GNU info dirs so concurrent emerge
# instances do not race on the mtime records.
11512 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11513 vdb_lock = portage.locks.lockdir(vdb_path)
11517 if "noinfo" not in settings.features:
11518 chk_updated_info_files(target_root,
11519 infodirs, info_mtimes, retval)
11523 portage.locks.unlockdir(vdb_lock)
11525 chk_updated_cfg_files(target_root, config_protect)
11527 display_news_notification(root_config, myopts)
11528 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11529 display_preserved_libs(vardbapi)
11534 def chk_updated_cfg_files(target_root, config_protect):
# Scan each CONFIG_PROTECT entry under target_root for pending
# ._cfg????_* update files and tell the user how many need merging.
# NOTE(review): listing is elided -- the counter initialization and
# several continue/else lines are on missing lines.
11536 #number of directories with some protect files in them
11538 for x in config_protect:
11539 x = os.path.join(target_root, x.lstrip(os.path.sep))
11540 if not os.access(x, os.W_OK):
11541 # Avoid Permission denied errors generated
11545 mymode = os.lstat(x).st_mode
11548 if stat.S_ISLNK(mymode):
11549 # We want to treat it like a directory if it
11550 # is a symlink to an existing directory.
11552 real_mode = os.stat(x).st_mode
11553 if stat.S_ISDIR(real_mode):
# Directories are searched recursively (skipping hidden dirs);
# a protected single file is matched by name in its parent dir.
11557 if stat.S_ISDIR(mymode):
11558 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11560 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11561 os.path.split(x.rstrip(os.path.sep))
11562 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11563 a = commands.getstatusoutput(mycommand)
11565 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11567 # Show the error message alone, sending stdout to /dev/null.
11568 os.system(mycommand + " 1>/dev/null")
# find -print0 output is NUL-separated; drop the trailing empty
# element that split() always produces.
11570 files = a[1].split('\0')
11571 # split always produces an empty string as the last element
11572 if files and not files[-1]:
11576 print "\n"+colorize("WARN", " * IMPORTANT:"),
11577 if stat.S_ISDIR(mymode):
11578 print "%d config files in '%s' need updating." % \
11581 print "config file '%s' needs updating." % x
11584 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11585 " section of the " + bold("emerge")
11586 print " "+yellow("*")+" man page to learn how to update config files."
11588 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
# Thin wrapper around portage.news.NewsManager: count unread news items
# for one repository.  NOTE(review): the continuation of this signature
# (presumably the update keyword default) is on an elided line, as are
# the docstring quotes.
11591 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
11592 Returns the number of unread (yet relevent) items.
11594 @param portdb: a portage tree database
11595 @type portdb: pordbapi
11596 @param vardb: an installed package database
11597 @type vardb: vardbapi
11600 @param UNREAD_PATH:
11606 1. The number of unread but relevant news items.
# Import locally so the news subsystem is only loaded when needed.
11609 from portage.news import NewsManager
11610 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
11611 return manager.getUnreadItems( repo_id, update=update )
11613 def insert_category_into_atom(atom, category):
# Splice "category/" in front of the package-name portion of an atom,
# preserving any leading operator characters such as ">=" or "~".
# NOTE(review): the "if alphanum:" guard and the else/return lines are
# elided from this listing.
11614 alphanum = re.search(r'\w', atom)
11616 ret = atom[:alphanum.start()] + "%s/" % category + \
11617 atom[alphanum.start():]
11622 def is_valid_package_atom(x):
# Validate a user-supplied atom, tolerating a missing category by
# temporarily prefixing a dummy "cat/" before calling isvalidatom().
# NOTE(review): the "if alphanum:" guard line is elided here.
11624 alphanum = re.search(r'\w', x)
11626 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11627 return portage.isvalidatom(x)
11629 def show_blocker_docs_link():
# Point the user at the handbook section explaining blocked packages.
11631 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11632 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11634 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11637 def show_mask_docs():
# Point the user at documentation covering masked packages.
11638 print "For more information, see the MASKED PACKAGES section in the emerge"
11639 print "man page or refer to the Gentoo Handbook."
11641 def action_sync(settings, trees, mtimedb, myopts, myaction):
# Implements `emerge --sync` (and the sync-skipping `emerge --metadata`):
# fetch the portage tree via rsync or cvs according to SYNC, then update
# the metadata cache, apply global updates, and nag about portage
# upgrades.  NOTE(review): this listing is heavily elided -- many
# try/else/return/sys.exit lines and some variable initializations
# (rsync_opts, mytimeout, retries, mypids, msg, out calls) appear only
# on missing lines.
11642 xterm_titles = "notitles" not in settings.features
11643 emergelog(xterm_titles, " === sync")
11644 myportdir = settings.get("PORTDIR", None)
11645 out = portage.output.EOutput()
11647 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
11649 if myportdir[-1]=="/":
11650 myportdir=myportdir[:-1]
11651 if not os.path.exists(myportdir):
11652 print ">>>",myportdir,"not found, creating it."
11653 os.makedirs(myportdir,0755)
11654 syncuri = settings.get("SYNC", "").strip()
11656 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11657 noiselevel=-1, level=logging.ERROR)
11661 updatecache_flg = False
11662 if myaction == "metadata":
11663 print "skipping sync"
11664 updatecache_flg = True
11665 elif syncuri[:8]=="rsync://":
# --- rsync protocol -------------------------------------------------
11666 if not os.path.exists("/usr/bin/rsync"):
11667 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11668 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11673 import shlex, StringIO
# Build the rsync option list: hardcoded defaults unless the user
# supplies PORTAGE_RSYNC_OPTS, in which case required options are
# validated back in below.
11674 if settings["PORTAGE_RSYNC_OPTS"] == "":
11675 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11676 rsync_opts.extend([
11677 "--recursive", # Recurse directories
11678 "--links", # Consider symlinks
11679 "--safe-links", # Ignore links outside of tree
11680 "--perms", # Preserve permissions
11681 "--times", # Preserive mod times
11682 "--compress", # Compress the data transmitted
11683 "--force", # Force deletion on non-empty dirs
11684 "--whole-file", # Don't do block transfers, only entire files
11685 "--delete", # Delete files that aren't in the master tree
11686 "--stats", # Show final statistics about what was transfered
11687 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11688 "--exclude=/distfiles", # Exclude distfiles from consideration
11689 "--exclude=/local", # Exclude local from consideration
11690 "--exclude=/packages", # Exclude packages from consideration
11694 # The below validation is not needed when using the above hardcoded
11697 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11698 lexer = shlex.shlex(StringIO.StringIO(
11699 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11700 lexer.whitespace_split = True
11701 rsync_opts.extend(lexer)
# Re-add options the tree layout depends on if the user omitted them.
11704 for opt in ("--recursive", "--times"):
11705 if opt not in rsync_opts:
11706 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11707 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11708 rsync_opts.append(opt)
11710 for exclude in ("distfiles", "local", "packages"):
11711 opt = "--exclude=/%s" % exclude
11712 if opt not in rsync_opts:
11713 portage.writemsg(yellow("WARNING:") + \
11714 " adding required option %s not included in " % opt + \
11715 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11716 rsync_opts.append(opt)
# Official Gentoo mirrors additionally require timeout/compress/
# whole-file to be present.
11718 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11719 def rsync_opt_startswith(opt_prefix):
11720 for x in rsync_opts:
11721 if x.startswith(opt_prefix):
11725 if not rsync_opt_startswith("--timeout="):
11726 rsync_opts.append("--timeout=%d" % mytimeout)
11728 for opt in ("--compress", "--whole-file"):
11729 if opt not in rsync_opts:
11730 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11731 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11732 rsync_opts.append(opt)
11734 if "--quiet" in myopts:
11735 rsync_opts.append("--quiet") # Shut up a lot
11737 rsync_opts.append("--verbose") # Print filelist
11739 if "--verbose" in myopts:
11740 rsync_opts.append("--progress") # Progress meter for each file
11742 if "--debug" in myopts:
11743 rsync_opts.append("--checksum") # Force checksum on all files
11745 # Real local timestamp file.
11746 servertimestampfile = os.path.join(
11747 myportdir, "metadata", "timestamp.chk")
11749 content = portage.util.grabfile(servertimestampfile)
11753 mytimestamp = time.mktime(time.strptime(content[0],
11754 "%a, %d %b %Y %H:%M:%S +0000"))
11755 except (OverflowError, ValueError):
11760 rsync_initial_timeout = \
11761 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11763 rsync_initial_timeout = 15
11766 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11767 except SystemExit, e:
11768 raise # Needed else can't exit
11770 maxretries=3 #default number of retries
# Split the rsync URI into optional user@, hostname and optional :port
# components; unmatched groups come back as None.
11773 user_name, hostname, port = re.split(
11774 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11777 if user_name is None:
11779 updatecache_flg=True
11780 all_rsync_opts = set(rsync_opts)
11781 lexer = shlex.shlex(StringIO.StringIO(
11782 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
11783 lexer.whitespace_split = True
11784 extra_rsync_opts = list(lexer)
11786 all_rsync_opts.update(extra_rsync_opts)
# Address family defaults to IPv4; -6/--ipv6 selects IPv6 when the
# local python supports it.
11787 family = socket.AF_INET
11788 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
11789 family = socket.AF_INET
11790 elif socket.has_ipv6 and \
11791 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
11792 family = socket.AF_INET6
11794 SERVER_OUT_OF_DATE = -1
11795 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname ourselves (and shuffle the result on an
# elided line) so that retries rotate through mirror IPs.
11801 for addrinfo in socket.getaddrinfo(
11802 hostname, None, family, socket.SOCK_STREAM):
11803 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
11804 # IPv6 addresses need to be enclosed in square brackets
11805 ips.append("[%s]" % addrinfo[4][0])
11807 ips.append(addrinfo[4][0])
11808 from random import shuffle
11810 except SystemExit, e:
11811 raise # Needed else can't exit
11812 except Exception, e:
11813 print "Notice:",str(e)
11818 dosyncuri = syncuri.replace(
11819 "//" + user_name + hostname + port + "/",
11820 "//" + user_name + ips[0] + port + "/", 1)
11821 except SystemExit, e:
11822 raise # Needed else can't exit
11823 except Exception, e:
11824 print "Notice:",str(e)
11828 if "--ask" in myopts:
11829 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
11834 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
11835 if "--quiet" not in myopts:
11836 print ">>> Starting rsync with "+dosyncuri+"..."
11838 emergelog(xterm_titles,
11839 ">>> Starting retry %d of %d with %s" % \
11840 (retries,maxretries,dosyncuri))
11841 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
11843 if mytimestamp != 0 and "--quiet" not in myopts:
11844 print ">>> Checking server timestamp ..."
11846 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
11848 if "--debug" in myopts:
11851 exitcode = os.EX_OK
11852 servertimestamp = 0
11853 # Even if there's no timestamp available locally, fetch the
11854 # timestamp anyway as an initial probe to verify that the server is
11855 # responsive. This protects us from hanging indefinitely on a
11856 # connection attempt to an unresponsive server which rsync's
11857 # --timeout option does not prevent.
11859 # Temporary file for remote server timestamp comparison.
11860 from tempfile import mkstemp
11861 fd, tmpservertimestampfile = mkstemp()
11863 mycommand = rsynccommand[:]
11864 mycommand.append(dosyncuri.rstrip("/") + \
11865 "/metadata/timestamp.chk")
11866 mycommand.append(tmpservertimestampfile)
11870 def timeout_handler(signum, frame):
11871 raise portage.exception.PortageException("timed out")
11872 signal.signal(signal.SIGALRM, timeout_handler)
11873 # Timeout here in case the server is unresponsive. The
11874 # --timeout rsync option doesn't apply to the initial
11875 # connection attempt.
11876 if rsync_initial_timeout:
11877 signal.alarm(rsync_initial_timeout)
11879 mypids.extend(portage.process.spawn(
11880 mycommand, env=settings.environ(), returnpid=True))
11881 exitcode = os.waitpid(mypids[0], 0)[1]
11882 content = portage.grabfile(tmpservertimestampfile)
11884 if rsync_initial_timeout:
11887 os.unlink(tmpservertimestampfile)
11890 except portage.exception.PortageException, e:
# SIGALRM fired: reap the rsync child if it is still running.
11894 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
11895 os.kill(mypids[0], signal.SIGTERM)
11896 os.waitpid(mypids[0], 0)
11897 # This is the same code rsync uses for timeout.
# Normalize the 16-bit waitpid status into a plain exit code
# (signal deaths are shifted into the high byte).
11900 if exitcode != os.EX_OK:
11901 if exitcode & 0xff:
11902 exitcode = (exitcode & 0xff) << 8
11904 exitcode = exitcode >> 8
11906 portage.process.spawned_pids.remove(mypids[0])
11909 servertimestamp = time.mktime(time.strptime(
11910 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
11911 except (OverflowError, ValueError):
11913 del mycommand, mypids, content
# Compare local vs. server timestamps to decide whether to sync,
# skip (already current), or refuse (server behind local tree).
11914 if exitcode == os.EX_OK:
11915 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
11916 emergelog(xterm_titles,
11917 ">>> Cancelling sync -- Already current.")
11920 print ">>> Timestamps on the server and in the local repository are the same."
11921 print ">>> Cancelling all further sync action. You are already up to date."
11923 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11927 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
11928 emergelog(xterm_titles,
11929 ">>> Server out of date: %s" % dosyncuri)
11932 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
11934 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11937 exitcode = SERVER_OUT_OF_DATE
11938 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
11940 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
11941 exitcode = portage.process.spawn(mycommand,
11942 env=settings.environ())
# Exit codes 0/1/3/4/11/14/20/21 end the retry loop; others
# (e.g. 2, protocol incompatibility) trigger a retry.
11943 if exitcode in [0,1,3,4,11,14,20,21]:
11945 elif exitcode in [1,3,4,11,14,20,21]:
11948 # Code 2 indicates protocol incompatibility, which is expected
11949 # for servers with protocol < 29 that don't support
11950 # --prune-empty-directories. Retry for a server that supports
11951 # at least rsync protocol version 29 (>=rsync-2.6.4).
11956 if retries<=maxretries:
11957 print ">>> Retrying..."
11962 updatecache_flg=False
11963 exitcode = EXCEEDED_MAX_RETRIES
11967 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
11968 elif exitcode == SERVER_OUT_OF_DATE:
11970 elif exitcode == EXCEEDED_MAX_RETRIES:
11972 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Translate well-known rsync failure codes into advice for the user
# (the elif conditions selecting each branch are on elided lines).
11977 msg.append("Rsync has reported that there is a syntax error. Please ensure")
11978 msg.append("that your SYNC statement is proper.")
11979 msg.append("SYNC=" + settings["SYNC"])
11981 msg.append("Rsync has reported that there is a File IO error. Normally")
11982 msg.append("this means your disk is full, but can be caused by corruption")
11983 msg.append("on the filesystem that contains PORTDIR. Please investigate")
11984 msg.append("and try again after the problem has been fixed.")
11985 msg.append("PORTDIR=" + settings["PORTDIR"])
11987 msg.append("Rsync was killed before it finished.")
11989 msg.append("Rsync has not successfully finished. It is recommended that you keep")
11990 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
11991 msg.append("to use rsync due to firewall or other restrictions. This should be a")
11992 msg.append("temporary problem unless complications exist with your network")
11993 msg.append("(and possibly your system's filesystem) configuration.")
11997 elif syncuri[:6]=="cvs://":
# --- cvs protocol ---------------------------------------------------
11998 if not os.path.exists("/usr/bin/cvs"):
11999 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12000 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12002 cvsroot=syncuri[6:]
12003 cvsdir=os.path.dirname(myportdir)
12004 if not os.path.exists(myportdir+"/CVS"):
# Initial checkout: PORTDIR must not already exist (an empty one is
# removed first), then gentoo-x86 is checked out and renamed.
12006 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12007 if os.path.exists(cvsdir+"/gentoo-x86"):
12008 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12011 os.rmdir(myportdir)
12013 if e.errno != errno.ENOENT:
12015 "!!! existing '%s' directory; exiting.\n" % myportdir)
12018 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12019 print "!!! cvs checkout error; exiting."
12021 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12024 print ">>> Starting cvs update with "+syncuri+"..."
12025 retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
12026 myportdir, settings, free=1)
12027 if retval != os.EX_OK:
12029 dosyncuri = syncuri
12031 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12032 noiselevel=-1, level=logging.ERROR)
# --- post-sync processing -------------------------------------------
12035 if updatecache_flg and \
12036 myaction != "metadata" and \
12037 "metadata-transfer" not in settings.features:
12038 updatecache_flg = False
12040 # Reload the whole config from scratch.
12041 settings, trees, mtimedb = load_emerge_config(trees=trees)
12042 root_config = trees[settings["ROOT"]]["root_config"]
12043 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12045 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12046 action_metadata(settings, portdb, myopts)
12048 if portage._global_updates(trees, mtimedb["updates"]):
12050 # Reload the whole config from scratch.
12051 settings, trees, mtimedb = load_emerge_config(trees=trees)
12052 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12053 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version in the tree against the
# installed one so we can recommend upgrading portage first.
12055 mybestpv = portdb.xmatch("bestmatch-visible",
12056 portage.const.PORTAGE_PACKAGE_ATOM)
12057 mypvs = portage.best(
12058 trees[settings["ROOT"]]["vartree"].dbapi.match(
12059 portage.const.PORTAGE_PACKAGE_ATOM))
12061 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12063 if myaction != "metadata":
# Run the user's optional post_sync hook if present and executable.
12064 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12065 retval = portage.process.spawn(
12066 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12067 dosyncuri], env=settings.environ())
12068 if retval != os.EX_OK:
12069 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12071 if(mybestpv != mypvs) and not "--quiet" in myopts:
12073 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12074 print red(" * ")+"that you update portage now, before any other packages are updated."
12076 print red(" * ")+"To update portage, run 'emerge portage' now."
12079 display_news_notification(root_config, myopts)
12082 def action_metadata(settings, portdb, myopts):
# Transfer the tree's pregenerated metadata/cache into the local
# depcache (PORTAGE_DEPCACHEDIR), optionally showing a percentage
# progress display.  NOTE(review): this listing is elided -- several
# sys.exit/ensure_dirs and counter lines are on missing lines.
12083 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
12084 old_umask = os.umask(0002)
12085 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on a top-level system directory: the cache
# transfer can delete stale entries, which would be catastrophic here.
12086 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
12087 "/lib", "/opt", "/proc", "/root", "/sbin",
12088 "/sys", "/tmp", "/usr", "/var"]:
12089 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12090 "ROOT DIRECTORY ON YOUR SYSTEM."
12091 print >> sys.stderr, \
12092 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12094 if not os.path.exists(cachedir):
12097 ec = portage.eclass_cache.cache(portdb.porttree_root)
12098 myportdir = os.path.realpath(settings["PORTDIR"])
# Source cache: the rsync-distributed metadata/cache flat files,
# opened read-only via the configured metadata db module.
12099 cm = settings.load_best_module("portdbapi.metadbmodule")(
12100 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12102 from portage.cache import util
12104 class percentage_noise_maker(util.quiet_mirroring):
# Progress reporter that doubles as the cpv source iterator and
# prints an updating percentage as categories are processed.
12105 def __init__(self, dbapi):
12107 self.cp_all = dbapi.cp_all()
12108 l = len(self.cp_all)
12109 self.call_update_min = 100000000
12110 self.min_cp_all = l/100.0
12114 def __iter__(self):
12115 for x in self.cp_all:
12117 if self.count > self.min_cp_all:
12118 self.call_update_min = 0
12120 for y in self.dbapi.cp_list(x):
# NOTE(review): "call_update_mine" looks like a typo for
# "call_update_min" -- confirm against upstream history.
12122 self.call_update_mine = 0
12124 def update(self, *arg):
# Overwrite the previous percentage in place using backspaces.
12125 try: self.pstr = int(self.pstr) + 1
12126 except ValueError: self.pstr = 1
12127 sys.stdout.write("%s%i%%" % \
12128 ("\b" * (len(str(self.pstr))+1), self.pstr))
12130 self.call_update_min = 10000000
12132 def finish(self, *arg):
12133 sys.stdout.write("\b\b\b\b100%\n")
# Quiet mode streams cpvs from a bare generator with a silent noise
# maker; otherwise the percentage reporter serves both roles.
12136 if "--quiet" in myopts:
12137 def quicky_cpv_generator(cp_all_list):
12138 for x in cp_all_list:
12139 for y in portdb.cp_list(x):
12141 source = quicky_cpv_generator(portdb.cp_all())
12142 noise_maker = portage.cache.util.quiet_mirroring()
12144 noise_maker = source = percentage_noise_maker(portdb)
12145 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12146 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask before returning.
12149 os.umask(old_umask)
12151 def action_regen(settings, portdb, max_jobs, max_load):
# Implements `emerge --regen`: rebuild metadata cache entries directly
# from the ebuilds, parallelized by MetadataRegen.
12152 xterm_titles = "notitles" not in settings.features
12153 emergelog(xterm_titles, " === regen")
12154 #regenerate cache entries
12155 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin (inside an elided try:) so spawned ebuild processes
# cannot block waiting for terminal input.
12157 os.close(sys.stdin.fileno())
12158 except SystemExit, e:
12159 raise # Needed else can't exit
12164 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12167 portage.writemsg_stdout("done!\n")
12169 def action_config(settings, trees, myopts, myfiles):
# Implements `emerge --config <atom>`: run the pkg_config() phase of a
# single installed package, prompting to disambiguate multiple matches.
# NOTE(review): listing is elided -- several sys.exit/return lines and
# the enumeration loops' headers are on missing lines.
12170 if len(myfiles) != 1:
12171 print red("!!! config can only take a single package atom at this time\n")
12173 if not is_valid_package_atom(myfiles[0]):
12174 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12176 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12177 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12181 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12182 except portage.exception.AmbiguousPackageName, e:
12183 # Multiple matches thrown from cpv_expand
12186 print "No packages found.\n"
12188 elif len(pkgs) > 1:
# Multiple installed versions matched: interactively pick one with
# --ask, otherwise list them and bail out.
12189 if "--ask" in myopts:
12191 print "Please select a package to configure:"
12195 options.append(str(idx))
12196 print options[-1]+") "+pkg
12198 options.append("X")
12199 idx = userquery("Selection?", options)
12202 pkg = pkgs[int(idx)-1]
12204 print "The following packages available:"
12207 print "\nPlease use a specific atom or the --ask option."
12213 if "--ask" in myopts:
12214 if userquery("Ready to configure "+pkg+"?") == "No":
12217 print "Configuring pkg..."
12219 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12220 mysettings = portage.config(clone=settings)
12221 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12222 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): the debug= expression below compares a string setting
# against the int 1, which is always False, and ignores the `debug`
# local computed above -- looks like a latent bug; confirm upstream.
12223 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12225 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12226 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12227 if retval == os.EX_OK:
# Clean up the temporary build dir after a successful config phase.
12228 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12229 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12232 def action_info(settings, trees, myopts, myfiles):
# Implements `emerge --info`: print system settings (versions, key
# variables, USE flags) and, for any atoms given on the command line,
# the per-package build-time settings that differ from the current
# globals.  NOTE(review): this listing is elided -- header_width,
# mypkgs/out/unset_vars initialization, several loop headers and
# else/sort lines are on missing lines.
12233 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12234 settings.profile_path, settings["CHOST"],
12235 trees[settings["ROOT"]]["vartree"].dbapi)
12237 header_title = "System Settings"
12239 print header_width * "="
12240 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12241 print header_width * "="
12242 print "System uname: "+platform.platform(aliased=1)
12244 lastSync = portage.grabfile(os.path.join(
12245 settings["PORTDIR"], "metadata", "timestamp.chk"))
12246 print "Timestamp of tree:",
# Report distcc/ccache versions and whether the features are enabled.
12252 output=commands.getstatusoutput("distcc --version")
12254 print str(output[1].split("\n",1)[0]),
12255 if "distcc" in settings.features:
12260 output=commands.getstatusoutput("ccache -V")
12262 print str(output[1].split("\n",1)[0]),
12263 if "ccache" in settings.features:
# Toolchain packages of interest, extended by the profile's info_pkgs.
12268 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12269 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
12270 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12271 myvars = portage.util.unique_array(myvars)
12275 if portage.isvalidatom(x):
12276 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12277 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12278 pkg_matches.sort(portage.pkgcmp)
12280 for pn, ver, rev in pkg_matches:
12282 pkgs.append(ver + "-" + rev)
12286 pkgs = ", ".join(pkgs)
12287 print "%-20s %s" % (x+":", pkgs)
12289 print "%-20s %s" % (x+":", "[NOT VALID]")
12291 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# --verbose dumps every settings key; the default shows a curated list
# plus the profile's info_vars.
12293 if "--verbose" in myopts:
12294 myvars=settings.keys()
12296 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12297 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12298 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12299 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12301 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12303 myvars = portage.util.unique_array(myvars)
12309 print '%s="%s"' % (x, settings[x])
# USE is printed specially: USE_EXPAND-derived flags are pulled out of
# the plain USE list and shown under their own variable names.
12311 use = set(settings["USE"].split())
12312 use_expand = settings["USE_EXPAND"].split()
12314 for varname in use_expand:
12315 flag_prefix = varname.lower() + "_"
12316 for f in list(use):
12317 if f.startswith(flag_prefix):
12321 print 'USE="%s"' % " ".join(use),
12322 for varname in use_expand:
12323 myval = settings.get(varname)
12325 print '%s="%s"' % (varname, myval),
12328 unset_vars.append(x)
12330 print "Unset: "+", ".join(unset_vars)
12333 if "--debug" in myopts:
12334 for x in dir(portage):
12335 module = getattr(portage, x)
12336 if "cvs_id_string" in dir(module):
12337 print "%s: %s" % (str(x), str(module.cvs_id_string))
12339 # See if we can find any packages installed matching the strings
12340 # passed on the command line
12342 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12343 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12345 mypkgs.extend(vardb.match(x))
12347 # If some packages were found...
12349 # Get our global settings (we only print stuff if it varies from
12350 # the current config)
12351 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12352 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12354 pkgsettings = portage.config(clone=settings)
12356 for myvar in mydesiredvars:
12357 global_vals[myvar] = set(settings.get(myvar, "").split())
12359 # Loop through each package
12360 # Only print settings if they differ from global settings
12361 header_title = "Package Settings"
12362 print header_width * "="
12363 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12364 print header_width * "="
12365 from portage.output import EOutput
12368 # Get all package specific variables
12369 auxvalues = vardb.aux_get(pkg, auxkeys)
12371 for i in xrange(len(auxkeys)):
12372 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12374 for myvar in mydesiredvars:
12375 # If the package variable doesn't match the
12376 # current global variable, something has changed
12377 # so set diff_found so we know to print
12378 if valuesmap[myvar] != global_vals[myvar]:
12379 diff_values[myvar] = valuesmap[myvar]
# Only flags in IUSE are meaningful; restrict the recorded USE to
# them before comparing against what the current config would use.
12380 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12381 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12382 pkgsettings.reset()
12383 # If a matching ebuild is no longer available in the tree, maybe it
12384 # would make sense to compare against the flags for the best
12385 # available version with the same slot?
12387 if portdb.cpv_exists(pkg):
12389 pkgsettings.setcpv(pkg, mydb=mydb)
12390 if valuesmap["IUSE"].intersection(
12391 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12392 diff_values["USE"] = valuesmap["USE"]
12393 # If a difference was found, print the info for
12396 # Print package info
12397 print "%s was built with the following:" % pkg
12398 for myvar in mydesiredvars + ["USE"]:
12399 if myvar in diff_values:
12400 mylist = list(diff_values[myvar])
12402 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12404 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12405 ebuildpath = vardb.findname(pkg)
12406 if not ebuildpath or not os.path.exists(ebuildpath):
12407 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): debug= below compares a string setting to int 1
# (always False) -- same latent issue as in action_config.
12409 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12410 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12411 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12414 def action_search(root_config, myopts, myfiles, spinner):
# Implements `emerge --search`: run each search term through the search
# class and print the accumulated results.
# NOTE(review): the empty-terms guard line, the try: line and the
# sys.exit on regex error are elided from this listing.
12416 print "emerge: no search terms provided."
12418 searchinstance = search(root_config,
12419 spinner, "--searchdesc" in myopts,
12420 "--quiet" not in myopts, "--usepkg" in myopts,
12421 "--usepkgonly" in myopts)
12422 for mysearch in myfiles:
12424 searchinstance.execute(mysearch)
# Search terms are compiled as regular expressions; report a bad
# pattern instead of tracebacking.
12425 except re.error, comment:
12426 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12428 searchinstance.output()
12430 def action_depclean(settings, trees, ldpath_mtimes,
12431 myopts, action, myfiles, spinner):
# Implement `emerge --depclean` and `emerge --prune`: compute the set of
# installed packages that are neither in the world/system sets nor required
# by another installed package, then unmerge them in dependency order.
# NOTE(review): the listing below has elided lines (gaps in the embedded
# numbering), so some try:/for/if headers and early returns are not visible.
12432 # Kill packages that aren't explicitly merged or are required as a
12433 # dependency of another package. World file is explicit.
12435 # Global depclean or prune operations are not very safe when there are
12436 # missing dependencies since it's unknown how badly incomplete
12437 # the dependency graph is, and we might accidentally remove packages
12438 # that should have been pulled into the graph. On the other hand, it's
12439 # relatively safe to ignore missing deps when only asked to remove
12440 # specific packages.
12441 allow_missing_deps = len(myfiles) > 0
# Safety-warning text shown to the user before a full (argument-less) depclean.
12444 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12445 msg.append("mistakes. Packages that are part of the world set will always\n")
12446 msg.append("be kept. They can be manually added to this set with\n")
12447 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12448 msg.append("package.provided (see portage(5)) will be removed by\n")
12449 msg.append("depclean, even if they are part of the world set.\n")
12451 msg.append("As a safety measure, depclean will not remove any packages\n")
12452 msg.append("unless *all* required dependencies have been resolved. As a\n")
12453 msg.append("consequence, it is often necessary to run %s\n" % \
12454 good("`emerge --update"))
12455 msg.append(good("--newuse --deep @system @world`") + \
12456 " prior to depclean.\n")
# Only show the warning for an interactive, whole-system depclean.
12458 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12459 portage.writemsg_stdout("\n")
12461 portage.writemsg_stdout(colorize("WARN", " * ") + x)
# Resolve the protected sets ("system" and "world"); packages matched by
# these are never candidates for removal.
12463 xterm_titles = "notitles" not in settings.features
12464 myroot = settings["ROOT"]
12465 root_config = trees[myroot]["root_config"]
12466 getSetAtoms = root_config.setconfig.getSetAtoms
12467 vardb = trees[myroot]["vartree"].dbapi
12469 required_set_names = ("system", "world")
12473 for s in required_set_names:
12474 required_sets[s] = InternalPackageSet(
12475 initial_atoms=getSetAtoms(s))
12478 # When removing packages, use a temporary version of world
12479 # which excludes packages that are intended to be eligible for
12481 world_temp_set = required_sets["world"]
12482 system_set = required_sets["system"]
# Warn loudly (with a countdown) if either protected set is empty, since
# proceeding could then remove essential packages.
12484 if not system_set or not world_temp_set:
12487 writemsg_level("!!! You have no system list.\n",
12488 level=logging.ERROR, noiselevel=-1)
12490 if not world_temp_set:
12491 writemsg_level("!!! You have no world file.\n",
12492 level=logging.WARNING, noiselevel=-1)
12494 writemsg_level("!!! Proceeding is likely to " + \
12495 "break your installation.\n",
12496 level=logging.WARNING, noiselevel=-1)
12497 if "--pretend" not in myopts:
12498 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12500 if action == "depclean":
12501 emergelog(xterm_titles, " >>> depclean")
# Validate and expand each command-line atom into args_set; an ambiguous
# short name is reported with its fully-qualified candidates.
12504 args_set = InternalPackageSet()
12507 if not is_valid_package_atom(x):
12508 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12509 level=logging.ERROR, noiselevel=-1)
12510 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12513 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12514 except portage.exception.AmbiguousPackageName, e:
12515 msg = "The short ebuild name \"" + x + \
12516 "\" is ambiguous. Please specify " + \
12517 "one of the following " + \
12518 "fully-qualified ebuild names instead:"
12519 for line in textwrap.wrap(msg, 70):
12520 writemsg_level("!!! %s\n" % (line,),
12521 level=logging.ERROR, noiselevel=-1)
12523 writemsg_level(" %s\n" % colorize("INFORM", i),
12524 level=logging.ERROR, noiselevel=-1)
12525 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12528 matched_packages = False
12531 matched_packages = True
12533 if not matched_packages:
12534 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build a resolver ("remove" mode) over the installed-package database.
12538 writemsg_level("\nCalculating dependencies ")
12539 resolver_params = create_depgraph_params(myopts, "remove")
12540 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12541 vardb = resolver.trees[myroot]["vartree"].dbapi
# depclean with arguments: protect every installed package NOT matched by
# an argument atom by adding it to the temporary world set.
12543 if action == "depclean":
12546 # Pull in everything that's installed but not matched
12547 # by an argument atom since we don't want to clean any
12548 # package if something depends on it.
12550 world_temp_set.clear()
12555 if args_set.findAtomForPackage(pkg) is None:
12556 world_temp_set.add("=" + pkg.cpv)
12558 except portage.exception.InvalidDependString, e:
12559 show_invalid_depstring_notice(pkg,
12560 pkg.metadata["PROVIDE"], str(e))
12562 world_temp_set.add("=" + pkg.cpv)
12565 elif action == "prune":
12567 # Pull in everything that's installed since we don't
12568 # want to prune a package if something depends on it.
12569 world_temp_set.clear()
12570 world_temp_set.update(vardb.cp_all())
12574 # Try to prune everything that's slotted.
12575 for cp in vardb.cp_all():
12576 if len(vardb.cp_list(cp)) > 1:
12579 # Remove atoms from world that match installed packages
12580 # that are also matched by argument atoms, but do not remove
12581 # them if they match the highest installed version.
12584 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12585 if not pkgs_for_cp or pkg not in pkgs_for_cp:
12586 raise AssertionError("package expected in matches: " + \
12587 "cp = %s, cpv = %s matches = %s" % \
12588 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs returns versions in ascending order, so the last entry is
# the highest installed version, which prune always keeps.
12590 highest_version = pkgs_for_cp[-1]
12591 if pkg == highest_version:
12592 # pkg is the highest version
12593 world_temp_set.add("=" + pkg.cpv)
12596 if len(pkgs_for_cp) <= 1:
12597 raise AssertionError("more packages expected: " + \
12598 "cp = %s, cpv = %s matches = %s" % \
12599 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12602 if args_set.findAtomForPackage(pkg) is None:
12603 world_temp_set.add("=" + pkg.cpv)
12605 except portage.exception.InvalidDependString, e:
12606 show_invalid_depstring_notice(pkg,
12607 pkg.metadata["PROVIDE"], str(e))
12609 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly adjusted) protected sets so the
# dependency graph is rooted at system/world.
12613 for s, package_set in required_sets.iteritems():
12614 set_atom = SETPREFIX + s
12615 set_arg = SetArg(arg=set_atom, set=package_set,
12616 root_config=resolver.roots[myroot])
12617 set_args[s] = set_arg
12618 for atom in set_arg.set:
12619 resolver._dep_stack.append(
12620 Dependency(atom=atom, root=myroot, parent=set_arg))
12621 resolver.digraph.add(set_arg, None)
12623 success = resolver._complete_graph()
12624 writemsg_level("\b\b... done!\n")
12626 resolver.display_problems()
# Nested helper: report deps that could not be satisfied while building the
# graph. Returns truthy when cleaning must abort (unless myfiles were given,
# see allow_missing_deps above).
12631 def unresolved_deps():
12633 unresolvable = set()
12634 for dep in resolver._initially_unsatisfied_deps:
12635 if isinstance(dep.parent, Package) and \
12636 (dep.priority > UnmergeDepPriority.SOFT):
12637 unresolvable.add((dep.atom, dep.parent.cpv))
12639 if not unresolvable:
12642 if unresolvable and not allow_missing_deps:
12643 prefix = bad(" * ")
12645 msg.append("Dependencies could not be completely resolved due to")
12646 msg.append("the following required packages not being installed:")
12648 for atom, parent in unresolvable:
12649 msg.append(" %s pulled in by:" % (atom,))
12650 msg.append(" %s" % (parent,))
12652 msg.append("Have you forgotten to run " + \
12653 good("`emerge --update --newuse --deep world`") + " prior to")
12654 msg.append(("%s? It may be necessary to manually " + \
12655 "uninstall packages that no longer") % action)
12656 msg.append("exist in the portage tree since " + \
12657 "it may not be possible to satisfy their")
12658 msg.append("dependencies. Also, be aware of " + \
12659 "the --with-bdeps option that is documented")
12660 msg.append("in " + good("`man emerge`") + ".")
12661 if action == "prune":
12663 msg.append("If you would like to ignore " + \
12664 "dependencies then use %s." % good("--nodeps"))
12665 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12666 level=logging.ERROR, noiselevel=-1)
12670 if unresolved_deps():
# Snapshot the graph; any installed Package node in it is "required".
12673 graph = resolver.digraph.copy()
12674 required_pkgs_total = 0
12676 if isinstance(node, Package):
12677 required_pkgs_total += 1
# Nested helper: for --verbose, print which parents keep a package alive.
12679 def show_parents(child_node):
12680 parent_nodes = graph.parent_nodes(child_node)
12681 if not parent_nodes:
12682 # With --prune, the highest version can be pulled in without any
12683 # real parent since all installed packages are pulled in. In that
12684 # case there's nothing to show here.
12687 for node in parent_nodes:
12688 parent_strs.append(str(getattr(node, "cpv", node)))
12691 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
12692 for parent_str in parent_strs:
12693 msg.append(" %s\n" % (parent_str,))
12695 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Nested helper: every installed package absent from the requirement graph
# is a removal candidate.
12697 def create_cleanlist():
12698 pkgs_to_remove = []
12700 if action == "depclean":
12706 arg_atom = args_set.findAtomForPackage(pkg)
12707 except portage.exception.InvalidDependString:
12708 # this error has already been displayed by now
12712 if pkg not in graph:
12713 pkgs_to_remove.append(pkg)
12714 elif "--verbose" in myopts:
12719 if pkg not in graph:
12720 pkgs_to_remove.append(pkg)
12721 elif "--verbose" in myopts:
12724 elif action == "prune":
12725 # Prune really uses all installed instead of world. It's not
12726 # a real reverse dependency so don't display it as such.
12727 graph.remove(set_args["world"])
12729 for atom in args_set:
12730 for pkg in vardb.match_pkgs(atom):
12731 if pkg not in graph:
12732 pkgs_to_remove.append(pkg)
12733 elif "--verbose" in myopts:
12736 if not pkgs_to_remove:
12738 ">>> No packages selected for removal by %s\n" % action)
12739 if "--verbose" not in myopts:
12741 ">>> To see reverse dependencies, use %s\n" % \
12743 if action == "prune":
12745 ">>> To ignore dependencies, use %s\n" % \
12748 return pkgs_to_remove
12750 cleanlist = create_cleanlist()
12753 clean_set = set(cleanlist)
12755 # Check if any of these package are the sole providers of libraries
12756 # with consumers that have not been selected for removal. If so, these
12757 # packages and any dependencies need to be added to the graph.
12758 real_vardb = trees[myroot]["vartree"].dbapi
12759 linkmap = real_vardb.linkmap
12760 liblist = linkmap.listLibraryObjects()
12761 consumer_cache = {}
12762 provider_cache = {}
12766 writemsg_level(">>> Checking for lib consumers...\n")
# For each removal candidate, collect the shared libraries it owns and the
# files that link against them (consumers), caching linkmap lookups.
12768 for pkg in cleanlist:
12769 pkg_dblink = real_vardb._dblink(pkg.cpv)
12770 provided_libs = set()
12772 for lib in liblist:
12773 if pkg_dblink.isowner(lib, myroot):
12774 provided_libs.add(lib)
12776 if not provided_libs:
12780 for lib in provided_libs:
12781 lib_consumers = consumer_cache.get(lib)
12782 if lib_consumers is None:
12783 lib_consumers = linkmap.findConsumers(lib)
12784 consumer_cache[lib] = lib_consumers
12786 consumers[lib] = lib_consumers
# Discard consumers owned by the package itself: removing the package
# removes those consumers too, so they cannot break.
12791 for lib, lib_consumers in consumers.items():
12792 for consumer_file in list(lib_consumers):
12793 if pkg_dblink.isowner(consumer_file, myroot):
12794 lib_consumers.remove(consumer_file)
12795 if not lib_consumers:
# Pair each remaining consumer with the alternative providers of the
# same soname, so sole-provider situations can be detected below.
12801 for lib, lib_consumers in consumers.iteritems():
12803 soname = soname_cache.get(lib)
12805 soname = linkmap.getSoname(lib)
12806 soname_cache[lib] = soname
12808 consumer_providers = []
12809 for lib_consumer in lib_consumers:
12810 providers = provider_cache.get(lib)
12811 if providers is None:
12812 providers = linkmap.findProviders(lib_consumer)
12813 provider_cache[lib_consumer] = providers
12814 if soname not in providers:
12815 # Why does this happen?
12817 consumer_providers.append(
12818 (lib_consumer, providers[soname]))
12820 consumers[lib] = consumer_providers
12822 consumer_map[pkg] = consumers
# Map every consumer/provider file back to its owning package in one bulk
# ownership query (much cheaper than per-file lookups).
12826 search_files = set()
12827 for consumers in consumer_map.itervalues():
12828 for lib, consumer_providers in consumers.iteritems():
12829 for lib_consumer, providers in consumer_providers:
12830 search_files.add(lib_consumer)
12831 search_files.update(providers)
12833 writemsg_level(">>> Assigning files to packages...\n")
12834 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
12836 for pkg, consumers in consumer_map.items():
12837 for lib, consumer_providers in consumers.items():
12838 lib_consumers = set()
12840 for lib_consumer, providers in consumer_providers:
12841 owner_set = file_owners.get(lib_consumer)
12842 provider_dblinks = set()
12843 provider_pkgs = set()
12845 if len(providers) > 1:
12846 for provider in providers:
12847 provider_set = file_owners.get(provider)
12848 if provider_set is not None:
12849 provider_dblinks.update(provider_set)
# If another (kept) package also provides this soname, the consumer
# is safe and this candidate does not block removal.
12851 if len(provider_dblinks) > 1:
12852 for provider_dblink in provider_dblinks:
12853 pkg_key = ("installed", myroot,
12854 provider_dblink.mycpv, "nomerge")
12855 if pkg_key not in clean_set:
12856 provider_pkgs.add(vardb.get(pkg_key))
12861 if owner_set is not None:
12862 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't count.
12864 for consumer_dblink in list(lib_consumers):
12865 if ("installed", myroot, consumer_dblink.mycpv,
12866 "nomerge") in clean_set:
12867 lib_consumers.remove(consumer_dblink)
12871 consumers[lib] = lib_consumers
12875 del consumer_map[pkg]
# Anything left in consumer_map is a sole library provider with surviving
# consumers: warn, keep it, and pull it (plus its deps) into the graph.
12878 # TODO: Implement a package set for rebuilding consumer packages.
12880 msg = "In order to avoid breakage of link level " + \
12881 "dependencies, one or more packages will not be removed. " + \
12882 "This can be solved by rebuilding " + \
12883 "the packages that pulled them in."
12885 prefix = bad(" * ")
12886 from textwrap import wrap
12887 writemsg_level("".join(prefix + "%s\n" % line for \
12888 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
12891 for pkg, consumers in consumer_map.iteritems():
12892 unique_consumers = set(chain(*consumers.values()))
12893 unique_consumers = sorted(consumer.mycpv \
12894 for consumer in unique_consumers)
12896 msg.append(" %s pulled in by:" % (pkg.cpv,))
12897 for consumer in unique_consumers:
12898 msg.append(" %s" % (consumer,))
12900 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
12901 level=logging.WARNING, noiselevel=-1)
12903 # Add lib providers to the graph as children of lib consumers,
12904 # and also add any dependencies pulled in by the provider.
12905 writemsg_level(">>> Adding lib providers to graph...\n")
12907 for pkg, consumers in consumer_map.iteritems():
12908 for consumer_dblink in set(chain(*consumers.values())):
12909 consumer_pkg = vardb.get(("installed", myroot,
12910 consumer_dblink.mycpv, "nomerge"))
12911 if not resolver._add_pkg(pkg,
12912 Dependency(parent=consumer_pkg,
12913 priority=UnmergeDepPriority(runtime=True),
12915 resolver.display_problems()
# Re-complete the graph and recompute the clean list now that the kept
# library providers (and their deps) are included.
12918 writemsg_level("\nCalculating dependencies ")
12919 success = resolver._complete_graph()
12920 writemsg_level("\b\b... done!\n")
12921 resolver.display_problems()
12924 if unresolved_deps():
12927 graph = resolver.digraph.copy()
12928 required_pkgs_total = 0
12930 if isinstance(node, Package):
12931 required_pkgs_total += 1
12932 cleanlist = create_cleanlist()
12935 clean_set = set(cleanlist)
12937 # Use a topological sort to create an unmerge order such that
12938 # each package is unmerged before its dependencies. This is
12939 # necessary to avoid breaking things that may need to run
12940 # during pkg_prerm or pkg_postrm phases.
12942 # Create a new graph to account for dependencies between the
12943 # packages being unmerged.
12947 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
12948 runtime = UnmergeDepPriority(runtime=True)
12949 runtime_post = UnmergeDepPriority(runtime_post=True)
12950 buildtime = UnmergeDepPriority(buildtime=True)
12952 "RDEPEND": runtime,
12953 "PDEPEND": runtime_post,
12954 "DEPEND": buildtime,
# Edge direction: child = dependency, parent = dependent; priorities come
# from the dep type via priority_map above.
12957 for node in clean_set:
12958 graph.add(node, None)
12960 node_use = node.metadata["USE"].split()
12961 for dep_type in dep_keys:
12962 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking: packages being removed may
# have deps that no longer parse strictly.
12966 portage.dep._dep_check_strict = False
12967 success, atoms = portage.dep_check(depstr, None, settings,
12968 myuse=node_use, trees=resolver._graph_trees,
12971 portage.dep._dep_check_strict = True
12973 # Ignore invalid deps of packages that will
12974 # be uninstalled anyway.
12977 priority = priority_map[dep_type]
12979 if not isinstance(atom, portage.dep.Atom):
12980 # Ignore invalid atoms returned from dep_check().
12984 matches = vardb.match_pkgs(atom)
12987 for child_node in matches:
12988 if child_node in clean_set:
12989 graph.add(child_node, node, priority=priority)
12992 if len(graph.order) == len(graph.root_nodes()):
12993 # If there are no dependencies between packages
12994 # let unmerge() group them by cat/pn.
12996 cleanlist = [pkg.cpv for pkg in graph.order]
12998 # Order nodes from lowest to highest overall reference count for
12999 # optimal root node selection.
13000 node_refcounts = {}
13001 for node in graph.order:
13002 node_refcounts[node] = len(graph.parent_nodes(node))
13003 def cmp_reference_count(node1, node2):
13004 return node_refcounts[node1] - node_refcounts[node2]
13005 graph.order.sort(cmp_reference_count)
# Repeatedly pop root nodes; progressively ignore higher dep priorities
# only when circular dependencies leave no true roots.
13007 ignore_priority_range = [None]
13008 ignore_priority_range.extend(
13009 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13010 while not graph.empty():
13011 for ignore_priority in ignore_priority_range:
13012 nodes = graph.root_nodes(ignore_priority=ignore_priority)
13016 raise AssertionError("no root nodes")
13017 if ignore_priority is not None:
13018 # Some deps have been dropped due to circular dependencies,
13019 # so only pop one node in order to minimize the number that
13024 cleanlist.append(node.cpv)
13026 unmerge(root_config, myopts, "unmerge", cleanlist,
13027 ldpath_mtimes, ordered=ordered)
13029 if action == "prune":
13032 if not cleanlist and "--quiet" in myopts:
# Final summary statistics for the user.
13035 print "Packages installed: "+str(len(vardb.cpv_all()))
13036 print "Packages in world: " + \
13037 str(len(root_config.sets["world"].getAtoms()))
13038 print "Packages in system: " + \
13039 str(len(root_config.sets["system"].getAtoms()))
13040 print "Required packages: "+str(required_pkgs_total)
13041 if "--pretend" in myopts:
13042 print "Number to remove: "+str(len(cleanlist))
13044 print "Number removed: "+str(len(cleanlist))
13046 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13047 skip_masked=False, skip_unsatisfied=False):
# NOTE(review): elided lines (gaps in the embedded numbering) hide the
# docstring delimiters, the retry loop header, and some break/continue
# statements in this function.
13049 Construct a depgraph for the given resume list. This will raise
13050 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13052 @returns: (success, depgraph, dropped_tasks)
13054 mergelist = mtimedb["resume"]["mergelist"]
13055 dropped_tasks = set()
13057 mydepgraph = depgraph(settings, trees,
13058 myopts, myparams, spinner)
13060 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13061 skip_masked=skip_masked)
13062 except depgraph.UnsatisfiedResumeDep, e:
13063 if not skip_unsatisfied:
# skip_unsatisfied: walk up from each unsatisfied dep's parent, marking
# every merge-scheduled ancestor whose deps would break if the child is
# dropped, so they can all be pruned from the mergelist together.
13066 graph = mydepgraph.digraph
13067 unsatisfied_parents = dict((dep.parent, dep.parent) \
13068 for dep in e.value)
13069 traversed_nodes = set()
13070 unsatisfied_stack = list(unsatisfied_parents)
13071 while unsatisfied_stack:
13072 pkg = unsatisfied_stack.pop()
13073 if pkg in traversed_nodes:
13075 traversed_nodes.add(pkg)
13077 # If this package was pulled in by a parent
13078 # package scheduled for merge, removing this
13079 # package may cause the parent package's
13080 # dependency to become unsatisfied.
13081 for parent_node in graph.parent_nodes(pkg):
13082 if not isinstance(parent_node, Package) \
13083 or parent_node.operation not in ("merge", "nomerge"):
13086 graph.child_nodes(parent_node,
13087 ignore_priority=DepPriority.SOFT)
13088 if pkg in unsatisfied:
13089 unsatisfied_parents[parent_node] = parent_node
13090 unsatisfied_stack.append(parent_node)
# Drop all affected entries from the resume mergelist in place.
13092 pruned_mergelist = [x for x in mergelist \
13093 if isinstance(x, list) and \
13094 tuple(x) not in unsatisfied_parents]
13096 # If the mergelist doesn't shrink then this loop is infinite.
13097 if len(pruned_mergelist) == len(mergelist):
13098 # This happens if a package can't be dropped because
13099 # it's already installed, but it has unsatisfied PDEPEND.
13101 mergelist[:] = pruned_mergelist
13103 # Exclude installed packages that have been removed from the graph due
13104 # to failure to build/install runtime dependencies after the dependent
13105 # package has already been installed.
13106 dropped_tasks.update(pkg for pkg in \
13107 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs releases references held by the pruned packages so they can
# be garbage collected before the graph is rebuilt.
13108 mydepgraph.break_refs(unsatisfied_parents)
13110 del e, graph, traversed_nodes, \
13111 unsatisfied_parents, unsatisfied_stack
13115 return (success, mydepgraph, dropped_tasks)
13117 def action_build(settings, trees, mtimedb,
13118 myopts, myaction, myfiles, spinner):
# Implement the main merge path of emerge: build the dependency graph
# (fresh or from --resume data), optionally display/confirm it, then hand
# the ordered merge list to the Scheduler and run post-merge auto-clean.
# NOTE(review): this listing has elided lines (gaps in the embedded
# numbering) hiding many del/continue/return statements and some guards.
13120 # validate the state of the resume data
13121 # so that we can make assumptions later.
13122 for k in ("resume", "resume_backup"):
13123 if k not in mtimedb:
13125 resume_data = mtimedb[k]
# Any malformed resume entry is discarded (deletion lines are elided here)
# rather than trusted.
13126 if not isinstance(resume_data, dict):
13129 mergelist = resume_data.get("mergelist")
13130 if not isinstance(mergelist, list):
13133 resume_opts = resume_data.get("myopts")
13134 if not isinstance(resume_opts, (dict, list)):
13137 favorites = resume_data.get("favorites")
13138 if not isinstance(favorites, list):
# --resume: fall back to resume_backup if the primary entry is gone, and
# re-apply the saved options from the interrupted invocation.
13143 if "--resume" in myopts and \
13144 ("resume" in mtimedb or
13145 "resume_backup" in mtimedb):
13147 if "resume" not in mtimedb:
13148 mtimedb["resume"] = mtimedb["resume_backup"]
13149 del mtimedb["resume_backup"]
13151 # "myopts" is a list for backward compatibility.
13152 resume_opts = mtimedb["resume"].get("myopts", [])
13153 if isinstance(resume_opts, list):
13154 resume_opts = dict((k,True) for k in resume_opts)
# These options apply only to the original invocation, not the resume.
13155 for opt in ("--skipfirst", "--ask", "--tree"):
13156 resume_opts.pop(opt, None)
13157 myopts.update(resume_opts)
13159 if "--debug" in myopts:
13160 writemsg_level("myopts %s\n" % (myopts,))
13162 # Adjust config according to options of the command being resumed.
13163 for myroot in trees:
13164 mysettings = trees[myroot]["vartree"].settings
13165 mysettings.unlock()
13166 adjust_config(myopts, mysettings)
13168 del myroot, mysettings
13170 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently tested option flags as locals.
13173 buildpkgonly = "--buildpkgonly" in myopts
13174 pretend = "--pretend" in myopts
13175 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13176 ask = "--ask" in myopts
13177 nodeps = "--nodeps" in myopts
13178 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13179 tree = "--tree" in myopts
13180 if nodeps and tree:
13182 del myopts["--tree"]
13183 portage.writemsg(colorize("WARN", " * ") + \
13184 "--tree is broken with --nodeps. Disabling...\n")
13185 debug = "--debug" in myopts
13186 verbose = "--verbose" in myopts
13187 quiet = "--quiet" in myopts
13188 if pretend or fetchonly:
13189 # make the mtimedb readonly
13190 mtimedb.filename = None
13191 if "--digest" in myopts:
# Discourage --digest in favor of `repoman manifest`.
13192 msg = "The --digest option can prevent corruption from being" + \
13193 " noticed. The `repoman manifest` command is the preferred" + \
13194 " way to generate manifests and it is capable of doing an" + \
13195 " entire repository or category at once."
13196 prefix = bad(" * ")
13197 writemsg(prefix + "\n")
13198 from textwrap import wrap
13199 for line in wrap(msg, 72):
13200 writemsg("%s%s\n" % (prefix, line))
13201 writemsg(prefix + "\n")
# Announce what kind of list will be displayed (merged/fetched/built).
13203 if "--quiet" not in myopts and \
13204 ("--pretend" in myopts or "--ask" in myopts or \
13205 "--tree" in myopts or "--verbose" in myopts):
13207 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13209 elif "--buildpkgonly" in myopts:
13213 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13215 print darkgreen("These are the packages that would be %s, in reverse order:") % action
13219 print darkgreen("These are the packages that would be %s, in order:") % action
13222 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13223 if not show_spinner:
13224 spinner.update = spinner.update_quiet
# Resume path: rebuild the depgraph from the saved mergelist, optionally
# skipping the first entry and any unsatisfiable entries (--skipfirst).
13227 favorites = mtimedb["resume"].get("favorites")
13228 if not isinstance(favorites, list):
13232 print "Calculating dependencies ",
13233 myparams = create_depgraph_params(myopts, myaction)
13235 resume_data = mtimedb["resume"]
13236 mergelist = resume_data["mergelist"]
13237 if mergelist and "--skipfirst" in myopts:
13238 for i, task in enumerate(mergelist):
13239 if isinstance(task, list) and \
13240 task and task[-1] == "merge":
13244 skip_masked = "--skipfirst" in myopts
13245 skip_unsatisfied = "--skipfirst" in myopts
13249 success, mydepgraph, dropped_tasks = resume_depgraph(
13250 settings, trees, mtimedb, myopts, myparams, spinner,
13251 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13252 except (portage.exception.PackageNotFound,
13253 depgraph.UnsatisfiedResumeDep), e:
13254 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13255 mydepgraph = e.depgraph
# On resume failure, explain which entries are invalid/masked/missing.
13258 from textwrap import wrap
13259 from portage.output import EOutput
13262 resume_data = mtimedb["resume"]
13263 mergelist = resume_data.get("mergelist")
13264 if not isinstance(mergelist, list):
13266 if mergelist and debug or (verbose and not quiet):
13267 out.eerror("Invalid resume list:")
13270 for task in mergelist:
13271 if isinstance(task, list):
13272 out.eerror(indent + str(tuple(task)))
13275 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13276 out.eerror("One or more packages are either masked or " + \
13277 "have missing dependencies:")
13280 for dep in e.value:
13281 if dep.atom is None:
13282 out.eerror(indent + "Masked package:")
13283 out.eerror(2 * indent + str(dep.parent))
13286 out.eerror(indent + str(dep.atom) + " pulled in by:")
13287 out.eerror(2 * indent + str(dep.parent))
13289 msg = "The resume list contains packages " + \
13290 "that are either masked or have " + \
13291 "unsatisfied dependencies. " + \
13292 "Please restart/continue " + \
13293 "the operation manually, or use --skipfirst " + \
13294 "to skip the first package in the list and " + \
13295 "any other packages that may be " + \
13296 "masked or have missing dependencies."
13297 for line in wrap(msg, 72):
13299 elif isinstance(e, portage.exception.PackageNotFound):
13300 out.eerror("An expected package is " + \
13301 "not available: %s" % str(e))
13303 msg = "The resume list contains one or more " + \
13304 "packages that are no longer " + \
13305 "available. Please restart/continue " + \
13306 "the operation manually."
13307 for line in wrap(msg, 72):
13311 print "\b\b... done!"
# Report packages dropped by skip_unsatisfied pruning.
13315 portage.writemsg("!!! One or more packages have been " + \
13316 "dropped due to\n" + \
13317 "!!! masking or unsatisfied dependencies:\n\n",
13319 for task in dropped_tasks:
13320 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
13321 portage.writemsg("\n", noiselevel=-1)
13324 if mydepgraph is not None:
13325 mydepgraph.display_problems()
13326 if not (ask or pretend):
13327 # delete the current list and also the backup
13328 # since it's probably stale too.
13329 for k in ("resume", "resume_backup"):
13330 mtimedb.pop(k, None)
13335 if ("--resume" in myopts):
13336 print darkgreen("emerge: It seems we have nothing to resume...")
# Fresh (non-resume) path: build the depgraph from the command line.
13339 myparams = create_depgraph_params(myopts, myaction)
13340 if "--quiet" not in myopts and "--nodeps" not in myopts:
13341 print "Calculating dependencies ",
13343 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13345 retval, favorites = mydepgraph.select_files(myfiles)
13346 except portage.exception.PackageNotFound, e:
13347 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13349 except portage.exception.PackageSetNotFound, e:
13350 root_config = trees[settings["ROOT"]]["root_config"]
13351 display_missing_pkg_set(root_config, e.value)
13354 print "\b\b... done!"
13356 mydepgraph.display_problems()
# Interactive display/confirmation (--ask/--tree/--verbose, not --pretend).
13359 if "--pretend" not in myopts and \
13360 ("--ask" in myopts or "--tree" in myopts or \
13361 "--verbose" in myopts) and \
13362 not ("--quiet" in myopts and "--ask" not in myopts):
13363 if "--resume" in myopts:
13364 mymergelist = mydepgraph.altlist()
13365 if len(mymergelist) == 0:
13366 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13368 favorites = mtimedb["resume"]["favorites"]
13369 retval = mydepgraph.display(
13370 mydepgraph.altlist(reversed=tree),
13371 favorites=favorites)
13372 mydepgraph.display_problems()
13373 if retval != os.EX_OK:
13375 prompt="Would you like to resume merging these packages?"
13377 retval = mydepgraph.display(
13378 mydepgraph.altlist(reversed=("--tree" in myopts)),
13379 favorites=favorites)
13380 mydepgraph.display_problems()
13381 if retval != os.EX_OK:
13384 for x in mydepgraph.altlist():
13385 if isinstance(x, Package) and x.operation == "merge":
# Choose the confirmation prompt based on what will actually happen
# (add world favorites, auto-clean, fetch, or merge).
13389 sets = trees[settings["ROOT"]]["root_config"].sets
13390 world_candidates = None
13391 if "--noreplace" in myopts and \
13392 not oneshot and favorites:
13393 # Sets that are not world candidates are filtered
13394 # out here since the favorites list needs to be
13395 # complete for depgraph.loadResumeCommand() to
13396 # operate correctly.
13397 world_candidates = [x for x in favorites \
13398 if not (x.startswith(SETPREFIX) and \
13399 not sets[x[1:]].world_candidate)]
13400 if "--noreplace" in myopts and \
13401 not oneshot and world_candidates:
13403 for x in world_candidates:
13404 print " %s %s" % (good("*"), x)
13405 prompt="Would you like to add these packages to your world favorites?"
13406 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13407 prompt="Nothing to merge; would you like to auto-clean packages?"
13410 print "Nothing to merge; quitting."
13413 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13414 prompt="Would you like to fetch the source files for these packages?"
13416 prompt="Would you like to merge these packages?"
13418 if "--ask" in myopts and userquery(prompt) == "No":
13423 # Don't ask again (e.g. when auto-cleaning packages after merge)
13424 myopts.pop("--ask", None)
# Pure --pretend display (no confirmation, no merging).
13426 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13427 if ("--resume" in myopts):
13428 mymergelist = mydepgraph.altlist()
13429 if len(mymergelist) == 0:
13430 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13432 favorites = mtimedb["resume"]["favorites"]
13433 retval = mydepgraph.display(
13434 mydepgraph.altlist(reversed=tree),
13435 favorites=favorites)
13436 mydepgraph.display_problems()
13437 if retval != os.EX_OK:
13440 retval = mydepgraph.display(
13441 mydepgraph.altlist(reversed=("--tree" in myopts)),
13442 favorites=favorites)
13443 mydepgraph.display_problems()
13444 if retval != os.EX_OK:
# --buildpkgonly refuses to proceed unless every edge in the graph is a
# zero-priority (already satisfied) dependency.
13446 if "--buildpkgonly" in myopts:
13447 graph_copy = mydepgraph.digraph.clone()
13448 for node in list(graph_copy.order):
13449 if not isinstance(node, Package):
13450 graph_copy.remove(node)
13451 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13452 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13453 print "!!! You have to merge the dependencies before you can build this package.\n"
13456 if "--buildpkgonly" in myopts:
13457 graph_copy = mydepgraph.digraph.clone()
13458 for node in list(graph_copy.order):
13459 if not isinstance(node, Package):
13460 graph_copy.remove(node)
13461 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13462 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13463 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Real merge, resume path: hand the saved list to the Scheduler. break_refs
# and clear_caches free the depgraph's memory before the long merge run.
13466 if ("--resume" in myopts):
13467 favorites=mtimedb["resume"]["favorites"]
13468 mymergelist = mydepgraph.altlist()
13469 mydepgraph.break_refs(mymergelist)
13470 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13471 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13472 del mydepgraph, mymergelist
13473 clear_caches(trees)
13475 retval = mergetask.merge()
13476 merge_count = mergetask.curval
# Non-resume path: persist resume state first so an interrupted merge can
# be continued later.
13478 if "resume" in mtimedb and \
13479 "mergelist" in mtimedb["resume"] and \
13480 len(mtimedb["resume"]["mergelist"]) > 1:
13481 mtimedb["resume_backup"] = mtimedb["resume"]
13482 del mtimedb["resume"]
13484 mtimedb["resume"]={}
13485 # Stored as a dict starting with portage-2.2_rc7, and supported
13486 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13487 # a list type for options.
13488 mtimedb["resume"]["myopts"] = myopts.copy()
13490 # Convert Atom instances to plain str since the mtimedb loader
13491 # sets unpickler.find_global = None which causes unpickler.load()
13492 # to raise the following exception:
13494 # cPickle.UnpicklingError: Global and instance pickles are not supported.
13496 # TODO: Maybe stop setting find_global = None, or find some other
13497 # way to avoid accidental triggering of the above UnpicklingError.
13498 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
# --digest (without fetch-only): regenerate Manifest digests for every
# ebuild scheduled to merge.
13500 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13501 for pkgline in mydepgraph.altlist():
13502 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13503 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13504 tmpsettings = portage.config(clone=settings)
13506 if settings.get("PORTAGE_DEBUG", "") == "1":
13508 retval = portage.doebuild(
13509 y, "digest", settings["ROOT"], tmpsettings, edebug,
13510 ("--pretend" in myopts),
13511 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13514 pkglist = mydepgraph.altlist()
13515 mydepgraph.saveNomergeFavorites()
13516 mydepgraph.break_refs(pkglist)
13517 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13518 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13519 del mydepgraph, pkglist
13520 clear_caches(trees)
13522 retval = mergetask.merge()
13523 merge_count = mergetask.curval
# Post-merge: auto-clean obsolete packages if AUTOCLEAN=yes, otherwise warn;
# then prune stale preserved-libs registry entries.
13525 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13526 if "yes" == settings.get("AUTOCLEAN"):
13527 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
13528 unmerge(trees[settings["ROOT"]]["root_config"],
13529 myopts, "clean", [],
13530 ldpath_mtimes, autoclean=1)
13532 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13533 + " AUTOCLEAN is disabled. This can cause serious"
13534 + " problems due to overlapping packages.\n")
13535 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that two mutually-exclusive command-line actions were requested.
# (The exit call that normally follows is elided in this excerpt.)
13539 def multiple_actions(action1, action2):
# Print both conflicting action names so the user can pick exactly one.
13540 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
13541 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# Pre-process the raw argv so that optional-value flags (here -j/--jobs)
# always carry an explicit value, because optparse cannot express an
# "option with optional argument" natively.
13544 def insert_optional_args(args):
13546 Parse optional arguments and insert a value if one has
13547 not been provided. This is done before feeding the args
13548 to the optparse parser since that parser does not support
13549 this feature natively.
# Recognized spellings of the jobs option.
13553 jobs_opts = ("-j", "--jobs")
# Work on a reversed copy so arguments can be consumed from the front
# of the original ordering with O(1) pop() calls.
13554 arg_stack = args[:]
13555 arg_stack.reverse()
13557 arg = arg_stack.pop()
# A short-style jobs option looks like "-j4" or a bundled "-ej":
# single dash, contains "j", not a long "--" option.
13559 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
13560 if not (short_job_opt or arg in jobs_opts):
# Not a jobs option: pass it through unchanged.
13561 new_args.append(arg)
13564 # Insert an empty placeholder in order to
13565 # satisfy the requirements of optparse.
13567 new_args.append("--jobs")
# "-jN" style: the job count may be glued directly onto the option.
13570 if short_job_opt and len(arg) > 2:
13571 if arg[:2] == "-j":
13573 job_count = int(arg[2:])
13575 saved_opts = arg[2:]
# Bundled short options such as "-ej": keep the non-"j" letters so
# they can be re-emitted below.
13578 saved_opts = arg[1:].replace("j", "")
# Otherwise the count may be the next argument on the stack.
13580 if job_count is None and arg_stack:
13582 job_count = int(arg_stack[-1])
13586 # Discard the job count from the stack
13587 # since we're consuming it here.
# No count supplied: "True" requests an unlimited number of jobs.
13590 if job_count is None:
13591 # unlimited number of jobs
13592 new_args.append("True")
13594 new_args.append(str(job_count))
# Re-emit any short options that were bundled together with "j".
13596 if saved_opts is not None:
13597 new_args.append("-" + saved_opts)
# Parse the emerge command line into (action, options dict, remaining
# file/atom arguments) via optparse.  `silent` appears intended to
# suppress warnings about bad option values -- TODO confirm (the lines
# that would test it are elided in this excerpt).
13601 def parse_opts(tmpcmdline, silent=False):
13606 global actions, options, shortmapping
# Legacy long-option spellings mapped to their canonical names.
13608 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, keyed by long option name; values are
# optparse add_option() keyword arguments.  (Several entries are elided
# in this excerpt.)
13609 argument_options = {
13611 "help":"specify the location for portage configuration files",
13615 "help":"enable or disable color output",
13617 "choices":("y", "n")
13622 "help" : "Specifies the number of packages to build " + \
13628 "--load-average": {
13630 "help" :"Specifies that no new builds should be started " + \
13631 "if there are other builds running and the load average " + \
13632 "is at least LOAD (a floating-point number).",
13638 "help":"include unnecessary build time dependencies",
13640 "choices":("y", "n")
13643 "help":"specify conditions to trigger package reinstallation",
13645 "choices":["changed-use"]
# Build the parser; drop optparse's automatic --help so emerge can
# provide its own help handling.
13649 from optparse import OptionParser
13650 parser = OptionParser()
13651 if parser.has_option("--help"):
13652 parser.remove_option("--help")
# Boolean action flags (one per entry in the global `actions`).
13654 for action_opt in actions:
13655 parser.add_option("--" + action_opt, action="store_true",
13656 dest=action_opt.replace("-", "_"), default=False)
# Boolean behavior options.
13657 for myopt in options:
13658 parser.add_option(myopt, action="store_true",
13659 dest=myopt.lstrip("--").replace("-", "_"), default=False)
# Single-letter shortcuts share the dest of their long equivalents.
13660 for shortopt, longopt in shortmapping.iteritems():
13661 parser.add_option("-" + shortopt, action="store_true",
13662 dest=longopt.lstrip("--").replace("-", "_"), default=False)
13663 for myalias, myopt in longopt_aliases.iteritems():
13664 parser.add_option(myalias, action="store_true",
13665 dest=myopt.lstrip("--").replace("-", "_"), default=False)
# Options that carry a value.
13667 for myopt, kwargs in argument_options.iteritems():
13668 parser.add_option(myopt,
13669 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Normalize -j/--jobs before optparse sees the command line.
13671 tmpcmdline = insert_optional_args(tmpcmdline)
13673 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# --jobs: the placeholder "True" means an unlimited number of jobs.
13677 if myoptions.jobs == "True":
13681 jobs = int(myoptions.jobs)
13685 if jobs is not True and \
13689 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
13690 (myoptions.jobs,), noiselevel=-1)
13692 myoptions.jobs = jobs
# --load-average: parsed as float; non-positive values disable the limit.
13694 if myoptions.load_average:
13696 load_average = float(myoptions.load_average)
13700 if load_average <= 0.0:
13701 load_average = None
13703 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
13704 (myoptions.load_average,), noiselevel=-1)
13706 myoptions.load_average = load_average
# Translate the optparse result back into the "--opt"-keyed dicts that
# the rest of emerge expects.
13708 for myopt in options:
13709 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
13711 myopts[myopt] = True
13713 for myopt in argument_options:
13714 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# Only one action may be selected; otherwise complain via
# multiple_actions().
13718 for action_opt in actions:
13719 v = getattr(myoptions, action_opt.replace("-", "_"))
13722 multiple_actions(myaction, action_opt)
13724 myaction = action_opt
13728 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config validation on the vartree settings of every root."""
	for root in trees:
		trees[root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Discard cached dbapi state for every root, then flush dircache.

	Unfreezes each porttree dbapi (melt), clears the porttree and
	bintree aux caches, the bintree cache and the vartree linkmap
	cache, and finally empties portage's global directory cache.
	"""
	for root in trees:
		root_trees = trees[root]
		root_trees["porttree"].dbapi.melt()
		root_trees["porttree"].dbapi._aux_cache.clear()
		root_trees["bintree"].dbapi._aux_cache.clear()
		root_trees["bintree"].dbapi._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Build and return (settings, trees, mtimedb) from the environment,
# honoring the PORTAGE_CONFIGROOT and ROOT overrides, and attach a
# RootConfig to every tree.
13745 def load_emerge_config(trees=None):
# Collect environment overrides for portage.create_trees() -- the
# kwargs assignment lines are elided in this excerpt.
13747 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
13748 v = os.environ.get(envvar, None)
13749 if v and v.strip():
13751 trees = portage.create_trees(trees=trees, **kwargs)
# Attach set configuration and a RootConfig to every root.
13753 for root, root_trees in trees.iteritems():
13754 settings = root_trees["vartree"].settings
13755 setconfig = load_default_config(settings, root_trees)
13756 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
13758 settings = trees["/"]["vartree"].settings
13760 for myroot in trees:
13762 settings = trees[myroot]["vartree"].settings
# The mtimedb lives under the global cache path, independent of ROOT.
13765 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
13766 mtimedb = portage.MtimeDB(mtimedbfile)
13768 return settings, trees, mtimedb
13770 def adjust_config(myopts, settings):
13771 """Make emerge specific adjustments to the config."""
13773 # To enhance usability, make some vars case insensitive by forcing them to
# lower case (the continuation of the comment above is elided here).
13775 for myvar in ("AUTOCLEAN", "NOCOLOR"):
13776 if myvar in settings:
13777 settings[myvar] = settings[myvar].lower()
13778 settings.backup_changes(myvar)
13781 # Kill noauto as it will break merges otherwise.
13782 if "noauto" in settings.features:
13783 while "noauto" in settings.features:
13784 settings.features.remove("noauto")
13785 settings["FEATURES"] = " ".join(settings.features)
13786 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY as an integer; on failure report it and fall back
# to the previous CLEAN_DELAY value.
13790 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
13791 except ValueError, e:
13792 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13793 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
13794 settings["CLEAN_DELAY"], noiselevel=-1)
13795 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
13796 settings.backup_changes("CLEAN_DELAY")
# Same treatment for EMERGE_WARNING_DELAY (default 10 seconds).
13798 EMERGE_WARNING_DELAY = 10
13800 EMERGE_WARNING_DELAY = int(settings.get(
13801 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
13802 except ValueError, e:
13803 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13804 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
13805 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
13806 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
13807 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate --quiet / --verbose into the ebuild environment.
13809 if "--quiet" in myopts:
13810 settings["PORTAGE_QUIET"]="1"
13811 settings.backup_changes("PORTAGE_QUIET")
13813 if "--verbose" in myopts:
13814 settings["PORTAGE_VERBOSE"] = "1"
13815 settings.backup_changes("PORTAGE_VERBOSE")
13817 # Set so that configs will be merged regardless of remembered status
13818 if ("--noconfmem" in myopts):
13819 settings["NOCONFMEM"]="1"
13820 settings.backup_changes("NOCONFMEM")
13822 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; any other value is reported.
13825 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
13826 if PORTAGE_DEBUG not in (0, 1):
13827 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
13828 PORTAGE_DEBUG, noiselevel=-1)
13829 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
13832 except ValueError, e:
13833 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13834 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
13835 settings["PORTAGE_DEBUG"], noiselevel=-1)
13837 if "--debug" in myopts:
13839 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
13840 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR enables/disables color globally...
13842 if settings.get("NOCOLOR") not in ("yes","true"):
13843 portage.output.havecolor = 1
13845 """The explicit --color < y | n > option overrides the NOCOLOR environment
13846 variable and stdout auto-detection."""
13847 if "--color" in myopts:
13848 if "y" == myopts["--color"]:
13849 portage.output.havecolor = 1
13850 settings["NOCOLOR"] = "false"
13852 portage.output.havecolor = 0
13853 settings["NOCOLOR"] = "true"
13854 settings.backup_changes("NOCOLOR")
# ...and output to a non-tty disables color unless NOCOLOR is
# explicitly "no".
13855 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
13856 portage.output.havecolor = 0
13857 settings["NOCOLOR"] = "true"
13858 settings.backup_changes("NOCOLOR")
# Apply process priority adjustments derived from the given settings.
# (The body is elided in this excerpt; presumably it calls the nice()
# and ionice() helpers below -- TODO confirm.)
13860 def apply_priorities(settings):
# Renice the current process according to PORTAGE_NICENESS.
13864 def nice(settings):
# os.nice() raises OSError on permission problems and int() raises
# ValueError on a malformed setting; both are reported but non-fatal.
13866 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
13867 except (OSError, ValueError), e:
13868 out = portage.output.EOutput()
13869 out.eerror("Failed to change nice value to '%s'" % \
13870 settings["PORTAGE_NICENESS"])
13871 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND (if any) so the emerge
# process gets the requested I/O scheduling priority.
13873 def ionice(settings):
13875 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
# Split the configured command shell-style into an argv list.
13877 ionice_cmd = shlex.split(ionice_cmd)
# ${PID} in the command is expanded to emerge's own process id.
13881 from portage.util import varexpand
13882 variables = {"PID" : str(os.getpid())}
13883 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
13886 rval = portage.process.spawn(cmd, env=os.environ)
13887 except portage.exception.CommandNotFound:
13888 # The OS kernel probably doesn't support ionice,
13889 # so return silently.
# A non-zero exit status is reported but not fatal.
13892 if rval != os.EX_OK:
13893 out = portage.output.EOutput()
13894 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
13895 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Print an error listing every known package set when the requested
# set_name does not exist.
13897 def display_missing_pkg_set(root_config, set_name):
13900 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
13901 "The following sets exist:") % \
13902 colorize("INFORM", set_name))
# List all configured sets, sorted for stable output.
13905 for s in sorted(root_config.sets):
13906 msg.append(" %s" % s)
# One log line per message entry, at ERROR level.
13909 writemsg_level("".join("%s\n" % l for l in msg),
13910 level=logging.ERROR, noiselevel=-1)
# Expand SETPREFIX ("@set") arguments in myfiles into their member atoms
# (or keep the set name when the depgraph should expand it itself), and
# evaluate simple set expressions.  Returns (newargs, retval).
13912 def expand_set_arguments(myfiles, myaction, root_config):
13914 setconfig = root_config.setconfig
13916 sets = setconfig.getSets()
13918 # In order to know exactly which atoms/sets should be added to the
13919 # world file, the depgraph performs set expansion later. It will get
13920 # confused about where the atoms came from if it's not allowed to
13921 # expand them itself.
13922 do_not_expand = (None, )
# Bare "system"/"world" arguments are shorthand for the sets.
13925 if a in ("system", "world"):
13926 newargs.append(SETPREFIX+a)
13933 # separators for set arguments
13937 # WARNING: all operators must be of equal length
13939 DIFF_OPERATOR = "-@"
13940 UNION_OPERATOR = "+@"
# First pass: handle inline set options of the form
# @setname[ARG_START]opt=val,...[ARG_END].
13942 for i in range(0, len(myfiles)):
13943 if myfiles[i].startswith(SETPREFIX):
13946 x = myfiles[i][len(SETPREFIX):]
13949 start = x.find(ARG_START)
13950 end = x.find(ARG_END)
13951 if start > 0 and start < end:
13952 namepart = x[:start]
13953 argpart = x[start+1:end]
13955 # TODO: implement proper quoting
13956 args = argpart.split(",")
13960 k, v = a.split("=", 1)
# Valueless options default to the string "True".
13963 options[a] = "True"
13964 setconfig.update(namepart, options)
13965 newset += (x[:start-len(namepart)]+namepart)
13966 x = x[end+len(ARG_END):]
13970 myfiles[i] = SETPREFIX+newset
# Reload, since setconfig.update() above may have changed the sets.
13972 sets = setconfig.getSets()
13974 # display errors that occured while loading the SetConfig instance
13975 for e in setconfig.errors:
13976 print colorize("BAD", "Error during set creation: %s" % e)
13978 # emerge relies on the existance of sets with names "world" and "system"
13979 required_sets = ("world", "system")
13981 for s in required_sets:
13983 msg = ["emerge: incomplete set configuration, " + \
13984 "no \"%s\" set defined" % s]
13985 msg.append(" sets defined: %s" % ", ".join(sets))
13987 sys.stderr.write(line + "\n")
13989 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: evaluate set expressions and expand plain set names.
13992 if a.startswith(SETPREFIX):
13993 # support simple set operations (intersection, difference and union)
13994 # on the commandline. Expressions are evaluated strictly left-to-right
13995 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
13996 expression = a[len(SETPREFIX):]
# Scan operators from the right so that the operand/operator lists
# end up in left-to-right evaluation order.
13999 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
14000 is_pos = expression.rfind(IS_OPERATOR)
14001 diff_pos = expression.rfind(DIFF_OPERATOR)
14002 union_pos = expression.rfind(UNION_OPERATOR)
14003 op_pos = max(is_pos, diff_pos, union_pos)
14004 s1 = expression[:op_pos]
14005 s2 = expression[op_pos+len(IS_OPERATOR):]
14006 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
14008 display_missing_pkg_set(root_config, s2)
14010 expr_sets.insert(0, s2)
14011 expr_ops.insert(0, op)
14013 if not expression in sets:
14014 display_missing_pkg_set(root_config, expression)
14016 expr_sets.insert(0, expression)
# Apply each collected operator in order to the running result set.
14017 result = set(setconfig.getSetAtoms(expression))
14018 for i in range(0, len(expr_ops)):
14019 s2 = setconfig.getSetAtoms(expr_sets[i+1])
14020 if expr_ops[i] == IS_OPERATOR:
14021 result.intersection_update(s2)
14022 elif expr_ops[i] == DIFF_OPERATOR:
14023 result.difference_update(s2)
14024 elif expr_ops[i] == UNION_OPERATOR:
14027 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
14028 newargs.extend(result)
# Plain @setname argument (no operators).
14030 s = a[len(SETPREFIX):]
14032 display_missing_pkg_set(root_config, s)
14034 setconfig.active.append(s)
14036 set_atoms = setconfig.getSetAtoms(s)
14037 except portage.exception.PackageSetNotFound, e:
14038 writemsg_level(("emerge: the given set '%s' " + \
14039 "contains a non-existent set named '%s'.\n") % \
14040 (s, e), level=logging.ERROR, noiselevel=-1)
# Unmerge-style actions only work on sets that support unmerging.
14042 if myaction in unmerge_actions and \
14043 not sets[s].supportsOperation("unmerge"):
14044 sys.stderr.write("emerge: the given set '%s' does " % s + \
14045 "not support unmerge operations\n")
14047 elif not set_atoms:
14048 print "emerge: '%s' is an empty set" % s
14049 elif myaction not in do_not_expand:
14050 newargs.extend(set_atoms)
# For actions in do_not_expand, keep the set name so the depgraph
# can expand it itself (see comment at the top).
14052 newargs.append(SETPREFIX+s)
14053 for e in sets[s].errors:
14057 return (newargs, retval)
# Warn about configured repositories that lack a profiles/repo_name
# entry.  Returns True when at least one repository is missing one.
14059 def repo_name_check(trees):
14060 missing_repo_names = set()
14061 for root, root_trees in trees.iteritems():
14062 if "porttree" in root_trees:
14063 portdb = root_trees["porttree"].dbapi
# Start with every tree path, then discard the ones that belong to
# a repository with a known name.
14064 missing_repo_names.update(portdb.porttrees)
14065 repos = portdb.getRepositories()
14067 missing_repo_names.discard(portdb.getRepositoryPath(r))
14069 if missing_repo_names:
14071 msg.append("WARNING: One or more repositories " + \
14072 "have missing repo_name entries:")
14074 for p in missing_repo_names:
14075 msg.append("\t%s/profiles/repo_name" % (p,))
14077 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
14078 "should be a plain text file containing a unique " + \
14079 "name for the repository on the first line.", 70))
14080 writemsg_level("".join("%s\n" % l for l in msg),
14081 level=logging.WARNING, noiselevel=-1)
14083 return bool(missing_repo_names)
# Warn when CONFIG_PROTECT is empty/unset for any configured root.
14085 def config_protect_check(trees):
14086 for root, root_trees in trees.iteritems():
14087 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
14088 msg = "!!! CONFIG_PROTECT is empty"
# The condition guarding this root-specific suffix is elided in this
# excerpt; presumably it applies only to non-default roots.
14090 msg += " for '%s'" % root
14091 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Tell the user that a short package name matched multiple categories;
# unless --quiet, run the search machinery to show each candidate.
14093 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
14095 if "--quiet" in myopts:
14096 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14097 print "!!! one of the following fully-qualified ebuild names instead:\n"
# De-duplicate the matches down to category/package keys.
14098 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14099 print " " + colorize("INFORM", cp)
# Non-quiet mode: display full search output for each candidate.
14102 s = search(root_config, spinner, "--searchdesc" in myopts,
14103 "--quiet" not in myopts, "--usepkg" in myopts,
14104 "--usepkgonly" in myopts)
14105 null_cp = portage.dep_getkey(insert_category_into_atom(
14107 cat, atom_pn = portage.catsplit(null_cp)
14108 s.searchkey = atom_pn
14109 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14112 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14113 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that a valid profile is configured for every root.  The
# "info" and "sync" actions and the --version/--help options are
# always allowed regardless of profile state.
14115 def profile_check(trees, myaction, myopts):
14116 if myaction in ("info", "sync"):
14118 elif "--version" in myopts or "--help" in myopts:
14120 for root, root_trees in trees.iteritems():
# A configured profile exists for this root; the branch taken here
# is elided in this excerpt.
14121 if root_trees["root_config"].settings.profiles:
14123 # generate some profile related warning messages
14124 validate_ebuild_environment(trees)
14125 msg = "If you have just changed your profile configuration, you " + \
14126 "should revert back to the previous configuration. Due to " + \
14127 "your current profile being invalid, allowed actions are " + \
14128 "limited to --help, --info, --sync, and --version."
14129 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
14130 level=logging.ERROR, noiselevel=-1)
14135 global portage # NFC why this is necessary now - genone
14136 portage._disable_legacy_globals()
14137 # Disable color until we're sure that it should be enabled (after
14138 # EMERGE_DEFAULT_OPTS has been parsed).
14139 portage.output.havecolor = 0
14140 # This first pass is just for options that need to be known as early as
14141 # possible, such as --config-root. They will be parsed again later,
14142 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14143 # the value of --config-root).
14144 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14145 if "--debug" in myopts:
14146 os.environ["PORTAGE_DEBUG"] = "1"
14147 if "--config-root" in myopts:
14148 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14150 # Portage needs to ensure a sane umask for the files it creates.
14152 settings, trees, mtimedb = load_emerge_config()
14153 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14154 rval = profile_check(trees, myaction, myopts)
14155 if rval != os.EX_OK:
14158 if portage._global_updates(trees, mtimedb["updates"]):
14160 # Reload the whole config from scratch.
14161 settings, trees, mtimedb = load_emerge_config(trees=trees)
14162 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14164 xterm_titles = "notitles" not in settings.features
14167 if "--ignore-default-opts" not in myopts:
14168 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14169 tmpcmdline.extend(sys.argv[1:])
14170 myaction, myopts, myfiles = parse_opts(tmpcmdline)
14172 if "--digest" in myopts:
14173 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14174 # Reload the whole config from scratch so that the portdbapi internal
14175 # config is updated with new FEATURES.
14176 settings, trees, mtimedb = load_emerge_config(trees=trees)
14177 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14179 for myroot in trees:
14180 mysettings = trees[myroot]["vartree"].settings
14181 mysettings.unlock()
14182 adjust_config(myopts, mysettings)
14183 mysettings["PORTAGE_COUNTER_HASH"] = \
14184 trees[myroot]["vartree"].dbapi._counter_hash()
14185 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14187 del myroot, mysettings
14189 apply_priorities(settings)
14191 spinner = stdout_spinner()
14192 if "candy" in settings.features:
14193 spinner.update = spinner.update_scroll
14195 if "--quiet" not in myopts:
14196 portage.deprecated_profile_check()
14197 repo_name_check(trees)
14198 config_protect_check(trees)
14200 eclasses_overridden = {}
14201 for mytrees in trees.itervalues():
14202 mydb = mytrees["porttree"].dbapi
14203 # Freeze the portdbapi for performance (memoize all xmatch results).
14205 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14208 if eclasses_overridden and \
14209 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14210 prefix = bad(" * ")
14211 if len(eclasses_overridden) == 1:
14212 writemsg(prefix + "Overlay eclass overrides " + \
14213 "eclass from PORTDIR:\n", noiselevel=-1)
14215 writemsg(prefix + "Overlay eclasses override " + \
14216 "eclasses from PORTDIR:\n", noiselevel=-1)
14217 writemsg(prefix + "\n", noiselevel=-1)
14218 for eclass_name in sorted(eclasses_overridden):
14219 writemsg(prefix + " '%s/%s.eclass'\n" % \
14220 (eclasses_overridden[eclass_name], eclass_name),
14222 writemsg(prefix + "\n", noiselevel=-1)
14223 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14224 "because it will trigger invalidation of cached ebuild metadata " + \
14225 "that is distributed with the portage tree. If you must " + \
14226 "override eclasses from PORTDIR then you are advised to add " + \
14227 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14228 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14229 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14230 "you would like to disable this warning."
14231 from textwrap import wrap
14232 for line in wrap(msg, 72):
14233 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14235 if "moo" in myfiles:
14238 Larry loves Gentoo (""" + platform.system() + """)
14240 _______________________
14241 < Have you mooed today? >
14242 -----------------------
14252 ext = os.path.splitext(x)[1]
14253 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14254 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14257 root_config = trees[settings["ROOT"]]["root_config"]
14258 if myaction == "list-sets":
14259 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14263 # only expand sets for actions taking package arguments
14264 oldargs = myfiles[:]
14265 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14266 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14267 if retval != os.EX_OK:
14270 # Need to handle empty sets specially, otherwise emerge will react
14271 # with the help message for empty argument lists
14272 if oldargs and not myfiles:
14273 print "emerge: no targets left after set expansion"
14276 if ("--tree" in myopts) and ("--columns" in myopts):
14277 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14280 if ("--quiet" in myopts):
14281 spinner.update = spinner.update_quiet
14282 portage.util.noiselimit = -1
14284 # Always create packages if FEATURES=buildpkg
14285 # Imply --buildpkg if --buildpkgonly
14286 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14287 if "--buildpkg" not in myopts:
14288 myopts["--buildpkg"] = True
14290 # Also allow -S to invoke search action (-sS)
14291 if ("--searchdesc" in myopts):
14292 if myaction and myaction != "search":
14293 myfiles.append(myaction)
14294 if "--search" not in myopts:
14295 myopts["--search"] = True
14296 myaction = "search"
14298 # Always try and fetch binary packages if FEATURES=getbinpkg
14299 if ("getbinpkg" in settings.features):
14300 myopts["--getbinpkg"] = True
14302 if "--buildpkgonly" in myopts:
14303 # --buildpkgonly will not merge anything, so
14304 # it cancels all binary package options.
14305 for opt in ("--getbinpkg", "--getbinpkgonly",
14306 "--usepkg", "--usepkgonly"):
14307 myopts.pop(opt, None)
14309 if "--fetch-all-uri" in myopts:
14310 myopts["--fetchonly"] = True
14312 if "--skipfirst" in myopts and "--resume" not in myopts:
14313 myopts["--resume"] = True
14315 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14316 myopts["--usepkgonly"] = True
14318 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14319 myopts["--getbinpkg"] = True
14321 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14322 myopts["--usepkg"] = True
14324 # Also allow -K to apply --usepkg/-k
14325 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14326 myopts["--usepkg"] = True
14328 # Allow -p to remove --ask
14329 if ("--pretend" in myopts) and ("--ask" in myopts):
14330 print ">>> --pretend disables --ask... removing --ask from options."
14331 del myopts["--ask"]
14333 # forbid --ask when not in a terminal
14334 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14335 if ("--ask" in myopts) and (not sys.stdin.isatty()):
14336 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14340 if settings.get("PORTAGE_DEBUG", "") == "1":
14341 spinner.update = spinner.update_quiet
14343 if "python-trace" in settings.features:
14344 import portage.debug
14345 portage.debug.set_trace(True)
14347 if not ("--quiet" in myopts):
14348 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14349 spinner.update = spinner.update_basic
14351 if "--version" in myopts:
14352 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14353 settings.profile_path, settings["CHOST"],
14354 trees[settings["ROOT"]]["vartree"].dbapi)
14356 elif "--help" in myopts:
14357 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14360 if "--debug" in myopts:
14361 print "myaction", myaction
14362 print "myopts", myopts
14364 if not myaction and not myfiles and "--resume" not in myopts:
14365 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14368 pretend = "--pretend" in myopts
14369 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14370 buildpkgonly = "--buildpkgonly" in myopts
14372 # check if root user is the current user for the actions where emerge needs this
14373 if portage.secpass < 2:
14374 # We've already allowed "--version" and "--help" above.
14375 if "--pretend" not in myopts and myaction not in ("search","info"):
14376 need_superuser = not \
14378 (buildpkgonly and secpass >= 1) or \
14379 myaction in ("metadata", "regen") or \
14380 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14381 if portage.secpass < 1 or \
14384 access_desc = "superuser"
14386 access_desc = "portage group"
14387 # Always show portage_group_warning() when only portage group
14388 # access is required but the user is not in the portage group.
14389 from portage.data import portage_group_warning
14390 if "--ask" in myopts:
14391 myopts["--pretend"] = True
14392 del myopts["--ask"]
14393 print ("%s access is required... " + \
14394 "adding --pretend to options.\n") % access_desc
14395 if portage.secpass < 1 and not need_superuser:
14396 portage_group_warning()
14398 sys.stderr.write(("emerge: %s access is " + \
14399 "required.\n\n") % access_desc)
14400 if portage.secpass < 1 and not need_superuser:
14401 portage_group_warning()
14404 disable_emergelog = False
14405 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14407 disable_emergelog = True
14409 if myaction in ("search", "info"):
14410 disable_emergelog = True
14411 if disable_emergelog:
14412 """ Disable emergelog for everything except build or unmerge
14413 operations. This helps minimize parallel emerge.log entries that can
14414 confuse log parsers. We especially want it disabled during
14415 parallel-fetch, which uses --resume --fetchonly."""
14417 def emergelog(*pargs, **kargs):
14420 if not "--pretend" in myopts:
14421 emergelog(xterm_titles, "Started emerge on: "+\
14422 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14425 myelogstr=" ".join(myopts)
14427 myelogstr+=" "+myaction
14429 myelogstr += " " + " ".join(oldargs)
14430 emergelog(xterm_titles, " *** emerge " + myelogstr)
14433 def emergeexitsig(signum, frame):
14434 signal.signal(signal.SIGINT, signal.SIG_IGN)
14435 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14436 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
14437 sys.exit(100+signum)
14438 signal.signal(signal.SIGINT, emergeexitsig)
14439 signal.signal(signal.SIGTERM, emergeexitsig)
14442 """This gets out final log message in before we quit."""
14443 if "--pretend" not in myopts:
14444 emergelog(xterm_titles, " *** terminating.")
14445 if "notitles" not in settings.features:
14447 portage.atexit_register(emergeexit)
14449 if myaction in ("config", "metadata", "regen", "sync"):
14450 if "--pretend" in myopts:
14451 sys.stderr.write(("emerge: The '%s' action does " + \
14452 "not support '--pretend'.\n") % myaction)
14455 if "sync" == myaction:
14456 return action_sync(settings, trees, mtimedb, myopts, myaction)
14457 elif "metadata" == myaction:
14458 action_metadata(settings, portdb, myopts)
14459 elif myaction=="regen":
14460 validate_ebuild_environment(trees)
14461 action_regen(settings, portdb, myopts.get("--jobs"),
14462 myopts.get("--load-average"))
14464 elif "config"==myaction:
14465 validate_ebuild_environment(trees)
14466 action_config(settings, trees, myopts, myfiles)
14469 elif "search"==myaction:
14470 validate_ebuild_environment(trees)
14471 action_search(trees[settings["ROOT"]]["root_config"],
14472 myopts, myfiles, spinner)
14473 elif myaction in ("clean", "unmerge") or \
14474 (myaction == "prune" and "--nodeps" in myopts):
14475 validate_ebuild_environment(trees)
14477 # Ensure atoms are valid before calling unmerge().
14478 # For backward compat, leading '=' is not required.
14480 if is_valid_package_atom(x) or \
14481 is_valid_package_atom("=" + x):
14484 msg.append("'%s' is not a valid package atom." % (x,))
14485 msg.append("Please check ebuild(5) for full details.")
14486 writemsg_level("".join("!!! %s\n" % line for line in msg),
14487 level=logging.ERROR, noiselevel=-1)
14490 # When given a list of atoms, unmerge
14491 # them in the order given.
14492 ordered = myaction == "unmerge"
14493 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14494 mtimedb["ldpath"], ordered=ordered):
14495 if not (buildpkgonly or fetchonly or pretend):
14496 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14498 elif myaction in ("depclean", "info", "prune"):
14500 # Ensure atoms are valid before calling unmerge().
14501 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14504 if is_valid_package_atom(x):
14506 valid_atoms.append(
14507 portage.dep_expand(x, mydb=vardb, settings=settings))
14508 except portage.exception.AmbiguousPackageName, e:
14509 msg = "The short ebuild name \"" + x + \
14510 "\" is ambiguous. Please specify " + \
14511 "one of the following " + \
14512 "fully-qualified ebuild names instead:"
14513 for line in textwrap.wrap(msg, 70):
14514 writemsg_level("!!! %s\n" % (line,),
14515 level=logging.ERROR, noiselevel=-1)
14517 writemsg_level(" %s\n" % colorize("INFORM", i),
14518 level=logging.ERROR, noiselevel=-1)
14519 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14523 msg.append("'%s' is not a valid package atom." % (x,))
14524 msg.append("Please check ebuild(5) for full details.")
14525 writemsg_level("".join("!!! %s\n" % line for line in msg),
14526 level=logging.ERROR, noiselevel=-1)
14529 if myaction == "info":
14530 return action_info(settings, trees, myopts, valid_atoms)
14532 validate_ebuild_environment(trees)
14533 action_depclean(settings, trees, mtimedb["ldpath"],
14534 myopts, myaction, valid_atoms, spinner)
14535 if not (buildpkgonly or fetchonly or pretend):
14536 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14537 # "update", "system", or just process files:
14539 validate_ebuild_environment(trees)
14540 if "--pretend" not in myopts:
14541 display_news_notification(root_config, myopts)
14542 retval = action_build(settings, trees, mtimedb,
14543 myopts, myaction, myfiles, spinner)
14544 root_config = trees[settings["ROOT"]]["root_config"]
14545 post_emerge(root_config, myopts, mtimedb, retval)