2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 # This block ensures that ^C interrupts are handled quietly.
def exithandler(signum,frame):
    # Signal handler for SIGINT/SIGTERM: ignore any further interrupt
    # signals while shutting down so the handler is not re-entered.
    # NOTE(review): fragmentary excerpt -- the line(s) that actually
    # terminate the process are not visible here.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
# Route ^C and SIGTERM through exithandler for a quiet shutdown, and
# restore default SIGPIPE behavior so a broken pipe kills the process
# instead of raising an exception deep inside portage.
signal.signal(signal.SIGINT, exithandler)
signal.signal(signal.SIGTERM, exithandler)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)

# NOTE(review): the matching "try:" for this handler and the handler
# body are not visible in this excerpt.
except KeyboardInterrupt:
24 from collections import deque
41 from os import path as osp
42 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51 nc_len, red, teal, turquoise, xtermTitle, \
52 xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
# Make portage's dependency-string parsing strict for emerge; invalid
# dependency strings will be treated as errors rather than tolerated.
portage.dep._dep_check_strict = True
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage.sets import load_default_config, SETPREFIX
70 from portage.sets.base import InternalPackageSet
72 from itertools import chain, izip
73 from UserDict import DictMixin
76 import cPickle as pickle
81 import cStringIO as StringIO
class stdout_spinner(object):
    # Terminal "busy" indicator shown while emerge works.  Offers several
    # display styles: basic dots, a scrolling message, a twirl character,
    # or nothing (quiet).
    # NOTE(review): fragmentary excerpt -- the "scroll_msgs = [" opener,
    # the __init__ header, and several return/else lines are not visible;
    # indentation below is reconstructed.

        # Messages displayed by update_scroll(); one is chosen per run.
        "Gentoo Rocks ("+platform.system()+")",
        "Thank you for using Gentoo. :)",
        "Are you actually trying to read this?",
        "How many times have you stared at this?",
        "We are generating the cache right now",
        "You are paying too much attention.",
        "A theory is better than its explanation.",
        "Phasers locked on target, Captain.",
        "Thrashing is just virtual crashing.",
        "To be is to program.",
        "Real Users hate Real Programmers.",
        "When all else fails, read the instructions.",
        "Functionality breeds Contempt.",
        "The future lies ahead.",
        "3.1415926535897932384626433832795028841971694",
        "Sometimes insanity is the only alternative.",
        "Inaccuracy saves a world of explanation.",

    # Characters cycled in place by update_twirl().
    twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

        # (from __init__) the default style is the twirl spinner; the
        # scroll message is picked pseudo-randomly from the clock.
        self.update = self.update_twirl
        self.scroll_sequence = self.scroll_msgs[
            int(time.time() * 100) % len(self.scroll_msgs)]
        # Minimum seconds between terminal writes, to limit cpu waste.
        self.min_display_latency = 0.05

    def _return_early(self):
        """
        Flushing output to the tty too frequently wastes cpu time. Therefore,
        each update* method should return without doing any output when this
        """
        cur_time = time.time()
        if cur_time - self.last_update < self.min_display_latency:  # (return not visible)
        self.last_update = cur_time

    def update_basic(self):
        # Emits a dot every 100th call; counter wraps at 500.
        self.spinpos = (self.spinpos + 1) % 500
        if self._return_early():  # (return not visible)
        if (self.spinpos % 100) == 0:
            if self.spinpos == 0:
                sys.stdout.write(". ")
                sys.stdout.write(".")

    def update_scroll(self):
        # Scrolls the chosen message back and forth across the line.
        if self._return_early():  # (return not visible)
        if(self.spinpos >= len(self.scroll_sequence)):
            # Second half of the cycle: scroll backwards.
            sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
                len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
            sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
        self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

    def update_twirl(self):
        # Rotates through twirl_sequence in place (two backspaces rewrite
        # the previous character).
        self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
        if self._return_early():  # (return not visible)
        sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

    def update_quiet(self):
        # No-op spinner used with --quiet / --nospinner.
def userquery(prompt, responses=None, colours=None):
    """Displays a prompt and a set of responses, then waits for a response
    which is checked against the responses and the first to match is
    returned. An empty response will match the first value in responses. The
    input buffer is *not* cleared prior to the prompt!

    responses: a List of Strings.
    colours: a List of Functions taking and returning a String, used to
    process the responses for display. Typically these will be functions
    like red() but could be e.g. lambda x: "DisplayString".
    If responses is omitted, defaults to ["Yes", "No"], [green, red].
    If only colours is omitted, defaults to [bold, ...].

    Returns a member of the List responses. (If called without optional
    arguments, returns "Yes" or "No".)
    KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
    """
    # NOTE(review): fragmentary excerpt -- the prompt print, the input
    # loop header, and the exit path are not visible; indentation is
    # reconstructed.
    if responses is None:
        responses = ["Yes", "No"]
            # Default colour functions for the Yes/No choices.
            create_color_func("PROMPT_CHOICE_DEFAULT"),
            create_color_func("PROMPT_CHOICE_OTHER")
    elif colours is None:  # (default colours assignment not visible)
    # Repeat/truncate colours so there is exactly one per response.
    colours=(colours*len(responses))[:len(responses)]
            response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
            for key in responses:
                # An empty response will match the first value in responses.
                if response.upper()==key[:len(response)].upper():  # (return not visible)
            print "Sorry, response '%s' not understood." % response,
    except (EOFError, KeyboardInterrupt):  # (conversion to SystemExit not visible)
# Command-line vocabulary tables used by option parsing below.
# NOTE(review): fragmentary excerpt -- the closing bracket of `actions`,
# and the openers of the long-option list and the short->long option
# mapping, are not visible.
actions = frozenset([
    # emerge "actions" (mutually exclusive modes of operation).
    "clean", "config", "depclean",
    "info", "list-sets", "metadata",
    "prune", "regen", "search",
    # Long options accepted by emerge.
    "--ask", "--alphabetical",
    "--buildpkg", "--buildpkgonly",
    "--changelog", "--columns",
    "--fetchonly", "--fetch-all-uri",
    "--getbinpkg", "--getbinpkgonly",
    "--help", "--ignore-default-opts",
    "--newuse", "--nocolor",
    "--nodeps", "--noreplace",
    "--nospinner", "--oneshot",
    "--onlydeps", "--pretend",
    "--quiet", "--resume",
    "--searchdesc", "--selective",
    "--usepkg", "--usepkgonly",
    "--verbose", "--version"
    # Single-letter short options mapped to their long equivalents.
    "b":"--buildpkg", "B":"--buildpkgonly",
    "c":"--clean", "C":"--unmerge",
    "d":"--debug", "D":"--deep",
    "f":"--fetchonly", "F":"--fetch-all-uri",
    "g":"--getbinpkg", "G":"--getbinpkgonly",
    "k":"--usepkg", "K":"--usepkgonly",
    "n":"--noreplace", "N":"--newuse",
    "o":"--onlydeps", "O":"--nodeps",
    "p":"--pretend", "P":"--prune",
    "s":"--search", "S":"--searchdesc",
    "v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
    # Append a timestamped message to /var/log/emerge.log and optionally
    # mirror a short form of it into the xterm title bar.
    # NOTE(review): fragmentary excerpt -- the surrounding try, the
    # seek/flush/close calls and the lock initialization are not visible.
    if xterm_titles and short_msg:
        if "HOSTNAME" in os.environ:
            # Prefix the hostname so titles are useful over ssh.
            short_msg = os.environ["HOSTNAME"]+": "+short_msg
        xtermTitle(short_msg)
        file_path = "/var/log/emerge.log"
        mylogfile = open(file_path, "a")
        # Make sure the log stays writable by the portage user/group.
        portage.util.apply_secpass_permissions(file_path,
            uid=portage.portage_uid, gid=portage.portage_gid,
            mylock = portage.locks.lockfile(mylogfile)
            # seek because we may have gotten held up by the lock.
            # if so, we may not be positioned at the end of the file.
            mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
            portage.locks.unlockfile(mylock)
    except (IOError,OSError,portage.exception.PortageException), e:
        # Logging is best-effort; report the failure on stderr only.
        print >> sys.stderr, "emergelog():",e
281 def countdown(secs=5, doing="Starting"):
283 print ">>> Waiting",secs,"seconds before starting..."
284 print ">>> (Control-C to abort)...\n"+doing+" in: ",
288 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
    # Render a byte count as a comma-grouped kB string.
    # NOTE(review): fragmentary excerpt -- the non-integer early return,
    # the digit-grouping while loop and the final return are not visible.
    if type(mysize) not in [types.IntType,types.LongType]:  # (fallback return not visible)
    if 0 != mysize % 1024:
        # Always round up to the next kB so that it doesn't show 0 kB when
        # some small file still needs to be fetched.
        mysize += 1024 - mysize % 1024
    mystr=str(mysize/1024)
        # (inside grouping loop) insert a comma every three digits.
        mystr=mystr[:mycount]+","+mystr[mycount:]
def getgccversion(chost):
    """
    return: the current in-use gcc version
    """
    # Try progressively less specific ways of discovering the active gcc:
    # gcc-config, then the CHOST-prefixed gcc, then plain gcc.
    gcc_ver_command = 'gcc -dumpversion'
    gcc_ver_prefix = 'gcc-'

    gcc_not_found_error = red(
    "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
    "!!! to update the environment of this terminal and possibly\n" +
    "!!! other terminals also.\n"

    # 1) Ask gcc-config for the current profile (CHOST-version form).
    mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
    if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
        return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

    # 2) Ask the CHOST-prefixed compiler directly.
    mystatus, myoutput = commands.getstatusoutput(
        chost + "-" + gcc_ver_command)
    if mystatus == os.EX_OK:
        return gcc_ver_prefix + myoutput

    # 3) Fall back to whatever "gcc" resolves to.
    mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
    if mystatus == os.EX_OK:
        return gcc_ver_prefix + myoutput

    # Nothing worked: warn the user and report a placeholder.
    portage.writemsg(gcc_not_found_error, noiselevel=-1)
    return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
    # Build the "Portage X.Y (profile, gcc, libc, kernel/arch)" banner
    # shown by `emerge --info` / `emerge --version`.
    # NOTE(review): fragmentary excerpt -- the profile guard, readlink
    # error handling and the libc accumulation loop headers are not
    # visible; indentation is reconstructed.
    profilever = "unavailable"
        realpath = os.path.realpath(profile)
        basepath = os.path.realpath(os.path.join(portdir, "profiles"))
        if realpath.startswith(basepath):
            # Report the profile path relative to $PORTDIR/profiles.
            profilever = realpath[1 + len(basepath):]
            # Fall back to the raw symlink target, marked with "!".
            profilever = "!" + os.readlink(profile)
        del realpath, basepath

    # Collect installed libc versions from the virtuals.
    libclist = vardb.match("virtual/libc")
    libclist += vardb.match("virtual/glibc")
    libclist = portage.util.unique_array(libclist)
        xs=portage.catpkgsplit(x)
        # Join subsequent matches with commas.
            libcver+=","+"-".join(xs[1:])
            libcver="-".join(xs[1:])
        libcver="unavailable"

    gccver = getgccversion(chost)
    unameout=platform.release()+" "+platform.machine()

    return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
    #configure emerge engine parameters
    #
    # self: include _this_ package regardless of if it is merged.
    # selective: exclude the package if it is merged
    # recurse: go into the dependencies
    # deep: go into the dependencies of already merged packages
    # empty: pretend nothing is merged
    # complete: completely account for all known dependencies
    # remove: build graph for use in removing packages
    #
    # NOTE(review): fragmentary excerpt -- the --deep handling body and
    # the final return are not visible.
    myparams = set(["recurse"])

    if myaction == "remove":
        myparams.add("remove")
        myparams.add("complete")

    # Options that imply selective (don't reinstall what's merged).
    if "--update" in myopts or \
        "--newuse" in myopts or \
        "--reinstall" in myopts or \
        "--noreplace" in myopts:
        myparams.add("selective")
    if "--emptytree" in myopts:
        # --emptytree overrides selective behavior entirely.
        myparams.add("empty")
        myparams.discard("selective")
    if "--nodeps" in myopts:
        myparams.discard("recurse")
    if "--deep" in myopts:  # (body not visible in this excerpt)
    if "--complete-graph" in myopts:
        myparams.add("complete")
# search functionality
class search(object):
    # Implements `emerge --search` / `--searchdesc`, querying the port,
    # binary and installed-package databases through a small fake-portdb
    # facade so all three can be searched uniformly.
    # NOTE(review): fragmentary excerpt throughout this class -- several
    # method headers (e.g. _cp_all, output, addCP), loop headers and
    # return/else lines are not visible; indentation is reconstructed.

    def __init__(self, root_config, spinner, searchdesc,
        verbose, usepkg, usepkgonly):
        """Searches the available and installed packages for the supplied search key.
        The list of available and installed packages is created at object instantiation.
        This makes successive searches faster."""
        self.settings = root_config.settings
        self.vartree = root_config.trees["vartree"]
        self.spinner = spinner
        self.verbose = verbose
        self.searchdesc = searchdesc
        self.root_config = root_config
        self.setconfig = root_config.setconfig
        self.matches = {"pkg" : []}

        # The fake portdb delegates the listed dbapi methods to this
        # object's _-prefixed implementations, which fan out over _dbs.
        self.portdb = fake_portdb
        for attrib in ("aux_get", "cp_all",
            "xmatch", "findname", "getFetchMap"):
            setattr(fake_portdb, attrib, getattr(self, "_"+attrib))

        portdb = root_config.trees["porttree"].dbapi
        bindb = root_config.trees["bintree"].dbapi
        vardb = root_config.trees["vartree"].dbapi

        # Choose which databases to search, based on --usepkg/--usepkgonly.
        if not usepkgonly and portdb._have_root_eclass_dir:
            self._dbs.append(portdb)
        if (usepkg or usepkgonly) and bindb.cp_all():
            self._dbs.append(bindb)
        self._dbs.append(vardb)
        self._portdb = portdb

        # (from _cp_all) union of category/package names over all dbs.
        cp_all.update(db.cp_all())
        return list(sorted(cp_all))

    def _aux_get(self, *args, **kwargs):
            # Return metadata from the first db that has the cpv.
            return db.aux_get(*args, **kwargs)

    def _findname(self, *args, **kwargs):
            if db is not self._portdb:
                # We don't want findname to return anything
                # unless it's an ebuild in a portage tree.
                # Otherwise, it's already built and we don't
            func = getattr(db, "findname", None)
                value = func(*args, **kwargs)

    def _getFetchMap(self, *args, **kwargs):
            func = getattr(db, "getFetchMap", None)
                value = func(*args, **kwargs)

    def _visible(self, db, cpv, metadata):
        # Delegate visibility (masking/keywords) to the global visible()
        # helper, wrapping the raw metadata in a Package instance.
        installed = db is self.vartree.dbapi
        built = installed or db is not self._portdb
            pkg_type = "installed"
        return visible(self.settings,
            Package(type_name=pkg_type, root_config=self.root_config,
                cpv=cpv, built=built, installed=installed, metadata=metadata))

    def _xmatch(self, level, atom):
        """
        This method does not expand old-style virtuals because it
        is restricted to returning matches for a single ${CATEGORY}/${PN}
        and old-style virtual matches unreliable for that when querying
        multiple package databases. If necessary, old-style virtuals
        can be performed on atoms prior to calling this method.
        """
        cp = portage.dep_getkey(atom)
        if level == "match-all":
                # Use xmatch when the db supports it, otherwise match().
                if hasattr(db, "xmatch"):
                    matches.update(db.xmatch(level, atom))
                    matches.update(db.match(atom))
            result = list(x for x in matches if portage.cpv_getkey(x) == cp)
            db._cpv_sort_ascending(result)
        elif level == "match-visible":
                if hasattr(db, "xmatch"):
                    matches.update(db.xmatch(level, atom))
                    # dbs without xmatch need an explicit visibility filter.
                    db_keys = list(db._aux_cache_keys)
                    for cpv in db.match(atom):
                        metadata = izip(db_keys,
                            db.aux_get(cpv, db_keys))
                        if not self._visible(db, cpv, metadata):
            result = list(x for x in matches if portage.cpv_getkey(x) == cp)
            db._cpv_sort_ascending(result)
        elif level == "bestmatch-visible":
                if hasattr(db, "xmatch"):
                    cpv = db.xmatch("bestmatch-visible", atom)
                    if not cpv or portage.cpv_getkey(cpv) != cp:
                    # Keep the overall best across all dbs.
                    if not result or cpv == portage.best([cpv, result]):
                    db_keys = Package.metadata_keys
                    # break out of this loop with highest visible
                    # match, checked in descending order
                    for cpv in reversed(db.match(atom)):
                        if portage.cpv_getkey(cpv) != cp:
                        metadata = izip(db_keys,
                            db.aux_get(cpv, db_keys))
                        if not self._visible(db, cpv, metadata):
                        if not result or cpv == portage.best([cpv, result]):
            raise NotImplementedError(level)

    def execute(self,searchkey):
        """Performs the search for the supplied search key"""
        self.searchkey=searchkey
        self.packagematches = []
            # With --searchdesc we also collect description matches.
            self.matches = {"pkg":[], "desc":[], "set":[]}
            self.matches = {"pkg":[], "set":[]}
        print "Searching... ",

        # Leading '%' selects regex search; leading '@' matches against
        # the full category/package instead of just the package name.
        if self.searchkey.startswith('%'):
            self.searchkey = self.searchkey[1:]
        if self.searchkey.startswith('@'):
            self.searchkey = self.searchkey[1:]
            self.searchre=re.compile(self.searchkey,re.I)
            self.searchre=re.compile(re.escape(self.searchkey), re.I)
        for package in self.portdb.cp_all():
            self.spinner.update()
                match_string = package[:]
                match_string = package.split("/")[-1]
            if self.searchre.search(match_string):
                # Flag the package masked when nothing visible matches.
                if not self.portdb.xmatch("match-visible", package):
                self.matches["pkg"].append([package,masked])
            elif self.searchdesc: # DESCRIPTION searching
                full_package = self.portdb.xmatch("bestmatch-visible", package)
                    #no match found; we don't want to query description
                    full_package = portage.best(
                        self.portdb.xmatch("match-all", package))
                    full_desc = self.portdb.aux_get(
                        full_package, ["DESCRIPTION"])[0]
                    print "emerge: search: aux_get() failed, skipping"
                if self.searchre.search(full_desc):
                    self.matches["desc"].append([full_package,masked])

        # Also match against package sets (names and descriptions).
        self.sdict = self.setconfig.getSets()
        for setname in self.sdict:
            self.spinner.update()
                match_string = setname
                match_string = setname.split("/")[-1]

            if self.searchre.search(match_string):
                self.matches["set"].append([setname, False])
            elif self.searchdesc:
                if self.searchre.search(
                    self.sdict[setname].getMetadata("DESCRIPTION")):
                    self.matches["set"].append([setname, False])

        # Sort each category of matches and total them for display.
        for mtype in self.matches:
            self.matches[mtype].sort()
            self.mlen += len(self.matches[mtype])

        # (from addCP) add a single category/package, flagging it masked
        # when no visible version exists.
        if not self.portdb.xmatch("match-all", cp):
        if not self.portdb.xmatch("bestmatch-visible", cp):
        self.matches["pkg"].append([cp, masked])

        """Outputs the results of the search."""
        print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
        print "[ Applications found : "+white(str(self.mlen))+" ]"
        vardb = self.vartree.dbapi
        for mtype in self.matches:
            for match,masked in self.matches[mtype]:
                if mtype == "pkg":
                    full_package = self.portdb.xmatch(
                        "bestmatch-visible", match)
                        #no match found; we don't want to query description
                        full_package = portage.best(
                            self.portdb.xmatch("match-all",match))
                elif mtype == "desc":
                    match = portage.cpv_getkey(match)
                    # "set" matches: print the set name and description.
                    print green("*")+" "+white(match)
                    print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
                        desc, homepage, license = self.portdb.aux_get(
                            full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
                        print "emerge: search: aux_get() failed, skipping"
                        print green("*")+" "+white(match)+" "+red("[ Masked ]")
                        print green("*")+" "+white(match)
                    myversion = self.getVersion(full_package, search.VERSION_RELEASE)

                    mycat = match.split("/")[0]
                    mypkg = match.split("/")[1]
                    mycpv = match + "-" + myversion
                    myebuild = self.portdb.findname(mycpv)
                        # Ebuild available: size distfiles via the Manifest.
                        pkgdir = os.path.dirname(myebuild)
                        from portage import manifest
                        mf = manifest.Manifest(
                            pkgdir, self.settings["DISTDIR"])
                            uri_map = self.portdb.getFetchMap(mycpv)
                        except portage.exception.InvalidDependString, e:
                            file_size_str = "Unknown (%s)" % (e,)
                                mysum[0] = mf.getDistfilesSize(uri_map)
                                file_size_str = "Unknown (missing " + \
                                    "digest for %s)" % (e,)
                        # No ebuild: fall back to the binary package size.
                        if db is not vardb and \
                            db.cpv_exists(mycpv):
                            if not myebuild and hasattr(db, "bintree"):
                                myebuild = db.bintree.getname(mycpv)
                                    mysum[0] = os.stat(myebuild).st_size

                    if myebuild and file_size_str is None:
                        # Format the size in comma-grouped kB.
                        mystr = str(mysum[0] / 1024)
                            mystr = mystr[:mycount] + "," + mystr[mycount:]
                        file_size_str = mystr + " kB"

                        print " ", darkgreen("Latest version available:"),myversion
                        print " ", self.getInstallationStatus(mycat+'/'+mypkg)
                            (darkgreen("Size of files:"), file_size_str)
                        print " ", darkgreen("Homepage:")+" ",homepage
                        print " ", darkgreen("Description:")+" ",desc
                        print " ", darkgreen("License:")+" ",license

    def getInstallationStatus(self,package):
        # Report the best installed version of `package`, or a
        # "[ Not Installed ]" placeholder.
        installed_package = self.vartree.dep_bestmatch(package)
            version = self.getVersion(installed_package,search.VERSION_RELEASE)
            result = darkgreen("Latest version installed:")+" "+version
            result = darkgreen("Latest version installed:")+" [ Not Installed ]"

    def getVersion(self,full_package,detail):
        # Extract the version (with -rN revision when present and
        # requested via search.VERSION_RELEASE) from a full cpv.
        if len(full_package) > 1:
            package_parts = portage.catpkgsplit(full_package)
            if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
                result = package_parts[2]+ "-" + package_parts[3]
                result = package_parts[2]
class RootConfig(object):
    """This is used internally by depgraph to track information about a
    """
    # NOTE(review): fragmentary excerpt -- the docstring close, the
    # pkg_tree_map opener and parts of __init__ are not visible.

        # Maps package type name -> tree name in the trees dict.
        "ebuild" : "porttree",
        "binary" : "bintree",
        "installed" : "vartree"

    # Build the inverse mapping (tree name -> package type name).
    for k, v in pkg_tree_map.iteritems():

    def __init__(self, settings, trees, setconfig):
        self.settings = settings
        # Sorted tuple of implicitly-allowed IUSE flags for this config.
        self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
        self.root = self.settings["ROOT"]
        self.setconfig = setconfig
        self.sets = self.setconfig.getSets()
        # In-memory dbapi holding only packages that passed visibility.
        self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg, args_set, root_config):
    """Create a new atom for the world file if one does not exist. If the
    argument atom is precise enough to identify a specific slot then a slot
    atom will be returned. Atoms that are in the system set may also be stored
    in world since system atoms can only match one slot while world atoms can
    be greedy with respect to slots. Unslotted system packages will not be
    """
    # NOTE(review): fragmentary excerpt -- several guard lines, the
    # new_world_atom initialization and return paths are not visible;
    # indentation is reconstructed.
    arg_atom = args_set.findAtomForPackage(pkg)
    cp = portage.dep_getkey(arg_atom)
    sets = root_config.sets
    portdb = root_config.trees["porttree"].dbapi
    vardb = root_config.trees["vartree"].dbapi
    # Determine whether this cp is slotted in the portage tree...
    available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
        for cpv in portdb.match(cp))
    slotted = len(available_slots) > 1 or \
        (len(available_slots) == 1 and "0" not in available_slots)
        # check the vdb in case this is multislot
        available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
            for cpv in vardb.match(cp))
        slotted = len(available_slots) > 1 or \
            (len(available_slots) == 1 and "0" not in available_slots)
    if slotted and arg_atom != cp:
        # If the user gave a specific atom, store it as a
        # slot atom in the world file.
        slot_atom = pkg.slot_atom

        # For USE=multislot, there are a couple of cases to
        #
        # 1) SLOT="0", but the real SLOT spontaneously changed to some
        # unknown value, so just record an unslotted atom.
        #
        # 2) SLOT comes from an installed package and there is no
        # matching SLOT in the portage tree.
        #
        # Make sure that the slot atom is available in either the
        # portdb or the vardb, since otherwise the user certainly
        # doesn't want the SLOT atom recorded in the world file
        # (case 1 above). If it's only available in the vardb,
        # the user may be trying to prevent a USE=multislot
        # package from being removed by --depclean (case 2 above).

        if not portdb.match(slot_atom):
            # SLOT seems to come from an installed multislot package
            # If there is no installed package matching the SLOT atom,
            # it probably changed SLOT spontaneously due to USE=multislot,
            # so just record an unslotted atom.
            if vardb.match(slot_atom):
                # Now verify that the argument is precise
                # enough to identify a specific slot.
                matches = mydb.match(arg_atom)
                matched_slots = set()
                    matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
                if len(matched_slots) == 1:
                    new_world_atom = slot_atom

    if new_world_atom == sets["world"].findAtomForPackage(pkg):
        # Both atoms would be identical, so there's nothing to add.

    # Unlike world atoms, system atoms are not greedy for slots, so they
    # can't be safely excluded from world if they are slotted.
    system_atom = sets["system"].findAtomForPackage(pkg)
        if not portage.dep_getkey(system_atom).startswith("virtual/"):
        # System virtuals aren't safe to exclude from world since they can
        # match multiple old-style virtuals but only one of them will be
        # pulled in by update or depclean.
        providers = portdb.mysettings.getvirtuals().get(
            portage.dep_getkey(system_atom))
        if providers and len(providers) == 1 and providers[0] == cp:
    return new_world_atom
def filter_iuse_defaults(iuse):
    # Yield IUSE flags with any leading "+"/"-" default marker stripped.
    # NOTE(review): fragmentary excerpt -- the loop header and the yield
    # statements are not visible.
        if flag.startswith("+") or flag.startswith("-"):
class SlotObject(object):
    # Lightweight base class: accepts keyword args in __init__ and binds
    # each declared __slots__ attribute (walking base classes too).
    # NOTE(review): fragmentary excerpt -- the class-walk loop headers
    # and the copy() return are not visible.
    __slots__ = ("__weakref__",)

    def __init__(self, **kwargs):
        classes = [self.__class__]
            # Walk the MRO manually, collecting __slots__ from each base.
            classes.extend(c.__bases__)
            slots = getattr(c, "__slots__", None)
                # Missing kwargs default to None.
                myvalue = kwargs.get(myattr, None)
                setattr(self, myattr, myvalue)

        """
        Create a new instance and copy all attributes
        defined from __slots__ (including those from
        """
        obj = self.__class__()

        classes = [self.__class__]
            classes.extend(c.__bases__)
            slots = getattr(c, "__slots__", None)
                setattr(obj, myattr, getattr(self, myattr))
class AbstractDepPriority(SlotObject):
    # Base for dependency priorities: rich comparisons delegate to the
    # subclass-provided __int__() so priorities order like integers.
    __slots__ = ("buildtime", "runtime", "runtime_post")

    def __lt__(self, other):
        return self.__int__() < other

    def __le__(self, other):
        return self.__int__() <= other

    def __eq__(self, other):
        return self.__int__() == other

    def __ne__(self, other):
        return self.__int__() != other

    def __gt__(self, other):
        return self.__int__() > other

    def __ge__(self, other):
        return self.__int__() >= other

        # (from copy -- the def line is not visible in this excerpt)
        return copy.copy(self)
class DepPriority(AbstractDepPriority):
    """
    This class generates an integer priority level based on various
    attributes of the dependency relationship. Attributes can be assigned
    at any time and the new integer value will be generated on calls to the
    __int__() method. Rich comparison operators are supported.

    The boolean attributes that affect the integer value are "satisfied",
    "buildtime", "runtime", and "system". Various combinations of
    attributes lead to the following priority levels:

    Combination of properties Priority Category
    not satisfied and buildtime 0 HARD
    not satisfied and runtime -1 MEDIUM
    not satisfied and runtime_post -2 MEDIUM_SOFT
    satisfied and buildtime and rebuild -3 SOFT
    satisfied and buildtime -4 SOFT
    satisfied and runtime -5 SOFT
    satisfied and runtime_post -6 SOFT
    (none of the above) -6 SOFT

    Several integer constants are defined for categorization of priority
    MEDIUM The upper boundary for medium dependencies.
    MEDIUM_SOFT The upper boundary for medium-soft dependencies.
    SOFT The upper boundary for soft dependencies.
    MIN The lower boundary for soft dependencies.
    """
    __slots__ = ("satisfied", "rebuild")

    # NOTE(review): fragmentary excerpt -- the __int__/__str__ method
    # headers and most branch bodies are not visible below.
        # (from __int__) unsatisfied deps rank highest (see table above).
        if not self.satisfied:
            if self.runtime_post:
        if self.runtime_post:

        # (from __str__) map the integer value to a category name.
        myvalue = self.__int__()
        if myvalue > self.MEDIUM:
        if myvalue > self.MEDIUM_SOFT:
        if myvalue > self.SOFT:
            return "medium-soft"
class BlockerDepPriority(DepPriority):
    # Priority used for blocker relationships.
    # NOTE(review): the class body is not visible in this excerpt.
# Shared singleton instance used wherever a blocker priority is needed.
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
    __slots__ = ("satisfied",)
    """
    Combination of properties Priority Category
    runtime_post -1 HARD
    (none of the above) -2 SOFT
    """
    # NOTE(review): fragmentary excerpt -- the __int__/__str__ headers
    # and most branches are not visible.
        # (from __int__) runtime_post deps map to -1 per the table above.
        if self.runtime_post:

        # (from __str__) categorize based on the SOFT boundary.
        myvalue = self.__int__()
        if myvalue > self.SOFT:
class FakeVartree(portage.vartree):
    """This implements an in-memory copy of a vartree instance that provides
    all the interfaces required for use by the depgraph. The vardb is locked
    during the constructor call just long enough to read a copy of the
    installed package information. This allows the depgraph to do its
    dependency calculations without holding a lock on the vardb. It also
    allows things like vardb global updates to be done in memory so that the
    user doesn't necessarily need write access to the vardb in cases where
    global updates are necessary (updates are performed when necessary if there
    is not a matching ebuild in the tree)."""
    # NOTE(review): fragmentary excerpt -- try/finally frames, several
    # loop/guard headers and return lines are not visible; indentation
    # is reconstructed.
    def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
        self._root_config = root_config
        if pkg_cache is None:  # (default-cache assignment not visible)
        real_vartree = root_config.trees["vartree"]
        portdb = root_config.trees["porttree"].dbapi
        self.root = real_vartree.root
        self.settings = real_vartree.settings
        mykeys = list(real_vartree.dbapi._aux_cache_keys)
        self._pkg_cache = pkg_cache
        # The in-memory dbapi that stands in for the real vardb.
        self.dbapi = PackageVirtualDbapi(real_vartree.settings)
        vdb_path = os.path.join(self.root, portage.VDB_PATH)
            # At least the parent needs to exist for the lock file.
            portage.util.ensure_dirs(vdb_path)
        except portage.exception.PortageException:  # (handler not visible)
            # Only lock when we can actually write the lock file.
            if acquire_lock and os.access(vdb_path, os.W_OK):
                vdb_lock = portage.locks.lockdir(vdb_path)
            real_dbapi = real_vartree.dbapi
            for cpv in real_dbapi.cpv_all():
                cache_key = ("installed", self.root, cpv, "nomerge")
                pkg = self._pkg_cache.get(cache_key)
                    # Cache hit: reuse the already-built metadata.
                    metadata = pkg.metadata
                    metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
                myslot = metadata["SLOT"]
                mycp = portage.dep_getkey(cpv)
                myslot_atom = "%s:%s" % (mycp, myslot)
                    mycounter = long(metadata["COUNTER"])
                    metadata["COUNTER"] = str(mycounter)
                # Keep only the highest COUNTER per slot atom.
                other_counter = slot_counters.get(myslot_atom, None)
                if other_counter is not None:
                    if other_counter > mycounter:
                slot_counters[myslot_atom] = mycounter
                pkg = Package(built=True, cpv=cpv,
                    installed=True, metadata=metadata,
                    root_config=root_config, type_name="installed")
                self._pkg_cache[pkg] = pkg
                self.dbapi.cpv_inject(pkg)
            real_dbapi.flush_cache()
                # (from finally) release the vardb lock if it was taken.
                portage.locks.unlockdir(vdb_lock)
        # Populate the old-style virtuals using the cached values.
        if not self.settings.treeVirtuals:
            self.settings.treeVirtuals = portage.util.map_dictlist_vals(
                portage.getCPFromCPV, self.get_all_provides())

        # Initialize variables needed for lazy cache pulls of the live ebuild
        # metadata. This ensures that the vardb lock is released ASAP, without
        # being delayed in case cache generation is triggered.
        self._aux_get = self.dbapi.aux_get
        self.dbapi.aux_get = self._aux_get_wrapper
        self._match = self.dbapi.match
        self.dbapi.match = self._match_wrapper
        self._aux_get_history = set()
        self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
        self._portdb = portdb
        self._global_updates = None

    def _match_wrapper(self, cpv, use_cache=1):
        """
        Make sure the metadata in Package instances gets updated for any
        cpv that is returned from a match() call, since the metadata can
        be accessed directly from the Package instance instead of via
        """
        matches = self._match(cpv, use_cache=use_cache)
            if cpv in self._aux_get_history:
            # Force a metadata refresh for unseen cpvs.
            self._aux_get_wrapper(cpv, [])

    def _aux_get_wrapper(self, pkg, wants):
        if pkg in self._aux_get_history:
            return self._aux_get(pkg, wants)
        self._aux_get_history.add(pkg)
            # Use the live ebuild metadata if possible.
            live_metadata = dict(izip(self._portdb_keys,
                self._portdb.aux_get(pkg, self._portdb_keys)))
            if not portage.eapi_is_supported(live_metadata["EAPI"]):  # (raise not visible)
            self.dbapi.aux_update(pkg, live_metadata)
        except (KeyError, portage.exception.PortageException):
            # No usable ebuild: apply global updates to the stored deps.
            if self._global_updates is None:
                self._global_updates = \
                    grab_global_updates(self._portdb.porttree_root)
            perform_global_updates(
                pkg, self.dbapi, self._global_updates)
        return self._aux_get(pkg, wants)

    def sync(self, acquire_lock=1):
        """
        Call this method to synchronize state with the real vardb
        after one or more packages may have been installed or
        """
        vdb_path = os.path.join(self.root, portage.VDB_PATH)
            # At least the parent needs to exist for the lock file.
            portage.util.ensure_dirs(vdb_path)
        except portage.exception.PortageException:  # (handler not visible)
            if acquire_lock and os.access(vdb_path, os.W_OK):
                vdb_lock = portage.locks.lockdir(vdb_path)
                # (from finally) release the lock if it was taken.
                portage.locks.unlockdir(vdb_lock)

        # (from _sync) reconcile the in-memory db with the real vardb.
        real_vardb = self._root_config.trees["vartree"].dbapi
        current_cpv_set = frozenset(real_vardb.cpv_all())
        pkg_vardb = self.dbapi
        aux_get_history = self._aux_get_history

        # Remove any packages that have been uninstalled.
        for pkg in list(pkg_vardb):
            if pkg.cpv not in current_cpv_set:
                pkg_vardb.cpv_remove(pkg)
                aux_get_history.discard(pkg.cpv)

        # Validate counters and timestamps.
        validation_keys = ["COUNTER", "_mtime_"]
        for cpv in current_cpv_set:
            pkg_hash_key = ("installed", root, cpv, "nomerge")
            pkg = pkg_vardb.get(pkg_hash_key)
                counter, mtime = real_vardb.aux_get(cpv, validation_keys)
                # Drop stale entries so they get re-read below.
                if counter != pkg.metadata["COUNTER"] or \
                    pkg_vardb.cpv_remove(pkg)
                    aux_get_history.discard(pkg.cpv)
                pkg = self._pkg(cpv)
            # Keep only the highest COUNTER per slot atom.
            other_counter = slot_counters.get(pkg.slot_atom)
            if other_counter is not None:
                if other_counter > pkg.counter:
            slot_counters[pkg.slot_atom] = pkg.counter
            pkg_vardb.cpv_inject(pkg)

        real_vardb.flush_cache()

    def _pkg(self, cpv):
        # Build a Package instance for an installed cpv by reading its
        # cached metadata keys from the real vardb.
        root_config = self._root_config
        real_vardb = root_config.trees["vartree"].dbapi
        db_keys = list(real_vardb._aux_cache_keys)
        pkg = Package(cpv=cpv, installed=True,
            metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
            root_config=root_config,
            type_name="installed")
def grab_global_updates(portdir):
    # Read and parse all package-move/slot-move commands from
    # $PORTDIR/profiles/updates.
    # NOTE(review): fragmentary excerpt -- the except body, the
    # upd_commands initialization and the return are not visible.
    from portage.update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
        rawupdates = grab_updates(updpath)
    except portage.exception.DirectoryNotFound:  # (fallback not visible)
    for mykey, mystat, mycontent in rawupdates:
        commands, errors = parse_updates(mycontent)
        upd_commands.extend(commands)
# Apply global update commands to the dependency metadata
# (DEPEND/RDEPEND/PDEPEND) of `mycpv` stored in `mydb`.
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246 from portage.update import update_dbentries
1247 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249 updates = update_dbentries(mycommands, aux_dict)
# NOTE(review): line 1250 is elided; presumably an `if updates:` guard
# protects the write-back below -- confirm against the full file.
1251 mydb.aux_update(mycpv, updates)
# Visibility filter: a package with an empty SLOT, an unaccepted CHOST,
# an unsupported or deprecated EAPI, missing keywords, a package.mask or
# profile mask atom, or missing licenses is not visible.
# NOTE(review): this chunk elides the `return False` after each failed
# test, the final `return True`, and the `try:` wrapping the license
# check -- confirm against the full file.
1253 def visible(pkgsettings, pkg):
1255 Check if a package is visible. This can raise an InvalidDependString
1256 exception if LICENSE is invalid.
1257 TODO: optionally generate a list of masking reasons
1259 @returns: True if the package is visible, False otherwise.
1261 if not pkg.metadata["SLOT"]:
# CHOST acceptance only matters for built-but-not-yet-installed packages.
1263 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264 if not pkgsettings._accept_chost(pkg):
1266 eapi = pkg.metadata["EAPI"]
1267 if not portage.eapi_is_supported(eapi):
# Deprecated EAPIs only block packages that are not already installed.
1269 if not pkg.installed:
1270 if portage._eapi_is_deprecated(eapi):
1272 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1274 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1276 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1279 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1281 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for `pkg` via
# portage.getmaskingstatus(), then append CHOST and SLOT problems that
# getmaskingstatus() does not report.
# NOTE(review): lines 1290, 1295 and the trailing `return mreasons`
# (~1298) are elided in this chunk.
1286 def get_masking_status(pkg, pkgsettings, root_config):
1287 mreasons = portage.getmaskingstatus(
1288 pkg, settings=pkgsettings,
1289 portdb=root_config.trees["porttree"].dbapi)
1291 if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292 if not pkgsettings._accept_chost(pkg):
1293 mreasons.append("CHOST: %s" % \
1294 pkg.metadata["CHOST"])
1296 if not pkg.metadata["SLOT"]:
1297 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for `cpv` from `db` and compute its masking reasons.
# Returns (metadata, mreasons); metadata is None when aux_get failed, in
# which case mreasons is ["corruption"].
# NOTE(review): lines 1303-1304 and 1307-1308 are elided -- presumably a
# try/except around aux_get that sets `metadata = None` on failure -- and
# line 1314 (likely `else:`) is elided before the Package construction.
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302 db, pkg_type, built, installed, db_keys):
1305 metadata = dict(izip(db_keys,
1306 db.aux_get(cpv, db_keys)))
# Only ebuilds (not built packages) get USE computed from current config.
1309 if metadata and not built:
1310 pkgsettings.setcpv(cpv, mydb=metadata)
1311 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312 if metadata is None:
1313 mreasons = ["corruption"]
1315 pkg = Package(type_name=pkg_type, root_config=root_config,
1316 cpv=cpv, built=built, installed=installed, metadata=metadata)
1317 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318 return metadata, mreasons
# Print one line per masked package plus (at most once each) the mask-file
# comment and the on-disk location of each missing license. Returns True
# if any package was masked because of an unsupported EAPI.
# NOTE(review): several lines are elided in this chunk, including the
# `shown_cpvs = set()` initialization, the `continue` statements after the
# dedup checks, the try/except scaffolding around the license lookup, and
# the comment/license printing lines -- confirm against the full file.
1320 def show_masked_packages(masked_packages):
1321 shown_licenses = set()
1322 shown_comments = set()
1323 # Maybe there is both an ebuild and a binary. Only
1324 # show one of them to avoid redundant appearance.
1326 have_eapi_mask = False
1327 for (root_config, pkgsettings, cpv,
1328 metadata, mreasons) in masked_packages:
1329 if cpv in shown_cpvs:
1332 comment, filename = None, None
# Only package.mask entries carry an explanatory comment in the tree.
1333 if "package.mask" in mreasons:
1334 comment, filename = \
1335 portage.getmaskingreason(
1336 cpv, metadata=metadata,
1337 settings=pkgsettings,
1338 portdb=root_config.trees["porttree"].dbapi,
1339 return_location=True)
1340 missing_licenses = []
1342 if not portage.eapi_is_supported(metadata["EAPI"]):
1343 have_eapi_mask = True
1345 missing_licenses = \
1346 pkgsettings._getMissingLicenses(
1348 except portage.exception.InvalidDependString:
1349 # This will have already been reported
1350 # above via mreasons.
# Python 2 print statement (this file predates the print() function).
1353 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354 if comment and comment not in shown_comments:
1357 shown_comments.add(comment)
1358 portdb = root_config.trees["porttree"].dbapi
1359 for l in missing_licenses:
1360 l_path = portdb.findLicensePath(l)
1361 if l in shown_licenses:
1363 msg = ("A copy of the '%s' license" + \
1364 " is located at '%s'.") % (l, l_path)
1367 shown_licenses.add(l)
1368 return have_eapi_mask
# Base class for hashable depgraph/scheduler tasks. Identity is defined by
# a subclass-provided `_get_hash_key()` tuple; equality, hashing, length,
# indexing, iteration, containment and str() all delegate to that key, so
# a Task compares equal to its own hash-key tuple.
# NOTE(review): several method `def` headers are elided in this chunk
# (__hash__ at ~1385, __len__ at ~1391, __iter__ at ~1397, __str__ at
# ~1403), as is the `return hash_key` of _get_hash_key() (~1377).
1370 class Task(SlotObject):
1371 __slots__ = ("_hash_key", "_hash_value")
1373 def _get_hash_key(self):
1374 hash_key = getattr(self, "_hash_key", None)
# Subclasses must assign _hash_key before the first identity operation.
1375 if hash_key is None:
1376 raise NotImplementedError(self)
1379 def __eq__(self, other):
1380 return self._get_hash_key() == other
1382 def __ne__(self, other):
1383 return self._get_hash_key() != other
# Cache the hash of the (immutable) key tuple after first use.
1386 hash_value = getattr(self, "_hash_value", None)
1387 if hash_value is None:
1388 self._hash_value = hash(self._get_hash_key())
1389 return self._hash_value
1392 return len(self._get_hash_key())
1394 def __getitem__(self, key):
1395 return self._get_hash_key()[key]
1398 return iter(self._get_hash_key())
1400 def __contains__(self, key):
1401 return key in self._get_hash_key()
1404 return str(self._get_hash_key())
# Task representing a blocker atom for a particular root; the hash key is
# the tuple ("blocks", root, atom, eapi).
1406 class Blocker(Task):
# Reuse Task's cached-hash implementation explicitly.
1408 __hash__ = Task.__hash__
1409 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1411 def __init__(self, **kwargs):
1412 Task.__init__(self, **kwargs)
# cp is the category/package portion of the blocker atom.
1413 self.cp = portage.dep_getkey(self.atom)
1415 def _get_hash_key(self):
1416 hash_key = getattr(self, "_hash_key", None)
1417 if hash_key is None:
# NOTE(review): line 1418 (presumably `self._hash_key = \`) is elided;
# the tuple below is the value being assigned -- confirm against the
# full file.
1419 ("blocks", self.root, self.atom, self.eapi)
1420 return self._hash_key
# Task representing one package instance (ebuild, binary or installed).
# Derived attributes (cp, slot_atom, category, pf, cpv_split, pv_split)
# are computed once in __init__; metadata access goes through
# _PackageMetadataWrapper, which keeps attributes like use/iuse/counter
# synchronized with metadata writes.
# NOTE(review): many lines are elided in this chunk: the metadata_keys
# list header (~1432), the nested `_use` class header (~1448-1449), parts
# of the _iuse token-classification loop, __getattribute__'s opening
# lines, the __cmp__ body, and the return statements of the rich
# comparison methods.
1422 class Package(Task):
1424 __hash__ = Task.__hash__
1425 __slots__ = ("built", "cpv", "depth",
1426 "installed", "metadata", "onlydeps", "operation",
1427 "root_config", "type_name",
1428 "category", "counter", "cp", "cpv_split",
1429 "inherited", "iuse", "mtime",
1430 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# Metadata keys tracked for every Package (list head elided at ~1432).
1433 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434 "INHERITED", "IUSE", "KEYWORDS",
1435 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1438 def __init__(self, **kwargs):
1439 Task.__init__(self, **kwargs)
1440 self.root = self.root_config.root
# Wrapping metadata triggers the _set_* hooks for the wrapped keys.
1441 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442 self.cp = portage.cpv_getkey(self.cpv)
1443 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444 self.category, self.pf = portage.catsplit(self.cpv)
1445 self.cpv_split = portage.catpkgsplit(self.cpv)
1446 self.pv_split = self.cpv_split[1:]
# Nested `_use` class (header elided): immutable view of enabled USE flags.
1450 __slots__ = ("__weakref__", "enabled")
1452 def __init__(self, use):
1453 self.enabled = frozenset(use)
1455 class _iuse(object):
1457 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1459 def __init__(self, tokens, iuse_implicit):
1460 self.tokens = tuple(tokens)
1461 self.iuse_implicit = iuse_implicit
# Tokens prefixed "+"/"-" mark default-enabled/-disabled flags; the
# classification loop (lines ~1462-1472) is partially elided here.
1468 enabled.append(x[1:])
1470 disabled.append(x[1:])
1473 self.enabled = frozenset(enabled)
1474 self.disabled = frozenset(disabled)
1475 self.all = frozenset(chain(enabled, disabled, other))
# Lazily build and cache the IUSE-matching regex on first access of
# the `regex` attribute.
1477 def __getattribute__(self, name):
1480 return object.__getattribute__(self, "regex")
1481 except AttributeError:
1482 all = object.__getattribute__(self, "all")
1483 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484 # Escape anything except ".*" which is supposed
1485 # to pass through from _get_implicit_iuse()
1486 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487 regex = "^(%s)$" % "|".join(regex)
1488 regex = regex.replace("\\.\\*", ".*")
1489 self.regex = re.compile(regex)
1490 return object.__getattribute__(self, name)
1492 def _get_hash_key(self):
1493 hash_key = getattr(self, "_hash_key", None)
1494 if hash_key is None:
# Derive `operation` lazily: installed/onlydeps packages are "nomerge".
1495 if self.operation is None:
1496 self.operation = "merge"
1497 if self.onlydeps or self.installed:
1498 self.operation = "nomerge"
# NOTE(review): the `self._hash_key = \` assignment line (~1499) is
# elided; the tuple below is its value.
1500 (self.type_name, self.root, self.cpv, self.operation)
1501 return self._hash_key
1503 def __cmp__(self, other):
# Rich comparisons order by version (portage.pkgcmp) within the same cp;
# the `return True/False` bodies are elided in this chunk.
1510 def __lt__(self, other):
1511 if other.cp != self.cp:
1513 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1517 def __le__(self, other):
1518 if other.cp != self.cp:
1520 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1524 def __gt__(self, other):
1525 if other.cp != self.cp:
1527 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1531 def __ge__(self, other):
1532 if other.cp != self.cp:
1534 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Complete metadata key set: every auxdb key except the UNUSED_* slots and
# the obsolete CDEPEND, plus the keys Package itself tracks.
1538 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1539 if not x.startswith("UNUSED_"))
1540 _all_metadata_keys.discard("CDEPEND")
1541 _all_metadata_keys.update(Package.metadata_keys)
# Generate a __slots__-based dict class restricted to exactly these keys.
1543 from portage.cache.mappings import slot_dict_class
1544 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1546 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1548 Detect metadata updates and synchronize Package attributes.
1551 __slots__ = ("_pkg",)
# Keys whose writes must be mirrored onto the owning Package instance.
1552 _wrapped_keys = frozenset(
1553 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1555 def __init__(self, pkg, metadata):
1556 _PackageMetadataWrapperBase.__init__(self)
# NOTE(review): line 1557 (presumably `self._pkg = pkg`) is elided in
# this chunk -- confirm against the full file.
1558 self.update(metadata)
# Writing a wrapped key dispatches to the matching _set_<key>() hook.
1560 def __setitem__(self, k, v):
1561 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1562 if k in self._wrapped_keys:
1563 getattr(self, "_set_" + k.lower())(k, v)
1565 def _set_inherited(self, k, v):
1566 if isinstance(v, basestring):
1567 v = frozenset(v.split())
1568 self._pkg.inherited = v
1570 def _set_iuse(self, k, v):
1571 self._pkg.iuse = self._pkg._iuse(
1572 v.split(), self._pkg.root_config.iuse_implicit)
# NOTE(review): the _set_slot body (~1575) is elided; presumably it
# assigns self._pkg.slot = v.
1574 def _set_slot(self, k, v):
1577 def _set_use(self, k, v):
1578 self._pkg.use = self._pkg._use(v.split())
1580 def _set_counter(self, k, v):
1581 if isinstance(v, basestring):
# NOTE(review): lines 1582-1585 are elided -- presumably a string-to-
# integer conversion with a fallback on ValueError.
1586 self._pkg.counter = v
1588 def _set__mtime_(self, k, v):
1589 if isinstance(v, basestring):
# NOTE(review): the surrounding try/except and the final
# `self._pkg.mtime = v` (~1590, 1592-1595) are elided.
1591 v = float(v.strip())
# Runs the ebuild "fetch" phase in the foreground (for --fetchonly /
# --fetch-all-uri), using a private PORTAGE_TMPDIR so the main build
# directory tree is not locked.
1596 class EbuildFetchonly(SlotObject):
1598 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
# NOTE(review): the execute() `def` line and its try/except/finally
# scaffolding are elided in this chunk. The fragment below swaps in a
# private tmpdir, runs _execute(), then restores the original tmpdir and
# removes the private one.
1601 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1602 # ensuring sane $PWD (bug #239560) and storing elog
1603 # messages. Use a private temp directory, in order
1604 # to avoid locking the main one.
1605 settings = self.settings
1606 global_tmpdir = settings["PORTAGE_TMPDIR"]
1607 from tempfile import mkdtemp
1609 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
# EnvironmentErrors other than PermissionDenied propagate unchanged;
# permission problems are rewrapped with the offending directory.
1611 if e.errno != portage.exception.PermissionDenied.errno:
1613 raise portage.exception.PermissionDenied(global_tmpdir)
1614 settings["PORTAGE_TMPDIR"] = private_tmpdir
1615 settings.backup_changes("PORTAGE_TMPDIR")
1617 retval = self._execute()
1619 settings["PORTAGE_TMPDIR"] = global_tmpdir
1620 settings.backup_changes("PORTAGE_TMPDIR")
1621 shutil.rmtree(private_tmpdir)
# _execute(): set up an ebuild environment and run doebuild("fetch");
# its `def` line (~1624) is elided.
1625 settings = self.settings
1627 root_config = pkg.root_config
1628 portdb = root_config.trees["porttree"].dbapi
1629 ebuild_path = portdb.findname(pkg.cpv)
1630 settings.setcpv(pkg)
1631 debug = settings.get("PORTAGE_DEBUG") == "1"
1632 use_cache = 1 # always true
1633 portage.doebuild_environment(ebuild_path, "fetch",
1634 root_config.root, settings, debug, use_cache, portdb)
1635 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1637 retval = portage.doebuild(ebuild_path, "fetch",
1638 self.settings["ROOT"], self.settings, debug=debug,
1639 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1640 mydbapi=portdb, tree="porttree")
1642 if retval != os.EX_OK:
1643 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1644 eerror(msg, phase="unpack", key=pkg.cpv)
# Flush any elog messages accumulated during the fetch phase.
1646 portage.elog.elog_process(self.pkg.cpv, self.settings)
# Base class for tasks driven by the scheduler's poll loop: start(),
# isAlive(), poll(), wait() and cancel() wrap subclass hooks and fire the
# registered start/exit listeners at the right moments.
1649 class AsynchronousTask(SlotObject):
1651 Subclasses override _wait() and _poll() so that calls
1652 to public methods can be wrapped for implementing
1653 hooks such as exit listener notification.
1655 Sublasses should call self.wait() to notify exit listeners after
1656 the task is complete and self.returncode has been set.
1659 __slots__ = ("background", "cancelled", "returncode") + \
1660 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
# NOTE(review): the `def` lines for start()/isAlive()/poll()/wait()/
# cancel() and parts of their bodies are elided in this chunk; the
# fragments below belong to those methods.
1664 Start an asynchronous task and then return as soon as possible.
1670 raise NotImplementedError(self)
1673 return self.returncode is None
1680 return self.returncode
1683 if self.returncode is None:
1686 return self.returncode
1689 return self.returncode
1692 self.cancelled = True
1695 def addStartListener(self, f):
1697 The function will be called with one argument, a reference to self.
1699 if self._start_listeners is None:
1700 self._start_listeners = []
1701 self._start_listeners.append(f)
1703 def removeStartListener(self, f):
1704 if self._start_listeners is None:
1706 self._start_listeners.remove(f)
1708 def _start_hook(self):
# Fire start listeners exactly once: detach the list before calling.
1709 if self._start_listeners is not None:
1710 start_listeners = self._start_listeners
1711 self._start_listeners = None
1713 for f in start_listeners:
1716 def addExitListener(self, f):
1718 The function will be called with one argument, a reference to self.
1720 if self._exit_listeners is None:
1721 self._exit_listeners = []
1722 self._exit_listeners.append(f)
1724 def removeExitListener(self, f):
# While _wait_hook() is running, pending listeners live on the stack
# instead of the normal list; remove from whichever holds them.
1725 if self._exit_listeners is None:
1726 if self._exit_listener_stack is not None:
1727 self._exit_listener_stack.remove(f)
1729 self._exit_listeners.remove(f)
1731 def _wait_hook(self):
1733 Call this method after the task completes, just before returning
1734 the returncode from wait() or poll(). This hook is
1735 used to trigger exit listeners when the returncode first
1738 if self.returncode is not None and \
1739 self._exit_listeners is not None:
1741 # This prevents recursion, in case one of the
1742 # exit handlers triggers this method again by
1743 # calling wait(). Use a stack that gives
1744 # removeExitListener() an opportunity to consume
1745 # listeners from the stack, before they can get
1746 # called below. This is necessary because a call
1747 # to one exit listener may result in a call to
1748 # removeExitListener() for another listener on
1749 # the stack. That listener needs to be removed
1750 # from the stack since it would be inconsistent
1751 # to call it after it has been been passed into
1752 # removeExitListener().
1753 self._exit_listener_stack = self._exit_listeners
1754 self._exit_listeners = None
# Reverse so listeners pop off in registration order.
1756 self._exit_listener_stack.reverse()
1757 while self._exit_listener_stack:
1758 self._exit_listener_stack.pop()(self)
1760 class PipeReader(AsynchronousTask):
1763 Reads output from one or more files and saves it in memory,
1764 for retrieval via the getvalue() method. This is driven by
1765 the scheduler's poll() loop, so it runs entirely within the
# NOTE(review): the `def` lines for _start(), isAlive(), _wait(),
# getvalue() and close() are elided in this chunk; the fragments below
# belong to those methods.
1769 __slots__ = ("input_files", "scheduler",) + \
1770 ("pid", "_read_data", "_registered", "_reg_ids")
# _start(): switch each input fd to non-blocking mode and register it
# with the scheduler for POLLIN events.
1775 self._reg_ids = set()
1776 self._read_data = []
1777 for k, f in self.input_files.iteritems():
1778 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1779 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1780 self._reg_ids.add(self.scheduler.register(f.fileno(),
1781 PollConstants.POLLIN, self._output_handler))
1782 self._registered = True
1785 return self._registered
1788 if self.returncode is not None:
1789 return self.returncode
# Block in the scheduler until all registered fds reach EOF.
1791 if self._registered:
1792 self.scheduler.schedule(self._reg_ids)
1795 self.returncode = os.EX_OK
1796 return self.returncode
1799 """Retrieve the entire contents"""
1800 return "".join(self._read_data)
1803 """Free the memory buffer."""
1804 self._read_data = None
# Poll callback: read a chunk from whichever input file fired.
1806 def _output_handler(self, fd, event):
1807 files = self.input_files
1808 for f in files.itervalues():
1809 if fd == f.fileno():
# array('B') + fromfile reads up to _bufsize raw bytes (Python 2 idiom);
# NOTE(review): the EOFError handling around fromfile (~1815-1818) is
# elided in this chunk.
1812 buf = array.array('B')
1814 buf.fromfile(f, self._bufsize)
1819 self._read_data.append(buf.tostring())
1824 return self._registered
1826 def _unregister(self):
1828 Unregister from the scheduler and close open files.
1831 self._registered = False
1833 if self._reg_ids is not None:
1834 for reg_id in self._reg_ids:
1835 self.scheduler.unregister(reg_id)
1836 self._reg_ids = None
1838 if self.input_files is not None:
1839 for f in self.input_files.itervalues():
# NOTE(review): line 1840 (presumably `f.close()`) is elided.
1841 self.input_files = None
# A task implemented as a chain of sub-tasks; _current_task always points
# at the sub-task in flight, and the _default_exit/_final_exit helpers are
# the standard exit-listener plumbing used by subclasses.
1843 class CompositeTask(AsynchronousTask):
1845 __slots__ = ("scheduler",) + ("_current_task",)
# NOTE(review): the `def` lines for isAlive(), cancel(), _poll() and
# _wait() are elided in this chunk; the fragments below belong to them.
1848 return self._current_task is not None
1851 self.cancelled = True
1852 if self._current_task is not None:
1853 self._current_task.cancel()
1857 This does a loop calling self._current_task.poll()
1858 repeatedly as long as the value of self._current_task
1859 keeps changing. It calls poll() a maximum of one time
1860 for a given self._current_task instance. This is useful
1861 since calling poll() on a task can trigger advance to
1862 the next task could eventually lead to the returncode
1863 being set in cases when polling only a single task would
1864 not have the same effect.
1869 task = self._current_task
1870 if task is None or task is prev:
1871 # don't poll the same task more than once
1876 return self.returncode
1882 task = self._current_task
1884 # don't wait for the same task more than once
1887 # Before the task.wait() method returned, an exit
1888 # listener should have set self._current_task to either
1889 # a different task or None. Something is wrong.
1890 raise AssertionError("self._current_task has not " + \
1891 "changed since calling wait", self, task)
1895 return self.returncode
1897 def _assert_current(self, task):
1899 Raises an AssertionError if the given task is not the
1900 same one as self._current_task. This can be useful
1903 if task is not self._current_task:
1904 raise AssertionError("Unrecognized task: %s" % (task,))
1906 def _default_exit(self, task):
1908 Calls _assert_current() on the given task and then sets the
1909 composite returncode attribute if task.returncode != os.EX_OK.
1910 If the task failed then self._current_task will be set to None.
1911 Subclasses can use this as a generic task exit callback.
1914 @returns: The task.returncode attribute.
1916 self._assert_current(task)
1917 if task.returncode != os.EX_OK:
1918 self.returncode = task.returncode
1919 self._current_task = None
1920 return task.returncode
1922 def _final_exit(self, task):
1924 Assumes that task is the final task of this composite task.
1925 Calls _default_exit() and sets self.returncode to the task's
1926 returncode and sets self._current_task to None.
1928 self._default_exit(task)
1929 self._current_task = None
1930 self.returncode = task.returncode
1931 return self.returncode
1933 def _default_final_exit(self, task):
1935 This calls _final_exit() and then wait().
1937 Subclasses can use this as a generic final task exit callback.
1940 self._final_exit(task)
# NOTE(review): the `return self.wait()` line (~1941) is elided.
1943 def _start_task(self, task, exit_handler):
1945 Register exit handler for the given task, set it
1946 as self._current_task, and call task.start().
1948 Subclasses can use this as a generic way to start
1952 task.addExitListener(exit_handler)
1953 self._current_task = task
# NOTE(review): the `task.start()` line (~1954) is elided.
1956 class TaskSequence(CompositeTask):
1958 A collection of tasks that executes sequentially. Each task
1959 must have a addExitListener() method that can be used as
1960 a means to trigger movement from one task to the next.
1963 __slots__ = ("_task_queue",)
1965 def __init__(self, **kwargs):
1966 AsynchronousTask.__init__(self, **kwargs)
# FIFO queue of pending sub-tasks; deque gives O(1) popleft().
1967 self._task_queue = deque()
1969 def add(self, task):
1970 self._task_queue.append(task)
# NOTE(review): the `def` lines for _start() and cancel() (~1972, ~1975)
# are elided in this chunk.
1973 self._start_next_task()
1976 self._task_queue.clear()
1977 CompositeTask.cancel(self)
1979 def _start_next_task(self):
1980 self._start_task(self._task_queue.popleft(),
1981 self._task_exit_handler)
1983 def _task_exit_handler(self, task):
# Stop on first failure; otherwise advance, or finish when the queue
# is empty. NOTE(review): lines 1985 and 1988 (likely `self.wait()`
# calls) are elided.
1984 if self._default_exit(task) != os.EX_OK:
1986 elif self._task_queue:
1987 self._start_next_task()
1989 self._final_exit(task)
# AsynchronousTask backed by a forked child process: poll()/wait() reap
# the pid via os.waitpid(), and _set_returncode() decodes the raw
# waitpid status into a shell-style exit code.
1992 class SubProcess(AsynchronousTask):
1994 __slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id")
1996 # A file descriptor is required for the scheduler to monitor changes from
1997 # inside a poll() loop. When logging is not enabled, create a pipe just to
1998 # serve this purpose alone.
# NOTE(review): the `def` lines for _poll(), cancel(), isAlive() and
# _wait() are elided in this chunk; the fragments below belong to them.
2002 if self.returncode is not None:
2003 return self.returncode
2004 if self.pid is None:
2005 return self.returncode
2006 if self._registered:
2007 return self.returncode
# Non-blocking reap; ECHILD means the child is already gone, so
# synthesize a failure status of 1.
2010 retval = os.waitpid(self.pid, os.WNOHANG)
2012 if e.errno != errno.ECHILD:
2015 retval = (self.pid, 1)
2017 if retval == (0, 0):
2019 self._set_returncode(retval)
2020 return self.returncode
# cancel(): best-effort SIGTERM; ESRCH (no such process) is ignored.
2025 os.kill(self.pid, signal.SIGTERM)
2027 if e.errno != errno.ESRCH:
2031 self.cancelled = True
2032 if self.pid is not None:
2034 return self.returncode
2037 return self.pid is not None and \
2038 self.returncode is None
2042 if self.returncode is not None:
2043 return self.returncode
# Let the scheduler drive until our registered fd reaches EOF.
2045 if self._registered:
2046 self.scheduler.schedule(self._reg_id)
2048 if self.returncode is not None:
2049 return self.returncode
2052 wait_retval = os.waitpid(self.pid, 0)
2054 if e.errno != errno.ECHILD:
2057 self._set_returncode((self.pid, 1))
2059 self._set_returncode(wait_retval)
2061 return self.returncode
2063 def _unregister(self):
2065 Unregister from the scheduler and close open files.
2068 self._registered = False
2070 if self._reg_id is not None:
2071 self.scheduler.unregister(self._reg_id)
# NOTE(review): line 2072 (presumably `self._reg_id = None`) is elided.
2074 if self._files is not None:
2075 for f in self._files.itervalues():
# NOTE(review): lines 2076-2077 (close + `self._files = None`) are elided.
2079 def _set_returncode(self, wait_retval):
2081 retval = wait_retval[1]
2083 if retval != os.EX_OK:
# Decode waitpid status: a signal death (low byte set) is shifted into
# the 128+ range, a normal exit has its code in the high byte.
2085 retval = (retval & 0xff) << 8
2087 retval = retval >> 8
2089 self.returncode = retval
2091 class SpawnProcess(SubProcess):
2094 Constructor keyword args are passed into portage.process.spawn().
2095 The required "args" keyword argument will be passed as the first
# Names forwarded from attributes into the spawn() call.
2099 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2100 "uid", "gid", "groups", "umask", "logfile",
2101 "path_lookup", "pre_exec")
2103 __slots__ = ("args",) + \
2106 _file_names = ("log", "process", "stdout")
2107 _files_dict = slot_dict_class(_file_names, prefix="")
# _start(): build fd_pipes, spawn the child, and register the master
# pipe fd with the scheduler. NOTE(review): the `def _start` line and
# several scaffolding lines are elided in this chunk.
2115 if self.fd_pipes is None:
2117 fd_pipes = self.fd_pipes
2118 fd_pipes.setdefault(0, sys.stdin.fileno())
2119 fd_pipes.setdefault(1, sys.stdout.fileno())
2120 fd_pipes.setdefault(2, sys.stderr.fileno())
2122 # flush any pending output
2123 for fd in fd_pipes.itervalues():
2124 if fd == sys.stdout.fileno():
2126 if fd == sys.stderr.fileno():
2129 logfile = self.logfile
2130 self._files = self._files_dict()
# Master side is switched to non-blocking for the poll loop.
2133 master_fd, slave_fd = self._pipe(fd_pipes)
2134 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2135 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2138 fd_pipes_orig = fd_pipes.copy()
2140 # TODO: Use job control functions like tcsetpgrp() to control
2141 # access to stdin. Until then, use /dev/null so that any
2142 # attempts to read from stdin will immediately return EOF
2143 # instead of blocking indefinitely.
2144 null_input = open('/dev/null', 'rb')
2145 fd_pipes[0] = null_input.fileno()
2147 fd_pipes[0] = fd_pipes_orig[0]
2149 files.process = os.fdopen(master_fd, 'r')
# With a logfile: child stdout/stderr go to the slave pipe and we tee
# them into the log (and the real stdout when foregrounded).
2151 if logfile is not None:
2152 fd_pipes[1] = slave_fd
2153 fd_pipes[2] = slave_fd
2155 files.log = open(logfile, "a")
2156 portage.util.apply_secpass_permissions(logfile,
2157 uid=portage.portage_uid, gid=portage.portage_gid,
2160 if not self.background:
2161 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2163 output_handler = self._output_handler
2167 # Create a dummy pipe so the scheduler can monitor
2168 # the process from inside a poll() loop.
2169 fd_pipes[self._dummy_pipe_fd] = slave_fd
2171 fd_pipes[1] = slave_fd
2172 fd_pipes[2] = slave_fd
2173 output_handler = self._dummy_handler
# Gather spawn() keyword arguments from same-named attributes.
2176 for k in self._spawn_kwarg_names:
2177 v = getattr(self, k)
2181 kwargs["fd_pipes"] = fd_pipes
2182 kwargs["returnpid"] = True
# logfile is handled by the tee logic above, not by spawn() itself.
2183 kwargs.pop("logfile", None)
2185 retval = self._spawn(self.args, **kwargs)
2188 if null_input is not None:
# An int return means spawn failed before fork; clean up and bail.
2191 if isinstance(retval, int):
2193 for f in files.values():
2195 self.returncode = retval
2199 self.pid = retval[0]
2200 portage.process.spawned_pids.remove(self.pid)
2202 self._reg_id = self.scheduler.register(files.process.fileno(),
2203 PollConstants.POLLIN, output_handler)
2204 self._registered = True
2206 def _pipe(self, fd_pipes):
2208 @type fd_pipes: dict
2209 @param fd_pipes: pipes from which to copy terminal size if desired.
# NOTE(review): the base implementation body (~2210-2211, presumably
# `return os.pipe()`) is elided.
2213 def _spawn(self, args, **kwargs):
2214 return portage.process.spawn(args, **kwargs)
# Poll callback when a logfile is used: tee child output to the log
# (and stdout in the foreground). NOTE(review): EOF handling lines
# around fromfile are elided.
2216 def _output_handler(self, fd, event):
2218 buf = array.array('B')
2220 buf.fromfile(files.process, self._bufsize)
2224 if not self.background:
2225 buf.tofile(files.stdout)
2226 files.stdout.flush()
2227 buf.tofile(files.log)
2232 return self._registered
2234 def _dummy_handler(self, fd, event):
2236 This method is mainly interested in detecting EOF, since
2237 the only purpose of the pipe is to allow the scheduler to
2238 monitor the process from inside a poll() loop.
2241 buf = array.array('B')
2243 buf.fromfile(files.process, self._bufsize)
2251 return self._registered
2253 class MiscFunctionsProcess(SpawnProcess):
2255 Spawns misc-functions.sh with an existing ebuild environment.
2258 __slots__ = ("commands", "phase", "pkg", "settings")
# _start(): compose the misc-functions.sh command line and delegate to
# SpawnProcess. NOTE(review): the `def _start` line (~2260) is elided.
2261 settings = self.settings
2262 settings.pop("EBUILD_PHASE", None)
2263 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2264 misc_sh_binary = os.path.join(portage_bin_path,
2265 os.path.basename(portage.const.MISC_SH_BINARY))
2267 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2268 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before spawning.
2270 portage._doebuild_exit_status_unlink(
2271 settings.get("EBUILD_EXIT_STATUS_FILE"))
2273 SpawnProcess._start(self)
2275 def _spawn(self, args, **kwargs):
2276 settings = self.settings
2277 debug = settings.get("PORTAGE_DEBUG") == "1"
# Run through portage.spawn so the ebuild environment is applied.
2278 return portage.spawn(" ".join(args), settings,
2279 debug=debug, **kwargs)
2281 def _set_returncode(self, wait_retval):
2282 SpawnProcess._set_returncode(self, wait_retval)
# Cross-check the exit-status file written by the shell side.
2283 self.returncode = portage._doebuild_exit_status_check_and_log(
2284 self.settings, self.phase, self.returncode)
# Spawns `ebuild <path> fetch` in the background to download SRC_URI
# files, using a private build dir lock and a config instance allocated
# from the shared pool.
2286 class EbuildFetcher(SpawnProcess):
2288 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
# _start(): lock a build dir, compose the ebuild(1) fetch command and
# delegate to SpawnProcess. NOTE(review): the `def _start` line and a
# few lines (phase selection ~2303-2307, niceness/parallel conditions
# ~2313/2315, debug condition ~2323) are elided in this chunk.
2293 root_config = self.pkg.root_config
2294 portdb = root_config.trees["porttree"].dbapi
2295 ebuild_path = portdb.findname(self.pkg.cpv)
2296 settings = self.config_pool.allocate()
2297 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2298 self._build_dir.lock()
2299 self._build_dir.clean()
2300 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2301 if self.logfile is None:
2302 self.logfile = settings.get("PORTAGE_LOG_FILE")
2308 # If any incremental variables have been overridden
2309 # via the environment, those values need to be passed
2310 # along here so that they are correctly considered by
2311 # the config instance in the subproccess.
2312 fetch_env = os.environ.copy()
# Prevent double-niceness: the parent already applied PORTAGE_NICENESS.
2314 fetch_env["PORTAGE_NICENESS"] = "0"
2316 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2318 ebuild_binary = os.path.join(
2319 settings["PORTAGE_BIN_PATH"], "ebuild")
2321 fetch_args = [ebuild_binary, ebuild_path, phase]
2322 debug = settings.get("PORTAGE_DEBUG") == "1"
2324 fetch_args.append("--debug")
2326 self.args = fetch_args
2327 self.env = fetch_env
2328 SpawnProcess._start(self)
2330 def _pipe(self, fd_pipes):
2331 """When appropriate, use a pty so that fetcher progress bars,
2332 like wget has, will work properly."""
2333 if self.background or not sys.stdout.isatty():
2334 # When the output only goes to a log file,
2335 # there's no point in creating a pty.
# NOTE(review): line 2336 (presumably `return os.pipe()`) is elided.
2337 stdout_pipe = fd_pipes.get(1)
2338 got_pty, master_fd, slave_fd = \
2339 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2340 return (master_fd, slave_fd)
2342 def _set_returncode(self, wait_retval):
2343 SpawnProcess._set_returncode(self, wait_retval)
2344 # Collect elog messages that might have been
2345 # created by the pkg_nofetch phase.
2346 if self._build_dir is not None:
2347 # Skip elog messages for prefetch, in order to avoid duplicates.
2348 if not self.prefetch and self.returncode != os.EX_OK:
# NOTE(review): line 2349 (presumably `elog_out = None`) and lines
# 2351, 2361 (stream handling) are elided in this fragment.
2350 if self.logfile is not None:
2352 elog_out = open(self.logfile, 'a')
2353 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2354 if self.logfile is not None:
2355 msg += ", Log file:"
2356 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2357 if self.logfile is not None:
2358 eerror(" '%s'" % (self.logfile,),
2359 phase="unpack", key=self.pkg.cpv, out=elog_out)
2360 if elog_out is not None:
2362 if not self.prefetch:
2363 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2364 features = self._build_dir.settings.features
# On success, clean the build dir; always unlock and return the
# config instance to the pool.
2365 if self.returncode == os.EX_OK:
2366 self._build_dir.clean()
2367 self._build_dir.unlock()
2368 self.config_pool.deallocate(self._build_dir.settings)
2369 self._build_dir = None
# Manages locking of a package's PORTAGE_BUILDDIR (and its category
# directory), deriving the path from the ebuild environment when it is
# not supplied explicitly.
2371 class EbuildBuildDir(SlotObject):
2373 __slots__ = ("dir_path", "pkg", "settings",
2374 "locked", "_catdir", "_lock_obj")
2376 def __init__(self, **kwargs):
2377 SlotObject.__init__(self, **kwargs)
# NOTE(review): lines 2378-2379 (presumably `self.locked = False`) are
# elided, as is the `def lock` line (~2381).
2382 This raises an AlreadyLocked exception if lock() is called
2383 while a lock is already held. In order to avoid this, call
2384 unlock() or check whether the "locked" attribute is True
2385 or False before calling lock().
2387 if self._lock_obj is not None:
2388 raise self.AlreadyLocked((self._lock_obj,))
2390 dir_path = self.dir_path
# When no dir_path was given, run doebuild_environment() to compute
# PORTAGE_BUILDDIR for this package.
2391 if dir_path is None:
2392 root_config = self.pkg.root_config
2393 portdb = root_config.trees["porttree"].dbapi
2394 ebuild_path = portdb.findname(self.pkg.cpv)
2395 settings = self.settings
2396 settings.setcpv(self.pkg)
2397 debug = settings.get("PORTAGE_DEBUG") == "1"
2398 use_cache = 1 # always true
2399 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2400 self.settings, debug, use_cache, portdb)
2401 dir_path = self.settings["PORTAGE_BUILDDIR"]
2403 catdir = os.path.dirname(dir_path)
2404 self._catdir = catdir
# Lock the category dir while creating/locking the build dir itself,
# then release the category lock.
2406 portage.util.ensure_dirs(os.path.dirname(catdir),
2407 gid=portage.portage_gid,
2411 catdir_lock = portage.locks.lockdir(catdir)
2412 portage.util.ensure_dirs(catdir,
2413 gid=portage.portage_gid,
2415 self._lock_obj = portage.locks.lockdir(dir_path)
2417 self.locked = self._lock_obj is not None
2418 if catdir_lock is not None:
2419 portage.locks.unlockdir(catdir_lock)
# NOTE(review): the `def clean` line (~2421) is elided.
2422 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2423 by keepwork or keeptemp in FEATURES."""
2424 settings = self.settings
2425 features = settings.features
2426 if not ("keepwork" in features or "keeptemp" in features):
2428 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
# Python 2 except syntax; a missing build dir is not an error.
2429 except EnvironmentError, e:
2430 if e.errno != errno.ENOENT:
# unlock(): release the build dir lock, then try to remove the (now
# possibly empty) category dir under its own lock. The `def unlock`
# line (~2434) and the rmdir call (~2446-2450) are elided here.
2435 if self._lock_obj is None:
2438 portage.locks.unlockdir(self._lock_obj)
2439 self._lock_obj = None
2442 catdir = self._catdir
2445 catdir_lock = portage.locks.lockdir(catdir)
# Removal of a non-empty/shared category dir is expected and ignored.
2451 if e.errno not in (errno.ENOENT,
2452 errno.ENOTEMPTY, errno.EEXIST):
2455 portage.locks.unlockdir(catdir_lock)
2457 class AlreadyLocked(portage.exception.PortageException):
2460 class EbuildBuild(CompositeTask):
# NOTE(review): this chunk is a line-sampled extract; the leading integers are
# original file line numbers fused into the text and interior lines are
# missing.  Comments below are review annotations only.
#
# Drives building a source ebuild end to end: wait for (or perform) the
# source fetch, lock and clean the build directory, run the build via
# EbuildExecuter, optionally create a binary package, and finally merge.
# Stages are chained asynchronously through _start_task()/exit callbacks
# (CompositeTask protocol).
2462 __slots__ = ("args_set", "config_pool", "find_blockers",
2463 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2464 "prefetcher", "settings", "world_atom") + \
2465 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# Start of the task: record EMERGE_FROM and locate the ebuild, then either
# hand control to a still-running background prefetcher or proceed directly.
2469 logger = self.logger
2472 settings = self.settings
2473 world_atom = self.world_atom
2474 root_config = pkg.root_config
2477 portdb = root_config.trees[tree].dbapi
2478 settings["EMERGE_FROM"] = pkg.type_name
2479 settings.backup_changes("EMERGE_FROM")
2481 ebuild_path = portdb.findname(self.pkg.cpv)
2482 self._ebuild_path = ebuild_path
2484 prefetcher = self.prefetcher
2485 if prefetcher is None:
2487 elif not prefetcher.isAlive():
2489 elif prefetcher.poll() is None:
# Prefetcher still running: tell the user where to watch fetch progress,
# then wait for it via an exit listener instead of blocking.
2491 waiting_msg = "Fetching files " + \
2492 "in the background. " + \
2493 "To view fetch progress, run `tail -f " + \
2494 "/var/log/emerge-fetch.log` in another " + \
2496 msg_prefix = colorize("GOOD", " * ")
2497 from textwrap import wrap
2498 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2499 for line in wrap(waiting_msg, 65))
2500 if not self.background:
2501 writemsg(waiting_msg, noiselevel=-1)
2503 self._current_task = prefetcher
2504 prefetcher.addExitListener(self._prefetch_exit)
# Prefetcher already finished (or absent): continue synchronously.
2507 self._prefetch_exit(prefetcher)
2509 def _prefetch_exit(self, prefetcher):
# After prefetch: either run a synchronous pretend/fetchonly fetch
# (EbuildFetchonly.execute() returns an exit code directly), or start an
# asynchronous EbuildFetcher and continue in _fetch_exit.
2513 settings = self.settings
2516 fetcher = EbuildFetchonly(
2517 fetch_all=opts.fetch_all_uri,
2518 pkg=pkg, pretend=opts.pretend,
2520 retval = fetcher.execute()
2521 self.returncode = retval
2525 fetcher = EbuildFetcher(config_pool=self.config_pool,
2526 fetchall=opts.fetch_all_uri,
2527 fetchonly=opts.fetchonly,
2528 background=self.background,
2529 pkg=pkg, scheduler=self.scheduler)
2531 self._start_task(fetcher, self._fetch_exit)
2533 def _fetch_exit(self, fetcher):
# Decide success/failure of the fetch, preserve the fetch log only on
# failure, then lock the build dir and kick off the actual build.
2537 fetch_failed = False
2539 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2541 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2543 if fetch_failed and fetcher.logfile is not None and \
2544 os.path.exists(fetcher.logfile):
2545 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2547 if not fetch_failed and fetcher.logfile is not None:
2548 # Fetch was successful, so remove the fetch log.
2550 os.unlink(fetcher.logfile)
2554 if fetch_failed or opts.fetchonly:
2558 logger = self.logger
2560 pkg_count = self.pkg_count
2561 scheduler = self.scheduler
2562 settings = self.settings
2563 features = settings.features
2564 ebuild_path = self._ebuild_path
2565 system_set = pkg.root_config.sets["system"]
2567 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2568 self._build_dir.lock()
2570 # Cleaning is triggered before the setup
2571 # phase, in portage.doebuild().
2572 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2573 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2574 short_msg = "emerge: (%s of %s) %s Clean" % \
2575 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2576 logger.log(msg, short_msg=short_msg)
2578 #buildsyspkg: Check if we need to _force_ binary package creation
2579 self._issyspkg = "buildsyspkg" in features and \
2580 system_set.findAtomForPackage(pkg) and \
# Log "Compiling/Packaging" when a binary package will be produced,
# otherwise "Compiling/Merging"; both share the same short message.
2583 if opts.buildpkg or self._issyspkg:
2585 self._buildpkg = True
2587 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2588 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2589 short_msg = "emerge: (%s of %s) %s Compile" % \
2590 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2591 logger.log(msg, short_msg=short_msg)
2594 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2595 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2596 short_msg = "emerge: (%s of %s) %s Compile" % \
2597 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2598 logger.log(msg, short_msg=short_msg)
2600 build = EbuildExecuter(background=self.background, pkg=pkg,
2601 scheduler=scheduler, settings=settings)
2602 self._start_task(build, self._build_exit)
2604 def _unlock_builddir(self):
# Flush pending elog messages before releasing the build-dir lock.
2605 portage.elog.elog_process(self.pkg.cpv, self.settings)
2606 self._build_dir.unlock()
2608 def _build_exit(self, build):
# On build failure, release the lock and stop; on success, optionally
# start the EbuildBinpkg packager (forced for system packages).
2609 if self._default_exit(build) != os.EX_OK:
2610 self._unlock_builddir()
2615 buildpkg = self._buildpkg
2618 self._final_exit(build)
2623 msg = ">>> This is a system package, " + \
2624 "let's pack a rescue tarball.\n"
2626 log_path = self.settings.get("PORTAGE_LOG_FILE")
2627 if log_path is not None:
2628 log_file = open(log_path, 'a')
2634 if not self.background:
2635 portage.writemsg_stdout(msg, noiselevel=-1)
2637 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2638 scheduler=self.scheduler, settings=self.settings)
2640 self._start_task(packager, self._buildpkg_exit)
2642 def _buildpkg_exit(self, packager):
2644 Released build dir lock when there is a failure or
2645 when in buildpkgonly mode. Otherwise, the lock will
2646 be released when merge() is called.
2649 if self._default_exit(packager) == os.EX_OK and \
2650 self.opts.buildpkgonly:
2651 # Need to call "clean" phase for buildpkgonly mode
2652 portage.elog.elog_process(self.pkg.cpv, self.settings)
2654 clean_phase = EbuildPhase(background=self.background,
2655 pkg=self.pkg, phase=phase,
2656 scheduler=self.scheduler, settings=self.settings,
2658 self._start_task(clean_phase, self._clean_exit)
2661 if self._final_exit(packager) != os.EX_OK or \
2662 self.opts.buildpkgonly:
2663 self._unlock_builddir()
2666 def _clean_exit(self, clean_phase):
2667 if self._final_exit(clean_phase) != os.EX_OK or \
2668 self.opts.buildpkgonly:
2669 self._unlock_builddir()
# install() — synchronous merge step (presumably called by the scheduler
# after a successful build; the def line itself is missing from this
# extract — TODO confirm against full source).
2674 Install the package and then clean up and release locks.
2675 Only call this after the build has completed successfully
2676 and neither fetchonly nor buildpkgonly mode are enabled.
2679 find_blockers = self.find_blockers
2680 ldpath_mtimes = self.ldpath_mtimes
2681 logger = self.logger
2683 pkg_count = self.pkg_count
2684 settings = self.settings
2685 world_atom = self.world_atom
2686 ebuild_path = self._ebuild_path
2689 merge = EbuildMerge(find_blockers=self.find_blockers,
2690 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2691 pkg_count=pkg_count, pkg_path=ebuild_path,
2692 scheduler=self.scheduler,
2693 settings=settings, tree=tree, world_atom=world_atom)
2695 msg = " === (%s of %s) Merging (%s::%s)" % \
2696 (pkg_count.curval, pkg_count.maxval,
2697 pkg.cpv, ebuild_path)
2698 short_msg = "emerge: (%s of %s) %s Merge" % \
2699 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2700 logger.log(msg, short_msg=short_msg)
2703 rval = merge.execute()
# Build-dir lock is always released once the merge attempt completes.
2705 self._unlock_builddir()
2709 class EbuildExecuter(CompositeTask):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Runs the build phases of an ebuild in order: clean, setup, unpack, then
# the remaining phases (prepare/configure/compile/test/install) as a
# TaskSequence.  Setup and unpack go through the scheduler's serialized
# scheduleSetup()/scheduleUnpack() hooks.
2711 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
# Phase list for EAPI >= 2; older EAPIs skip src_prepare/src_configure
# (see the EAPI checks in _unpack_exit below).
2713 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose ebuilds are "live" (VCS checkouts); their $DISTDIR access
# is serialized during unpack.
2715 _live_eclasses = frozenset([
2725 self._tree = "porttree"
2728 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2729 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2730 self._start_task(clean_phase, self._clean_phase_exit)
2732 def _clean_phase_exit(self, clean_phase):
# After clean: prepare build dirs (initializes PORTAGE_LOG_FILE) and hand
# the setup phase to the scheduler for serialized execution.
2734 if self._default_exit(clean_phase) != os.EX_OK:
2739 scheduler = self.scheduler
2740 settings = self.settings
2743 # This initializes PORTAGE_LOG_FILE.
2744 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2746 setup_phase = EbuildPhase(background=self.background,
2747 pkg=pkg, phase="setup", scheduler=scheduler,
2748 settings=settings, tree=self._tree)
2750 setup_phase.addExitListener(self._setup_exit)
2751 self._current_task = setup_phase
2752 self.scheduler.scheduleSetup(setup_phase)
2754 def _setup_exit(self, setup_phase):
2756 if self._default_exit(setup_phase) != os.EX_OK:
2760 unpack_phase = EbuildPhase(background=self.background,
2761 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2762 settings=self.settings, tree=self._tree)
2764 if self._live_eclasses.intersection(self.pkg.inherited):
2765 # Serialize $DISTDIR access for live ebuilds since
2766 # otherwise they can interfere with eachother.
2768 unpack_phase.addExitListener(self._unpack_exit)
2769 self._current_task = unpack_phase
2770 self.scheduler.scheduleUnpack(unpack_phase)
# Non-live ebuilds run unpack as an ordinary sub-task.
2773 self._start_task(unpack_phase, self._unpack_exit)
2775 def _unpack_exit(self, unpack_phase):
# Queue the remaining phases, trimming the phase list for old EAPIs
# (0/1/2_pre1 have no src_prepare/src_configure).
2777 if self._default_exit(unpack_phase) != os.EX_OK:
2781 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2784 phases = self._phases
2785 eapi = pkg.metadata["EAPI"]
2786 if eapi in ("0", "1", "2_pre1"):
2787 # skip src_prepare and src_configure
2789 elif eapi in ("2_pre2",):
2793 for phase in phases:
2794 ebuild_phases.add(EbuildPhase(background=self.background,
2795 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2796 settings=self.settings, tree=self._tree))
2798 self._start_task(ebuild_phases, self._default_final_exit)
2800 class EbuildMetadataPhase(SubProcess):
2803 Asynchronous interface for the ebuild "depend" phase which is
2804 used to extract metadata from the ebuild.
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Spawns `ebuild depend` via portage.doebuild(returnpid=True) with a pipe
# attached at self._metadata_fd, reads the raw metadata text from that pipe
# non-blockingly through the scheduler's poll loop, and delivers it to
# metadata_callback on success.
2807 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2808 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2811 _file_names = ("ebuild",)
2812 _files_dict = slot_dict_class(_file_names, prefix="")
2813 _bufsize = SpawnProcess._bufsize
# _start(): wire up fd_pipes, create the non-blocking metadata pipe,
# spawn doebuild, and register the read end with the scheduler.
2817 settings = self.settings
2819 ebuild_path = self.ebuild_path
2820 debug = settings.get("PORTAGE_DEBUG") == "1"
2824 if self.fd_pipes is not None:
# Copy so the caller's mapping is not mutated by the setdefault calls.
2825 fd_pipes = self.fd_pipes.copy()
2829 fd_pipes.setdefault(0, sys.stdin.fileno())
2830 fd_pipes.setdefault(1, sys.stdout.fileno())
2831 fd_pipes.setdefault(2, sys.stderr.fileno())
2833 # flush any pending output
2834 for fd in fd_pipes.itervalues():
2835 if fd == sys.stdout.fileno():
2837 if fd == sys.stderr.fileno():
2840 fd_pipes_orig = fd_pipes.copy()
2841 self._files = self._files_dict()
2844 master_fd, slave_fd = os.pipe()
# Non-blocking reads so the poll-driven _output_handler never stalls.
2845 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2846 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2848 fd_pipes[self._metadata_fd] = slave_fd
2850 retval = portage.doebuild(ebuild_path, "depend",
2851 settings["ROOT"], settings, debug,
2852 mydbapi=self.portdb, tree="porttree",
2853 fd_pipes=fd_pipes, returnpid=True)
2857 if isinstance(retval, int):
2858 # doebuild failed before spawning
2860 self.returncode = retval
# With returnpid=True, doebuild returns a pid list on success; take
# ownership of the child by removing it from portage's global registry.
2864 self.pid = retval[0]
2865 portage.process.spawned_pids.remove(self.pid)
2867 self._raw_metadata = []
2868 files.ebuild = os.fdopen(master_fd, 'r')
2869 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2870 PollConstants.POLLIN, self._output_handler)
2871 self._registered = True
2873 def _output_handler(self, fd, event):
# Poll callback: accumulate pipe output; an empty read means EOF, after
# which (on a clean exit) the collected text is split into auxdb key/value
# pairs and handed to metadata_callback.
2875 self._raw_metadata.append(files.ebuild.read())
2876 if not self._raw_metadata[-1]:
2880 if self.returncode == os.EX_OK:
2881 metadata = izip(portage.auxdbkeys,
2882 "".join(self._raw_metadata).splitlines())
2883 self.metadata_callback(self.cpv, self.ebuild_path,
2884 self.repo_path, metadata, self.ebuild_mtime)
# Returning a false value unregisters this handler from the scheduler.
2886 return self._registered
2888 class EbuildProcess(SpawnProcess):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# SpawnProcess subclass that runs a single ebuild phase by delegating the
# actual spawn to portage.doebuild().
2890 __slots__ = ("phase", "pkg", "settings", "tree")
# _start(): pick the log file (skipped for clean phases) then defer to
# SpawnProcess._start().
2893 # Don't open the log file during the clean phase since the
2894 # open file can result in an nfs lock on $T/build.log which
2895 # prevents the clean phase from removing $T.
2896 if self.phase not in ("clean", "cleanrm"):
2897 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2898 SpawnProcess._start(self)
2900 def _pipe(self, fd_pipes):
# Use a pty (with the terminal size copied from stdout) when possible,
# falling back to a plain pipe; only the fd pair is returned.
2901 stdout_pipe = fd_pipes.get(1)
2902 got_pty, master_fd, slave_fd = \
2903 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2904 return (master_fd, slave_fd)
2906 def _spawn(self, args, **kwargs):
# Run the configured phase through doebuild against this package's tree;
# the positional args from SpawnProcess are intentionally ignored.
2908 root_config = self.pkg.root_config
2910 mydbapi = root_config.trees[tree].dbapi
2911 settings = self.settings
2912 ebuild_path = settings["EBUILD"]
2913 debug = settings.get("PORTAGE_DEBUG") == "1"
2915 rval = portage.doebuild(ebuild_path, self.phase,
2916 root_config.root, settings, debug,
2917 mydbapi=mydbapi, tree=tree, **kwargs)
2921 def _set_returncode(self, wait_retval):
# Post-exit hook: validate the phase's exit-status file (non-clean phases
# only) and restore userpriv permissions.
2922 SpawnProcess._set_returncode(self, wait_retval)
2924 if self.phase not in ("clean", "cleanrm"):
2925 self.returncode = portage._doebuild_exit_status_check_and_log(
2926 self.settings, self.phase, self.returncode)
2928 portage._post_phase_userpriv_perms(self.settings)
2930 class EbuildPhase(CompositeTask):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Wraps a single EbuildProcess phase and, after it exits, runs any
# configured post-phase commands (MiscFunctionsProcess) plus phase-specific
# fixups (build-log check and uid fix for "install").
2932 __slots__ = ("background", "pkg", "phase",
2933 "scheduler", "settings", "tree")
2935 _post_phase_cmds = portage._post_phase_cmds
# _start(): launch the underlying EbuildProcess for this phase.
2939 ebuild_process = EbuildProcess(background=self.background,
2940 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
2941 settings=self.settings, tree=self.tree)
2943 self._start_task(ebuild_process, self._ebuild_exit)
2945 def _ebuild_exit(self, ebuild_process):
# After the install phase, scan the build log for QA problems (output
# appended to the log file when running in the background).
2947 if self.phase == "install":
2949 log_path = self.settings.get("PORTAGE_LOG_FILE")
2951 if self.background and log_path is not None:
2952 log_file = open(log_path, 'a')
2955 portage._check_build_log(self.settings, out=out)
2957 if log_file is not None:
2960 if self._default_exit(ebuild_process) != os.EX_OK:
2964 settings = self.settings
2966 if self.phase == "install":
2967 portage._post_src_install_uid_fix(settings)
2969 post_phase_cmds = self._post_phase_cmds.get(self.phase)
2970 if post_phase_cmds is not None:
2971 post_phase = MiscFunctionsProcess(background=self.background,
2972 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
2973 scheduler=self.scheduler, settings=settings)
2974 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: finish immediately with the phase's code.
2977 self.returncode = ebuild_process.returncode
2978 self._current_task = None
2981 def _post_phase_exit(self, post_phase):
2982 if self._final_exit(post_phase) != os.EX_OK:
2983 writemsg("!!! post %s failed; exiting.\n" % self.phase,
2985 self._current_task = None
2989 class EbuildBinpkg(EbuildProcess):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
2991 This assumes that src_install() has successfully completed.
# Runs the "package" phase to produce a .tbz2, writing to a pid-suffixed
# temp file in the bintree's pkgdir, then injects it into the bintree once
# the phase exits successfully.
2993 __slots__ = ("_binpkg_tmpfile",)
# _start(): force phase/tree, reserve the tmpfile path and export it to
# the ebuild environment via PORTAGE_BINPKG_TMPFILE.
2996 self.phase = "package"
2997 self.tree = "porttree"
2999 root_config = pkg.root_config
3000 portdb = root_config.trees["porttree"].dbapi
3001 bintree = root_config.trees["bintree"]
3002 ebuild_path = portdb.findname(self.pkg.cpv)
3003 settings = self.settings
3004 debug = settings.get("PORTAGE_DEBUG") == "1"
# Make sure an existing binary package won't be clobbered mid-write.
3006 bintree.prevent_collision(pkg.cpv)
3007 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3008 pkg.cpv + ".tbz2." + str(os.getpid()))
3009 self._binpkg_tmpfile = binpkg_tmpfile
3010 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3011 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3014 EbuildProcess._start(self)
# The tmpfile var is only needed while spawning; drop it afterwards.
3016 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3018 def _set_returncode(self, wait_retval):
3019 EbuildProcess._set_returncode(self, wait_retval)
3022 bintree = pkg.root_config.trees["bintree"]
3023 binpkg_tmpfile = self._binpkg_tmpfile
3024 if self.returncode == os.EX_OK:
# Atomically move the finished tmpfile into the binary package tree.
3025 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3027 class EbuildMerge(SlotObject):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Synchronous merge of a built image into the live filesystem via
# portage.merge(); updates the world file through world_atom on success
# and logs completion.
3029 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3030 "pkg", "pkg_count", "pkg_path", "pretend",
3031 "scheduler", "settings", "tree", "world_atom")
# execute(): perform the merge from $PORTAGE_BUILDDIR's image/build-info
# and return portage.merge()'s exit code.
3034 root_config = self.pkg.root_config
3035 settings = self.settings
3036 retval = portage.merge(settings["CATEGORY"],
3037 settings["PF"], settings["D"],
3038 os.path.join(settings["PORTAGE_BUILDDIR"],
3039 "build-info"), root_config.root, settings,
3040 myebuild=settings["EBUILD"],
3041 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3042 vartree=root_config.trees["vartree"],
3043 prev_mtimes=self.ldpath_mtimes,
3044 scheduler=self.scheduler,
3045 blockers=self.find_blockers)
3047 if retval == os.EX_OK:
# Record the package in the world set if appropriate.
3048 self.world_atom(self.pkg)
3053 def _log_success(self):
# Emit the post-build-clean notice (unless FEATURES=noclean) and the
# standard "completed emerge" log line.
3055 pkg_count = self.pkg_count
3056 pkg_path = self.pkg_path
3057 logger = self.logger
3058 if "noclean" not in self.settings.features:
3059 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3060 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3061 logger.log((" === (%s of %s) " + \
3062 "Post-Build Cleaning (%s::%s)") % \
3063 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3064 short_msg=short_msg)
3065 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3066 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3068 class PackageUninstall(AsynchronousTask):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Thin AsynchronousTask wrapper around the module-level unmerge() helper.
# Despite the class name, the work runs synchronously inside _start;
# UninstallFailure is translated into the task's returncode.
3070 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3074 unmerge(self.pkg.root_config, self.opts, "unmerge",
3075 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3076 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3077 writemsg_level=self._writemsg_level)
3078 except UninstallFailure, e:
3079 self.returncode = e.status
3081 self.returncode = os.EX_OK
3084 def _writemsg_level(self, msg, level=0, noiselevel=0):
# Route unmerge output: with no log file, print unless backgrounded and
# below WARNING; with a log file, also append the message there.
3086 log_path = self.settings.get("PORTAGE_LOG_FILE")
3087 background = self.background
3089 if log_path is None:
3090 if not (background and level < logging.WARNING):
3091 portage.util.writemsg_level(msg,
3092 level=level, noiselevel=noiselevel)
3095 portage.util.writemsg_level(msg,
3096 level=level, noiselevel=noiselevel)
3098 f = open(log_path, 'a')
3104 class Binpkg(CompositeTask):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Installs a binary package: optionally fetch it (--getbinpkg), verify its
# digests, lock a build dir, clean, run setup, extract the image, then
# merge.  Mirrors EbuildBuild's callback-chained structure.
3106 __slots__ = ("find_blockers",
3107 "ldpath_mtimes", "logger", "opts",
3108 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3109 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3110 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3112 def _writemsg_level(self, msg, level=0, noiselevel=0):
# Foreground: print normally; additionally append to the log file if one
# is configured.
3114 if not self.background:
3115 portage.util.writemsg_level(msg,
3116 level=level, noiselevel=noiselevel)
3118 log_path = self.settings.get("PORTAGE_LOG_FILE")
3119 if log_path is not None:
3120 f = open(log_path, 'a')
# _start(): compute all working paths under PORTAGE_TMPDIR and decide
# whether digest verification is needed (FEATURES=strict, not --pretend).
3129 settings = self.settings
3130 settings.setcpv(pkg)
3131 self._tree = "bintree"
3132 self._bintree = self.pkg.root_config.trees[self._tree]
3133 self._verify = "strict" in self.settings.features and \
3134 not self.opts.pretend
3136 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3137 "portage", pkg.category, pkg.pf)
3138 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3139 pkg=pkg, settings=settings)
3140 self._image_dir = os.path.join(dir_path, "image")
3141 self._infloc = os.path.join(dir_path, "build-info")
3142 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3144 # The prefetcher has already completed or it
3145 # could be running now. If it's running now,
3146 # wait for it to complete since it holds
3147 # a lock on the file being fetched. The
3148 # portage.locks functions are only designed
3149 # to work between separate processes. Since
3150 # the lock is held by the current process,
3151 # use the scheduler and fetcher methods to
3152 # synchronize with the fetcher.
3153 prefetcher = self.prefetcher
3154 if prefetcher is None:
3156 elif not prefetcher.isAlive():
3158 elif prefetcher.poll() is None:
3160 waiting_msg = ("Fetching '%s' " + \
3161 "in the background. " + \
3162 "To view fetch progress, run `tail -f " + \
3163 "/var/log/emerge-fetch.log` in another " + \
3164 "terminal.") % prefetcher.pkg_path
3165 msg_prefix = colorize("GOOD", " * ")
3166 from textwrap import wrap
3167 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3168 for line in wrap(waiting_msg, 65))
3169 if not self.background:
3170 writemsg(waiting_msg, noiselevel=-1)
3172 self._current_task = prefetcher
3173 prefetcher.addExitListener(self._prefetch_exit)
3176 self._prefetch_exit(prefetcher)
3178 def _prefetch_exit(self, prefetcher):
# Fetch the binary package from the remote binhost only when --getbinpkg
# is set and the bintree reports it as remote; otherwise skip straight to
# verification.
3181 pkg_count = self.pkg_count
3182 fetcher = BinpkgFetcher(background=self.background,
3183 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3184 scheduler=self.scheduler)
3185 pkg_path = fetcher.pkg_path
3186 self._pkg_path = pkg_path
3188 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3190 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3191 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3192 short_msg = "emerge: (%s of %s) %s Fetch" % \
3193 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3194 self.logger.log(msg, short_msg=short_msg)
# Route the fetcher through the scheduler's fetch queue so concurrent
# downloads stay limited.
3197 fetcher.addExitListener(self._fetcher_exit)
3198 self._current_task = fetcher
3199 self.scheduler.fetch.schedule(fetcher)
3201 self._start_task(fetcher, self._fetcher_exit)
3204 self._fetcher_exit(fetcher)
3206 def _fetcher_exit(self, fetcher):
3208 # The fetcher only has a returncode when
3209 # --getbinpkg is enabled.
3210 if fetcher.returncode is not None:
3211 self._fetched_pkg = True
3212 if self.opts.fetchonly:
3213 self._final_exit(fetcher)
3216 elif self._default_exit(fetcher) != os.EX_OK:
# Digest verification, when enabled, also goes through the fetch queue
# (it reads the same file the fetcher would lock).
3222 verifier = BinpkgVerifier(background=self.background,
3223 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3226 verifier.addExitListener(self._verifier_exit)
3227 self._current_task = verifier
3228 self.scheduler.fetch.schedule(verifier)
3230 self._start_task(verifier, self._verifier_exit)
3233 self._verifier_exit(verifier)
3235 def _verifier_exit(self, verifier):
3236 if verifier is not None and \
3237 self._default_exit(verifier) != os.EX_OK:
3241 logger = self.logger
3243 pkg_count = self.pkg_count
3244 pkg_path = self._pkg_path
3246 if self._fetched_pkg:
# Register a freshly downloaded package with the local bintree.
3247 self._bintree.inject(pkg.cpv, filename=pkg_path)
3249 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3250 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3251 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3252 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3253 logger.log(msg, short_msg=short_msg)
3255 self._build_dir.lock()
3258 settings = self.settings
3259 settings.setcpv(pkg)
3260 settings["EBUILD"] = self._ebuild_path
3261 ebuild_phase = EbuildPhase(background=self.background,
3262 pkg=pkg, phase=phase, scheduler=self.scheduler,
3263 settings=settings, tree=self._tree)
3265 self._start_task(ebuild_phase, self._clean_exit)
3267 def _clean_exit(self, clean_phase):
# After clean: remove any stale build dir, recreate the working dirs
# owned by the portage user, extract build-info metadata from the xpak,
# and schedule the setup phase.
3268 if self._default_exit(clean_phase) != os.EX_OK:
3269 self._unlock_builddir()
3273 dir_path = self._build_dir.dir_path
3276 shutil.rmtree(dir_path)
3277 except (IOError, OSError), e:
# A missing dir is fine; anything else is unexpected.
3278 if e.errno != errno.ENOENT:
3282 infloc = self._infloc
3284 pkg_path = self._pkg_path
3287 for mydir in (dir_path, self._image_dir, infloc):
3288 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3289 gid=portage.data.portage_gid, mode=dir_mode)
3291 # This initializes PORTAGE_LOG_FILE.
3292 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3293 self._writemsg_level(">>> Extracting info\n")
3295 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3296 check_missing_metadata = ("CATEGORY", "PF")
3297 missing_metadata = set()
3298 for k in check_missing_metadata:
3299 v = pkg_xpak.getfile(k)
3301 missing_metadata.add(k)
3303 pkg_xpak.unpackinfo(infloc)
# Reconstruct any metadata entries the xpak was missing.
3304 for k in missing_metadata:
3312 f = open(os.path.join(infloc, k), 'wb')
3318 # Store the md5sum in the vdb.
3319 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3321 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3325 # This gives bashrc users an opportunity to do various things
3326 # such as remove binary packages after they're installed.
3327 settings = self.settings
3328 settings.setcpv(self.pkg)
3329 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3330 settings.backup_changes("PORTAGE_BINPKG_FILE")
3333 setup_phase = EbuildPhase(background=self.background,
3334 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3335 settings=settings, tree=self._tree)
3337 setup_phase.addExitListener(self._setup_exit)
3338 self._current_task = setup_phase
3339 self.scheduler.scheduleSetup(setup_phase)
3341 def _setup_exit(self, setup_phase):
3342 if self._default_exit(setup_phase) != os.EX_OK:
3343 self._unlock_builddir()
3347 extractor = BinpkgExtractorAsync(background=self.background,
3348 image_dir=self._image_dir,
3349 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3350 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3351 self._start_task(extractor, self._extractor_exit)
3353 def _extractor_exit(self, extractor):
3354 if self._final_exit(extractor) != os.EX_OK:
3355 self._unlock_builddir()
3356 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3360 def _unlock_builddir(self):
# Flush pending elog messages before releasing the build-dir lock.
3361 portage.elog.elog_process(self.pkg.cpv, self.settings)
3362 self._build_dir.unlock()
# install() — synchronous merge of the extracted image (the def line is
# missing from this extract — TODO confirm against full source).
3366 # This gives bashrc users an opportunity to do various things
3367 # such as remove binary packages after they're installed.
3368 settings = self.settings
3369 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3370 settings.backup_changes("PORTAGE_BINPKG_FILE")
3372 merge = EbuildMerge(find_blockers=self.find_blockers,
3373 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3374 pkg=self.pkg, pkg_count=self.pkg_count,
3375 pkg_path=self._pkg_path, scheduler=self.scheduler,
3376 settings=settings, tree=self._tree, world_atom=self.world_atom)
3379 retval = merge.execute()
# Always clear the bashrc hook variable and drop the lock afterwards.
3381 settings.pop("PORTAGE_BINPKG_FILE", None)
3382 self._unlock_builddir()
3385 class BinpkgFetcher(SpawnProcess):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Downloads a binary package from PORTAGE_BINHOST by spawning the
# configured FETCHCOMMAND/RESUMECOMMAND, with optional in-process file
# locking around the target path.
3388 "locked", "pkg_path", "_lock_obj")
3390 def __init__(self, **kwargs):
3391 SpawnProcess.__init__(self, **kwargs)
# Destination path inside the local bintree for this cpv.
3393 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start(): build the fetch URI and command line, then spawn it.
3401 bintree = pkg.root_config.trees["bintree"]
3402 settings = bintree.settings
3403 use_locks = "distlocks" in settings.features
3404 pkg_path = self.pkg_path
# An existing partial file switches us to the resume command.
3405 resume = os.path.exists(pkg_path)
3407 # urljoin doesn't work correctly with
3408 # unrecognized protocols like sftp
3409 if bintree._remote_has_index:
3410 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3412 rel_uri = pkg.cpv + ".tbz2"
3413 uri = bintree._remote_base_uri.rstrip("/") + \
3414 "/" + rel_uri.lstrip("/")
3416 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3417 "/" + pkg.pf + ".tbz2"
3419 protocol = urlparse.urlparse(uri)[0]
3420 fcmd_prefix = "FETCHCOMMAND"
3422 fcmd_prefix = "RESUMECOMMAND"
# Prefer a protocol-specific command (e.g. FETCHCOMMAND_HTTP), falling
# back to the generic one.
3423 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3425 fcmd = settings.get(fcmd_prefix)
3428 "DISTDIR" : os.path.dirname(pkg_path),
3430 "FILE" : os.path.basename(pkg_path)
# Expand ${DISTDIR}/${URI}/${FILE} placeholders into the argv list.
3433 fetch_env = dict(settings.iteritems())
3434 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3435 for x in shlex.split(fcmd)]
3437 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3441 if self.fd_pipes is None:
3443 fd_pipes = self.fd_pipes
3445 # Redirect all output to stdout since some fetchers like
3446 # wget pollute stderr (if portage detects a problem then it
3447 # can send it's own message to stderr).
3448 fd_pipes.setdefault(0, sys.stdin.fileno())
3449 fd_pipes.setdefault(1, sys.stdout.fileno())
3450 fd_pipes.setdefault(2, sys.stdout.fileno())
3452 self.args = fetch_args
3453 self.env = fetch_env
3454 SpawnProcess._start(self)
3456 def _set_returncode(self, wait_retval):
3457 SpawnProcess._set_returncode(self, wait_retval)
# lock() — takes an in-process lock on pkg_path (docstring below survives
# from the original; the def line is missing from this extract).
3463 This raises an AlreadyLocked exception if lock() is called
3464 while a lock is already held. In order to avoid this, call
3465 unlock() or check whether the "locked" attribute is True
3466 or False before calling lock().
3468 if self._lock_obj is not None:
3469 raise self.AlreadyLocked((self._lock_obj,))
3471 self._lock_obj = portage.locks.lockfile(
3472 self.pkg_path, wantnewlockfile=1)
# Nested exception type used by lock() above.
3475 class AlreadyLocked(portage.exception.PortageException):
# unlock() — release the lock if held; safe to call when not locked.
3479 if self._lock_obj is None:
3481 portage.locks.unlockfile(self._lock_obj)
3482 self._lock_obj = None
3485 class BinpkgVerifier(AsynchronousTask):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Verifies a binary package's digests via bintree.digestCheck(), reporting
# FileNotFound/DigestException failures.  Output is redirected to the log
# file when running in the background.
3486 __slots__ = ("logfile", "pkg",)
3490 Note: Unlike a normal AsynchronousTask.start() method,
3491 this one does all work is synchronously. The returncode
3492 attribute will be set before it returns.
3496 root_config = pkg.root_config
3497 bintree = root_config.trees["bintree"]
# Save the real stdout/stderr so they can be restored after the check.
3499 stdout_orig = sys.stdout
3500 stderr_orig = sys.stderr
3502 if self.background and self.logfile is not None:
3503 log_file = open(self.logfile, 'a')
3505 if log_file is not None:
3506 sys.stdout = log_file
3507 sys.stderr = log_file
3509 bintree.digestCheck(pkg)
3510 except portage.exception.FileNotFound:
3511 writemsg("!!! Fetching Binary failed " + \
3512 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3514 except portage.exception.DigestException, e:
# DigestException.value carries (filename, reason, got, expected).
3515 writemsg("\n!!! Digest verification failed:\n",
3517 writemsg("!!! %s\n" % e.value[0],
3519 writemsg("!!! Reason: %s\n" % e.value[1],
3521 writemsg("!!! Got: %s\n" % e.value[2],
3523 writemsg("!!! Expected: %s\n" % e.value[3],
# Restore the original streams regardless of outcome.
3527 sys.stdout = stdout_orig
3528 sys.stderr = stderr_orig
3529 if log_file is not None:
3532 self.returncode = rval
3535 class BinpkgExtractorAsync(SpawnProcess):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
#
# Extracts a .tbz2 image into image_dir by spawning a bash pipeline
# (bzip2 decompress piped into tar), with the arguments shell-quoted.
3537 __slots__ = ("image_dir", "pkg", "pkg_path")
3539 _shell_binary = portage.const.BASH_BINARY
# _start(): compose the shell command and delegate to SpawnProcess.
3542 self.args = [self._shell_binary, "-c",
3543 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3544 (portage._shell_quote(self.pkg_path),
3545 portage._shell_quote(self.image_dir))]
3547 self.env = self.pkg.root_config.settings.environ()
3548 SpawnProcess._start(self)
3550 class MergeListItem(CompositeTask):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
3553 TODO: For parallel scheduling, everything here needs asynchronous
3554 execution support (start, poll, and wait methods).
# One entry of the merge list: dispatches to EbuildBuild for source
# packages, Binpkg for binary packages, or PackageUninstall for uninstall
# operations, and emits the per-package status/log messages.
3557 __slots__ = ("args_set",
3558 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3559 "find_blockers", "logger", "mtimedb", "pkg",
3560 "pkg_count", "pkg_to_replace", "prefetcher",
3561 "settings", "statusMessage", "world_atom") + \
# _start(): announce the operation and launch the matching install task.
3567 build_opts = self.build_opts
3570 # uninstall, executed by self.merge()
3571 self.returncode = os.EX_OK
3575 args_set = self.args_set
3576 find_blockers = self.find_blockers
3577 logger = self.logger
3578 mtimedb = self.mtimedb
3579 pkg_count = self.pkg_count
3580 scheduler = self.scheduler
3581 settings = self.settings
3582 world_atom = self.world_atom
3583 ldpath_mtimes = mtimedb["ldpath"]
3585 action_desc = "Emerging"
3587 if pkg.type_name == "binary":
3588 action_desc += " binary"
3590 if build_opts.fetchonly:
3591 action_desc = "Fetching"
3593 msg = "%s (%s of %s) %s" % \
3595 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3596 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3597 colorize("GOOD", pkg.cpv))
3600 msg += " %s %s" % (preposition, pkg.root)
3602 if not build_opts.pretend:
3603 self.statusMessage(msg)
3604 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3605 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3607 if pkg.type_name == "ebuild":
# Source package: full build pipeline.
3609 build = EbuildBuild(args_set=args_set,
3610 background=self.background,
3611 config_pool=self.config_pool,
3612 find_blockers=find_blockers,
3613 ldpath_mtimes=ldpath_mtimes, logger=logger,
3614 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3615 prefetcher=self.prefetcher, scheduler=scheduler,
3616 settings=settings, world_atom=world_atom)
3618 self._install_task = build
3619 self._start_task(build, self._default_final_exit)
3622 elif pkg.type_name == "binary":
# Binary package: fetch/verify/extract pipeline.
3624 binpkg = Binpkg(background=self.background,
3625 find_blockers=find_blockers,
3626 ldpath_mtimes=ldpath_mtimes, logger=logger,
3627 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3628 prefetcher=self.prefetcher, settings=settings,
3629 scheduler=scheduler, world_atom=world_atom)
3631 self._install_task = binpkg
3632 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the active install task.
3636 self._install_task.poll()
3637 return self.returncode
3640 self._install_task.wait()
3641 return self.returncode
# merge() — synchronous completion step (the def line is missing from
# this extract — TODO confirm against full source): runs the uninstall
# for "uninstall" operations, otherwise delegates to the install task.
3646 build_opts = self.build_opts
3647 find_blockers = self.find_blockers
3648 logger = self.logger
3649 mtimedb = self.mtimedb
3650 pkg_count = self.pkg_count
3651 prefetcher = self.prefetcher
3652 scheduler = self.scheduler
3653 settings = self.settings
3654 world_atom = self.world_atom
3655 ldpath_mtimes = mtimedb["ldpath"]
# Uninstalls are skipped in buildpkgonly/fetchonly/pretend modes.
3658 if not (build_opts.buildpkgonly or \
3659 build_opts.fetchonly or build_opts.pretend):
3661 uninstall = PackageUninstall(background=self.background,
3662 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3663 pkg=pkg, scheduler=scheduler, settings=settings)
3666 retval = uninstall.wait()
3667 if retval != os.EX_OK:
3671 if build_opts.fetchonly or \
3672 build_opts.buildpkgonly:
3673 return self.returncode
3675 retval = self._install_task.install()
3678 class PackageMerge(AsynchronousTask):
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines are missing; comments are review annotations.
3680 TODO: Implement asynchronous merge so that the scheduler can
3681 run while a merge is executing.
# Wraps a MergeListItem's merge() step: prints the Installing/Uninstalling
# status line, then runs the merge synchronously and records its code.
3684 __slots__ = ("merge",)
3688 pkg = self.merge.pkg
3689 pkg_count = self.merge.pkg_count
3692 action_desc = "Uninstalling"
3693 preposition = "from"
3695 action_desc = "Installing"
3698 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3701 msg += " %s %s" % (preposition, pkg.root)
# Status output is suppressed in fetchonly/pretend/buildpkgonly modes.
3703 if not self.merge.build_opts.fetchonly and \
3704 not self.merge.build_opts.pretend and \
3705 not self.merge.build_opts.buildpkgonly:
3706 self.merge.statusMessage(msg)
3708 self.returncode = self.merge.merge()
# NOTE(review): sampled extract — leading integers are original line numbers
# and some interior lines (e.g. the `self.arg = arg` assignment, presumably)
# are missing; comments are review annotations.
#
# Base class for command-line dependency arguments, carrying the raw arg
# string and the root_config it applies to.
3711 class DependencyArg(object):
3712 def __init__(self, arg=None, root_config=None):
3714 self.root_config = root_config
# A dependency argument that is a single package atom; normalizes the atom
# to a portage.dep.Atom instance and exposes it as a one-element set.
# (NOTE(review): sampled extract — an interior assignment line is missing.)
3719 class AtomArg(DependencyArg):
3720 def __init__(self, atom=None, **kwargs):
3721 DependencyArg.__init__(self, **kwargs)
3723 if not isinstance(self.atom, portage.dep.Atom):
3724 self.atom = portage.dep.Atom(self.atom)
3725 self.set = (self.atom, )
# A dependency argument naming a specific package instance; derives an
# exact "=" atom from the package's cpv.
# (NOTE(review): sampled extract — leading integers are original line numbers.)
3727 class PackageArg(DependencyArg):
3728 def __init__(self, package=None, **kwargs):
3729 DependencyArg.__init__(self, **kwargs)
3730 self.package = package
3731 self.atom = portage.dep.Atom("=" + package.cpv)
3732 self.set = (self.atom, )
# A dependency argument that is a package set (e.g. "@world"); the set
# name is the arg with the SETPREFIX stripped.
# (NOTE(review): sampled extract — interior lines are missing.)
3734 class SetArg(DependencyArg):
3735 def __init__(self, set=None, **kwargs):
3736 DependencyArg.__init__(self, **kwargs)
3738 self.name = self.arg[len(SETPREFIX):]
# Value object describing one dependency edge in the resolver graph;
# defaults priority to DepPriority() (and, presumably, depth to a numeric
# default in the missing branch body — TODO confirm against full source).
# (NOTE(review): sampled extract — leading integers are original line
# numbers and the body of the final `if` is missing.)
3740 class Dependency(SlotObject):
3741 __slots__ = ("atom", "blocker", "depth",
3742 "parent", "onlydeps", "priority", "root")
3743 def __init__(self, **kwargs):
3744 SlotObject.__init__(self, **kwargs)
3745 if self.priority is None:
3746 self.priority = DepPriority()
3747 if self.depth is None:
class BlockerCache(DictMixin):
	"""This caches blockers of installed packages so that dep_check does not
	have to be done for every single installed package on every invocation of
	emerge. The cache is invalidated whenever it is detected that something
	has changed that might alter the results of dep_check() calls:
	1) the set of installed packages (including COUNTER) has changed
	2) the old-style virtuals have changed
	"""
	# NOTE(review): this excerpt omits many structural lines (try/else/
	# continue statements, def lines and assignments); gaps are annotated
	# and indentation is reconstructed — confirm against the full source.

	# Number of uncached packages to trigger cache update, since
	# it's wasteful to update it for every vdb change.
	_cache_threshold = 5

	class BlockerData(object):
		# Record of one package's blocker atoms and the COUNTER value
		# they were computed from.
		__slots__ = ("__weakref__", "atoms", "counter")

		def __init__(self, counter, atoms):
			self.counter = counter
			# NOTE(review): "self.atoms = atoms" is missing from this
			# excerpt.

	def __init__(self, myroot, vardb):
		self._virtuals = vardb.settings.getvirtuals()
		self._cache_filename = os.path.join(myroot,
			portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
		self._cache_version = "1"
		self._cache_data = None
		self._modified = set()
		# Load the pickled on-disk cache (the enclosing try: is missing
		# from this excerpt).  find_global = None prevents the unpickler
		# from resolving arbitrary globals in untrusted data.
		f = open(self._cache_filename)
		mypickle = pickle.Unpickler(f)
		mypickle.find_global = None
		self._cache_data = mypickle.load()
		except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
			if isinstance(e, pickle.UnpicklingError):
				writemsg("!!! Error loading '%s': %s\n" % \
					(self._cache_filename, str(e)), noiselevel=-1)

		# The cache is only usable when it is a dict of the expected
		# version containing a "blockers" dict.
		cache_valid = self._cache_data and \
			isinstance(self._cache_data, dict) and \
			self._cache_data.get("version") == self._cache_version and \
			isinstance(self._cache_data.get("blockers"), dict)
		# Validate all the atoms and counters so that
		# corruption is detected as soon as possible.
		invalid_items = set()
		for k, v in self._cache_data["blockers"].iteritems():
			if not isinstance(k, basestring):
				invalid_items.add(k)
			if portage.catpkgsplit(k) is None:
				invalid_items.add(k)
			except portage.exception.InvalidData:
				invalid_items.add(k)
			if not isinstance(v, tuple) or \
				invalid_items.add(k)
			if not isinstance(counter, (int, long)):
				invalid_items.add(k)
			if not isinstance(atoms, (list, tuple)):
				invalid_items.add(k)
			invalid_atom = False
			if not isinstance(atom, basestring):
			if atom[:1] != "!" or \
				not portage.isvalidatom(
					atom, allow_blockers=True):
				invalid_items.add(k)

		# Drop every entry that failed validation above.
		for k in invalid_items:
			del self._cache_data["blockers"][k]
		if not self._cache_data["blockers"]:
		# (Re)initialize an empty cache when the on-disk data was absent
		# or invalid (the else: line is missing from this excerpt).
		self._cache_data = {"version":self._cache_version}
		self._cache_data["blockers"] = {}
		self._cache_data["virtuals"] = self._virtuals
		self._modified.clear()

		# --- flush() (its def line is missing from this excerpt) ---
		"""If the current user has permission and the internal blocker cache
		been updated, save it to disk and mark it unmodified. This is called
		by emerge after it has proccessed blockers for all installed packages.
		Currently, the cache is only written if the user has superuser
		privileges (since that's required to obtain a lock), but all users
		have read access and benefit from faster blocker lookups (as long as
		the entire cache is still valid). The cache is stored as a pickled
		dict object with the following format:

			"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
			"virtuals" : vardb.settings.getvirtuals()
		"""
		# Only write when enough new entries accumulated to be worth it
		# (the secpass check / try: lines are missing from this excerpt).
		if len(self._modified) >= self._cache_threshold and \
			f = portage.util.atomic_ofstream(self._cache_filename)
			pickle.dump(self._cache_data, f, -1)
			portage.util.apply_secpass_permissions(
				self._cache_filename, gid=portage.portage_gid, mode=0644)
			except (IOError, OSError), e:
			self._modified.clear()

	def __setitem__(self, cpv, blocker_data):
		"""
		Update the cache and mark it as modified for a future call to
		flush().

		@param cpv: Package for which to cache blockers.
		@param blocker_data: An object with counter and atoms attributes.
		@type blocker_data: BlockerData
		"""
		# Store atoms as plain strings so the pickle stays portable.
		self._cache_data["blockers"][cpv] = \
			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
		self._modified.add(cpv)

	# __iter__ (def line missing from this excerpt): iterate cached cpvs.
		return iter(self._cache_data["blockers"])

	def __delitem__(self, cpv):
		# Remove a single package's cached blockers.
		del self._cache_data["blockers"][cpv]

	def __getitem__(self, cpv):
		"""
		@returns: An object with counter and atoms attributes.
		"""
		return self.BlockerData(*self._cache_data["blockers"][cpv])

	# keys() (def line missing from this excerpt):
	"""This needs to be implemented so that self.__repr__() doesn't raise
	an AttributeError."""
class BlockerDB(object):
	# Computes which installed packages block (or are blocked by) a new
	# package, using BlockerCache to avoid repeated dep_check() work.
	# NOTE(review): this excerpt omits several structural lines (try/else/
	# return statements); gaps are annotated and indentation reconstructed.

	def __init__(self, root_config):
		self._root_config = root_config
		self._vartree = root_config.trees["vartree"]
		self._portdb = root_config.trees["porttree"].dbapi
		# Lazily created by _get_fake_vartree().
		self._dep_check_trees = None
		self._fake_vartree = None

	def _get_fake_vartree(self, acquire_lock=0):
		# Create the FakeVartree on first use; afterwards just resync it.
		fake_vartree = self._fake_vartree
		if fake_vartree is None:
			fake_vartree = FakeVartree(self._root_config,
				acquire_lock=acquire_lock)
			self._fake_vartree = fake_vartree
			# dep_check() sees the fake vartree as both port and var tree.
			self._dep_check_trees = { self._vartree.root : {
				"porttree" : fake_vartree,
				"vartree" : fake_vartree,
			# NOTE(review): closing braces, the else: branch and the
			# return statement are missing from this excerpt.
			fake_vartree.sync(acquire_lock=acquire_lock)

	def findInstalledBlockers(self, new_pkg, acquire_lock=0):
		# Returns the set of installed packages that block new_pkg or
		# that new_pkg blocks.
		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
		settings = self._vartree.settings
		stale_cache = set(blocker_cache)
		fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
		dep_check_trees = self._dep_check_trees
		vardb = fake_vartree.dbapi
		installed_pkgs = list(vardb)

		# Refresh the blocker cache for every installed package.
		for inst_pkg in installed_pkgs:
			stale_cache.discard(inst_pkg.cpv)
			cached_blockers = blocker_cache.get(inst_pkg.cpv)
			# A COUNTER mismatch means the package was re-merged since
			# the cache entry was written, so the entry is stale.
			if cached_blockers is not None and \
				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
				cached_blockers = None
			if cached_blockers is not None:
				blocker_atoms = cached_blockers.atoms
				# (else branch for a cache miss; the else: line is
				# missing from this excerpt)
				# Use aux_get() to trigger FakeVartree global
				# updates on *DEPEND when appropriate.
				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
				portage.dep._dep_check_strict = False
				success, atoms = portage.dep_check(depstr,
					vardb, settings, myuse=inst_pkg.use.enabled,
					trees=dep_check_trees, myroot=inst_pkg.root)
				portage.dep._dep_check_strict = True
				# Failure path (the "if not success:" line is missing):
				# report the broken *DEPEND of the installed package.
				pkg_location = os.path.join(inst_pkg.root,
					portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
				portage.writemsg("!!! %s/*DEPEND: %s\n" % \
					(pkg_location, atoms), noiselevel=-1)
				# Keep only blocker ("!") atoms and cache them.
				blocker_atoms = [atom for atom in atoms \
					if atom.startswith("!")]
				blocker_atoms.sort()
				counter = long(inst_pkg.metadata["COUNTER"])
				blocker_cache[inst_pkg.cpv] = \
					blocker_cache.BlockerData(counter, blocker_atoms)
		# Evict entries for packages no longer installed, then persist.
		for cpv in stale_cache:
			del blocker_cache[cpv]
		blocker_cache.flush()

		# Map each blocker atom to the installed packages declaring it.
		blocker_parents = digraph()
		# NOTE(review): the "blocker_atoms = []" initialization is
		# missing from this excerpt.
		for pkg in installed_pkgs:
			for blocker_atom in blocker_cache[pkg.cpv].atoms:
				blocker_atom = blocker_atom.lstrip("!")
				blocker_atoms.append(blocker_atom)
				blocker_parents.add(blocker_atom, pkg)

		# Installed packages whose blockers match new_pkg.
		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		blocking_pkgs = set()
		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
			blocking_pkgs.update(blocker_parents.parent_nodes(atom))

		# Check for blockers in the other direction.
		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
		portage.dep._dep_check_strict = False
		success, atoms = portage.dep_check(depstr,
			vardb, settings, myuse=new_pkg.use.enabled,
			trees=dep_check_trees, myroot=new_pkg.root)
		portage.dep._dep_check_strict = True
		# Failure path (condition line missing from this excerpt):
		# We should never get this far with invalid deps.
		show_invalid_depstring_notice(new_pkg, depstr, atoms)

		# Blockers declared by new_pkg that match installed packages
		# (the list-comprehension condition line is missing here).
		blocker_atoms = [atom.lstrip("!") for atom in atoms \
		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		for inst_pkg in installed_pkgs:
			# EAFP probe: next() raising StopIteration means no match
			# (the try: line is missing from this excerpt).
			blocker_atoms.iterAtomsForPackage(inst_pkg).next()
			except (portage.exception.InvalidDependString, StopIteration):
			blocking_pkgs.add(inst_pkg)

		return blocking_pkgs
def show_invalid_depstring_notice(parent_node, depstring, error_msg):
	# Print an error block explaining an invalid/corrupt dependency
	# specification, with advice tailored to whether the offending
	# package is already installed ("nomerge") or not.
	# NOTE(review): the "msg = []" initialization and the else: line are
	# missing from this excerpt.

	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
		"\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
	p_type, p_root, p_key, p_status = parent_node
	if p_status == "nomerge":
		# Installed package: point the user at the vdb *DEPEND files.
		category, pf = portage.catsplit(p_key)
		pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
		msg.append("Portage is unable to process the dependencies of the ")
		msg.append("'%s' package. " % p_key)
		msg.append("In order to correct this problem, the package ")
		msg.append("should be uninstalled, reinstalled, or upgraded. ")
		msg.append("As a temporary workaround, the --nodeps option can ")
		msg.append("be used to ignore all dependencies. For reference, ")
		msg.append("the problematic dependencies can be found in the ")
		msg.append("*DEPEND files located in '%s/'." % pkg_location)
		# (else branch: package is not yet installed)
		msg.append("This package can not be installed. ")
		msg.append("Please notify the '%s' package maintainer " % p_key)
		msg.append("about this problem.")

	# Wrap the advice to 72 columns and emit both parts at ERROR level.
	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
class PackageVirtualDbapi(portage.dbapi):
	"""
	A dbapi-like interface class that represents the state of the installed
	package database as new packages are installed, replacing any packages
	that previously existed in the same slot. The main difference between
	this class and fakedbapi is that this one uses Package instances
	internally (passed in via cpv_inject() and cpv_remove() calls).
	"""
	# NOTE(review): this excerpt omits several lines (def lines, return
	# statements, if bodies); gaps are annotated and indentation
	# reconstructed — confirm against the full source.

	def __init__(self, settings):
		portage.dbapi.__init__(self)
		self.settings = settings
		# Memoizes match() results per origdep.
		self._match_cache = {}
		# NOTE(review): "_cp_map"/"_cpv_map" initializations are missing
		# from this excerpt.

	# clear() (def line missing from this excerpt):
		"""
		Remove all packages.
		"""
		self._cp_map.clear()
		self._cpv_map.clear()

	# copy() (def line missing from this excerpt): deep-ish copy with
	# per-cp lists duplicated so mutations don't leak between copies.
		obj = PackageVirtualDbapi(self.settings)
		obj._match_cache = self._match_cache.copy()
		obj._cp_map = self._cp_map.copy()
		for k, v in obj._cp_map.iteritems():
			obj._cp_map[k] = v[:]
		obj._cpv_map = self._cpv_map.copy()

	# __iter__ (def line missing from this excerpt):
		return self._cpv_map.itervalues()

	def __contains__(self, item):
		# Identity-based membership: the same Package instance must be
		# registered under its cpv (comparison lines missing here).
		existing = self._cpv_map.get(item.cpv)
		if existing is not None and \

	def get(self, item, default=None):
		# Accepts either a Package-like object (with a cpv attribute) or
		# a (type_name, root, cpv, operation) tuple.
		cpv = getattr(item, "cpv", None)
		type_name, root, cpv, operation = item

		existing = self._cpv_map.get(cpv)
		if existing is not None and \

	def match_pkgs(self, atom):
		# Like match(), but returns Package instances instead of cpvs.
		return [self._cpv_map[cpv] for cpv in self.match(atom)]

	def _clear_cache(self):
		# Invalidate memoized category and match data after a mutation.
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		# Memoized wrapper around portage.dbapi.match().
		result = self._match_cache.get(origdep)
		if result is not None:
		result = portage.dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = result

	def cpv_exists(self, cpv):
		return cpv in self._cpv_map

	def cp_list(self, mycp, use_cache=1):
		# Reuse a cached match() result when it looks like a plain
		# cp match.
		cachelist = self._match_cache.get(mycp)
		# cp_list() doesn't expand old-style virtuals
		if cachelist and cachelist[0].startswith(mycp):
		cpv_list = self._cp_map.get(mycp)
		if cpv_list is None:
		cpv_list = [pkg.cpv for pkg in cpv_list]
		self._cpv_sort_ascending(cpv_list)
		# Don't cache an empty result for virtuals since expansion
		# could later produce matches.
		if not (not cpv_list and mycp.startswith("virtual/")):
			self._match_cache[mycp] = cpv_list

	# cp_all() (def line missing from this excerpt):
		return list(self._cp_map)

	# cpv_all() (def line missing from this excerpt):
		return list(self._cpv_map)

	def cpv_inject(self, pkg):
		# Register pkg, replacing any existing package in the same cpv
		# or slot (several branch lines are missing from this excerpt).
		cp_list = self._cp_map.get(pkg.cp)
		self._cp_map[pkg.cp] = cp_list
		e_pkg = self._cpv_map.get(pkg.cpv)
		if e_pkg is not None:
			self.cpv_remove(e_pkg)
		for e_pkg in cp_list:
			if e_pkg.slot_atom == pkg.slot_atom:
				self.cpv_remove(e_pkg)
		self._cpv_map[pkg.cpv] = pkg

	def cpv_remove(self, pkg):
		# Unregister pkg (identity check lines missing from excerpt).
		old_pkg = self._cpv_map.get(pkg.cpv)
		self._cp_map[pkg.cp].remove(pkg)
		del self._cpv_map[pkg.cpv]

	def aux_get(self, cpv, wants):
		# Read metadata values, defaulting missing keys to "".
		metadata = self._cpv_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self._cpv_map[cpv].metadata.update(values)
class depgraph(object):
	# Builds and resolves the dependency graph for an emerge invocation.
	# NOTE(review): this excerpt omits many lines (assignments such as
	# self.mydbapi/self.trees/self.roots/dbs, def lines for the local
	# graph_tree/filtered_tree helpers, etc.); gaps are annotated and
	# indentation reconstructed — confirm against the full source.

	pkg_tree_map = RootConfig.pkg_tree_map

	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

	def __init__(self, settings, trees, myopts, myparams, spinner):
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = Scheduler._opts_no_restart
		self.pkgsettings = {}
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		self._trees_orig = trees
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		self._graph_trees = {}
		# All Package instances
		self._pkg_cache = self._package_cache(self)
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			self._slot_pkg_map[myroot] = {}
			vardb = self.trees[myroot]["vartree"].dbapi
			preload_installed_pkgs = "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			if preload_installed_pkgs:
				# (inner "for pkg in vardb:" loop header missing here)
				self.spinner.update()
				# This triggers metadata updates via FakeVartree.
				vardb.aux_get(pkg.cpv, [])
				fakedb.cpv_inject(pkg)
				# Now that the vardb state is cached in our FakeVartree,
				# we won't be needing the real vartree cache for awhile.
				# To make some room on the heap, clear the vardbapi
				trees[myroot]["vartree"].dbapi._clear_cache()
			self.mydbapi[myroot] = fakedb
			# (local graph_tree() helper def is missing from excerpt)
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			def filtered_tree():
			filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]

			portdb = self.trees[myroot]["porttree"].dbapi
			bindb = self.trees[myroot]["bintree"].dbapi
			vardb = self.trees[myroot]["vartree"].dbapi
			# (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in self.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in self.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			if "--usepkg" in self.myopts:
				self.trees[myroot]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)

		self.digraph=portage.digraph()
		# contains all sets added to the graph
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._required_set_names = set(["system", "world"])
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		self._highest_pkg_cache = {}
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible

		When a slot conflict occurs due to USE deps, there are a few
		different cases to consider:

		1) New USE are correctly set but --newuse wasn't requested so an
		installed package with incorrect USE happened to get pulled
		into graph before the new one.

		2) New USE are incorrectly set but an installed package has correct
		USE so it got pulled into the graph, and a new instance also got
		pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied simultaneously,
		and multiple package instances got pulled into the same slot to
		satisfy the conflicting deps.

		TODO: Distinguish the above cases and tailor messages to suit them.
		"""
		# NOTE(review): this excerpt omits a number of lines (msg/indent/
		# max_parents initializations, break/else lines); indentation is
		# reconstructed — confirm against the full source.
		if not self._slot_collision_info:
		self._show_merge_list()
		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")
		# Max number of parents shown, to avoid flooding the display.
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():
			msg.append(str(slot_atom))
			for node in slot_nodes:
				msg.append(str(node))
				parent_atoms = self._parent_atoms.get(node)
				# Prefer conflict atoms over others.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					if parent_atom in self._slot_conflict_parent_atoms:
						pruned_list.add(parent_atom)
				# If this package was pulled in by conflict atoms then
				# show those alone since those are the most interesting.
				# When generating the pruned list, prefer instances
				# of DependencyArg over instances of Package.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					parent, atom = parent_atom
					if isinstance(parent, DependencyArg):
						pruned_list.add(parent_atom)
				# Prefer Packages instances that themselves have been
				# pulled into collision slots.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					parent, atom = parent_atom
					if isinstance(parent, Package) and \
						(parent.slot_atom, parent.root) \
						in self._slot_collision_info:
						pruned_list.add(parent_atom)
				# Fallback: fill the remaining slots with any parents.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
					pruned_list.add(parent_atom)
				omitted_parents = len(parent_atoms) - len(pruned_list)
				parent_atoms = pruned_list
				msg.append(" pulled in by\n")
				for parent_atom in parent_atoms:
					parent, atom = parent_atom
					msg.append(2*indent)
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg types, it's
						# redundant to display the atom attribute.
						msg.append(str(parent))
					# Display the specific atom from SetArg or
					msg.append("%s required by %s" % (atom, parent))
				# Summarize any parents omitted by the pruning above.
				msg.append(2*indent)
				msg.append("(and %d more)\n" % omitted_parents)
				msg.append(" (no parents)\n")
		sys.stderr.write("".join(msg))

		if "--quiet" in self.myopts:

		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")

		# Reflow the advice text to the terminal via a dumb formatter.
		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
		f.add_flowing_data(x)

		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		f.add_flowing_data(x)
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		"""
		# NOTE(review): two "continue" lines are missing from this excerpt.
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Union of every parent atom across all packages in this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
				all_parent_atoms.update(parent_atoms)

			# For each package, classify the parent atoms it does not
			# already carry.
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
						# NOTE(review): an intervening line is missing
						# here; in the full source the conflict-atom
						# registration is the non-matching branch.
						self._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags."""
		# NOTE(review): the return statements of both branches are missing
		# from this excerpt.
		if "--newuse" in self.myopts:
			# Any IUSE change (ignoring forced flags), plus any change in
			# the effective enabled set, triggers reinstall.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
		elif "changed-use" == self.myopts.get("--reinstall"):
			# Only changes in enabled flags count for --reinstall=changed-use.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
	def _create_graph(self, allow_unsatisfied=False):
		# Drain the dependency stack, adding packages and dependency
		# edges to the graph.
		# NOTE(review): the loop header ("while dep_stack:") and the
		# return statements are missing from this excerpt.
		dep_stack = self._dep_stack
			self.spinner.update()
			dep = dep_stack.pop()
			# A Package entry means its own dependencies still need to
			# be expanded; anything else is a Dependency edge to resolve.
			if isinstance(dep, Package):
				if not self._add_pkg_deps(dep,
					allow_unsatisfied=allow_unsatisfied):
			if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
	def _add_dep(self, dep, allow_unsatisfied=False):
		# Resolve a single Dependency edge: register blockers, select a
		# matching package, and queue it for graph insertion.
		# NOTE(review): this excerpt omits several lines (the dep.blocker
		# branch header, try:, return statements); indentation is
		# reconstructed — confirm against the full source.
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		update = "--update" in self.myopts and dep.depth <= 1
		# --- blocker handling (the "if dep.blocker:" line is missing) ---
		if not buildpkgonly and \
			dep.parent not in self._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.
			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				root=dep.parent.root)
			self._blocker_parents.add(blocker, dep.parent)

		# --- normal dependency: pick the best matching package ---
		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
		# Unsatisfiable path (condition line missing from excerpt):
		if allow_unsatisfied:
			self._unsatisfied_deps.append(dep)
		self._unsatisfied_deps_for_display.append(
			((dep.root, dep.atom), {"myparent":dep.parent}))

		# In some cases, dep_check will return deps that shouldn't
		# be proccessed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not (existing_node or empty or deep or update):
			if dep.root == self.target_root:
				# EAFP probe for a matching argument atom (the try: line
				# is missing from this excerpt).
				myarg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
			self._ignored_deps.append(dep)

		if not self._add_pkg(dep_pkg, dep):
	def _add_pkg(self, pkg, dep):
		# Insert pkg (and the edge from dep.parent) into the dependency
		# graph, handling slot collisions and argument atoms.
		# NOTE(review): this excerpt omits many lines (try:, else:,
		# return statements, several condition headers); gaps are
		# annotated and indentation reconstructed.
		myparent = dep.parent
		priority = dep.priority
		if priority is None:
			priority = DepPriority()
		"""
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
		Think --onlydeps, we need to ignore packages in that case.
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge
		"""
		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self.pkgsettings[pkg.root]

		# Collect argument atoms matching this package (the enclosing
		# try: line is missing from this excerpt).
		arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString, e:
			if not pkg.installed:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		if not pkg.onlydeps:
			if not pkg.installed and \
				"empty" not in self.myparams and \
				vardbapi.match(pkg.slot_atom):
				# Increase the priority of dependencies on packages that
				# are being rebuilt. This optimizes merge order so that
				# dependencies are rebuilt/updated as soon as possible,
				# which is needed especially when emerge is called by
				# revdep-rebuild since dependencies may be affected by ABI
				# breakage that has rendered them useless. Don't adjust
				# priority here when in "empty" mode since all packages
				# are being merged in that case.
				priority.rebuild = True

			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
			slot_collision = False
			# (the "if existing_node:" header is missing here)
			existing_node_matches = pkg.cpv == existing_node.cpv
			if existing_node_matches and \
				pkg != existing_node and \
				dep.atom is not None:
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				atom_set = InternalPackageSet(initial_atoms=[dep.atom])
				if not atom_set.findAtomForPackage(existing_node):
					existing_node_matches = False
			if existing_node_matches:
				# The existing node can be reused.
				for parent_atom in arg_atoms:
					parent, atom = parent_atom
					self.digraph.add(existing_node, parent,
					self._add_parent_atom(existing_node, parent_atom)
				# If a direct circular dependency is not an unsatisfied
				# buildtime dependency then drop it here since otherwise
				# it can skew the merge order calculation in an unwanted
				if existing_node != myparent or \
					(priority.buildtime and not priority.satisfied):
					self.digraph.addnode(existing_node, myparent,
					if dep.atom is not None and dep.parent is not None:
						self._add_parent_atom(existing_node,
							(dep.parent, dep.atom))

				# A slot collision has occurred. Sometimes this coincides
				# with unresolvable blockers, so the slot collision will be
				# shown later if there are no unresolvable blockers.
				self._add_slot_conflict(pkg)
				slot_collision = True

			# Now add this node to the graph so that self.display()
			# can show use flags and --tree portage.output. This node is
			# only being partially added to the graph. It must not be
			# allowed to interfere with the other nodes that have been
			# added. Do not overwrite data for existing nodes in
			# self.mydbapi since that data will be used for blocker
			# Even though the graph is now invalid, continue to process
			# dependencies so that things like --fetchonly can still
			# function despite collisions.
			# (non-collision branch; the else: line is missing here)
			self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
			self.mydbapi[pkg.root].cpv_inject(pkg)

			if not pkg.installed:
				# Allow this package to satisfy old-style virtuals in case it
				# doesn't already. Any pre-existing providers will be preferred
				# (the try: line is missing from this excerpt)
				pkgsettings.setinst(pkg.cpv, pkg.metadata)
				# For consistency, also update the global virtuals.
				settings = self.roots[pkg.root].settings
				settings.setinst(pkg.cpv, pkg.metadata)
				except portage.exception.InvalidDependString, e:
					show_invalid_depstring_notice(
						pkg, pkg.metadata["PROVIDE"], str(e))

		# (guarded by "if arg_atoms:", missing from this excerpt)
		self._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

		for parent_atom in arg_atoms:
			parent, atom = parent_atom
			self.digraph.add(pkg, parent, priority=priority)
			self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		We want to go deeper on a few occasions:
		Installing package A, we need to make sure package A's deps are met.
		emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		dep_stack = self._dep_stack
		if "recurse" not in self.myparams:
		elif pkg.installed and \
			"deep" not in self.myparams:
			dep_stack = self._ignored_deps

		self.spinner.update()

		if not previously_added:
			dep_stack.append(pkg)
4771 def _add_parent_atom(self, pkg, parent_atom):
4772 parent_atoms = self._parent_atoms.get(pkg)
4773 if parent_atoms is None:
4774 parent_atoms = set()
4775 self._parent_atoms[pkg] = parent_atoms
4776 parent_atoms.add(parent_atom)
4778 def _add_slot_conflict(self, pkg):
4779 self._slot_collision_nodes.add(pkg)
4780 slot_key = (pkg.slot_atom, pkg.root)
4781 slot_nodes = self._slot_collision_info.get(slot_key)
4782 if slot_nodes is None:
4784 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
4785 self._slot_collision_info[slot_key] = slot_nodes
4788 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
4790 mytype = pkg.type_name
4793 metadata = pkg.metadata
4794 myuse = pkg.use.enabled
4796 depth = pkg.depth + 1
4797 removal_action = "remove" in self.myparams
4800 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
4802 edepend[k] = metadata[k]
4804 if not pkg.built and \
4805 "--buildpkgonly" in self.myopts and \
4806 "deep" not in self.myparams and \
4807 "empty" not in self.myparams:
4808 edepend["RDEPEND"] = ""
4809 edepend["PDEPEND"] = ""
4810 bdeps_satisfied = False
4812 if pkg.built and not removal_action:
4813 if self.myopts.get("--with-bdeps", "n") == "y":
4814 # Pull in build time deps as requested, but marked them as
4815 # "satisfied" since they are not strictly required. This allows
4816 # more freedom in the merge order calculation for solving
4817 # circular dependencies. Don't convert to PDEPEND since that
4818 # could make --with-bdeps=y less effective if it is used to
4819 # adjust merge order to prevent built_with_use() calls from
4821 bdeps_satisfied = True
4823 # built packages do not have build time dependencies.
4824 edepend["DEPEND"] = ""
4826 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
4827 edepend["DEPEND"] = ""
4830 ("/", edepend["DEPEND"],
4831 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
4832 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
4833 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
4836 debug = "--debug" in self.myopts
4837 strict = mytype != "installed"
4839 for dep_root, dep_string, dep_priority in deps:
4841 # Decrease priority so that --buildpkgonly
4842 # hasallzeros() works correctly.
4843 dep_priority = DepPriority()
4848 print "Parent: ", jbigkey
4849 print "Depstring:", dep_string
4850 print "Priority:", dep_priority
4851 vardb = self.roots[dep_root].trees["vartree"].dbapi
4853 selected_atoms = self._select_atoms(dep_root,
4854 dep_string, myuse=myuse, parent=pkg, strict=strict)
4855 except portage.exception.InvalidDependString, e:
4856 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
4859 print "Candidates:", selected_atoms
4861 for atom in selected_atoms:
4864 atom = portage.dep.Atom(atom)
4866 mypriority = dep_priority.copy()
4867 if not atom.blocker and vardb.match(atom):
4868 mypriority.satisfied = True
4870 if not self._add_dep(Dependency(atom=atom,
4871 blocker=atom.blocker, depth=depth, parent=pkg,
4872 priority=mypriority, root=dep_root),
4873 allow_unsatisfied=allow_unsatisfied):
4876 except portage.exception.InvalidAtom, e:
4877 show_invalid_depstring_notice(
4878 pkg, dep_string, str(e))
4880 if not pkg.installed:
4884 print "Exiting...", jbigkey
4885 except portage.exception.AmbiguousPackageName, e:
4887 portage.writemsg("\n\n!!! An atom in the dependencies " + \
4888 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
4890 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
4891 portage.writemsg("\n", noiselevel=-1)
4892 if mytype == "binary":
4894 "!!! This binary package cannot be installed: '%s'\n" % \
4895 mykey, noiselevel=-1)
4896 elif mytype == "ebuild":
4897 portdb = self.roots[myroot].trees["porttree"].dbapi
4898 myebuild, mylocation = portdb.findname2(mykey)
4899 portage.writemsg("!!! This ebuild cannot be installed: " + \
4900 "'%s'\n" % myebuild, noiselevel=-1)
4901 portage.writemsg("!!! Please notify the package maintainer " + \
4902 "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """Build a dependency priority appropriate for the current run.

    Uses UnmergeDepPriority when resolving a removal ("remove" in
    self.myparams) and DepPriority otherwise; kwargs are forwarded to
    the chosen constructor.
    """
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        # The extracted original lost this else line; without it the
        # DepPriority assignment would unconditionally overwrite.
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @rtype: list
    @returns: a list of atoms containing categories (possibly empty)
    """
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    # Collect every cp known to any configured db for this root, then
    # keep only those whose package name matches atom_pn.
    cp_set = set()
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root_config.root]["dbs"]:
        cp_set.update(db.cp_all())
    for cp in list(cp_set):
        cat, pn = portage.catsplit(cp)
        if pn != atom_pn:
            cp_set.discard(cp)
    deps = []
    for cp in cp_set:
        cat, pn = portage.catsplit(cp)
        deps.append(insert_category_into_atom(
            atom_without_category, cat))
    return deps
4941 def _have_new_virt(self, root, atom_cp):
4943 for db, pkg_type, built, installed, db_keys in \
4944 self._filtered_trees[root]["dbs"]:
4945 if db.cp_list(atom_cp):
4950 def _iter_atoms_for_pkg(self, pkg):
4951 # TODO: add multiple $ROOT support
4952 if pkg.root != self.target_root:
4954 atom_arg_map = self._atom_arg_map
4955 root_config = self.roots[pkg.root]
4956 for atom in self._set_atoms.iterAtomsForPackage(pkg):
4957 atom_cp = portage.dep_getkey(atom)
4958 if atom_cp != pkg.cp and \
4959 self._have_new_virt(pkg.root, atom_cp):
4961 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
4962 visible_pkgs.reverse() # descending order
4964 for visible_pkg in visible_pkgs:
4965 if visible_pkg.cp != atom_cp:
4967 if pkg >= visible_pkg:
4968 # This is descending order, and we're not
4969 # interested in any versions <= pkg given.
4971 if pkg.slot_atom != visible_pkg.slot_atom:
4972 higher_slot = visible_pkg
4974 if higher_slot is not None:
4976 for arg in atom_arg_map[(atom, pkg.root)]:
4977 if isinstance(arg, PackageArg) and \
4982 def select_files(self, myfiles):
4983 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
4984 appropriate depgraph and return a favorite list."""
4985 debug = "--debug" in self.myopts
4986 root_config = self.roots[self.target_root]
4987 sets = root_config.sets
4988 getSetAtoms = root_config.setconfig.getSetAtoms
4990 myroot = self.target_root
4991 dbs = self._filtered_trees[myroot]["dbs"]
4992 vardb = self.trees[myroot]["vartree"].dbapi
4993 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
4994 portdb = self.trees[myroot]["porttree"].dbapi
4995 bindb = self.trees[myroot]["bintree"].dbapi
4996 pkgsettings = self.pkgsettings[myroot]
4998 onlydeps = "--onlydeps" in self.myopts
5001 ext = os.path.splitext(x)[1]
5003 if not os.path.exists(x):
5005 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5006 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5007 elif os.path.exists(
5008 os.path.join(pkgsettings["PKGDIR"], x)):
5009 x = os.path.join(pkgsettings["PKGDIR"], x)
5011 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5012 print "!!! Please ensure the tbz2 exists as specified.\n"
5013 return 0, myfavorites
5014 mytbz2=portage.xpak.tbz2(x)
5015 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5016 if os.path.realpath(x) != \
5017 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5018 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5019 return 0, myfavorites
5020 db_keys = list(bindb._aux_cache_keys)
5021 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5022 pkg = Package(type_name="binary", root_config=root_config,
5023 cpv=mykey, built=True, metadata=metadata,
5025 self._pkg_cache[pkg] = pkg
5026 args.append(PackageArg(arg=x, package=pkg,
5027 root_config=root_config))
5028 elif ext==".ebuild":
5029 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5030 pkgdir = os.path.dirname(ebuild_path)
5031 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5032 cp = pkgdir[len(tree_root)+1:]
5033 e = portage.exception.PackageNotFound(
5034 ("%s is not in a valid portage tree " + \
5035 "hierarchy or does not exist") % x)
5036 if not portage.isvalidatom(cp):
5038 cat = portage.catsplit(cp)[0]
5039 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5040 if not portage.isvalidatom("="+mykey):
5042 ebuild_path = portdb.findname(mykey)
5044 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5045 cp, os.path.basename(ebuild_path)):
5046 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5047 return 0, myfavorites
5048 if mykey not in portdb.xmatch(
5049 "match-visible", portage.dep_getkey(mykey)):
5050 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5051 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5052 print colorize("BAD", "*** page for details.")
5053 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5056 raise portage.exception.PackageNotFound(
5057 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5058 db_keys = list(portdb._aux_cache_keys)
5059 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5060 pkg = Package(type_name="ebuild", root_config=root_config,
5061 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5062 pkgsettings.setcpv(pkg)
5063 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5064 self._pkg_cache[pkg] = pkg
5065 args.append(PackageArg(arg=x, package=pkg,
5066 root_config=root_config))
5067 elif x.startswith(os.path.sep):
5068 if not x.startswith(myroot):
5069 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5070 " $ROOT.\n") % x, noiselevel=-1)
5072 # Queue these up since it's most efficient to handle
5073 # multiple files in a single iter_owners() call.
5074 lookup_owners.append(x)
5076 if x in ("system", "world"):
5078 if x.startswith(SETPREFIX):
5079 s = x[len(SETPREFIX):]
5081 raise portage.exception.PackageSetNotFound(s)
5084 # Recursively expand sets so that containment tests in
5085 # self._get_parent_sets() properly match atoms in nested
5086 # sets (like if world contains system).
5087 expanded_set = InternalPackageSet(
5088 initial_atoms=getSetAtoms(s))
5089 self._sets[s] = expanded_set
5090 args.append(SetArg(arg=x, set=expanded_set,
5091 root_config=root_config))
5092 myfavorites.append(x)
5094 if not is_valid_package_atom(x):
5095 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5097 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5098 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5100 # Don't expand categories or old-style virtuals here unless
5101 # necessary. Expansion of old-style virtuals here causes at
5102 # least the following problems:
5103 # 1) It's more difficult to determine which set(s) an atom
5104 # came from, if any.
5105 # 2) It takes away freedom from the resolver to choose other
5106 # possible expansions when necessary.
5108 args.append(AtomArg(arg=x, atom=x,
5109 root_config=root_config))
5111 expanded_atoms = self._dep_expand(root_config, x)
5112 installed_cp_set = set()
5113 for atom in expanded_atoms:
5114 atom_cp = portage.dep_getkey(atom)
5115 if vardb.cp_list(atom_cp):
5116 installed_cp_set.add(atom_cp)
5117 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5118 installed_cp = iter(installed_cp_set).next()
5119 expanded_atoms = [atom for atom in expanded_atoms \
5120 if portage.dep_getkey(atom) == installed_cp]
5122 if len(expanded_atoms) > 1:
5125 ambiguous_package_name(x, expanded_atoms, root_config,
5126 self.spinner, self.myopts)
5127 return False, myfavorites
5129 atom = expanded_atoms[0]
5131 null_atom = insert_category_into_atom(x, "null")
5132 null_cp = portage.dep_getkey(null_atom)
5133 cat, atom_pn = portage.catsplit(null_cp)
5134 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5136 # Allow the depgraph to choose which virtual.
5137 atom = insert_category_into_atom(x, "virtual")
5139 atom = insert_category_into_atom(x, "null")
5141 args.append(AtomArg(arg=x, atom=atom,
5142 root_config=root_config))
5146 search_for_multiple = False
5147 if len(lookup_owners) > 1:
5148 search_for_multiple = True
5150 for x in lookup_owners:
5151 if not search_for_multiple and os.path.isdir(x):
5152 search_for_multiple = True
5153 relative_paths.append(x[len(myroot):])
5156 for pkg, relative_path in \
5157 real_vardb._owners.iter_owners(relative_paths):
5158 owners.add(pkg.mycpv)
5159 if not search_for_multiple:
5163 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5164 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5168 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5170 # portage now masks packages with missing slot, but it's
5171 # possible that one was installed by an older version
5172 atom = portage.cpv_getkey(cpv)
5174 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5175 args.append(AtomArg(arg=atom, atom=atom,
5176 root_config=root_config))
5178 if "--update" in self.myopts:
5179 # Enable greedy SLOT atoms for atoms given as arguments.
5180 # This is currently disabled for sets since greedy SLOT
5181 # atoms could be a property of the set itself.
5184 # In addition to any installed slots, also try to pull
5185 # in the latest new slot that may be available.
5186 greedy_atoms.append(arg)
5187 if not isinstance(arg, (AtomArg, PackageArg)):
5189 atom_cp = portage.dep_getkey(arg.atom)
5191 for cpv in vardb.match(arg.atom):
5192 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5194 greedy_atoms.append(
5195 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5196 root_config=root_config))
5200 # Create the "args" package set from atoms and
5201 # packages given as arguments.
5202 args_set = self._sets["args"]
5204 if not isinstance(arg, (AtomArg, PackageArg)):
5207 if myatom in args_set:
5209 args_set.add(myatom)
5210 myfavorites.append(myatom)
5211 self._set_atoms.update(chain(*self._sets.itervalues()))
5212 atom_arg_map = self._atom_arg_map
5214 for atom in arg.set:
5215 atom_key = (atom, myroot)
5216 refs = atom_arg_map.get(atom_key)
5219 atom_arg_map[atom_key] = refs
5222 pprovideddict = pkgsettings.pprovideddict
5224 portage.writemsg("\n", noiselevel=-1)
5225 # Order needs to be preserved since a feature of --nodeps
5226 # is to allow the user to force a specific merge order.
5230 for atom in arg.set:
5231 self.spinner.update()
5232 dep = Dependency(atom=atom, onlydeps=onlydeps,
5233 root=myroot, parent=arg)
5234 atom_cp = portage.dep_getkey(atom)
5236 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5237 if pprovided and portage.match_from_list(atom, pprovided):
5238 # A provided package has been specified on the command line.
5239 self._pprovided_args.append((arg, atom))
5241 if isinstance(arg, PackageArg):
5242 if not self._add_pkg(arg.package, dep) or \
5243 not self._create_graph():
5244 sys.stderr.write(("\n\n!!! Problem resolving " + \
5245 "dependencies for %s\n") % arg.arg)
5246 return 0, myfavorites
5249 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5250 (arg, atom), noiselevel=-1)
5251 pkg, existing_node = self._select_package(
5252 myroot, atom, onlydeps=onlydeps)
5254 if not (isinstance(arg, SetArg) and \
5255 arg.name in ("system", "world")):
5256 self._unsatisfied_deps_for_display.append(
5257 ((myroot, atom), {}))
5258 return 0, myfavorites
5259 self._missing_args.append((arg, atom))
5261 if atom_cp != pkg.cp:
5262 # For old-style virtuals, we need to repeat the
5263 # package.provided check against the selected package.
5264 expanded_atom = atom.replace(atom_cp, pkg.cp)
5265 pprovided = pprovideddict.get(pkg.cp)
5267 portage.match_from_list(expanded_atom, pprovided):
5268 # A provided package has been
5269 # specified on the command line.
5270 self._pprovided_args.append((arg, atom))
5272 if pkg.installed and "selective" not in self.myparams:
5273 self._unsatisfied_deps_for_display.append(
5274 ((myroot, atom), {}))
5275 # Previous behavior was to bail out in this case, but
5276 # since the dep is satisfied by the installed package,
5277 # it's more friendly to continue building the graph
5278 # and just show a warning message. Therefore, only bail
5279 # out here if the atom is not from either the system or
5281 if not (isinstance(arg, SetArg) and \
5282 arg.name in ("system", "world")):
5283 return 0, myfavorites
5285 # Add the selected package to the graph as soon as possible
5286 # so that later dep_check() calls can use it as feedback
5287 # for making more consistent atom selections.
5288 if not self._add_pkg(pkg, dep):
5289 if isinstance(arg, SetArg):
5290 sys.stderr.write(("\n\n!!! Problem resolving " + \
5291 "dependencies for %s from %s\n") % \
5294 sys.stderr.write(("\n\n!!! Problem resolving " + \
5295 "dependencies for %s\n") % atom)
5296 return 0, myfavorites
5298 except portage.exception.MissingSignature, e:
5299 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5300 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5301 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5302 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5303 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5304 return 0, myfavorites
5305 except portage.exception.InvalidSignature, e:
5306 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5307 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5308 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5309 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5310 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5311 return 0, myfavorites
5312 except SystemExit, e:
5313 raise # Needed else can't exit
5314 except Exception, e:
5315 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5316 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5319 # Now that the root packages have been added to the graph,
5320 # process the dependencies.
5321 if not self._create_graph():
5322 return 0, myfavorites
5325 if "--usepkgonly" in self.myopts:
5326 for xs in self.digraph.all_nodes():
5327 if not isinstance(xs, Package):
5329 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5333 print "Missing binary for:",xs[2]
5337 except self._unknown_internal_error:
5338 return False, myfavorites
5340 # We're true here unless we are missing binaries.
5341 return (not missing,myfavorites)
5343 def _select_atoms_from_graph(self, *pargs, **kwargs):
5345 Prefer atoms matching packages that have already been
5346 added to the graph or those that are installed and have
5347 not been scheduled for replacement.
5349 kwargs["trees"] = self._graph_trees
5350 return self._select_atoms_highest_available(*pargs, **kwargs)
def _select_atoms_highest_available(self, root, depstring,
    myuse=None, parent=None, strict=True, trees=None):
    """This will raise InvalidDependString if necessary. If trees is
    None then self._filtered_trees is used."""
    pkgsettings = self.pkgsettings[root]
    if trees is None:
        trees = self._filtered_trees
    try:
        # Temporarily attach the requesting parent so that the dep
        # resolution machinery can consult it during atom selection.
        if parent is not None:
            trees[root]["parent"] = parent
        if not strict:
            portage.dep._dep_check_strict = False
        mycheck = portage.dep_check(depstring, None,
            pkgsettings, myuse=myuse,
            myroot=root, trees=trees)
    finally:
        # Always restore mutated global/shared state, even when
        # dep_check raises, so later calls see strict checking.
        if parent is not None:
            trees[root].pop("parent")
        portage.dep._dep_check_strict = True
    if not mycheck[0]:
        raise portage.exception.InvalidDependString(mycheck[1])
    selected_atoms = mycheck[1]
    return selected_atoms
5377 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5378 atom = portage.dep.Atom(atom)
5379 atom_set = InternalPackageSet(initial_atoms=(atom,))
5380 atom_without_use = atom
5382 atom_without_use = portage.dep.remove_slot(atom)
5384 atom_without_use += ":" + atom.slot
5385 atom_without_use = portage.dep.Atom(atom_without_use)
5386 xinfo = '"%s"' % atom
5389 # Discard null/ from failed cpv_expand category expansion.
5390 xinfo = xinfo.replace("null/", "")
5391 masked_packages = []
5393 missing_licenses = []
5394 have_eapi_mask = False
5395 pkgsettings = self.pkgsettings[root]
5396 implicit_iuse = pkgsettings._get_implicit_iuse()
5397 root_config = self.roots[root]
5398 portdb = self.roots[root].trees["porttree"].dbapi
5399 dbs = self._filtered_trees[root]["dbs"]
5400 for db, pkg_type, built, installed, db_keys in dbs:
5404 if hasattr(db, "xmatch"):
5405 cpv_list = db.xmatch("match-all", atom_without_use)
5407 cpv_list = db.match(atom_without_use)
5410 for cpv in cpv_list:
5411 metadata, mreasons = get_mask_info(root_config, cpv,
5412 pkgsettings, db, pkg_type, built, installed, db_keys)
5413 if metadata is not None:
5414 pkg = Package(built=built, cpv=cpv,
5415 installed=installed, metadata=metadata,
5416 root_config=root_config)
5417 if pkg.cp != atom.cp:
5418 # A cpv can be returned from dbapi.match() as an
5419 # old-style virtual match even in cases when the
5420 # package does not actually PROVIDE the virtual.
5421 # Filter out any such false matches here.
5422 if not atom_set.findAtomForPackage(pkg):
5424 if atom.use and not mreasons:
5425 missing_use.append(pkg)
5427 masked_packages.append(
5428 (root_config, pkgsettings, cpv, metadata, mreasons))
5430 missing_use_reasons = []
5431 missing_iuse_reasons = []
5432 for pkg in missing_use:
5433 use = pkg.use.enabled
5434 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5435 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5437 for x in atom.use.required:
5438 if iuse_re.match(x) is None:
5439 missing_iuse.append(x)
5442 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5443 missing_iuse_reasons.append((pkg, mreasons))
5445 need_enable = sorted(atom.use.enabled.difference(use))
5446 need_disable = sorted(atom.use.disabled.intersection(use))
5447 if need_enable or need_disable:
5449 changes.extend(colorize("red", "+" + x) \
5450 for x in need_enable)
5451 changes.extend(colorize("blue", "-" + x) \
5452 for x in need_disable)
5453 mreasons.append("Change USE: %s" % " ".join(changes))
5454 missing_use_reasons.append((pkg, mreasons))
5456 if missing_iuse_reasons and not missing_use_reasons:
5457 missing_use_reasons = missing_iuse_reasons
5458 elif missing_use_reasons:
5459 # Only show the latest version.
5460 del missing_use_reasons[1:]
5462 if missing_use_reasons:
5463 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5464 print "!!! One of the following packages is required to complete your request:"
5465 for pkg, mreasons in missing_use_reasons:
5466 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5468 elif masked_packages:
5470 colorize("BAD", "All ebuilds that could satisfy ") + \
5471 colorize("INFORM", xinfo) + \
5472 colorize("BAD", " have been masked.")
5473 print "!!! One of the following masked packages is required to complete your request:"
5474 have_eapi_mask = show_masked_packages(masked_packages)
5477 msg = ("The current version of portage supports " + \
5478 "EAPI '%s'. You must upgrade to a newer version" + \
5479 " of portage before EAPI masked packages can" + \
5480 " be installed.") % portage.const.EAPI
5481 from textwrap import wrap
5482 for line in wrap(msg, 75):
5487 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5489 # Show parent nodes and the argument that pulled them in.
5490 traversed_nodes = set()
5493 while node is not None:
5494 traversed_nodes.add(node)
5495 msg.append('(dependency required by "%s" [%s])' % \
5496 (colorize('INFORM', str(node.cpv)), node.type_name))
5497 # When traversing to parents, prefer arguments over packages
5498 # since arguments are root nodes. Never traverse the same
5499 # package twice, in order to prevent an infinite loop.
5500 selected_parent = None
5501 for parent in self.digraph.parent_nodes(node):
5502 if isinstance(parent, DependencyArg):
5503 msg.append('(dependency required by "%s" [argument])' % \
5504 (colorize('INFORM', str(parent))))
5505 selected_parent = None
5507 if parent not in traversed_nodes:
5508 selected_parent = parent
5509 node = selected_parent
5515 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5516 cache_key = (root, atom, onlydeps)
5517 ret = self._highest_pkg_cache.get(cache_key)
5520 if pkg and not existing:
5521 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5522 if existing and existing == pkg:
5523 # Update the cache to reflect that the
5524 # package has been added to the graph.
5526 self._highest_pkg_cache[cache_key] = ret
5528 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5529 self._highest_pkg_cache[cache_key] = ret
5532 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5533 root_config = self.roots[root]
5534 pkgsettings = self.pkgsettings[root]
5535 dbs = self._filtered_trees[root]["dbs"]
5536 vardb = self.roots[root].trees["vartree"].dbapi
5537 portdb = self.roots[root].trees["porttree"].dbapi
5538 # List of acceptable packages, ordered by type preference.
5539 matched_packages = []
5540 highest_version = None
5541 if not isinstance(atom, portage.dep.Atom):
5542 atom = portage.dep.Atom(atom)
5544 atom_set = InternalPackageSet(initial_atoms=(atom,))
5545 existing_node = None
5547 usepkgonly = "--usepkgonly" in self.myopts
5548 empty = "empty" in self.myparams
5549 selective = "selective" in self.myparams
5551 noreplace = "--noreplace" in self.myopts
5552 # Behavior of the "selective" parameter depends on
5553 # whether or not a package matches an argument atom.
5554 # If an installed package provides an old-style
5555 # virtual that is no longer provided by an available
5556 # package, the installed package may match an argument
5557 # atom even though none of the available packages do.
5558 # Therefore, "selective" logic does not consider
5559 # whether or not an installed package matches an
5560 # argument atom. It only considers whether or not
5561 # available packages match argument atoms, which is
5562 # represented by the found_available_arg flag.
5563 found_available_arg = False
5564 for find_existing_node in True, False:
5567 for db, pkg_type, built, installed, db_keys in dbs:
5570 if installed and not find_existing_node:
5571 want_reinstall = reinstall or empty or \
5572 (found_available_arg and not selective)
5573 if want_reinstall and matched_packages:
5575 if hasattr(db, "xmatch"):
5576 cpv_list = db.xmatch("match-all", atom)
5578 cpv_list = db.match(atom)
5580 # USE=multislot can make an installed package appear as if
5581 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5582 # won't do any good as long as USE=multislot is enabled since
5583 # the newly built package still won't have the expected slot.
5584 # Therefore, assume that such SLOT dependencies are already
5585 # satisfied rather than forcing a rebuild.
5586 if installed and not cpv_list and atom.slot:
5587 for cpv in db.match(atom.cp):
5588 slot_available = False
5589 for other_db, other_type, other_built, \
5590 other_installed, other_keys in dbs:
5593 other_db.aux_get(cpv, ["SLOT"])[0]:
5594 slot_available = True
5598 if not slot_available:
5600 inst_pkg = self._pkg(cpv, "installed",
5601 root_config, installed=installed)
5602 # Remove the slot from the atom and verify that
5603 # the package matches the resulting atom.
5604 atom_without_slot = portage.dep.remove_slot(atom)
5606 atom_without_slot += str(atom.use)
5607 atom_without_slot = portage.dep.Atom(atom_without_slot)
5608 if portage.match_from_list(
5609 atom_without_slot, [inst_pkg]):
5610 cpv_list = [inst_pkg.cpv]
5615 pkg_status = "merge"
5616 if installed or onlydeps:
5617 pkg_status = "nomerge"
5620 for cpv in cpv_list:
5621 # Make --noreplace take precedence over --newuse.
5622 if not installed and noreplace and \
5623 cpv in vardb.match(atom):
5624 # If the installed version is masked, it may
5625 # be necessary to look at lower versions,
5626 # in case there is a visible downgrade.
5628 reinstall_for_flags = None
5629 cache_key = (pkg_type, root, cpv, pkg_status)
5630 calculated_use = True
5631 pkg = self._pkg_cache.get(cache_key)
5633 calculated_use = False
5635 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5638 pkg = Package(built=built, cpv=cpv,
5639 installed=installed, metadata=metadata,
5640 onlydeps=onlydeps, root_config=root_config,
5642 metadata = pkg.metadata
5643 if not built and ("?" in metadata["LICENSE"] or \
5644 "?" in metadata["PROVIDE"]):
5645 # This is avoided whenever possible because
5646 # it's expensive. It only needs to be done here
5647 # if it has an effect on visibility.
5648 pkgsettings.setcpv(pkg)
5649 metadata["USE"] = pkgsettings["PORTAGE_USE"]
5650 calculated_use = True
5651 self._pkg_cache[pkg] = pkg
5653 if not installed or (installed and matched_packages):
5654 # Only enforce visibility on installed packages
5655 # if there is at least one other visible package
5656 # available. By filtering installed masked packages
5657 # here, packages that have been masked since they
5658 # were installed can be automatically downgraded
5659 # to an unmasked version.
5661 if not visible(pkgsettings, pkg):
5663 except portage.exception.InvalidDependString:
5667 # Enable upgrade or downgrade to a version
5668 # with visible KEYWORDS when the installed
5669 # version is masked by KEYWORDS, but never
5670 # reinstall the same exact version only due
5671 # to a KEYWORDS mask.
5672 if installed and matched_packages and \
5673 pkgsettings._getMissingKeywords(
5674 pkg.cpv, pkg.metadata):
5675 different_version = None
5676 for avail_pkg in matched_packages:
5677 if not portage.dep.cpvequal(
5678 pkg.cpv, avail_pkg.cpv):
5679 different_version = avail_pkg
5681 if different_version is not None:
5682 # Only reinstall for KEYWORDS if
5683 # it's not the same version.
5686 if not pkg.built and not calculated_use:
5687 # This is avoided whenever possible because
5689 pkgsettings.setcpv(pkg)
5690 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5692 if pkg.cp != atom.cp:
5693 # A cpv can be returned from dbapi.match() as an
5694 # old-style virtual match even in cases when the
5695 # package does not actually PROVIDE the virtual.
5696 # Filter out any such false matches here.
5697 if not atom_set.findAtomForPackage(pkg):
5701 if root == self.target_root:
5703 # Ebuild USE must have been calculated prior
5704 # to this point, in case atoms have USE deps.
5705 myarg = self._iter_atoms_for_pkg(pkg).next()
5706 except StopIteration:
5708 except portage.exception.InvalidDependString:
5710 # masked by corruption
5712 if not installed and myarg:
5713 found_available_arg = True
5715 if atom.use and not pkg.built:
5716 use = pkg.use.enabled
5717 if atom.use.enabled.difference(use):
5719 if atom.use.disabled.intersection(use):
5721 if pkg.cp == atom_cp:
5722 if highest_version is None:
5723 highest_version = pkg
5724 elif pkg > highest_version:
5725 highest_version = pkg
5726 # At this point, we've found the highest visible
5727 # match from the current repo. Any lower versions
5728 # from this repo are ignored, so this so the loop
5729 # will always end with a break statement below
5731 if find_existing_node:
5732 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5735 if portage.dep.match_from_list(atom, [e_pkg]):
5736 if highest_version and \
5737 e_pkg.cp == atom_cp and \
5738 e_pkg < highest_version and \
5739 e_pkg.slot_atom != highest_version.slot_atom:
5740 # There is a higher version available in a
5741 # different slot, so this existing node is
5745 matched_packages.append(e_pkg)
5746 existing_node = e_pkg
5748 # Compare built package to current config and
5749 # reject the built package if necessary.
5750 if built and not installed and \
5751 ("--newuse" in self.myopts or \
5752 "--reinstall" in self.myopts):
5753 iuses = pkg.iuse.all
5754 old_use = pkg.use.enabled
5756 pkgsettings.setcpv(myeb)
5758 pkgsettings.setcpv(pkg)
5759 now_use = pkgsettings["PORTAGE_USE"].split()
5760 forced_flags = set()
5761 forced_flags.update(pkgsettings.useforce)
5762 forced_flags.update(pkgsettings.usemask)
5764 if myeb and not usepkgonly:
5765 cur_iuse = myeb.iuse.all
5766 if self._reinstall_for_flags(forced_flags,
5770 # Compare current config to installed package
5771 # and do not reinstall if possible.
5772 if not installed and \
5773 ("--newuse" in self.myopts or \
5774 "--reinstall" in self.myopts) and \
5775 cpv in vardb.match(atom):
5776 pkgsettings.setcpv(pkg)
5777 forced_flags = set()
5778 forced_flags.update(pkgsettings.useforce)
5779 forced_flags.update(pkgsettings.usemask)
5780 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
5781 old_iuse = set(filter_iuse_defaults(
5782 vardb.aux_get(cpv, ["IUSE"])[0].split()))
5783 cur_use = pkgsettings["PORTAGE_USE"].split()
5784 cur_iuse = pkg.iuse.all
5785 reinstall_for_flags = \
5786 self._reinstall_for_flags(
5787 forced_flags, old_use, old_iuse,
5789 if reinstall_for_flags:
5793 matched_packages.append(pkg)
5794 if reinstall_for_flags:
5795 self._reinstall_nodes[pkg] = \
5799 if not matched_packages:
5802 if "--debug" in self.myopts:
5803 for pkg in matched_packages:
5804 portage.writemsg("%s %s\n" % \
5805 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
5807 # Filter out any old-style virtual matches if they are
5808 # mixed with new-style virtual matches.
5809 cp = portage.dep_getkey(atom)
5810 if len(matched_packages) > 1 and \
5811 "virtual" == portage.catsplit(cp)[0]:
5812 for pkg in matched_packages:
5815 # Got a new-style virtual, so filter
5816 # out any old-style virtuals.
5817 matched_packages = [pkg for pkg in matched_packages \
5821 # If the installed version is in a different slot and it is higher than
5822 # the highest available visible package, _iter_atoms_for_pkg() may fail
5823 # to properly match the available package with a corresponding argument
5824 # atom. Detect this case and correct it here.
5825 if not selective and len(matched_packages) > 1 and \
5826 matched_packages[-1].installed and \
5827 matched_packages[-1].slot_atom != \
5828 matched_packages[-2].slot_atom and \
5829 matched_packages[-1] > matched_packages[-2]:
5830 pkg = matched_packages[-2]
5831 if pkg.root == self.target_root and \
5832 self._set_atoms.findAtomForPackage(pkg):
5833 # Select the available package instead
5834 # of the installed package.
5835 matched_packages.pop()
5837 if len(matched_packages) > 1:
5838 bestmatch = portage.best(
5839 [pkg.cpv for pkg in matched_packages])
5840 matched_packages = [pkg for pkg in matched_packages \
5841 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5843 # ordered by type preference ("ebuild" type is the last resort)
5844 return matched_packages[-1], existing_node
# NOTE(review): this listing is elided -- embedded line numbers jump
# (5846 -> 5848, 5849 -> 5852, ...), so the docstring delimiters, the
# empty-match guard, and the branch that returns e_pkg are not visible
# here.  Comments describe only what the visible code shows.
5846 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
5848 Select packages that have already been added to the graph or
5849 those that are installed and have not been scheduled for
# Match the atom only against the graph trees (packages already pulled
# into the depgraph), not the full repositories.
5852 graph_db = self._graph_trees[root]["porttree"].dbapi
5853 matches = graph_db.match(atom)
# Highest version wins; derive its slot atom ("cp:SLOT") from metadata.
5856 cpv = matches[-1] # highest match
5857 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
5858 graph_db.aux_get(cpv, ["SLOT"])[0])
# Reuse a Package instance already tracked for this slot if one exists
# (the code path consuming e_pkg is elided from this view).
5859 e_pkg = self._slot_pkg_map[root].get(slot_atom)
5862 # Since this cpv exists in the graph_db,
5863 # we must have a cached Package instance.
5864 cache_key = ("installed", root, cpv, "nomerge")
5865 return (self._pkg_cache[cache_key], None)
# NOTE(review): elided listing -- gaps in the embedded line numbers mean
# early-return statements, `continue` statements, and some loop headers
# are missing from view.  Comments below are limited to visible code.
5867 def _complete_graph(self):
5869 Add any deep dependencies of required sets (args, system, world) that
5870 have not been pulled into the graph yet. This ensures that the graph
5871 is consistent such that initially satisfied deep dependencies are not
5872 broken in the new graph. Initially unsatisfied dependencies are
5873 irrelevant since we only want to avoid breaking dependencies that are
5876 Since this method can consume enough time to disturb users, it is
5877 currently only enabled by the --complete-graph option.
# Nothing to complete when only building packages or when recursion
# is disabled (the early return itself is elided from this view).
5879 if "--buildpkgonly" in self.myopts or \
5880 "recurse" not in self.myparams:
5883 if "complete" not in self.myparams:
5884 # Skip this to avoid consuming enough time to disturb users.
5887 # Put the depgraph into a mode that causes it to only
5888 # select packages that have already been added to the
5889 # graph or those that are installed and have not been
5890 # scheduled for replacement. Also, toggle the "deep"
5891 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selectors defined elsewhere in this class.
5893 self._select_atoms = self._select_atoms_from_graph
5894 self._select_package = self._select_pkg_from_graph
5895 already_deep = "deep" in self.myparams
5896 if not already_deep:
5897 self.myparams.add("deep")
5899 for root in self.roots:
5900 required_set_names = self._required_set_names.copy()
5901 if root == self.target_root and \
5902 (already_deep or "empty" in self.myparams):
5903 required_set_names.difference_update(self._sets)
5904 if not required_set_names and not self._ignored_deps:
5906 root_config = self.roots[root]
5907 setconfig = root_config.setconfig
5909 # Reuse existing SetArg instances when available.
5910 for arg in self.digraph.root_nodes():
5911 if not isinstance(arg, SetArg):
5913 if arg.root_config != root_config:
5915 if arg.name in required_set_names:
5917 required_set_names.remove(arg.name)
5918 # Create new SetArg instances only when necessary.
5919 for s in required_set_names:
5920 expanded_set = InternalPackageSet(
5921 initial_atoms=setconfig.getSetAtoms(s))
5922 atom = SETPREFIX + s
5923 args.append(SetArg(arg=atom, set=expanded_set,
5924 root_config=root_config))
5925 vardb = root_config.trees["vartree"].dbapi
# Queue every atom of each set as a Dependency to be resolved
# (the loop header over `args` appears to be elided -- TODO confirm).
5927 for atom in arg.set:
5928 self._dep_stack.append(
5929 Dependency(atom=atom, root=root, parent=arg))
5930 if self._ignored_deps:
5931 self._dep_stack.extend(self._ignored_deps)
5932 self._ignored_deps = []
5933 if not self._create_graph(allow_unsatisfied=True):
5935 # Check the unsatisfied deps to see if any initially satisfied deps
5936 # will become unsatisfied due to an upgrade. Initially unsatisfied
5937 # deps are irrelevant since we only want to avoid breaking deps
5938 # that are initially satisfied.
5939 while self._unsatisfied_deps:
5940 dep = self._unsatisfied_deps.pop()
5941 matches = vardb.match_pkgs(dep.atom)
5943 self._initially_unsatisfied_deps.append(dep)
5945 # An scheduled installation broke a deep dependency.
5946 # Add the installed package to the graph so that it
5947 # will be appropriately reported as a slot collision
5948 # (possibly solvable via backtracking).
5949 pkg = matches[-1] # highest match
5950 if not self._add_pkg(pkg, dep):
5952 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): elided listing -- the docstring delimiters, the cache-hit
# early return, and the final `return pkg` are not visible here.
5956 def _pkg(self, cpv, type_name, root_config, installed=False):
5958 Get a package instance from the cache, or create a new
5959 one if necessary. Raises KeyError from aux_get if it
5960 fails for some reason (package does not exist or is
# Cache key is (type_name, root, cpv, operation); only "nomerge"
# instances are looked up here.
5965 operation = "nomerge"
5966 pkg = self._pkg_cache.get(
5967 (type_name, root_config.root, cpv, operation))
# Cache miss: build a Package from the metadata keys that the original
# (non-fake) tree's dbapi caches.
5969 tree_type = self.pkg_tree_map[type_name]
5970 db = root_config.trees[tree_type].dbapi
5971 db_keys = list(self._trees_orig[root_config.root][
5972 tree_type].dbapi._aux_cache_keys)
5973 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5974 pkg = Package(cpv=cpv, metadata=metadata,
5975 root_config=root_config, installed=installed)
# Ebuilds get their effective USE computed from the root's settings.
5976 if type_name == "ebuild":
5977 settings = self.pkgsettings[root_config.root]
5978 settings.setcpv(pkg)
5979 pkg.metadata["USE"] = settings["PORTAGE_USE"]
5980 self._pkg_cache[pkg] = pkg
# NOTE(review): elided listing -- numerous statements (loop headers such
# as the iteration over installed packages, `continue`/`return`
# statements, try/finally scaffolding) fall in the gaps between the
# embedded line numbers and are not visible here.  Comments added below
# only describe what the visible code shows.
5983 def validate_blockers(self):
5984 """Remove any blockers from the digraph that do not match any of the
5985 packages within the graph. If necessary, create hard deps to ensure
5986 correct merge order such that mutually blocking packages are never
5987 installed simultaneously."""
# Blockers are irrelevant when nothing will actually be merged in-place.
5989 if "--buildpkgonly" in self.myopts or \
5990 "--nodeps" in self.myopts:
5993 #if "deep" in self.myparams:
5995 # Pull in blockers from all installed packages that haven't already
5996 # been pulled into the depgraph. This is not enabled by default
5997 # due to the performance penalty that is incurred by all the
5998 # additional dep_check calls that are required.
# Phase 1: (re)compute the set of blocker atoms declared by each
# package, using a persistent per-root BlockerCache keyed by cpv and
# validated against the vardb COUNTER.
6000 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6001 for myroot in self.trees:
6002 vardb = self.trees[myroot]["vartree"].dbapi
6003 portdb = self.trees[myroot]["porttree"].dbapi
6004 pkgsettings = self.pkgsettings[myroot]
6005 final_db = self.mydbapi[myroot]
6007 blocker_cache = BlockerCache(myroot, vardb)
6008 stale_cache = set(blocker_cache)
# Any cpv still in stale_cache after this loop is pruned at the end.
6011 stale_cache.discard(cpv)
6012 pkg_in_graph = self.digraph.contains(pkg)
6014 # Check for masked installed packages. Only warn about
6015 # packages that are in the graph in order to avoid warning
6016 # about those that will be automatically uninstalled during
6017 # the merge process or by --depclean.
6019 if pkg_in_graph and not visible(pkgsettings, pkg):
6020 self._masked_installed.add(pkg)
6022 blocker_atoms = None
6028 self._blocker_parents.child_nodes(pkg))
6033 self._irrelevant_blockers.child_nodes(pkg))
6036 if blockers is not None:
6037 blockers = set(str(blocker.atom) \
6038 for blocker in blockers)
6040 # If this node has any blockers, create a "nomerge"
6041 # node for it so that they can be enforced.
6042 self.spinner.update()
6043 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cached entry belongs to a different
# build of this cpv, so it cannot be trusted.
6044 if blocker_data is not None and \
6045 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6048 # If blocker data from the graph is available, use
6049 # it to validate the cache and update the cache if
6051 if blocker_data is not None and \
6052 blockers is not None:
6053 if not blockers.symmetric_difference(
6054 blocker_data.atoms):
6058 if blocker_data is None and \
6059 blockers is not None:
6060 # Re-use the blockers from the graph.
6061 blocker_atoms = sorted(blockers)
6062 counter = long(pkg.metadata["COUNTER"])
6064 blocker_cache.BlockerData(counter, blocker_atoms)
6065 blocker_cache[pkg.cpv] = blocker_data
6069 blocker_atoms = blocker_data.atoms
# Cache miss: extract "!atom" tokens by running a full dep_check on the
# package's *DEPEND strings.
6071 # Use aux_get() to trigger FakeVartree global
6072 # updates on *DEPEND when appropriate.
6073 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6074 # It is crucial to pass in final_db here in order to
6075 # optimize dep_check calls by eliminating atoms via
6076 # dep_wordreduce and dep_eval calls.
# Module-global strictness flag is toggled off for this call and
# restored below; presumably inside a try/finally -- the scaffolding is
# elided from this view.
6078 portage.dep._dep_check_strict = False
6080 success, atoms = portage.dep_check(depstr,
6081 final_db, pkgsettings, myuse=pkg.use.enabled,
6082 trees=self._graph_trees, myroot=myroot)
6083 except Exception, e:
6084 if isinstance(e, SystemExit):
6086 # This is helpful, for example, if a ValueError
6087 # is thrown from cpv_expand due to multiple
6088 # matches (this can happen if an atom lacks a
6090 show_invalid_depstring_notice(
6091 pkg, depstr, str(e))
6095 portage.dep._dep_check_strict = True
6097 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6098 if replacement_pkg and \
6099 replacement_pkg[0].operation == "merge":
6100 # This package is being replaced anyway, so
6101 # ignore invalid dependencies so as not to
6102 # annoy the user too much (otherwise they'd be
6103 # forced to manually unmerge it first).
6105 show_invalid_depstring_notice(pkg, depstr, atoms)
6107 blocker_atoms = [myatom for myatom in atoms \
6108 if myatom.startswith("!")]
6109 blocker_atoms.sort()
6110 counter = long(pkg.metadata["COUNTER"])
6111 blocker_cache[cpv] = \
6112 blocker_cache.BlockerData(counter, blocker_atoms)
# Register each blocker in the blocker -> parent-package digraph.
6115 for atom in blocker_atoms:
6116 blocker = Blocker(atom=portage.dep.Atom(atom),
6117 eapi=pkg.metadata["EAPI"], root=myroot)
6118 self._blocker_parents.add(blocker, pkg)
6119 except portage.exception.InvalidAtom, e:
6120 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6121 show_invalid_depstring_notice(
6122 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer installed, then persist.
6124 for cpv in stale_cache:
6125 del blocker_cache[cpv]
6126 blocker_cache.flush()
6129 # Discard any "uninstall" tasks scheduled by previous calls
6130 # to this method, since those tasks may not make sense given
6131 # the current graph state.
6132 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6133 if previous_uninstall_tasks:
6134 self._blocker_uninstalls = digraph()
6135 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: for every collected blocker, find the packages it blocks in
# the initial (vartree) and final (planned) states and decide whether
# the conflict is resolvable (via replacement or scheduled uninstall).
6137 for blocker in self._blocker_parents.leaf_nodes():
6138 self.spinner.update()
6139 root_config = self.roots[blocker.root]
6140 virtuals = root_config.settings.getvirtuals()
6141 myroot = blocker.root
6142 initial_db = self.trees[myroot]["vartree"].dbapi
6143 final_db = self.mydbapi[myroot]
6145 provider_virtual = False
6146 if blocker.cp in virtuals and \
6147 not self._have_new_virt(blocker.root, blocker.cp):
6148 provider_virtual = True
# Old-style virtual blocker: expand it to one atom per provider.
6150 if provider_virtual:
6152 for provider_entry in virtuals[blocker.cp]:
6154 portage.dep_getkey(provider_entry)
6155 atoms.append(blocker.atom.replace(
6156 blocker.cp, provider_cp))
6158 atoms = [blocker.atom]
6160 blocked_initial = []
6162 blocked_initial.extend(initial_db.match_pkgs(atom))
6166 blocked_final.extend(final_db.match_pkgs(atom))
# Blocker matches nothing in either state: it is irrelevant.
6168 if not blocked_initial and not blocked_final:
6169 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6170 self._blocker_parents.remove(blocker)
6171 # Discard any parents that don't have any more blockers.
6172 for pkg in parent_pkgs:
6173 self._irrelevant_blockers.add(blocker, pkg)
6174 if not self._blocker_parents.child_nodes(pkg):
6175 self._blocker_parents.remove(pkg)
6177 for parent in self._blocker_parents.parent_nodes(blocker):
6178 unresolved_blocks = False
6179 depends_on_order = set()
6180 for pkg in blocked_initial:
6181 if pkg.slot_atom == parent.slot_atom:
6182 # TODO: Support blocks within slots in cases where it
6183 # might make sense. For example, a new version might
6184 # require that the old version be uninstalled at build
6187 if parent.installed:
6188 # Two currently installed packages conflict with
6189 # each other. Ignore this case since the damage
6190 # is already done and this would be likely to
6191 # confuse users if displayed like a normal blocker.
6193 if parent.operation == "merge":
6194 # Maybe the blocked package can be replaced or simply
6195 # unmerged to resolve this block.
6196 depends_on_order.add((pkg, parent))
6198 # None of the above blocker resolution techniques apply,
6199 # so apparently this one is unresolvable.
6200 unresolved_blocks = True
6201 for pkg in blocked_final:
6202 if pkg.slot_atom == parent.slot_atom:
6203 # TODO: Support blocks within slots.
6205 if parent.operation == "nomerge" and \
6206 pkg.operation == "nomerge":
6207 # This blocker will be handled the next time that a
6208 # merge of either package is triggered.
6211 # Maybe the blocking package can be
6212 # unmerged to resolve this block.
6213 if parent.operation == "merge" and pkg.installed:
6214 depends_on_order.add((pkg, parent))
6216 elif parent.operation == "nomerge":
6217 depends_on_order.add((parent, pkg))
6219 # None of the above blocker resolution techniques apply,
6220 # so apparently this one is unresolvable.
6221 unresolved_blocks = True
6223 # Make sure we don't unmerge any package that has been pulled
6225 if not unresolved_blocks and depends_on_order:
6226 for inst_pkg, inst_task in depends_on_order:
6227 if self.digraph.contains(inst_pkg) and \
6228 self.digraph.parent_nodes(inst_pkg):
6229 unresolved_blocks = True
# Conflict is resolvable by order: schedule an "uninstall" Package for
# each blocked installed package and wire a hard ordering edge.
6232 if not unresolved_blocks and depends_on_order:
6233 for inst_pkg, inst_task in depends_on_order:
6234 uninst_task = Package(built=inst_pkg.built,
6235 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6236 metadata=inst_pkg.metadata,
6237 operation="uninstall",
6238 root_config=inst_pkg.root_config,
6239 type_name=inst_pkg.type_name)
6240 self._pkg_cache[uninst_task] = uninst_task
6241 # Enforce correct merge order with a hard dep.
6242 self.digraph.addnode(uninst_task, inst_task,
6243 priority=BlockerDepPriority.instance)
6244 # Count references to this blocker so that it can be
6245 # invalidated after nodes referencing it have been
6247 self._blocker_uninstalls.addnode(uninst_task, blocker)
6248 if not unresolved_blocks and not depends_on_order:
6249 self._irrelevant_blockers.add(blocker, parent)
6250 self._blocker_parents.remove_edge(blocker, parent)
6251 if not self._blocker_parents.parent_nodes(blocker):
6252 self._blocker_parents.remove(blocker)
6253 if not self._blocker_parents.child_nodes(parent):
6254 self._blocker_parents.remove(parent)
6255 if unresolved_blocks:
6256 self._unsolvable_blockers.add(blocker, parent)
# Whether unresolved blocker conflicts may be tolerated: scans the
# option list below for any mode that performs no real merge.  The
# return statements are elided from this listing -- presumably it
# returns True on a match and False otherwise (TODO confirm).
6260 def _accept_blocker_conflicts(self):
6262 for x in ("--buildpkgonly", "--fetchonly",
6263 "--fetch-all-uri", "--nodeps", "--pretend"):
6264 if x in self.myopts:
6269 def _merge_order_bias(self, mygraph):
6270 """Order nodes from highest to lowest overall reference count for
6271 optimal leaf node selection."""
# NOTE(review): the initialization of node_info (a dict) is elided from
# this listing.  Each node is keyed to its parent count, then the
# graph's order list is sorted in place, most-referenced first, using a
# Python 2 cmp function.
6273 for node in mygraph.order:
6274 node_info[node] = len(mygraph.parent_nodes(node))
6275 def cmp_merge_preference(node1, node2):
6276 return node_info[node2] - node_info[node1]
6277 mygraph.order.sort(cmp_merge_preference)
# Return the serialized merge list, computing and caching it on first
# use.  NOTE(review): the `reversed` parameter shadows the builtin of
# the same name (kept for API compatibility); the try/except scaffolding
# around _serialize_tasks and the final return/reverse handling are
# elided from this listing.
6279 def altlist(self, reversed=False):
# Loop until _serialize_tasks either succeeds or raises something other
# than its internal retry exception.
6281 while self._serialized_tasks_cache is None:
6282 self._resolve_conflicts()
6284 self._serialized_tasks_cache, self._scheduler_graph = \
6285 self._serialize_tasks()
6286 except self._serialize_tasks_retry:
# Hand out a copy so callers cannot mutate the cache.
6289 retlist = self._serialized_tasks_cache[:]
6294 def schedulerGraph(self):
6296 The scheduler graph is identical to the normal one except that
6297 uninstall edges are reversed in specific cases that require
6298 conflicting packages to be temporarily installed simultaneously.
6299 This is intended for use by the Scheduler in its parallelization
6300 logic. It ensures that temporary simultaneous installation of
6301 conflicting packages is avoided when appropriate (especially for
6302 !!atom blockers), but allowed in specific cases that require it.
6304 Note that this method calls break_refs() which alters the state of
6305 internal Package instances such that this depgraph instance should
6306 not be used to perform any more calculations.
# Lazily build the graph (the build call under this guard is elided
# from this listing), then sever back-references before handing it out.
6308 if self._scheduler_graph is None:
6310 self.break_refs(self._scheduler_graph.order)
6311 return self._scheduler_graph
6313 def break_refs(self, nodes):
6315 Take a mergelist like that returned from self.altlist() and
6316 break any references that lead back to the depgraph. This is
6317 useful if you want to hold references to packages without
6318 also holding the depgraph on the heap.
# NOTE(review): the `for node in nodes:` loop header is elided from this
# listing; the guarded body below operates per node.
6321 if hasattr(node, "root_config"):
6322 # The FakeVartree references the _package_cache which
6323 # references the depgraph. So that Package instances don't
6324 # hold the depgraph and FakeVartree on the heap, replace
6325 # the RootConfig that references the FakeVartree with the
6326 # original RootConfig instance which references the actual
# Swap in the original (non-fake) RootConfig for the node's root.
6328 node.root_config = \
6329 self._trees_orig[node.root_config.root]["root_config"]
# Run the pre-serialization consistency passes in order: complete the
# graph, validate blockers, then handle any slot collisions.  A False
# result from either of the first two passes is treated as an internal
# error (callers such as altlist() do not expect partial state).
6331 def _resolve_conflicts(self):
6332 if not self._complete_graph():
6333 raise self._unknown_internal_error()
6335 if not self.validate_blockers():
6336 raise self._unknown_internal_error()
# Slot conflicts are only processed when some were recorded.
6338 if self._slot_collision_info:
6339 self._process_slot_conflicts()
6341 def _serialize_tasks(self):
6342 scheduler_graph = self.digraph.copy()
6343 mygraph=self.digraph.copy()
6344 # Prune "nomerge" root nodes if nothing depends on them, since
6345 # otherwise they slow down merge order calculation. Don't remove
6346 # non-root nodes since they help optimize merge order in some cases
6347 # such as revdep-rebuild.
6348 removed_nodes = set()
6350 for node in mygraph.root_nodes():
6351 if not isinstance(node, Package) or \
6352 node.installed or node.onlydeps:
6353 removed_nodes.add(node)
6355 self.spinner.update()
6356 mygraph.difference_update(removed_nodes)
6357 if not removed_nodes:
6359 removed_nodes.clear()
6360 self._merge_order_bias(mygraph)
6361 def cmp_circular_bias(n1, n2):
6363 RDEPEND is stronger than PDEPEND and this function
6364 measures such a strength bias within a circular
6365 dependency relationship.
6367 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6368 ignore_priority=DepPriority.MEDIUM_SOFT)
6369 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6370 ignore_priority=DepPriority.MEDIUM_SOFT)
6371 if n1_n2_medium == n2_n1_medium:
6376 myblocker_uninstalls = self._blocker_uninstalls.copy()
6378 # Contains uninstall tasks that have been scheduled to
6379 # occur after overlapping blockers have been installed.
6380 scheduled_uninstalls = set()
6381 # Contains any Uninstall tasks that have been ignored
6382 # in order to avoid the circular deps code path. These
6383 # correspond to blocker conflicts that could not be
6385 ignored_uninstall_tasks = set()
6386 have_uninstall_task = False
6387 complete = "complete" in self.myparams
6388 myblocker_parents = self._blocker_parents.copy()
6391 def get_nodes(**kwargs):
6393 Returns leaf nodes excluding Uninstall instances
6394 since those should be executed as late as possible.
6396 return [node for node in mygraph.leaf_nodes(**kwargs) \
6397 if isinstance(node, Package) and \
6398 (node.operation != "uninstall" or \
6399 node in scheduled_uninstalls)]
6401 # sys-apps/portage needs special treatment if ROOT="/"
6402 running_root = self._running_root.root
6403 from portage.const import PORTAGE_PACKAGE_ATOM
6404 runtime_deps = InternalPackageSet(
6405 initial_atoms=[PORTAGE_PACKAGE_ATOM])
6406 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6407 PORTAGE_PACKAGE_ATOM)
6408 replacement_portage = self.mydbapi[running_root].match_pkgs(
6409 PORTAGE_PACKAGE_ATOM)
6412 running_portage = running_portage[0]
6414 running_portage = None
6416 if replacement_portage:
6417 replacement_portage = replacement_portage[0]
6419 replacement_portage = None
6421 if replacement_portage == running_portage:
6422 replacement_portage = None
6424 if replacement_portage is not None:
6425 # update from running_portage to replacement_portage asap
6426 asap_nodes.append(replacement_portage)
6428 if running_portage is not None:
6430 portage_rdepend = self._select_atoms_highest_available(
6431 running_root, running_portage.metadata["RDEPEND"],
6432 myuse=running_portage.use.enabled,
6433 parent=running_portage, strict=False)
6434 except portage.exception.InvalidDependString, e:
6435 portage.writemsg("!!! Invalid RDEPEND in " + \
6436 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6437 (running_root, running_portage.cpv, e), noiselevel=-1)
6439 portage_rdepend = []
6440 runtime_deps.update(atom for atom in portage_rdepend \
6441 if not atom.startswith("!"))
6443 ignore_priority_soft_range = [None]
6444 ignore_priority_soft_range.extend(
6445 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6446 tree_mode = "--tree" in self.myopts
6447 # Tracks whether or not the current iteration should prefer asap_nodes
6448 # if available. This is set to False when the previous iteration
6449 # failed to select any nodes. It is reset whenever nodes are
6450 # successfully selected.
6453 # By default, try to avoid selecting root nodes whenever possible. This
6454 # helps ensure that the maximimum possible number of soft dependencies
6455 # have been removed from the graph before their parent nodes have
6456 # selected. This is especially important when those dependencies are
6457 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6458 # CHOST has been changed (like when building a stage3 from a stage2).
6459 accept_root_node = False
6461 # State of prefer_asap and accept_root_node flags for successive
6462 # iterations that loosen the criteria for node selection.
6464 # iteration prefer_asap accept_root_node
6469 # If no nodes are selected on the 3rd iteration, it is due to
6470 # unresolved blockers or circular dependencies.
6472 while not mygraph.empty():
6473 self.spinner.update()
6474 selected_nodes = None
6475 ignore_priority = None
6476 if prefer_asap and asap_nodes:
6477 """ASAP nodes are merged before their soft deps."""
6478 asap_nodes = [node for node in asap_nodes \
6479 if mygraph.contains(node)]
6480 for node in asap_nodes:
6481 if not mygraph.child_nodes(node,
6482 ignore_priority=DepPriority.SOFT):
6483 selected_nodes = [node]
6484 asap_nodes.remove(node)
6486 if not selected_nodes and \
6487 not (prefer_asap and asap_nodes):
6488 for ignore_priority in ignore_priority_soft_range:
6489 nodes = get_nodes(ignore_priority=ignore_priority)
6493 if ignore_priority is None and not tree_mode:
6494 # Greedily pop all of these nodes since no relationship
6495 # has been ignored. This optimization destroys --tree
6496 # output, so it's disabled in reversed mode. If there
6497 # is a mix of merge and uninstall nodes, save the
6498 # uninstall nodes from later since sometimes a merge
6499 # node will render an install node unnecessary, and
6500 # we want to avoid doing a separate uninstall task in
6502 merge_nodes = [node for node in nodes \
6503 if node.operation == "merge"]
6505 selected_nodes = merge_nodes
6507 selected_nodes = nodes
6509 # For optimal merge order:
6510 # * Only pop one node.
6511 # * Removing a root node (node without a parent)
6512 # will not produce a leaf node, so avoid it.
6514 if mygraph.parent_nodes(node):
6515 # found a non-root node
6516 selected_nodes = [node]
6518 if not selected_nodes and \
6519 (accept_root_node or ignore_priority is None):
6520 # settle for a root node
6521 selected_nodes = [nodes[0]]
6523 if not selected_nodes:
6524 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6526 """Recursively gather a group of nodes that RDEPEND on
6527 eachother. This ensures that they are merged as a group
6528 and get their RDEPENDs satisfied as soon as possible."""
6529 def gather_deps(ignore_priority,
6530 mergeable_nodes, selected_nodes, node):
6531 if node in selected_nodes:
6533 if node not in mergeable_nodes:
6535 if node == replacement_portage and \
6536 mygraph.child_nodes(node,
6537 ignore_priority=DepPriority.MEDIUM_SOFT):
6538 # Make sure that portage always has all of it's
6539 # RDEPENDs installed first.
6541 selected_nodes.add(node)
6542 for child in mygraph.child_nodes(node,
6543 ignore_priority=ignore_priority):
6544 if not gather_deps(ignore_priority,
6545 mergeable_nodes, selected_nodes, child):
6548 mergeable_nodes = set(nodes)
6549 if prefer_asap and asap_nodes:
6551 for ignore_priority in xrange(DepPriority.SOFT,
6552 DepPriority.MEDIUM_SOFT + 1):
6554 if nodes is not asap_nodes and \
6555 not accept_root_node and \
6556 not mygraph.parent_nodes(node):
6558 selected_nodes = set()
6559 if gather_deps(ignore_priority,
6560 mergeable_nodes, selected_nodes, node):
6563 selected_nodes = None
6567 # If any nodes have been selected here, it's always
6568 # possible that anything up to a MEDIUM_SOFT priority
6569 # relationship has been ignored. This state is recorded
6570 # in ignore_priority so that relevant nodes will be
6571 # added to asap_nodes when appropriate.
6573 ignore_priority = DepPriority.MEDIUM_SOFT
6575 if prefer_asap and asap_nodes and not selected_nodes:
6576 # We failed to find any asap nodes to merge, so ignore
6577 # them for the next iteration.
6581 if not selected_nodes and not accept_root_node:
6582 # Maybe there are only root nodes left, so accept them
6583 # for the next iteration.
6584 accept_root_node = True
6587 if selected_nodes and ignore_priority > DepPriority.SOFT:
6588 # Try to merge ignored medium deps as soon as possible.
6589 for node in selected_nodes:
6590 children = set(mygraph.child_nodes(node))
6591 soft = children.difference(
6592 mygraph.child_nodes(node,
6593 ignore_priority=DepPriority.SOFT))
6594 medium_soft = children.difference(
6595 mygraph.child_nodes(node,
6596 ignore_priority=DepPriority.MEDIUM_SOFT))
6597 medium_soft.difference_update(soft)
6598 for child in medium_soft:
6599 if child in selected_nodes:
6601 if child in asap_nodes:
6603 asap_nodes.append(child)
6605 if selected_nodes and len(selected_nodes) > 1:
6606 if not isinstance(selected_nodes, list):
6607 selected_nodes = list(selected_nodes)
6608 selected_nodes.sort(cmp_circular_bias)
6610 if not selected_nodes and not myblocker_uninstalls.is_empty():
6611 # An Uninstall task needs to be executed in order to
6612 # avoid conflict if possible.
6613 min_parent_deps = None
6615 for task in myblocker_uninstalls.leaf_nodes():
6616 # Do some sanity checks so that system or world packages
6617 # don't get uninstalled inappropriately here (only really
6618 # necessary when --complete-graph has not been enabled).
6620 if task in ignored_uninstall_tasks:
6623 if task in scheduled_uninstalls:
6624 # It's been scheduled but it hasn't
6625 # been executed yet due to dependence
6626 # on installation of blocking packages.
6629 root_config = self.roots[task.root]
6630 inst_pkg = self._pkg_cache[
6631 ("installed", task.root, task.cpv, "nomerge")]
6633 if self.digraph.contains(inst_pkg):
6636 forbid_overlap = False
6637 heuristic_overlap = False
6638 for blocker in myblocker_uninstalls.parent_nodes(task):
6639 if blocker.eapi in ("0", "1"):
6640 heuristic_overlap = True
6641 elif blocker.atom.blocker.overlap.forbid:
6642 forbid_overlap = True
6644 if forbid_overlap and running_root == task.root:
6647 if heuristic_overlap and running_root == task.root:
6648 # Never uninstall sys-apps/portage or it's essential
6649 # dependencies, except through replacement.
6651 runtime_dep_atoms = \
6652 list(runtime_deps.iterAtomsForPackage(task))
6653 except portage.exception.InvalidDependString, e:
6654 portage.writemsg("!!! Invalid PROVIDE in " + \
6655 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6656 (task.root, task.cpv, e), noiselevel=-1)
6660 # Don't uninstall a runtime dep if it appears
6661 # to be the only suitable one installed.
6663 vardb = root_config.trees["vartree"].dbapi
6664 for atom in runtime_dep_atoms:
6665 other_version = None
6666 for pkg in vardb.match_pkgs(atom):
6667 if pkg.cpv == task.cpv and \
6668 pkg.metadata["COUNTER"] == \
6669 task.metadata["COUNTER"]:
6673 if other_version is None:
6679 # For packages in the system set, don't take
6680 # any chances. If the conflict can't be resolved
6681 # by a normal replacement operation then abort.
6684 for atom in root_config.sets[
6685 "system"].iterAtomsForPackage(task):
6688 except portage.exception.InvalidDependString, e:
6689 portage.writemsg("!!! Invalid PROVIDE in " + \
6690 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6691 (task.root, task.cpv, e), noiselevel=-1)
6697 # Note that the world check isn't always
6698 # necessary since self._complete_graph() will
6699 # add all packages from the system and world sets to the
6700 # graph. This just allows unresolved conflicts to be
6701 # detected as early as possible, which makes it possible
6702 # to avoid calling self._complete_graph() when it is
6703 # unnecessary due to blockers triggering an abortion.
6705 # For packages in the world set, go ahead an uninstall
6706 # when necessary, as long as the atom will be satisfied
6707 # in the final state.
6708 graph_db = self.mydbapi[task.root]
6711 for atom in root_config.sets[
6712 "world"].iterAtomsForPackage(task):
6714 for pkg in graph_db.match_pkgs(atom):
6722 except portage.exception.InvalidDependString, e:
6723 portage.writemsg("!!! Invalid PROVIDE in " + \
6724 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6725 (task.root, task.cpv, e), noiselevel=-1)
6731 # Check the deps of parent nodes to ensure that
6732 # the chosen task produces a leaf node. Maybe
6733 # this can be optimized some more to make the
6734 # best possible choice, but the current algorithm
6735 # is simple and should be near optimal for most
6738 for parent in mygraph.parent_nodes(task):
6739 parent_deps.update(mygraph.child_nodes(parent,
6740 ignore_priority=DepPriority.MEDIUM_SOFT))
6741 parent_deps.remove(task)
6742 if min_parent_deps is None or \
6743 len(parent_deps) < min_parent_deps:
6744 min_parent_deps = len(parent_deps)
6747 if uninst_task is not None:
6748 # The uninstall is performed only after blocking
6749 # packages have been merged on top of it. File
6750 # collisions between blocking packages are detected
6751 # and removed from the list of files to be uninstalled.
6752 scheduled_uninstalls.add(uninst_task)
6753 parent_nodes = mygraph.parent_nodes(uninst_task)
6755 # Reverse the parent -> uninstall edges since we want
6756 # to do the uninstall after blocking packages have
6757 # been merged on top of it.
6758 mygraph.remove(uninst_task)
6759 for blocked_pkg in parent_nodes:
6760 mygraph.add(blocked_pkg, uninst_task,
6761 priority=BlockerDepPriority.instance)
6762 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6763 scheduler_graph.add(blocked_pkg, uninst_task,
6764 priority=BlockerDepPriority.instance)
6767 # None of the Uninstall tasks are acceptable, so
6768 # the corresponding blockers are unresolvable.
6769 # We need to drop an Uninstall task here in order
6770 # to avoid the circular deps code path, but the
6771 # blocker will still be counted as an unresolved
6773 for node in myblocker_uninstalls.leaf_nodes():
6775 mygraph.remove(node)
6779 ignored_uninstall_tasks.add(node)
6782 # After dropping an Uninstall task, reset
6783 # the state variables for leaf node selection and
6784 # continue trying to select leaf nodes.
6786 accept_root_node = False
6789 if not selected_nodes:
6790 self._circular_deps_for_display = mygraph
6791 raise self._unknown_internal_error()
6793 # At this point, we've succeeded in selecting one or more nodes, so
6794 # it's now safe to reset the prefer_asap and accept_root_node flags
6795 # to their default states.
6797 accept_root_node = False
6799 mygraph.difference_update(selected_nodes)
6801 for node in selected_nodes:
6802 if isinstance(node, Package) and \
6803 node.operation == "nomerge":
6806 # Handle interactions between blockers
6807 # and uninstallation tasks.
6808 solved_blockers = set()
6810 if isinstance(node, Package) and \
6811 "uninstall" == node.operation:
6812 have_uninstall_task = True
6815 vardb = self.trees[node.root]["vartree"].dbapi
6816 previous_cpv = vardb.match(node.slot_atom)
6818 # The package will be replaced by this one, so remove
6819 # the corresponding Uninstall task if necessary.
6820 previous_cpv = previous_cpv[0]
6822 ("installed", node.root, previous_cpv, "uninstall")
6824 mygraph.remove(uninst_task)
6828 if uninst_task is not None and \
6829 uninst_task not in ignored_uninstall_tasks and \
6830 myblocker_uninstalls.contains(uninst_task):
6831 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6832 myblocker_uninstalls.remove(uninst_task)
6833 # Discard any blockers that this Uninstall solves.
6834 for blocker in blocker_nodes:
6835 if not myblocker_uninstalls.child_nodes(blocker):
6836 myblocker_uninstalls.remove(blocker)
6837 solved_blockers.add(blocker)
6839 retlist.append(node)
6841 if (isinstance(node, Package) and \
6842 "uninstall" == node.operation) or \
6843 (uninst_task is not None and \
6844 uninst_task in scheduled_uninstalls):
6845 # Include satisfied blockers in the merge list
6846 # since the user might be interested and also
6847 # it serves as an indicator that blocking packages
6848 # will be temporarily installed simultaneously.
6849 for blocker in solved_blockers:
6850 retlist.append(Blocker(atom=blocker.atom,
6851 root=blocker.root, eapi=blocker.eapi,
6854 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
6855 for node in myblocker_uninstalls.root_nodes():
6856 unsolvable_blockers.add(node)
6858 for blocker in unsolvable_blockers:
6859 retlist.append(blocker)
6861 # If any Uninstall tasks need to be executed in order
6862 # to avoid a conflict, complete the graph with any
6863 # dependencies that may have been initially
6864 # neglected (to ensure that unsafe Uninstall tasks
6865 # are properly identified and blocked from execution).
6866 if have_uninstall_task and \
6868 not unsolvable_blockers:
6869 self.myparams.add("complete")
6870 raise self._serialize_tasks_retry("")
6872 if unsolvable_blockers and \
6873 not self._accept_blocker_conflicts():
6874 self._unsatisfied_blockers_for_display = unsolvable_blockers
6875 self._serialized_tasks_cache = retlist[:]
6876 self._scheduler_graph = scheduler_graph
6877 raise self._unknown_internal_error()
6879 if self._slot_collision_info and \
6880 not self._accept_blocker_conflicts():
6881 self._serialized_tasks_cache = retlist[:]
6882 self._scheduler_graph = scheduler_graph
6883 raise self._unknown_internal_error()
6885 return retlist, scheduler_graph
6887 def _show_circular_deps(self, mygraph):
# Report a circular-dependency panic: prune nodes that cannot be part
# of a cycle, force a verbose --tree display of the remaining cycle
# members, and print an explanatory error with a debug dump of the graph.
# NOTE(review): this chunk is an elided numbered listing — original
# lines 6892, 6895-6896, 6901, 6905 and 6907-6908 are missing here, so
# the root-node pruning loop and the leaf-selection branch are only
# partially visible; confirm against the full emerge source.
6888 # No leaf nodes are available, so we have a circular
6889 # dependency panic situation. Reduce the noise level to a
6890 # minimum via repeated elimination of root nodes since they
6891 # have no parents and thus can not be part of a cycle.
6893 root_nodes = mygraph.root_nodes(
6894 ignore_priority=DepPriority.MEDIUM_SOFT)
# presumably executed repeatedly until no root nodes remain — the loop
# header is among the elided lines (TODO confirm)
6897 mygraph.difference_update(root_nodes)
6898 # Display the USE flags that are enabled on nodes that are part
6899 # of dependency cycles in case that helps the user decide to
6900 # disable some of them.
6902 tempgraph = mygraph.copy()
6903 while not tempgraph.empty():
6904 nodes = tempgraph.leaf_nodes()
# fallback to an arbitrary ordered node — the guarding branch header
# (for the no-leaf case) is elided from this view
6906 node = tempgraph.order[0]
6909 display_order.append(node)
6910 tempgraph.remove(node)
6911 display_order.reverse()
# Force verbose tree output regardless of the user's --quiet/--verbose.
6912 self.myopts.pop("--quiet", None)
6913 self.myopts.pop("--verbose", None)
6914 self.myopts["--tree"] = True
6915 portage.writemsg("\n\n", noiselevel=-1)
6916 self.display(display_order)
6917 prefix = colorize("BAD", " * ")
6918 portage.writemsg("\n", noiselevel=-1)
6919 portage.writemsg(prefix + "Error: circular dependencies:\n",
6921 portage.writemsg("\n", noiselevel=-1)
6922 mygraph.debug_print()
6923 portage.writemsg("\n", noiselevel=-1)
6924 portage.writemsg(prefix + "Note that circular dependencies " + \
6925 "can often be avoided by temporarily\n", noiselevel=-1)
6926 portage.writemsg(prefix + "disabling USE flags that trigger " + \
6927 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """Display the cached merge list unless it has already been shown.

    Shows nothing when no serialized task list exists yet, or when the
    previously displayed list equals the cache (in either orientation,
    since --tree output is rendered in reversed order).
    """
    tasks = self._serialized_tasks_cache
    if tasks is None:
        # Nothing has been serialized yet; nothing to show.
        return
    shown = self._displayed_list
    if shown and (shown == tasks or shown == list(reversed(tasks))):
        # This exact list (possibly reversed) is already on screen.
        return
    display_list = tasks[:]
    if "--tree" in self.myopts:
        # Tree view prints dependencies root-last, so flip the order.
        display_list.reverse()
    self.display(display_list)
def _show_unsatisfied_blockers(self, blockers):
    """Print an error explaining that the displayed merge list contains
    mutually blocking packages, preceded by the merge list itself.

    @param blockers: the unsatisfied blockers (displayed indirectly as
        part of the merge list shown by _show_merge_list())
    """
    from textwrap import wrap
    self._show_merge_list()
    msg = ("Error: The above package list contains "
        "packages which cannot be installed "
        "at the same time on the same system.")
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    for chunk in wrap(msg, 70):
        portage.writemsg("%s%s\n" % (prefix, chunk), noiselevel=-1)
    if "--quiet" not in self.myopts:
        # Point the user at the blocker-handling documentation.
        show_blocker_docs_link()
6953 def display(self, mylist, favorites=[], verbosity=None):
6955 # This is used to prevent display_problems() from
6956 # redundantly displaying this exact same merge list
6957 # again via _show_merge_list().
6958 self._displayed_list = mylist
6960 if verbosity is None:
6961 verbosity = ("--quiet" in self.myopts and 1 or \
6962 "--verbose" in self.myopts and 3 or 2)
6963 favorites_set = InternalPackageSet(favorites)
6964 oneshot = "--oneshot" in self.myopts or \
6965 "--onlydeps" in self.myopts
6966 columns = "--columns" in self.myopts
6971 counters = PackageCounters()
6973 if verbosity == 1 and "--verbose" not in self.myopts:
6974 def create_use_string(*args):
6977 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
6979 is_new, reinst_flags,
6980 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
6981 alphabetical=("--alphabetical" in self.myopts)):
6989 cur_iuse = set(cur_iuse)
6990 enabled_flags = cur_iuse.intersection(cur_use)
6991 removed_iuse = set(old_iuse).difference(cur_iuse)
6992 any_iuse = cur_iuse.union(old_iuse)
6993 any_iuse = list(any_iuse)
6995 for flag in any_iuse:
6998 reinst_flag = reinst_flags and flag in reinst_flags
6999 if flag in enabled_flags:
7001 if is_new or flag in old_use and \
7002 (all_flags or reinst_flag):
7003 flag_str = red(flag)
7004 elif flag not in old_iuse:
7005 flag_str = yellow(flag) + "%*"
7006 elif flag not in old_use:
7007 flag_str = green(flag) + "*"
7008 elif flag in removed_iuse:
7009 if all_flags or reinst_flag:
7010 flag_str = yellow("-" + flag) + "%"
7013 flag_str = "(" + flag_str + ")"
7014 removed.append(flag_str)
7017 if is_new or flag in old_iuse and \
7018 flag not in old_use and \
7019 (all_flags or reinst_flag):
7020 flag_str = blue("-" + flag)
7021 elif flag not in old_iuse:
7022 flag_str = yellow("-" + flag)
7023 if flag not in iuse_forced:
7025 elif flag in old_use:
7026 flag_str = green("-" + flag) + "*"
7028 if flag in iuse_forced:
7029 flag_str = "(" + flag_str + ")"
7031 enabled.append(flag_str)
7033 disabled.append(flag_str)
7036 ret = " ".join(enabled)
7038 ret = " ".join(enabled + disabled + removed)
7040 ret = '%s="%s" ' % (name, ret)
7043 repo_display = RepoDisplay(self.roots)
7047 mygraph = self.digraph.copy()
7049 # If there are any Uninstall instances, add the corresponding
7050 # blockers to the digraph (useful for --tree display).
7052 executed_uninstalls = set(node for node in mylist \
7053 if isinstance(node, Package) and node.operation == "unmerge")
7055 for uninstall in self._blocker_uninstalls.leaf_nodes():
7056 uninstall_parents = \
7057 self._blocker_uninstalls.parent_nodes(uninstall)
7058 if not uninstall_parents:
7061 # Remove the corresponding "nomerge" node and substitute
7062 # the Uninstall node.
7063 inst_pkg = self._pkg_cache[
7064 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7066 mygraph.remove(inst_pkg)
7071 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7073 inst_pkg_blockers = []
7075 # Break the Package -> Uninstall edges.
7076 mygraph.remove(uninstall)
7078 # Resolution of a package's blockers
7079 # depend on it's own uninstallation.
7080 for blocker in inst_pkg_blockers:
7081 mygraph.add(uninstall, blocker)
7083 # Expand Package -> Uninstall edges into
7084 # Package -> Blocker -> Uninstall edges.
7085 for blocker in uninstall_parents:
7086 mygraph.add(uninstall, blocker)
7087 for parent in self._blocker_parents.parent_nodes(blocker):
7088 if parent != inst_pkg:
7089 mygraph.add(blocker, parent)
7091 # If the uninstall task did not need to be executed because
7092 # of an upgrade, display Blocker -> Upgrade edges since the
7093 # corresponding Blocker -> Uninstall edges will not be shown.
7095 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7096 if upgrade_node is not None and \
7097 uninstall not in executed_uninstalls:
7098 for blocker in uninstall_parents:
7099 mygraph.add(upgrade_node, blocker)
7101 unsatisfied_blockers = []
7106 if isinstance(x, Blocker) and not x.satisfied:
7107 unsatisfied_blockers.append(x)
7110 if "--tree" in self.myopts:
7111 depth = len(tree_nodes)
7112 while depth and graph_key not in \
7113 mygraph.child_nodes(tree_nodes[depth-1]):
7116 tree_nodes = tree_nodes[:depth]
7117 tree_nodes.append(graph_key)
7118 display_list.append((x, depth, True))
7119 shown_edges.add((graph_key, tree_nodes[depth-1]))
7121 traversed_nodes = set() # prevent endless circles
7122 traversed_nodes.add(graph_key)
7123 def add_parents(current_node, ordered):
7125 # Do not traverse to parents if this node is an
7126 # an argument or a direct member of a set that has
7127 # been specified as an argument (system or world).
7128 if current_node not in self._set_nodes:
7129 parent_nodes = mygraph.parent_nodes(current_node)
7131 child_nodes = set(mygraph.child_nodes(current_node))
7132 selected_parent = None
7133 # First, try to avoid a direct cycle.
7134 for node in parent_nodes:
7135 if not isinstance(node, (Blocker, Package)):
7137 if node not in traversed_nodes and \
7138 node not in child_nodes:
7139 edge = (current_node, node)
7140 if edge in shown_edges:
7142 selected_parent = node
7144 if not selected_parent:
7145 # A direct cycle is unavoidable.
7146 for node in parent_nodes:
7147 if not isinstance(node, (Blocker, Package)):
7149 if node not in traversed_nodes:
7150 edge = (current_node, node)
7151 if edge in shown_edges:
7153 selected_parent = node
7156 shown_edges.add((current_node, selected_parent))
7157 traversed_nodes.add(selected_parent)
7158 add_parents(selected_parent, False)
7159 display_list.append((current_node,
7160 len(tree_nodes), ordered))
7161 tree_nodes.append(current_node)
7163 add_parents(graph_key, True)
7165 display_list.append((x, depth, True))
7166 mylist = display_list
7167 for x in unsatisfied_blockers:
7168 mylist.append((x, 0, True))
7170 last_merge_depth = 0
7171 for i in xrange(len(mylist)-1,-1,-1):
7172 graph_key, depth, ordered = mylist[i]
7173 if not ordered and depth == 0 and i > 0 \
7174 and graph_key == mylist[i-1][0] and \
7175 mylist[i-1][1] == 0:
7176 # An ordered node got a consecutive duplicate when the tree was
7180 if ordered and graph_key[-1] != "nomerge":
7181 last_merge_depth = depth
7183 if depth >= last_merge_depth or \
7184 i < len(mylist) - 1 and \
7185 depth >= mylist[i+1][1]:
7188 from portage import flatten
7189 from portage.dep import use_reduce, paren_reduce
7190 # files to fetch list - avoids counting a same file twice
7191 # in size display (verbose mode)
7194 # Use this set to detect when all the "repoadd" strings are "[0]"
7195 # and disable the entire repo display in this case.
7198 for mylist_index in xrange(len(mylist)):
7199 x, depth, ordered = mylist[mylist_index]
7203 portdb = self.trees[myroot]["porttree"].dbapi
7204 bindb = self.trees[myroot]["bintree"].dbapi
7205 vardb = self.trees[myroot]["vartree"].dbapi
7206 vartree = self.trees[myroot]["vartree"]
7207 pkgsettings = self.pkgsettings[myroot]
7210 indent = " " * depth
7212 if isinstance(x, Blocker):
7214 blocker_style = "PKG_BLOCKER_SATISFIED"
7215 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7217 blocker_style = "PKG_BLOCKER"
7218 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7220 counters.blocks += 1
7222 counters.blocks_satisfied += 1
7223 resolved = portage.key_expand(
7224 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7225 if "--columns" in self.myopts and "--quiet" in self.myopts:
7226 addl += " " + colorize(blocker_style, resolved)
7228 addl = "[%s %s] %s%s" % \
7229 (colorize(blocker_style, "blocks"),
7230 addl, indent, colorize(blocker_style, resolved))
7231 block_parents = self._blocker_parents.parent_nodes(x)
7232 block_parents = set([pnode[2] for pnode in block_parents])
7233 block_parents = ", ".join(block_parents)
7235 addl += colorize(blocker_style,
7236 " (\"%s\" is blocking %s)") % \
7237 (str(x.atom).lstrip("!"), block_parents)
7239 addl += colorize(blocker_style,
7240 " (is blocking %s)") % block_parents
7241 if isinstance(x, Blocker) and x.satisfied:
7246 blockers.append(addl)
7249 pkg_merge = ordered and pkg_status == "merge"
7250 if not pkg_merge and pkg_status == "merge":
7251 pkg_status = "nomerge"
7252 built = pkg_type != "ebuild"
7253 installed = pkg_type == "installed"
7255 metadata = pkg.metadata
7257 repo_name = metadata["repository"]
7258 if pkg_type == "ebuild":
7259 ebuild_path = portdb.findname(pkg_key)
7260 if not ebuild_path: # shouldn't happen
7261 raise portage.exception.PackageNotFound(pkg_key)
7262 repo_path_real = os.path.dirname(os.path.dirname(
7263 os.path.dirname(ebuild_path)))
7265 repo_path_real = portdb.getRepositoryPath(repo_name)
7266 pkg_use = list(pkg.use.enabled)
7268 restrict = flatten(use_reduce(paren_reduce(
7269 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7270 except portage.exception.InvalidDependString, e:
7271 if not pkg.installed:
7272 show_invalid_depstring_notice(x,
7273 pkg.metadata["RESTRICT"], str(e))
7277 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7278 "fetch" in restrict:
7281 counters.restrict_fetch += 1
7282 if portdb.fetch_check(pkg_key, pkg_use):
7285 counters.restrict_fetch_satisfied += 1
7287 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7288 #param is used for -u, where you still *do* want to see when something is being upgraded.
7291 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7292 if vardb.cpv_exists(pkg_key):
7293 addl=" "+yellow("R")+fetch+" "
7296 counters.reinst += 1
7297 elif pkg_status == "uninstall":
7298 counters.uninst += 1
7299 # filter out old-style virtual matches
7300 elif installed_versions and \
7301 portage.cpv_getkey(installed_versions[0]) == \
7302 portage.cpv_getkey(pkg_key):
7303 myinslotlist = vardb.match(pkg.slot_atom)
7304 # If this is the first install of a new-style virtual, we
7305 # need to filter out old-style virtual matches.
7306 if myinslotlist and \
7307 portage.cpv_getkey(myinslotlist[0]) != \
7308 portage.cpv_getkey(pkg_key):
7311 myoldbest = myinslotlist[:]
7313 if not portage.dep.cpvequal(pkg_key,
7314 portage.best([pkg_key] + myoldbest)):
7316 addl += turquoise("U")+blue("D")
7318 counters.downgrades += 1
7321 addl += turquoise("U") + " "
7323 counters.upgrades += 1
7325 # New slot, mark it new.
7326 addl = " " + green("NS") + fetch + " "
7327 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7329 counters.newslot += 1
7331 if "--changelog" in self.myopts:
7332 inst_matches = vardb.match(pkg.slot_atom)
7334 changelogs.extend(self.calc_changelog(
7335 portdb.findname(pkg_key),
7336 inst_matches[0], pkg_key))
7338 addl = " " + green("N") + " " + fetch + " "
7347 forced_flags = set()
7348 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7349 forced_flags.update(pkgsettings.useforce)
7350 forced_flags.update(pkgsettings.usemask)
7352 cur_use = [flag for flag in pkg.use.enabled \
7353 if flag in pkg.iuse.all]
7354 cur_iuse = sorted(pkg.iuse.all)
7356 if myoldbest and myinslotlist:
7357 previous_cpv = myoldbest[0]
7359 previous_cpv = pkg.cpv
7360 if vardb.cpv_exists(previous_cpv):
7361 old_iuse, old_use = vardb.aux_get(
7362 previous_cpv, ["IUSE", "USE"])
7363 old_iuse = list(set(
7364 filter_iuse_defaults(old_iuse.split())))
7366 old_use = old_use.split()
7373 old_use = [flag for flag in old_use if flag in old_iuse]
7375 use_expand = pkgsettings["USE_EXPAND"].lower().split()
7377 use_expand.reverse()
7378 use_expand_hidden = \
7379 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7381 def map_to_use_expand(myvals, forcedFlags=False,
7385 for exp in use_expand:
7388 for val in myvals[:]:
7389 if val.startswith(exp.lower()+"_"):
7390 if val in forced_flags:
7391 forced[exp].add(val[len(exp)+1:])
7392 ret[exp].append(val[len(exp)+1:])
7395 forced["USE"] = [val for val in myvals \
7396 if val in forced_flags]
7398 for exp in use_expand_hidden:
7404 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7405 # are the only thing that triggered reinstallation.
7406 reinst_flags_map = {}
7407 reinstall_for_flags = self._reinstall_nodes.get(pkg)
7408 reinst_expand_map = None
7409 if reinstall_for_flags:
7410 reinst_flags_map = map_to_use_expand(
7411 list(reinstall_for_flags), removeHidden=False)
7412 for k in list(reinst_flags_map):
7413 if not reinst_flags_map[k]:
7414 del reinst_flags_map[k]
7415 if not reinst_flags_map.get("USE"):
7416 reinst_expand_map = reinst_flags_map.copy()
7417 reinst_expand_map.pop("USE", None)
7418 if reinst_expand_map and \
7419 not set(reinst_expand_map).difference(
7421 use_expand_hidden = \
7422 set(use_expand_hidden).difference(
7425 cur_iuse_map, iuse_forced = \
7426 map_to_use_expand(cur_iuse, forcedFlags=True)
7427 cur_use_map = map_to_use_expand(cur_use)
7428 old_iuse_map = map_to_use_expand(old_iuse)
7429 old_use_map = map_to_use_expand(old_use)
7432 use_expand.insert(0, "USE")
7434 for key in use_expand:
7435 if key in use_expand_hidden:
7437 verboseadd += create_use_string(key.upper(),
7438 cur_iuse_map[key], iuse_forced[key],
7439 cur_use_map[key], old_iuse_map[key],
7440 old_use_map[key], is_new,
7441 reinst_flags_map.get(key))
7446 if pkg_type == "ebuild" and pkg_merge:
7448 myfilesdict = portdb.getfetchsizes(pkg_key,
7449 useflags=pkg_use, debug=self.edebug)
7450 except portage.exception.InvalidDependString, e:
7451 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7452 show_invalid_depstring_notice(x, src_uri, str(e))
7455 if myfilesdict is None:
7456 myfilesdict="[empty/missing/bad digest]"
7458 for myfetchfile in myfilesdict:
7459 if myfetchfile not in myfetchlist:
7460 mysize+=myfilesdict[myfetchfile]
7461 myfetchlist.append(myfetchfile)
7463 counters.totalsize += mysize
7464 verboseadd += format_size(mysize)
7467 # assign index for a previous version in the same slot
7468 has_previous = False
7469 repo_name_prev = None
7470 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7472 slot_matches = vardb.match(slot_atom)
7475 repo_name_prev = vardb.aux_get(slot_matches[0],
7478 # now use the data to generate output
7479 if pkg.installed or not has_previous:
7480 repoadd = repo_display.repoStr(repo_path_real)
7482 repo_path_prev = None
7484 repo_path_prev = portdb.getRepositoryPath(
7486 if repo_path_prev == repo_path_real:
7487 repoadd = repo_display.repoStr(repo_path_real)
7489 repoadd = "%s=>%s" % (
7490 repo_display.repoStr(repo_path_prev),
7491 repo_display.repoStr(repo_path_real))
7493 repoadd_set.add(repoadd)
7495 xs = [portage.cpv_getkey(pkg_key)] + \
7496 list(portage.catpkgsplit(pkg_key)[2:])
7503 if "COLUMNWIDTH" in self.settings:
7505 mywidth = int(self.settings["COLUMNWIDTH"])
7506 except ValueError, e:
7507 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7509 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7510 self.settings["COLUMNWIDTH"], noiselevel=-1)
7512 oldlp = mywidth - 30
7515 # Convert myoldbest from a list to a string.
7519 for pos, key in enumerate(myoldbest):
7520 key = portage.catpkgsplit(key)[2] + \
7521 "-" + portage.catpkgsplit(key)[3]
7522 if key[-3:] == "-r0":
7524 myoldbest[pos] = key
7525 myoldbest = blue("["+", ".join(myoldbest)+"]")
7528 root_config = self.roots[myroot]
7529 system_set = root_config.sets["system"]
7530 world_set = root_config.sets["world"]
7535 pkg_system = system_set.findAtomForPackage(pkg)
7536 pkg_world = world_set.findAtomForPackage(pkg)
7537 if not (oneshot or pkg_world) and \
7538 myroot == self.target_root and \
7539 favorites_set.findAtomForPackage(pkg):
7540 # Maybe it will be added to world now.
7541 if create_world_atom(pkg, favorites_set, root_config):
7543 except portage.exception.InvalidDependString:
7544 # This is reported elsewhere if relevant.
7547 def pkgprint(pkg_str):
7550 return colorize("PKG_MERGE_SYSTEM", pkg_str)
7552 return colorize("PKG_MERGE_WORLD", pkg_str)
7554 return colorize("PKG_MERGE", pkg_str)
7555 elif pkg_status == "uninstall":
7556 return colorize("PKG_UNINSTALL", pkg_str)
7559 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7561 return colorize("PKG_NOMERGE_WORLD", pkg_str)
7563 return colorize("PKG_NOMERGE", pkg_str)
7566 properties = flatten(use_reduce(paren_reduce(
7567 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7568 except portage.exception.InvalidDependString, e:
7569 if not pkg.installed:
7570 show_invalid_depstring_notice(pkg,
7571 pkg.metadata["PROPERTIES"], str(e))
7575 interactive = "interactive" in properties
7576 if interactive and pkg.operation == "merge":
7577 addl = colorize("WARN", "I") + addl[1:]
7579 counters.interactive += 1
7584 if "--columns" in self.myopts:
7585 if "--quiet" in self.myopts:
7586 myprint=addl+" "+indent+pkgprint(pkg_cp)
7587 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7588 myprint=myprint+myoldbest
7589 myprint=myprint+darkgreen("to "+x[1])
7593 myprint = "[%s] %s%s" % \
7594 (pkgprint(pkg_status.ljust(13)),
7595 indent, pkgprint(pkg.cp))
7597 myprint = "[%s %s] %s%s" % \
7598 (pkgprint(pkg.type_name), addl,
7599 indent, pkgprint(pkg.cp))
7600 if (newlp-nc_len(myprint)) > 0:
7601 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7602 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7603 if (oldlp-nc_len(myprint)) > 0:
7604 myprint=myprint+" "*(oldlp-nc_len(myprint))
7605 myprint=myprint+myoldbest
7606 myprint += darkgreen("to " + pkg.root)
7609 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7611 myprint = "[" + pkg_type + " " + addl + "] "
7612 myprint += indent + pkgprint(pkg_key) + " " + \
7613 myoldbest + darkgreen("to " + myroot)
7615 if "--columns" in self.myopts:
7616 if "--quiet" in self.myopts:
7617 myprint=addl+" "+indent+pkgprint(pkg_cp)
7618 myprint=myprint+" "+green(xs[1]+xs[2])+" "
7619 myprint=myprint+myoldbest
7623 myprint = "[%s] %s%s" % \
7624 (pkgprint(pkg_status.ljust(13)),
7625 indent, pkgprint(pkg.cp))
7627 myprint = "[%s %s] %s%s" % \
7628 (pkgprint(pkg.type_name), addl,
7629 indent, pkgprint(pkg.cp))
7630 if (newlp-nc_len(myprint)) > 0:
7631 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7632 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7633 if (oldlp-nc_len(myprint)) > 0:
7634 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7635 myprint += myoldbest
7638 myprint = "[%s] %s%s %s" % \
7639 (pkgprint(pkg_status.ljust(13)),
7640 indent, pkgprint(pkg.cpv),
7643 myprint = "[%s %s] %s%s %s" % \
7644 (pkgprint(pkg_type), addl, indent,
7645 pkgprint(pkg.cpv), myoldbest)
7647 if columns and pkg.operation == "uninstall":
7649 p.append((myprint, verboseadd, repoadd))
7651 if "--tree" not in self.myopts and \
7652 "--quiet" not in self.myopts and \
7653 not self._opts_no_restart.intersection(self.myopts) and \
7654 pkg.root == self._running_root.root and \
7655 portage.match_from_list(
7656 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7657 not vardb.cpv_exists(pkg.cpv) and \
7658 "--quiet" not in self.myopts:
7659 if mylist_index < len(mylist) - 1:
7660 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
7661 p.append(colorize("WARN", " then resume the merge."))
7664 show_repos = repoadd_set and repoadd_set != set(["0"])
7667 if isinstance(x, basestring):
7668 out.write("%s\n" % (x,))
7671 myprint, verboseadd, repoadd = x
7674 myprint += " " + verboseadd
7676 if show_repos and repoadd:
7677 myprint += " " + teal("[%s]" % repoadd)
7679 out.write("%s\n" % (myprint,))
7688 sys.stdout.write(str(repo_display))
7690 if "--changelog" in self.myopts:
7692 for revision,text in changelogs:
7693 print bold('*'+revision)
7694 sys.stdout.write(text)
7699 def display_problems(self):
# NOTE(review): elided listing — the docstring quote lines (7700, 7709)
# and the try/finally that saves and restores the real sys.stdout
# (7719-7722, 7725-7729) are missing from this view.
7701 Display problems with the dependency graph such as slot collisions.
7702 This is called internally by display() to show the problems _after_
7703 the merge list where it is most likely to be seen, but if display()
7704 is not going to be called then this method should be called explicitly
7705 to ensure that the user is notified of problems with the graph.
7707 All output goes to stderr, except for unsatisfied dependencies which
7708 go to stdout for parsing by programs such as autounmask.
7711 # Note that show_masked_packages() sends it's output to
7712 # stdout, and some programs such as autounmask parse the
7713 # output in cases when emerge bails out. However, when
7714 # show_masked_packages() is called for installed packages
7715 # here, the message is a warning that is more appropriate
7716 # to send to stderr, so temporarily redirect stdout to
7717 # stderr. TODO: Fix output code so there's a cleaner way
7718 # to redirect everything to stderr.
# Redirect stdout to stderr while _display_problems() runs; the
# restoration (finally block) is among the elided lines.
7723 sys.stdout = sys.stderr
7724 self._display_problems()
7730 # This goes to stdout for parsing by programs like autounmask.
7731 for pargs, kwargs in self._unsatisfied_deps_for_display:
7732 self._show_unsatisfied_dep(*pargs, **kwargs)
7734 def _display_problems(self):
# Write dependency-graph diagnostics to stderr: circular deps,
# unsatisfied blockers or slot collisions, world-file problems,
# missing/masked arguments, package.provided conflicts, and masked
# installed packages.
# NOTE(review): elided listing — several single lines (e.g. 7744,
# 7746, 7749, 7759-7761, 7776, 7779, 7781-7782, 7786-7787, 7798-7799,
# 7801, 7805, 7814, 7822) are missing, so some branch headers and
# initializations (arg_refs, msg, the elif for slot collisions) are
# not visible here.
7735 if self._circular_deps_for_display is not None:
7736 self._show_circular_deps(
7737 self._circular_deps_for_display)
7739 # The user is only notified of a slot conflict if
7740 # there are no unresolvable blocker conflicts.
7741 if self._unsatisfied_blockers_for_display is not None:
7742 self._show_unsatisfied_blockers(
7743 self._unsatisfied_blockers_for_display)
7745 self._show_slot_collision_notice()
7747 # TODO: Add generic support for "set problem" handlers so that
7748 # the below warnings aren't special cases for world only.
7750 if self._missing_args:
7751 world_problems = False
7752 if "world" in self._sets:
7753 # Filter out indirect members of world (from nested sets)
7754 # since only direct members of world are desired here.
7755 world_set = self.roots[self.target_root].sets["world"]
7756 for arg, atom in self._missing_args:
7757 if arg.name == "world" and atom in world_set:
7758 world_problems = True
# Suggest emaint when a direct world entry could not be satisfied.
7762 sys.stderr.write("\n!!! Problems have been " + \
7763 "detected with your world file\n")
7764 sys.stderr.write("!!! Please run " + \
7765 green("emaint --check world")+"\n\n")
7767 if self._missing_args:
7768 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7769 " Ebuilds for the following packages are either all\n")
7770 sys.stderr.write(colorize("BAD", "!!!") + \
7771 " masked or don't exist:\n")
7772 sys.stderr.write(" ".join(str(atom) for arg, atom in \
7773 self._missing_args) + "\n")
7775 if self._pprovided_args:
# Group package.provided conflicts by (argument, atom) and record
# which parent sets pulled each one in (arg_refs setup is elided).
7777 for arg, atom in self._pprovided_args:
7778 if isinstance(arg, SetArg):
7780 arg_atom = (atom, atom)
7783 arg_atom = (arg.arg, atom)
7784 refs = arg_refs.setdefault(arg_atom, [])
7785 if parent not in refs:
7788 msg.append(bad("\nWARNING: "))
7789 if len(self._pprovided_args) > 1:
7790 msg.append("Requested packages will not be " + \
7791 "merged because they are listed in\n")
7793 msg.append("A requested package will not be " + \
7794 "merged because it is listed in\n")
7795 msg.append("package.provided:\n\n")
7796 problems_sets = set()
7797 for (arg, atom), refs in arg_refs.iteritems():
7800 problems_sets.update(refs)
7802 ref_string = ", ".join(["'%s'" % name for name in refs])
7803 ref_string = " pulled in by " + ref_string
7804 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7806 if "world" in problems_sets:
7807 msg.append("This problem can be solved in one of the following ways:\n\n")
7808 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
7809 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
7810 msg.append(" C) Remove offending entries from package.provided.\n\n")
7811 msg.append("The best course of action depends on the reason that an offending\n")
7812 msg.append("package.provided entry exists.\n\n")
7813 sys.stderr.write("".join(msg))
# Warn about installed packages that are now masked.
7815 masked_packages = []
7816 for pkg in self._masked_installed:
7817 root_config = pkg.root_config
7818 pkgsettings = self.pkgsettings[pkg.root]
7819 mreasons = get_masking_status(pkg, pkgsettings, root_config)
7820 masked_packages.append((root_config, pkgsettings,
7821 pkg.cpv, pkg.metadata, mreasons))
7823 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7824 " The following installed packages are masked:\n")
7825 show_masked_packages(masked_packages)
7829 def calc_changelog(self,ebuildpath,current,next):
# Collect the ChangeLog entries between the installed version
# (current) and the version about to be merged (next), for
# --changelog output.  Apparently returns a list of
# (revision, entry-text) pairs — the return statements themselves
# (lines 7860-7861) are elided; TODO confirm.
# NOTE(review): elided listing — lines 7831 (early return), 7837,
# 7839 (try:), 7843-7844 (except handler) and 7852/7857-7858
# (loop breaks / else) are missing from this view.
7830 if ebuildpath == None or not os.path.exists(ebuildpath):
# Normalize versions to "version-revision" form, stripping the
# implicit -r0 suffix so they match ChangeLog headers.
7832 current = '-'.join(portage.catpkgsplit(current)[1:])
7833 if current.endswith('-r0'):
7834 current = current[:-3]
7835 next = '-'.join(portage.catpkgsplit(next)[1:])
7836 if next.endswith('-r0'):
7838 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
7840 changelog = open(changelogpath).read()
7841 except SystemExit, e:
7842 raise # Needed else can't exit
7845 divisions = self.find_changelog_tags(changelog)
7846 #print 'XX from',current,'to',next
7847 #for div,text in divisions: print 'XX',div
7848 # skip entries for all revisions above the one we are about to emerge
7849 for i in range(len(divisions)):
7850 if divisions[i][0]==next:
7851 divisions = divisions[i:]
7853 # find out how many entries we are going to display
7854 for i in range(len(divisions)):
7855 if divisions[i][0]==current:
7856 divisions = divisions[:i]
7859 # couldnt find the current revision in the list. display nothing
def find_changelog_tags(self,changelog):
	"""Split a ChangeLog text into (release, text) divisions.

	Scans for "*" release-header lines; each division pairs the release
	version (with any ".ebuild" or "-r0" suffix stripped) with the
	ChangeLog text belonging to it.
	"""
	match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
	# No further header found: the remainder belongs to the last release.
	if release is not None:
		divs.append((release,changelog))
	if release is not None:
		divs.append((release,changelog[:match.start()]))
	changelog = changelog[match.end():]
	release = match.group(1)
	if release.endswith('.ebuild'):
		release = release[:-7]
	if release.endswith('-r0'):
		release = release[:-3]
def saveNomergeFavorites(self):
	"""Find atoms in favorites that are not in the mergelist and add them
	to the world file if necessary."""
	# These options imply that nothing is being merged permanently,
	# so the world file must not be modified.
	for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
		"--oneshot", "--onlydeps", "--pretend"):
		if x in self.myopts:
	root_config = self.roots[self.target_root]
	world_set = root_config.sets["world"]

	world_locked = False
	if hasattr(world_set, "lock"):

	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk

	args_set = self._sets["args"]
	portdb = self.trees[self.target_root]["porttree"].dbapi
	added_favorites = set()
	for x in self._set_nodes:
		pkg_type, root, pkg_key, pkg_status = x
		# Only packages that stay unmerged are candidates for
		# recording in the world file.
		if pkg_status != "nomerge":
		myfavkey = create_world_atom(x, args_set, root_config)
		if myfavkey in added_favorites:
		added_favorites.add(myfavkey)
		except portage.exception.InvalidDependString, e:
			writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
				(pkg_key, str(e)), noiselevel=-1)
			writemsg("!!! see '%s'\n\n" % os.path.join(
				root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
	# Record nested sets (besides args/world) that are world candidates.
	for k in self._sets:
		if k in ("args", "world") or not root_config.sets[k].world_candidate:
		all_added.append(SETPREFIX + k)
	all_added.extend(added_favorites)
	print ">>> Recording %s in \"world\" favorites file..." % \
		colorize("INFORM", str(a))
	world_set.update(all_added)
def loadResumeCommand(self, resume_data, skip_masked=False):
	"""
	Add a resume command to the graph and validate it in the process. This
	will raise a PackageNotFound exception if a package is not available.
	"""

	# Validate the shape of the resume data before trusting it.
	if not isinstance(resume_data, dict):

	mergelist = resume_data.get("mergelist")
	if not isinstance(mergelist, list):

	fakedb = self.mydbapi

	serialized_tasks = []
		# Each mergelist entry must be [pkg_type, root, cpv, action].
		if not (isinstance(x, list) and len(x) == 4):
		pkg_type, myroot, pkg_key, action = x
		if pkg_type not in self.pkg_tree_map:
		if action != "merge":
		tree_type = self.pkg_tree_map[pkg_type]
		mydb = trees[myroot][tree_type].dbapi
		db_keys = list(self._trees_orig[myroot][
			tree_type].dbapi._aux_cache_keys)
		metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
		# It does not exist or it is corrupt.
		if action == "uninstall":
		raise portage.exception.PackageNotFound(pkg_key)
		installed = action == "uninstall"
		built = pkg_type != "ebuild"
		root_config = self.roots[myroot]
		pkg = Package(built=built, cpv=pkg_key,
			installed=installed, metadata=metadata,
			operation=action, root_config=root_config,
		if pkg_type == "ebuild":
			# Recalculate USE for ebuilds from current settings.
			pkgsettings = self.pkgsettings[myroot]
			pkgsettings.setcpv(pkg)
			pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
		self._pkg_cache[pkg] = pkg

		root_config = self.roots[pkg.root]
		# Collect masked merge tasks; they may later be converted into
		# an UnsatisfiedResumeDep exception below.
		if "merge" == pkg.operation and \
			not visible(root_config.settings, pkg):
			masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
			self._unsatisfied_deps_for_display.append(
				((pkg.root, "="+pkg.cpv), {"myparent":None}))

		fakedb[myroot].cpv_inject(pkg)
		serialized_tasks.append(pkg)
		self.spinner.update()

	if self._unsatisfied_deps_for_display:

	if not serialized_tasks or "--nodeps" in self.myopts:
		self._serialized_tasks_cache = serialized_tasks
		self._scheduler_graph = self.digraph
		# Select packages from the pre-built graph rather than the
		# package databases while reconstructing the resume graph.
		self._select_package = self._select_pkg_from_graph
		self.myparams.add("selective")

		favorites = resume_data.get("favorites")
		args_set = self._sets["args"]
		if isinstance(favorites, list):
			args = self._load_favorites(favorites)

		for task in serialized_tasks:
			if isinstance(task, Package) and \
				task.operation == "merge":
				if not self._add_pkg(task, None):

		# Packages for argument atoms need to be explicitly
		# added via _add_pkg() so that they are included in the
		# digraph (needed at least for --tree display).
			for atom in arg.set:
				pkg, existing_node = self._select_package(
					arg.root_config.root, atom)
				if existing_node is None and \
					if not self._add_pkg(pkg, Dependency(atom=atom,
						root=pkg.root, parent=arg)):

		# Allow unsatisfied deps here to avoid showing a masking
		# message for an unsatisfied dep that isn't necessarily
		if not self._create_graph(allow_unsatisfied=True):

		if masked_tasks or self._unsatisfied_deps:
			# This probably means that a required package
			# was dropped via --skipfirst. It makes the
			# resume list invalid, so convert it to a
			# UnsatisfiedResumeDep exception.
			raise self.UnsatisfiedResumeDep(self,
				masked_tasks + self._unsatisfied_deps)
		self._serialized_tasks_cache = None
		except self._unknown_internal_error:
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.
	"""
	root_config = self.roots[self.target_root]
	getSetAtoms = root_config.setconfig.getSetAtoms
	sets = root_config.sets
		# Ignore anything that isn't a string (corrupt resume data).
		if not isinstance(x, basestring):
		if x in ("system", "world"):
		if x.startswith(SETPREFIX):
			s = x[len(SETPREFIX):]
			# Recursively expand sets so that containment tests in
			# self._get_parent_sets() properly match atoms in nested
			# sets (like if world contains system).
			expanded_set = InternalPackageSet(
				initial_atoms=getSetAtoms(s))
			self._sets[s] = expanded_set
			args.append(SetArg(arg=x, set=expanded_set,
				root_config=root_config))
		if not portage.isvalidatom(x):
		args.append(AtomArg(arg=x, atom=x,
			root_config=root_config))

	# Create the "args" package set from atoms and
	# packages given as arguments.
	args_set = self._sets["args"]
		if not isinstance(arg, (AtomArg, PackageArg)):
		if myatom in args_set:
		args_set.add(myatom)
	self._set_atoms.update(chain(*self._sets.itervalues()))
	atom_arg_map = self._atom_arg_map
		# Map (atom, root) -> list of args that pulled the atom in.
		for atom in arg.set:
			atom_key = (atom, arg.root_config.root)
			refs = atom_arg_map.get(atom_key)
				atom_arg_map[atom_key] = refs
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so handlers can inspect
		# the unsatisfied dependencies.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph."""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _dep_check_composite_db(portage.dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	"""
	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# Cache match() results per atom and map cpv -> Package for
		# later aux_get() lookups.
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def match(self, atom):
		# Serve repeated queries for the same atom from the cache.
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		# The highest match's slot has been handled; resolve a package
		# for each remaining slot via a slot-specific atom.
		slots.remove(pkg.metadata["SLOT"])
		slot_atom = "%s:%s" % (atom_cp, slots.pop())
		pkg, existing = self._depgraph._select_package(
			self._root, slot_atom)
		if not self._visible(pkg):
		self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# Installed packages are hidden unless running in selective mode.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
		except (StopIteration, portage.exception.InvalidDependString):
			self._depgraph.pkgsettings[pkg.root], pkg):
		except portage.exception.InvalidDependString:

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		"""
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			# Prefer a single non-virtual match when possible.
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise portage.exception.AmbiguousPackageName(
				[portage.dep_getkey(x) for x in expanded_atoms])
		atom = expanded_atoms[0]
		# No category matched: probe the "null" category, then check
		# whether the bare package name corresponds to a virtual.
		null_atom = insert_category_into_atom(atom, "null")
		null_cp = portage.dep_getkey(null_atom)
		cat, atom_pn = portage.catsplit(null_cp)
		virts_p = root_config.settings.get_virts_p().get(atom_pn)
		# Allow the resolver to choose which virtual.
		atom = insert_category_into_atom(atom, "virtual")
		atom = insert_category_into_atom(atom, "null")

	def aux_get(self, cpv, wants):
		# cpv must have been returned by a prior match() call.
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
class _package_cache(dict):
	"""Cache of Package instances; visible packages are additionally
	injected into the root's visible_pkgs database as they are cached."""
	def __init__(self, depgraph):
		self._depgraph = depgraph

	def __setitem__(self, k, v):
		dict.__setitem__(self, k, v)
		root_config = self._depgraph.roots[v.root]
		# Installed packages with missing keywords are deliberately
		# excluded from the visible package database.
		if visible(root_config.settings, v) and \
			not (v.installed and \
			v.root_config.settings._getMissingKeywords(v.cpv, v.metadata)):
			root_config.visible_pkgs.cpv_inject(v)
		except portage.exception.InvalidDependString:
class RepoDisplay(object):
	"""Maps repository paths to short display indices ("[0]", "[1]", ...)
	for use in merge-list output."""
	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		# Collect PORTDIR plus all PORTDIR_OVERLAY entries from every root.
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Keep realpath() forms for comparison with on-disk locations.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		"""Return the display index string for the given real repo path."""
		real_index = self._repo_paths_real.index(repo_path_real)
		# NOTE(review): list.index() raises ValueError rather than
		# returning -1 — confirm a sentinel assignment (not visible
		# here) makes this comparison reachable.
		if real_index == -1:
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		# Assign display indices in first-seen order.
		index = shown_repos.get(repo_path)
		index = len(shown_repos)
		shown_repos[repo_path] = index

		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the path -> index map into an index-ordered list.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""Tallies for the merge-list summary (counts of upgrades,
	downgrades, new installs, new slots, reinstalls, uninstalls,
	blocks and fetch restrictions) plus a formatted summary string."""
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		# Assemble e.g. "Total: 5 packages (2 upgrades, 1 new, ...)".
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
			details.append("%s reinstall" % self.reinst)
			details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
			myoutput.append("\nConflict: %s block" % \
				myoutput.append("s")
		if self.blocks_satisfied < self.blocks:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollConstants(object):
	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""
	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# Prefer the select module's value for each constant, falling back
	# to a locally computed value when the platform lacks it.
	locals()[k] = getattr(select, k, v)
class PollSelectAdapter(PollConstants):
	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""
		self._registered = {}
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
			"register expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		self._registered[fd] = eventmask
		# Invalidate the cached select() argument lists.
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
			"poll expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			# Copy before mutating so the cached list stays timeout-free.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#  poll    | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#  select  | seconds      | omitted
			# ---------|--------------|------------------------------
			if timeout is not None and timeout < 0:
			if timeout is not None:
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# Only readability is reported, hence "Only POLLIN is supported".
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	"""FIFO queue of asynchronous tasks, started up to max_jobs at a time."""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Jump the queue: this task is started before anything waiting.
		self._task_queue.appendleft(task)

		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().

		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# max_jobs is True means unlimited concurrency.
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			running_tasks.add(task)
			task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:

		self._task_queue.clear()
		# Detach exit listeners from anything still running.
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		# Truthy while any task is queued or running (Python 2 protocol).
		return bool(self._task_queue or self.running_tasks)

		return len(self._task_queue) + len(self.running_tasks)
# Cached result of can_poll_device(); None means "not probed yet".
_can_poll_device = None
def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@returns: True if poll() on a device succeeds, False otherwise.
	"""

	global _can_poll_device
	if _can_poll_device is not None:
		# Return the cached result of a previous probe.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	dev_null = open('/dev/null', 'rb')
	_can_poll_device = False
	return _can_poll_device

	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			invalid_request = True

	# POLLNVAL means the kernel rejected the request, so poll()
	# cannot be used on devices on this platform.
	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Return a poll-compatible object: select.poll() when poll() works on
	devices on this platform, otherwise a PollSelectAdapter fallback
	(used when there is no poll() implementation or it is broken).
	"""
	if not can_poll_device():
		return PollSelectAdapter()
	return select.poll()
class PollScheduler(object):
	"""Base class providing a poll()-driven event loop for schedulers."""

	class _sched_iface_class(SlotObject):
		# Narrow interface handed to tasks so they can interact with
		# the scheduler without holding a full reference to it.
		__slots__ = ("register", "schedule", "unregister")

		self._max_load = None
		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		self._poll_obj = create_poll_instance()
		self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
		self._scheduling = True
		return self._schedule_tasks()
		self._scheduling = False

	def _running_job_count(self):

	def _can_add_job(self):
		max_jobs = self._max_jobs
		max_load = self._max_load

		# max_jobs is True means unlimited jobs.
		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:

		# Only consult the load average when concurrency is enabled
		# and at least one job is already running.
		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
			avg1, avg5, avg15 = os.getloadavg()
			except (AttributeError, OSError), e:
				writemsg("!!! getloadavg() failed: %s\n" % (e,),
			if avg1 >= max_load:

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
		if timeout is None and \
			not self._poll_event_handlers:
			raise StopIteration(
				"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
		self._poll_event_queue.extend(self._poll_obj.poll(timeout))
		except select.error, e:
			writemsg_level("\n!!! select error: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			if timeout is not None:

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
		return self._poll_event_queue.pop()

	def _poll_loop(self):

		event_handlers = self._poll_event_handlers
		event_handled = False

		while event_handlers:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers

		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:

		while event_handlers and self._poll_event_queue:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			except StopIteration:

		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		@return: A unique registration id, for use in schedule() or
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)

	def _unregister(self, reg_id):
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is no longer registered
		@param wait_id: a task id to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		# Accept a single id as well as a collection of ids.
		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])

		while wait_ids.intersection(handler_ids):
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):

	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:

		self._max_jobs = max_jobs
		self._max_load = max_load
		# Interface handed to tasks for fd registration / scheduling.
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._schedule_listeners = []

		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

		# Drain the queues, then wait for in-flight jobs to finish.
		while self._schedule():

		while self._running_job_count():

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			if not self._start_next_job(n):

		for q in self._queues:

	def _running_job_count(self):
		# Sum running tasks across all managed queues.
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count

	def _start_next_job(self, n=1):
		for q in self._queues:
			# Detect how many jobs this queue actually started by
			# comparing its running-task count before and after.
			initial_job_count = len(q.running_tasks)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
		return started_count
class TaskScheduler(object):
	"""
	A simple way to handle scheduling of AsynchrousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# Delegate queueing to SequentialTaskQueue and scheduling to
		# QueueScheduler; the scheduler's run() is exposed directly.
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self.sched_iface = self._scheduler.sched_iface
		self.run = self._scheduler.run
		self._scheduler.add(self._queue)

	def add(self, task):
		self._queue.add(task)

		self._scheduler.schedule()
class JobStatusDisplay(object):
	"""Single-line job status display ("Jobs: N of M complete ...")
	updated in place on a tty, or appended line-by-line otherwise."""

	_bound_properties = ("curval", "failed", "running")
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {

	_termcap_name_map = {
		'carriage_return' : 'cr',

	def __init__(self, out=sys.stdout, quiet=False):
		# object.__setattr__ is used because the __setattr__ defined
		# below intercepts normal attribute assignment.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)

		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		# Fall back to hard-coded codes when termcap init fails.
		if not isatty or not self._init_term():
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)

	def _init_term(self):
		"""
		Initialize term control codes.
		@returns: True if term codes were successfully initialized,
		"""

		term_type = os.environ.get("TERM", "vt100")
		curses.setupterm(term_type, self.out.fileno())
		tigetstr = curses.tigetstr
		except curses.error:
		if tigetstr is None:

		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)

	def _format_msg(self, msg):
		return ">>> %s" % msg

		# Erase the current line: carriage return plus clear-to-eol.
		self._term_codes['carriage_return'] + \
		self._term_codes['clr_eol'])
		self._displayed = False

	def _display(self, line):
		self.out.write(line)
		self._displayed = True

	def _update(self, msg):

		if not self._isatty:
			# Non-tty output: just append a complete line.
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			self._displayed = True

		self._display(self._format_msg(msg))

	def displayMessage(self, msg):

		was_displayed = self._displayed

		if self._isatty and self._displayed:

		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False
		self._changed = True

		for name in self._bound_properties:
			object.__setattr__(self, name, 0)

		self.out.write(self._term_codes['newline'])
		self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		if value == old_value:
		object.__setattr__(self, name, value)
		# Changing a bound property marks the display dirty.
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		self._changed = True

	def _load_avg_str(self):
		avg = os.getloadavg()
		except (AttributeError, OSError), e:
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

		"""
		Display status on stdout, but only if something has
		changed since the last call.
		"""
		current_time = time.time()
		time_delta = current_time - self._last_display_time
		# Rate-limit tty redraws to avoid flooding the terminal.
		if self._displayed and \
			if not self._isatty:
			if time_delta < self._min_display_latency:

		self._last_display_time = current_time
		self._changed = False
		self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render twice: once with color codes and once plain (the plain
		# copy is used for width measurement and truncation).
		color_output = StringIO.StringIO()
		plain_output = StringIO.StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)

		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		f.add_literal_data(" complete")
			f.add_literal_data(", ")
			f.push_style(number_style)
			f.add_literal_data(running_str)
			f.add_literal_data(" running")
			f.add_literal_data(", ")
			f.push_style(number_style)
			f.add_literal_data(failed_str)
			f.add_literal_data(" failed")

		# Pad the jobs column so the load average lines up.
		padding = self._jobs_column_width - len(plain_output.getvalue())
		f.add_literal_data(padding * " ")

		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			self._update(plain_output[:self.width])
		self._update(color_output.getvalue())

		xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):
	# NOTE(review): only part of this class is visible in this excerpt.

	# With any of these options, installed-blocker detection is skipped
	# entirely (see _find_blockers_with_lock).
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground (non-background) operation; also
	# used in _main_loop to cap concurrency at a single job.
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# With these options emerge never re-execs itself after a portage
	# self-upgrade (see _restart_if_necessary / _is_restart_scheduled).
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that must not leak into an automatic "--resume"
	# re-invocation (filtered in _restart_if_necessary).
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log used to serialize all (pre)fetch output.
	_fetch_log = "/var/log/emerge-fetch.log"
	class _iface_class(SlotObject):
		# Bundle of scheduler callbacks handed to running tasks; the
		# instance is built once as self._sched_iface.
		# NOTE(review): the tail of this __slots__ tuple is elided from
		# this excerpt (an "unregister" callback is also assigned where
		# the instance is constructed).
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",
	class _fetch_iface_class(SlotObject):
		# Fetch interface for tasks: the shared fetch log path plus a
		# scheduling callable (see _schedule_fetch).
		__slots__ = ("log_file", "schedule")
	# SlotDict holding one task queue per queue name; the queues
	# themselves (SequentialTaskQueue instances) are created later.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")
	class _build_opts_class(SlotObject):
		# Boolean build options, each mirrored from the corresponding
		# "--" command line flag (populated where the scheduler is
		# constructed: slot "fetch_all_uri" <- "--fetch-all-uri").
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")
	class _binpkg_opts_class(SlotObject):
		# Boolean binary-package options mirrored from command line flags.
		__slots__ = ("fetchonly", "getbinpkg", "pretend")
	class _pkg_count_class(SlotObject):
		# Merge progress counter: curval = merges performed so far,
		# maxval = total number of "merge" operations in the list.
		__slots__ = ("curval", "maxval")
	class _emerge_log_class(SlotObject):
		# Thin wrapper around emergelog() that honors the xterm_titles
		# flag.
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			"""Forward to emergelog(), dropping short_msg when xterm
			titles are disabled."""
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)
	class _failed_pkg(SlotObject):
		# Record of a failed build/install, kept for the end-of-run
		# summary and for locating the failure log.
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			# NOTE(review): the `self._root = root` assignment and the
			# `def allocate(self):` header appear to be elided from this
			# excerpt — the `return` below belongs to allocate(), not to
			# __init__.
			self._allocate = allocate
			self._deallocate = deallocate
			return self._allocate(self._root)
		def deallocate(self, settings):
			# Return a config instance to the pool.
			self._deallocate(settings)
	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		"""Set up all scheduler state: option holders, task queues,
		per-root config pools and blocker dbs, the task interface, and
		parallel-fetch support."""
		# NOTE(review): a number of lines are elided from this excerpt;
		# comments below describe only the visible code.
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)
		# Mirror the boolean command line flags into the option
		# holders, e.g. slot "fetch_all_uri" <- "--fetch-all-uri".
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
		self._logger = self._emerge_log_class()
		# One SequentialTaskQueue per queue name (merge/jobs/fetch/unpack).
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())
		self._status_display = JobStatusDisplay()
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		# NOTE(review): the body of this `if` (defaulting max_jobs) is
		# elided from this excerpt.
		if max_jobs is None:
		self._set_max_jobs(max_jobs)
		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		# NOTE(review): the body of this debug `if` is elided.
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.pkgsettings = {}
		self._config_pool = {}
		self._blocker_db = {}
		# NOTE(review): the enclosing `for root in trees:` loop header
		# is elided — the two assignments below are per-root.
		self._config_pool[root] = []
		self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
		# Build the callback interface that running tasks use to talk
		# back to the scheduler.
		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			fetch=fetch_iface, register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)
		# Weak values so finished prefetchers can be collected.
		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._completed_tasks = set()
		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		# Only actual "merge" operations count toward progress.
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval
		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None
		self._set_digraph(digraph)
		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		self._choose_pkg_return_early = False
		features = self.settings.features
		# parallel-fetch is pointless with fetch-only modes, and
		# requires distlocks to be safe.
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif len(mergelist) > 1:
				self._parallel_fetch = True
		if self._parallel_fetch:
				# clear out existing fetch log if it exists
				# NOTE(review): the enclosing try: header is elided.
				open(self._fetch_log, 'w')
			except EnvironmentError:
		# Identify the installed portage instance so a self-upgrade can
		# trigger a restart (see _is_restart_necessary).
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		# NOTE(review): the enclosing `if portage_match:` appears elided.
		cpv = portage_match.pop()
		self._running_portage = self._pkg(cpv, "installed",
			self._running_root, installed=True)
	def _poll(self, timeout=None):
		# NOTE(review): a preceding statement of this method is elided
		# from this excerpt.
		# Delegate to the base class poll loop.
		PollScheduler._poll(self, timeout=timeout)
9362 def _set_max_jobs(self, max_jobs):
9363 self._max_jobs = max_jobs
9364 self._task_queues.jobs.max_jobs = max_jobs
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@returns: True if background mode is enabled, False otherwise.
		"""
		# NOTE(review): several lines are elided from this excerpt;
		# comments describe only the visible code.
		# Background mode requires concurrency (or --quiet) and none of
		# the foreground-only options.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))
		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			# Interactive packages need the terminal, so fall back to
			# stdio output and a single job.
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				pkg_str += " for " + pkg.root
			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)
		# Quiet status display when --quiet (and not --verbose).
		self._status_display.quiet = \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)
		# xterm titles only when the feature is enabled and the status
		# display itself is quiet.
		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
	def _get_interactive_tasks(self):
		"""Return the merge-list packages whose PROPERTIES metadata
		contains "interactive"."""
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		interactive_tasks = []
		for task in self._mergelist:
			# Only real merge operations can be interactive.
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
			# NOTE(review): the `continue` above and the try: header
			# below are elided from this excerpt.
			properties = flatten(use_reduce(paren_reduce(
				task.metadata["PROPERTIES"]), uselist=task.use.enabled))
			except portage.exception.InvalidDependString, e:
				show_invalid_depstring_notice(task,
					task.metadata["PROPERTIES"], str(e))
				# Unrecoverable metadata error: abort scheduling.
				raise self._unknown_internal_error()
			if "interactive" in properties:
				interactive_tasks.append(task)
		return interactive_tasks
	def _set_digraph(self, digraph):
		"""Keep the dependency graph only when it is actually needed
		(parallel jobs with dependency resolution); otherwise drop it."""
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# No parallelism or no deps: the graph is not used.
			# NOTE(review): an early `return` after this assignment
			# appears to be elided from this excerpt.
			self._digraph = None
		self._digraph = digraph
		self._prune_digraph()
	def _prune_digraph(self):
		"""
		Prune any root nodes that are irrelevant.
		"""
		# NOTE(review): the enclosing loop header and its `break` are
		# elided from this excerpt; the visible code removes irrelevant
		# root nodes until a pass removes nothing.
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		for node in graph.root_nodes():
			# Irrelevant roots: non-packages, installed no-ops, and
			# already-completed tasks.
			if not isinstance(node, Package) or \
				(node.installed and node.operation == "nomerge") or \
				node in completed_tasks:
				removed_nodes.add(node)
		graph.difference_update(removed_nodes)
		if not removed_nodes:
		removed_nodes.clear()
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# NOTE(review): a guard line (presumably `if pargs:`) is
			# elided before this status assignment.
			self.status = pargs[0]
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.
		"""
		self._task_queues.fetch.addFront(fetcher)
	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		self._task_queues.merge.addFront(setup_phase)
	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		"""
		self._task_queues.unpack.add(unpack_phase)
	def _find_blockers(self, new_pkg):
		"""
		Returns a callable which should be called only when
		the vdb lock has been acquired.
		"""
		# NOTE(review): the docstring promises a callable but the
		# visible code returns the blocker list directly — the original
		# likely wrapped this call in a closure whose lines are elided
		# from this excerpt; confirm against the full source.
		return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		"""Return dblink instances for installed packages blocked by
		new_pkg (skipped entirely for no-op option sets)."""
		# NOTE(review): several lines (early return, gc call, loop
		# `continue`s) are elided from this excerpt.
		if self._opts_ignore_blockers.intersection(self.myopts):
		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		blocker_db = self._blocker_db[new_pkg.root]
		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# A same-slot or identical-cpv package is a replacement,
			# not a blocker.
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))
		return blocker_dblinks
9530 def _dblink_pkg(self, pkg_dblink):
9531 cpv = pkg_dblink.mycpv
9532 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
9533 root_config = self.trees[pkg_dblink.myroot]["root_config"]
9534 installed = type_name == "installed"
9535 return self._pkg(cpv, type_name, root_config, installed=installed)
	def _append_to_log_path(self, log_path, msg):
		"""Append msg to the log file at log_path."""
		# NOTE(review): the write/close statements of this method are
		# elided from this excerpt.
		f = open(log_path, 'a')
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		"""Route elog messages from a dblink: to the build log when
		running in the background, otherwise to the console."""
		# NOTE(review): several lines (out selection, message loop,
		# close) are elided from this excerpt.
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background
		if background and log_path is not None:
			log_file = open(log_path, 'a')
		func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
		if log_file is not None:
	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		"""Display a merge message: on the console when there is no log
		(suppressing low-severity output in background mode), otherwise
		append it to the build log."""
		# NOTE(review): an early `return` between the two branches is
		# elided from this excerpt.
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background
		if log_path is None:
			# No log file: write to the console unless we are in the
			# background and the message is below WARN severity.
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
		self._append_to_log_path(log_path, msg)
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.
		"""
		# NOTE(review): the wait on the started phase appears to be
		# elided from this excerpt before the returncode is read.
		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")
		ebuild_phase = EbuildPhase(background=background,
			pkg=pkg, phase=phase, scheduler=scheduler,
			settings=settings, tree=pkg_dblink.treetype)
		ebuild_phase.start()
		return ebuild_phase.returncode
	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		# NOTE(review): several lines (early/ final returns, the
		# quiet_settings initialization, a loop `continue`) are elided
		# from this excerpt.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:
		shown_verifying_msg = False
		# Build a quiet clone of each root's config so digestcheck
		# output stays out of the status display.
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config
		for x in self._mergelist:
			# Only ebuilds have manifests to verify.
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":
			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")
			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
	def _add_prefetchers(self):
		"""Queue background fetchers for all but the first package in
		the merge list (when parallel-fetch is enabled)."""
		# NOTE(review): an early `return` after the first check is
		# elided from this excerpt, and the local `getbinpkg` appears
		# unused in the visible code.
		if not self._parallel_fetch:
		if self._parallel_fetch:
			self._status_msg("Starting parallel fetch")
			prefetchers = self._prefetchers
			getbinpkg = "--getbinpkg" in self.myopts
			# In order to avoid "waiting for lock" messages
			# at the beginning, which annoy users, never
			# spawn a prefetcher for the first package.
			for pkg in self._mergelist[1:]:
				prefetcher = self._create_prefetcher(pkg)
				if prefetcher is not None:
					self._task_queues.fetch.add(prefetcher)
					prefetchers[pkg] = prefetcher
	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable
		"""
		# NOTE(review): the prefetcher initialization and the final
		# return are elided from this excerpt.
		if not isinstance(pkg, Package):
		elif pkg.type_name == "ebuild":
			# Source fetch, serialized through the shared fetch log.
			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)
		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):
			# Remote binary package download.
			prefetcher = BinpkgFetcher(background=True,
				logfile=self._fetch_log, pkg=pkg,
				scheduler=self._sched_iface)
	def _is_restart_scheduled(self):
		"""
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.

		@returns: True if a restart is scheduled, False otherwise.
		"""
		# NOTE(review): the return statements of this method are elided
		# from this excerpt.
		if self._opts_no_restart.intersection(self.myopts):
		mergelist = self._mergelist
		for i, pkg in enumerate(mergelist):
			# A restart only matters when packages remain after the
			# portage replacement itself.
			if self._is_restart_necessary(pkg) and \
				i != len(mergelist) - 1:
	def _is_restart_necessary(self, pkg):
		"""
		@return: True if merging the given package
		requires restart, False otherwise.
		"""
		# NOTE(review): the fall-through return statements are elided
		# from this excerpt. Also note `cmp()` is Python-2-only.
		# Figure out if we need a restart.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			if self._running_portage:
				# Restart only when the version actually differs.
				return cmp(pkg, self._running_portage) != 0
	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.
		"""
		# NOTE(review): early returns, a log-message continuation, and
		# the inner if/else of the option loop are elided from this
		# excerpt.
		if self._opts_no_restart.intersection(self.myopts):
		if not self._is_restart_necessary(pkg):
		if pkg == self._mergelist[-1]:
		self._main_loop_cleanup()
		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts
		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
		# Drop the just-merged portage from the resume list before
		# re-exec'ing.
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				mynewargv.append(myopt)
				mynewargv.append(myopt +"="+ str(myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
		# NOTE(review): this span is the body of a method whose `def`
		# header is elided from this excerpt (it drives the whole merge
		# run: resume handling, sanity checks, the keep-going loop, and
		# the final failure report). Many interleaved lines — try/else
		# headers, break/return statements, and variable initializers —
		# are elided; comments describe only the visible code.
		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")
		self._save_resume_list()
		self._background = self._background_mode()
		except self._unknown_internal_error:
		for root in self.trees:
			root_config = self.trees[root]["root_config"]
			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()
			if self._background:
				# Propagate background mode into each root's settings.
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()
			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)
		rval = self._check_manifests()
		if rval != os.EX_OK:
		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs
		# Keep-going loop: retry after dropping failed packages from
		# the resume list (loop header elided).
		rval = self._merge()
		if rval == os.EX_OK or fetchonly or not keep_going:
		if "resume" not in mtimedb:
		mergelist = self._mtimedb["resume"].get("mergelist")
		for failed_pkg in failed_pkgs:
			mergelist.remove(list(failed_pkg.pkg))
		self._failed_pkgs_all.extend(failed_pkgs)
		if not self._calc_resume_list():
		clear_caches(self.trees)
		if not self._mergelist:
		self._save_resume_list()
		self._pkg_count.curval = 0
		self._pkg_count.maxval = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._status_display.maxval = self._pkg_count.maxval
		self._logger.log(" *** Finished. Cleaning up...")
		self._failed_pkgs_all.extend(failed_pkgs)
		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show it's
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			build_dir = failed_pkg.build_dir
			log_paths = [failed_pkg.build_log]
			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				log_file = open(log_path, 'rb')
			if log_file is not None:
				for line in log_file:
					writemsg_level(line, noiselevel=-1)
				failure_log_shown = True
		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()
		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:
			printer = portage.output.EOutput()
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
					for msgtype, msgcontent in logentries[phase]:
						# NOTE(review): `basestring` is Python-2-only.
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))
		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
		if len(self._failed_pkgs_all) > 1:
			msg = "The following packages have " + \
				"failed to build or install:"
			writemsg(prefix + "\n", noiselevel=-1)
			from textwrap import wrap
			for line in wrap(msg, 72):
				writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)
			for failed_pkg in self._failed_pkgs_all:
				writemsg("%s\t%s\n" % (prefix,
					colorize("INFORM", str(failed_pkg.pkg))),
			writemsg(prefix + "\n", noiselevel=-1)
	def _elog_listener(self, mysettings, key, logentries, fulltext):
		"""Collect ERROR-level elog entries so they can be replayed in
		the end-of-run failure summary."""
		# NOTE(review): a guard (presumably `if errors:`) is elided
		# before the append.
		errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
		self._failed_pkgs_die_msgs.append(
			(mysettings, key, errors))
	def _locate_failure_log(self, failed_pkg):
		"""Return the path of a non-empty log file for a failed
		package, if one can be found."""
		# NOTE(review): most of this method's body (candidate filtering
		# and the return statements) is elided from this excerpt.
		build_dir = failed_pkg.build_dir
		log_paths = [failed_pkg.build_log]
		for log_path in log_paths:
			log_size = os.stat(log_path).st_size
	def _add_packages(self):
		"""Seed the package queue from the merge list; Blocker entries
		are handled separately (their branch body is elided here)."""
		pkg_queue = self._pkg_queue
		for pkg in self._mergelist:
			if isinstance(pkg, Package):
				pkg_queue.append(pkg)
			elif isinstance(pkg, Blocker):
	def _merge_exit(self, merge):
		"""Exit listener for merge tasks: record the result, release
		the task's config, and update the status display."""
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		# Only successful installs of new packages advance progress
		# (uninstalls are Package instances with installed=True).
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
	def _do_merge_exit(self, merge):
		"""Handle completion of a merge task: record failures, mark the
		task complete, handle replacement bookkeeping, trigger a
		restart when portage replaced itself, and keep the resume list
		current."""
		# NOTE(review): several lines (the failure-branch return, the
		# uninst hash-key assignment prefix, and the commit call) are
		# elided from this excerpt.
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
			self._status_display.failed = len(self._failed_pkgs)
		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark it's uninstall
			# task complete (if any).
			("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)
		self._restart_if_necessary(pkg)
		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
	def _build_exit(self, build):
		"""Exit listener for build jobs: queue the merge step on
		success, otherwise record the failure."""
		# NOTE(review): a few lines around the failure branch are
		# elided from this excerpt.
		if build.returncode == os.EX_OK:
			# Hand off to the (serialized) merge queue.
			merge = PackageMerge(merge=build)
			merge.addExitListener(self._merge_exit)
			self._task_queues.merge.add(merge)
			self._status_display.merges = len(self._task_queues.merge)
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
			self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)
		self._status_display.running = self._jobs
	def _extract_exit(self, build):
		# Binary-package extraction finishes the same way a build does.
		self._build_exit(build)
	def _task_complete(self, pkg):
		"""Mark pkg done and invalidate the memoized "nothing can be
		chosen yet" state of _choose_pkg()."""
		self._completed_tasks.add(pkg)
		self._choose_pkg_return_early = False
		# NOTE(review): this span is the body of a method whose `def`
		# header is elided from this excerpt (the per-pass merge driver);
		# the main loop invocation and final return are also elided.
		self._add_prefetchers()
		self._add_packages()
		pkg_queue = self._pkg_queue
		failed_pkgs = self._failed_pkgs
		# Silence lock messages and capture elog errors while the main
		# loop runs; both are restored below.
		portage.locks._quiet = self._background
		portage.elog._emerge_elog_listener = self._elog_listener
		self._main_loop_cleanup()
		portage.locks._quiet = False
		portage.elog._emerge_elog_listener = None
		rval = failed_pkgs[-1].returncode
10068 def _main_loop_cleanup(self):
10069 del self._pkg_queue[:]
10070 self._completed_tasks.clear()
10071 self._choose_pkg_return_early = False
10072 self._status_display.reset()
10073 self._digraph = None
10074 self._task_queues.fetch.clear()
	def _choose_pkg(self):
		"""
		Choose a task that has all it's dependencies satisfied.
		"""
		# NOTE(review): several lines (early returns, the chosen_pkg
		# initialization, loop break) are elided from this excerpt.
		if self._choose_pkg_return_early:
		if self._digraph is None:
			# Without a graph we can only serialize behind running
			# work, unless --nodeps with parallel jobs allows overlap.
			if (self._jobs or self._task_queues.merge) and \
				not ("--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)):
				self._choose_pkg_return_early = True
			return self._pkg_queue.pop(0)
		if not (self._jobs or self._task_queues.merge):
			# Nothing is running: the front of the queue is safe.
			return self._pkg_queue.pop(0)
		self._prune_digraph()
		# Pick the first queued package that does not depend on any
		# still-scheduled merge; packages queued after it are ignored
		# as dependencies ("later").
		later = set(self._pkg_queue)
		for pkg in self._pkg_queue:
			if not self._dependent_on_scheduled_merges(pkg, later):
		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)
		if chosen_pkg is None:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			self._choose_pkg_return_early = True
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given packages deep dependencies
		to see if it contains any scheduled merges.
		@param pkg: a package to check dependencies for
		@param later: packages for which dependence should be ignored
		since they will be merged later than pkg anyway and therefore
		delaying the merge of pkg will not result in a more optimal
		@returns: True if the package is dependent, False otherwise.
		"""
		# NOTE(review): the dependent-flag initialization, the loop
		# header, and the found/return statements are elided from this
		# excerpt; the visible code is a DFS over child nodes.
		graph = self._digraph
		completed_tasks = self._completed_tasks
		traversed_nodes = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		node_stack = direct_deps
		direct_deps = frozenset(direct_deps)
		node = node_stack.pop()
		if node in traversed_nodes:
		traversed_nodes.add(node)
		# Nodes that cannot represent a pending merge are skipped and
		# their children explored instead.
		if not ((node.installed and node.operation == "nomerge") or \
			(node.operation == "uninstall" and \
			node not in direct_deps) or \
			node in completed_tasks or \
			node_stack.extend(graph.child_nodes(node))
	def _allocate_config(self, root):
		"""
		Allocate a unique config instance for a task in order
		to prevent interference between parallel tasks.
		"""
		# NOTE(review): the `else:` between the two assignments is
		# elided from this excerpt — pooled instances are reused,
		# otherwise a fresh clone is made.
		if self._config_pool[root]:
			temp_settings = self._config_pool[root].pop()
			temp_settings = portage.config(clone=self.pkgsettings[root])
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
		# performance reasons, call it here to make sure all settings from the
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
		temp_settings.reload()
		temp_settings.reset()
		return temp_settings
	def _deallocate_config(self, settings):
		"""Return a config instance to its root's pool for reuse by
		_allocate_config()."""
		self._config_pool[settings["ROOT"]].append(settings)
	def _main_loop(self):
		"""Drive scheduling and event polling until no work remains."""
		# NOTE(review): the poll/wait calls inside both loops are
		# elided from this excerpt.
		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)
		merge_queue = self._task_queues.merge
		while self._schedule():
			if self._poll_event_handlers:
		# Drain remaining events once nothing is running any more.
		if not (self._jobs or merge_queue):
		if self._poll_event_handlers:
10195 def _keep_scheduling(self):
10196 return bool(self._pkg_queue and \
10197 not (self._failed_pkgs and not self._build_opts.fetchonly))
	def _schedule_tasks(self):
		"""One scheduling pass: start tasks, pump the queues, and
		refresh the status display."""
		# NOTE(review): the state-change accumulation around the queue
		# loop is elided from this excerpt.
		self._schedule_tasks_imp()
		self._status_display.display()
		for q in self._task_queues.values():
		# Cancel prefetchers if they're the only reason
		# the main poll loop is still running.
		if self._failed_pkgs and not self._build_opts.fetchonly and \
			not (self._jobs or self._task_queues.merge) and \
			self._task_queues.fetch:
			self._task_queues.fetch.clear()
		self._schedule_tasks_imp()
		self._status_display.display()
		return self._keep_scheduling()
	def _job_delay(self):
		"""
		@returns: True if job scheduling should be delayed, False otherwise.
		"""
		# NOTE(review): the return statements are elided from this
		# excerpt. Jobs are throttled only when a load limit is set,
		# with an exponentially growing delay per running job.
		if self._jobs and self._max_load is not None:
			current_time = time.time()
			delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
			if delay > self._job_delay_max:
				delay = self._job_delay_max
			if (current_time - self._previous_job_start_time) < delay:
	def _schedule_tasks_imp(self):
		"""
		@returns: True if state changed, False otherwise.
		"""
		# NOTE(review): the enclosing scheduling loop, the branch
		# headers selecting merge vs. extract vs. build, and the
		# state_change updates are elided from this excerpt.
		if not self._keep_scheduling():
			return bool(state_change)
		if self._choose_pkg_return_early or \
			not self._can_add_job() or \
			return bool(state_change)
		pkg = self._choose_pkg()
		return bool(state_change)
		# Only new installs count toward the progress counter.
		if not pkg.installed:
			self._pkg_count.curval += 1
		task = self._task(pkg)
		# Uninstalls go straight to the serialized merge queue.
		merge = PackageMerge(merge=task)
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)
		# Binary packages: extract job, then merge on success.
		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._extract_exit)
		self._task_queues.jobs.add(task)
		# Source builds: build job, then merge on success.
		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._build_exit)
		self._task_queues.jobs.add(task)
		return bool(state_change)
	def _task(self, pkg):
		"""Build a MergeListItem task for pkg, identifying any
		currently-installed same-slot package it will replace."""
		# NOTE(review): the guard around previous_cpv.pop() and the
		# final `return task` are elided from this excerpt.
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = vardb.match(pkg.slot_atom)
			previous_cpv = previous_cpv.pop()
			pkg_to_replace = self._pkg(previous_cpv,
				"installed", pkg.root_config, installed=True)
		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)
# Emit a "Failed to <action> <cpv> [<preposition> <root>]" status line, and a
# second line with the failure log path when one was located.
# NOTE(review): elided listing; code left byte-identical.
10318 def _failed_pkg_msg(self, failed_pkg, action, preposition):
10319 pkg = failed_pkg.pkg
10320 msg = "%s to %s %s" % \
10321 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
# Only mention the root when it is not the default "/".
10322 if pkg.root != "/":
10323 msg += " %s %s" % (preposition, pkg.root)
10325 log_path = self._locate_failure_log(failed_pkg)
10326 if log_path is not None:
10327 msg += ", Log file:"
10328 self._status_msg(msg)
# The log path goes on its own status line so it stays copy-pasteable.
10330 if log_path is not None:
10331 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
# NOTE(review): elided listing; only docstring text touched (typo fix).
10333 def _status_msg(self, msg):
10335 Display a brief status message (no newlines) in the status display.
10336 This is called by tasks to provide feedback to the user. This
10337 delegates the responsibility of generating \r and \n control characters,
10338 to guarantee that lines are created or erased when necessary and
10342 @param msg: a brief status message (no newlines allowed)
# A leading newline separates the message from spinner output when running
# in the foreground.
10344 if not self._background:
10345 writemsg_level("\n")
10346 self._status_display.displayMessage(msg)
# Persist the pending merge list into mtimedb["resume"] so an interrupted
# run can be resumed with --resume. Only real "merge" operations on Package
# instances are recorded (uninstalls/nomerges are excluded).
10348 def _save_resume_list(self):
10350 Do this before verifying the ebuild Manifests since it might
10351 be possible for the user to use --resume --skipfirst to get past
10352 a non-essential package with a broken digest.
10354 mtimedb = self._mtimedb
# Each entry is flattened with list(x) — presumably Package is iterable
# into a serializable tuple form; TODO confirm against Package class.
10355 mtimedb["resume"]["mergelist"] = [list(x) \
10356 for x in self._mergelist \
10357 if isinstance(x, Package) and x.operation == "merge"]
# Recompute the resume merge list via resume_depgraph(), dropping packages
# whose dependencies can no longer be satisfied, then report what was
# dropped and queue explanatory messages for later display.
# NOTE(review): elided listing (numbering gaps) — several assignments such
# as `pkg`, `indent`, `msg_width` occur on missing lines; code below left
# byte-identical.
10361 def _calc_resume_list(self):
10363 Use the current resume list to calculate a new one,
10364 dropping any packages with unsatisfied deps.
10366 @returns: True if successful, False otherwise.
10368 print colorize("GOOD", "*** Resuming merge...")
# Preview header, mirroring the normal (non-resume) display wording.
10370 if self._show_list():
10371 if "--tree" in self.myopts:
10372 portage.writemsg_stdout("\n" + \
10373 darkgreen("These are the packages that " + \
10374 "would be merged, in reverse order:\n\n"))
10377 portage.writemsg_stdout("\n" + \
10378 darkgreen("These are the packages that " + \
10379 "would be merged, in order:\n\n"))
10381 show_spinner = "--quiet" not in self.myopts and \
10382 "--nodeps" not in self.myopts
10385 print "Calculating dependencies ",
10387 myparams = create_depgraph_params(self.myopts, None)
# skip_unsatisfied=True lets the resume graph silently drop broken entries;
# UnsatisfiedResumeDep is still raised when that is not possible.
10391 success, mydepgraph, dropped_tasks = resume_depgraph(
10392 self.settings, self.trees, self._mtimedb, self.myopts,
10393 myparams, self._spinner, skip_unsatisfied=True)
10394 except depgraph.UnsatisfiedResumeDep, e:
10395 mydepgraph = e.depgraph
10396 dropped_tasks = set()
10399 print "\b\b... done!"
# Deferred error report: closed over `e` and `mydepgraph`, appended to
# _post_mod_echo_msgs so it prints after mod_echo output.
10402 def unsatisfied_resume_dep_msg():
10403 mydepgraph.display_problems()
10404 out = portage.output.EOutput()
10405 out.eerror("One or more packages are either masked or " + \
10406 "have missing dependencies:")
10409 show_parents = set()
10410 for dep in e.value:
# Each parent is reported only once, no matter how many deps it pulled.
10411 if dep.parent in show_parents:
10413 show_parents.add(dep.parent)
10414 if dep.atom is None:
10415 out.eerror(indent + "Masked package:")
10416 out.eerror(2 * indent + str(dep.parent))
10419 out.eerror(indent + str(dep.atom) + " pulled in by:")
10420 out.eerror(2 * indent + str(dep.parent))
10422 msg = "The resume list contains packages " + \
10423 "that are either masked or have " + \
10424 "unsatisfied dependencies. " + \
10425 "Please restart/continue " + \
10426 "the operation manually, or use --skipfirst " + \
10427 "to skip the first package in the list and " + \
10428 "any other packages that may be " + \
10429 "masked or have missing dependencies."
10430 for line in textwrap.wrap(msg, 72):
10432 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
# On success, show the recalculated list (tree view when requested).
10435 if success and self._show_list():
10436 mylist = mydepgraph.altlist()
10438 if "--tree" in self.myopts:
10440 mydepgraph.display(mylist, favorites=self._favorites)
10443 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10445 mydepgraph.display_problems()
# break_refs() detaches the list from the depgraph so the graph can be
# garbage collected — TODO confirm against depgraph.break_refs.
10447 mylist = mydepgraph.altlist()
10448 mydepgraph.break_refs(mylist)
10449 self._mergelist = mylist
10450 self._set_digraph(mydepgraph.schedulerGraph())
# Record each dropped merge as a failed package so --keep-going reporting
# and elog processing still see it.
10453 for task in dropped_tasks:
10454 if not (isinstance(task, Package) and task.operation == "merge"):
10457 msg = "emerge --keep-going:" + \
10459 if pkg.root != "/":
10460 msg += " for %s" % (pkg.root,)
10461 msg += " dropped due to unsatisfied dependency."
10462 for line in textwrap.wrap(msg, msg_width):
10463 eerror(line, phase="other", key=pkg.cpv)
10464 settings = self.pkgsettings[pkg.root]
10465 # Ensure that log collection from $T is disabled inside
10466 # elog_process(), since any logs that might exist are
10468 settings.pop("T", None)
10469 portage.elog.elog_process(pkg.cpv, settings)
10470 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# Whether the merge-list preview should be printed: suppressed by --quiet,
# enabled by --ask/--tree/--verbose. Return statements fall on elided lines;
# code left byte-identical.
10474 def _show_list(self):
10475 myopts = self.myopts
10476 if "--quiet" not in myopts and \
10477 ("--ask" in myopts or "--tree" in myopts or \
10478 "--verbose" in myopts):
# NOTE(review): elided listing (lock/unlock and early returns fall on
# missing lines); code left byte-identical.
10482 def _world_atom(self, pkg):
10484 Add the package to the world file, but only if
10485 it's supposed to be added. Otherwise, do nothing.
# Never touch world for dry-run / one-shot style invocations.
10488 if set(("--buildpkgonly", "--fetchonly",
10490 "--oneshot", "--onlydeps",
10491 "--pretend")).intersection(self.myopts):
# Only packages merged to the target root are recorded.
10494 if pkg.root != self.target_root:
# Only packages the user explicitly asked for (matched by an argument atom)
# are eligible.
10497 args_set = self._args_set
10498 if not args_set.findAtomForPackage(pkg):
10501 logger = self._logger
10502 pkg_count = self._pkg_count
10503 root_config = pkg.root_config
10504 world_set = root_config.sets["world"]
# lock()/load() support is feature-tested because some world-set backends
# lack them; the actual lock() call sits on an elided line.
10505 world_locked = False
10506 if hasattr(world_set, "lock"):
10508 world_locked = True
10511 if hasattr(world_set, "load"):
10512 world_set.load() # maybe it's changed on disk
10514 atom = create_world_atom(pkg, args_set, root_config)
10516 if hasattr(world_set, "add"):
10517 self._status_msg(('Recording %s in "world" ' + \
10518 'favorites file...') % atom)
10519 logger.log(" === (%s of %s) Updating world file (%s)" % \
10520 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10521 world_set.add(atom)
# Fallback when the set type does not support add() — warn instead.
10523 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10524 (atom,), level=logging.WARN, noiselevel=-1)
# NOTE(review): elided listing; only docstring text touched (grammar).
10529 def _pkg(self, cpv, type_name, root_config, installed=False):
10531 Get a package instance from the cache, or create a new
10532 one if necessary. Raises KeyError from aux_get if it
10533 fails for some reason (package does not exist or is
# operation defaults to "merge"; the "nomerge" assignment is presumably
# guarded by `installed` — TODO confirm, the condition line is elided.
10536 operation = "merge"
10538 operation = "nomerge"
# Reuse the Package instance already held by the dependency graph, keyed
# by the same tuple the digraph uses.
10540 if self._digraph is not None:
10541 # Reuse existing instance when available.
10542 pkg = self._digraph.get(
10543 (type_name, root_config.root, cpv, operation))
10544 if pkg is not None:
# Cache miss: build a fresh Package from the matching tree's dbapi,
# fetching only the keys the aux cache tracks.
10547 tree_type = depgraph.pkg_tree_map[type_name]
10548 db = root_config.trees[tree_type].dbapi
10549 db_keys = list(self.trees[root_config.root][
10550 tree_type].dbapi._aux_cache_keys)
10551 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10552 pkg = Package(cpv=cpv, metadata=metadata,
10553 root_config=root_config, installed=installed)
# For ebuilds, USE must be recomputed through config.setcpv() so that
# per-package settings apply.
10554 if type_name == "ebuild":
10555 settings = self.pkgsettings[root_config.root]
10556 settings.setcpv(pkg)
10557 pkg.metadata["USE"] = settings["PORTAGE_USE"]
# Poll-scheduler that regenerates the ebuild metadata cache for every
# package in the portage tree, running portdb._metadata_process jobs up to
# max_jobs in parallel, then prunes cache entries for vanished ebuilds.
# NOTE(review): elided listing (numbering gaps); code left byte-identical.
10561 class MetadataRegen(PollScheduler):
10563 def __init__(self, portdb, max_jobs=None, max_load=None):
10564 PollScheduler.__init__(self)
10565 self._portdb = portdb
# Default for max_jobs is assigned on an elided line.
10567 if max_jobs is None:
10570 self._max_jobs = max_jobs
10571 self._max_load = max_load
10572 self._sched_iface = self._sched_iface_class(
10573 register=self._register,
10574 schedule=self._schedule_wait,
10575 unregister=self._unregister)
# cpvs whose metadata was (or is being) generated successfully.
10577 self._valid_pkgs = set()
10578 self._process_iter = self._iter_metadata_processes()
# Lazily yield one metadata-regeneration subprocess per ebuild, walking
# categories in sorted order (reverse-sorted list consumed via pop()).
10580 def _iter_metadata_processes(self):
10581 portdb = self._portdb
10582 valid_pkgs = self._valid_pkgs
10583 every_cp = portdb.cp_all()
10584 every_cp.sort(reverse=True)
10587 cp = every_cp.pop()
10588 portage.writemsg_stdout("Processing %s\n" % cp)
10589 cpv_list = portdb.cp_list(cp)
10590 for cpv in cpv_list:
10591 valid_pkgs.add(cpv)
10592 ebuild_path, repo_path = portdb.findname2(cpv)
# None means the cache entry is already valid — skip (continue is elided).
10593 metadata_process = portdb._metadata_process(
10594 cpv, ebuild_path, repo_path)
10595 if metadata_process is None:
10597 yield metadata_process
# Main entry (def line elided — presumably run()): drive scheduling to
# completion, then discard auxdb entries whose ebuilds no longer exist.
10601 portdb = self._portdb
10602 from portage.cache.cache_errors import CacheError
# Snapshot every existing cache key per tree; anything not re-validated
# below is considered dead.
10605 for mytree in portdb.porttrees:
10607 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10608 except CacheError, e:
10609 portage.writemsg("Error listing cache entries for " + \
10610 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10615 while self._schedule():
# Keep cache entries for every cpv that still has an ebuild on disk.
10622 for y in self._valid_pkgs:
10623 for mytree in portdb.porttrees:
10624 if portdb.findname2(y, mytree=mytree)[0]:
10625 dead_nodes[mytree].discard(y)
10627 for mytree, nodes in dead_nodes.iteritems():
10628 auxdb = portdb.auxdb[mytree]
# Per-node deletion (elided) tolerates races and cache backend errors.
10632 except (KeyError, CacheError):
10635 def _schedule_tasks(self):
10638 @returns: True if there may be remaining tasks to schedule,
# Fill free job slots from the process iterator until it is exhausted.
10641 while self._can_add_job():
10643 metadata_process = self._process_iter.next()
10644 except StopIteration:
10648 metadata_process.scheduler = self._sched_iface
10649 metadata_process.addExitListener(self._metadata_exit)
10650 metadata_process.start()
# Exit hook: drop failed cpvs from _valid_pkgs so their stale cache entries
# get pruned, and warn the user.
10653 def _metadata_exit(self, metadata_process):
10655 if metadata_process.returncode != os.EX_OK:
10656 self._valid_pkgs.discard(metadata_process.cpv)
10657 portage.writemsg("Error processing %s, continuing...\n" % \
10658 (metadata_process.cpv,))
10661 class UninstallFailure(portage.exception.PortageException):
10663 An instance of this class is raised by unmerge() when
10664 an uninstallation fails.
10667 def __init__(self, *pargs):
10668 portage.exception.PortageException.__init__(self, pargs)
# status: exit code of the failed portage.unmerge() call; the guard for an
# empty pargs tuple falls on an elided line.
10670 self.status = pargs[0]
# Select and remove installed packages for the "unmerge", "prune" and
# "clean" actions: resolve the user's arguments to installed cpvs, classify
# each into selected/protected/omitted, apply safety checks (system profile,
# portage itself, package-set references), display a preview, confirm, and
# finally call portage.unmerge() per selected package.
# NOTE(review): this listing is heavily elided (gaps in embedded numbering)
# — locals such as pkg_cache, syslist, providers, global_unmerge, newline,
# finally-blocks and many returns sit on missing lines. Code below is left
# byte-identical; comments only.
10672 def unmerge(root_config, myopts, unmerge_action,
10673 unmerge_files, ldpath_mtimes, autoclean=0,
10674 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10675 scheduler=None, writemsg_level=portage.util.writemsg_level):
10677 quiet = "--quiet" in myopts
10678 settings = root_config.settings
10679 sets = root_config.sets
10680 vartree = root_config.trees["vartree"]
10681 candidate_catpkgs=[]
10683 xterm_titles = "notitles" not in settings.features
10684 out = portage.output.EOutput()
# Local helper (def line elided — presumably _pkg(cpv)): memoized Package
# construction for installed cpvs, backed by pkg_cache.
10686 db_keys = list(vartree.dbapi._aux_cache_keys)
10689 pkg = pkg_cache.get(cpv)
10691 pkg = Package(cpv=cpv, installed=True,
10692 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
10693 root_config=root_config,
10694 type_name="installed")
10695 pkg_cache[cpv] = pkg
# Lock the installed-package database (vdb) when writable.
10698 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10700 # At least the parent needs to exist for the lock file.
10701 portage.util.ensure_dirs(vdb_path)
10702 except portage.exception.PortageException:
10706 if os.access(vdb_path, os.W_OK):
10707 vdb_lock = portage.locks.lockdir(vdb_path)
# Expand the system set into concrete category/package keys, resolving
# virtuals to their single installed provider where unambiguous.
10708 realsyslist = sets["system"].getAtoms()
10710 for x in realsyslist:
10711 mycp = portage.dep_getkey(x)
10712 if mycp in settings.getvirtuals():
10714 for provider in settings.getvirtuals()[mycp]:
10715 if vartree.dbapi.match(provider):
10716 providers.append(provider)
10717 if len(providers) == 1:
10718 syslist.extend(providers)
10720 syslist.append(mycp)
10722 mysettings = portage.config(clone=settings)
10724 if not unmerge_files:
10725 if unmerge_action == "unmerge":
10727 print bold("emerge unmerge") + " can only be used with specific package names"
10733 localtree = vartree
10734 # process all arguments and add all
10735 # valid db entries to candidate_catpkgs
# global_unmerge path (condition elided): no file args means "everything".
10737 if not unmerge_files:
10738 candidate_catpkgs.extend(vartree.dbapi.cp_all())
10740 #we've got command-line arguments
10741 if not unmerge_files:
10742 print "\nNo packages to unmerge have been provided.\n"
10744 for x in unmerge_files:
10745 arg_parts = x.split('/')
10746 if x[0] not in [".","/"] and \
10747 arg_parts[-1][-7:] != ".ebuild":
10748 #possible cat/pkg or dep; treat as such
10749 candidate_catpkgs.append(x)
10750 elif unmerge_action in ["prune","clean"]:
10751 print "\n!!! Prune and clean do not accept individual" + \
10752 " ebuilds as arguments;\n skipping.\n"
10755 # it appears that the user is specifying an installed
10756 # ebuild and we're in "unmerge" mode, so it's ok.
10757 if not os.path.exists(x):
10758 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Translate a filesystem path inside the vdb into an "=cat/pkg-ver" atom.
10761 absx = os.path.abspath(x)
10762 sp_absx = absx.split("/")
10763 if sp_absx[-1][-7:] == ".ebuild":
10765 absx = "/".join(sp_absx)
10767 sp_absx_len = len(sp_absx)
10769 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10770 vdb_len = len(vdb_path)
10772 sp_vdb = vdb_path.split("/")
10773 sp_vdb_len = len(sp_vdb)
10775 if not os.path.exists(absx+"/CONTENTS"):
10776 print "!!! Not a valid db dir: "+str(absx)
10779 if sp_absx_len <= sp_vdb_len:
10780 # The Path is shorter... so it can't be inside the vdb.
10783 print "\n!!!",x,"cannot be inside "+ \
10784 vdb_path+"; aborting.\n"
# Component-wise prefix check that absx really lives under the vdb.
10787 for idx in range(0,sp_vdb_len):
10788 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
10791 print "\n!!!", x, "is not inside "+\
10792 vdb_path+"; aborting.\n"
10795 print "="+"/".join(sp_absx[sp_vdb_len:])
10796 candidate_catpkgs.append(
10797 "="+"/".join(sp_absx[sp_vdb_len:]))
10800 if (not "--quiet" in myopts):
10802 if settings["ROOT"] != "/":
10803 writemsg_level(darkgreen(newline+ \
10804 ">>> Using system located in ROOT tree %s\n" % \
10807 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
10808 not ("--quiet" in myopts):
10809 writemsg_level(darkgreen(newline+\
10810 ">>> These are the packages that would be unmerged:\n"))
10812 # Preservation of order is required for --depclean and --prune so
10813 # that dependencies are respected. Use all_selected to eliminate
10814 # duplicate packages since the same package may be selected by
10817 all_selected = set()
10818 for x in candidate_catpkgs:
10819 # cycle through all our candidate deps and determine
10820 # what will and will not get unmerged
10822 mymatch = vartree.dbapi.match(x)
10823 except portage.exception.AmbiguousPackageName, errpkgs:
10824 print "\n\n!!! The short ebuild name \"" + \
10825 x + "\" is ambiguous. Please specify"
10826 print "!!! one of the following fully-qualified " + \
10827 "ebuild names instead:\n"
10828 for i in errpkgs[0]:
10829 print " " + green(i)
10833 if not mymatch and x[0] not in "<>=~":
10834 mymatch = localtree.dep_match(x)
10836 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
10837 (x, unmerge_action), noiselevel=-1)
# One pkgmap slot per argument: protected/selected/omitted cpv sets.
10841 {"protected": set(), "selected": set(), "omitted": set()})
10842 mykey = len(pkgmap) - 1
10843 if unmerge_action=="unmerge":
10845 if y not in all_selected:
10846 pkgmap[mykey]["selected"].add(y)
10847 all_selected.add(y)
10848 elif unmerge_action == "prune":
10849 if len(mymatch) == 1:
# prune: keep only the "best" installed version, preferring a higher
# counter within the same slot, otherwise portage.best().
10851 best_version = mymatch[0]
10852 best_slot = vartree.getslot(best_version)
10853 best_counter = vartree.dbapi.cpv_counter(best_version)
10854 for mypkg in mymatch[1:]:
10855 myslot = vartree.getslot(mypkg)
10856 mycounter = vartree.dbapi.cpv_counter(mypkg)
10857 if (myslot == best_slot and mycounter > best_counter) or \
10858 mypkg == portage.best([mypkg, best_version]):
10859 if myslot == best_slot:
10860 if mycounter < best_counter:
10861 # On slot collision, keep the one with the
10862 # highest counter since it is the most
10863 # recently installed.
10865 best_version = mypkg
10867 best_counter = mycounter
10868 pkgmap[mykey]["protected"].add(best_version)
10869 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
10870 if mypkg != best_version and mypkg not in all_selected)
10871 all_selected.update(pkgmap[mykey]["selected"])
10873 # unmerge_action == "clean"
# clean: group installed versions per slot by counter; the highest counter
# in each slot is protected, the rest selected.
10875 for mypkg in mymatch:
10876 if unmerge_action == "clean":
10877 myslot = localtree.getslot(mypkg)
10879 # since we're pruning, we don't care about slots
10880 # and put all the pkgs in together
10882 if myslot not in slotmap:
10883 slotmap[myslot] = {}
10884 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
10886 for myslot in slotmap:
10887 counterkeys = slotmap[myslot].keys()
10888 if not counterkeys:
10891 pkgmap[mykey]["protected"].add(
10892 slotmap[myslot][counterkeys[-1]])
10893 del counterkeys[-1]
10894 #be pretty and get them in order of merge:
10895 for ckey in counterkeys:
10896 mypkg = slotmap[myslot][ckey]
10897 if mypkg not in all_selected:
10898 pkgmap[mykey]["selected"].add(mypkg)
10899 all_selected.add(mypkg)
10900 # ok, now the last-merged package
10901 # is protected, and the rest are selected
10902 numselected = len(all_selected)
10903 if global_unmerge and not numselected:
10904 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
10907 if not numselected:
10908 portage.writemsg_stdout(
10909 "\n>>> No packages selected for removal by " + \
10910 unmerge_action + "\n")
# Cleanup (presumably a finally block — the `finally:` line is elided).
10914 vartree.dbapi.flush_cache()
10915 portage.locks.unlockdir(vdb_lock)
10917 from portage.sets.base import EditablePackageSet
10919 # generate a list of package sets that are directly or indirectly listed in "world",
10920 # as there is no persistent list of "installed" sets
10921 installed_sets = ["world"]
10926 pos = len(installed_sets)
10927 for s in installed_sets[pos - 1:]:
10930 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
10933 installed_sets += candidates
10934 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
10937 # we don't want to unmerge packages that are still listed in user-editable package sets
10938 # listed in "world" as they would be remerged on the next update of "world" or the
10939 # relevant package sets.
10940 unknown_sets = set()
10941 for cp in xrange(len(pkgmap)):
10942 for cpv in pkgmap[cp]["selected"].copy():
10946 # It could have been uninstalled
10947 # by a concurrent process.
# Safety: never let non-clean actions on the live root unmerge portage
# itself; reclassify it as protected.
10950 if unmerge_action != "clean" and \
10951 root_config.root == "/" and \
10952 portage.match_from_list(
10953 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10954 msg = ("Not unmerging package %s since there is no valid " + \
10955 "reason for portage to unmerge itself.") % (pkg.cpv,)
10956 for line in textwrap.wrap(msg, 75):
10958 # adjust pkgmap so the display output is correct
10959 pkgmap[cp]["selected"].remove(cpv)
10960 all_selected.remove(cpv)
10961 pkgmap[cp]["protected"].add(cpv)
10965 for s in installed_sets:
10966 # skip sets that the user requested to unmerge, and skip world
10967 # unless we're unmerging a package set (as the package would be
10968 # removed from "world" later on)
10969 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
10973 if s in unknown_sets:
10975 unknown_sets.add(s)
10976 out = portage.output.EOutput()
10977 out.eerror(("Unknown set '@%s' in " + \
10978 "%svar/lib/portage/world_sets") % \
10979 (s, root_config.root))
10982 # only check instances of EditablePackageSet as other classes are generally used for
10983 # special purposes and can be ignored here (and are usually generated dynamically, so the
10984 # user can't do much about them anyway)
10985 if isinstance(sets[s], EditablePackageSet):
10987 # This is derived from a snippet of code in the
10988 # depgraph._iter_atoms_for_pkg() method.
10989 for atom in sets[s].iterAtomsForPackage(pkg):
10990 inst_matches = vartree.dbapi.match(atom)
10991 inst_matches.reverse() # descending order
10993 for inst_cpv in inst_matches:
10995 inst_pkg = _pkg(inst_cpv)
10997 # It could have been uninstalled
10998 # by a concurrent process.
11001 if inst_pkg.cp != atom.cp:
11003 if pkg >= inst_pkg:
11004 # This is descending order, and we're not
11005 # interested in any versions <= pkg given.
11007 if pkg.slot_atom != inst_pkg.slot_atom:
11008 higher_slot = inst_pkg
11010 if higher_slot is None:
11014 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11015 #print colorize("WARN", "but still listed in the following package sets:")
11016 #print " %s\n" % ", ".join(parents)
# Keep packages still referenced by world-reachable editable sets.
11017 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11018 print colorize("WARN", "still referenced by the following package sets:")
11019 print " %s\n" % ", ".join(parents)
11020 # adjust pkgmap so the display output is correct
11021 pkgmap[cp]["selected"].remove(cpv)
11022 all_selected.remove(cpv)
11023 pkgmap[cp]["protected"].add(cpv)
11027 numselected = len(all_selected)
11028 if not numselected:
11030 "\n>>> No packages selected for removal by " + \
11031 unmerge_action + "\n")
11034 # Unmerge order only matters in some cases
# Unordered case: regroup pkgmap entries by category/package key so the
# preview prints one row per cp, merged across argument slots.
11038 selected = d["selected"]
11041 cp = portage.cpv_getkey(iter(selected).next())
11042 cp_dict = unordered.get(cp)
11043 if cp_dict is None:
11045 unordered[cp] = cp_dict
11048 for k, v in d.iteritems():
11049 cp_dict[k].update(v)
11050 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview loop: compute omitted versions and print colored per-cp rows.
11052 for x in xrange(len(pkgmap)):
11053 selected = pkgmap[x]["selected"]
11056 for mytype, mylist in pkgmap[x].iteritems():
11057 if mytype == "selected":
11059 mylist.difference_update(all_selected)
11060 cp = portage.cpv_getkey(iter(selected).next())
11061 for y in localtree.dep_match(cp):
11062 if y not in pkgmap[x]["omitted"] and \
11063 y not in pkgmap[x]["selected"] and \
11064 y not in pkgmap[x]["protected"] and \
11065 y not in all_selected:
11066 pkgmap[x]["omitted"].add(y)
11067 if global_unmerge and not pkgmap[x]["selected"]:
11068 #avoid cluttering the preview printout with stuff that isn't getting unmerged
# Loud warning (with terminal bell) when a system-profile package is about
# to be unmerged, plus a countdown unless pretend/ask.
11070 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11071 writemsg_level(colorize("BAD","\a\n\n!!! " + \
11072 "'%s' is part of your system profile.\n" % cp),
11073 level=logging.WARNING, noiselevel=-1)
11074 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11075 "be damaging to your system.\n\n"),
11076 level=logging.WARNING, noiselevel=-1)
11077 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11078 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11079 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11081 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11083 writemsg_level(bold(cp) + ": ", noiselevel=-1)
11084 for mytype in ["selected","protected","omitted"]:
11086 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11087 if pkgmap[x][mytype]:
11088 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11089 sorted_pkgs.sort(portage.pkgcmp)
11090 for pn, ver, rev in sorted_pkgs:
# "-r0" suppression presumably happens on the elided branch above.
11094 myversion = ver + "-" + rev
11095 if mytype == "selected":
11097 colorize("UNMERGE_WARN", myversion + " "),
11101 colorize("GOOD", myversion + " "), noiselevel=-1)
11103 writemsg_level("none ", noiselevel=-1)
11105 writemsg_level("\n", noiselevel=-1)
11107 writemsg_level("\n", noiselevel=-1)
11109 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11110 " packages are slated for removal.\n")
11111 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11112 " and " + colorize("GOOD", "'omitted'") + \
11113 " packages will not be removed.\n\n")
11115 if "--pretend" in myopts:
11116 #we're done... return
11118 if "--ask" in myopts:
11119 if userquery("Would you like to unmerge these packages?")=="No":
11120 # enter pretend mode for correct formatting of results
11121 myopts["--pretend"] = True
11126 #the real unmerging begins, after a short delay....
11127 if clean_delay and not autoclean:
11128 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11130 for x in xrange(len(pkgmap)):
11131 for y in pkgmap[x]["selected"]:
11132 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11133 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11134 mysplit = y.split("/")
# mysplit is [category, pkg-version]; unmerge_action decides whether
# config-protected cleanup ("unmerge") applies.
11136 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11137 mysettings, unmerge_action not in ["clean","prune"],
11138 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11139 scheduler=scheduler)
11141 if retval != os.EX_OK:
11142 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
# raise_on_error gating presumably sits on the elided line above.
11144 raise UninstallFailure(retval)
11147 if clean_world and hasattr(sets["world"], "cleanPackage"):
11148 sets["world"].cleanPackage(vartree.dbapi, y)
11149 emergelog(xterm_titles, " >>> unmerge success: "+y)
# After per-package removal, drop any user-requested set names from world.
11150 if clean_world and hasattr(sets["world"], "remove"):
11151 for s in root_config.setconfig.active:
11152 sets["world"].remove(SETPREFIX+s)
# Regenerate the GNU info "dir" index (via /usr/bin/install-info) for any
# info directory whose mtime changed since the last run, and report a
# processed/error summary.
# NOTE(review): elided listing (numbering gaps — e.g. the loop over
# `infodirs`, `icount`/`badcount`/`errmsg` initialization); code left
# byte-identical.
11155 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
11157 if os.path.exists("/usr/bin/install-info"):
11158 out = portage.output.EOutput()
# Collect info dirs whose stat mtime differs from the cached one.
11163 inforoot=normpath(root+z)
11164 if os.path.isdir(inforoot):
11165 infomtime = long(os.stat(inforoot).st_mtime)
11166 if inforoot not in prev_mtimes or \
11167 prev_mtimes[inforoot] != infomtime:
11168 regen_infodirs.append(inforoot)
11170 if not regen_infodirs:
11171 portage.writemsg_stdout("\n")
11172 out.einfo("GNU info directory index is up-to-date.")
11174 portage.writemsg_stdout("\n")
11175 out.einfo("Regenerating GNU info directory index...")
# "dir" index files may also exist compressed.
11177 dir_extensions = ("", ".gz", ".bz2")
11181 for inforoot in regen_infodirs:
11185 if not os.path.isdir(inforoot) or \
11186 not os.access(inforoot, os.W_OK):
11189 file_list = os.listdir(inforoot)
11191 dir_file = os.path.join(inforoot, "dir")
11192 moved_old_dir = False
11193 processed_count = 0
11194 for x in file_list:
# Skip hidden entries, subdirectories, and the index files themselves.
11195 if x.startswith(".") or \
11196 os.path.isdir(os.path.join(inforoot, x)):
11198 if x.startswith("dir"):
11200 for ext in dir_extensions:
11201 if x == "dir" + ext or \
11202 x == "dir" + ext + ".old":
# Before processing the first info file, stash any pre-existing dir index
# as *.old so install-info builds a fresh one.
11207 if processed_count == 0:
11208 for ext in dir_extensions:
11210 os.rename(dir_file + ext, dir_file + ext + ".old")
11211 moved_old_dir = True
11212 except EnvironmentError, e:
11213 if e.errno != errno.ENOENT:
11216 processed_count += 1
# LANG/LANGUAGE forced to C so the warning strings matched below are stable.
11217 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11218 existsstr="already exists, for file `"
11220 if re.search(existsstr,myso):
11221 # Already exists... Don't increment the count for this.
11223 elif myso[:44]=="install-info: warning: no info dir entry in ":
11224 # This info file doesn't contain a DIR-header: install-info produces this
11225 # (harmless) warning (the --quiet switch doesn't seem to work).
11226 # Don't increment the count for this.
11229 badcount=badcount+1
11230 errmsg += myso + "\n"
11233 if moved_old_dir and not os.path.exists(dir_file):
11234 # We didn't generate a new dir file, so put the old file
11235 # back where it was originally found.
11236 for ext in dir_extensions:
11238 os.rename(dir_file + ext + ".old", dir_file + ext)
11239 except EnvironmentError, e:
11240 if e.errno != errno.ENOENT:
11244 # Clean dir.old cruft so that they don't prevent
11245 # unmerge of otherwise empty directories.
11246 for ext in dir_extensions:
11248 os.unlink(dir_file + ext + ".old")
11249 except EnvironmentError, e:
11250 if e.errno != errno.ENOENT:
11254 #update mtime so we can potentially avoid regenerating.
11255 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
# Summary: error branch (condition elided, presumably badcount) vs success.
11258 out.eerror("Processed %d info files; %d errors." % \
11259 (icount, badcount))
11260 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11263 out.einfo("Processed %d info files." % (icount,))
# Print a per-repository count of unread Gentoo news items, plus a hint to
# run `eselect news`. Counting is delegated to checkUpdatedNewsItems().
# NOTE(review): elided listing; code left byte-identical.
11266 def display_news_notification(root_config, myopts):
11267 target_root = root_config.root
11268 trees = root_config.trees
11269 settings = trees["vartree"].settings
11270 portdb = trees["porttree"].dbapi
11271 vardb = trees["vartree"].dbapi
11272 NEWS_PATH = os.path.join("metadata", "news")
11273 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11274 newsReaderDisplay = False
# Under --pretend the unread-items state must not be modified on disk.
11275 update = "--pretend" not in myopts
11277 for repo in portdb.getRepositories():
11278 unreadItems = checkUpdatedNewsItems(
11279 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
# Guard for unreadItems truthiness is presumably on an elided line.
11281 if not newsReaderDisplay:
11282 newsReaderDisplay = True
11284 print colorize("WARN", " * IMPORTANT:"),
11285 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11288 if newsReaderDisplay:
11289 print colorize("WARN", " *"),
11290 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# Report libraries kept alive by the preserved-libs registry: for each
# preserved file, list its alternate hardlink/symlink paths and (up to
# MAX_DISPLAY) the consumers that still link against it, with owning
# packages resolved through the vardb owners index.
# NOTE(review): elided listing (MAX_DISPLAY, consumer_map, samefile_map and
# the linkmap rebuild call sit on missing lines); code left byte-identical.
11293 def display_preserved_libs(vardbapi):
11296 if vardbapi.plib_registry.hasEntries():
11298 print colorize("WARN", "!!!") + " existing preserved libs:"
11299 plibdata = vardbapi.plib_registry.getPreservedLibs()
11300 linkmap = vardbapi.linkmap
11303 linkmap_broken = False
# Linkmap regeneration (call elided) can fail when scanelf/prelink tooling
# is missing; fall back to showing paths without consumer info.
11307 except portage.exception.CommandNotFound, e:
11308 writemsg_level("!!! Command Not Found: %s\n" % (e,),
11309 level=logging.ERROR, noiselevel=-1)
11311 linkmap_broken = True
# First pass: gather consumers per preserved file and collect the set of
# consumer paths whose owning packages we need to look up.
11313 search_for_owners = set()
11314 for cpv in plibdata:
11315 for f in plibdata[cpv]:
11316 if f in consumer_map:
11318 consumers = list(linkmap.findConsumers(f))
11320 consumer_map[f] = consumers
# +1 so "more than MAX_DISPLAY" can be detected below.
11321 search_for_owners.update(consumers[:MAX_DISPLAY+1])
11323 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
# Second pass: print per-package, deduplicating paths that refer to the
# same on-disk object via linkmap._obj_key.
11325 for cpv in plibdata:
11326 print colorize("WARN", ">>>") + " package: %s" % cpv
11328 for f in plibdata[cpv]:
11329 obj_key = linkmap._obj_key(f)
11330 alt_paths = samefile_map.get(obj_key)
11331 if alt_paths is None:
11333 samefile_map[obj_key] = alt_paths
11336 for alt_paths in samefile_map.itervalues():
11337 alt_paths = sorted(alt_paths)
11338 for p in alt_paths:
11339 print colorize("WARN", " * ") + " - %s" % (p,)
11341 consumers = consumer_map.get(f, [])
11342 for c in consumers[:MAX_DISPLAY]:
11343 print colorize("WARN", " * ") + " used by %s (%s)" % \
11344 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
# Exactly one extra consumer: show it rather than saying "1 other file".
11345 if len(consumers) == MAX_DISPLAY + 1:
11346 print colorize("WARN", " * ") + " used by %s (%s)" % \
11347 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
11348 for x in owners.get(consumers[MAX_DISPLAY], [])))
11349 elif len(consumers) > MAX_DISPLAY:
11350 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
11351 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11354 def _flush_elog_mod_echo():
11356 Dump the mod_echo output now so that our other
11357 notifications are shown last.
11359 @returns: True if messages were shown, False otherwise.
11361 messages_shown = False
# Import inside a try (header elided) because the module may not exist
# after downgrading portage.
11363 from portage.elog import mod_echo
11364 except ImportError:
11365 pass # happens during downgrade to a version without the module
# mod_echo._items is the queue of pending echo messages; finalize() flushes
# and clears it.
11367 messages_shown = bool(mod_echo._items)
11368 mod_echo.finalize()
11369 return messages_shown
# NOTE(review): elided listing; only docstring text touched ("it's"→"its")
# plus added comments. The mtimedb.commit()/sys.exit(retval) calls named in
# the docstring fall on missing lines.
11371 def post_emerge(root_config, myopts, mtimedb, retval):
11373 Misc. things to run at the end of a merge session.
11376 Update Config Files
11379 Display preserved libs warnings
11382 @param trees: A dictionary mapping each ROOT to its package databases
11384 @param mtimedb: The mtimeDB to store data needed across merge invocations
11385 @type mtimedb: MtimeDB class instance
11386 @param retval: Emerge's return value
11390 1. Calls sys.exit(retval)
11393 target_root = root_config.root
11394 trees = { target_root : root_config.trees }
11395 vardbapi = trees[target_root]["vartree"].dbapi
11396 settings = vardbapi.settings
11397 info_mtimes = mtimedb["info"]
11399 # Load the most current variables from ${ROOT}/etc/profile.env
11402 settings.regenerate()
11405 config_protect = settings.get("CONFIG_PROTECT","").split()
11406 infodirs = settings.get("INFOPATH","").split(":") + \
11407 settings.get("INFODIR","").split(":")
11411 if retval == os.EX_OK:
11412 exit_msg = " *** exiting successfully."
11414 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11415 emergelog("notitles" not in settings.features, exit_msg)
11417 _flush_elog_mod_echo()
# Fast path: an unchanged vdb counter hash means no packages were merged or
# unmerged, so config/info checks can be skipped entirely.
11419 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11420 if counter_hash is not None and \
11421 counter_hash == vardbapi._counter_hash():
11422 # If vdb state has not changed then there's nothing else to do.
11425 vdb_path = os.path.join(target_root, portage.VDB_PATH)
11426 portage.util.ensure_dirs(vdb_path)
11428 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11429 vdb_lock = portage.locks.lockdir(vdb_path)
# Info index regeneration respects FEATURES=noinfo; unlock is presumably
# in a finally block (elided).
11433 if "noinfo" not in settings.features:
11434 chk_updated_info_files(target_root,
11435 infodirs, info_mtimes, retval)
11439 portage.locks.unlockdir(vdb_lock)
11441 chk_updated_cfg_files(target_root, config_protect)
11443 display_news_notification(root_config, myopts)
11444 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11445 display_preserved_libs(vardbapi)
11450 def chk_updated_cfg_files(target_root, config_protect):
# NOTE(review): garbled extraction -- fused line numbers, missing lines;
# comments only added here.
# Purpose: scan each CONFIG_PROTECT directory/file under target_root for
# pending '._cfg????_*' update files and print a notice telling the user
# how many config files need updating.
11452 #number of directories with some protect files in them
11454 for x in config_protect:
11455 x = os.path.join(target_root, x.lstrip(os.path.sep))
11456 if not os.access(x, os.W_OK):
11457 # Avoid Permission denied errors generated
11461 mymode = os.lstat(x).st_mode
# A symlink pointing at a real directory is treated as a directory.
11464 if stat.S_ISLNK(mymode):
11465 # We want to treat it like a directory if it
11466 # is a symlink to an existing directory.
11468 real_mode = os.stat(x).st_mode
11469 if stat.S_ISDIR(real_mode):
# Build a shell `find` command: recursive for directories, -maxdepth 1 for
# a single protected file.  NOTE(review): the path is interpolated into a
# shell string run via commands.getstatusoutput/os.system -- a path
# containing a single quote would break quoting; presumably acceptable for
# CONFIG_PROTECT paths, but worth confirming.
11473 if stat.S_ISDIR(mymode):
11474 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11476 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11477 os.path.split(x.rstrip(os.path.sep))
11478 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11479 a = commands.getstatusoutput(mycommand)
11481 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11483 # Show the error message alone, sending stdout to /dev/null.
11484 os.system(mycommand + " 1>/dev/null")
# -print0 output: NUL-separated file list; trailing NUL yields an empty
# final element which is dropped below.
11486 files = a[1].split('\0')
11487 # split always produces an empty string as the last element
11488 if files and not files[-1]:
11492 print "\n"+colorize("WARN", " * IMPORTANT:"),
11493 if stat.S_ISDIR(mymode):
11494 print "%d config files in '%s' need updating." % \
11497 print "config file '%s' needs updating." % x
# Final pointer to the emerge man page, printed once (guard in missing line).
11500 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11501 " section of the " + bold("emerge")
11502 print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	update=False):
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param NEWS_PATH: path (relative to a repository) where news items live
	@param UNREAD_PATH: path where per-repository unread-item lists are kept
	@param repo_id: identifier of the repository whose news is examined
	@param update: when True, rescan the repository for new items first
	@rtype: Integer
	@returns:
	1.  The number of unread but relevant news items.
	"""
	# Imported lazily so portage.news is only required when news support is
	# actually exercised.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	"""Splice "category/" in front of the package-name part of *atom*.

	The insertion point is the first word character, so operator prefixes
	such as ">=" are preserved.  Returns the new atom string, or None when
	atom contains no word character (nowhere to insert).
	"""
	alphanum = re.search(r'\w', atom)
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		ret = None
	return ret
def is_valid_package_atom(x):
	"""Return True if *x* parses as a valid package atom.

	Category-less input (e.g. ">=foo-1.0") is tolerated: a dummy "cat/"
	category is inserted before the package name so portage.isvalidatom()
	can still validate the remainder of the atom syntax.
	"""
	if "/" not in x:
		alphanum = re.search(r'\w', x)
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
11545 def show_blocker_docs_link():
11547 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11548 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11550 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11553 def show_mask_docs():
11554 print "For more information, see the MASKED PACKAGES section in the emerge"
11555 print "man page or refer to the Gentoo Handbook."
11557 def action_sync(settings, trees, mtimedb, myopts, myaction):
# NOTE(review): garbled extraction -- fused line numbers, many missing
# lines; code preserved byte-for-byte, comments only added.
# Purpose: implement `emerge --sync` (and the sync side of --metadata):
# sync PORTDIR from SYNC (rsync:// or cvs://), then update metadata cache,
# reload config, and warn if a newer portage is available.
11558 xterm_titles = "notitles" not in settings.features
11559 emergelog(xterm_titles, " === sync")
11560 myportdir = settings.get("PORTDIR", None)
11561 out = portage.output.EOutput()
11563 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
11565 if myportdir[-1]=="/":
11566 myportdir=myportdir[:-1]
11567 if not os.path.exists(myportdir):
11568 print ">>>",myportdir,"not found, creating it."
11569 os.makedirs(myportdir,0755)
11570 syncuri = settings.get("SYNC", "").strip()
11572 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11573 noiselevel=-1, level=logging.ERROR)
# Dispatch on the SYNC URI scheme; "metadata" action skips the transfer.
11577 updatecache_flg = False
11578 if myaction == "metadata":
11579 print "skipping sync"
11580 updatecache_flg = True
11581 elif syncuri[:8]=="rsync://":
11582 if not os.path.exists("/usr/bin/rsync"):
11583 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11584 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11589 import shlex, StringIO
# Either use hardcoded default rsync options or honor PORTAGE_RSYNC_OPTS.
11590 if settings["PORTAGE_RSYNC_OPTS"] == "":
11591 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11592 rsync_opts.extend([
11593 "--recursive", # Recurse directories
11594 "--links", # Consider symlinks
11595 "--safe-links", # Ignore links outside of tree
11596 "--perms", # Preserve permissions
11597 "--times", # Preserive mod times
11598 "--compress", # Compress the data transmitted
11599 "--force", # Force deletion on non-empty dirs
11600 "--whole-file", # Don't do block transfers, only entire files
11601 "--delete", # Delete files that aren't in the master tree
11602 "--stats", # Show final statistics about what was transfered
11603 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11604 "--exclude=/distfiles", # Exclude distfiles from consideration
11605 "--exclude=/local", # Exclude local from consideration
11606 "--exclude=/packages", # Exclude packages from consideration
11610 # The below validation is not needed when using the above hardcoded
11613 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11614 lexer = shlex.shlex(StringIO.StringIO(
11615 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11616 lexer.whitespace_split = True
11617 rsync_opts.extend(lexer)
# Force options the tree layout depends on back in if the user dropped them.
11620 for opt in ("--recursive", "--times"):
11621 if opt not in rsync_opts:
11622 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11623 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11624 rsync_opts.append(opt)
11626 for exclude in ("distfiles", "local", "packages"):
11627 opt = "--exclude=/%s" % exclude
11628 if opt not in rsync_opts:
11629 portage.writemsg(yellow("WARNING:") + \
11630 " adding required option %s not included in " % opt + \
11631 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11632 rsync_opts.append(opt)
# Extra mirror-friendly options enforced for official gentoo.org mirrors.
11634 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11635 def rsync_opt_startswith(opt_prefix):
11636 for x in rsync_opts:
11637 if x.startswith(opt_prefix):
11641 if not rsync_opt_startswith("--timeout="):
11642 rsync_opts.append("--timeout=%d" % mytimeout)
11644 for opt in ("--compress", "--whole-file"):
11645 if opt not in rsync_opts:
11646 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11647 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11648 rsync_opts.append(opt)
# Map emerge verbosity flags onto rsync verbosity flags.
11650 if "--quiet" in myopts:
11651 rsync_opts.append("--quiet") # Shut up a lot
11653 rsync_opts.append("--verbose") # Print filelist
11655 if "--verbose" in myopts:
11656 rsync_opts.append("--progress") # Progress meter for each file
11658 if "--debug" in myopts:
11659 rsync_opts.append("--checksum") # Force checksum on all files
# Read the local tree timestamp for comparison with the server's.
11661 # Real local timestamp file.
11662 servertimestampfile = os.path.join(
11663 myportdir, "metadata", "timestamp.chk")
11665 content = portage.util.grabfile(servertimestampfile)
11669 mytimestamp = time.mktime(time.strptime(content[0],
11670 "%a, %d %b %Y %H:%M:%S +0000"))
11671 except (OverflowError, ValueError):
11676 rsync_initial_timeout = \
11677 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11679 rsync_initial_timeout = 15
11682 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11683 except SystemExit, e:
11684 raise # Needed else can't exit
11686 maxretries=3 #default number of retries
# Split the rsync URI into optional user@, hostname and optional :port.
11689 user_name, hostname, port = re.split(
11690 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11693 if user_name is None:
11695 updatecache_flg=True
11696 all_rsync_opts = set(rsync_opts)
11697 lexer = shlex.shlex(StringIO.StringIO(
11698 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
11699 lexer.whitespace_split = True
11700 extra_rsync_opts = list(lexer)
11702 all_rsync_opts.update(extra_rsync_opts)
# Choose the address family; -4/--ipv4 or -6/--ipv6 in the combined
# option set overrides the IPv4 default.
11703 family = socket.AF_INET
11704 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
11705 family = socket.AF_INET
11706 elif socket.has_ipv6 and \
11707 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
11708 family = socket.AF_INET6
# Sentinel exit codes used internally by the retry loop below.
11710 SERVER_OUT_OF_DATE = -1
11711 EXCEEDED_MAX_RETRIES = -2
# Resolve the mirror hostname; a random IP is substituted into the URI
# so retries can hit different mirrors behind a round-robin name.
11717 for addrinfo in socket.getaddrinfo(
11718 hostname, None, family, socket.SOCK_STREAM):
11719 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
11720 # IPv6 addresses need to be enclosed in square brackets
11721 ips.append("[%s]" % addrinfo[4][0])
11723 ips.append(addrinfo[4][0])
11724 from random import shuffle
11726 except SystemExit, e:
11727 raise # Needed else can't exit
11728 except Exception, e:
11729 print "Notice:",str(e)
11734 dosyncuri = syncuri.replace(
11735 "//" + user_name + hostname + port + "/",
11736 "//" + user_name + ips[0] + port + "/", 1)
11737 except SystemExit, e:
11738 raise # Needed else can't exit
11739 except Exception, e:
11740 print "Notice:",str(e)
11744 if "--ask" in myopts:
11745 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
# Retry loop: first pass vs. numbered retries are logged differently.
11750 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
11751 if "--quiet" not in myopts:
11752 print ">>> Starting rsync with "+dosyncuri+"..."
11754 emergelog(xterm_titles,
11755 ">>> Starting retry %d of %d with %s" % \
11756 (retries,maxretries,dosyncuri))
11757 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
11759 if mytimestamp != 0 and "--quiet" not in myopts:
11760 print ">>> Checking server timestamp ..."
11762 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
11764 if "--debug" in myopts:
11767 exitcode = os.EX_OK
11768 servertimestamp = 0
11769 # Even if there's no timestamp available locally, fetch the
11770 # timestamp anyway as an initial probe to verify that the server is
11771 # responsive. This protects us from hanging indefinitely on a
11772 # connection attempt to an unresponsive server which rsync's
11773 # --timeout option does not prevent.
11775 # Temporary file for remote server timestamp comparison.
11776 from tempfile import mkstemp
11777 fd, tmpservertimestampfile = mkstemp()
11779 mycommand = rsynccommand[:]
11780 mycommand.append(dosyncuri.rstrip("/") + \
11781 "/metadata/timestamp.chk")
11782 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the timestamp probe against an unresponsive server.
11786 def timeout_handler(signum, frame):
11787 raise portage.exception.PortageException("timed out")
11788 signal.signal(signal.SIGALRM, timeout_handler)
11789 # Timeout here in case the server is unresponsive. The
11790 # --timeout rsync option doesn't apply to the initial
11791 # connection attempt.
11792 if rsync_initial_timeout:
11793 signal.alarm(rsync_initial_timeout)
11795 mypids.extend(portage.process.spawn(
11796 mycommand, env=settings.environ(), returnpid=True))
11797 exitcode = os.waitpid(mypids[0], 0)[1]
11798 content = portage.grabfile(tmpservertimestampfile)
11800 if rsync_initial_timeout:
11803 os.unlink(tmpservertimestampfile)
11806 except portage.exception.PortageException, e:
# On timeout, reap/kill the spawned rsync so it does not linger.
11810 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
11811 os.kill(mypids[0], signal.SIGTERM)
11812 os.waitpid(mypids[0], 0)
11813 # This is the same code rsync uses for timeout.
# Decode the raw waitpid status into a conventional exit code.
11816 if exitcode != os.EX_OK:
11817 if exitcode & 0xff:
11818 exitcode = (exitcode & 0xff) << 8
11820 exitcode = exitcode >> 8
11822 portage.process.spawned_pids.remove(mypids[0])
11825 servertimestamp = time.mktime(time.strptime(
11826 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
11827 except (OverflowError, ValueError):
11829 del mycommand, mypids, content
# Compare local vs. server timestamps: equal -> already current;
# server older -> SERVER_OUT_OF_DATE; otherwise do the full transfer.
11830 if exitcode == os.EX_OK:
11831 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
11832 emergelog(xterm_titles,
11833 ">>> Cancelling sync -- Already current.")
11836 print ">>> Timestamps on the server and in the local repository are the same."
11837 print ">>> Cancelling all further sync action. You are already up to date."
11839 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11843 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
11844 emergelog(xterm_titles,
11845 ">>> Server out of date: %s" % dosyncuri)
11848 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
11850 print ">>> In order to force sync, remove '%s'." % servertimestampfile
11853 exitcode = SERVER_OUT_OF_DATE
11854 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
11856 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
11857 exitcode = portage.process.spawn(mycommand,
11858 env=settings.environ())
# Retryable rsync exit codes; see rsync(1) for their meanings.
11859 if exitcode in [0,1,3,4,11,14,20,21]:
11861 elif exitcode in [1,3,4,11,14,20,21]:
11864 # Code 2 indicates protocol incompatibility, which is expected
11865 # for servers with protocol < 29 that don't support
11866 # --prune-empty-directories. Retry for a server that supports
11867 # at least rsync protocol version 29 (>=rsync-2.6.4).
11872 if retries<=maxretries:
11873 print ">>> Retrying..."
11878 updatecache_flg=False
11879 exitcode = EXCEEDED_MAX_RETRIES
11883 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
11884 elif exitcode == SERVER_OUT_OF_DATE:
11886 elif exitcode == EXCEEDED_MAX_RETRIES:
11888 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable diagnostics for specific rsync failure classes.
11893 msg.append("Rsync has reported that there is a syntax error. Please ensure")
11894 msg.append("that your SYNC statement is proper.")
11895 msg.append("SYNC=" + settings["SYNC"])
11897 msg.append("Rsync has reported that there is a File IO error. Normally")
11898 msg.append("this means your disk is full, but can be caused by corruption")
11899 msg.append("on the filesystem that contains PORTDIR. Please investigate")
11900 msg.append("and try again after the problem has been fixed.")
11901 msg.append("PORTDIR=" + settings["PORTDIR"])
11903 msg.append("Rsync was killed before it finished.")
11905 msg.append("Rsync has not successfully finished. It is recommended that you keep")
11906 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
11907 msg.append("to use rsync due to firewall or other restrictions. This should be a")
11908 msg.append("temporary problem unless complications exist with your network")
11909 msg.append("(and possibly your system's filesystem) configuration.")
# cvs:// branch: initial checkout of gentoo-x86 or incremental update.
11913 elif syncuri[:6]=="cvs://":
11914 if not os.path.exists("/usr/bin/cvs"):
11915 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
11916 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
11918 cvsroot=syncuri[6:]
11919 cvsdir=os.path.dirname(myportdir)
11920 if not os.path.exists(myportdir+"/CVS"):
11922 print ">>> Starting initial cvs checkout with "+syncuri+"..."
11923 if os.path.exists(cvsdir+"/gentoo-x86"):
11924 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
11927 os.rmdir(myportdir)
11929 if e.errno != errno.ENOENT:
11931 "!!! existing '%s' directory; exiting.\n" % myportdir)
11934 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
11935 print "!!! cvs checkout error; exiting."
11937 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
11940 print ">>> Starting cvs update with "+syncuri+"..."
11941 retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
11942 myportdir, settings, free=1)
11943 if retval != os.EX_OK:
11945 dosyncuri = syncuri
11947 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
11948 noiselevel=-1, level=logging.ERROR)
# Post-sync: refresh metadata cache / config and apply global updates.
11951 if updatecache_flg and \
11952 myaction != "metadata" and \
11953 "metadata-transfer" not in settings.features:
11954 updatecache_flg = False
11956 # Reload the whole config from scratch.
11957 settings, trees, mtimedb = load_emerge_config(trees=trees)
11958 root_config = trees[settings["ROOT"]]["root_config"]
11959 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11961 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
11962 action_metadata(settings, portdb, myopts)
11964 if portage._global_updates(trees, mtimedb["updates"]):
11966 # Reload the whole config from scratch.
11967 settings, trees, mtimedb = load_emerge_config(trees=trees)
11968 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11969 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one so
# the user can be urged to update portage first.
11971 mybestpv = portdb.xmatch("bestmatch-visible",
11972 portage.const.PORTAGE_PACKAGE_ATOM)
11973 mypvs = portage.best(
11974 trees[settings["ROOT"]]["vartree"].dbapi.match(
11975 portage.const.PORTAGE_PACKAGE_ATOM))
11977 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's post_sync hook, if present and executable.
11979 if myaction != "metadata":
11980 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
11981 retval = portage.process.spawn(
11982 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
11983 dosyncuri], env=settings.environ())
11984 if retval != os.EX_OK:
11985 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
11987 if(mybestpv != mypvs) and not "--quiet" in myopts:
11989 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
11990 print red(" * ")+"that you update portage now, before any other packages are updated."
11992 print red(" * ")+"To update portage, run 'emerge portage' now."
11995 display_news_notification(root_config, myopts)
11998 def action_metadata(settings, portdb, myopts):
# NOTE(review): garbled extraction -- fused line numbers, missing lines;
# comments only added.
# Purpose: transfer the tree's pregenerated metadata/cache into the local
# depcache (emerge --metadata), with a percentage progress display unless
# --quiet is given.
11999 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
12000 old_umask = os.umask(0002)
12001 cachedir = os.path.normpath(settings.depcachedir)
# Refuse obviously-dangerous depcache locations (primary system dirs).
12002 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
12003 "/lib", "/opt", "/proc", "/root", "/sbin",
12004 "/sys", "/tmp", "/usr", "/var"]:
12005 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12006 "ROOT DIRECTORY ON YOUR SYSTEM."
12007 print >> sys.stderr, \
12008 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12010 if not os.path.exists(cachedir):
# Source of the transfer: the tree's metadata/cache, opened read-only via
# the configured metadata db module.
12013 ec = portage.eclass_cache.cache(portdb.porttree_root)
12014 myportdir = os.path.realpath(settings["PORTDIR"])
12015 cm = settings.load_best_module("portdbapi.metadbmodule")(
12016 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12018 from portage.cache import util
# Progress reporter: subclasses quiet_mirroring to print a percentage as
# the cp list is walked.
12020 class percentage_noise_maker(util.quiet_mirroring):
12021 def __init__(self, dbapi):
12023 self.cp_all = dbapi.cp_all()
12024 l = len(self.cp_all)
12025 self.call_update_min = 100000000
12026 self.min_cp_all = l/100.0
12030 def __iter__(self):
12031 for x in self.cp_all:
12033 if self.count > self.min_cp_all:
12034 self.call_update_min = 0
12036 for y in self.dbapi.cp_list(x):
12038 self.call_update_mine = 0
12040 def update(self, *arg):
12041 try: self.pstr = int(self.pstr) + 1
12042 except ValueError: self.pstr = 1
# Backspace over the previous figure before printing the new percentage.
12043 sys.stdout.write("%s%i%%" % \
12044 ("\b" * (len(str(self.pstr))+1), self.pstr))
12046 self.call_update_min = 10000000
12048 def finish(self, *arg):
12049 sys.stdout.write("\b\b\b\b100%\n")
# --quiet: plain cpv generator and a silent noise maker instead.
12052 if "--quiet" in myopts:
12053 def quicky_cpv_generator(cp_all_list):
12054 for x in cp_all_list:
12055 for y in portdb.cp_list(x):
12057 source = quicky_cpv_generator(portdb.cp_all())
12058 noise_maker = portage.cache.util.quiet_mirroring()
12060 noise_maker = source = percentage_noise_maker(portdb)
12061 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12062 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the caller's umask before returning.
12065 os.umask(old_umask)
12067 def action_regen(settings, portdb, max_jobs, max_load):
# NOTE(review): garbled extraction -- fused line numbers, missing lines
# (the try:/except halves around os.close are incomplete); comments only.
# Purpose: `emerge --regen` -- regenerate metadata cache entries for the
# whole tree via MetadataRegen, honoring --jobs/--load-average limits.
12068 xterm_titles = "notitles" not in settings.features
12069 emergelog(xterm_titles, " === regen")
12070 #regenerate cache entries
12071 portage.writemsg_stdout("Regenerating cache entries...\n")
# stdin is closed so spawned ebuild processes cannot block on input.
12073 os.close(sys.stdin.fileno())
12074 except SystemExit, e:
12075 raise # Needed else can't exit
12080 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12083 portage.writemsg_stdout("done!\n")
12085 def action_config(settings, trees, myopts, myfiles):
# NOTE(review): garbled extraction -- fused line numbers, missing lines;
# comments only added.
# Purpose: `emerge --config <atom>` -- run the ebuild's pkg_config phase
# for exactly one installed package, prompting when the atom is ambiguous.
12086 if len(myfiles) != 1:
12087 print red("!!! config can only take a single package atom at this time\n")
12089 if not is_valid_package_atom(myfiles[0]):
12090 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12092 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12093 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12097 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12098 except portage.exception.AmbiguousPackageName, e:
12099 # Multiple matches thrown from cpv_expand
12102 print "No packages found.\n"
# More than one installed match: interactively select with --ask,
# otherwise just list them and bail.
12104 elif len(pkgs) > 1:
12105 if "--ask" in myopts:
12107 print "Please select a package to configure:"
12111 options.append(str(idx))
12112 print options[-1]+") "+pkg
12114 options.append("X")
12115 idx = userquery("Selection?", options)
12118 pkg = pkgs[int(idx)-1]
12120 print "The following packages available:"
12123 print "\nPlease use a specific atom or the --ask option."
12129 if "--ask" in myopts:
12130 if userquery("Ready to configure "+pkg+"?") == "No":
12133 print "Configuring pkg..."
12135 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12136 mysettings = portage.config(clone=settings)
12137 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12138 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): BUG -- the expression below compares the *string* value of
# PORTAGE_DEBUG to the *integer* 1, which is always False in Python 2.
# The local `debug` computed above (== "1") is the correct test and is
# already used for the "clean" phase below; the "config" phase should
# presumably use it too.
12139 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12141 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12142 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12143 if retval == os.EX_OK:
12144 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12145 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12148 def action_info(settings, trees, myopts, myfiles):
# NOTE(review): garbled extraction -- fused line numbers, missing lines;
# comments only added.
# Purpose: `emerge --info` -- print portage/system settings, key package
# versions, and (for any atoms given in myfiles) the per-package build
# settings that differ from the current global configuration.
12149 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12150 settings.profile_path, settings["CHOST"],
12151 trees[settings["ROOT"]]["vartree"].dbapi)
# Banner for the global section.
12153 header_title = "System Settings"
12155 print header_width * "="
12156 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12157 print header_width * "="
12158 print "System uname: "+platform.platform(aliased=1)
# Last sync time, read from the tree's timestamp.chk.
12160 lastSync = portage.grabfile(os.path.join(
12161 settings["PORTDIR"], "metadata", "timestamp.chk"))
12162 print "Timestamp of tree:",
# Report distcc/ccache versions and whether the features are enabled.
12168 output=commands.getstatusoutput("distcc --version")
12170 print str(output[1].split("\n",1)[0]),
12171 if "distcc" in settings.features:
12176 output=commands.getstatusoutput("ccache -V")
12178 print str(output[1].split("\n",1)[0]),
12179 if "ccache" in settings.features:
# Versions of toolchain-relevant packages, plus profile-defined extras.
12184 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12185 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
12186 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12187 myvars = portage.util.unique_array(myvars)
12191 if portage.isvalidatom(x):
12192 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12193 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12194 pkg_matches.sort(portage.pkgcmp)
12196 for pn, ver, rev in pkg_matches:
12198 pkgs.append(ver + "-" + rev)
12202 pkgs = ", ".join(pkgs)
12203 print "%-20s %s" % (x+":", pkgs)
12205 print "%-20s %s" % (x+":", "[NOT VALID]")
12207 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: everything with --verbose, otherwise a curated list
# plus profile-defined info_vars.
12209 if "--verbose" in myopts:
12210 myvars=settings.keys()
12212 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12213 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12214 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12215 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12217 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12219 myvars = portage.util.unique_array(myvars)
12225 print '%s="%s"' % (x, settings[x])
# Fold USE_EXPAND-prefixed flags out of USE and print them as their own
# VAR="..." lines.
12227 use = set(settings["USE"].split())
12228 use_expand = settings["USE_EXPAND"].split()
12230 for varname in use_expand:
12231 flag_prefix = varname.lower() + "_"
12232 for f in list(use):
12233 if f.startswith(flag_prefix):
12237 print 'USE="%s"' % " ".join(use),
12238 for varname in use_expand:
12239 myval = settings.get(varname)
12241 print '%s="%s"' % (varname, myval),
12244 unset_vars.append(x)
12246 print "Unset: "+", ".join(unset_vars)
# --debug: dump cvs id strings of the portage submodules.
12249 if "--debug" in myopts:
12250 for x in dir(portage):
12251 module = getattr(portage, x)
12252 if "cvs_id_string" in dir(module):
12253 print "%s: %s" % (str(x), str(module.cvs_id_string))
12255 # See if we can find any packages installed matching the strings
12256 # passed on the command line
12258 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12259 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12261 mypkgs.extend(vardb.match(x))
12263 # If some packages were found...
12265 # Get our global settings (we only print stuff if it varies from
12266 # the current config)
12267 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12268 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12270 pkgsettings = portage.config(clone=settings)
12272 for myvar in mydesiredvars:
12273 global_vals[myvar] = set(settings.get(myvar, "").split())
12275 # Loop through each package
12276 # Only print settings if they differ from global settings
12277 header_title = "Package Settings"
12278 print header_width * "="
12279 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12280 print header_width * "="
12281 from portage.output import EOutput
12284 # Get all package specific variables
12285 auxvalues = vardb.aux_get(pkg, auxkeys)
12287 for i in xrange(len(auxkeys)):
12288 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12290 for myvar in mydesiredvars:
12291 # If the package variable doesn't match the
12292 # current global variable, something has changed
12293 # so set diff_found so we know to print
12294 if valuesmap[myvar] != global_vals[myvar]:
12295 diff_values[myvar] = valuesmap[myvar]
# USE is only meaningful intersected with the package's IUSE.
12296 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12297 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12298 pkgsettings.reset()
12299 # If a matching ebuild is no longer available in the tree, maybe it
12300 # would make sense to compare against the flags for the best
12301 # available version with the same slot?
12303 if portdb.cpv_exists(pkg):
12305 pkgsettings.setcpv(pkg, mydb=mydb)
12306 if valuesmap["IUSE"].intersection(
12307 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12308 diff_values["USE"] = valuesmap["USE"]
12309 # If a difference was found, print the info for
12312 # Print package info
12313 print "%s was built with the following:" % pkg
12314 for myvar in mydesiredvars + ["USE"]:
12315 if myvar in diff_values:
12316 mylist = list(diff_values[myvar])
12318 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Finally run the ebuild's pkg_info() phase for the package.
# NOTE(review): as in action_config, `== 1` below compares a string to an
# integer and is presumably always False -- confirm against upstream.
12320 print ">>> Attempting to run pkg_info() for '%s'" % pkg
12321 ebuildpath = vardb.findname(pkg)
12322 if not ebuildpath or not os.path.exists(ebuildpath):
12323 out.ewarn("No ebuild found for '%s'" % pkg)
12325 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12326 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12327 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12330 def action_search(root_config, myopts, myfiles, spinner):
12332 print "emerge: no search terms provided."
12334 searchinstance = search(root_config,
12335 spinner, "--searchdesc" in myopts,
12336 "--quiet" not in myopts, "--usepkg" in myopts,
12337 "--usepkgonly" in myopts)
12338 for mysearch in myfiles:
12340 searchinstance.execute(mysearch)
12341 except re.error, comment:
12342 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12344 searchinstance.output()
12346 def action_depclean(settings, trees, ldpath_mtimes,
12347 myopts, action, myfiles, spinner):
12348 # Kill packages that aren't explicitly merged or are required as a
12349 # dependency of another package. World file is explicit.
12351 # Global depclean or prune operations are not very safe when there are
12352 # missing dependencies since it's unknown how badly incomplete
12353 # the dependency graph is, and we might accidentally remove packages
12354 # that should have been pulled into the graph. On the other hand, it's
12355 # relatively safe to ignore missing deps when only asked to remove
12356 # specific packages.
12357 allow_missing_deps = len(myfiles) > 0
12360 msg.append("Always study the list of packages to be cleaned for any obvious\n")
12361 msg.append("mistakes. Packages that are part of the world set will always\n")
12362 msg.append("be kept. They can be manually added to this set with\n")
12363 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
12364 msg.append("package.provided (see portage(5)) will be removed by\n")
12365 msg.append("depclean, even if they are part of the world set.\n")
12367 msg.append("As a safety measure, depclean will not remove any packages\n")
12368 msg.append("unless *all* required dependencies have been resolved. As a\n")
12369 msg.append("consequence, it is often necessary to run %s\n" % \
12370 good("`emerge --update"))
12371 msg.append(good("--newuse --deep @system @world`") + \
12372 " prior to depclean.\n")
12374 if action == "depclean" and "--quiet" not in myopts and not myfiles:
12375 portage.writemsg_stdout("\n")
12377 portage.writemsg_stdout(colorize("WARN", " * ") + x)
12379 xterm_titles = "notitles" not in settings.features
12380 myroot = settings["ROOT"]
12381 root_config = trees[myroot]["root_config"]
12382 getSetAtoms = root_config.setconfig.getSetAtoms
12383 vardb = trees[myroot]["vartree"].dbapi
12385 required_set_names = ("system", "world")
12389 for s in required_set_names:
12390 required_sets[s] = InternalPackageSet(
12391 initial_atoms=getSetAtoms(s))
12394 # When removing packages, use a temporary version of world
12395 # which excludes packages that are intended to be eligible for
12397 world_temp_set = required_sets["world"]
12398 system_set = required_sets["system"]
12400 if not system_set or not world_temp_set:
12403 writemsg_level("!!! You have no system list.\n",
12404 level=logging.ERROR, noiselevel=-1)
12406 if not world_temp_set:
12407 writemsg_level("!!! You have no world file.\n",
12408 level=logging.WARNING, noiselevel=-1)
12410 writemsg_level("!!! Proceeding is likely to " + \
12411 "break your installation.\n",
12412 level=logging.WARNING, noiselevel=-1)
12413 if "--pretend" not in myopts:
12414 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12416 if action == "depclean":
12417 emergelog(xterm_titles, " >>> depclean")
12420 args_set = InternalPackageSet()
12423 if not is_valid_package_atom(x):
12424 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12425 level=logging.ERROR, noiselevel=-1)
12426 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12429 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12430 except portage.exception.AmbiguousPackageName, e:
12431 msg = "The short ebuild name \"" + x + \
12432 "\" is ambiguous. Please specify " + \
12433 "one of the following " + \
12434 "fully-qualified ebuild names instead:"
12435 for line in textwrap.wrap(msg, 70):
12436 writemsg_level("!!! %s\n" % (line,),
12437 level=logging.ERROR, noiselevel=-1)
12439 writemsg_level(" %s\n" % colorize("INFORM", i),
12440 level=logging.ERROR, noiselevel=-1)
12441 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12444 matched_packages = False
12447 matched_packages = True
12449 if not matched_packages:
12450 writemsg_level(">>> No packages selected for removal by %s\n" % \
12454 writemsg_level("\nCalculating dependencies ")
12455 resolver_params = create_depgraph_params(myopts, "remove")
12456 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12457 vardb = resolver.trees[myroot]["vartree"].dbapi
12459 if action == "depclean":
12462 # Pull in everything that's installed but not matched
12463 # by an argument atom since we don't want to clean any
12464 # package if something depends on it.
12466 world_temp_set.clear()
12471 if args_set.findAtomForPackage(pkg) is None:
12472 world_temp_set.add("=" + pkg.cpv)
12474 except portage.exception.InvalidDependString, e:
12475 show_invalid_depstring_notice(pkg,
12476 pkg.metadata["PROVIDE"], str(e))
12478 world_temp_set.add("=" + pkg.cpv)
12481 elif action == "prune":
12483 # Pull in everything that's installed since we don't want
12484 # to prune a package if something depends on it.
12485 world_temp_set.clear()
12486 world_temp_set.update(vardb.cp_all())
12490 # Try to prune everything that's slotted.
12491 for cp in vardb.cp_all():
12492 if len(vardb.cp_list(cp)) > 1:
12495 # Remove atoms from world that match installed packages
12496 # that are also matched by argument atoms, but do not remove
12497 # them if they match the highest installed version.
12500 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12501 if not pkgs_for_cp or pkg not in pkgs_for_cp:
12502 raise AssertionError("package expected in matches: " + \
12503 "cp = %s, cpv = %s matches = %s" % \
12504 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12506 highest_version = pkgs_for_cp[-1]
12507 if pkg == highest_version:
12508 # pkg is the highest version
12509 world_temp_set.add("=" + pkg.cpv)
12512 if len(pkgs_for_cp) <= 1:
12513 raise AssertionError("more packages expected: " + \
12514 "cp = %s, cpv = %s matches = %s" % \
12515 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12518 if args_set.findAtomForPackage(pkg) is None:
12519 world_temp_set.add("=" + pkg.cpv)
12521 except portage.exception.InvalidDependString, e:
12522 show_invalid_depstring_notice(pkg,
12523 pkg.metadata["PROVIDE"], str(e))
12525 world_temp_set.add("=" + pkg.cpv)
12529 for s, package_set in required_sets.iteritems():
12530 set_atom = SETPREFIX + s
12531 set_arg = SetArg(arg=set_atom, set=package_set,
12532 root_config=resolver.roots[myroot])
12533 set_args[s] = set_arg
12534 for atom in set_arg.set:
12535 resolver._dep_stack.append(
12536 Dependency(atom=atom, root=myroot, parent=set_arg))
12537 resolver.digraph.add(set_arg, None)
12539 success = resolver._complete_graph()
12540 writemsg_level("\b\b... done!\n")
12542 resolver.display_problems()
# Nested helper (closes over resolver, allow_missing_deps, action, msg from
# the enclosing action function). Collects hard unsatisfied dependencies from
# the resolver and, unless missing deps are allowed, prints an explanatory
# error. NOTE(review): several lines of this excerpt were elided (embedded
# numbering jumps), including the return statement(s) and the `msg` list
# initialization — confirm against the full source before editing.
12547 def unresolved_deps():
12549 unresolvable = set()
# Only dependencies stronger than SOFT, pulled in by a real Package,
# count as unresolvable here.
12550 for dep in resolver._initially_unsatisfied_deps:
12551 if isinstance(dep.parent, Package) and \
12552 (dep.priority > UnmergeDepPriority.SOFT):
12553 unresolvable.add((dep.atom, dep.parent.cpv))
12555 if not unresolvable:
12558 if unresolvable and not allow_missing_deps:
12559 prefix = bad(" * ")
12561 msg.append("Dependencies could not be completely resolved due to")
12562 msg.append("the following required packages not being installed:")
12564 for atom, parent in unresolvable:
12565 msg.append(" %s pulled in by:" % (atom,))
12566 msg.append(" %s" % (parent,))
12568 msg.append("Have you forgotten to run " + \
12569 good("`emerge --update --newuse --deep world`") + " prior to")
12570 msg.append(("%s? It may be necessary to manually " + \
12571 "uninstall packages that no longer") % action)
12572 msg.append("exist in the portage tree since " + \
12573 "it may not be possible to satisfy their")
12574 msg.append("dependencies. Also, be aware of " + \
12575 "the --with-bdeps option that is documented")
12576 msg.append("in " + good("`man emerge`") + ".")
12577 if action == "prune":
12579 msg.append("If you would like to ignore " + \
12580 "dependencies then use %s." % good("--nodeps"))
# Emit the whole message at ERROR level, each line with the bad() prefix.
12581 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12582 level=logging.ERROR, noiselevel=-1)
12586 if unresolved_deps():
12589 graph = resolver.digraph.copy()
12590 required_pkgs_total = 0
12592 if isinstance(node, Package):
12593 required_pkgs_total += 1
# Nested helper (closes over graph and msg): print which graph parents pull
# in child_node, for --verbose reporting. NOTE(review): some lines are elided
# from this excerpt (e.g. the early return and the parent_strs/msg list
# initializations) — confirm against the full source before editing.
12595 def show_parents(child_node):
12596 parent_nodes = graph.parent_nodes(child_node)
12597 if not parent_nodes:
12598 # With --prune, the highest version can be pulled in without any
12599 # real parent since all installed packages are pulled in. In that
12600 # case there's nothing to show here.
# Render each parent by its cpv when it is a package; fall back to str(node)
# for set/argument nodes that have no cpv attribute.
12603 for node in parent_nodes:
12604 parent_strs.append(str(getattr(node, "cpv", node)))
12607 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
12608 for parent_str in parent_strs:
12609 msg.append(" %s\n" % (parent_str,))
12611 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Nested helper (closes over action, args_set, graph, vardb, myopts,
# set_args): build the list of installed packages eligible for removal —
# those NOT reachable in the dependency graph. With --verbose, packages that
# are kept are reported via show_parents. NOTE(review): the iteration
# headers feeding the depclean branch (the loops that bind `pkg`) are elided
# from this excerpt — confirm against the full source before editing.
12613 def create_cleanlist():
12614 pkgs_to_remove = []
12616 if action == "depclean":
12622 arg_atom = args_set.findAtomForPackage(pkg)
12623 except portage.exception.InvalidDependString:
12624 # this error has already been displayed by now
# A package not pulled into the graph has no remaining consumers,
# so it is safe to schedule for removal.
12628 if pkg not in graph:
12629 pkgs_to_remove.append(pkg)
12630 elif "--verbose" in myopts:
12635 if pkg not in graph:
12636 pkgs_to_remove.append(pkg)
12637 elif "--verbose" in myopts:
12640 elif action == "prune":
12641 # Prune really uses all installed instead of world. It's not
12642 # a real reverse dependency so don't display it as such.
12643 graph.remove(set_args["world"])
# For prune, only packages matched by the argument atoms are candidates.
12645 for atom in args_set:
12646 for pkg in vardb.match_pkgs(atom):
12647 if pkg not in graph:
12648 pkgs_to_remove.append(pkg)
12649 elif "--verbose" in myopts:
# Nothing selected: tell the user how to get more detail / override deps.
12652 if not pkgs_to_remove:
12654 ">>> No packages selected for removal by %s\n" % action)
12655 if "--verbose" not in myopts:
12657 ">>> To see reverse dependencies, use %s\n" % \
12659 if action == "prune":
12661 ">>> To ignore dependencies, use %s\n" % \
12664 return pkgs_to_remove
12666 cleanlist = create_cleanlist()
12669 clean_set = set(cleanlist)
12671 # Check if any of these package are the sole providers of libraries
12672 # with consumers that have not been selected for removal. If so, these
12673 # packages and any dependencies need to be added to the graph.
12674 real_vardb = trees[myroot]["vartree"].dbapi
12675 linkmap = real_vardb.linkmap
12676 liblist = linkmap.listLibraryObjects()
12677 consumer_cache = {}
12678 provider_cache = {}
12682 writemsg_level(">>> Checking for lib consumers...\n")
12684 for pkg in cleanlist:
12685 pkg_dblink = real_vardb._dblink(pkg.cpv)
12686 provided_libs = set()
12688 for lib in liblist:
12689 if pkg_dblink.isowner(lib, myroot):
12690 provided_libs.add(lib)
12692 if not provided_libs:
12696 for lib in provided_libs:
12697 lib_consumers = consumer_cache.get(lib)
12698 if lib_consumers is None:
12699 lib_consumers = linkmap.findConsumers(lib)
12700 consumer_cache[lib] = lib_consumers
12702 consumers[lib] = lib_consumers
12707 for lib, lib_consumers in consumers.items():
12708 for consumer_file in list(lib_consumers):
12709 if pkg_dblink.isowner(consumer_file, myroot):
12710 lib_consumers.remove(consumer_file)
12711 if not lib_consumers:
12717 for lib, lib_consumers in consumers.iteritems():
12719 soname = soname_cache.get(lib)
12721 soname = linkmap.getSoname(lib)
12722 soname_cache[lib] = soname
12724 consumer_providers = []
12725 for lib_consumer in lib_consumers:
12726 providers = provider_cache.get(lib)
12727 if providers is None:
12728 providers = linkmap.findProviders(lib_consumer)
12729 provider_cache[lib_consumer] = providers
12730 if soname not in providers:
12731 # Why does this happen?
12733 consumer_providers.append(
12734 (lib_consumer, providers[soname]))
12736 consumers[lib] = consumer_providers
12738 consumer_map[pkg] = consumers
12742 search_files = set()
12743 for consumers in consumer_map.itervalues():
12744 for lib, consumer_providers in consumers.iteritems():
12745 for lib_consumer, providers in consumer_providers:
12746 search_files.add(lib_consumer)
12747 search_files.update(providers)
12749 writemsg_level(">>> Assigning files to packages...\n")
12750 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
12752 for pkg, consumers in consumer_map.items():
12753 for lib, consumer_providers in consumers.items():
12754 lib_consumers = set()
12756 for lib_consumer, providers in consumer_providers:
12757 owner_set = file_owners.get(lib_consumer)
12758 provider_dblinks = set()
12759 provider_pkgs = set()
12761 if len(providers) > 1:
12762 for provider in providers:
12763 provider_set = file_owners.get(provider)
12764 if provider_set is not None:
12765 provider_dblinks.update(provider_set)
12767 if len(provider_dblinks) > 1:
12768 for provider_dblink in provider_dblinks:
12769 pkg_key = ("installed", myroot,
12770 provider_dblink.mycpv, "nomerge")
12771 if pkg_key not in clean_set:
12772 provider_pkgs.add(vardb.get(pkg_key))
12777 if owner_set is not None:
12778 lib_consumers.update(owner_set)
12780 for consumer_dblink in list(lib_consumers):
12781 if ("installed", myroot, consumer_dblink.mycpv,
12782 "nomerge") in clean_set:
12783 lib_consumers.remove(consumer_dblink)
12787 consumers[lib] = lib_consumers
12791 del consumer_map[pkg]
12794 # TODO: Implement a package set for rebuilding consumer packages.
12796 msg = "In order to avoid breakage of link level " + \
12797 "dependencies, one or more packages will not be removed. " + \
12798 "This can be solved by rebuilding " + \
12799 "the packages that pulled them in."
12801 prefix = bad(" * ")
12802 from textwrap import wrap
12803 writemsg_level("".join(prefix + "%s\n" % line for \
12804 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
12807 for pkg, consumers in consumer_map.iteritems():
12808 unique_consumers = set(chain(*consumers.values()))
12809 unique_consumers = sorted(consumer.mycpv \
12810 for consumer in unique_consumers)
12812 msg.append(" %s pulled in by:" % (pkg.cpv,))
12813 for consumer in unique_consumers:
12814 msg.append(" %s" % (consumer,))
12816 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
12817 level=logging.WARNING, noiselevel=-1)
12819 # Add lib providers to the graph as children of lib consumers,
12820 # and also add any dependencies pulled in by the provider.
12821 writemsg_level(">>> Adding lib providers to graph...\n")
12823 for pkg, consumers in consumer_map.iteritems():
12824 for consumer_dblink in set(chain(*consumers.values())):
12825 consumer_pkg = vardb.get(("installed", myroot,
12826 consumer_dblink.mycpv, "nomerge"))
12827 if not resolver._add_pkg(pkg,
12828 Dependency(parent=consumer_pkg,
12829 priority=UnmergeDepPriority(runtime=True),
12831 resolver.display_problems()
12834 writemsg_level("\nCalculating dependencies ")
12835 success = resolver._complete_graph()
12836 writemsg_level("\b\b... done!\n")
12837 resolver.display_problems()
12840 if unresolved_deps():
12843 graph = resolver.digraph.copy()
12844 required_pkgs_total = 0
12846 if isinstance(node, Package):
12847 required_pkgs_total += 1
12848 cleanlist = create_cleanlist()
12851 clean_set = set(cleanlist)
12853 # Use a topological sort to create an unmerge order such that
12854 # each package is unmerged before its dependencies. This is
12855 # necessary to avoid breaking things that may need to run
12856 # during pkg_prerm or pkg_postrm phases.
12858 # Create a new graph to account for dependencies between the
12859 # packages being unmerged.
12863 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
12864 runtime = UnmergeDepPriority(runtime=True)
12865 runtime_post = UnmergeDepPriority(runtime_post=True)
12866 buildtime = UnmergeDepPriority(buildtime=True)
12868 "RDEPEND": runtime,
12869 "PDEPEND": runtime_post,
12870 "DEPEND": buildtime,
12873 for node in clean_set:
12874 graph.add(node, None)
12876 node_use = node.metadata["USE"].split()
12877 for dep_type in dep_keys:
12878 depstr = node.metadata[dep_type]
12882 portage.dep._dep_check_strict = False
12883 success, atoms = portage.dep_check(depstr, None, settings,
12884 myuse=node_use, trees=resolver._graph_trees,
12887 portage.dep._dep_check_strict = True
12889 # Ignore invalid deps of packages that will
12890 # be uninstalled anyway.
12893 priority = priority_map[dep_type]
12895 if not isinstance(atom, portage.dep.Atom):
12896 # Ignore invalid atoms returned from dep_check().
12900 matches = vardb.match_pkgs(atom)
12903 for child_node in matches:
12904 if child_node in clean_set:
12905 graph.add(child_node, node, priority=priority)
12908 if len(graph.order) == len(graph.root_nodes()):
12909 # If there are no dependencies between packages
12910 # let unmerge() group them by cat/pn.
12912 cleanlist = [pkg.cpv for pkg in graph.order]
12914 # Order nodes from lowest to highest overall reference count for
12915 # optimal root node selection.
12916 node_refcounts = {}
12917 for node in graph.order:
12918 node_refcounts[node] = len(graph.parent_nodes(node))
12919 def cmp_reference_count(node1, node2):
12920 return node_refcounts[node1] - node_refcounts[node2]
12921 graph.order.sort(cmp_reference_count)
12923 ignore_priority_range = [None]
12924 ignore_priority_range.extend(
12925 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
12926 while not graph.empty():
12927 for ignore_priority in ignore_priority_range:
12928 nodes = graph.root_nodes(ignore_priority=ignore_priority)
12932 raise AssertionError("no root nodes")
12933 if ignore_priority is not None:
12934 # Some deps have been dropped due to circular dependencies,
12935 # so only pop one node in order to minimize the number that
12940 cleanlist.append(node.cpv)
12942 unmerge(root_config, myopts, "unmerge", cleanlist,
12943 ldpath_mtimes, ordered=ordered)
12945 if action == "prune":
12948 if not cleanlist and "--quiet" in myopts:
12951 print "Packages installed: "+str(len(vardb.cpv_all()))
12952 print "Packages in world: " + \
12953 str(len(root_config.sets["world"].getAtoms()))
12954 print "Packages in system: " + \
12955 str(len(root_config.sets["system"].getAtoms()))
12956 print "Required packages: "+str(required_pkgs_total)
12957 if "--pretend" in myopts:
12958 print "Number to remove: "+str(len(cleanlist))
12960 print "Number removed: "+str(len(cleanlist))
12962 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
12963 skip_masked=False, skip_unsatisfied=False):
12965 Construct a depgraph for the given resume list. This will raise
12966 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
12968 @returns: (success, depgraph, dropped_tasks)
12970 mergelist = mtimedb["resume"]["mergelist"]
12971 dropped_tasks = set()
# NOTE(review): this excerpt elides several lines (the retry loop header and
# the `unsatisfied = ...` binding among them) — confirm against full source.
12973 mydepgraph = depgraph(settings, trees,
12974 myopts, myparams, spinner)
12976 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
12977 skip_masked=skip_masked)
12978 except depgraph.UnsatisfiedResumeDep, e:
12979 if not skip_unsatisfied:
12982 graph = mydepgraph.digraph
# Map each parent of an unsatisfied dep to itself (used as an ordered set).
12983 unsatisfied_parents = dict((dep.parent, dep.parent) \
12984 for dep in e.value)
12985 traversed_nodes = set()
12986 unsatisfied_stack = list(unsatisfied_parents)
# Walk up the graph: dropping a package can in turn leave its own
# parents unsatisfied, so propagate transitively.
12987 while unsatisfied_stack:
12988 pkg = unsatisfied_stack.pop()
12989 if pkg in traversed_nodes:
12991 traversed_nodes.add(pkg)
12993 # If this package was pulled in by a parent
12994 # package scheduled for merge, removing this
12995 # package may cause the parent package's
12996 # dependency to become unsatisfied.
12997 for parent_node in graph.parent_nodes(pkg):
12998 if not isinstance(parent_node, Package) \
12999 or parent_node.operation not in ("merge", "nomerge"):
13002 graph.child_nodes(parent_node,
13003 ignore_priority=DepPriority.SOFT)
13004 if pkg in unsatisfied:
13005 unsatisfied_parents[parent_node] = parent_node
13006 unsatisfied_stack.append(parent_node)
# Drop every task whose parent became unsatisfied; mergelist entries are
# lists, compared here by tuple identity against the parents found above.
13008 pruned_mergelist = [x for x in mergelist \
13009 if isinstance(x, list) and \
13010 tuple(x) not in unsatisfied_parents]
13012 # If the mergelist doesn't shrink then this loop is infinite.
13013 if len(pruned_mergelist) == len(mergelist):
13014 # This happens if a package can't be dropped because
13015 # it's already installed, but it has unsatisfied PDEPEND.
13017 mergelist[:] = pruned_mergelist
13019 # Exclude installed packages that have been removed from the graph due
13020 # to failure to build/install runtime dependencies after the dependent
13021 # package has already been installed.
13022 dropped_tasks.update(pkg for pkg in \
13023 unsatisfied_parents if pkg.operation != "nomerge")
# Break reference cycles so the discarded graph objects can be collected.
13024 mydepgraph.break_refs(unsatisfied_parents)
13026 del e, graph, traversed_nodes, \
13027 unsatisfied_parents, unsatisfied_stack
13031 return (success, mydepgraph, dropped_tasks)
# Top-level handler for the build/merge actions of emerge. Validates resume
# state, builds a dependency graph (fresh or resumed), shows/asks per the
# user's options, then hands the merge list to the Scheduler.
# NOTE(review): many lines of this excerpt were elided (embedded numbering
# jumps) — `continue`/`return`/`sys.exit` lines, loop headers and several
# bindings are missing; confirm against the full source before editing.
13033 def action_build(settings, trees, mtimedb,
13034 myopts, myaction, myfiles, spinner):
13036 # validate the state of the resume data
13037 # so that we can make assumptions later.
13038 for k in ("resume", "resume_backup"):
13039 if k not in mtimedb:
13041 resume_data = mtimedb[k]
13042 if not isinstance(resume_data, dict):
13045 mergelist = resume_data.get("mergelist")
13046 if not isinstance(mergelist, list):
13049 resume_opts = resume_data.get("myopts")
13050 if not isinstance(resume_opts, (dict, list)):
13053 favorites = resume_data.get("favorites")
13054 if not isinstance(favorites, list):
# Promote the backup resume entry if the primary one is gone.
13059 if "--resume" in myopts and \
13060 ("resume" in mtimedb or
13061 "resume_backup" in mtimedb):
13063 if "resume" not in mtimedb:
13064 mtimedb["resume"] = mtimedb["resume_backup"]
13065 del mtimedb["resume_backup"]
13067 # "myopts" is a list for backward compatibility.
13068 resume_opts = mtimedb["resume"].get("myopts", [])
13069 if isinstance(resume_opts, list):
13070 resume_opts = dict((k,True) for k in resume_opts)
# These options are one-shot in nature and must not carry over to a resume.
13071 for opt in ("--skipfirst", "--ask", "--tree"):
13072 resume_opts.pop(opt, None)
13073 myopts.update(resume_opts)
13075 if "--debug" in myopts:
13076 writemsg_level("myopts %s\n" % (myopts,))
13078 # Adjust config according to options of the command being resumed.
13079 for myroot in trees:
13080 mysettings = trees[myroot]["vartree"].settings
13081 mysettings.unlock()
13082 adjust_config(myopts, mysettings)
13084 del myroot, mysettings
13086 ldpath_mtimes = mtimedb["ldpath"]
# Cache commonly-tested option flags as locals.
13089 buildpkgonly = "--buildpkgonly" in myopts
13090 pretend = "--pretend" in myopts
13091 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13092 ask = "--ask" in myopts
13093 nodeps = "--nodeps" in myopts
13094 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13095 tree = "--tree" in myopts
13096 if nodeps and tree:
13098 del myopts["--tree"]
13099 portage.writemsg(colorize("WARN", " * ") + \
13100 "--tree is broken with --nodeps. Disabling...\n")
13101 debug = "--debug" in myopts
13102 verbose = "--verbose" in myopts
13103 quiet = "--quiet" in myopts
13104 if pretend or fetchonly:
13105 # make the mtimedb readonly
13106 mtimedb.filename = None
13107 if "--digest" in myopts:
13108 msg = "The --digest option can prevent corruption from being" + \
13109 " noticed. The `repoman manifest` command is the preferred" + \
13110 " way to generate manifests and it is capable of doing an" + \
13111 " entire repository or category at once."
13112 prefix = bad(" * ")
13113 writemsg(prefix + "\n")
13114 from textwrap import wrap
13115 for line in wrap(msg, 72):
13116 writemsg("%s%s\n" % (prefix, line))
13117 writemsg(prefix + "\n")
# Describe what the pretend/ask/tree/verbose run is about to display.
13119 if "--quiet" not in myopts and \
13120 ("--pretend" in myopts or "--ask" in myopts or \
13121 "--tree" in myopts or "--verbose" in myopts):
13123 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13125 elif "--buildpkgonly" in myopts:
13129 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13131 print darkgreen("These are the packages that would be %s, in reverse order:") % action
13135 print darkgreen("These are the packages that would be %s, in order:") % action
13138 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13139 if not show_spinner:
13140 spinner.update = spinner.update_quiet
13143 favorites = mtimedb["resume"].get("favorites")
13144 if not isinstance(favorites, list):
13148 print "Calculating dependencies ",
13149 myparams = create_depgraph_params(myopts, myaction)
# --skipfirst: drop the first pending merge task from the resume list.
13151 resume_data = mtimedb["resume"]
13152 mergelist = resume_data["mergelist"]
13153 if mergelist and "--skipfirst" in myopts:
13154 for i, task in enumerate(mergelist):
13155 if isinstance(task, list) and \
13156 task and task[-1] == "merge":
13160 skip_masked = "--skipfirst" in myopts
13161 skip_unsatisfied = "--skipfirst" in myopts
13165 success, mydepgraph, dropped_tasks = resume_depgraph(
13166 settings, trees, mtimedb, myopts, myparams, spinner,
13167 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13168 except (portage.exception.PackageNotFound,
13169 depgraph.UnsatisfiedResumeDep), e:
13170 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13171 mydepgraph = e.depgraph
13174 from textwrap import wrap
13175 from portage.output import EOutput
# Explain why the resume list could not be loaded, dumping the raw
# list only when enough verbosity/debugging was requested.
13178 resume_data = mtimedb["resume"]
13179 mergelist = resume_data.get("mergelist")
13180 if not isinstance(mergelist, list):
13182 if mergelist and debug or (verbose and not quiet):
13183 out.eerror("Invalid resume list:")
13186 for task in mergelist:
13187 if isinstance(task, list):
13188 out.eerror(indent + str(tuple(task)))
13191 if isinstance(e, depgraph.UnsatisfiedResumeDep):
13192 out.eerror("One or more packages are either masked or " + \
13193 "have missing dependencies:")
13196 for dep in e.value:
13197 if dep.atom is None:
13198 out.eerror(indent + "Masked package:")
13199 out.eerror(2 * indent + str(dep.parent))
13202 out.eerror(indent + str(dep.atom) + " pulled in by:")
13203 out.eerror(2 * indent + str(dep.parent))
13205 msg = "The resume list contains packages " + \
13206 "that are either masked or have " + \
13207 "unsatisfied dependencies. " + \
13208 "Please restart/continue " + \
13209 "the operation manually, or use --skipfirst " + \
13210 "to skip the first package in the list and " + \
13211 "any other packages that may be " + \
13212 "masked or have missing dependencies."
13213 for line in wrap(msg, 72):
13215 elif isinstance(e, portage.exception.PackageNotFound):
13216 out.eerror("An expected package is " + \
13217 "not available: %s" % str(e))
13219 msg = "The resume list contains one or more " + \
13220 "packages that are no longer " + \
13221 "available. Please restart/continue " + \
13222 "the operation manually."
13223 for line in wrap(msg, 72):
13227 print "\b\b... done!"
# Report tasks that resume_depgraph had to drop.
13231 portage.writemsg("!!! One or more packages have been " + \
13232 "dropped due to\n" + \
13233 "!!! masking or unsatisfied dependencies:\n\n",
13235 for task in dropped_tasks:
13236 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
13237 portage.writemsg("\n", noiselevel=-1)
13240 if mydepgraph is not None:
13241 mydepgraph.display_problems()
13242 if not (ask or pretend):
13243 # delete the current list and also the backup
13244 # since it's probably stale too.
13245 for k in ("resume", "resume_backup"):
13246 mtimedb.pop(k, None)
13251 if ("--resume" in myopts):
13252 print darkgreen("emerge: It seems we have nothing to resume...")
# Fresh (non-resume) graph: select the requested files/atoms/sets.
13255 myparams = create_depgraph_params(myopts, myaction)
13256 if "--quiet" not in myopts and "--nodeps" not in myopts:
13257 print "Calculating dependencies ",
13259 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13261 retval, favorites = mydepgraph.select_files(myfiles)
13262 except portage.exception.PackageNotFound, e:
13263 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13265 except portage.exception.PackageSetNotFound, e:
13266 root_config = trees[settings["ROOT"]]["root_config"]
13267 display_missing_pkg_set(root_config, e.value)
13270 print "\b\b... done!"
13272 mydepgraph.display_problems()
# Interactive display path: show the merge list and prompt the user.
13275 if "--pretend" not in myopts and \
13276 ("--ask" in myopts or "--tree" in myopts or \
13277 "--verbose" in myopts) and \
13278 not ("--quiet" in myopts and "--ask" not in myopts):
13279 if "--resume" in myopts:
13280 mymergelist = mydepgraph.altlist()
13281 if len(mymergelist) == 0:
13282 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13284 favorites = mtimedb["resume"]["favorites"]
13285 retval = mydepgraph.display(
13286 mydepgraph.altlist(reversed=tree),
13287 favorites=favorites)
13288 mydepgraph.display_problems()
13289 if retval != os.EX_OK:
13291 prompt="Would you like to resume merging these packages?"
13293 retval = mydepgraph.display(
13294 mydepgraph.altlist(reversed=("--tree" in myopts)),
13295 favorites=favorites)
13296 mydepgraph.display_problems()
13297 if retval != os.EX_OK:
13300 for x in mydepgraph.altlist():
13301 if isinstance(x, Package) and x.operation == "merge":
13305 sets = trees[settings["ROOT"]]["root_config"].sets
13306 world_candidates = None
13307 if "--noreplace" in myopts and \
13308 not oneshot and favorites:
13309 # Sets that are not world candidates are filtered
13310 # out here since the favorites list needs to be
13311 # complete for depgraph.loadResumeCommand() to
13312 # operate correctly.
13313 world_candidates = [x for x in favorites \
13314 if not (x.startswith(SETPREFIX) and \
13315 not sets[x[1:]].world_candidate)]
13316 if "--noreplace" in myopts and \
13317 not oneshot and world_candidates:
13319 for x in world_candidates:
13320 print " %s %s" % (good("*"), x)
13321 prompt="Would you like to add these packages to your world favorites?"
13322 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
13323 prompt="Nothing to merge; would you like to auto-clean packages?"
13326 print "Nothing to merge; quitting."
13329 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13330 prompt="Would you like to fetch the source files for these packages?"
13332 prompt="Would you like to merge these packages?"
13334 if "--ask" in myopts and userquery(prompt) == "No":
13339 # Don't ask again (e.g. when auto-cleaning packages after merge)
13340 myopts.pop("--ask", None)
# Pure --pretend display path (no prompt, no merge).
13342 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13343 if ("--resume" in myopts):
13344 mymergelist = mydepgraph.altlist()
13345 if len(mymergelist) == 0:
13346 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13348 favorites = mtimedb["resume"]["favorites"]
13349 retval = mydepgraph.display(
13350 mydepgraph.altlist(reversed=tree),
13351 favorites=favorites)
13352 mydepgraph.display_problems()
13353 if retval != os.EX_OK:
13356 retval = mydepgraph.display(
13357 mydepgraph.altlist(reversed=("--tree" in myopts)),
13358 favorites=favorites)
13359 mydepgraph.display_problems()
13360 if retval != os.EX_OK:
# --buildpkgonly requires a graph with no non-trivial edges left.
13362 if "--buildpkgonly" in myopts:
13363 graph_copy = mydepgraph.digraph.clone()
13364 for node in list(graph_copy.order):
13365 if not isinstance(node, Package):
13366 graph_copy.remove(node)
13367 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13368 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13369 print "!!! You have to merge the dependencies before you can build this package.\n"
13372 if "--buildpkgonly" in myopts:
13373 graph_copy = mydepgraph.digraph.clone()
13374 for node in list(graph_copy.order):
13375 if not isinstance(node, Package):
13376 graph_copy.remove(node)
13377 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
13378 print "\n!!! --buildpkgonly requires all dependencies to be merged."
13379 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Actual merge path, resume flavor: hand the saved list to the Scheduler.
13382 if ("--resume" in myopts):
13383 favorites=mtimedb["resume"]["favorites"]
13384 mymergelist = mydepgraph.altlist()
13385 mydepgraph.break_refs(mymergelist)
13386 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13387 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
13388 del mydepgraph, mymergelist
13389 clear_caches(trees)
13391 retval = mergetask.merge()
13392 merge_count = mergetask.curval
# Non-resume merge path: persist a fresh resume entry first, so an
# interrupted run can be continued.
13394 if "resume" in mtimedb and \
13395 "mergelist" in mtimedb["resume"] and \
13396 len(mtimedb["resume"]["mergelist"]) > 1:
13397 mtimedb["resume_backup"] = mtimedb["resume"]
13398 del mtimedb["resume"]
13400 mtimedb["resume"]={}
13401 # Stored as a dict starting with portage-2.2_rc7, and supported
13402 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
13403 # a list type for options.
13404 mtimedb["resume"]["myopts"] = myopts.copy()
13406 # Convert Atom instances to plain str since the mtimedb loader
13407 # sets unpickler.find_global = None which causes unpickler.load()
13408 # to raise the following exception:
13410 # cPickle.UnpicklingError: Global and instance pickles are not supported.
13412 # TODO: Maybe stop setting find_global = None, or find some other
13413 # way to avoid accidental triggering of the above UnpicklingError.
13414 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
# Optionally regenerate Manifest digests for each ebuild to be merged.
13416 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
13417 for pkgline in mydepgraph.altlist():
13418 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
13419 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
13420 tmpsettings = portage.config(clone=settings)
13422 if settings.get("PORTAGE_DEBUG", "") == "1":
13424 retval = portage.doebuild(
13425 y, "digest", settings["ROOT"], tmpsettings, edebug,
13426 ("--pretend" in myopts),
13427 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
13430 pkglist = mydepgraph.altlist()
13431 mydepgraph.saveNomergeFavorites()
13432 mydepgraph.break_refs(pkglist)
13433 mergetask = Scheduler(settings, trees, mtimedb, myopts,
13434 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
13435 del mydepgraph, pkglist
13436 clear_caches(trees)
13438 retval = mergetask.merge()
13439 merge_count = mergetask.curval
# After a fully successful real merge, auto-clean if AUTOCLEAN=yes,
# otherwise warn and at least prune stale preserved-libs entries.
13441 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
13442 if "yes" == settings.get("AUTOCLEAN"):
13443 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
13444 unmerge(trees[settings["ROOT"]]["root_config"],
13445 myopts, "clean", [],
13446 ldpath_mtimes, autoclean=1)
13448 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13449 + " AUTOCLEAN is disabled. This can cause serious"
13450 + " problems due to overlapping packages.\n")
13451 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
# Report that two mutually-exclusive actions were requested on one command
# line. NOTE(review): the terminating exit call that presumably follows these
# writes appears to have been elided from this excerpt (embedded numbering
# jumps from 13457 to 13460) — confirm against the full source.
13455 def multiple_actions(action1, action2):
13456 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
13457 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
13460 def insert_optional_args(args):
13462 Parse optional arguments and insert a value if one has
13463 not been provided. This is done before feeding the args
13464 to the optparse parser since that parser does not support
13465 this feature natively.
# NOTE(review): some lines are elided in this excerpt (the `new_args`
# initialization, loop header, per-iteration resets of job_count/saved_opts,
# and the final return) — confirm against the full source before editing.
13469 jobs_opts = ("-j", "--jobs")
# Work through a reversed copy so pop() consumes args left-to-right.
13470 arg_stack = args[:]
13471 arg_stack.reverse()
13473 arg = arg_stack.pop()
# A short option cluster containing "j" (e.g. "-aj3") counts as a jobs opt.
13475 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
13476 if not (short_job_opt or arg in jobs_opts):
13477 new_args.append(arg)
13480 # Insert an empty placeholder in order to
13481 # satisfy the requirements of optparse.
13483 new_args.append("--jobs")
# Extract a count attached directly to the short option ("-j3"),
# preserving any other clustered short options ("-aj" -> "-a").
13486 if short_job_opt and len(arg) > 2:
13487 if arg[:2] == "-j":
13489 job_count = int(arg[2:])
13491 saved_opts = arg[2:]
13494 saved_opts = arg[1:].replace("j", "")
# Otherwise, the count may be the next argument ("-j 3").
13496 if job_count is None and arg_stack:
13498 job_count = int(arg_stack[-1])
13502 # Discard the job count from the stack
13503 # since we're consuming it here.
13506 if job_count is None:
13507 # unlimited number of jobs
13508 new_args.append("True")
13510 new_args.append(str(job_count))
13512 if saved_opts is not None:
13513 new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
	"""Parse the emerge command line.

	Returns a (myaction, myopts, myfiles) triple: the single requested
	action (if any), a dict of enabled options keyed by option name,
	and the remaining package/file arguments.  The optparse parser is
	built dynamically from the module-level 'actions', 'options' and
	'shortmapping' tables.

	NOTE(review): sampled excerpt -- option-name keys of
	argument_options, try:/except wrappers and several else branches
	are elided below.
	"""
	global actions, options, shortmapping

	# Alternate long spellings that map onto canonical option names.
	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
	# Options that take an argument; values are keyword arguments
	# forwarded to parser.add_option().  (Keys elided in this excerpt.)
	argument_options = {
		"help":"specify the location for portage configuration files",
		"help":"enable or disable color output",
		"choices":("y", "n")
		"help" : "Specifies the number of packages to build " + \
		"--load-average": {
		"help" :"Specifies that no new builds should be started " + \
			"if there are other builds running and the load average " + \
			"is at least LOAD (a floating-point number).",
		"help":"include unnecessary build time dependencies",
		"choices":("y", "n")
		"help":"specify conditions to trigger package reinstallation",
		"choices":["changed-use"]

	from optparse import OptionParser
	parser = OptionParser()
	# --help is handled by emerge itself (it is one of the actions),
	# so remove optparse's built-in handler.
	if parser.has_option("--help"):
		parser.remove_option("--help")

	# Boolean action flags (--sync, --depclean, ...), one per action.
	for action_opt in actions:
		parser.add_option("--" + action_opt, action="store_true",
			dest=action_opt.replace("-", "_"), default=False)
	# Boolean option flags.
	for myopt in options:
		parser.add_option(myopt, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	# Single-letter shortcuts wired to the same dest as their long form.
	for shortopt, longopt in shortmapping.iteritems():
		parser.add_option("-" + shortopt, action="store_true",
			dest=longopt.lstrip("--").replace("-", "_"), default=False)
	for myalias, myopt in longopt_aliases.iteritems():
		parser.add_option(myalias, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)

	# Options that carry a value.
	for myopt, kwargs in argument_options.iteritems():
		parser.add_option(myopt,
			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

	# Fill in implicit values for options with optional arguments,
	# which optparse cannot do natively.
	tmpcmdline = insert_optional_args(tmpcmdline)

	myoptions, myargs = parser.parse_args(args=tmpcmdline)

	# --jobs: the literal string "True" means "unlimited"; any other
	# value must parse as a positive integer.
	# NOTE(review): try/except and else branches elided here.
	if myoptions.jobs == "True":
		jobs = int(myoptions.jobs)
	if jobs is not True and \
		writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
			(myoptions.jobs,), noiselevel=-1)
	myoptions.jobs = jobs

	# --load-average must be a positive float; non-positive disables it.
	if myoptions.load_average:
		load_average = float(myoptions.load_average)
		if load_average <= 0.0:
			load_average = None
		writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
			(myoptions.load_average,), noiselevel=-1)
	myoptions.load_average = load_average

	# Copy the parsed values back into a dict keyed by option name.
	for myopt in options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
		myopts[myopt] = True
	for myopt in argument_options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
	# Only one action may be requested per invocation; a second one
	# triggers multiple_actions() which reports the conflict.
	for action_opt in actions:
		v = getattr(myoptions, action_opt.replace("-", "_"))
		multiple_actions(myaction, action_opt)
		myaction = action_opt

	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config validation on the vartree settings of every root."""
	for root in trees:
		trees[root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Flush the per-root dbapi caches and portage's global dircache."""
	for root in trees:
		root_trees = trees[root]
		port_db = root_trees["porttree"].dbapi
		port_db.melt()
		port_db._aux_cache.clear()
		bin_db = root_trees["bintree"].dbapi
		bin_db._aux_cache.clear()
		bin_db._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""Build and return (settings, trees, mtimedb) from the environment.

	NOTE(review): sampled excerpt -- kwargs initialization and a few
	surrounding lines are elided below.
	"""
	# Forward PORTAGE_CONFIGROOT / ROOT overrides to create_trees().
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
	trees = portage.create_trees(trees=trees, **kwargs)

	# Attach a RootConfig (settings + set configuration) to every root.
	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	settings = trees["/"]["vartree"].settings

	for myroot in trees:
		settings = trees[myroot]["vartree"].settings

	# The global mtime database lives under the root CACHE_PATH.
	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)

	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config.

	Mutates *settings* in place: normalizes case-insensitive variables,
	strips the 'noauto' FEATURE, sanity-checks integer delay/debug
	values, and propagates --quiet/--verbose/--noconfmem/--debug and
	the color options.

	NOTE(review): sampled excerpt -- try:/else: headers and the
	PORTAGE_DEBUG/CLEAN_DELAY default initializations are elided below.
	"""

	# To enhance usability, make some vars case insensitive by forcing them to
	# lower case.
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		while "noauto" in settings.features:
			settings.features.remove("noauto")
		settings["FEATURES"] = " ".join(settings.features)
		settings.backup_changes("FEATURES")

	# CLEAN_DELAY must parse as an integer; otherwise warn and persist
	# the compiled-in default instead.
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Same integer sanity check for EMERGE_WARNING_DELAY (default 10s).
	EMERGE_WARNING_DELAY = 10
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	# Propagate quiet/verbose flags into the backed-up config.
	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
	# PORTAGE_DEBUG may only be 0 or 1.
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	# --debug forces debug mode regardless of the config value.
	if "--debug" in myopts:
	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")

	# Enable color unless NOCOLOR explicitly forbids it.
	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
		# NOTE(review): else: branch header elided in this excerpt.
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		# Not a terminal: disable color unless NOCOLOR=no opts back in.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
	# NOTE(review): body elided in this excerpt -- presumably invokes
	# the nice()/ionice() helpers below; confirm against full source.

def nice(settings):
	"""Renice this process according to PORTAGE_NICENESS."""
	# NOTE(review): the enclosing try: header is elided in this excerpt.
		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError), e:
		# Either the OS refused the priority change or the value
		# was not an integer; report but continue.
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Spawn PORTAGE_IONICE_COMMAND (with ${PID} expanded) to adjust
	this process's I/O priority; stays silent when the command is
	unset or not found.

	NOTE(review): sampled excerpt -- the early return for an empty
	command and the try:/return lines are elided below.
	"""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	ionice_cmd = shlex.split(ionice_cmd)

	from portage.util import varexpand
	# Substitute our own PID into the configured command template.
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

	rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.

	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	"""Report that *set_name* does not exist and list available sets.

	NOTE(review): sampled excerpt -- the msg list initialization and
	blank-line append calls are elided.
	"""
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))

	for s in sorted(root_config.sets):
		msg.append(" %s" % s)

	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand @set references in the argument list.

	Handles inline set options ("@name{key=val,flag}"), simple set
	algebra with the "-@" (difference), "+@" (union) and intersection
	operators, and decides -- based on *myaction* -- whether sets are
	expanded into atoms here or passed through so the depgraph can
	expand them itself.  Returns (newargs, retval).

	NOTE(review): sampled excerpt -- initializations (newargs, retval,
	ARG_START/ARG_END, IS_OPERATOR, options, newset), loop headers,
	try:/else: lines and early returns are elided below.
	"""
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
	# Normalize bare "system"/"world" arguments into @-prefixed refs.
		if a in ("system", "world"):
			newargs.append(SETPREFIX+a)

	# separators for set arguments

	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"

	# Apply per-set option overrides given inline as "@name{key=val,flag}".
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
			start = x.find(ARG_START)
			end = x.find(ARG_END)
			if start > 0 and start < end:
				namepart = x[:start]
				argpart = x[start+1:end]

				# TODO: implement proper quoting
				args = argpart.split(",")
					k, v = a.split("=", 1)
					# Bare flag (no "=") means boolean True.
					options[a] = "True"
				setconfig.update(namepart, options)
				newset += (x[:start-len(namepart)]+namepart)
				x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occured while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)

	# emerge relies on the existance of sets with names "world" and "system"
	required_sets = ("world", "system")

	for s in required_sets:
			msg = ["emerge: incomplete set configuration, " + \
				"no \"%s\" set defined" % s]
			msg.append(" sets defined: %s" % ", ".join(sets))
			sys.stderr.write(line + "\n")

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

	# Expand each remaining @set argument.
		if a.startswith(SETPREFIX):
			# support simple set operations (intersection, difference and union)
			# on the commandline. Expressions are evaluated strictly left-to-right
			if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
				expression = a[len(SETPREFIX):]
				# Peel operators off right-to-left so the evaluation loop
				# below applies them left-to-right.
				while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
					is_pos = expression.rfind(IS_OPERATOR)
					diff_pos = expression.rfind(DIFF_OPERATOR)
					union_pos = expression.rfind(UNION_OPERATOR)
					op_pos = max(is_pos, diff_pos, union_pos)
					s1 = expression[:op_pos]
					s2 = expression[op_pos+len(IS_OPERATOR):]
					op = expression[op_pos:op_pos+len(IS_OPERATOR)]
					display_missing_pkg_set(root_config, s2)
					expr_sets.insert(0, s2)
					expr_ops.insert(0, op)
				if not expression in sets:
					display_missing_pkg_set(root_config, expression)
				expr_sets.insert(0, expression)
				# Evaluate the operator chain left-to-right.
				result = set(setconfig.getSetAtoms(expression))
				for i in range(0, len(expr_ops)):
					s2 = setconfig.getSetAtoms(expr_sets[i+1])
					if expr_ops[i] == IS_OPERATOR:
						result.intersection_update(s2)
					elif expr_ops[i] == DIFF_OPERATOR:
						result.difference_update(s2)
					elif expr_ops[i] == UNION_OPERATOR:
						raise NotImplementedError("unknown set operator %s" % expr_ops[i])
				newargs.extend(result)
				# Plain @set reference (no operators).
				s = a[len(SETPREFIX):]
				display_missing_pkg_set(root_config, s)
				setconfig.active.append(s)
					set_atoms = setconfig.getSetAtoms(s)
				except portage.exception.PackageSetNotFound, e:
					writemsg_level(("emerge: the given set '%s' " + \
						"contains a non-existent set named '%s'.\n") % \
						(s, e), level=logging.ERROR, noiselevel=-1)
				if myaction in unmerge_actions and \
					not sets[s].supportsOperation("unmerge"):
					sys.stderr.write("emerge: the given set '%s' does " % s + \
						"not support unmerge operations\n")
				elif not set_atoms:
					print "emerge: '%s' is an empty set" % s
				elif myaction not in do_not_expand:
					newargs.extend(set_atoms)
					# Keep the @-reference so the depgraph expands it.
					newargs.append(SETPREFIX+s)
				for e in sets[s].errors:

	return (newargs, retval)
def repo_name_check(trees):
	"""Warn about overlays whose profiles/repo_name file is missing.

	Returns True when at least one repository lacks a repo_name entry.

	NOTE(review): sampled excerpt -- the loop header over repos and
	the msg list initialization are elided.
	"""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start from all configured trees, then discard every tree
			# that has a registered repository name.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
			missing_repo_names.discard(portdb.getRepositoryPath(r))

	if missing_repo_names:
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	"""Warn when CONFIG_PROTECT is empty for any configured root."""
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
			# NOTE(review): the guard for non-default roots is elided
			# in this excerpt.
			msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Tell the user that the short name *arg* matched several packages.

	In --quiet mode just lists the fully qualified candidates;
	otherwise it runs a search so each candidate is shown with its
	description.

	NOTE(review): sampled excerpt -- the quiet-mode return and some
	search-loop lines are elided below.
	"""
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print " " + colorize("INFORM", cp)

	# Verbose path: reuse the search machinery so the candidates are
	# printed with their metadata.
	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Verify that a valid profile exists for actions that need one.

	Actions/options that work without a profile pass through; when no
	root has a profile configured, guidance is printed and a non-zero
	status is the expected result.

	NOTE(review): sampled excerpt -- the return statements (os.EX_OK
	and the error code) are elided below.
	"""
	# These actions do not require a valid profile.
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
	# generate some profile related warning messages
	validate_ebuild_environment(trees)
	msg = "If you have just changed your profile configuration, you " + \
		"should revert back to the previous configuration. Due to " + \
		"your current profile being invalid, allowed actions are " + \
		"limited to --help, --info, --sync, and --version."
	writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
		level=logging.ERROR, noiselevel=-1)
14051 global portage # NFC why this is necessary now - genone
14052 portage._disable_legacy_globals()
14053 # Disable color until we're sure that it should be enabled (after
14054 # EMERGE_DEFAULT_OPTS has been parsed).
14055 portage.output.havecolor = 0
14056 # This first pass is just for options that need to be known as early as
14057 # possible, such as --config-root. They will be parsed again later,
14058 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14059 # the value of --config-root).
14060 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14061 if "--debug" in myopts:
14062 os.environ["PORTAGE_DEBUG"] = "1"
14063 if "--config-root" in myopts:
14064 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14066 # Portage needs to ensure a sane umask for the files it creates.
14068 settings, trees, mtimedb = load_emerge_config()
14069 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14070 rval = profile_check(trees, myaction, myopts)
14071 if rval != os.EX_OK:
14074 if portage._global_updates(trees, mtimedb["updates"]):
14076 # Reload the whole config from scratch.
14077 settings, trees, mtimedb = load_emerge_config(trees=trees)
14078 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14080 xterm_titles = "notitles" not in settings.features
14083 if "--ignore-default-opts" not in myopts:
14084 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14085 tmpcmdline.extend(sys.argv[1:])
14086 myaction, myopts, myfiles = parse_opts(tmpcmdline)
14088 if "--digest" in myopts:
14089 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14090 # Reload the whole config from scratch so that the portdbapi internal
14091 # config is updated with new FEATURES.
14092 settings, trees, mtimedb = load_emerge_config(trees=trees)
14093 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14095 for myroot in trees:
14096 mysettings = trees[myroot]["vartree"].settings
14097 mysettings.unlock()
14098 adjust_config(myopts, mysettings)
14099 mysettings["PORTAGE_COUNTER_HASH"] = \
14100 trees[myroot]["vartree"].dbapi._counter_hash()
14101 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14103 del myroot, mysettings
14105 apply_priorities(settings)
14107 spinner = stdout_spinner()
14108 if "candy" in settings.features:
14109 spinner.update = spinner.update_scroll
14111 if "--quiet" not in myopts:
14112 portage.deprecated_profile_check()
14113 repo_name_check(trees)
14114 config_protect_check(trees)
14116 eclasses_overridden = {}
14117 for mytrees in trees.itervalues():
14118 mydb = mytrees["porttree"].dbapi
14119 # Freeze the portdbapi for performance (memoize all xmatch results).
14121 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14124 if eclasses_overridden and \
14125 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14126 prefix = bad(" * ")
14127 if len(eclasses_overridden) == 1:
14128 writemsg(prefix + "Overlay eclass overrides " + \
14129 "eclass from PORTDIR:\n", noiselevel=-1)
14131 writemsg(prefix + "Overlay eclasses override " + \
14132 "eclasses from PORTDIR:\n", noiselevel=-1)
14133 writemsg(prefix + "\n", noiselevel=-1)
14134 for eclass_name in sorted(eclasses_overridden):
14135 writemsg(prefix + " '%s/%s.eclass'\n" % \
14136 (eclasses_overridden[eclass_name], eclass_name),
14138 writemsg(prefix + "\n", noiselevel=-1)
14139 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14140 "because it will trigger invalidation of cached ebuild metadata " + \
14141 "that is distributed with the portage tree. If you must " + \
14142 "override eclasses from PORTDIR then you are advised to add " + \
14143 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14144 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14145 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14146 "you would like to disable this warning."
14147 from textwrap import wrap
14148 for line in wrap(msg, 72):
14149 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14151 if "moo" in myfiles:
14154 Larry loves Gentoo (""" + platform.system() + """)
14156 _______________________
14157 < Have you mooed today? >
14158 -----------------------
14168 ext = os.path.splitext(x)[1]
14169 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14170 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14173 root_config = trees[settings["ROOT"]]["root_config"]
14174 if myaction == "list-sets":
14175 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14179 # only expand sets for actions taking package arguments
14180 oldargs = myfiles[:]
14181 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14182 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14183 if retval != os.EX_OK:
14186 # Need to handle empty sets specially, otherwise emerge will react
14187 # with the help message for empty argument lists
14188 if oldargs and not myfiles:
14189 print "emerge: no targets left after set expansion"
14192 if ("--tree" in myopts) and ("--columns" in myopts):
14193 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14196 if ("--quiet" in myopts):
14197 spinner.update = spinner.update_quiet
14198 portage.util.noiselimit = -1
14200 # Always create packages if FEATURES=buildpkg
14201 # Imply --buildpkg if --buildpkgonly
14202 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14203 if "--buildpkg" not in myopts:
14204 myopts["--buildpkg"] = True
14206 # Also allow -S to invoke search action (-sS)
14207 if ("--searchdesc" in myopts):
14208 if myaction and myaction != "search":
14209 myfiles.append(myaction)
14210 if "--search" not in myopts:
14211 myopts["--search"] = True
14212 myaction = "search"
14214 # Always try and fetch binary packages if FEATURES=getbinpkg
14215 if ("getbinpkg" in settings.features):
14216 myopts["--getbinpkg"] = True
14218 if "--buildpkgonly" in myopts:
14219 # --buildpkgonly will not merge anything, so
14220 # it cancels all binary package options.
14221 for opt in ("--getbinpkg", "--getbinpkgonly",
14222 "--usepkg", "--usepkgonly"):
14223 myopts.pop(opt, None)
14225 if "--fetch-all-uri" in myopts:
14226 myopts["--fetchonly"] = True
14228 if "--skipfirst" in myopts and "--resume" not in myopts:
14229 myopts["--resume"] = True
14231 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14232 myopts["--usepkgonly"] = True
14234 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14235 myopts["--getbinpkg"] = True
14237 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14238 myopts["--usepkg"] = True
14240 # Also allow -K to apply --usepkg/-k
14241 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14242 myopts["--usepkg"] = True
14244 # Allow -p to remove --ask
14245 if ("--pretend" in myopts) and ("--ask" in myopts):
14246 print ">>> --pretend disables --ask... removing --ask from options."
14247 del myopts["--ask"]
14249 # forbid --ask when not in a terminal
14250 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14251 if ("--ask" in myopts) and (not sys.stdin.isatty()):
14252 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14256 if settings.get("PORTAGE_DEBUG", "") == "1":
14257 spinner.update = spinner.update_quiet
14259 if "python-trace" in settings.features:
14260 import portage.debug
14261 portage.debug.set_trace(True)
14263 if not ("--quiet" in myopts):
14264 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14265 spinner.update = spinner.update_basic
14267 if "--version" in myopts:
14268 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14269 settings.profile_path, settings["CHOST"],
14270 trees[settings["ROOT"]]["vartree"].dbapi)
14272 elif "--help" in myopts:
14273 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14276 if "--debug" in myopts:
14277 print "myaction", myaction
14278 print "myopts", myopts
14280 if not myaction and not myfiles and "--resume" not in myopts:
14281 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14284 pretend = "--pretend" in myopts
14285 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14286 buildpkgonly = "--buildpkgonly" in myopts
14288 # check if root user is the current user for the actions where emerge needs this
14289 if portage.secpass < 2:
14290 # We've already allowed "--version" and "--help" above.
14291 if "--pretend" not in myopts and myaction not in ("search","info"):
14292 need_superuser = not \
14294 (buildpkgonly and secpass >= 1) or \
14295 myaction in ("metadata", "regen") or \
14296 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
14297 if portage.secpass < 1 or \
14300 access_desc = "superuser"
14302 access_desc = "portage group"
14303 # Always show portage_group_warning() when only portage group
14304 # access is required but the user is not in the portage group.
14305 from portage.data import portage_group_warning
14306 if "--ask" in myopts:
14307 myopts["--pretend"] = True
14308 del myopts["--ask"]
14309 print ("%s access is required... " + \
14310 "adding --pretend to options.\n") % access_desc
14311 if portage.secpass < 1 and not need_superuser:
14312 portage_group_warning()
14314 sys.stderr.write(("emerge: %s access is " + \
14315 "required.\n\n") % access_desc)
14316 if portage.secpass < 1 and not need_superuser:
14317 portage_group_warning()
14320 disable_emergelog = False
14321 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
14323 disable_emergelog = True
14325 if myaction in ("search", "info"):
14326 disable_emergelog = True
14327 if disable_emergelog:
14328 """ Disable emergelog for everything except build or unmerge
14329 operations. This helps minimize parallel emerge.log entries that can
14330 confuse log parsers. We especially want it disabled during
14331 parallel-fetch, which uses --resume --fetchonly."""
14333 def emergelog(*pargs, **kargs):
14336 if not "--pretend" in myopts:
14337 emergelog(xterm_titles, "Started emerge on: "+\
14338 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
14341 myelogstr=" ".join(myopts)
14343 myelogstr+=" "+myaction
14345 myelogstr += " " + " ".join(oldargs)
14346 emergelog(xterm_titles, " *** emerge " + myelogstr)
14349 def emergeexitsig(signum, frame):
14350 signal.signal(signal.SIGINT, signal.SIG_IGN)
14351 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14352 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
14353 sys.exit(100+signum)
14354 signal.signal(signal.SIGINT, emergeexitsig)
14355 signal.signal(signal.SIGTERM, emergeexitsig)
14358 """This gets out final log message in before we quit."""
14359 if "--pretend" not in myopts:
14360 emergelog(xterm_titles, " *** terminating.")
14361 if "notitles" not in settings.features:
14363 portage.atexit_register(emergeexit)
14365 if myaction in ("config", "metadata", "regen", "sync"):
14366 if "--pretend" in myopts:
14367 sys.stderr.write(("emerge: The '%s' action does " + \
14368 "not support '--pretend'.\n") % myaction)
14371 if "sync" == myaction:
14372 return action_sync(settings, trees, mtimedb, myopts, myaction)
14373 elif "metadata" == myaction:
14374 action_metadata(settings, portdb, myopts)
14375 elif myaction=="regen":
14376 validate_ebuild_environment(trees)
14377 action_regen(settings, portdb, myopts.get("--jobs"),
14378 myopts.get("--load-average"))
14380 elif "config"==myaction:
14381 validate_ebuild_environment(trees)
14382 action_config(settings, trees, myopts, myfiles)
14385 elif "search"==myaction:
14386 validate_ebuild_environment(trees)
14387 action_search(trees[settings["ROOT"]]["root_config"],
14388 myopts, myfiles, spinner)
14389 elif myaction in ("clean", "unmerge") or \
14390 (myaction == "prune" and "--nodeps" in myopts):
14391 validate_ebuild_environment(trees)
14393 # Ensure atoms are valid before calling unmerge().
14394 # For backward compat, leading '=' is not required.
14396 if is_valid_package_atom(x) or \
14397 is_valid_package_atom("=" + x):
14400 msg.append("'%s' is not a valid package atom." % (x,))
14401 msg.append("Please check ebuild(5) for full details.")
14402 writemsg_level("".join("!!! %s\n" % line for line in msg),
14403 level=logging.ERROR, noiselevel=-1)
14406 # When given a list of atoms, unmerge
14407 # them in the order given.
14408 ordered = myaction == "unmerge"
14409 if 1 == unmerge(root_config, myopts, myaction, myfiles,
14410 mtimedb["ldpath"], ordered=ordered):
14411 if not (buildpkgonly or fetchonly or pretend):
14412 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14414 elif myaction in ("depclean", "info", "prune"):
14416 # Ensure atoms are valid before calling unmerge().
14417 vardb = trees[settings["ROOT"]]["vartree"].dbapi
14420 if is_valid_package_atom(x):
14422 valid_atoms.append(
14423 portage.dep_expand(x, mydb=vardb, settings=settings))
14424 except portage.exception.AmbiguousPackageName, e:
14425 msg = "The short ebuild name \"" + x + \
14426 "\" is ambiguous. Please specify " + \
14427 "one of the following " + \
14428 "fully-qualified ebuild names instead:"
14429 for line in textwrap.wrap(msg, 70):
14430 writemsg_level("!!! %s\n" % (line,),
14431 level=logging.ERROR, noiselevel=-1)
14433 writemsg_level(" %s\n" % colorize("INFORM", i),
14434 level=logging.ERROR, noiselevel=-1)
14435 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14439 msg.append("'%s' is not a valid package atom." % (x,))
14440 msg.append("Please check ebuild(5) for full details.")
14441 writemsg_level("".join("!!! %s\n" % line for line in msg),
14442 level=logging.ERROR, noiselevel=-1)
14445 if myaction == "info":
14446 return action_info(settings, trees, myopts, valid_atoms)
14448 validate_ebuild_environment(trees)
14449 action_depclean(settings, trees, mtimedb["ldpath"],
14450 myopts, myaction, valid_atoms, spinner)
14451 if not (buildpkgonly or fetchonly or pretend):
14452 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
14453 # "update", "system", or just process files:
14455 validate_ebuild_environment(trees)
14456 if "--pretend" not in myopts:
14457 display_news_notification(root_config, myopts)
14458 retval = action_build(settings, trees, mtimedb,
14459 myopts, myaction, myfiles, spinner)
14460 root_config = trees[settings["ROOT"]]["root_config"]
14461 post_emerge(root_config, myopts, mtimedb, retval)