2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
class stdout_spinner(object):
	# Progress indicator for tty output.  Three styles are provided:
	# a dot counter (update_basic), a scrolling message (update_scroll)
	# and a twirling bar (update_twirl); update_quiet emits nothing.
	# NOTE(review): SOURCE is a sampled excerpt -- the opening of the
	# scroll_msgs list, parts of __init__ and several return statements
	# are elided from this view; confirm against upstream.

	# Messages shown one character at a time by update_scroll().
		"Gentoo Rocks ("+platform.system()+")",
		"Thank you for using Gentoo. :)",
		"Are you actually trying to read this?",
		"How many times have you stared at this?",
		"We are generating the cache right now",
		"You are paying too much attention.",
		"A theory is better than its explanation.",
		"Phasers locked on target, Captain.",
		"Thrashing is just virtual crashing.",
		"To be is to program.",
		"Real Users hate Real Programmers.",
		"When all else fails, read the instructions.",
		"Functionality breeds Contempt.",
		"The future lies ahead.",
		"3.1415926535897932384626433832795028841971694",
		"Sometimes insanity is the only alternative.",
		"Inaccuracy saves a world of explanation.",

	# Characters cycled through by update_twirl().
	twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

		self.update = self.update_twirl
		# Pick a pseudo-random scroll message based on the current time.
		self.scroll_sequence = self.scroll_msgs[
			int(time.time() * 100) % len(self.scroll_msgs)]
		# Minimum seconds between tty writes (see _return_early).
		self.min_display_latency = 0.05

	def _return_early(self):
		"""
		Flushing ouput to the tty too frequently wastes cpu time. Therefore,
		each update* method should return without doing any output when this
		"""
		cur_time = time.time()
		if cur_time - self.last_update < self.min_display_latency:
		self.last_update = cur_time

	def update_basic(self):
		# Emit one dot per 100 calls; spinpos wraps at 500 so a space is
		# inserted between groups of five dots.
		self.spinpos = (self.spinpos + 1) % 500
		if self._return_early():
		if (self.spinpos % 100) == 0:
			if self.spinpos == 0:
				sys.stdout.write(". ")
				sys.stdout.write(".")

	def update_scroll(self):
		if self._return_early():
		# Scroll forward for the first half of the period, then backward.
		if(self.spinpos >= len(self.scroll_sequence)):
			sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
				len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
			sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
		self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

	def update_twirl(self):
		self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
		if self._return_early():
		sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

	def update_quiet(self):
def userquery(prompt, responses=None, colours=None):
	"""Displays a prompt and a set of responses, then waits for a response
	which is checked against the responses and the first to match is
	returned. An empty response will match the first value in responses. The
	input buffer is *not* cleared prior to the prompt!

	responses: a List of Strings.
	colours: a List of Functions taking and returning a String, used to
	process the responses for display. Typically these will be functions
	like red() but could be e.g. lambda x: "DisplayString".
	If responses is omitted, defaults to ["Yes", "No"], [green, red].
	If only colours is omitted, defaults to [bold, ...].

	Returns a member of the List responses. (If called without optional
	arguments, returns "Yes" or "No".)
	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
	"""
	# NOTE(review): sampled excerpt -- the prompt loop header, return
	# statement and SystemExit conversion are elided from this view.
	if responses is None:
		responses = ["Yes", "No"]
		# Default colour functions for the Yes/No prompt.
			create_color_func("PROMPT_CHOICE_DEFAULT"),
			create_color_func("PROMPT_CHOICE_OTHER")
	elif colours is None:
	# Repeat the colour list until it is at least as long as responses,
	# then truncate so the two lists pair up one-to-one.
	colours=(colours*len(responses))[:len(responses)]
		response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
		for key in responses:
			# An empty response will match the first value in responses.
			if response.upper()==key[:len(response)].upper():
		print "Sorry, response '%s' not understood." % response,
	except (EOFError, KeyboardInterrupt):
# Valid emerge actions; exactly one may be requested per invocation.
# NOTE(review): sampled excerpt -- some entries of these three tables
# (and the "options"/"shortmapping" assignment lines) are elided.
actions = frozenset([
"clean", "config", "depclean",
"info", "list-sets", "metadata",
"prune", "regen", "search",
"sync", "unmerge", "version",
# Long command-line options understood by emerge.
"--ask", "--alphabetical",
"--buildpkg", "--buildpkgonly",
"--changelog", "--columns",
"--fetchonly", "--fetch-all-uri",
"--getbinpkg", "--getbinpkgonly",
"--help", "--ignore-default-opts",
"--newuse", "--nocolor",
"--nodeps", "--noreplace",
"--nospinner", "--oneshot",
"--onlydeps", "--pretend",
"--quiet", "--resume",
"--searchdesc", "--selective",
"--usepkg", "--usepkgonly",
# Mapping of single-letter short options to their long equivalents.
"b":"--buildpkg", "B":"--buildpkgonly",
"c":"--clean", "C":"--unmerge",
"d":"--debug", "D":"--deep",
"f":"--fetchonly", "F":"--fetch-all-uri",
"g":"--getbinpkg", "G":"--getbinpkgonly",
"k":"--usepkg", "K":"--usepkgonly",
"n":"--noreplace", "N":"--newuse",
"o":"--onlydeps", "O":"--nodeps",
"p":"--pretend", "P":"--prune",
"s":"--search", "S":"--searchdesc",
"v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
	"""Append mystr to /var/log/emerge.log (timestamped, under a file
	lock) and optionally show short_msg in the xterm title bar.
	Failures are reported to stderr but never raised to the caller.
	NOTE(review): sampled excerpt -- the surrounding try/finally and
	the seek before write are elided from this view."""
	if xterm_titles and short_msg:
		if "HOSTNAME" in os.environ:
			# Prefix the title with the host name for remote sessions.
			short_msg = os.environ["HOSTNAME"]+": "+short_msg
		xtermTitle(short_msg)
	file_path = "/var/log/emerge.log"
	mylogfile = open(file_path, "a")
	# Keep the log owned by the portage user/group even when running
	# as root.
	portage.util.apply_secpass_permissions(file_path,
		uid=portage.portage_uid, gid=portage.portage_gid,
	mylock = portage.locks.lockfile(mylogfile)
	# seek because we may have gotten held up by the lock.
	# if so, we may not be positioned at the end of the file.
	mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
	portage.locks.unlockfile(mylock)
	except (IOError,OSError,portage.exception.PortageException), e:
		print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
	# Give the user `secs` seconds to press Control-C before continuing.
	# NOTE(review): sampled excerpt -- the loop and sleep are elided.
	print ">>> Waiting",secs,"seconds before starting..."
	print ">>> (Control-C to abort)...\n"+doing+" in: ",
	sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
	"""Return *mysize* (a byte count) as a human-readable kB string.

	String input is passed through unchanged (it is assumed to already be
	a formatted message such as "Unknown").  Byte counts are rounded up to
	the next whole kB so a small pending download never shows as "0 kB",
	and the integer part is comma-grouped, e.g. 1234567 -> "1,206 kB".
	"""
	# NOTE(review): the original tested isinstance(mysize, basestring);
	# plain str covers the common case and also works on Python 3.
	if isinstance(mysize, str):
		return mysize
	if 0 != mysize % 1024:
		# Always round up to the next kB so that it doesn't show 0 kB when
		# some small file still needs to be fetched.
		mysize += 1024 - mysize % 1024
	# Floor division keeps integer semantics on both Python 2 and 3.
	mystr = str(mysize // 1024)
	# Insert a comma every three digits, working from the right.
	mycount = len(mystr)
	while mycount > 3:
		mycount -= 3
		mystr = mystr[:mycount] + "," + mystr[mycount:]
	return mystr + " kB"
def getgccversion(chost):
	"""
	return: the current in-use gcc version
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"

	# Preferred source: gcc-config, which reports the selected profile.
	mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# Fall back to asking the CHOST-prefixed compiler directly.
	mystatus, myoutput = commands.getstatusoutput(
		chost + "-" + gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	# Last resort: plain "gcc" from PATH.
	mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""Return a one-line summary of the Portage installation: version,
	profile, gcc version, libc version and kernel release/arch.
	NOTE(review): sampled excerpt -- the try/except wrappers and the
	libclist loop header are elided from this view."""
	profilever = "unavailable"
		realpath = os.path.realpath(profile)
		basepath = os.path.realpath(os.path.join(portdir, "profiles"))
		if realpath.startswith(basepath):
			# Profile is inside $PORTDIR/profiles: show the relative path.
			profilever = realpath[1 + len(basepath):]
			# Otherwise show the raw symlink target, flagged with "!".
			profilever = "!" + os.readlink(profile)
		del realpath, basepath

	# Collect installed libc providers from both virtuals.
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
		xs=portage.catpkgsplit(x)
			libcver+=","+"-".join(xs[1:])
			libcver="-".join(xs[1:])
		libcver="unavailable"

	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()

	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
	"""Translate emerge command line options into depgraph parameters.

	myopts:   mapping of command line options, e.g. {"--deep": True}
	myaction: the requested action name, e.g. "remove"
	Returns a set of parameter names understood by depgraph.
	"""
	#configure emerge engine parameters
	#
	# self: include _this_ package regardless of if it is merged.
	# selective: exclude the package if it is merged
	# recurse: go into the dependencies
	# deep: go into the dependencies of already merged packages
	# empty: pretend nothing is merged
	# complete: completely account for all known dependencies
	# remove: build graph for use in removing packages
	myparams = set(["recurse"])

	if myaction == "remove":
		# Removal graphs must be complete; no other options apply.
		myparams.add("remove")
		myparams.add("complete")
		return myparams

	if "--update" in myopts or \
		"--newuse" in myopts or \
		"--reinstall" in myopts or \
		"--noreplace" in myopts:
		myparams.add("selective")
	if "--emptytree" in myopts:
		myparams.add("empty")
		# --emptytree rebuilds everything, so selective merging
		# makes no sense.
		myparams.discard("selective")
	if "--nodeps" in myopts:
		myparams.discard("recurse")
	if "--deep" in myopts:
		myparams.add("deep")
	if "--complete-graph" in myopts:
		myparams.add("complete")
	return myparams
# search functionality
class search(object):
	# NOTE(review): sampled excerpt -- the class docstring/constants
	# (VERSION_* etc.), several method headers (_cp_all, output) and
	# many try/except/loop lines are elided from this view; confirm
	# against upstream before relying on structure shown here.

	def __init__(self, root_config, spinner, searchdesc,
		verbose, usepkg, usepkgonly):
		"""Searches the available and installed packages for the supplied search key.
		The list of available and installed packages is created at object instantiation.
		This makes successive searches faster."""
		self.settings = root_config.settings
		self.vartree = root_config.trees["vartree"]
		self.spinner = spinner
		self.verbose = verbose
		self.searchdesc = searchdesc
		self.root_config = root_config
		self.setconfig = root_config.setconfig
		self.matches = {"pkg" : []}

		# Present the selected dbs behind a single portdb-like facade
		# whose methods are the _-prefixed implementations below.
		self.portdb = fake_portdb
		for attrib in ("aux_get", "cp_all",
			"xmatch", "findname", "getFetchMap"):
			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))

		portdb = root_config.trees["porttree"].dbapi
		bindb = root_config.trees["bintree"].dbapi
		vardb = root_config.trees["vartree"].dbapi
		# Which dbs to search depends on --usepkg/--usepkgonly.
		if not usepkgonly and portdb._have_root_eclass_dir:
			self._dbs.append(portdb)
		if (usepkg or usepkgonly) and bindb.cp_all():
			self._dbs.append(bindb)
		self._dbs.append(vardb)
		self._portdb = portdb

		# (_cp_all) Union of category/package names across all dbs.
		cp_all.update(db.cp_all())
		return list(sorted(cp_all))

	def _aux_get(self, *args, **kwargs):
			return db.aux_get(*args, **kwargs)

	def _findname(self, *args, **kwargs):
			if db is not self._portdb:
				# We don't want findname to return anything
				# unless it's an ebuild in a portage tree.
				# Otherwise, it's already built and we don't
			func = getattr(db, "findname", None)
				value = func(*args, **kwargs)

	def _getFetchMap(self, *args, **kwargs):
			func = getattr(db, "getFetchMap", None)
				value = func(*args, **kwargs)

	def _visible(self, db, cpv, metadata):
		# Visibility check honouring which db the cpv came from.
		installed = db is self.vartree.dbapi
		built = installed or db is not self._portdb
			pkg_type = "installed"
		return visible(self.settings,
			Package(type_name=pkg_type, root_config=self.root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata))

	def _xmatch(self, level, atom):
		"""
		This method does not expand old-style virtuals because it
		is restricted to returning matches for a single ${CATEGORY}/${PN}
		and old-style virual matches unreliable for that when querying
		multiple package databases. If necessary, old-style virtuals
		can be performed on atoms prior to calling this method.
		"""
		cp = portage.dep_getkey(atom)
		if level == "match-all":
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
					matches.update(db.match(atom))
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "match-visible":
				if hasattr(db, "xmatch"):
					matches.update(db.xmatch(level, atom))
					db_keys = list(db._aux_cache_keys)
					for cpv in db.match(atom):
						metadata = izip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "bestmatch-visible":
				if hasattr(db, "xmatch"):
					cpv = db.xmatch("bestmatch-visible", atom)
					if not cpv or portage.cpv_getkey(cpv) != cp:
					if not result or cpv == portage.best([cpv, result]):
					db_keys = Package.metadata_keys
					# break out of this loop with highest visible
					# match, checked in descending order
					for cpv in reversed(db.match(atom)):
						if portage.cpv_getkey(cpv) != cp:
						metadata = izip(db_keys,
							db.aux_get(cpv, db_keys))
						if not self._visible(db, cpv, metadata):
						if not result or cpv == portage.best([cpv, result]):
			raise NotImplementedError(level)

	def execute(self,searchkey):
		"""Performs the search for the supplied search key"""
		self.searchkey=searchkey
		self.packagematches = []
			self.matches = {"pkg":[], "desc":[], "set":[]}
			self.matches = {"pkg":[], "set":[]}
		print "Searching... ",

		# A leading '%' makes the key a regular expression; a leading
		# '@' appears to restrict matching to the full category/name
		# string -- TODO confirm against upstream.
		if self.searchkey.startswith('%'):
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			self.searchkey = self.searchkey[1:]
			self.searchre=re.compile(self.searchkey,re.I)
			self.searchre=re.compile(re.escape(self.searchkey), re.I)
		for package in self.portdb.cp_all():
			self.spinner.update()
				match_string = package[:]
				match_string = package.split("/")[-1]
			if self.searchre.search(match_string):
				# A package with no visible match is reported as masked.
				if not self.portdb.xmatch("match-visible", package):
				self.matches["pkg"].append([package,masked])
			elif self.searchdesc: # DESCRIPTION searching
				full_package = self.portdb.xmatch("bestmatch-visible", package)
					#no match found; we don't want to query description
					full_package = portage.best(
						self.portdb.xmatch("match-all", package))
					full_desc = self.portdb.aux_get(
						full_package, ["DESCRIPTION"])[0]
					print "emerge: search: aux_get() failed, skipping"
				if self.searchre.search(full_desc):
					self.matches["desc"].append([full_package,masked])

		# Also search package sets by name and description.
		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self.spinner.update()
				match_string = setname
				match_string = setname.split("/")[-1]
			if self.searchre.search(match_string):
				self.matches["set"].append([setname, False])
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					self.matches["set"].append([setname, False])

		for mtype in self.matches:
			self.matches[mtype].sort()
			self.mlen += len(self.matches[mtype])

		if not self.portdb.xmatch("match-all", cp):
		if not self.portdb.xmatch("bestmatch-visible", cp):
		self.matches["pkg"].append([cp, masked])

		"""Outputs the results of the search."""
		print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
		print "[ Applications found : "+white(str(self.mlen))+" ]"
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
					full_package = self.portdb.xmatch(
						"bestmatch-visible", match)
						#no match found; we don't want to query description
						full_package = portage.best(
							self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					match = portage.cpv_getkey(match)
					print green("*")+" "+white(match)
					print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
						desc, homepage, license = self.portdb.aux_get(
							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
						print "emerge: search: aux_get() failed, skipping"
						print green("*")+" "+white(match)+" "+red("[ Masked ]")
						print green("*")+" "+white(match)
					myversion = self.getVersion(full_package, search.VERSION_RELEASE)

					mycat = match.split("/")[0]
					mypkg = match.split("/")[1]
					mycpv = match + "-" + myversion
					myebuild = self.portdb.findname(mycpv)
						pkgdir = os.path.dirname(myebuild)
						from portage import manifest
						mf = manifest.Manifest(
							pkgdir, self.settings["DISTDIR"])
						uri_map = self.portdb.getFetchMap(mycpv)
					except portage.exception.InvalidDependString, e:
						file_size_str = "Unknown (%s)" % (e,)
							mysum[0] = mf.getDistfilesSize(uri_map)
							file_size_str = "Unknown (missing " + \
								"digest for %s)" % (e,)
						# For binary packages, fall back to the size of
						# the package file itself.
						if db is not vardb and \
							db.cpv_exists(mycpv):
							if not myebuild and hasattr(db, "bintree"):
								myebuild = db.bintree.getname(mycpv)
								mysum[0] = os.stat(myebuild).st_size
					if myebuild and file_size_str is None:
						mystr = str(mysum[0] / 1024)
							mystr = mystr[:mycount] + "," + mystr[mycount:]
						file_size_str = mystr + " kB"

					print " ", darkgreen("Latest version available:"),myversion
					print " ", self.getInstallationStatus(mycat+'/'+mypkg)
						(darkgreen("Size of files:"), file_size_str)
					print " ", darkgreen("Homepage:")+" ",homepage
					print " ", darkgreen("Description:")+" ",desc
					print " ", darkgreen("License:")+" ",license

	def getInstallationStatus(self,package):
		# Report the best installed version of `package`, if any.
		installed_package = self.vartree.dep_bestmatch(package)
			version = self.getVersion(installed_package,search.VERSION_RELEASE)
			result = darkgreen("Latest version installed:")+" "+version
			result = darkgreen("Latest version installed:")+" [ Not Installed ]"

	def getVersion(self,full_package,detail):
		# Extract the version (optionally with -rN revision) from a cpv.
		if len(full_package) > 1:
			package_parts = portage.catpkgsplit(full_package)
			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
				result = package_parts[2]+ "-" + package_parts[3]
				result = package_parts[2]
class RootConfig(object):
	"""This is used internally by depgraph to track information about a
	# NOTE(review): sampled excerpt -- the docstring terminator, the
	# pkg_tree_map assignment line and the inverse-map body are elided.
	# Maps Package type_name to the tree that backs it.
		"ebuild" : "porttree",
		"binary" : "bintree",
		"installed" : "vartree"

	# Build the inverse mapping (tree name -> package type).
	for k, v in pkg_tree_map.iteritems():

	def __init__(self, settings, trees, setconfig):
		self.settings = settings
		self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
		self.root = self.settings["ROOT"]
		self.setconfig = setconfig
		self.sets = self.setconfig.getSets()
		self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg, args_set, root_config):
	"""Create a new atom for the world file if one does not exist. If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots. Unslotted system packages will not be
	"""
	# NOTE(review): sampled excerpt -- several guard/return lines are
	# elided from this view; confirm against upstream.
	arg_atom = args_set.findAtomForPackage(pkg)
	cp = portage.dep_getkey(arg_atom)
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	# A package is "slotted" when more than one SLOT exists, or the only
	# SLOT is not the default "0".
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
		# check the vdb in case this is multislot
		available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
			for cpv in vardb.match(cp))
		slotted = len(available_slots) > 1 or \
			(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		# unknown value, so just record an unslotted atom.
		# 2) SLOT comes from an installed package and there is no
		# matching SLOT in the portage tree.
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above). If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).
		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			# If there is no installed package matching the SLOT atom,
			# it probably changed SLOT spontaneously due to USE=multislot,
			# so just record an unslotted atom.
			if vardb.match(slot_atom):
				# Now verify that the argument is precise
				# enough to identify a specific slot.
				matches = mydb.match(arg_atom)
				matched_slots = set()
					matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
				if len(matched_slots) == 1:
					new_world_atom = slot_atom
	if new_world_atom == sets["world"].findAtomForPackage(pkg):
		# Both atoms would be identical, so there's nothing to add.
	# Unlike world atoms, system atoms are not greedy for slots, so they
	# can't be safely excluded from world if they are slotted.
	system_atom = sets["system"].findAtomForPackage(pkg)
		if not portage.dep_getkey(system_atom).startswith("virtual/"):
		# System virtuals aren't safe to exclude from world since they can
		# match multiple old-style virtuals but only one of them will be
		# pulled in by update or depclean.
		providers = portdb.mysettings.getvirtuals().get(
			portage.dep_getkey(system_atom))
		if providers and len(providers) == 1 and providers[0] == cp:
	return new_world_atom
def filter_iuse_defaults(iuse):
	"""Yield IUSE flag names with any leading "+" or "-" default
	marker (EAPI 1 style) stripped.

	iuse: iterable of IUSE tokens, e.g. ["+foo", "-bar", "baz"].
	"""
	for flag in iuse:
		# startswith accepts a tuple: one call covers both markers.
		if flag.startswith(("+", "-")):
			yield flag[1:]
		else:
			yield flag
class SlotObject(object):
	# Lightweight base class: __init__ accepts one keyword per name
	# declared in any __slots__ along the MRO.
	# NOTE(review): sampled excerpt -- the class-walk loop headers and
	# the copy() method header are elided from this view.
	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		# Walk the class hierarchy and initialize every slot attribute
		# from the matching keyword argument (defaulting to None).
		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				myvalue = kwargs.get(myattr, None)
				setattr(self, myattr, myvalue)

		"""
		Create a new instance and copy all attributes
		defined from __slots__ (including those from
		"""
		obj = self.__class__()

		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				setattr(obj, myattr, getattr(self, myattr))
class AbstractDepPriority(SlotObject):
	# Base class for dependency priorities.  Subclasses define
	# __int__(); every rich comparison delegates to that integer.
	# NOTE(review): sampled excerpt -- the copy() method header is
	# elided from this view.
	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return self.__int__() < other

	def __le__(self, other):
		return self.__int__() <= other

	def __eq__(self, other):
		return self.__int__() == other

	def __ne__(self, other):
		return self.__int__() != other

	def __gt__(self, other):
		return self.__int__() > other

	def __ge__(self, other):
		return self.__int__() >= other

		return copy.copy(self)
class DepPriority(AbstractDepPriority):
	# Priority for normal dependency edges; __int__ and most of __str__
	# are elided from this sampled view.

	__slots__ = ("satisfied", "optional", "rebuild")
		if self.runtime_post:
			return "runtime_post"
class BlockerDepPriority(DepPriority):
# Shared singleton used for all blocker dependency edges.
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
	# Priority used when ordering unmerge operations; __int__ and the
	# MAX/SOFT/MIN constants are elided from this sampled view.
	__slots__ = ("optional", "satisfied",)
	"""
	Combination of properties           Priority  Category
	(none of the above)                  -2       SOFT
	"""
		if self.runtime_post:
		myvalue = self.__int__()
		if myvalue > self.SOFT:
class DepPriorityNormalRange(object):
	"""
	DepPriority properties      Index  Category
	runtime_post                  2    MEDIUM_SOFT
	(none of the above)           0    NONE
	"""
	# NOTE(review): sampled excerpt -- @classmethod decorators and the
	# early "return False" guards are elided from this view.
	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return not priority.buildtime

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_optional

# Severity-indexed tuple of ignore predicates; element i ignores
# priorities at or below that softness level.
DepPriorityNormalRange.ignore_priority = (
	DepPriorityNormalRange._ignore_optional,
	DepPriorityNormalRange._ignore_runtime_post,
	DepPriorityNormalRange._ignore_runtime
class DepPrioritySatisfiedRange(object):
	"""
	DepPriority                         Index  Category
	not satisfied and buildtime                HARD
	not satisfied and runtime             7    MEDIUM
	not satisfied and runtime_post        6    MEDIUM_SOFT
	satisfied and buildtime and rebuild   5    SOFT
	satisfied and buildtime               4    SOFT
	satisfied and runtime                 3    SOFT
	satisfied and runtime_post            2    SOFT
	(none of the above)                   0    NONE
	"""
	# NOTE(review): sampled excerpt -- @classmethod decorators and the
	# early "return False/True" guard lines are elided from this view.
	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional)

	def _ignore_satisfied_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return bool(priority.runtime_post)

	def _ignore_satisfied_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		return not priority.buildtime

	def _ignore_satisfied_buildtime(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		if not priority.satisfied:
		if priority.buildtime:
			return not priority.rebuild

	def _ignore_satisfied_buildtime_rebuild(cls, priority):
		if priority.__class__ is not DepPriority:
		if priority.optional:
		return bool(priority.satisfied)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.optional or \
			priority.satisfied or \
			priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
		return bool(priority.satisfied or \
			not priority.buildtime)

	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_satisfied_buildtime_rebuild

# Severity-indexed tuple of ignore predicates, mirroring the table in
# the class docstring.
DepPrioritySatisfiedRange.ignore_priority = (
	DepPrioritySatisfiedRange._ignore_optional,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
	DepPrioritySatisfiedRange._ignore_runtime_post,
	DepPrioritySatisfiedRange._ignore_runtime
def _find_deep_system_runtime_deps(graph):
	"""Return the set of system packages in graph together with every
	package reachable from them via runtime dependency edges.
	NOTE(review): sampled excerpt -- the seed-loop header, the while
	loop header and several continue/return lines are elided."""
	deep_system_deps = set()
		if not isinstance(node, Package) or \
			node.operation == 'uninstall':
		# Seed the traversal with system-set members.
		if node.root_config.sets['system'].findAtomForPackage(node):
			node_stack.append(node)

	def ignore_priority(priority):
		"""
		Ignore non-runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			(priority.runtime or priority.runtime_post):

		# Depth-first traversal over runtime edges only.
		node = node_stack.pop()
		if node in deep_system_deps:
		deep_system_deps.add(node)
		for child in graph.child_nodes(node, ignore_priority=ignore_priority):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
			node_stack.append(child)

	return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This is implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do it's
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Intialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
# Re-synchronize this fake/cached vardb with the real installed-package
# database after merges/unmerges.  NOTE(review): interior lines are
# elided from this excerpt (try/except/finally scaffolding, several
# continue/raise lines); comments describe only visible code.
1274 def sync(self, acquire_lock=1):
1276 Call this method to synchronize state with the real vardb
1277 after one or more packages may have been installed or
# Best effort: create the vdb directory so the lock file has a home.
1280 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1282 # At least the parent needs to exist for the lock file.
1283 portage.util.ensure_dirs(vdb_path)
1284 except portage.exception.PortageException:
# Only lock when requested and writable; read-only use needs no lock.
1288 if acquire_lock and os.access(vdb_path, os.W_OK):
1289 vdb_lock = portage.locks.lockdir(vdb_path)
1293 portage.locks.unlockdir(vdb_lock)
1297 real_vardb = self._root_config.trees["vartree"].dbapi
1298 current_cpv_set = frozenset(real_vardb.cpv_all())
1299 pkg_vardb = self.dbapi
1300 aux_get_history = self._aux_get_history
1302 # Remove any packages that have been uninstalled.
1303 for pkg in list(pkg_vardb):
1304 if pkg.cpv not in current_cpv_set:
1305 pkg_vardb.cpv_remove(pkg)
1306 aux_get_history.discard(pkg.cpv)
1308 # Validate counters and timestamps.
1311 validation_keys = ["COUNTER", "_mtime_"]
1312 for cpv in current_cpv_set:
1314 pkg_hash_key = ("installed", root, cpv, "nomerge")
1315 pkg = pkg_vardb.get(pkg_hash_key)
1317 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1319 counter = long(counter)
# Stale cache entry: counter or mtime changed on disk, so evict it
# and let it be rebuilt below.
1323 if counter != pkg.counter or \
1325 pkg_vardb.cpv_remove(pkg)
1326 aux_get_history.discard(pkg.cpv)
# Rebuild a Package instance from the real vardb metadata.
1330 pkg = self._pkg(cpv)
# Track the highest COUNTER seen per slot atom; presumably used to
# pick the newest instance in a slot — TODO confirm against the
# elided lines between 1334 and 1337.
1332 other_counter = slot_counters.get(pkg.slot_atom)
1333 if other_counter is not None:
1334 if other_counter > pkg.counter:
1337 slot_counters[pkg.slot_atom] = pkg.counter
1338 pkg_vardb.cpv_inject(pkg)
1340 real_vardb.flush_cache()
# Build an installed-package Package instance for the given cpv by
# pulling metadata from the real vardb.  NOTE(review): lines between
# 1349 and 1359 are elided (likely COUNTER error handling and the
# return statement); comments describe only visible code.
1342 def _pkg(self, cpv):
1343 root_config = self._root_config
1344 real_vardb = root_config.trees["vartree"].dbapi
1345 pkg = Package(cpv=cpv, installed=True,
1346 metadata=izip(self._db_keys,
1347 real_vardb.aux_get(cpv, self._db_keys)),
1348 root_config=root_config,
1349 type_name="installed")
# Normalize COUNTER to a canonical integer string.
1352 mycounter = long(pkg.metadata["COUNTER"])
1355 pkg.metadata["COUNTER"] = str(mycounter)
# Read and parse all profiles/updates files under the given portdir,
# accumulating the update commands (package moves/slotmoves).
# NOTE(review): the return statement and the upd_commands initializer
# are elided from this excerpt.
1359 def grab_global_updates(portdir):
1360 from portage.update import grab_updates, parse_updates
1361 updpath = os.path.join(portdir, "profiles", "updates")
1363 rawupdates = grab_updates(updpath)
# A tree without profiles/updates is valid; treated as "no updates".
1364 except portage.exception.DirectoryNotFound:
1367 for mykey, mystat, mycontent in rawupdates:
# parse_updates yields (commands, errors); errors are not surfaced here.
1368 commands, errors = parse_updates(mycontent)
1369 upd_commands.extend(commands)
# Apply global update commands (package moves/slotmoves) to the
# dependency metadata of a single cpv in the given db.
# NOTE(review): line 1377 is elided (presumably "if updates:" guarding
# the aux_update call) — confirm against the full source.
1372 def perform_global_updates(mycpv, mydb, mycommands):
1373 from portage.update import update_dbentries
# Only dependency strings can contain atoms affected by moves.
1374 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376 updates = update_dbentries(mycommands, aux_dict)
1378 mydb.aux_update(mycpv, updates)
# Visibility predicate: a package is visible when it passes every mask
# check below.  NOTE(review): the "return False" / "return True" lines
# after each check are elided from this excerpt.
1380 def visible(pkgsettings, pkg):
1382 Check if a package is visible. This can raise an InvalidDependString
1383 exception if LICENSE is invalid.
1384 TODO: optionally generate a list of masking reasons
1386 @returns: True if the package is visible, False otherwise.
# An empty SLOT marks corrupt metadata — never visible.
1388 if not pkg.metadata["SLOT"]:
# CHOST acceptance only matters for packages not yet installed.
1390 if not pkg.installed:
1391 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1393 eapi = pkg.metadata["EAPI"]
1394 if not portage.eapi_is_supported(eapi):
# Deprecated EAPIs are tolerated for already-installed packages.
1396 if not pkg.installed:
1397 if portage._eapi_is_deprecated(eapi):
# KEYWORDS / package.mask / profile mask checks.
1399 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1401 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1403 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
# LICENSE check may raise InvalidDependString (see docstring).
1406 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1408 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for a package, augmenting
# portage.getmaskingstatus() with CHOST and SLOT sanity checks.
# NOTE(review): the return statement is elided from this excerpt.
1412 def get_masking_status(pkg, pkgsettings, root_config):
1414 mreasons = portage.getmaskingstatus(
1415 pkg, settings=pkgsettings,
1416 portdb=root_config.trees["porttree"].dbapi)
# CHOST mismatch is only a mask reason for not-yet-installed packages.
1418 if not pkg.installed:
1419 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420 mreasons.append("CHOST: %s" % \
1421 pkg.metadata["CHOST"])
# Empty SLOT indicates corrupt metadata.
1423 if not pkg.metadata["SLOT"]:
1424 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for cpv from db and compute its masking reasons.
# Returns (metadata, mreasons); metadata is None (reported as
# "corruption") when aux_get fails.  NOTE(review): the try/except
# around aux_get and the else-branch line are elided from this excerpt.
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429 db, pkg_type, built, installed, db_keys):
1432 metadata = dict(izip(db_keys,
1433 db.aux_get(cpv, db_keys)))
# For unbuilt ebuilds, compute effective USE/CHOST from settings.
1436 if metadata and not built:
1437 pkgsettings.setcpv(cpv, mydb=metadata)
1438 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440 if metadata is None:
1441 mreasons = ["corruption"]
1443 pkg = Package(type_name=pkg_type, root_config=root_config,
1444 cpv=cpv, built=built, installed=installed, metadata=metadata)
1445 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446 return metadata, mreasons
# Print masked packages with their mask reasons, package.mask comments
# and license-file locations, deduplicating repeated cpvs/comments/
# licenses.  Returns True when any package was masked by an unsupported
# EAPI.  NOTE(review): several interior lines (shown_cpvs initializer,
# continue statements, print of comment/filename) are elided.
1448 def show_masked_packages(masked_packages):
1449 shown_licenses = set()
1450 shown_comments = set()
1451 # Maybe there is both an ebuild and a binary. Only
1452 # show one of them to avoid redundant appearance.
1454 have_eapi_mask = False
1455 for (root_config, pkgsettings, cpv,
1456 metadata, mreasons) in masked_packages:
# Skip duplicates (ebuild + binary of the same cpv).
1457 if cpv in shown_cpvs:
1460 comment, filename = None, None
# Pull the explanatory comment out of package.mask when applicable.
1461 if "package.mask" in mreasons:
1462 comment, filename = \
1463 portage.getmaskingreason(
1464 cpv, metadata=metadata,
1465 settings=pkgsettings,
1466 portdb=root_config.trees["porttree"].dbapi,
1467 return_location=True)
1468 missing_licenses = []
# Unsupported EAPI: note it and skip the license lookup below.
1470 if not portage.eapi_is_supported(metadata["EAPI"]):
1471 have_eapi_mask = True
1473 missing_licenses = \
1474 pkgsettings._getMissingLicenses(
1476 except portage.exception.InvalidDependString:
1477 # This will have already been reported
1478 # above via mreasons.
1481 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
# Show each package.mask comment only once.
1482 if comment and comment not in shown_comments:
1485 shown_comments.add(comment)
1486 portdb = root_config.trees["porttree"].dbapi
# Point the user at a local copy of each missing license, once.
1487 for l in missing_licenses:
1488 l_path = portdb.findLicensePath(l)
1489 if l in shown_licenses:
1491 msg = ("A copy of the '%s' license" + \
1492 " is located at '%s'.") % (l, l_path)
1495 shown_licenses.add(l)
1496 return have_eapi_mask
# Base class for hashable task objects.  A task is identified by an
# immutable "hash key" (a tuple built by subclasses); equality,
# hashing, length, indexing, iteration and containment all delegate to
# that key, so a Task compares equal to its raw key tuple as well.
# NOTE(review): several def lines (__hash__, __len__, __iter__,
# __str__) and return statements are elided from this excerpt.
1498 class Task(SlotObject):
1499 __slots__ = ("_hash_key", "_hash_value")
# Subclasses must populate self._hash_key lazily; accessing it before
# it exists is a programming error.
1501 def _get_hash_key(self):
1502 hash_key = getattr(self, "_hash_key", None)
1503 if hash_key is None:
1504 raise NotImplementedError(self)
1507 def __eq__(self, other):
1508 return self._get_hash_key() == other
1510 def __ne__(self, other):
1511 return self._get_hash_key() != other
# Cache the hash of the (immutable) hash key.
1514 hash_value = getattr(self, "_hash_value", None)
1515 if hash_value is None:
1516 self._hash_value = hash(self._get_hash_key())
1517 return self._hash_value
1520 return len(self._get_hash_key())
1522 def __getitem__(self, key):
1523 return self._get_hash_key()[key]
1526 return iter(self._get_hash_key())
1528 def __contains__(self, key):
1529 return key in self._get_hash_key()
1532 return str(self._get_hash_key())
# Task subclass representing a blocker atom ("!...") for a given root.
# Its identity is the tuple ("blocks", root, atom, eapi).
1534 class Blocker(Task):
# Defining __eq__ in Task would otherwise suppress inherited __hash__.
1536 __hash__ = Task.__hash__
1537 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1539 def __init__(self, **kwargs):
1540 Task.__init__(self, **kwargs)
# Cache the category/package key of the blocker atom.
1541 self.cp = portage.dep_getkey(self.atom)
1543 def _get_hash_key(self):
1544 hash_key = getattr(self, "_hash_key", None)
1545 if hash_key is None:
# NOTE(review): the "self._hash_key = \" line is elided here.
1547 ("blocks", self.root, self.atom, self.eapi)
1548 return self._hash_key
# Task subclass representing one package instance (ebuild, binary or
# installed) for a specific root.  Identity is the tuple
# (type_name, root, cpv, operation).  Comparison operators order
# packages of the same cp by version via portage.pkgcmp().
# NOTE(review): many interior lines are elided from this excerpt
# (metadata_keys list header, slot handling, return statements of the
# comparison operators, parts of _iuse.__init__); comments describe
# only visible code.
1550 class Package(Task):
1552 __hash__ = Task.__hash__
1553 __slots__ = ("built", "cpv", "depth",
1554 "installed", "metadata", "onlydeps", "operation",
1555 "root_config", "type_name",
1556 "category", "counter", "cp", "cpv_split",
1557 "inherited", "iuse", "mtime",
1558 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# Metadata keys this class keeps synchronized (see
# _PackageMetadataWrapper below in the full source).
1561 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562 "INHERITED", "IUSE", "KEYWORDS",
1563 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1566 def __init__(self, **kwargs):
1567 Task.__init__(self, **kwargs)
1568 self.root = self.root_config.root
# Wrap metadata so assignments update derived attributes (slot, use...).
1569 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570 self.cp = portage.cpv_getkey(self.cpv)
1573 # Avoid an InvalidAtom exception when creating slot_atom.
1574 # This package instance will be masked due to empty SLOT.
1576 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1577 self.category, self.pf = portage.catsplit(self.cpv)
1578 self.cpv_split = portage.catpkgsplit(self.cpv)
1579 self.pv_split = self.cpv_split[1:]
# Nested helper holding the enabled-USE set for this package.
1583 __slots__ = ("__weakref__", "enabled")
1585 def __init__(self, use):
1586 self.enabled = frozenset(use)
# Nested helper modelling IUSE: tokens split into enabled ("+x"),
# disabled ("-x") and plain flags, plus a lazily-built validity regex.
1588 class _iuse(object):
1590 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1592 def __init__(self, tokens, iuse_implicit):
1593 self.tokens = tuple(tokens)
1594 self.iuse_implicit = iuse_implicit
1601 enabled.append(x[1:])
1603 disabled.append(x[1:])
1606 self.enabled = frozenset(enabled)
1607 self.disabled = frozenset(disabled)
1608 self.all = frozenset(chain(enabled, disabled, other))
# Build self.regex on first access, then fall through to normal lookup.
1610 def __getattribute__(self, name):
1613 return object.__getattribute__(self, "regex")
1614 except AttributeError:
1615 all = object.__getattribute__(self, "all")
1616 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1617 # Escape anything except ".*" which is supposed
1618 # to pass through from _get_implicit_iuse()
1619 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1620 regex = "^(%s)$" % "|".join(regex)
1621 regex = regex.replace("\\.\\*", ".*")
1622 self.regex = re.compile(regex)
1623 return object.__getattribute__(self, name)
1625 def _get_hash_key(self):
1626 hash_key = getattr(self, "_hash_key", None)
1627 if hash_key is None:
# Default operation is "merge"; installed/onlydeps packages never merge.
1628 if self.operation is None:
1629 self.operation = "merge"
1630 if self.onlydeps or self.installed:
1631 self.operation = "nomerge"
1633 (self.type_name, self.root, self.cpv, self.operation)
1634 return self._hash_key
# Version ordering: only meaningful within the same cp.
1636 def __lt__(self, other):
1637 if other.cp != self.cp:
1639 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1643 def __le__(self, other):
1644 if other.cp != self.cp:
1646 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1650 def __gt__(self, other):
1651 if other.cp != self.cp:
1653 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1657 def __ge__(self, other):
1658 if other.cp != self.cp:
1660 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level set of every metadata key the wrapper class may hold:
# all standard auxdb keys (minus unused/obsolete ones) plus the keys
# Package itself tracks.  slot_dict_class builds a dict-like class
# restricted to exactly these keys.
1664 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1665 if not x.startswith("UNUSED_"))
# CDEPEND is obsolete and deliberately excluded.
1666 _all_metadata_keys.discard("CDEPEND")
1667 _all_metadata_keys.update(Package.metadata_keys)
1669 from portage.cache.mappings import slot_dict_class
1670 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Dict-like metadata container that mirrors writes of selected keys
# into attributes of the owning Package (e.g. setting "USE" updates
# pkg.use).  NOTE(review): several interior lines are elided from this
# excerpt (self._pkg assignment in __init__, bodies of _set_slot and
# the numeric fallbacks of _set_counter/_set__mtime_).
1672 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1674 Detect metadata updates and synchronize Package attributes.
1677 __slots__ = ("_pkg",)
# Keys whose assignment triggers a _set_<key>() hook below.
1678 _wrapped_keys = frozenset(
1679 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1681 def __init__(self, pkg, metadata):
1682 _PackageMetadataWrapperBase.__init__(self)
1684 self.update(metadata)
1686 def __setitem__(self, k, v):
1687 _PackageMetadataWrapperBase.__setitem__(self, k, v)
# Dispatch to the matching hook, e.g. "USE" -> self._set_use(k, v).
1688 if k in self._wrapped_keys:
1689 getattr(self, "_set_" + k.lower())(k, v)
# INHERITED may arrive as a space-separated string; normalize to a set.
1691 def _set_inherited(self, k, v):
1692 if isinstance(v, basestring):
1693 v = frozenset(v.split())
1694 self._pkg.inherited = v
1696 def _set_iuse(self, k, v):
1697 self._pkg.iuse = self._pkg._iuse(
1698 v.split(), self._pkg.root_config.iuse_implicit)
1700 def _set_slot(self, k, v):
1703 def _set_use(self, k, v):
1704 self._pkg.use = self._pkg._use(v.split())
# COUNTER/_mtime_ string values are converted to numbers (conversion
# lines elided from this excerpt).
1706 def _set_counter(self, k, v):
1707 if isinstance(v, basestring):
1712 self._pkg.counter = v
1714 def _set__mtime_(self, k, v):
1715 if isinstance(v, basestring):
# Synchronous --fetchonly/--fetch-all-uri driver: runs the "fetch"
# phase of an ebuild via portage.doebuild().  When fetch is RESTRICTed,
# a private build dir is set up so pkg_nofetch can run and elog
# messages can be stored.  NOTE(review): several interior lines are
# elided (def lines of execute()/_execute(), try/finally scaffolding,
# return statements); comments describe only visible code.
1722 class EbuildFetchonly(SlotObject):
1724 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1727 settings = self.settings
1729 portdb = pkg.root_config.trees["porttree"].dbapi
1730 ebuild_path = portdb.findname(pkg.cpv)
1731 settings.setcpv(pkg)
1732 debug = settings.get("PORTAGE_DEBUG") == "1"
1733 use_cache = 1 # always true
# Populate the environment so PORTAGE_RESTRICT can be inspected below.
1734 portage.doebuild_environment(ebuild_path, "fetch",
1735 settings["ROOT"], settings, debug, use_cache, portdb)
1736 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
# RESTRICT=fetch requires a build dir (for pkg_nofetch); otherwise a
# plain doebuild "fetch" suffices.
1739 rval = self._execute_with_builddir()
1741 rval = portage.doebuild(ebuild_path, "fetch",
1742 settings["ROOT"], settings, debug=debug,
1743 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1744 mydbapi=portdb, tree="porttree")
1746 if rval != os.EX_OK:
1747 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1748 eerror(msg, phase="unpack", key=pkg.cpv)
1752 def _execute_with_builddir(self):
1753 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1754 # ensuring sane $PWD (bug #239560) and storing elog
1755 # messages. Use a private temp directory, in order
1756 # to avoid locking the main one.
1757 settings = self.settings
1758 global_tmpdir = settings["PORTAGE_TMPDIR"]
1759 from tempfile import mkdtemp
1761 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
# Translate EPERM-style OSErrors into a portage PermissionDenied.
1763 if e.errno != portage.exception.PermissionDenied.errno:
1765 raise portage.exception.PermissionDenied(global_tmpdir)
1766 settings["PORTAGE_TMPDIR"] = private_tmpdir
1767 settings.backup_changes("PORTAGE_TMPDIR")
1769 retval = self._execute()
# Restore the original tmpdir and remove the private one (presumably
# inside a finally block — the try/finally lines are elided here).
1771 settings["PORTAGE_TMPDIR"] = global_tmpdir
1772 settings.backup_changes("PORTAGE_TMPDIR")
1773 shutil.rmtree(private_tmpdir)
1777 settings = self.settings
1779 root_config = pkg.root_config
1780 portdb = root_config.trees["porttree"].dbapi
1781 ebuild_path = portdb.findname(pkg.cpv)
1782 debug = settings.get("PORTAGE_DEBUG") == "1"
1783 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1785 retval = portage.doebuild(ebuild_path, "fetch",
1786 self.settings["ROOT"], self.settings, debug=debug,
1787 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1788 mydbapi=portdb, tree="porttree")
1790 if retval != os.EX_OK:
1791 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1792 eerror(msg, phase="unpack", key=pkg.cpv)
# Flush collected elog messages for this package.
1794 portage.elog.elog_process(self.pkg.cpv, self.settings)
# Fallback POLL* constants: taken from the select module when
# available, otherwise substituted with locally generated values so
# PollSelectAdapter works on platforms lacking poll().
1797 class PollConstants(object):
1800 Provides POLL* constants that are equivalent to those from the
1801 select module, for use by PollSelectAdapter.
1804 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
# Prefer the real select-module value; fall back to the generated one.
1807 locals()[k] = getattr(select, k, v)
# Abstract asynchronous task: subclasses implement _start/_wait/_poll;
# this base class manages start/exit listener registration and
# notification.  NOTE(review): many def lines (start, isAlive, poll,
# wait, cancel) and return/guard lines are elided from this excerpt;
# comments describe only visible code.
1811 class AsynchronousTask(SlotObject):
1813 Subclasses override _wait() and _poll() so that calls
1814 to public methods can be wrapped for implementing
1815 hooks such as exit listener notification.
1817 Sublasses should call self.wait() to notify exit listeners after
1818 the task is complete and self.returncode has been set.
1821 __slots__ = ("background", "cancelled", "returncode") + \
1822 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1826 Start an asynchronous task and then return as soon as possible.
# Subclasses must override the private _start hook.
1832 raise NotImplementedError(self)
# A task is alive while returncode is unset.
1835 return self.returncode is None
1842 return self.returncode
1845 if self.returncode is None:
1848 return self.returncode
1851 return self.returncode
1854 self.cancelled = True
# Listener registration: lists are created lazily on first add.
1857 def addStartListener(self, f):
1859 The function will be called with one argument, a reference to self.
1861 if self._start_listeners is None:
1862 self._start_listeners = []
1863 self._start_listeners.append(f)
1865 def removeStartListener(self, f):
1866 if self._start_listeners is None:
1868 self._start_listeners.remove(f)
# Fire start listeners exactly once; clear the list first so a
# listener adding new listeners cannot re-trigger this batch.
1870 def _start_hook(self):
1871 if self._start_listeners is not None:
1872 start_listeners = self._start_listeners
1873 self._start_listeners = None
1875 for f in start_listeners:
1878 def addExitListener(self, f):
1880 The function will be called with one argument, a reference to self.
1882 if self._exit_listeners is None:
1883 self._exit_listeners = []
1884 self._exit_listeners.append(f)
# Removal must also consider the in-flight stack used by _wait_hook.
1886 def removeExitListener(self, f):
1887 if self._exit_listeners is None:
1888 if self._exit_listener_stack is not None:
1889 self._exit_listener_stack.remove(f)
1891 self._exit_listeners.remove(f)
1893 def _wait_hook(self):
1895 Call this method after the task completes, just before returning
1896 the returncode from wait() or poll(). This hook is
1897 used to trigger exit listeners when the returncode first
1900 if self.returncode is not None and \
1901 self._exit_listeners is not None:
1903 # This prevents recursion, in case one of the
1904 # exit handlers triggers this method again by
1905 # calling wait(). Use a stack that gives
1906 # removeExitListener() an opportunity to consume
1907 # listeners from the stack, before they can get
1908 # called below. This is necessary because a call
1909 # to one exit listener may result in a call to
1910 # removeExitListener() for another listener on
1911 # the stack. That listener needs to be removed
1912 # from the stack since it would be inconsistent
1913 # to call it after it has been been passed into
1914 # removeExitListener().
1915 self._exit_listener_stack = self._exit_listeners
1916 self._exit_listeners = None
# Reverse so listeners fire in registration order as they are popped.
1918 self._exit_listener_stack.reverse()
1919 while self._exit_listener_stack:
1920 self._exit_listener_stack.pop()(self)
# Base class for tasks driven by a scheduler poll() loop.  Defines the
# poll-event masks shared by all pollable tasks and the unregister-on-
# error/HUP policy.  NOTE(review): parts of __slots__, the
# _registered_events continuation and the _unregister() calls in
# _unregister_if_appropriate are elided from this excerpt.
1922 class AbstractPollTask(AsynchronousTask):
1924 __slots__ = ("scheduler",) + \
# Events that indicate the fd is dead or invalid.
1928 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1929 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1932 def _unregister(self):
1933 raise NotImplementedError(self)
# Drop the registration on error/NVAL or when the peer hung up.
1935 def _unregister_if_appropriate(self, event):
1936 if self._registered:
1937 if event & self._exceptional_events:
1940 elif event & PollConstants.POLLHUP:
# Non-blocking reader that accumulates output from one or more files
# into memory via the scheduler's poll() loop; results are retrieved
# with getvalue().  NOTE(review): several def lines (_start, isAlive,
# cancel, _wait, getvalue, close) and try/except scaffolding are
# elided from this excerpt; comments describe only visible code.
1944 class PipeReader(AbstractPollTask):
1947 Reads output from one or more files and saves it in memory,
1948 for retrieval via the getvalue() method. This is driven by
1949 the scheduler's poll() loop, so it runs entirely within the
1953 __slots__ = ("input_files",) + \
1954 ("_read_data", "_reg_ids")
1957 self._reg_ids = set()
1958 self._read_data = []
# Switch every input fd to non-blocking and register it with the
# scheduler so _output_handler gets called on readiness.
1959 for k, f in self.input_files.iteritems():
1960 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1961 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1962 self._reg_ids.add(self.scheduler.register(f.fileno(),
1963 self._registered_events, self._output_handler))
1964 self._registered = True
1967 return self._registered
1970 if self.returncode is None:
1972 self.cancelled = True
1976 if self.returncode is not None:
1977 return self.returncode
# Block in the scheduler until all registrations are gone.
1979 if self._registered:
1980 self.scheduler.schedule(self._reg_ids)
1983 self.returncode = os.EX_OK
1984 return self.returncode
1987 """Retrieve the entire contents"""
# Python 3 returns bytes; Python 2 returns str.
1988 if sys.hexversion >= 0x3000000:
1989 return bytes().join(self._read_data)
1990 return "".join(self._read_data)
1993 """Free the memory buffer."""
1994 self._read_data = None
1996 def _output_handler(self, fd, event):
1998 if event & PollConstants.POLLIN:
# Find which of our input files this fd belongs to.
2000 for f in self.input_files.itervalues():
2001 if fd == f.fileno():
# Read a chunk; EOF raises EOFError from array.fromfile (handling
# lines elided from this excerpt).
2004 buf = array.array('B')
2006 buf.fromfile(f, self._bufsize)
2011 self._read_data.append(buf.tostring())
2016 self._unregister_if_appropriate(event)
2017 return self._registered
2019 def _unregister(self):
2021 Unregister from the scheduler and close open files.
2024 self._registered = False
2026 if self._reg_ids is not None:
2027 for reg_id in self._reg_ids:
2028 self.scheduler.unregister(reg_id)
2029 self._reg_ids = None
2031 if self.input_files is not None:
2032 for f in self.input_files.itervalues():
2034 self.input_files = None
# Task composed of a chain of sub-tasks; tracks the currently running
# sub-task and provides generic exit-callback helpers for subclasses.
# NOTE(review): several def lines (isAlive, cancel, _poll, _wait) and
# loop scaffolding are elided from this excerpt.
2036 class CompositeTask(AsynchronousTask):
2038 __slots__ = ("scheduler",) + ("_current_task",)
# Alive while some sub-task is current.
2041 return self._current_task is not None
2044 self.cancelled = True
2045 if self._current_task is not None:
2046 self._current_task.cancel()
2050 This does a loop calling self._current_task.poll()
2051 repeatedly as long as the value of self._current_task
2052 keeps changing. It calls poll() a maximum of one time
2053 for a given self._current_task instance. This is useful
2054 since calling poll() on a task can trigger advance to
2055 the next task could eventually lead to the returncode
2056 being set in cases when polling only a single task would
2057 not have the same effect.
2062 task = self._current_task
2063 if task is None or task is prev:
2064 # don't poll the same task more than once
2069 return self.returncode
2075 task = self._current_task
2077 # don't wait for the same task more than once
2080 # Before the task.wait() method returned, an exit
2081 # listener should have set self._current_task to either
2082 # a different task or None. Something is wrong.
2083 raise AssertionError("self._current_task has not " + \
2084 "changed since calling wait", self, task)
2088 return self.returncode
2090 def _assert_current(self, task):
2092 Raises an AssertionError if the given task is not the
2093 same one as self._current_task. This can be useful
2096 if task is not self._current_task:
2097 raise AssertionError("Unrecognized task: %s" % (task,))
2099 def _default_exit(self, task):
2101 Calls _assert_current() on the given task and then sets the
2102 composite returncode attribute if task.returncode != os.EX_OK.
2103 If the task failed then self._current_task will be set to None.
2104 Subclasses can use this as a generic task exit callback.
2107 @returns: The task.returncode attribute.
2109 self._assert_current(task)
# On failure, adopt the sub-task's returncode and stop the chain.
2110 if task.returncode != os.EX_OK:
2111 self.returncode = task.returncode
2112 self._current_task = None
2113 return task.returncode
2115 def _final_exit(self, task):
2117 Assumes that task is the final task of this composite task.
2118 Calls _default_exit() and sets self.returncode to the task's
2119 returncode and sets self._current_task to None.
2121 self._default_exit(task)
2122 self._current_task = None
2123 self.returncode = task.returncode
2124 return self.returncode
2126 def _default_final_exit(self, task):
2128 This calls _final_exit() and then wait().
2130 Subclasses can use this as a generic final task exit callback.
2133 self._final_exit(task)
2136 def _start_task(self, task, exit_handler):
2138 Register exit handler for the given task, set it
2139 as self._current_task, and call task.start().
2141 Subclasses can use this as a generic way to start
2145 task.addExitListener(exit_handler)
2146 self._current_task = task
# Composite task that runs queued sub-tasks strictly one after
# another; stops the sequence on the first failing task.
# NOTE(review): the _start and cancel def lines are elided from this
# excerpt; comments describe only visible code.
2149 class TaskSequence(CompositeTask):
2151 A collection of tasks that executes sequentially. Each task
2152 must have a addExitListener() method that can be used as
2153 a means to trigger movement from one task to the next.
2156 __slots__ = ("_task_queue",)
2158 def __init__(self, **kwargs):
2159 AsynchronousTask.__init__(self, **kwargs)
# deque gives O(1) popleft for FIFO task consumption.
2160 self._task_queue = deque()
2162 def add(self, task):
2163 self._task_queue.append(task)
2166 self._start_next_task()
# Cancel discards all pending tasks before delegating.
2169 self._task_queue.clear()
2170 CompositeTask.cancel(self)
2172 def _start_next_task(self):
2173 self._start_task(self._task_queue.popleft(),
2174 self._task_exit_handler)
2176 def _task_exit_handler(self, task):
# A non-zero exit aborts the sequence (handling line elided);
# otherwise advance, or finish when the queue is empty.
2177 if self._default_exit(task) != os.EX_OK:
2179 elif self._task_queue:
2180 self._start_next_task()
2182 self._final_exit(task)
# Pollable task wrapping a child process (self.pid); poll/wait use
# os.waitpid and translate the raw status into self.returncode.
# NOTE(review): several def lines (poll, cancel, isAlive, _wait) and
# try/except scaffolding are elided from this excerpt.
2187 class SubProcess(AbstractPollTask):
2189 __slots__ = ("pid",) + \
2190 ("_files", "_reg_id")
2192 # A file descriptor is required for the scheduler to monitor changes from
2193 # inside a poll() loop. When logging is not enabled, create a pipe just to
2194 # serve this purpose alone.
2198 if self.returncode is not None:
2199 return self.returncode
2200 if self.pid is None:
2201 return self.returncode
2202 if self._registered:
2203 return self.returncode
# Non-blocking reap; ECHILD means someone else already reaped it.
2206 retval = os.waitpid(self.pid, os.WNOHANG)
2208 if e.errno != errno.ECHILD:
# Treat an already-reaped child as a generic failure status of 1.
2211 retval = (self.pid, 1)
# (0, 0) means the child is still running.
2213 if retval == (0, 0):
2215 self._set_returncode(retval)
2216 return self.returncode
# Cancellation: best-effort SIGTERM; ESRCH means already gone.
2221 os.kill(self.pid, signal.SIGTERM)
2223 if e.errno != errno.ESRCH:
2227 self.cancelled = True
2228 if self.pid is not None:
2230 return self.returncode
2233 return self.pid is not None and \
2234 self.returncode is None
2238 if self.returncode is not None:
2239 return self.returncode
# Let the scheduler drive until our registration is dropped.
2241 if self._registered:
2242 self.scheduler.schedule(self._reg_id)
2244 if self.returncode is not None:
2245 return self.returncode
2248 wait_retval = os.waitpid(self.pid, 0)
2250 if e.errno != errno.ECHILD:
2253 self._set_returncode((self.pid, 1))
2255 self._set_returncode(wait_retval)
2257 return self.returncode
2259 def _unregister(self):
2261 Unregister from the scheduler and close open files.
2264 self._registered = False
2266 if self._reg_id is not None:
2267 self.scheduler.unregister(self._reg_id)
2270 if self._files is not None:
2271 for f in self._files.itervalues():
2275 def _set_returncode(self, wait_retval):
# Decode the waitpid status word: signal deaths are encoded into the
# high byte (conditional lines are elided from this excerpt).
2277 retval = wait_retval[1]
2279 if retval != os.EX_OK:
2281 retval = (retval & 0xff) << 8
2283 retval = retval >> 8
2285 self.returncode = retval
# SubProcess subclass that spawns via portage.process.spawn(), wiring
# the child's stdout/stderr through a pipe (or pty) so the scheduler
# can tee output to a log file and/or the terminal.
# NOTE(review): several interior lines (def _start, try/except
# scaffolding, flush calls, EOF handling in the handlers) are elided
# from this excerpt; comments describe only visible code.
2288 class SpawnProcess(SubProcess):
2291 Constructor keyword args are passed into portage.process.spawn().
2292 The required "args" keyword argument will be passed as the first
# Attribute names forwarded verbatim as spawn() keyword arguments.
2296 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2297 "uid", "gid", "groups", "umask", "logfile",
2298 "path_lookup", "pre_exec")
2300 __slots__ = ("args",) + \
2303 _file_names = ("log", "process", "stdout")
2304 _files_dict = slot_dict_class(_file_names, prefix="")
# Default fd_pipes maps the child's 0/1/2 onto our own stdio.
2311 if self.fd_pipes is None:
2313 fd_pipes = self.fd_pipes
2314 fd_pipes.setdefault(0, sys.stdin.fileno())
2315 fd_pipes.setdefault(1, sys.stdout.fileno())
2316 fd_pipes.setdefault(2, sys.stderr.fileno())
2318 # flush any pending output
2319 for fd in fd_pipes.itervalues():
2320 if fd == sys.stdout.fileno():
2322 if fd == sys.stderr.fileno():
2325 logfile = self.logfile
2326 self._files = self._files_dict()
# The master end is ours (non-blocking); the slave end goes to the child.
2329 master_fd, slave_fd = self._pipe(fd_pipes)
2330 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2331 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2334 fd_pipes_orig = fd_pipes.copy()
2336 # TODO: Use job control functions like tcsetpgrp() to control
2337 # access to stdin. Until then, use /dev/null so that any
2338 # attempts to read from stdin will immediately return EOF
2339 # instead of blocking indefinitely.
2340 null_input = open('/dev/null', 'rb')
2341 fd_pipes[0] = null_input.fileno()
2343 fd_pipes[0] = fd_pipes_orig[0]
2345 files.process = os.fdopen(master_fd, 'rb')
# Logging mode: child's stdout+stderr both go through our pipe, and
# we tee into the log file (and the real stdout unless backgrounded).
2346 if logfile is not None:
2348 fd_pipes[1] = slave_fd
2349 fd_pipes[2] = slave_fd
2351 files.log = open(logfile, mode='ab')
2352 portage.util.apply_secpass_permissions(logfile,
2353 uid=portage.portage_uid, gid=portage.portage_gid,
2356 if not self.background:
2357 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2359 output_handler = self._output_handler
2363 # Create a dummy pipe so the scheduler can monitor
2364 # the process from inside a poll() loop.
2365 fd_pipes[self._dummy_pipe_fd] = slave_fd
# Background without a log: also redirect 1/2 into the dummy pipe.
2367 fd_pipes[1] = slave_fd
2368 fd_pipes[2] = slave_fd
2369 output_handler = self._dummy_handler
# Collect spawn() kwargs from same-named attributes (skipping unset
# ones; the None-check lines are elided from this excerpt).
2372 for k in self._spawn_kwarg_names:
2373 v = getattr(self, k)
2377 kwargs["fd_pipes"] = fd_pipes
2378 kwargs["returnpid"] = True
# logfile is handled locally via the tee above, not by spawn().
2379 kwargs.pop("logfile", None)
2381 self._reg_id = self.scheduler.register(files.process.fileno(),
2382 self._registered_events, output_handler)
2383 self._registered = True
2385 retval = self._spawn(self.args, **kwargs)
2388 if null_input is not None:
# An int retval means spawn itself failed before a pid existed.
2391 if isinstance(retval, int):
2394 self.returncode = retval
2398 self.pid = retval[0]
# This class reaps the pid itself, so drop it from portage's registry.
2399 portage.process.spawned_pids.remove(self.pid)
2401 def _pipe(self, fd_pipes):
2403 @type fd_pipes: dict
2404 @param fd_pipes: pipes from which to copy terminal size if desired.
2408 def _spawn(self, args, **kwargs):
2409 return portage.process.spawn(args, **kwargs)
2411 def _output_handler(self, fd, event):
2413 if event & PollConstants.POLLIN:
2416 buf = array.array('B')
2418 buf.fromfile(files.process, self._bufsize)
# Tee the chunk: to the terminal (unless backgrounded) and the log.
2423 if not self.background:
2424 buf.tofile(files.stdout)
2425 files.stdout.flush()
2426 buf.tofile(files.log)
2432 self._unregister_if_appropriate(event)
2433 return self._registered
2435 def _dummy_handler(self, fd, event):
2437 This method is mainly interested in detecting EOF, since
2438 the only purpose of the pipe is to allow the scheduler to
2439 monitor the process from inside a poll() loop.
# Data read here is discarded; only readiness/EOF matters.
2442 if event & PollConstants.POLLIN:
2444 buf = array.array('B')
2446 buf.fromfile(self._files.process, self._bufsize)
2456 self._unregister_if_appropriate(event)
2457 return self._registered
# Runs misc-functions.sh commands (e.g. post-phase helpers) inside an
# existing ebuild environment, via portage.spawn().
# NOTE(review): the _start def line is elided from this excerpt.
2458 class MiscFunctionsProcess(SpawnProcess):
2460 Spawns misc-functions.sh with an existing ebuild environment.
2463 __slots__ = ("commands", "phase", "pkg", "settings")
2466 settings = self.settings
# EBUILD_PHASE must not leak from a previous phase into this spawn.
2467 settings.pop("EBUILD_PHASE", None)
2468 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2469 misc_sh_binary = os.path.join(portage_bin_path,
2470 os.path.basename(portage.const.MISC_SH_BINARY))
2472 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2473 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status file before spawning.
2475 portage._doebuild_exit_status_unlink(
2476 settings.get("EBUILD_EXIT_STATUS_FILE"))
2478 SpawnProcess._start(self)
# Spawn via portage.spawn (shell string) rather than an argv exec.
2480 def _spawn(self, args, **kwargs):
2481 settings = self.settings
2482 debug = settings.get("PORTAGE_DEBUG") == "1"
2483 return portage.spawn(" ".join(args), settings,
2484 debug=debug, **kwargs)
# Fold the exit-status-file check into the final returncode.
2486 def _set_returncode(self, wait_retval):
2487 SpawnProcess._set_returncode(self, wait_retval)
2488 self.returncode = portage._doebuild_exit_status_check_and_log(
2489 self.settings, self.phase, self.returncode)
# Asynchronous SRC_URI fetcher: spawns the "ebuild ... fetch" command
# as a SpawnProcess, optionally inside a locked build dir (non-
# prefetch mode) so pkg_nofetch elog messages can be collected.
# NOTE(review): several interior lines (def _start, phase selection,
# fetchonly/prefetch conditionals) are elided from this excerpt.
2491 class EbuildFetcher(SpawnProcess):
2493 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2498 root_config = self.pkg.root_config
2499 portdb = root_config.trees["porttree"].dbapi
2500 ebuild_path = portdb.findname(self.pkg.cpv)
# Settings come from a shared pool and are returned on completion.
2501 settings = self.config_pool.allocate()
2502 settings.setcpv(self.pkg)
2504 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2505 # should not be touched since otherwise it could interfere with
2506 # another instance of the same cpv concurrently being built for a
2507 # different $ROOT (currently, builds only cooperate with prefetchers
2508 # that are spawned for the same $ROOT).
2509 if not self.prefetch:
2510 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2511 self._build_dir.lock()
2512 self._build_dir.clean()
2513 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2514 if self.logfile is None:
2515 self.logfile = settings.get("PORTAGE_LOG_FILE")
2521 # If any incremental variables have been overridden
2522 # via the environment, those values need to be passed
2523 # along here so that they are correctly considered by
2524 # the config instance in the subproccess.
2525 fetch_env = os.environ.copy()
2527 nocolor = settings.get("NOCOLOR")
2528 if nocolor is not None:
2529 fetch_env["NOCOLOR"] = nocolor
# Fetching is I/O bound; don't renice the child.
2531 fetch_env["PORTAGE_NICENESS"] = "0"
2533 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2535 ebuild_binary = os.path.join(
2536 settings["PORTAGE_BIN_PATH"], "ebuild")
2538 fetch_args = [ebuild_binary, ebuild_path, phase]
2539 debug = settings.get("PORTAGE_DEBUG") == "1"
2541 fetch_args.append("--debug")
2543 self.args = fetch_args
2544 self.env = fetch_env
2545 SpawnProcess._start(self)
2547 def _pipe(self, fd_pipes):
2548 """When appropriate, use a pty so that fetcher progress bars,
2549 like wget has, will work properly."""
2550 if self.background or not sys.stdout.isatty():
2551 # When the output only goes to a log file,
2552 # there's no point in creating a pty.
2554 stdout_pipe = fd_pipes.get(1)
2555 got_pty, master_fd, slave_fd = \
2556 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2557 return (master_fd, slave_fd)
2559 def _set_returncode(self, wait_retval):
2560 SpawnProcess._set_returncode(self, wait_retval)
2561 # Collect elog messages that might have been
2562 # created by the pkg_nofetch phase.
2563 if self._build_dir is not None:
2564 # Skip elog messages for prefetch, in order to avoid duplicates.
# On fetch failure, route an eerror into the log file (when present)
# as well as the normal elog path.
2565 if not self.prefetch and self.returncode != os.EX_OK:
2567 if self.logfile is not None:
2569 elog_out = open(self.logfile, 'a')
2570 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2571 if self.logfile is not None:
2572 msg += ", Log file:"
2573 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2574 if self.logfile is not None:
2575 eerror(" '%s'" % (self.logfile,),
2576 phase="unpack", key=self.pkg.cpv, out=elog_out)
2577 if elog_out is not None:
2579 if not self.prefetch:
2580 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2581 features = self._build_dir.settings.features
# Successful fetch: clean the build dir, then always unlock and
# return the pooled settings object.
2582 if self.returncode == os.EX_OK:
2583 self._build_dir.clean()
2584 self._build_dir.unlock()
2585 self.config_pool.deallocate(self._build_dir.settings)
2586 self._build_dir = None
# EbuildBuildDir: manages creation, locking and cleanup of a package's
# PORTAGE_BUILDDIR (and its parent category dir).  Callers lock() before
# building and unlock() afterwards; clean() removes the build dir unless
# FEATURES keeps it.  NOTE(review): several `def` lines (lock, clean,
# unlock) fall in gaps of this excerpt — method boundaries below are
# inferred from the surviving docstrings and bodies.
2586 class EbuildBuildDir(SlotObject):
2588 __slots__ = ("dir_path", "pkg", "settings",
2589 "locked", "_catdir", "_lock_obj")
2591 def __init__(self, **kwargs):
2592 SlotObject.__init__(self, **kwargs)
# lock(): acquire the builddir lock (the `def lock(self):` line is
# missing from this excerpt).
2597 This raises an AlreadyLocked exception if lock() is called
2598 while a lock is already held. In order to avoid this, call
2599 unlock() or check whether the "locked" attribute is True
2600 or False before calling lock().
2602 if self._lock_obj is not None:
2603 raise self.AlreadyLocked((self._lock_obj,))
2605 dir_path = self.dir_path
2606 if dir_path is None:
# dir_path unset: derive PORTAGE_BUILDDIR via doebuild_environment().
2607 root_config = self.pkg.root_config
2608 portdb = root_config.trees["porttree"].dbapi
2609 ebuild_path = portdb.findname(self.pkg.cpv)
2610 settings = self.settings
2611 settings.setcpv(self.pkg)
2612 debug = settings.get("PORTAGE_DEBUG") == "1"
2613 use_cache = 1 # always true
2614 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615 self.settings, debug, use_cache, portdb)
2616 dir_path = self.settings["PORTAGE_BUILDDIR"]
2618 catdir = os.path.dirname(dir_path)
2619 self._catdir = catdir
2621 portage.util.ensure_dirs(os.path.dirname(catdir),
2622 gid=portage.portage_gid,
# Hold a lock on the category dir while creating it and locking the
# package's own build dir, then release the category lock.
2626 catdir_lock = portage.locks.lockdir(catdir)
2627 portage.util.ensure_dirs(catdir,
2628 gid=portage.portage_gid,
2630 self._lock_obj = portage.locks.lockdir(dir_path)
2632 self.locked = self._lock_obj is not None
2633 if catdir_lock is not None:
2634 portage.locks.unlockdir(catdir_lock)
# clean(): (the `def clean(self):` line is missing from this excerpt.)
2637 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2638 by keepwork or keeptemp in FEATURES."""
2639 settings = self.settings
2640 features = settings.features
2641 if not ("keepwork" in features or "keeptemp" in features):
2643 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2644 except EnvironmentError, e:
# A missing build dir is fine; re-raise anything else.
2645 if e.errno != errno.ENOENT:
# unlock(): release the builddir lock and try to remove the (possibly
# now-empty) category dir (the `def unlock(self):` line is missing from
# this excerpt).
2650 if self._lock_obj is None:
2653 portage.locks.unlockdir(self._lock_obj)
2654 self._lock_obj = None
2657 catdir = self._catdir
2660 catdir_lock = portage.locks.lockdir(catdir)
# Removal of a non-empty/already-removed category dir is expected.
2666 if e.errno not in (errno.ENOENT,
2667 errno.ENOTEMPTY, errno.EEXIST):
2670 portage.locks.unlockdir(catdir_lock)
2672 class AlreadyLocked(portage.exception.PortageException):
# EbuildBuild: CompositeTask that drives one ebuild through the full
# pipeline: (optional) prefetch wait -> fetch -> build phases ->
# (optional) binary-package creation -> merge.  The builddir lock is
# held from the start of the build until merge/cleanup completes.
# NOTE(review): this excerpt omits many lines, including the `def
# _start` line and several assignments (e.g. `pkg`, `tree`, `opts`).
2675 class EbuildBuild(CompositeTask):
2677 __slots__ = ("args_set", "config_pool", "find_blockers",
2678 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2679 "prefetcher", "settings", "world_atom") + \
2680 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# _start(): (def line missing from this excerpt) — prepare settings and
# either wait for a running prefetcher or continue immediately.
2684 logger = self.logger
2687 settings = self.settings
2688 world_atom = self.world_atom
2689 root_config = pkg.root_config
2692 portdb = root_config.trees[tree].dbapi
2693 settings.setcpv(pkg)
2694 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2695 ebuild_path = portdb.findname(self.pkg.cpv)
2696 self._ebuild_path = ebuild_path
2698 prefetcher = self.prefetcher
2699 if prefetcher is None:
2701 elif not prefetcher.isAlive():
2703 elif prefetcher.poll() is None:
# Prefetcher still running: print a notice and wait for its exit
# listener instead of racing it for the fetch locks.
2705 waiting_msg = "Fetching files " + \
2706 "in the background. " + \
2707 "To view fetch progress, run `tail -f " + \
2708 "/var/log/emerge-fetch.log` in another " + \
2710 msg_prefix = colorize("GOOD", " * ")
2711 from textwrap import wrap
2712 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2713 for line in wrap(waiting_msg, 65))
2714 if not self.background:
2715 writemsg(waiting_msg, noiselevel=-1)
2717 self._current_task = prefetcher
2718 prefetcher.addExitListener(self._prefetch_exit)
2721 self._prefetch_exit(prefetcher)
# _prefetch_exit: choose between a synchronous pretend/fetchonly fetch
# (EbuildFetchonly) and an asynchronous EbuildFetcher task.
2723 def _prefetch_exit(self, prefetcher):
2727 settings = self.settings
2730 fetcher = EbuildFetchonly(
2731 fetch_all=opts.fetch_all_uri,
2732 pkg=pkg, pretend=opts.pretend,
2734 retval = fetcher.execute()
2735 self.returncode = retval
2739 fetcher = EbuildFetcher(config_pool=self.config_pool,
2740 fetchall=opts.fetch_all_uri,
2741 fetchonly=opts.fetchonly,
2742 background=self.background,
2743 pkg=pkg, scheduler=self.scheduler)
2745 self._start_task(fetcher, self._fetch_exit)
# _fetch_exit: on failure keep the fetch log for diagnosis; on success
# remove it; otherwise lock the builddir and start the build phases.
2747 def _fetch_exit(self, fetcher):
2751 fetch_failed = False
2753 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2755 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2757 if fetch_failed and fetcher.logfile is not None and \
2758 os.path.exists(fetcher.logfile):
2759 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2761 if not fetch_failed and fetcher.logfile is not None:
2762 # Fetch was successful, so remove the fetch log.
2764 os.unlink(fetcher.logfile)
2768 if fetch_failed or opts.fetchonly:
2772 logger = self.logger
2774 pkg_count = self.pkg_count
2775 scheduler = self.scheduler
2776 settings = self.settings
2777 features = settings.features
2778 ebuild_path = self._ebuild_path
2779 system_set = pkg.root_config.sets["system"]
2781 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2782 self._build_dir.lock()
2784 # Cleaning is triggered before the setup
2785 # phase, in portage.doebuild().
2786 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2787 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2788 short_msg = "emerge: (%s of %s) %s Clean" % \
2789 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2790 logger.log(msg, short_msg=short_msg)
2792 #buildsyspkg: Check if we need to _force_ binary package creation
2793 self._issyspkg = "buildsyspkg" in features and \
2794 system_set.findAtomForPackage(pkg) and \
2797 if opts.buildpkg or self._issyspkg:
2799 self._buildpkg = True
2801 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2802 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2803 short_msg = "emerge: (%s of %s) %s Compile" % \
2804 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2805 logger.log(msg, short_msg=short_msg)
2808 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2809 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2810 short_msg = "emerge: (%s of %s) %s Compile" % \
2811 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2812 logger.log(msg, short_msg=short_msg)
2814 build = EbuildExecuter(background=self.background, pkg=pkg,
2815 scheduler=scheduler, settings=settings)
2816 self._start_task(build, self._build_exit)
# _unlock_builddir: flush pending elog messages, then drop the lock.
2818 def _unlock_builddir(self):
2819 portage.elog.elog_process(self.pkg.cpv, self.settings)
2820 self._build_dir.unlock()
2822 def _build_exit(self, build):
2823 if self._default_exit(build) != os.EX_OK:
2824 self._unlock_builddir()
2829 buildpkg = self._buildpkg
2832 self._final_exit(build)
# Forced rescue tarball for system packages (buildsyspkg).
2837 msg = ">>> This is a system package, " + \
2838 "let's pack a rescue tarball.\n"
2840 log_path = self.settings.get("PORTAGE_LOG_FILE")
2841 if log_path is not None:
2842 log_file = open(log_path, 'a')
2848 if not self.background:
2849 portage.writemsg_stdout(msg, noiselevel=-1)
2851 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2852 scheduler=self.scheduler, settings=self.settings)
2854 self._start_task(packager, self._buildpkg_exit)
2856 def _buildpkg_exit(self, packager):
2858 Released build dir lock when there is a failure or
2859 when in buildpkgonly mode. Otherwise, the lock will
2860 be released when merge() is called.
2863 if self._default_exit(packager) != os.EX_OK:
2864 self._unlock_builddir()
2868 if self.opts.buildpkgonly:
2869 # Need to call "clean" phase for buildpkgonly mode
2870 portage.elog.elog_process(self.pkg.cpv, self.settings)
2872 clean_phase = EbuildPhase(background=self.background,
2873 pkg=self.pkg, phase=phase,
2874 scheduler=self.scheduler, settings=self.settings,
2876 self._start_task(clean_phase, self._clean_exit)
2879 # Continue holding the builddir lock until
2880 # after the package has been installed.
2881 self._current_task = None
2882 self.returncode = packager.returncode
2885 def _clean_exit(self, clean_phase):
2886 if self._final_exit(clean_phase) != os.EX_OK or \
2887 self.opts.buildpkgonly:
2888 self._unlock_builddir()
# install(): synchronous merge to the live FS (the `def install` line is
# missing from this excerpt); always unlocks the builddir afterwards.
2893 Install the package and then clean up and release locks.
2894 Only call this after the build has completed successfully
2895 and neither fetchonly nor buildpkgonly mode are enabled.
2898 find_blockers = self.find_blockers
2899 ldpath_mtimes = self.ldpath_mtimes
2900 logger = self.logger
2902 pkg_count = self.pkg_count
2903 settings = self.settings
2904 world_atom = self.world_atom
2905 ebuild_path = self._ebuild_path
2908 merge = EbuildMerge(find_blockers=self.find_blockers,
2909 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2910 pkg_count=pkg_count, pkg_path=ebuild_path,
2911 scheduler=self.scheduler,
2912 settings=settings, tree=tree, world_atom=world_atom)
2914 msg = " === (%s of %s) Merging (%s::%s)" % \
2915 (pkg_count.curval, pkg_count.maxval,
2916 pkg.cpv, ebuild_path)
2917 short_msg = "emerge: (%s of %s) %s Merge" % \
2918 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2919 logger.log(msg, short_msg=short_msg)
2922 rval = merge.execute()
2924 self._unlock_builddir()
# EbuildExecuter: CompositeTask that runs the build-phase sequence for
# one ebuild: clean -> setup -> unpack -> (prepare, configure, compile,
# test, install), with setup/unpack throttled through the scheduler and
# prepare/configure skipped for EAPI 0/1.
2928 class EbuildExecuter(CompositeTask):
2930 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2932 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose $DISTDIR access must be serialized (list elided in this
# excerpt).
2934 _live_eclasses = frozenset([
# _start(): (def line missing from this excerpt) — kick off the clean
# phase first.
2944 self._tree = "porttree"
2947 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2948 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2949 self._start_task(clean_phase, self._clean_phase_exit)
2951 def _clean_phase_exit(self, clean_phase):
2953 if self._default_exit(clean_phase) != os.EX_OK:
2958 scheduler = self.scheduler
2959 settings = self.settings
2962 # This initializes PORTAGE_LOG_FILE.
2963 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# setup is funneled through the scheduler's setup queue rather than
# started directly.
2965 setup_phase = EbuildPhase(background=self.background,
2966 pkg=pkg, phase="setup", scheduler=scheduler,
2967 settings=settings, tree=self._tree)
2969 setup_phase.addExitListener(self._setup_exit)
2970 self._current_task = setup_phase
2971 self.scheduler.scheduleSetup(setup_phase)
2973 def _setup_exit(self, setup_phase):
2975 if self._default_exit(setup_phase) != os.EX_OK:
2979 unpack_phase = EbuildPhase(background=self.background,
2980 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2981 settings=self.settings, tree=self._tree)
2983 if self._live_eclasses.intersection(self.pkg.inherited):
2984 # Serialize $DISTDIR access for live ebuilds since
2985 # otherwise they can interfere with eachother.
2987 unpack_phase.addExitListener(self._unpack_exit)
2988 self._current_task = unpack_phase
2989 self.scheduler.scheduleUnpack(unpack_phase)
2992 self._start_task(unpack_phase, self._unpack_exit)
2994 def _unpack_exit(self, unpack_phase):
2996 if self._default_exit(unpack_phase) != os.EX_OK:
# Queue the remaining src_* phases as one sequential TaskSequence.
3000 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3003 phases = self._phases
3004 eapi = pkg.metadata["EAPI"]
3005 if eapi in ("0", "1"):
3006 # skip src_prepare and src_configure
3009 for phase in phases:
3010 ebuild_phases.add(EbuildPhase(background=self.background,
3011 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3012 settings=self.settings, tree=self._tree))
3014 self._start_task(ebuild_phases, self._default_final_exit)
# EbuildMetadataPhase: runs `ebuild ... depend` asynchronously and
# collects the metadata the bash side writes to a dedicated pipe, then
# hands (cpv, path, repo, metadata, mtime) to metadata_callback.
3016 class EbuildMetadataPhase(SubProcess):
3019 Asynchronous interface for the ebuild "depend" phase which is
3020 used to extract metadata from the ebuild.
3023 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3024 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3027 _file_names = ("ebuild",)
3028 _files_dict = slot_dict_class(_file_names, prefix="")
# _start(): (def line missing from this excerpt) — set up fd_pipes, a
# non-blocking metadata pipe, and spawn doebuild("depend").
3032 settings = self.settings
3034 ebuild_path = self.ebuild_path
3035 debug = settings.get("PORTAGE_DEBUG") == "1"
3039 if self.fd_pipes is not None:
3040 fd_pipes = self.fd_pipes.copy()
3044 fd_pipes.setdefault(0, sys.stdin.fileno())
3045 fd_pipes.setdefault(1, sys.stdout.fileno())
3046 fd_pipes.setdefault(2, sys.stderr.fileno())
3048 # flush any pending output
3049 for fd in fd_pipes.itervalues():
3050 if fd == sys.stdout.fileno():
3052 if fd == sys.stderr.fileno():
3055 fd_pipes_orig = fd_pipes.copy()
3056 self._files = self._files_dict()
# Read end is made non-blocking so the scheduler's poll loop can drain
# it incrementally.
3059 master_fd, slave_fd = os.pipe()
3060 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3061 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3063 fd_pipes[self._metadata_fd] = slave_fd
3065 self._raw_metadata = []
3066 files.ebuild = os.fdopen(master_fd, 'r')
3067 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3068 self._registered_events, self._output_handler)
3069 self._registered = True
3071 retval = portage.doebuild(ebuild_path, "depend",
3072 settings["ROOT"], settings, debug,
3073 mydbapi=self.portdb, tree="porttree",
3074 fd_pipes=fd_pipes, returnpid=True)
3078 if isinstance(retval, int):
3079 # doebuild failed before spawning
3081 self.returncode = retval
# Take over ownership of the spawned pid from portage's global list.
3085 self.pid = retval[0]
3086 portage.process.spawned_pids.remove(self.pid)
# Poll callback: accumulate raw metadata; an empty read signals EOF.
3088 def _output_handler(self, fd, event):
3090 if event & PollConstants.POLLIN:
3091 self._raw_metadata.append(self._files.ebuild.read())
3092 if not self._raw_metadata[-1]:
3096 self._unregister_if_appropriate(event)
3097 return self._registered
3099 def _set_returncode(self, wait_retval):
3100 SubProcess._set_returncode(self, wait_retval)
3101 if self.returncode == os.EX_OK:
3102 metadata_lines = "".join(self._raw_metadata).splitlines()
3103 if len(portage.auxdbkeys) != len(metadata_lines):
3104 # Don't trust bash's returncode if the
3105 # number of lines is incorrect.
3108 metadata = izip(portage.auxdbkeys, metadata_lines)
3109 self.metadata_callback(self.cpv, self.ebuild_path,
3110 self.repo_path, metadata, self.ebuild_mtime)
# EbuildProcess: SpawnProcess subclass that executes a single ebuild
# phase via portage.doebuild(), logging to PORTAGE_LOG_FILE except
# during clean phases, and normalizing the exit status afterwards.
3112 class EbuildProcess(SpawnProcess):
3114 __slots__ = ("phase", "pkg", "settings", "tree")
# _start(): (def line missing from this excerpt.)
3117 # Don't open the log file during the clean phase since the
3118 # open file can result in an nfs lock on $T/build.log which
3119 # prevents the clean phase from removing $T.
3120 if self.phase not in ("clean", "cleanrm"):
3121 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3122 SpawnProcess._start(self)
# Always use a pty/pipe sized to the controlling terminal for output.
3124 def _pipe(self, fd_pipes):
3125 stdout_pipe = fd_pipes.get(1)
3126 got_pty, master_fd, slave_fd = \
3127 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3128 return (master_fd, slave_fd)
3130 def _spawn(self, args, **kwargs):
3132 root_config = self.pkg.root_config
# NOTE(review): the assignment of `tree` (original line 3133) is missing
# from this excerpt; the calls below read it.
3134 mydbapi = root_config.trees[tree].dbapi
3135 settings = self.settings
3136 ebuild_path = settings["EBUILD"]
3137 debug = settings.get("PORTAGE_DEBUG") == "1"
3139 rval = portage.doebuild(ebuild_path, self.phase,
3140 root_config.root, settings, debug,
3141 mydbapi=mydbapi, tree=tree, **kwargs)
3145 def _set_returncode(self, wait_retval):
3146 SpawnProcess._set_returncode(self, wait_retval)
3148 if self.phase not in ("clean", "cleanrm"):
3149 self.returncode = portage._doebuild_exit_status_check_and_log(
3150 self.settings, self.phase, self.returncode)
# FEATURES=test-fail-continue: a failed test phase is downgraded to OK.
3152 if self.phase == "test" and self.returncode != os.EX_OK and \
3153 "test-fail-continue" in self.settings.features:
3154 self.returncode = os.EX_OK
3156 portage._post_phase_userpriv_perms(self.settings)
# EbuildPhase: CompositeTask wrapper around EbuildProcess for one phase;
# after the phase it checks the build log (install), fixes ownership,
# and runs any registered post-phase misc commands.
3158 class EbuildPhase(CompositeTask):
3160 __slots__ = ("background", "pkg", "phase",
3161 "scheduler", "settings", "tree")
3163 _post_phase_cmds = portage._post_phase_cmds
# _start(): (def line missing from this excerpt.)
3167 ebuild_process = EbuildProcess(background=self.background,
3168 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3169 settings=self.settings, tree=self.tree)
3171 self._start_task(ebuild_process, self._ebuild_exit)
3173 def _ebuild_exit(self, ebuild_process):
3175 if self.phase == "install":
3177 log_path = self.settings.get("PORTAGE_LOG_FILE")
3179 if self.background and log_path is not None:
3180 log_file = open(log_path, 'a')
# Scan the build log for QA warnings after src_install.
3183 portage._check_build_log(self.settings, out=out)
3185 if log_file is not None:
3188 if self._default_exit(ebuild_process) != os.EX_OK:
3192 settings = self.settings
3194 if self.phase == "install":
3195 portage._post_src_install_uid_fix(settings)
3197 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3198 if post_phase_cmds is not None:
3199 post_phase = MiscFunctionsProcess(background=self.background,
3200 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3201 scheduler=self.scheduler, settings=settings)
3202 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: propagate the phase's returncode directly.
3205 self.returncode = ebuild_process.returncode
3206 self._current_task = None
3209 def _post_phase_exit(self, post_phase):
3210 if self._final_exit(post_phase) != os.EX_OK:
3211 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3213 self._current_task = None
# EbuildBinpkg: runs the "package" phase to build a .tbz2 into a
# pid-unique temp file, then injects it into the binary tree on success.
3217 class EbuildBinpkg(EbuildProcess):
3219 This assumes that src_install() has successfully completed.
3221 __slots__ = ("_binpkg_tmpfile",)
# _start(): (def line missing from this excerpt.)
3224 self.phase = "package"
3225 self.tree = "porttree"
3227 root_config = pkg.root_config
3228 portdb = root_config.trees["porttree"].dbapi
3229 bintree = root_config.trees["bintree"]
3230 ebuild_path = portdb.findname(self.pkg.cpv)
3231 settings = self.settings
3232 debug = settings.get("PORTAGE_DEBUG") == "1"
3234 bintree.prevent_collision(pkg.cpv)
# Write to a pid-suffixed temp file so concurrent emerges don't clash.
3235 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3236 pkg.cpv + ".tbz2." + str(os.getpid()))
3237 self._binpkg_tmpfile = binpkg_tmpfile
3238 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3239 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3242 EbuildProcess._start(self)
# Remove the temp-file setting again once the child has been spawned.
3244 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3246 def _set_returncode(self, wait_retval):
3247 EbuildProcess._set_returncode(self, wait_retval)
3250 bintree = pkg.root_config.trees["bintree"]
3251 binpkg_tmpfile = self._binpkg_tmpfile
3252 if self.returncode == os.EX_OK:
3253 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# EbuildMerge: synchronous wrapper around portage.merge() that installs
# the built image to the live filesystem, updates the world file via
# world_atom on success, and logs completion.
3255 class EbuildMerge(SlotObject):
3257 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3258 "pkg", "pkg_count", "pkg_path", "pretend",
3259 "scheduler", "settings", "tree", "world_atom")
# execute(): (def line missing from this excerpt) — returns the merge's
# exit status.
3262 root_config = self.pkg.root_config
3263 settings = self.settings
3264 retval = portage.merge(settings["CATEGORY"],
3265 settings["PF"], settings["D"],
3266 os.path.join(settings["PORTAGE_BUILDDIR"],
3267 "build-info"), root_config.root, settings,
3268 myebuild=settings["EBUILD"],
3269 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3270 vartree=root_config.trees["vartree"],
3271 prev_mtimes=self.ldpath_mtimes,
3272 scheduler=self.scheduler,
3273 blockers=self.find_blockers)
3275 if retval == os.EX_OK:
3276 self.world_atom(self.pkg)
3281 def _log_success(self):
# NOTE(review): the assignment of `pkg` (original line 3282) is missing
# from this excerpt; the format strings below read it.
3283 pkg_count = self.pkg_count
3284 pkg_path = self.pkg_path
3285 logger = self.logger
3286 if "noclean" not in self.settings.features:
3287 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3288 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3289 logger.log((" === (%s of %s) " + \
3290 "Post-Build Cleaning (%s::%s)") % \
3291 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3292 short_msg=short_msg)
3293 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3294 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# PackageUninstall: AsynchronousTask wrapper around unmerge(); maps an
# UninstallFailure to its exit status and routes messages either to the
# console or to PORTAGE_LOG_FILE depending on background mode.
3296 class PackageUninstall(AsynchronousTask):
3298 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# _start(): (def/try lines missing from this excerpt.)
3302 unmerge(self.pkg.root_config, self.opts, "unmerge",
3303 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3304 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3305 writemsg_level=self._writemsg_level)
3306 except UninstallFailure, e:
3307 self.returncode = e.status
3309 self.returncode = os.EX_OK
3312 def _writemsg_level(self, msg, level=0, noiselevel=0):
3314 log_path = self.settings.get("PORTAGE_LOG_FILE")
3315 background = self.background
3317 if log_path is None:
# Without a log file, background tasks only surface warnings and above.
3318 if not (background and level < logging.WARNING):
3319 portage.util.writemsg_level(msg,
3320 level=level, noiselevel=noiselevel)
3323 portage.util.writemsg_level(msg,
3324 level=level, noiselevel=noiselevel)
3326 f = open(log_path, 'a')
# Binpkg: CompositeTask that installs a binary package end-to-end:
# (optional) prefetch wait -> fetch -> digest verify -> clean -> setup
# -> extract image -> merge, holding the builddir lock throughout
# (except in pretend/fetchonly modes).
3332 class Binpkg(CompositeTask):
3334 __slots__ = ("find_blockers",
3335 "ldpath_mtimes", "logger", "opts",
3336 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3337 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3338 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3340 def _writemsg_level(self, msg, level=0, noiselevel=0):
3342 if not self.background:
3343 portage.util.writemsg_level(msg,
3344 level=level, noiselevel=noiselevel)
3346 log_path = self.settings.get("PORTAGE_LOG_FILE")
3347 if log_path is not None:
3348 f = open(log_path, 'a')
# _start(): (def line and `pkg = self.pkg` missing from this excerpt) —
# derive builddir paths and synthesize an ebuild environment for setup.
3357 settings = self.settings
3358 settings.setcpv(pkg)
3359 self._tree = "bintree"
3360 self._bintree = self.pkg.root_config.trees[self._tree]
3361 self._verify = not self.opts.pretend
3363 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3364 "portage", pkg.category, pkg.pf)
3365 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3366 pkg=pkg, settings=settings)
3367 self._image_dir = os.path.join(dir_path, "image")
3368 self._infloc = os.path.join(dir_path, "build-info")
3369 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3370 settings["EBUILD"] = self._ebuild_path
3371 debug = settings.get("PORTAGE_DEBUG") == "1"
3372 portage.doebuild_environment(self._ebuild_path, "setup",
3373 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3374 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3376 # The prefetcher has already completed or it
3377 # could be running now. If it's running now,
3378 # wait for it to complete since it holds
3379 # a lock on the file being fetched. The
3380 # portage.locks functions are only designed
3381 # to work between separate processes. Since
3382 # the lock is held by the current process,
3383 # use the scheduler and fetcher methods to
3384 # synchronize with the fetcher.
3385 prefetcher = self.prefetcher
3386 if prefetcher is None:
3388 elif not prefetcher.isAlive():
3390 elif prefetcher.poll() is None:
3392 waiting_msg = ("Fetching '%s' " + \
3393 "in the background. " + \
3394 "To view fetch progress, run `tail -f " + \
3395 "/var/log/emerge-fetch.log` in another " + \
3396 "terminal.") % prefetcher.pkg_path
3397 msg_prefix = colorize("GOOD", " * ")
3398 from textwrap import wrap
3399 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3400 for line in wrap(waiting_msg, 65))
3401 if not self.background:
3402 writemsg(waiting_msg, noiselevel=-1)
3404 self._current_task = prefetcher
3405 prefetcher.addExitListener(self._prefetch_exit)
3408 self._prefetch_exit(prefetcher)
3410 def _prefetch_exit(self, prefetcher):
3413 pkg_count = self.pkg_count
3414 if not (self.opts.pretend or self.opts.fetchonly):
# Start from a clean builddir before extracting the image.
3415 self._build_dir.lock()
3417 shutil.rmtree(self._build_dir.dir_path)
3418 except EnvironmentError, e:
3419 if e.errno != errno.ENOENT:
3422 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3423 fetcher = BinpkgFetcher(background=self.background,
3424 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3425 pretend=self.opts.pretend, scheduler=self.scheduler)
3426 pkg_path = fetcher.pkg_path
3427 self._pkg_path = pkg_path
# Only fetch when the package actually lives on a remote binhost.
3429 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3431 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3432 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3433 short_msg = "emerge: (%s of %s) %s Fetch" % \
3434 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3435 self.logger.log(msg, short_msg=short_msg)
3436 self._start_task(fetcher, self._fetcher_exit)
3439 self._fetcher_exit(fetcher)
3441 def _fetcher_exit(self, fetcher):
3443 # The fetcher only has a returncode when
3444 # --getbinpkg is enabled.
3445 if fetcher.returncode is not None:
3446 self._fetched_pkg = True
3447 if self._default_exit(fetcher) != os.EX_OK:
3448 self._unlock_builddir()
3452 if self.opts.pretend:
3453 self._current_task = None
3454 self.returncode = os.EX_OK
3462 logfile = self.settings.get("PORTAGE_LOG_FILE")
3463 verifier = BinpkgVerifier(background=self.background,
3464 logfile=logfile, pkg=self.pkg)
3465 self._start_task(verifier, self._verifier_exit)
3468 self._verifier_exit(verifier)
3470 def _verifier_exit(self, verifier):
3471 if verifier is not None and \
3472 self._default_exit(verifier) != os.EX_OK:
3473 self._unlock_builddir()
3477 logger = self.logger
3479 pkg_count = self.pkg_count
3480 pkg_path = self._pkg_path
3482 if self._fetched_pkg:
# Register the freshly fetched tbz2 with the local binary tree.
3483 self._bintree.inject(pkg.cpv, filename=pkg_path)
3485 if self.opts.fetchonly:
3486 self._current_task = None
3487 self.returncode = os.EX_OK
3491 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3492 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3493 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3494 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3495 logger.log(msg, short_msg=short_msg)
3498 settings = self.settings
3499 ebuild_phase = EbuildPhase(background=self.background,
3500 pkg=pkg, phase=phase, scheduler=self.scheduler,
3501 settings=settings, tree=self._tree)
3503 self._start_task(ebuild_phase, self._clean_exit)
3505 def _clean_exit(self, clean_phase):
3506 if self._default_exit(clean_phase) != os.EX_OK:
3507 self._unlock_builddir()
# _unpack_metadata step: (def line missing from this excerpt) —
# recreate build dirs and extract xpak metadata into build-info.
3511 dir_path = self._build_dir.dir_path
3513 infloc = self._infloc
3515 pkg_path = self._pkg_path
3518 for mydir in (dir_path, self._image_dir, infloc):
3519 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3520 gid=portage.data.portage_gid, mode=dir_mode)
3522 # This initializes PORTAGE_LOG_FILE.
3523 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3524 self._writemsg_level(">>> Extracting info\n")
3526 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3527 check_missing_metadata = ("CATEGORY", "PF")
3528 missing_metadata = set()
3529 for k in check_missing_metadata:
3530 v = pkg_xpak.getfile(k)
3532 missing_metadata.add(k)
3534 pkg_xpak.unpackinfo(infloc)
3535 for k in missing_metadata:
3543 f = open(os.path.join(infloc, k), 'wb')
3549 # Store the md5sum in the vdb.
3550 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3552 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3556 # This gives bashrc users an opportunity to do various things
3557 # such as remove binary packages after they're installed.
3558 settings = self.settings
3559 settings.setcpv(self.pkg)
3560 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3561 settings.backup_changes("PORTAGE_BINPKG_FILE")
3564 setup_phase = EbuildPhase(background=self.background,
3565 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3566 settings=settings, tree=self._tree)
3568 setup_phase.addExitListener(self._setup_exit)
3569 self._current_task = setup_phase
3570 self.scheduler.scheduleSetup(setup_phase)
3572 def _setup_exit(self, setup_phase):
3573 if self._default_exit(setup_phase) != os.EX_OK:
3574 self._unlock_builddir()
3578 extractor = BinpkgExtractorAsync(background=self.background,
3579 image_dir=self._image_dir,
3580 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3581 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3582 self._start_task(extractor, self._extractor_exit)
3584 def _extractor_exit(self, extractor):
3585 if self._final_exit(extractor) != os.EX_OK:
3586 self._unlock_builddir()
3587 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3591 def _unlock_builddir(self):
# No lock was taken in pretend/fetchonly modes, so nothing to release.
3592 if self.opts.pretend or self.opts.fetchonly:
3594 portage.elog.elog_process(self.pkg.cpv, self.settings)
3595 self._build_dir.unlock()
# install(): (def line missing from this excerpt) — synchronous merge of
# the extracted image; the lock is released in the finally path below.
3599 # This gives bashrc users an opportunity to do various things
3600 # such as remove binary packages after they're installed.
3601 settings = self.settings
3602 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3603 settings.backup_changes("PORTAGE_BINPKG_FILE")
3605 merge = EbuildMerge(find_blockers=self.find_blockers,
3606 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3607 pkg=self.pkg, pkg_count=self.pkg_count,
3608 pkg_path=self._pkg_path, scheduler=self.scheduler,
3609 settings=settings, tree=self._tree, world_atom=self.world_atom)
3612 retval = merge.execute()
3614 settings.pop("PORTAGE_BINPKG_FILE", None)
3615 self._unlock_builddir()
# BinpkgFetcher: SpawnProcess that downloads a .tbz2 from the binhost
# using FETCHCOMMAND/RESUMECOMMAND, with optional file locking and a
# post-fetch mtime sync against the remote index.
3618 class BinpkgFetcher(SpawnProcess):
3620 __slots__ = ("pkg", "pretend",
3621 "locked", "pkg_path", "_lock_obj")
3623 def __init__(self, **kwargs):
3624 SpawnProcess.__init__(self, **kwargs)
# NOTE(review): `pkg = self.pkg` (original line 3625) is missing from
# this excerpt.
3626 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start(): (def line and some setup lines missing from this excerpt.)
3634 pretend = self.pretend
3635 bintree = pkg.root_config.trees["bintree"]
3636 settings = bintree.settings
3637 use_locks = "distlocks" in settings.features
3638 pkg_path = self.pkg_path
3641 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial download is already known to be invalid.
3644 exists = os.path.exists(pkg_path)
3645 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3646 if not (pretend or resume):
3647 # Remove existing file or broken symlink.
3653 # urljoin doesn't work correctly with
3654 # unrecognized protocols like sftp
3655 if bintree._remote_has_index:
3656 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3658 rel_uri = pkg.cpv + ".tbz2"
3659 uri = bintree._remote_base_uri.rstrip("/") + \
3660 "/" + rel_uri.lstrip("/")
3662 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3663 "/" + pkg.pf + ".tbz2"
# Pretend mode: just print the URI, no download.
3666 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3667 self.returncode = os.EX_OK
# Select the protocol-specific fetch/resume command, falling back to
# the generic one.
3671 protocol = urlparse.urlparse(uri)[0]
3672 fcmd_prefix = "FETCHCOMMAND"
3674 fcmd_prefix = "RESUMECOMMAND"
3675 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3677 fcmd = settings.get(fcmd_prefix)
3680 "DISTDIR" : os.path.dirname(pkg_path),
3682 "FILE" : os.path.basename(pkg_path)
3685 fetch_env = dict(settings.iteritems())
3686 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3687 for x in shlex.split(fcmd)]
3689 if self.fd_pipes is None:
3691 fd_pipes = self.fd_pipes
3693 # Redirect all output to stdout since some fetchers like
3694 # wget pollute stderr (if portage detects a problem then it
3695 # can send it's own message to stderr).
3696 fd_pipes.setdefault(0, sys.stdin.fileno())
3697 fd_pipes.setdefault(1, sys.stdout.fileno())
3698 fd_pipes.setdefault(2, sys.stdout.fileno())
3700 self.args = fetch_args
3701 self.env = fetch_env
3702 SpawnProcess._start(self)
3704 def _set_returncode(self, wait_retval):
3705 SpawnProcess._set_returncode(self, wait_retval)
3706 if self.returncode == os.EX_OK:
3707 # If possible, update the mtime to match the remote package if
3708 # the fetcher didn't already do it automatically.
3709 bintree = self.pkg.root_config.trees["bintree"]
3710 if bintree._remote_has_index:
3711 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3712 if remote_mtime is not None:
3714 remote_mtime = long(remote_mtime)
3719 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3723 if remote_mtime != local_mtime:
3725 os.utime(self.pkg_path,
3726 (remote_mtime, remote_mtime))
# lock(): take an exclusive lock on the download target (the `def
# lock(self):` line is missing from this excerpt).
3735 This raises an AlreadyLocked exception if lock() is called
3736 while a lock is already held. In order to avoid this, call
3737 unlock() or check whether the "locked" attribute is True
3738 or False before calling lock().
3740 if self._lock_obj is not None:
3741 raise self.AlreadyLocked((self._lock_obj,))
3743 self._lock_obj = portage.locks.lockfile(
3744 self.pkg_path, wantnewlockfile=1)
3747 class AlreadyLocked(portage.exception.PortageException):
# unlock(): (def line missing from this excerpt.)
3751 if self._lock_obj is None:
3753 portage.locks.unlockfile(self._lock_obj)
3754 self._lock_obj = None
# BinpkgVerifier: digest-checks a downloaded binary package.  Despite
# the AsynchronousTask interface, all work happens synchronously in
# start(); a failed file is renamed aside rather than deleted.
3757 class BinpkgVerifier(AsynchronousTask):
3758 __slots__ = ("logfile", "pkg",)
# start(): (def line missing from this excerpt.)
3762 Note: Unlike a normal AsynchronousTask.start() method,
3763 this one does all work is synchronously. The returncode
3764 attribute will be set before it returns.
3768 root_config = pkg.root_config
3769 bintree = root_config.trees["bintree"]
# Temporarily redirect stdout/stderr to the log file in background
# mode; restored below.
3771 stdout_orig = sys.stdout
3772 stderr_orig = sys.stderr
3774 if self.background and self.logfile is not None:
3775 log_file = open(self.logfile, 'a')
3777 if log_file is not None:
3778 sys.stdout = log_file
3779 sys.stderr = log_file
3781 bintree.digestCheck(pkg)
3782 except portage.exception.FileNotFound:
3783 writemsg("!!! Fetching Binary failed " + \
3784 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3786 except portage.exception.DigestException, e:
3787 writemsg("\n!!! Digest verification failed:\n",
3789 writemsg("!!! %s\n" % e.value[0],
3791 writemsg("!!! Reason: %s\n" % e.value[1],
3793 writemsg("!!! Got: %s\n" % e.value[2],
3795 writemsg("!!! Expected: %s\n" % e.value[3],
3798 if rval != os.EX_OK:
# Move the corrupt file out of the way so a re-fetch can proceed.
3799 pkg_path = bintree.getname(pkg.cpv)
3800 head, tail = os.path.split(pkg_path)
3801 temp_filename = portage._checksum_failure_temp_file(head, tail)
3802 writemsg("File renamed to '%s'\n" % (temp_filename,),
3805 sys.stdout = stdout_orig
3806 sys.stderr = stderr_orig
3807 if log_file is not None:
3810 self.returncode = rval
3813 class BinpkgPrefetcher(CompositeTask):
3815 __slots__ = ("pkg",) + \
3816 ("pkg_path", "_bintree",)
3819 self._bintree = self.pkg.root_config.trees["bintree"]
3820 fetcher = BinpkgFetcher(background=self.background,
3821 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3822 scheduler=self.scheduler)
3823 self.pkg_path = fetcher.pkg_path
3824 self._start_task(fetcher, self._fetcher_exit)
3826 def _fetcher_exit(self, fetcher):
3828 if self._default_exit(fetcher) != os.EX_OK:
3832 verifier = BinpkgVerifier(background=self.background,
3833 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3834 self._start_task(verifier, self._verifier_exit)
3836 def _verifier_exit(self, verifier):
3837 if self._default_exit(verifier) != os.EX_OK:
3841 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3843 self._current_task = None
3844 self.returncode = os.EX_OK
3847 class BinpkgExtractorAsync(SpawnProcess):
3849 __slots__ = ("image_dir", "pkg", "pkg_path")
3851 _shell_binary = portage.const.BASH_BINARY
3854 self.args = [self._shell_binary, "-c",
3855 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3856 (portage._shell_quote(self.pkg_path),
3857 portage._shell_quote(self.image_dir))]
3859 self.env = self.pkg.root_config.settings.environ()
3860 SpawnProcess._start(self)
3862 class MergeListItem(CompositeTask):
3865 TODO: For parallel scheduling, everything here needs asynchronous
3866 execution support (start, poll, and wait methods).
3869 __slots__ = ("args_set",
3870 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3871 "find_blockers", "logger", "mtimedb", "pkg",
3872 "pkg_count", "pkg_to_replace", "prefetcher",
3873 "settings", "statusMessage", "world_atom") + \
3879 build_opts = self.build_opts
3882 # uninstall, executed by self.merge()
3883 self.returncode = os.EX_OK
3887 args_set = self.args_set
3888 find_blockers = self.find_blockers
3889 logger = self.logger
3890 mtimedb = self.mtimedb
3891 pkg_count = self.pkg_count
3892 scheduler = self.scheduler
3893 settings = self.settings
3894 world_atom = self.world_atom
3895 ldpath_mtimes = mtimedb["ldpath"]
3897 action_desc = "Emerging"
3899 if pkg.type_name == "binary":
3900 action_desc += " binary"
3902 if build_opts.fetchonly:
3903 action_desc = "Fetching"
3905 msg = "%s (%s of %s) %s" % \
3907 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3908 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3909 colorize("GOOD", pkg.cpv))
3911 portdb = pkg.root_config.trees["porttree"].dbapi
3912 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3913 if portdir_repo_name:
3914 pkg_repo_name = pkg.metadata.get("repository")
3915 if pkg_repo_name != portdir_repo_name:
3916 if not pkg_repo_name:
3917 pkg_repo_name = "unknown repo"
3918 msg += " from %s" % pkg_repo_name
3921 msg += " %s %s" % (preposition, pkg.root)
3923 if not build_opts.pretend:
3924 self.statusMessage(msg)
3925 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3926 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3928 if pkg.type_name == "ebuild":
3930 build = EbuildBuild(args_set=args_set,
3931 background=self.background,
3932 config_pool=self.config_pool,
3933 find_blockers=find_blockers,
3934 ldpath_mtimes=ldpath_mtimes, logger=logger,
3935 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3936 prefetcher=self.prefetcher, scheduler=scheduler,
3937 settings=settings, world_atom=world_atom)
3939 self._install_task = build
3940 self._start_task(build, self._default_final_exit)
3943 elif pkg.type_name == "binary":
3945 binpkg = Binpkg(background=self.background,
3946 find_blockers=find_blockers,
3947 ldpath_mtimes=ldpath_mtimes, logger=logger,
3948 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3949 prefetcher=self.prefetcher, settings=settings,
3950 scheduler=scheduler, world_atom=world_atom)
3952 self._install_task = binpkg
3953 self._start_task(binpkg, self._default_final_exit)
3957 self._install_task.poll()
3958 return self.returncode
3961 self._install_task.wait()
3962 return self.returncode
3967 build_opts = self.build_opts
3968 find_blockers = self.find_blockers
3969 logger = self.logger
3970 mtimedb = self.mtimedb
3971 pkg_count = self.pkg_count
3972 prefetcher = self.prefetcher
3973 scheduler = self.scheduler
3974 settings = self.settings
3975 world_atom = self.world_atom
3976 ldpath_mtimes = mtimedb["ldpath"]
3979 if not (build_opts.buildpkgonly or \
3980 build_opts.fetchonly or build_opts.pretend):
3982 uninstall = PackageUninstall(background=self.background,
3983 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3984 pkg=pkg, scheduler=scheduler, settings=settings)
3987 retval = uninstall.wait()
3988 if retval != os.EX_OK:
3992 if build_opts.fetchonly or \
3993 build_opts.buildpkgonly:
3994 return self.returncode
3996 retval = self._install_task.install()
3999 class PackageMerge(AsynchronousTask):
4001 TODO: Implement asynchronous merge so that the scheduler can
4002 run while a merge is executing.
4005 __slots__ = ("merge",)
4009 pkg = self.merge.pkg
4010 pkg_count = self.merge.pkg_count
4013 action_desc = "Uninstalling"
4014 preposition = "from"
4016 action_desc = "Installing"
4019 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4022 msg += " %s %s" % (preposition, pkg.root)
4024 if not self.merge.build_opts.fetchonly and \
4025 not self.merge.build_opts.pretend and \
4026 not self.merge.build_opts.buildpkgonly:
4027 self.merge.statusMessage(msg)
4029 self.returncode = self.merge.merge()
4032 class DependencyArg(object):
4033 def __init__(self, arg=None, root_config=None):
4035 self.root_config = root_config
4038 return str(self.arg)
4040 class AtomArg(DependencyArg):
4041 def __init__(self, atom=None, **kwargs):
4042 DependencyArg.__init__(self, **kwargs)
4044 if not isinstance(self.atom, portage.dep.Atom):
4045 self.atom = portage.dep.Atom(self.atom)
4046 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A dependency argument that wraps one specific Package instance.

	The package is pinned with an exact-version atom ("=" + cpv), and
	``set`` exposes that atom as a one-element tuple, mirroring AtomArg.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom,)
4055 class SetArg(DependencyArg):
4056 def __init__(self, set=None, **kwargs):
4057 DependencyArg.__init__(self, **kwargs)
4059 self.name = self.arg[len(SETPREFIX):]
4061 class Dependency(SlotObject):
4062 __slots__ = ("atom", "blocker", "depth",
4063 "parent", "onlydeps", "priority", "root")
4064 def __init__(self, **kwargs):
4065 SlotObject.__init__(self, **kwargs)
4066 if self.priority is None:
4067 self.priority = DepPriority()
4068 if self.depth is None:
4071 class BlockerCache(portage.cache.mappings.MutableMapping):
4072 """This caches blockers of installed packages so that dep_check does not
4073 have to be done for every single installed package on every invocation of
4074 emerge. The cache is invalidated whenever it is detected that something
4075 has changed that might alter the results of dep_check() calls:
4076 1) the set of installed packages (including COUNTER) has changed
4077 2) the old-style virtuals have changed
4080 # Number of uncached packages to trigger cache update, since
4081 # it's wasteful to update it for every vdb change.
4082 _cache_threshold = 5
4084 class BlockerData(object):
4086 __slots__ = ("__weakref__", "atoms", "counter")
4088 def __init__(self, counter, atoms):
4089 self.counter = counter
4092 def __init__(self, myroot, vardb):
4094 self._virtuals = vardb.settings.getvirtuals()
4095 self._cache_filename = os.path.join(myroot,
4096 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4097 self._cache_version = "1"
4098 self._cache_data = None
4099 self._modified = set()
4104 f = open(self._cache_filename, mode='rb')
4105 mypickle = pickle.Unpickler(f)
4107 mypickle.find_global = None
4108 except AttributeError:
4109 # TODO: If py3k, override Unpickler.find_class().
4111 self._cache_data = mypickle.load()
4114 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4115 if isinstance(e, pickle.UnpicklingError):
4116 writemsg("!!! Error loading '%s': %s\n" % \
4117 (self._cache_filename, str(e)), noiselevel=-1)
4120 cache_valid = self._cache_data and \
4121 isinstance(self._cache_data, dict) and \
4122 self._cache_data.get("version") == self._cache_version and \
4123 isinstance(self._cache_data.get("blockers"), dict)
4125 # Validate all the atoms and counters so that
4126 # corruption is detected as soon as possible.
4127 invalid_items = set()
4128 for k, v in self._cache_data["blockers"].iteritems():
4129 if not isinstance(k, basestring):
4130 invalid_items.add(k)
4133 if portage.catpkgsplit(k) is None:
4134 invalid_items.add(k)
4136 except portage.exception.InvalidData:
4137 invalid_items.add(k)
4139 if not isinstance(v, tuple) or \
4141 invalid_items.add(k)
4144 if not isinstance(counter, (int, long)):
4145 invalid_items.add(k)
4147 if not isinstance(atoms, (list, tuple)):
4148 invalid_items.add(k)
4150 invalid_atom = False
4152 if not isinstance(atom, basestring):
4155 if atom[:1] != "!" or \
4156 not portage.isvalidatom(
4157 atom, allow_blockers=True):
4161 invalid_items.add(k)
4164 for k in invalid_items:
4165 del self._cache_data["blockers"][k]
4166 if not self._cache_data["blockers"]:
4170 self._cache_data = {"version":self._cache_version}
4171 self._cache_data["blockers"] = {}
4172 self._cache_data["virtuals"] = self._virtuals
4173 self._modified.clear()
4176 """If the current user has permission and the internal blocker cache
4177 been updated, save it to disk and mark it unmodified. This is called
4178 by emerge after it has proccessed blockers for all installed packages.
4179 Currently, the cache is only written if the user has superuser
4180 privileges (since that's required to obtain a lock), but all users
4181 have read access and benefit from faster blocker lookups (as long as
4182 the entire cache is still valid). The cache is stored as a pickled
4183 dict object with the following format:
4187 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4188 "virtuals" : vardb.settings.getvirtuals()
4191 if len(self._modified) >= self._cache_threshold and \
4194 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4195 pickle.dump(self._cache_data, f, protocol=2)
4197 portage.util.apply_secpass_permissions(
4198 self._cache_filename, gid=portage.portage_gid, mode=0644)
4199 except (IOError, OSError), e:
4201 self._modified.clear()
4203 def __setitem__(self, cpv, blocker_data):
4205 Update the cache and mark it as modified for a future call to
4208 @param cpv: Package for which to cache blockers.
4210 @param blocker_data: An object with counter and atoms attributes.
4211 @type blocker_data: BlockerData
4213 self._cache_data["blockers"][cpv] = \
4214 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4215 self._modified.add(cpv)
4218 if self._cache_data is None:
4219 # triggered by python-trace
4221 return iter(self._cache_data["blockers"])
def __delitem__(self, cpv):
	"""Remove the cached blocker entry for *cpv* (KeyError if absent)."""
	blockers = self._cache_data["blockers"]
	del blockers[cpv]
4226 def __getitem__(self, cpv):
4229 @returns: An object with counter and atoms attributes.
4231 return self.BlockerData(*self._cache_data["blockers"][cpv])
4233 class BlockerDB(object):
4235 def __init__(self, root_config):
4236 self._root_config = root_config
4237 self._vartree = root_config.trees["vartree"]
4238 self._portdb = root_config.trees["porttree"].dbapi
4240 self._dep_check_trees = None
4241 self._fake_vartree = None
4243 def _get_fake_vartree(self, acquire_lock=0):
4244 fake_vartree = self._fake_vartree
4245 if fake_vartree is None:
4246 fake_vartree = FakeVartree(self._root_config,
4247 acquire_lock=acquire_lock)
4248 self._fake_vartree = fake_vartree
4249 self._dep_check_trees = { self._vartree.root : {
4250 "porttree" : fake_vartree,
4251 "vartree" : fake_vartree,
4254 fake_vartree.sync(acquire_lock=acquire_lock)
4257 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4258 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4259 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4260 settings = self._vartree.settings
4261 stale_cache = set(blocker_cache)
4262 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4263 dep_check_trees = self._dep_check_trees
4264 vardb = fake_vartree.dbapi
4265 installed_pkgs = list(vardb)
4267 for inst_pkg in installed_pkgs:
4268 stale_cache.discard(inst_pkg.cpv)
4269 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4270 if cached_blockers is not None and \
4271 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4272 cached_blockers = None
4273 if cached_blockers is not None:
4274 blocker_atoms = cached_blockers.atoms
4276 # Use aux_get() to trigger FakeVartree global
4277 # updates on *DEPEND when appropriate.
4278 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4280 portage.dep._dep_check_strict = False
4281 success, atoms = portage.dep_check(depstr,
4282 vardb, settings, myuse=inst_pkg.use.enabled,
4283 trees=dep_check_trees, myroot=inst_pkg.root)
4285 portage.dep._dep_check_strict = True
4287 pkg_location = os.path.join(inst_pkg.root,
4288 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4289 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4290 (pkg_location, atoms), noiselevel=-1)
4293 blocker_atoms = [atom for atom in atoms \
4294 if atom.startswith("!")]
4295 blocker_atoms.sort()
4296 counter = long(inst_pkg.metadata["COUNTER"])
4297 blocker_cache[inst_pkg.cpv] = \
4298 blocker_cache.BlockerData(counter, blocker_atoms)
4299 for cpv in stale_cache:
4300 del blocker_cache[cpv]
4301 blocker_cache.flush()
4303 blocker_parents = digraph()
4305 for pkg in installed_pkgs:
4306 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4307 blocker_atom = blocker_atom.lstrip("!")
4308 blocker_atoms.append(blocker_atom)
4309 blocker_parents.add(blocker_atom, pkg)
4311 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4312 blocking_pkgs = set()
4313 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4314 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4316 # Check for blockers in the other direction.
4317 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4319 portage.dep._dep_check_strict = False
4320 success, atoms = portage.dep_check(depstr,
4321 vardb, settings, myuse=new_pkg.use.enabled,
4322 trees=dep_check_trees, myroot=new_pkg.root)
4324 portage.dep._dep_check_strict = True
4326 # We should never get this far with invalid deps.
4327 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4330 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4333 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4334 for inst_pkg in installed_pkgs:
4336 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4337 except (portage.exception.InvalidDependString, StopIteration):
4339 blocking_pkgs.add(inst_pkg)
4341 return blocking_pkgs
4343 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4345 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4346 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4347 p_type, p_root, p_key, p_status = parent_node
4349 if p_status == "nomerge":
4350 category, pf = portage.catsplit(p_key)
4351 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4352 msg.append("Portage is unable to process the dependencies of the ")
4353 msg.append("'%s' package. " % p_key)
4354 msg.append("In order to correct this problem, the package ")
4355 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4356 msg.append("As a temporary workaround, the --nodeps option can ")
4357 msg.append("be used to ignore all dependencies. For reference, ")
4358 msg.append("the problematic dependencies can be found in the ")
4359 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4361 msg.append("This package can not be installed. ")
4362 msg.append("Please notify the '%s' package maintainer " % p_key)
4363 msg.append("about this problem.")
4365 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4366 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4368 class PackageVirtualDbapi(portage.dbapi):
4370 A dbapi-like interface class that represents the state of the installed
4371 package database as new packages are installed, replacing any packages
4372 that previously existed in the same slot. The main difference between
4373 this class and fakedbapi is that this one uses Package instances
4374 internally (passed in via cpv_inject() and cpv_remove() calls).
4376 def __init__(self, settings):
4377 portage.dbapi.__init__(self)
4378 self.settings = settings
4379 self._match_cache = {}
4385 Remove all packages.
4389 self._cp_map.clear()
4390 self._cpv_map.clear()
4393 obj = PackageVirtualDbapi(self.settings)
4394 obj._match_cache = self._match_cache.copy()
4395 obj._cp_map = self._cp_map.copy()
4396 for k, v in obj._cp_map.iteritems():
4397 obj._cp_map[k] = v[:]
4398 obj._cpv_map = self._cpv_map.copy()
4402 return self._cpv_map.itervalues()
4404 def __contains__(self, item):
4405 existing = self._cpv_map.get(item.cpv)
4406 if existing is not None and \
4411 def get(self, item, default=None):
4412 cpv = getattr(item, "cpv", None)
4416 type_name, root, cpv, operation = item
4418 existing = self._cpv_map.get(cpv)
4419 if existing is not None and \
def match_pkgs(self, atom):
	"""Return the Package instances matched by *atom* (see match())."""
	pkgs = []
	for matched_cpv in self.match(atom):
		pkgs.append(self._cpv_map[matched_cpv])
	return pkgs
def _clear_cache(self):
	"""Invalidate memoized lookups (category list and match results)."""
	categories = self._categories
	if categories is not None:
		self._categories = None
	match_cache = self._match_cache
	# Replace rather than mutate, so previously handed-out references
	# keep their old contents.
	if match_cache:
		self._match_cache = {}
4433 def match(self, origdep, use_cache=1):
4434 result = self._match_cache.get(origdep)
4435 if result is not None:
4437 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4438 self._match_cache[origdep] = result
def cpv_exists(self, cpv):
	"""Return True when *cpv* has an entry in this virtual vdb."""
	known_packages = self._cpv_map
	return cpv in known_packages
4444 def cp_list(self, mycp, use_cache=1):
4445 cachelist = self._match_cache.get(mycp)
4446 # cp_list() doesn't expand old-style virtuals
4447 if cachelist and cachelist[0].startswith(mycp):
4449 cpv_list = self._cp_map.get(mycp)
4450 if cpv_list is None:
4453 cpv_list = [pkg.cpv for pkg in cpv_list]
4454 self._cpv_sort_ascending(cpv_list)
4455 if not (not cpv_list and mycp.startswith("virtual/")):
4456 self._match_cache[mycp] = cpv_list
4460 return list(self._cp_map)
4463 return list(self._cpv_map)
4465 def cpv_inject(self, pkg):
4466 cp_list = self._cp_map.get(pkg.cp)
4469 self._cp_map[pkg.cp] = cp_list
4470 e_pkg = self._cpv_map.get(pkg.cpv)
4471 if e_pkg is not None:
4474 self.cpv_remove(e_pkg)
4475 for e_pkg in cp_list:
4476 if e_pkg.slot_atom == pkg.slot_atom:
4479 self.cpv_remove(e_pkg)
4482 self._cpv_map[pkg.cpv] = pkg
4485 def cpv_remove(self, pkg):
4486 old_pkg = self._cpv_map.get(pkg.cpv)
4489 self._cp_map[pkg.cp].remove(pkg)
4490 del self._cpv_map[pkg.cpv]
def aux_get(self, cpv, wants):
	"""Return metadata values for *cpv*, in the order given by *wants*.

	Missing keys yield the empty string (KeyError only if *cpv* itself
	is unknown).
	"""
	pkg_metadata = self._cpv_map[cpv].metadata
	values = []
	for key in wants:
		values.append(pkg_metadata.get(key, ""))
	return values
def aux_update(self, cpv, values):
	"""Merge the *values* mapping into the metadata of the package at *cpv*."""
	pkg = self._cpv_map[cpv]
	pkg.metadata.update(values)
4501 class depgraph(object):
4503 pkg_tree_map = RootConfig.pkg_tree_map
4505 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4507 def __init__(self, settings, trees, myopts, myparams, spinner):
4508 self.settings = settings
4509 self.target_root = settings["ROOT"]
4510 self.myopts = myopts
4511 self.myparams = myparams
4513 if settings.get("PORTAGE_DEBUG", "") == "1":
4515 self.spinner = spinner
4516 self._running_root = trees["/"]["root_config"]
4517 self._opts_no_restart = Scheduler._opts_no_restart
4518 self.pkgsettings = {}
4519 # Maps slot atom to package for each Package added to the graph.
4520 self._slot_pkg_map = {}
4521 # Maps nodes to the reasons they were selected for reinstallation.
4522 self._reinstall_nodes = {}
4525 self._trees_orig = trees
4527 # Contains a filtered view of preferred packages that are selected
4528 # from available repositories.
4529 self._filtered_trees = {}
4530 # Contains installed packages and new packages that have been added
4532 self._graph_trees = {}
4533 # All Package instances
4534 self._pkg_cache = {}
4535 for myroot in trees:
4536 self.trees[myroot] = {}
4537 # Create a RootConfig instance that references
4538 # the FakeVartree instead of the real one.
4539 self.roots[myroot] = RootConfig(
4540 trees[myroot]["vartree"].settings,
4542 trees[myroot]["root_config"].setconfig)
4543 for tree in ("porttree", "bintree"):
4544 self.trees[myroot][tree] = trees[myroot][tree]
4545 self.trees[myroot]["vartree"] = \
4546 FakeVartree(trees[myroot]["root_config"],
4547 pkg_cache=self._pkg_cache)
4548 self.pkgsettings[myroot] = portage.config(
4549 clone=self.trees[myroot]["vartree"].settings)
4550 self._slot_pkg_map[myroot] = {}
4551 vardb = self.trees[myroot]["vartree"].dbapi
4552 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4553 "--buildpkgonly" not in self.myopts
4554 # This fakedbapi instance will model the state that the vdb will
4555 # have after new packages have been installed.
4556 fakedb = PackageVirtualDbapi(vardb.settings)
4557 if preload_installed_pkgs:
4559 self.spinner.update()
4560 # This triggers metadata updates via FakeVartree.
4561 vardb.aux_get(pkg.cpv, [])
4562 fakedb.cpv_inject(pkg)
4564 # Now that the vardb state is cached in our FakeVartree,
4565 # we won't be needing the real vartree cache for awhile.
4566 # To make some room on the heap, clear the vardbapi
4568 trees[myroot]["vartree"].dbapi._clear_cache()
4571 self.mydbapi[myroot] = fakedb
4574 graph_tree.dbapi = fakedb
4575 self._graph_trees[myroot] = {}
4576 self._filtered_trees[myroot] = {}
4577 # Substitute the graph tree for the vartree in dep_check() since we
4578 # want atom selections to be consistent with package selections
4579 # have already been made.
4580 self._graph_trees[myroot]["porttree"] = graph_tree
4581 self._graph_trees[myroot]["vartree"] = graph_tree
4582 def filtered_tree():
4584 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4585 self._filtered_trees[myroot]["porttree"] = filtered_tree
4587 # Passing in graph_tree as the vartree here could lead to better
4588 # atom selections in some cases by causing atoms for packages that
4589 # have been added to the graph to be preferred over other choices.
4590 # However, it can trigger atom selections that result in
4591 # unresolvable direct circular dependencies. For example, this
4592 # happens with gwydion-dylan which depends on either itself or
4593 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4594 # gwydion-dylan-bin needs to be selected in order to avoid a
4595 # an unresolvable direct circular dependency.
4597 # To solve the problem described above, pass in "graph_db" so that
4598 # packages that have been added to the graph are distinguishable
4599 # from other available packages and installed packages. Also, pass
4600 # the parent package into self._select_atoms() calls so that
4601 # unresolvable direct circular dependencies can be detected and
4602 # avoided when possible.
4603 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4604 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4607 portdb = self.trees[myroot]["porttree"].dbapi
4608 bindb = self.trees[myroot]["bintree"].dbapi
4609 vardb = self.trees[myroot]["vartree"].dbapi
4610 # (db, pkg_type, built, installed, db_keys)
4611 if "--usepkgonly" not in self.myopts:
4612 db_keys = list(portdb._aux_cache_keys)
4613 dbs.append((portdb, "ebuild", False, False, db_keys))
4614 if "--usepkg" in self.myopts:
4615 db_keys = list(bindb._aux_cache_keys)
4616 dbs.append((bindb, "binary", True, False, db_keys))
4617 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4618 dbs.append((vardb, "installed", True, True, db_keys))
4619 self._filtered_trees[myroot]["dbs"] = dbs
4620 if "--usepkg" in self.myopts:
4621 self.trees[myroot]["bintree"].populate(
4622 "--getbinpkg" in self.myopts,
4623 "--getbinpkgonly" in self.myopts)
4626 self.digraph=portage.digraph()
4627 # contains all sets added to the graph
4629 # contains atoms given as arguments
4630 self._sets["args"] = InternalPackageSet()
4631 # contains all atoms from all sets added to the graph, including
4632 # atoms given as arguments
4633 self._set_atoms = InternalPackageSet()
4634 self._atom_arg_map = {}
4635 # contains all nodes pulled in by self._set_atoms
4636 self._set_nodes = set()
4637 # Contains only Blocker -> Uninstall edges
4638 self._blocker_uninstalls = digraph()
4639 # Contains only Package -> Blocker edges
4640 self._blocker_parents = digraph()
4641 # Contains only irrelevant Package -> Blocker edges
4642 self._irrelevant_blockers = digraph()
4643 # Contains only unsolvable Package -> Blocker edges
4644 self._unsolvable_blockers = digraph()
4645 # Contains all Blocker -> Blocked Package edges
4646 self._blocked_pkgs = digraph()
4647 # Contains world packages that have been protected from
4648 # uninstallation but may not have been added to the graph
4649 # if the graph is not complete yet.
4650 self._blocked_world_pkgs = {}
4651 self._slot_collision_info = {}
4652 # Slot collision nodes are not allowed to block other packages since
4653 # blocker validation is only able to account for one package per slot.
4654 self._slot_collision_nodes = set()
4655 self._parent_atoms = {}
4656 self._slot_conflict_parent_atoms = set()
4657 self._serialized_tasks_cache = None
4658 self._scheduler_graph = None
4659 self._displayed_list = None
4660 self._pprovided_args = []
4661 self._missing_args = []
4662 self._masked_installed = set()
4663 self._unsatisfied_deps_for_display = []
4664 self._unsatisfied_blockers_for_display = None
4665 self._circular_deps_for_display = None
4666 self._dep_stack = []
4667 self._unsatisfied_deps = []
4668 self._initially_unsatisfied_deps = []
4669 self._ignored_deps = []
4670 self._required_set_names = set(["system", "world"])
4671 self._select_atoms = self._select_atoms_highest_available
4672 self._select_package = self._select_pkg_highest_available
4673 self._highest_pkg_cache = {}
4675 def _show_slot_collision_notice(self):
4676 """Show an informational message advising the user to mask one of the
4677 the packages. In some cases it may be possible to resolve this
4678 automatically, but support for backtracking (removal nodes that have
4679 already been selected) will be required in order to handle all possible
4683 if not self._slot_collision_info:
4686 self._show_merge_list()
4689 msg.append("\n!!! Multiple package instances within a single " + \
4690 "package slot have been pulled\n")
4691 msg.append("!!! into the dependency graph, resulting" + \
4692 " in a slot conflict:\n\n")
4694 # Max number of parents shown, to avoid flooding the display.
4696 explanation_columns = 70
4698 for (slot_atom, root), slot_nodes \
4699 in self._slot_collision_info.iteritems():
4700 msg.append(str(slot_atom))
4703 for node in slot_nodes:
4705 msg.append(str(node))
4706 parent_atoms = self._parent_atoms.get(node)
4709 # Prefer conflict atoms over others.
4710 for parent_atom in parent_atoms:
4711 if len(pruned_list) >= max_parents:
4713 if parent_atom in self._slot_conflict_parent_atoms:
4714 pruned_list.add(parent_atom)
4716 # If this package was pulled in by conflict atoms then
4717 # show those alone since those are the most interesting.
4719 # When generating the pruned list, prefer instances
4720 # of DependencyArg over instances of Package.
4721 for parent_atom in parent_atoms:
4722 if len(pruned_list) >= max_parents:
4724 parent, atom = parent_atom
4725 if isinstance(parent, DependencyArg):
4726 pruned_list.add(parent_atom)
4727 # Prefer Packages instances that themselves have been
4728 # pulled into collision slots.
4729 for parent_atom in parent_atoms:
4730 if len(pruned_list) >= max_parents:
4732 parent, atom = parent_atom
4733 if isinstance(parent, Package) and \
4734 (parent.slot_atom, parent.root) \
4735 in self._slot_collision_info:
4736 pruned_list.add(parent_atom)
4737 for parent_atom in parent_atoms:
4738 if len(pruned_list) >= max_parents:
4740 pruned_list.add(parent_atom)
4741 omitted_parents = len(parent_atoms) - len(pruned_list)
4742 parent_atoms = pruned_list
4743 msg.append(" pulled in by\n")
4744 for parent_atom in parent_atoms:
4745 parent, atom = parent_atom
4746 msg.append(2*indent)
4747 if isinstance(parent,
4748 (PackageArg, AtomArg)):
4749 # For PackageArg and AtomArg types, it's
4750 # redundant to display the atom attribute.
4751 msg.append(str(parent))
4753 # Display the specific atom from SetArg or
4755 msg.append("%s required by %s" % (atom, parent))
4758 msg.append(2*indent)
4759 msg.append("(and %d more)\n" % omitted_parents)
4761 msg.append(" (no parents)\n")
4763 explanation = self._slot_conflict_explanation(slot_nodes)
4766 msg.append(indent + "Explanation:\n\n")
4767 for line in textwrap.wrap(explanation, explanation_columns):
4768 msg.append(2*indent + line + "\n")
4771 sys.stderr.write("".join(msg))
4774 explanations_for_all = explanations == len(self._slot_collision_info)
4776 if explanations_for_all or "--quiet" in self.myopts:
4780 msg.append("It may be possible to solve this problem ")
4781 msg.append("by using package.mask to prevent one of ")
4782 msg.append("those packages from being selected. ")
4783 msg.append("However, it is also possible that conflicting ")
4784 msg.append("dependencies exist such that they are impossible to ")
4785 msg.append("satisfy simultaneously. If such a conflict exists in ")
4786 msg.append("the dependencies of two different packages, then those ")
4787 msg.append("packages can not be installed simultaneously.")
4789 from formatter import AbstractFormatter, DumbWriter
4790 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4792 f.add_flowing_data(x)
4796 msg.append("For more information, see MASKED PACKAGES ")
4797 msg.append("section in the emerge man page or refer ")
4798 msg.append("to the Gentoo Handbook.")
4800 f.add_flowing_data(x)
4804 def _slot_conflict_explanation(self, slot_nodes):
4806 When a slot conflict occurs due to USE deps, there are a few
4807 different cases to consider:
4809 1) New USE are correctly set but --newuse wasn't requested so an
4810 installed package with incorrect USE happened to get pulled
4811 into graph before the new one.
4813 2) New USE are incorrectly set but an installed package has correct
4814 USE so it got pulled into the graph, and a new instance also got
4815 pulled in due to --newuse or an upgrade.
4817 3) Multiple USE deps exist that can't be satisfied simultaneously,
4818 and multiple package instances got pulled into the same slot to
4819 satisfy the conflicting deps.
4821 Currently, explanations and suggested courses of action are generated
4822 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4825 if len(slot_nodes) != 2:
4826 # Suggestions are only implemented for
4827 # conflicts between two packages.
4830 all_conflict_atoms = self._slot_conflict_parent_atoms
4832 matched_atoms = None
4833 unmatched_node = None
4834 for node in slot_nodes:
4835 parent_atoms = self._parent_atoms.get(node)
4836 if not parent_atoms:
4837 # Normally, there are always parent atoms. If there are
4838 # none then something unexpected is happening and there's
4839 # currently no suggestion for this case.
4841 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4842 for parent_atom in conflict_atoms:
4843 parent, atom = parent_atom
4845 # Suggestions are currently only implemented for cases
4846 # in which all conflict atoms have USE deps.
4849 if matched_node is not None:
4850 # If conflict atoms match multiple nodes
4851 # then there's no suggestion.
4854 matched_atoms = conflict_atoms
4856 if unmatched_node is not None:
4857 # Neither node is matched by conflict atoms, and
4858 # there is no suggestion for this case.
4860 unmatched_node = node
4862 if matched_node is None or unmatched_node is None:
4863 # This shouldn't happen.
4866 if unmatched_node.installed and not matched_node.installed and \
4867 unmatched_node.cpv == matched_node.cpv:
4868 # If the conflicting packages are the same version then
4869 # --newuse should be all that's needed. If they are different
4870 # versions then there's some other problem.
4871 return "New USE are correctly set, but --newuse wasn't" + \
4872 " requested, so an installed package with incorrect USE " + \
4873 "happened to get pulled into the dependency graph. " + \
4874 "In order to solve " + \
4875 "this, either specify the --newuse option or explicitly " + \
4876 " reinstall '%s'." % matched_node.slot_atom
4878 if matched_node.installed and not unmatched_node.installed:
4879 atoms = sorted(set(atom for parent, atom in matched_atoms))
4880 explanation = ("New USE for '%s' are incorrectly set. " + \
4881 "In order to solve this, adjust USE to satisfy '%s'") % \
4882 (matched_node.slot_atom, atoms[0])
4884 for atom in atoms[1:-1]:
4885 explanation += ", '%s'" % (atom,)
4888 explanation += " and '%s'" % (atoms[-1],)
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.

		Populates self._slot_conflict_parent_atoms and each package's
		entry in self._parent_atoms.
		"""
		# NOTE(review): some control-flow lines (continue/else) appear to be
		# elided in this excerpt; verify against the complete file.
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Collect the parent atoms of every package in this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
				all_parent_atoms.update(parent_atoms)

			# For each package, test every collected parent atom against it
			# and record the atoms that only match a subset of the slot.
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
					self._slot_conflict_parent_atoms.add(parent_atom)
4928 def _reinstall_for_flags(self, forced_flags,
4929 orig_use, orig_iuse, cur_use, cur_iuse):
4930 """Return a set of flags that trigger reinstallation, or None if there
4931 are no such flags."""
4932 if "--newuse" in self.myopts:
4933 flags = set(orig_iuse.symmetric_difference(
4934 cur_iuse).difference(forced_flags))
4935 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4936 cur_iuse.intersection(cur_use)))
4939 elif "changed-use" == self.myopts.get("--reinstall"):
4940 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4941 cur_iuse.intersection(cur_use))
	def _create_graph(self, allow_unsatisfied=False):
		"""
		Drain the dependency stack, dispatching each entry: Package
		objects get their dependencies expanded via _add_pkg_deps(),
		other Dependency entries are resolved via _add_dep().
		"""
		# NOTE(review): the loop header and early-return lines appear to be
		# elided in this excerpt; verify against the complete file.
		dep_stack = self._dep_stack
		self.spinner.update()
		dep = dep_stack.pop()
		if isinstance(dep, Package):
			if not self._add_pkg_deps(dep,
				allow_unsatisfied=allow_unsatisfied):
		if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""
		Resolve a single Dependency into a package (or record a Blocker)
		and, unless it is ignored or unsatisfied, add it to the graph via
		self._add_pkg().
		"""
		# NOTE(review): several control-flow lines (the blocker guard,
		# try statements, returns) appear elided in this excerpt; verify
		# against the complete file.
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		update = "--update" in self.myopts and dep.depth <= 1
		if not buildpkgonly and \
			dep.parent not in self._slot_collision_nodes:
			if dep.parent.onlydeps:
				# It's safe to ignore blockers if the
				# parent is an --onlydeps node.
			# The blocker applies to the root where
			# the parent is or will be installed.
			blocker = Blocker(atom=dep.atom,
				eapi=dep.parent.metadata["EAPI"],
				root=dep.parent.root)
			self._blocker_parents.add(blocker, dep.parent)
		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
		if dep.priority.optional:
			# This could be an unnecessary build-time dep
			# pulled in by --with-bdeps=y.
		if allow_unsatisfied:
			self._unsatisfied_deps.append(dep)
		self._unsatisfied_deps_for_display.append(
			((dep.root, dep.atom), {"myparent":dep.parent}))
		# In some cases, dep_check will return deps that shouldn't
		# be processed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or empty or deep or update):
			if dep.root == self.target_root:
					myarg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
			self._ignored_deps.append(dep)
		if not self._add_pkg(dep_pkg, dep):
	def _add_pkg(self, pkg, dep):
		"""
		Add a Package node to the dependency graph, handling slot
		collisions, argument atoms, and old-style virtual registration,
		then push the package onto the dep stack for later dependency
		expansion unless it was previously added.
		"""
		# NOTE(review): many lines of this excerpt (early returns, if/else
		# headers, try statements) appear elided; verify the exact flow
		# against the complete file.
		myparent = dep.parent
		priority = dep.priority
		if priority is None:
			priority = DepPriority()
		"""
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
		Think --onlydeps, we need to ignore packages in that case.
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge
		"""
		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self.pkgsettings[pkg.root]

		arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString, e:
			if not pkg.installed:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		if not pkg.onlydeps:
			if not pkg.installed and \
				"empty" not in self.myparams and \
				vardbapi.match(pkg.slot_atom):
				# Increase the priority of dependencies on packages that
				# are being rebuilt. This optimizes merge order so that
				# dependencies are rebuilt/updated as soon as possible,
				# which is needed especially when emerge is called by
				# revdep-rebuild since dependencies may be affected by ABI
				# breakage that has rendered them useless. Don't adjust
				# priority here when in "empty" mode since all packages
				# are being merged in that case.
				priority.rebuild = True

			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
			slot_collision = False
			existing_node_matches = pkg.cpv == existing_node.cpv
			if existing_node_matches and \
				pkg != existing_node and \
				dep.atom is not None:
				# Use package set for matching since it will match via
				# PROVIDE when necessary, while match_from_list does not.
				atom_set = InternalPackageSet(initial_atoms=[dep.atom])
				if not atom_set.findAtomForPackage(existing_node):
					existing_node_matches = False
			if existing_node_matches:
				# The existing node can be reused.
				for parent_atom in arg_atoms:
					parent, atom = parent_atom
					self.digraph.add(existing_node, parent,
					self._add_parent_atom(existing_node, parent_atom)
				# If a direct circular dependency is not an unsatisfied
				# buildtime dependency then drop it here since otherwise
				# it can skew the merge order calculation in an unwanted
				# way.
				if existing_node != myparent or \
					(priority.buildtime and not priority.satisfied):
					self.digraph.addnode(existing_node, myparent,
					if dep.atom is not None and dep.parent is not None:
						self._add_parent_atom(existing_node,
							(dep.parent, dep.atom))

			# A slot collision has occurred. Sometimes this coincides
			# with unresolvable blockers, so the slot collision will be
			# shown later if there are no unresolvable blockers.
			self._add_slot_conflict(pkg)
			slot_collision = True

			# Now add this node to the graph so that self.display()
			# can show use flags and --tree portage.output. This node is
			# only being partially added to the graph. It must not be
			# allowed to interfere with the other nodes that have been
			# added. Do not overwrite data for existing nodes in
			# self.mydbapi since that data will be used for blocker
			# validation.
			# Even though the graph is now invalid, continue to process
			# dependencies so that things like --fetchonly can still
			# function despite collisions.

			elif not previously_added:
				self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
				self.mydbapi[pkg.root].cpv_inject(pkg)
				self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

			if not pkg.installed:
				# Allow this package to satisfy old-style virtuals in case it
				# doesn't already. Any pre-existing providers will be preferred
				# over this one.
				pkgsettings.setinst(pkg.cpv, pkg.metadata)
				# For consistency, also update the global virtuals.
				settings = self.roots[pkg.root].settings
				settings.setinst(pkg.cpv, pkg.metadata)
			except portage.exception.InvalidDependString, e:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		self._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

		for parent_atom in arg_atoms:
			parent, atom = parent_atom
			self.digraph.add(pkg, parent, priority=priority)
			self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		We want to go deeper on a few occasions:
		Installing package A, we need to make sure package A's deps are met.
		emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		dep_stack = self._dep_stack
		if "recurse" not in self.myparams:
		elif pkg.installed and \
			"deep" not in self.myparams:
			# Deps of already-installed packages go to the ignored list
			# unless --deep was requested.
			dep_stack = self._ignored_deps

		self.spinner.update()

		if not previously_added:
			dep_stack.append(pkg)
5190 def _add_parent_atom(self, pkg, parent_atom):
5191 parent_atoms = self._parent_atoms.get(pkg)
5192 if parent_atoms is None:
5193 parent_atoms = set()
5194 self._parent_atoms[pkg] = parent_atoms
5195 parent_atoms.add(parent_atom)
5197 def _add_slot_conflict(self, pkg):
5198 self._slot_collision_nodes.add(pkg)
5199 slot_key = (pkg.slot_atom, pkg.root)
5200 slot_nodes = self._slot_collision_info.get(slot_key)
5201 if slot_nodes is None:
5203 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5204 self._slot_collision_info[slot_key] = slot_nodes
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		"""
		Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency
		objects and feed each one to self._add_dep().
		"""
		# NOTE(review): assignment and control-flow lines appear elided in
		# this excerpt (e.g. myroot/mykey/jbigkey bindings, the depkeys
		# loop header, try statements, the deps tuple header); verify
		# against the complete file.
		mytype = pkg.type_name
		metadata = pkg.metadata
		myuse = pkg.use.enabled
		depth = pkg.depth + 1
		removal_action = "remove" in self.myparams

		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
		edepend[k] = metadata[k]

		if not pkg.built and \
			"--buildpkgonly" in self.myopts and \
			"deep" not in self.myparams and \
			"empty" not in self.myparams:
			# With --buildpkgonly and shallow resolution, runtime deps are
			# irrelevant since nothing is merged to the live filesystem.
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""
		bdeps_optional = False

		if pkg.built and not removal_action:
			if self.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				bdeps_optional = True
			# built packages do not have build time dependencies.
			edepend["DEPEND"] = ""

		if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
			edepend["DEPEND"] = ""

		# (root, dep string, priority) triples: DEPEND applies to the
		# build root "/", RDEPEND/PDEPEND apply to the package's root.
			("/", edepend["DEPEND"],
				self._priority(buildtime=(not bdeps_optional),
				optional=bdeps_optional)),
			(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
			(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

		debug = "--debug" in self.myopts
		# Installed packages get lenient dep-string parsing.
		strict = mytype != "installed"

		for dep_root, dep_string, dep_priority in deps:
				print "Parent: ", jbigkey
				print "Depstring:", dep_string
				print "Priority:", dep_priority
			vardb = self.roots[dep_root].trees["vartree"].dbapi
			selected_atoms = self._select_atoms(dep_root,
				dep_string, myuse=myuse, parent=pkg, strict=strict,
				priority=dep_priority)
			except portage.exception.InvalidDependString, e:
				show_invalid_depstring_notice(jbigkey, dep_string, str(e))
				print "Candidates:", selected_atoms

			for atom in selected_atoms:
					atom = portage.dep.Atom(atom)

					mypriority = dep_priority.copy()
					# Deps already satisfied by an installed package are
					# marked so merge ordering can relax them.
					if not atom.blocker and vardb.match(atom):
						mypriority.satisfied = True

					if not self._add_dep(Dependency(atom=atom,
						blocker=atom.blocker, depth=depth, parent=pkg,
						priority=mypriority, root=dep_root),
						allow_unsatisfied=allow_unsatisfied):

				except portage.exception.InvalidAtom, e:
					show_invalid_depstring_notice(
						pkg, dep_string, str(e))
					if not pkg.installed:

			print "Exiting...", jbigkey
		except portage.exception.AmbiguousPackageName, e:
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
				portage.writemsg(" %s\n" % cpv, noiselevel=-1)
			portage.writemsg("\n", noiselevel=-1)
			if mytype == "binary":
					"!!! This binary package cannot be installed: '%s'\n" % \
					mykey, noiselevel=-1)
			elif mytype == "ebuild":
				portdb = self.roots[myroot].trees["porttree"].dbapi
				myebuild, mylocation = portdb.findname2(mykey)
				portage.writemsg("!!! This ebuild cannot be installed: " + \
					"'%s'\n" % myebuild, noiselevel=-1)
			portage.writemsg("!!! Please notify the package maintainer " + \
				"that atoms must be fully-qualified.\n", noiselevel=-1)
5323 def _priority(self, **kwargs):
5324 if "remove" in self.myparams:
5325 priority_constructor = UnmergeDepPriority
5327 priority_constructor = DepPriority
5328 return priority_constructor(**kwargs)
	def _dep_expand(self, root_config, atom_without_category):
		"""
		Expand a category-less atom into one atom per category that
		contains a matching package name, searching every configured db.

		@param root_config: a root config instance
		@type root_config: RootConfig
		@param atom_without_category: an atom without a category component
		@type atom_without_category: String
		@returns: a list of atoms containing categories (possibly empty)
		"""
		# NOTE(review): the "categories"/"deps" initializations and the
		# return statement appear elided in this excerpt; verify against
		# the complete file.
		null_cp = portage.dep_getkey(insert_category_into_atom(
			atom_without_category, "null"))
		cat, atom_pn = portage.catsplit(null_cp)

		dbs = self._filtered_trees[root_config.root]["dbs"]
		for db, pkg_type, built, installed, db_keys in dbs:
			for cat in db.categories:
				if db.cp_list("%s/%s" % (cat, atom_pn)):
		for cat in categories:
			deps.append(insert_category_into_atom(
				atom_without_category, cat))
	def _have_new_virt(self, root, atom_cp):
		"""
		Check whether any configured db for the given root carries a
		new-style virtual package matching atom_cp.
		"""
		# NOTE(review): the result-variable/return lines appear elided in
		# this excerpt; verify against the complete file.
		for db, pkg_type, built, installed, db_keys in \
			self._filtered_trees[root]["dbs"]:
			if db.cp_list(atom_cp):
	def _iter_atoms_for_pkg(self, pkg):
		"""
		Yield the command-line argument atoms that match pkg, skipping
		atoms shadowed by new-style virtuals or satisfied by a visible
		package in a higher slot.
		"""
		# NOTE(review): several continue/break/yield lines appear elided in
		# this excerpt (the final continuation line is cut mid-statement);
		# verify against the complete file.
		# TODO: add multiple $ROOT support
		if pkg.root != self.target_root:
		atom_arg_map = self._atom_arg_map
		root_config = self.roots[pkg.root]
		for atom in self._set_atoms.iterAtomsForPackage(pkg):
			atom_cp = portage.dep_getkey(atom)
			if atom_cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom_cp):
			visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			for visible_pkg in visible_pkgs:
				if visible_pkg.cp != atom_cp:
				if pkg >= visible_pkg:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
				if pkg.slot_atom != visible_pkg.slot_atom:
					higher_slot = visible_pkg
			if higher_slot is not None:
			for arg in atom_arg_map[(atom, pkg.root)]:
				if isinstance(arg, PackageArg) and \
	def select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
		appropriate depgraph and return a favorite list."""
		# NOTE(review): this excerpt is elided in many places (loop headers,
		# else branches, early returns, try statements, list
		# initializations); verify the exact control flow against the
		# complete file.
		debug = "--debug" in self.myopts
		root_config = self.roots[self.target_root]
		sets = root_config.sets
		getSetAtoms = root_config.setconfig.getSetAtoms
		myroot = self.target_root
		dbs = self._filtered_trees[myroot]["dbs"]
		vardb = self.trees[myroot]["vartree"].dbapi
		real_vardb = self._trees_orig[myroot]["vartree"].dbapi
		portdb = self.trees[myroot]["porttree"].dbapi
		bindb = self.trees[myroot]["bintree"].dbapi
		pkgsettings = self.pkgsettings[myroot]
		onlydeps = "--onlydeps" in self.myopts
		# Classify each argument: binary package file, ebuild file,
		# filesystem path (owner lookup), set name, or plain atom.
		ext = os.path.splitext(x)[1]
		if not os.path.exists(x):
			# Fall back to looking in PKGDIR/All and then PKGDIR itself.
			os.path.join(pkgsettings["PKGDIR"], "All", x)):
			x = os.path.join(pkgsettings["PKGDIR"], "All", x)
		elif os.path.exists(
			os.path.join(pkgsettings["PKGDIR"], x)):
			x = os.path.join(pkgsettings["PKGDIR"], x)
			print "\n\n!!! Binary package '"+str(x)+"' does not exist."
			print "!!! Please ensure the tbz2 exists as specified.\n"
			return 0, myfavorites
		mytbz2=portage.xpak.tbz2(x)
		mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
		if os.path.realpath(x) != \
			os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
			print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
			return 0, myfavorites
		db_keys = list(bindb._aux_cache_keys)
		metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
		pkg = Package(type_name="binary", root_config=root_config,
			cpv=mykey, built=True, metadata=metadata,
		self._pkg_cache[pkg] = pkg
		args.append(PackageArg(arg=x, package=pkg,
			root_config=root_config))
		elif ext==".ebuild":
			ebuild_path = portage.util.normalize_path(os.path.abspath(x))
			pkgdir = os.path.dirname(ebuild_path)
			tree_root = os.path.dirname(os.path.dirname(pkgdir))
			cp = pkgdir[len(tree_root)+1:]
			# Pre-built exception, raised when the path doesn't look like
			# a valid tree entry.
			e = portage.exception.PackageNotFound(
				("%s is not in a valid portage tree " + \
				"hierarchy or does not exist") % x)
			if not portage.isvalidatom(cp):
			cat = portage.catsplit(cp)[0]
			mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
			if not portage.isvalidatom("="+mykey):
			ebuild_path = portdb.findname(mykey)
			if ebuild_path != os.path.join(os.path.realpath(tree_root),
				cp, os.path.basename(ebuild_path)):
				print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
				return 0, myfavorites
			if mykey not in portdb.xmatch(
				"match-visible", portage.dep_getkey(mykey)):
				print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
				print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
				print colorize("BAD", "*** page for details.")
				countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
				raise portage.exception.PackageNotFound(
					"%s is not in a valid portage tree hierarchy or does not exist" % x)
			db_keys = list(portdb._aux_cache_keys)
			metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
			pkg = Package(type_name="ebuild", root_config=root_config,
				cpv=mykey, metadata=metadata, onlydeps=onlydeps)
			pkgsettings.setcpv(pkg)
			pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
			pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
			self._pkg_cache[pkg] = pkg
			args.append(PackageArg(arg=x, package=pkg,
				root_config=root_config))
		elif x.startswith(os.path.sep):
			if not x.startswith(myroot):
				portage.writemsg(("\n\n!!! '%s' does not start with" + \
					" $ROOT.\n") % x, noiselevel=-1)
			# Queue these up since it's most efficient to handle
			# multiple files in a single iter_owners() call.
			lookup_owners.append(x)
			if x in ("system", "world"):
			if x.startswith(SETPREFIX):
				s = x[len(SETPREFIX):]
					raise portage.exception.PackageSetNotFound(s)
				# Recursively expand sets so that containment tests in
				# self._get_parent_sets() properly match atoms in nested
				# sets (like if world contains system).
				expanded_set = InternalPackageSet(
					initial_atoms=getSetAtoms(s))
				self._sets[s] = expanded_set
				args.append(SetArg(arg=x, set=expanded_set,
					root_config=root_config))
			if not is_valid_package_atom(x):
				portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
				portage.writemsg("!!! Please check ebuild(5) for full details.\n")
				portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
			# Don't expand categories or old-style virtuals here unless
			# necessary. Expansion of old-style virtuals here causes at
			# least the following problems:
			#   1) It's more difficult to determine which set(s) an atom
			#      came from, if any.
			#   2) It takes away freedom from the resolver to choose other
			#      possible expansions when necessary.
			args.append(AtomArg(arg=x, atom=x,
				root_config=root_config))
			expanded_atoms = self._dep_expand(root_config, x)
			installed_cp_set = set()
			for atom in expanded_atoms:
				atom_cp = portage.dep_getkey(atom)
				if vardb.cp_list(atom_cp):
					installed_cp_set.add(atom_cp)
			# When the bare name is ambiguous across categories, prefer
			# the category of the single installed match.
			if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
				installed_cp = iter(installed_cp_set).next()
				expanded_atoms = [atom for atom in expanded_atoms \
					if portage.dep_getkey(atom) == installed_cp]

			if len(expanded_atoms) > 1:
				ambiguous_package_name(x, expanded_atoms, root_config,
					self.spinner, self.myopts)
				return False, myfavorites
			atom = expanded_atoms[0]
			null_atom = insert_category_into_atom(x, "null")
			null_cp = portage.dep_getkey(null_atom)
			cat, atom_pn = portage.catsplit(null_cp)
			virts_p = root_config.settings.get_virts_p().get(atom_pn)
			# Allow the depgraph to choose which virtual.
			atom = insert_category_into_atom(x, "virtual")
			atom = insert_category_into_atom(x, "null")

			args.append(AtomArg(arg=x, atom=atom,
				root_config=root_config))

		# Resolve queued filesystem paths to the packages that own them.
		search_for_multiple = False
		if len(lookup_owners) > 1:
			search_for_multiple = True

		for x in lookup_owners:
			if not search_for_multiple and os.path.isdir(x):
				search_for_multiple = True
			relative_paths.append(x[len(myroot):])

		for pkg, relative_path in \
			real_vardb._owners.iter_owners(relative_paths):
			owners.add(pkg.mycpv)
			if not search_for_multiple:

			portage.writemsg(("\n\n!!! '%s' is not claimed " + \
				"by any package.\n") % lookup_owners[0], noiselevel=-1)

			slot = vardb.aux_get(cpv, ["SLOT"])[0]
			# portage now masks packages with missing slot, but it's
			# possible that one was installed by an older version
			atom = portage.cpv_getkey(cpv)
			atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
			args.append(AtomArg(arg=atom, atom=atom,
				root_config=root_config))

		if "--update" in self.myopts:
			# In some cases, the greedy slots behavior can pull in a slot that
			# the user would want to uninstall due to it being blocked by a
			# newer version in a different slot. Therefore, it's necessary to
			# detect and discard any that should be uninstalled. Each time
			# that arguments are updated, package selections are repeated in
			# order to ensure consistency with the current arguments:
			#
			#  1) Initialize args
			#  2) Select packages and generate initial greedy atoms
			#  3) Update args with greedy atoms
			#  4) Select packages and generate greedy atoms again, while
			#     accounting for any blockers between selected packages
			#  5) Update args with revised greedy atoms

			self._set_args(args)
			greedy_args.append(arg)
			if not isinstance(arg, AtomArg):
			for atom in self._greedy_slots(arg.root_config, arg.atom):
				AtomArg(arg=arg.arg, atom=atom,
					root_config=arg.root_config))

			self._set_args(greedy_args)

			# Revise greedy atoms, accounting for any blockers
			# between selected packages.
			revised_greedy_args = []
			revised_greedy_args.append(arg)
			if not isinstance(arg, AtomArg):
			for atom in self._greedy_slots(arg.root_config, arg.atom,
				blocker_lookahead=True):
				revised_greedy_args.append(
					AtomArg(arg=arg.arg, atom=atom,
						root_config=arg.root_config))
			args = revised_greedy_args
			del revised_greedy_args

		self._set_args(args)

		# Deduplicate favorites: atoms for atom/package args, set names
		# for set args.
		myfavorites = set(myfavorites)
		if isinstance(arg, (AtomArg, PackageArg)):
			myfavorites.add(arg.atom)
		elif isinstance(arg, SetArg):
			myfavorites.add(arg.arg)
		myfavorites = list(myfavorites)

		pprovideddict = pkgsettings.pprovideddict
		portage.writemsg("\n", noiselevel=-1)
		# Order needs to be preserved since a feature of --nodeps
		# is to allow the user to force a specific merge order.
		for atom in arg.set:
			self.spinner.update()
			dep = Dependency(atom=atom, onlydeps=onlydeps,
				root=myroot, parent=arg)
			atom_cp = portage.dep_getkey(atom)
			pprovided = pprovideddict.get(portage.dep_getkey(atom))
			if pprovided and portage.match_from_list(atom, pprovided):
				# A provided package has been specified on the command line.
				self._pprovided_args.append((arg, atom))
			if isinstance(arg, PackageArg):
				if not self._add_pkg(arg.package, dep) or \
					not self._create_graph():
					sys.stderr.write(("\n\n!!! Problem resolving " + \
						"dependencies for %s\n") % arg.arg)
					return 0, myfavorites
			portage.writemsg(" Arg: %s\n Atom: %s\n" % \
				(arg, atom), noiselevel=-1)
			pkg, existing_node = self._select_package(
				myroot, atom, onlydeps=onlydeps)
			if not (isinstance(arg, SetArg) and \
				arg.name in ("system", "world")):
				self._unsatisfied_deps_for_display.append(
					((myroot, atom), {}))
				return 0, myfavorites
			self._missing_args.append((arg, atom))
			if atom_cp != pkg.cp:
				# For old-style virtuals, we need to repeat the
				# package.provided check against the selected package.
				expanded_atom = atom.replace(atom_cp, pkg.cp)
				pprovided = pprovideddict.get(pkg.cp)
					portage.match_from_list(expanded_atom, pprovided):
					# A provided package has been
					# specified on the command line.
					self._pprovided_args.append((arg, atom))
			if pkg.installed and "selective" not in self.myparams:
				self._unsatisfied_deps_for_display.append(
					((myroot, atom), {}))
				# Previous behavior was to bail out in this case, but
				# since the dep is satisfied by the installed package,
				# it's more friendly to continue building the graph
				# and just show a warning message. Therefore, only bail
				# out here if the atom is not from either the system or
				# world set.
				if not (isinstance(arg, SetArg) and \
					arg.name in ("system", "world")):
					return 0, myfavorites

			# Add the selected package to the graph as soon as possible
			# so that later dep_check() calls can use it as feedback
			# for making more consistent atom selections.
			if not self._add_pkg(pkg, dep):
				if isinstance(arg, SetArg):
					sys.stderr.write(("\n\n!!! Problem resolving " + \
						"dependencies for %s from %s\n") % \
					sys.stderr.write(("\n\n!!! Problem resolving " + \
						"dependencies for %s\n") % atom)
				return 0, myfavorites

		except portage.exception.MissingSignature, e:
			portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
			portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
			portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
			portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
			portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
			return 0, myfavorites
		except portage.exception.InvalidSignature, e:
			portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
			portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
			portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
			portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
			portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
			return 0, myfavorites
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
			print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)

		# Now that the root packages have been added to the graph,
		# process the dependencies.
		if not self._create_graph():
			return 0, myfavorites

		if "--usepkgonly" in self.myopts:
			for xs in self.digraph.all_nodes():
				if not isinstance(xs, Package):
				if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
					print "Missing binary for:",xs[2]

		except self._unknown_internal_error:
			return False, myfavorites

		# We're true here unless we are missing binaries.
		return (not missing,myfavorites)
	def _set_args(self, args):
		"""
		Create the "args" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		# NOTE(review): loop headers and set-population lines appear elided
		# in this excerpt; verify against the complete file.
		args_set = self._sets["args"]
		if not isinstance(arg, (AtomArg, PackageArg)):
		if atom in args_set:

		# Rebuild the flattened view of all set atoms.
		self._set_atoms.clear()
		self._set_atoms.update(chain(*self._sets.itervalues()))
		atom_arg_map = self._atom_arg_map
		atom_arg_map.clear()
		for atom in arg.set:
			atom_key = (atom, arg.root_config.root)
			refs = atom_arg_map.get(atom_key)
			atom_arg_map[atom_key] = refs

		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._highest_pkg_cache.clear()
		for trees in self._filtered_trees.itervalues():
			trees["porttree"].dbapi._clear_cache()
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: compute extra slot atoms ("greedy" slots) for installed slots that
# differ from the highest visible match; optionally discard slots that would
# immediately conflict via blockers.
5805 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5807 Return a list of slot atoms corresponding to installed slots that
5808 differ from the slot of the highest visible match. When
5809 blocker_lookahead is True, slot atoms that would trigger a blocker
5810 conflict are automatically discarded, potentially allowing automatic
5811 uninstallation of older slots when appropriate.
5813 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5814 if highest_pkg is None:
5816 vardb = root_config.trees["vartree"].dbapi
# Collect installed SLOT values for the same cp as the highest match.
5818 for cpv in vardb.match(atom):
5819 # don't mix new virtuals with old virtuals
5820 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5821 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5823 slots.add(highest_pkg.metadata["SLOT"])
5827 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select its best package; only keep those that are
# the same cp and strictly lower than the highest visible match.
5830 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5831 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5832 if pkg is not None and \
5833 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5834 greedy_pkgs.append(pkg)
5837 if not blocker_lookahead:
5838 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: build a blocker set per package from its *DEPEND strings.
5841 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5842 for pkg in greedy_pkgs + [highest_pkg]:
5843 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5845 atoms = self._select_atoms(
5846 pkg.root, dep_str, pkg.use.enabled,
5847 parent=pkg, strict=True)
5848 except portage.exception.InvalidDependString:
5850 blocker_atoms = (x for x in atoms if x.blocker)
5851 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5853 if highest_pkg not in blockers:
5856 # filter packages with invalid deps
5857 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5859 # filter packages that conflict with highest_pkg
5860 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5861 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5862 blockers[pkg].findAtomForPackage(highest_pkg))]
5867 # If two packages conflict, discard the lower version.
5868 discard_pkgs = set()
5869 greedy_pkgs.sort(reverse=True)
5870 for i in xrange(len(greedy_pkgs) - 1):
5871 pkg1 = greedy_pkgs[i]
5872 if pkg1 in discard_pkgs:
5874 for j in xrange(i + 1, len(greedy_pkgs)):
5875 pkg2 = greedy_pkgs[j]
5876 if pkg2 in discard_pkgs:
5878 if blockers[pkg1].findAtomForPackage(pkg2) or \
5879 blockers[pkg2].findAtomForPackage(pkg1):
5881 discard_pkgs.add(pkg2)
5883 return [pkg.slot_atom for pkg in greedy_pkgs \
5884 if pkg not in discard_pkgs]
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Thin wrapper: same as _select_atoms_highest_available but resolves against
# the graph trees so already-graphed/installed packages are preferred.
5886 def _select_atoms_from_graph(self, *pargs, **kwargs):
5888 Prefer atoms matching packages that have already been
5889 added to the graph or those that are installed and have
5890 not been scheduled for replacement.
5892 kwargs["trees"] = self._graph_trees
5893 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: run portage.dep_check() over a dependency string and return the
# selected atoms; raises InvalidDependString on failure.
5895 def _select_atoms_highest_available(self, root, depstring,
5896 myuse=None, parent=None, strict=True, trees=None, priority=None):
5897 """This will raise InvalidDependString if necessary. If trees is
5898 None then self._filtered_trees is used."""
5899 pkgsettings = self.pkgsettings[root]
5901 trees = self._filtered_trees
5902 if not getattr(priority, "buildtime", False):
5903 # The parent should only be passed to dep_check() for buildtime
5904 # dependencies since that's the only case when it's appropriate
5905 # to trigger the circular dependency avoidance code which uses it.
5906 # It's important not to trigger the same circular dependency
5907 # avoidance code for runtime dependencies since it's not needed
5908 # and it can promote an incorrect package choice.
5912 if parent is not None:
5913 trees[root]["parent"] = parent
# The module-level _dep_check_strict flag is toggled around the dep_check()
# call and restored afterwards (restore lines partly elided here).
5915 portage.dep._dep_check_strict = False
5916 mycheck = portage.dep_check(depstring, None,
5917 pkgsettings, myuse=myuse,
5918 myroot=root, trees=trees)
5920 if parent is not None:
5921 trees[root].pop("parent")
5922 portage.dep._dep_check_strict = True
5924 raise portage.exception.InvalidDependString(mycheck[1])
5925 selected_atoms = mycheck[1]
5926 return selected_atoms
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: explain to the user why an atom could not be satisfied — masked
# packages, missing/incorrect USE flags, or no ebuilds at all — and print the
# chain of parent dependencies that pulled the atom in.
5928 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5929 atom = portage.dep.Atom(atom)
5930 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-less variant of the atom so masking can be analyzed
# independently of USE deps (slot is re-appended when present).
5931 atom_without_use = atom
5933 atom_without_use = portage.dep.remove_slot(atom)
5935 atom_without_use += ":" + atom.slot
5936 atom_without_use = portage.dep.Atom(atom_without_use)
5937 xinfo = '"%s"' % atom
5940 # Discard null/ from failed cpv_expand category expansion.
5941 xinfo = xinfo.replace("null/", "")
5942 masked_packages = []
5944 masked_pkg_instances = set()
5945 missing_licenses = []
5946 have_eapi_mask = False
5947 pkgsettings = self.pkgsettings[root]
5948 implicit_iuse = pkgsettings._get_implicit_iuse()
5949 root_config = self.roots[root]
5950 portdb = self.roots[root].trees["porttree"].dbapi
5951 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidate matches and collect
# mask reasons for each.
5952 for db, pkg_type, built, installed, db_keys in dbs:
5956 if hasattr(db, "xmatch"):
5957 cpv_list = db.xmatch("match-all", atom_without_use)
5959 cpv_list = db.match(atom_without_use)
5962 for cpv in cpv_list:
5963 metadata, mreasons = get_mask_info(root_config, cpv,
5964 pkgsettings, db, pkg_type, built, installed, db_keys)
5965 if metadata is not None:
5966 pkg = Package(built=built, cpv=cpv,
5967 installed=installed, metadata=metadata,
5968 root_config=root_config)
5969 if pkg.cp != atom.cp:
5970 # A cpv can be returned from dbapi.match() as an
5971 # old-style virtual match even in cases when the
5972 # package does not actually PROVIDE the virtual.
5973 # Filter out any such false matches here.
5974 if not atom_set.findAtomForPackage(pkg):
5977 masked_pkg_instances.add(pkg)
5979 missing_use.append(pkg)
5982 masked_packages.append(
5983 (root_config, pkgsettings, cpv, metadata, mreasons))
# For packages that only failed the USE part of the atom, compute which flags
# are missing from IUSE and which would need to be enabled/disabled.
5985 missing_use_reasons = []
5986 missing_iuse_reasons = []
5987 for pkg in missing_use:
5988 use = pkg.use.enabled
5989 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5990 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5992 for x in atom.use.required:
5993 if iuse_re.match(x) is None:
5994 missing_iuse.append(x)
5997 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5998 missing_iuse_reasons.append((pkg, mreasons))
6000 need_enable = sorted(atom.use.enabled.difference(use))
6001 need_disable = sorted(atom.use.disabled.intersection(use))
6002 if need_enable or need_disable:
6004 changes.extend(colorize("red", "+" + x) \
6005 for x in need_enable)
6006 changes.extend(colorize("blue", "-" + x) \
6007 for x in need_disable)
6008 mreasons.append("Change USE: %s" % " ".join(changes))
6009 missing_use_reasons.append((pkg, mreasons))
# Only show USE hints for packages that are not also masked.
6011 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6012 in missing_use_reasons if pkg not in masked_pkg_instances]
6014 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6015 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6017 show_missing_use = False
6018 if unmasked_use_reasons:
6019 # Only show the latest version.
6020 show_missing_use = unmasked_use_reasons[:1]
6021 elif unmasked_iuse_reasons:
6022 if missing_use_reasons:
6023 # All packages with required IUSE are masked,
6024 # so display a normal masking message.
6027 show_missing_use = unmasked_iuse_reasons
# Report one of: USE-flag problem, masked packages, or nothing available.
6029 if show_missing_use:
6030 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6031 print "!!! One of the following packages is required to complete your request:"
6032 for pkg, mreasons in show_missing_use:
6033 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6035 elif masked_packages:
6037 colorize("BAD", "All ebuilds that could satisfy ") + \
6038 colorize("INFORM", xinfo) + \
6039 colorize("BAD", " have been masked.")
6040 print "!!! One of the following masked packages is required to complete your request:"
6041 have_eapi_mask = show_masked_packages(masked_packages)
6044 msg = ("The current version of portage supports " + \
6045 "EAPI '%s'. You must upgrade to a newer version" + \
6046 " of portage before EAPI masked packages can" + \
6047 " be installed.") % portage.const.EAPI
6048 from textwrap import wrap
6049 for line in wrap(msg, 75):
6054 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6056 # Show parent nodes and the argument that pulled them in.
6057 traversed_nodes = set()
6060 while node is not None:
6061 traversed_nodes.add(node)
6062 msg.append('(dependency required by "%s" [%s])' % \
6063 (colorize('INFORM', str(node.cpv)), node.type_name))
6064 # When traversing to parents, prefer arguments over packages
6065 # since arguments are root nodes. Never traverse the same
6066 # package twice, in order to prevent an infinite loop.
6067 selected_parent = None
6068 for parent in self.digraph.parent_nodes(node):
6069 if isinstance(parent, DependencyArg):
6070 msg.append('(dependency required by "%s" [argument])' % \
6071 (colorize('INFORM', str(parent))))
6072 selected_parent = None
6074 if parent not in traversed_nodes:
6075 selected_parent = parent
6076 node = selected_parent
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Caching wrapper around _select_pkg_highest_available_imp, keyed by
# (root, atom, onlydeps); also refreshes the cached "existing" graph node.
6082 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6083 cache_key = (root, atom, onlydeps)
6084 ret = self._highest_pkg_cache.get(cache_key)
6087 if pkg and not existing:
6088 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6089 if existing and existing == pkg:
6090 # Update the cache to reflect that the
6091 # package has been added to the graph.
6093 self._highest_pkg_cache[cache_key] = ret
6095 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6096 self._highest_pkg_cache[cache_key] = ret
# Record visible packages so later phases can consult visibility quickly.
6099 settings = pkg.root_config.settings
6100 if visible(settings, pkg) and not (pkg.installed and \
6101 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6102 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: the core package-selection routine — scan all configured dbs
# (ebuild/binary/installed) for the highest acceptable match of `atom`,
# honoring visibility, USE deps, --newuse/--reinstall/--noreplace, existing
# graph nodes, and old-style virtual filtering. Returns (pkg, existing_node).
6105 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6106 root_config = self.roots[root]
6107 pkgsettings = self.pkgsettings[root]
6108 dbs = self._filtered_trees[root]["dbs"]
6109 vardb = self.roots[root].trees["vartree"].dbapi
6110 portdb = self.roots[root].trees["porttree"].dbapi
6111 # List of acceptable packages, ordered by type preference.
6112 matched_packages = []
6113 highest_version = None
6114 if not isinstance(atom, portage.dep.Atom):
6115 atom = portage.dep.Atom(atom)
6117 atom_set = InternalPackageSet(initial_atoms=(atom,))
6118 existing_node = None
6120 usepkgonly = "--usepkgonly" in self.myopts
6121 empty = "empty" in self.myparams
6122 selective = "selective" in self.myparams
6124 noreplace = "--noreplace" in self.myopts
6125 # Behavior of the "selective" parameter depends on
6126 # whether or not a package matches an argument atom.
6127 # If an installed package provides an old-style
6128 # virtual that is no longer provided by an available
6129 # package, the installed package may match an argument
6130 # atom even though none of the available packages do.
6131 # Therefore, "selective" logic does not consider
6132 # whether or not an installed package matches an
6133 # argument atom. It only considers whether or not
6134 # available packages match argument atoms, which is
6135 # represented by the found_available_arg flag.
6136 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fall back.
6137 for find_existing_node in True, False:
6140 for db, pkg_type, built, installed, db_keys in dbs:
6143 if installed and not find_existing_node:
6144 want_reinstall = reinstall or empty or \
6145 (found_available_arg and not selective)
6146 if want_reinstall and matched_packages:
6148 if hasattr(db, "xmatch"):
6149 cpv_list = db.xmatch("match-all", atom)
6151 cpv_list = db.match(atom)
6153 # USE=multislot can make an installed package appear as if
6154 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6155 # won't do any good as long as USE=multislot is enabled since
6156 # the newly built package still won't have the expected slot.
6157 # Therefore, assume that such SLOT dependencies are already
6158 # satisfied rather than forcing a rebuild.
6159 if installed and not cpv_list and atom.slot:
6160 for cpv in db.match(atom.cp):
6161 slot_available = False
6162 for other_db, other_type, other_built, \
6163 other_installed, other_keys in dbs:
6166 other_db.aux_get(cpv, ["SLOT"])[0]:
6167 slot_available = True
6171 if not slot_available:
6173 inst_pkg = self._pkg(cpv, "installed",
6174 root_config, installed=installed)
6175 # Remove the slot from the atom and verify that
6176 # the package matches the resulting atom.
6177 atom_without_slot = portage.dep.remove_slot(atom)
6179 atom_without_slot += str(atom.use)
6180 atom_without_slot = portage.dep.Atom(atom_without_slot)
6181 if portage.match_from_list(
6182 atom_without_slot, [inst_pkg]):
6183 cpv_list = [inst_pkg.cpv]
6188 pkg_status = "merge"
6189 if installed or onlydeps:
6190 pkg_status = "nomerge"
# Evaluate each candidate cpv from this db.
6193 for cpv in cpv_list:
6194 # Make --noreplace take precedence over --newuse.
6195 if not installed and noreplace and \
6196 cpv in vardb.match(atom):
6197 # If the installed version is masked, it may
6198 # be necessary to look at lower versions,
6199 # in case there is a visible downgrade.
6201 reinstall_for_flags = None
6202 cache_key = (pkg_type, root, cpv, pkg_status)
6203 calculated_use = True
6204 pkg = self._pkg_cache.get(cache_key)
6206 calculated_use = False
6208 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6211 pkg = Package(built=built, cpv=cpv,
6212 installed=installed, metadata=metadata,
6213 onlydeps=onlydeps, root_config=root_config,
6215 metadata = pkg.metadata
6217 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6218 if not built and ("?" in metadata["LICENSE"] or \
6219 "?" in metadata["PROVIDE"]):
6220 # This is avoided whenever possible because
6221 # it's expensive. It only needs to be done here
6222 # if it has an effect on visibility.
6223 pkgsettings.setcpv(pkg)
6224 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6225 calculated_use = True
6226 self._pkg_cache[pkg] = pkg
6228 if not installed or (built and matched_packages):
6229 # Only enforce visibility on installed packages
6230 # if there is at least one other visible package
6231 # available. By filtering installed masked packages
6232 # here, packages that have been masked since they
6233 # were installed can be automatically downgraded
6234 # to an unmasked version.
6236 if not visible(pkgsettings, pkg):
6238 except portage.exception.InvalidDependString:
6242 # Enable upgrade or downgrade to a version
6243 # with visible KEYWORDS when the installed
6244 # version is masked by KEYWORDS, but never
6245 # reinstall the same exact version only due
6246 # to a KEYWORDS mask.
6247 if built and matched_packages:
6249 different_version = None
6250 for avail_pkg in matched_packages:
6251 if not portage.dep.cpvequal(
6252 pkg.cpv, avail_pkg.cpv):
6253 different_version = avail_pkg
6255 if different_version is not None:
6258 pkgsettings._getMissingKeywords(
6259 pkg.cpv, pkg.metadata):
6262 # If the ebuild no longer exists or it's
6263 # keywords have been dropped, reject built
6264 # instances (installed or binary).
6265 # If --usepkgonly is enabled, assume that
6266 # the ebuild status should be ignored.
6270 pkg.cpv, "ebuild", root_config)
6271 except portage.exception.PackageNotFound:
6274 if not visible(pkgsettings, pkg_eb):
6277 if not pkg.built and not calculated_use:
6278 # This is avoided whenever possible because
6280 pkgsettings.setcpv(pkg)
6281 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6283 if pkg.cp != atom.cp:
6284 # A cpv can be returned from dbapi.match() as an
6285 # old-style virtual match even in cases when the
6286 # package does not actually PROVIDE the virtual.
6287 # Filter out any such false matches here.
6288 if not atom_set.findAtomForPackage(pkg):
6292 if root == self.target_root:
6294 # Ebuild USE must have been calculated prior
6295 # to this point, in case atoms have USE deps.
6296 myarg = self._iter_atoms_for_pkg(pkg).next()
6297 except StopIteration:
6299 except portage.exception.InvalidDependString:
6301 # masked by corruption
6303 if not installed and myarg:
6304 found_available_arg = True
# Reject candidates whose USE state contradicts the atom's USE deps.
6306 if atom.use and not pkg.built:
6307 use = pkg.use.enabled
6308 if atom.use.enabled.difference(use):
6310 if atom.use.disabled.intersection(use):
6312 if pkg.cp == atom_cp:
6313 if highest_version is None:
6314 highest_version = pkg
6315 elif pkg > highest_version:
6316 highest_version = pkg
6317 # At this point, we've found the highest visible
6318 # match from the current repo. Any lower versions
6319 # from this repo are ignored, so this so the loop
6320 # will always end with a break statement below
6322 if find_existing_node:
6323 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6326 if portage.dep.match_from_list(atom, [e_pkg]):
6327 if highest_version and \
6328 e_pkg.cp == atom_cp and \
6329 e_pkg < highest_version and \
6330 e_pkg.slot_atom != highest_version.slot_atom:
6331 # There is a higher version available in a
6332 # different slot, so this existing node is
6336 matched_packages.append(e_pkg)
6337 existing_node = e_pkg
6339 # Compare built package to current config and
6340 # reject the built package if necessary.
6341 if built and not installed and \
6342 ("--newuse" in self.myopts or \
6343 "--reinstall" in self.myopts):
6344 iuses = pkg.iuse.all
6345 old_use = pkg.use.enabled
6347 pkgsettings.setcpv(myeb)
6349 pkgsettings.setcpv(pkg)
6350 now_use = pkgsettings["PORTAGE_USE"].split()
6351 forced_flags = set()
6352 forced_flags.update(pkgsettings.useforce)
6353 forced_flags.update(pkgsettings.usemask)
6355 if myeb and not usepkgonly:
6356 cur_iuse = myeb.iuse.all
6357 if self._reinstall_for_flags(forced_flags,
6361 # Compare current config to installed package
6362 # and do not reinstall if possible.
6363 if not installed and \
6364 ("--newuse" in self.myopts or \
6365 "--reinstall" in self.myopts) and \
6366 cpv in vardb.match(atom):
6367 pkgsettings.setcpv(pkg)
6368 forced_flags = set()
6369 forced_flags.update(pkgsettings.useforce)
6370 forced_flags.update(pkgsettings.usemask)
6371 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6372 old_iuse = set(filter_iuse_defaults(
6373 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6374 cur_use = pkgsettings["PORTAGE_USE"].split()
6375 cur_iuse = pkg.iuse.all
6376 reinstall_for_flags = \
6377 self._reinstall_for_flags(
6378 forced_flags, old_use, old_iuse,
6380 if reinstall_for_flags:
6384 matched_packages.append(pkg)
6385 if reinstall_for_flags:
6386 self._reinstall_nodes[pkg] = \
6390 if not matched_packages:
6393 if "--debug" in self.myopts:
6394 for pkg in matched_packages:
6395 portage.writemsg("%s %s\n" % \
6396 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6398 # Filter out any old-style virtual matches if they are
6399 # mixed with new-style virtual matches.
6400 cp = portage.dep_getkey(atom)
6401 if len(matched_packages) > 1 and \
6402 "virtual" == portage.catsplit(cp)[0]:
6403 for pkg in matched_packages:
6406 # Got a new-style virtual, so filter
6407 # out any old-style virtuals.
6408 matched_packages = [pkg for pkg in matched_packages \
6412 if len(matched_packages) > 1:
6413 bestmatch = portage.best(
6414 [pkg.cpv for pkg in matched_packages])
6415 matched_packages = [pkg for pkg in matched_packages \
6416 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6418 # ordered by type preference ("ebuild" type is the last resort)
6419 return matched_packages[-1], existing_node
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: resolve an atom against the graph db only (already-graphed or
# installed, not-scheduled-for-replacement packages); returns (pkg, in_graph).
6421 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6423 Select packages that have already been added to the graph or
6424 those that are installed and have not been scheduled for
6427 graph_db = self._graph_trees[root]["porttree"].dbapi
6428 matches = graph_db.match_pkgs(atom)
6431 pkg = matches[-1] # highest match
6432 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6433 return pkg, in_graph
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: pull deep dependencies of the required sets (args/system/world)
# into the graph so initially-satisfied deps are not broken by the merge plan;
# gated behind --complete-graph because it is expensive.
6435 def _complete_graph(self):
6437 Add any deep dependencies of required sets (args, system, world) that
6438 have not been pulled into the graph yet. This ensures that the graph
6439 is consistent such that initially satisfied deep dependencies are not
6440 broken in the new graph. Initially unsatisfied dependencies are
6441 irrelevant since we only want to avoid breaking dependencies that are
6444 Since this method can consume enough time to disturb users, it is
6445 currently only enabled by the --complete-graph option.
6447 if "--buildpkgonly" in self.myopts or \
6448 "recurse" not in self.myparams:
6451 if "complete" not in self.myparams:
6452 # Skip this to avoid consuming enough time to disturb users.
6455 # Put the depgraph into a mode that causes it to only
6456 # select packages that have already been added to the
6457 # graph or those that are installed and have not been
6458 # scheduled for replacement. Also, toggle the "deep"
6459 # parameter so that all dependencies are traversed and
6461 self._select_atoms = self._select_atoms_from_graph
6462 self._select_package = self._select_pkg_from_graph
6463 already_deep = "deep" in self.myparams
6464 if not already_deep:
6465 self.myparams.add("deep")
6467 for root in self.roots:
6468 required_set_names = self._required_set_names.copy()
6469 if root == self.target_root and \
6470 (already_deep or "empty" in self.myparams):
6471 required_set_names.difference_update(self._sets)
6472 if not required_set_names and not self._ignored_deps:
6474 root_config = self.roots[root]
6475 setconfig = root_config.setconfig
6477 # Reuse existing SetArg instances when available.
6478 for arg in self.digraph.root_nodes():
6479 if not isinstance(arg, SetArg):
6481 if arg.root_config != root_config:
6483 if arg.name in required_set_names:
6485 required_set_names.remove(arg.name)
6486 # Create new SetArg instances only when necessary.
6487 for s in required_set_names:
6488 expanded_set = InternalPackageSet(
6489 initial_atoms=setconfig.getSetAtoms(s))
6490 atom = SETPREFIX + s
6491 args.append(SetArg(arg=atom, set=expanded_set,
6492 root_config=root_config))
6493 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom (and any previously ignored deps) for resolution.
6495 for atom in arg.set:
6496 self._dep_stack.append(
6497 Dependency(atom=atom, root=root, parent=arg))
6498 if self._ignored_deps:
6499 self._dep_stack.extend(self._ignored_deps)
6500 self._ignored_deps = []
6501 if not self._create_graph(allow_unsatisfied=True):
6503 # Check the unsatisfied deps to see if any initially satisfied deps
6504 # will become unsatisfied due to an upgrade. Initially unsatisfied
6505 # deps are irrelevant since we only want to avoid breaking deps
6506 # that are initially satisfied.
6507 while self._unsatisfied_deps:
6508 dep = self._unsatisfied_deps.pop()
6509 matches = vardb.match_pkgs(dep.atom)
6511 self._initially_unsatisfied_deps.append(dep)
6513 # An scheduled installation broke a deep dependency.
6514 # Add the installed package to the graph so that it
6515 # will be appropriately reported as a slot collision
6516 # (possibly solvable via backtracking).
6517 pkg = matches[-1] # highest match
6518 if not self._add_pkg(pkg, dep):
6520 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: fetch a Package instance from the cache or construct it from the
# appropriate tree's dbapi metadata; ebuild packages get USE/CHOST computed.
6524 def _pkg(self, cpv, type_name, root_config, installed=False):
6526 Get a package instance from the cache, or create a new
6527 one if necessary. Raises KeyError from aux_get if it
6528 failures for some reason (package does not exist or is
6533 operation = "nomerge"
6534 pkg = self._pkg_cache.get(
6535 (type_name, root_config.root, cpv, operation))
6537 tree_type = self.pkg_tree_map[type_name]
6538 db = root_config.trees[tree_type].dbapi
6539 db_keys = list(self._trees_orig[root_config.root][
6540 tree_type].dbapi._aux_cache_keys)
6542 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6544 raise portage.exception.PackageNotFound(cpv)
6545 pkg = Package(cpv=cpv, metadata=metadata,
6546 root_config=root_config, installed=installed)
6547 if type_name == "ebuild":
# For ebuilds, USE must be computed from current settings, not metadata.
6548 settings = self.pkgsettings[root_config.root]
6549 settings.setcpv(pkg)
6550 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6551 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6552 self._pkg_cache[pkg] = pkg
# NOTE(review): numbered listing with elided lines; code kept byte-identical,
# comments added only.
# Purpose: discard blockers that match nothing in the graph, and for real
# blocks either schedule uninstalls with hard ordering deps or record the
# blocker as unsolvable. Uses a per-root BlockerCache keyed by COUNTER.
6555 def validate_blockers(self):
6556 """Remove any blockers from the digraph that do not match any of the
6557 packages within the graph. If necessary, create hard deps to ensure
6558 correct merge order such that mutually blocking packages are never
6559 installed simultaneously."""
6561 if "--buildpkgonly" in self.myopts or \
6562 "--nodeps" in self.myopts:
6565 #if "deep" in self.myparams:
6567 # Pull in blockers from all installed packages that haven't already
6568 # been pulled into the depgraph. This is not enabled by default
6569 # due to the performance penalty that is incurred by all the
6570 # additional dep_check calls that are required.
6572 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6573 for myroot in self.trees:
6574 vardb = self.trees[myroot]["vartree"].dbapi
6575 portdb = self.trees[myroot]["porttree"].dbapi
6576 pkgsettings = self.pkgsettings[myroot]
6577 final_db = self.mydbapi[myroot]
6579 blocker_cache = BlockerCache(myroot, vardb)
6580 stale_cache = set(blocker_cache)
# Walk installed packages; anything still in stale_cache afterwards is purged.
6583 stale_cache.discard(cpv)
6584 pkg_in_graph = self.digraph.contains(pkg)
6586 # Check for masked installed packages. Only warn about
6587 # packages that are in the graph in order to avoid warning
6588 # about those that will be automatically uninstalled during
6589 # the merge process or by --depclean.
6591 if pkg_in_graph and not visible(pkgsettings, pkg):
6592 self._masked_installed.add(pkg)
6594 blocker_atoms = None
6600 self._blocker_parents.child_nodes(pkg))
6605 self._irrelevant_blockers.child_nodes(pkg))
6608 if blockers is not None:
6609 blockers = set(str(blocker.atom) \
6610 for blocker in blockers)
6612 # If this node has any blockers, create a "nomerge"
6613 # node for it so that they can be enforced.
6614 self.spinner.update()
# Cache entries are validated against the installed COUNTER value.
6615 blocker_data = blocker_cache.get(cpv)
6616 if blocker_data is not None and \
6617 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6620 # If blocker data from the graph is available, use
6621 # it to validate the cache and update the cache if
6623 if blocker_data is not None and \
6624 blockers is not None:
6625 if not blockers.symmetric_difference(
6626 blocker_data.atoms):
6630 if blocker_data is None and \
6631 blockers is not None:
6632 # Re-use the blockers from the graph.
6633 blocker_atoms = sorted(blockers)
6634 counter = long(pkg.metadata["COUNTER"])
6636 blocker_cache.BlockerData(counter, blocker_atoms)
6637 blocker_cache[pkg.cpv] = blocker_data
6641 blocker_atoms = blocker_data.atoms
6643 # Use aux_get() to trigger FakeVartree global
6644 # updates on *DEPEND when appropriate.
6645 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6646 # It is crucial to pass in final_db here in order to
6647 # optimize dep_check calls by eliminating atoms via
6648 # dep_wordreduce and dep_eval calls.
6650 portage.dep._dep_check_strict = False
6652 success, atoms = portage.dep_check(depstr,
6653 final_db, pkgsettings, myuse=pkg.use.enabled,
6654 trees=self._graph_trees, myroot=myroot)
6655 except Exception, e:
6656 if isinstance(e, SystemExit):
6658 # This is helpful, for example, if a ValueError
6659 # is thrown from cpv_expand due to multiple
6660 # matches (this can happen if an atom lacks a
6662 show_invalid_depstring_notice(
6663 pkg, depstr, str(e))
6667 portage.dep._dep_check_strict = True
6669 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6670 if replacement_pkg and \
6671 replacement_pkg[0].operation == "merge":
6672 # This package is being replaced anyway, so
6673 # ignore invalid dependencies so as not to
6674 # annoy the user too much (otherwise they'd be
6675 # forced to manually unmerge it first).
6677 show_invalid_depstring_notice(pkg, depstr, atoms)
6679 blocker_atoms = [myatom for myatom in atoms \
6680 if myatom.startswith("!")]
6681 blocker_atoms.sort()
6682 counter = long(pkg.metadata["COUNTER"])
6683 blocker_cache[cpv] = \
6684 blocker_cache.BlockerData(counter, blocker_atoms)
6687 for atom in blocker_atoms:
6688 blocker = Blocker(atom=portage.dep.Atom(atom),
6689 eapi=pkg.metadata["EAPI"], root=myroot)
6690 self._blocker_parents.add(blocker, pkg)
6691 except portage.exception.InvalidAtom, e:
6692 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6693 show_invalid_depstring_notice(
6694 pkg, depstr, "Invalid Atom: %s" % (e,))
6696 for cpv in stale_cache:
6697 del blocker_cache[cpv]
6698 blocker_cache.flush()
6701 # Discard any "uninstall" tasks scheduled by previous calls
6702 # to this method, since those tasks may not make sense given
6703 # the current graph state.
6704 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6705 if previous_uninstall_tasks:
6706 self._blocker_uninstalls = digraph()
6707 self.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each collected blocker against initial/final dbs.
6709 for blocker in self._blocker_parents.leaf_nodes():
6710 self.spinner.update()
6711 root_config = self.roots[blocker.root]
6712 virtuals = root_config.settings.getvirtuals()
6713 myroot = blocker.root
6714 initial_db = self.trees[myroot]["vartree"].dbapi
6715 final_db = self.mydbapi[myroot]
6717 provider_virtual = False
6718 if blocker.cp in virtuals and \
6719 not self._have_new_virt(blocker.root, blocker.cp):
6720 provider_virtual = True
# Old-style virtual blockers expand to one atom per provider.
6722 if provider_virtual:
6724 for provider_entry in virtuals[blocker.cp]:
6726 portage.dep_getkey(provider_entry)
6727 atoms.append(blocker.atom.replace(
6728 blocker.cp, provider_cp))
6730 atoms = [blocker.atom]
6732 blocked_initial = []
6734 blocked_initial.extend(initial_db.match_pkgs(atom))
6738 blocked_final.extend(final_db.match_pkgs(atom))
6740 if not blocked_initial and not blocked_final:
6741 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6742 self._blocker_parents.remove(blocker)
6743 # Discard any parents that don't have any more blockers.
6744 for pkg in parent_pkgs:
6745 self._irrelevant_blockers.add(blocker, pkg)
6746 if not self._blocker_parents.child_nodes(pkg):
6747 self._blocker_parents.remove(pkg)
6749 for parent in self._blocker_parents.parent_nodes(blocker):
6750 unresolved_blocks = False
6751 depends_on_order = set()
6752 for pkg in blocked_initial:
6753 if pkg.slot_atom == parent.slot_atom:
6754 # TODO: Support blocks within slots in cases where it
6755 # might make sense. For example, a new version might
6756 # require that the old version be uninstalled at build
6759 if parent.installed:
6760 # Two currently installed packages conflict with
6761 # eachother. Ignore this case since the damage
6762 # is already done and this would be likely to
6763 # confuse users if displayed like a normal blocker.
6766 self._blocked_pkgs.add(pkg, blocker)
6768 if parent.operation == "merge":
6769 # Maybe the blocked package can be replaced or simply
6770 # unmerged to resolve this block.
6771 depends_on_order.add((pkg, parent))
6773 # None of the above blocker resolutions techniques apply,
6774 # so apparently this one is unresolvable.
6775 unresolved_blocks = True
6776 for pkg in blocked_final:
6777 if pkg.slot_atom == parent.slot_atom:
6778 # TODO: Support blocks within slots.
6780 if parent.operation == "nomerge" and \
6781 pkg.operation == "nomerge":
6782 # This blocker will be handled the next time that a
6783 # merge of either package is triggered.
6786 self._blocked_pkgs.add(pkg, blocker)
6788 # Maybe the blocking package can be
6789 # unmerged to resolve this block.
6790 if parent.operation == "merge" and pkg.installed:
6791 depends_on_order.add((pkg, parent))
6793 elif parent.operation == "nomerge":
6794 depends_on_order.add((parent, pkg))
6796 # None of the above blocker resolutions techniques apply,
6797 # so apparently this one is unresolvable.
6798 unresolved_blocks = True
6800 # Make sure we don't unmerge any package that have been pulled
6802 if not unresolved_blocks and depends_on_order:
6803 for inst_pkg, inst_task in depends_on_order:
6804 if self.digraph.contains(inst_pkg) and \
6805 self.digraph.parent_nodes(inst_pkg):
6806 unresolved_blocks = True
6809 if not unresolved_blocks and depends_on_order:
6810 for inst_pkg, inst_task in depends_on_order:
# Build an explicit "uninstall" task node mirroring the installed package.
6811 uninst_task = Package(built=inst_pkg.built,
6812 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6813 metadata=inst_pkg.metadata,
6814 operation="uninstall",
6815 root_config=inst_pkg.root_config,
6816 type_name=inst_pkg.type_name)
6817 self._pkg_cache[uninst_task] = uninst_task
6818 # Enforce correct merge order with a hard dep.
6819 self.digraph.addnode(uninst_task, inst_task,
6820 priority=BlockerDepPriority.instance)
6821 # Count references to this blocker so that it can be
6822 # invalidated after nodes referencing it have been
6824 self._blocker_uninstalls.addnode(uninst_task, blocker)
6825 if not unresolved_blocks and not depends_on_order:
6826 self._irrelevant_blockers.add(blocker, parent)
6827 self._blocker_parents.remove_edge(blocker, parent)
6828 if not self._blocker_parents.parent_nodes(blocker):
6829 self._blocker_parents.remove(blocker)
6830 if not self._blocker_parents.child_nodes(parent):
6831 self._blocker_parents.remove(parent)
6832 if unresolved_blocks:
6833 self._unsolvable_blockers.add(blocker, parent)
# Purpose (from visible code): decide whether blocker conflicts are
# tolerable by scanning self.myopts for options under which nothing is
# actually merged onto the live system.
# NOTE(review): the embedded original numbering jumps (6838, 6842-6845
# absent), so the accumulator/return lines of this method are missing
# from this excerpt -- do not assume the visible lines are the whole body.
6837 def _accept_blocker_conflicts(self):
6839 for x in ("--buildpkgonly", "--fetchonly",
6840 "--fetch-all-uri", "--nodeps"):
6841 if x in self.myopts:
# Purpose: sort mygraph.order in place for merge-order selection.  The
# visible comparator special-cases 'uninstall' operations and membership
# in the deep system runtime dep set, then falls back to descending
# overall parent-reference count.
# NOTE(review): the embedded numbering jumps throughout this method, so
# the comparator's early-return branches are missing from this excerpt.
6846 def _merge_order_bias(self, mygraph):
6848 For optimal leaf node selection, promote deep system runtime deps and
6849 order nodes from highest to lowest overall reference count.
6853 for node in mygraph.order:
# node_info maps each node to its parent count (reference count).
6854 node_info[node] = len(mygraph.parent_nodes(node))
6855 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6857 def cmp_merge_preference(node1, node2):
6859 if node1.operation == 'uninstall':
6860 if node2.operation == 'uninstall':
6864 if node2.operation == 'uninstall':
6865 if node1.operation == 'uninstall':
6869 node1_sys = node1 in deep_system_deps
6870 node2_sys = node2 in deep_system_deps
6871 if node1_sys != node2_sys:
# Higher reference count sorts earlier (result is descending by count).
6876 return node_info[node2] - node_info[node1]
6878 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Purpose: return a copy of the serialized task list, computing and
# caching it on first use via _resolve_conflicts() / _serialize_tasks().
# NOTE: the `reversed` parameter shadows the builtin of the same name;
# it is kept as-is for interface compatibility with existing callers.
# NOTE(review): the embedded numbering jumps here -- the `try:` pairing
# with the visible except clause, the loop exit, the reversed handling
# and the return statement are missing from this excerpt.
6880 def altlist(self, reversed=False):
6882 while self._serialized_tasks_cache is None:
6883 self._resolve_conflicts()
6885 self._serialized_tasks_cache, self._scheduler_graph = \
6886 self._serialize_tasks()
# _serialize_tasks_retry signals that serialization must be re-attempted;
# the while loop re-runs as long as the cache is still None.
6887 except self._serialize_tasks_retry:
6890 retlist = self._serialized_tasks_cache[:]
# NOTE(review): the embedded numbering jumps (6896, 6904, 6908, 6910
# absent) -- presumably the docstring delimiters and the call that
# populates self._scheduler_graph; confirm against the full source.
6895 def schedulerGraph(self):
6897 The scheduler graph is identical to the normal one except that
6898 uninstall edges are reversed in specific cases that require
6899 conflicting packages to be temporarily installed simultaneously.
6900 This is intended for use by the Scheduler in it's parallelization
6901 logic. It ensures that temporary simultaneous installation of
6902 conflicting packages is avoided when appropriate (especially for
6903 !!atom blockers), but allowed in specific cases that require it.
6905 Note that this method calls break_refs() which alters the state of
6906 internal Package instances such that this depgraph instance should
6907 not be used to perform any more calculations.
6909 if self._scheduler_graph is None:
# break_refs() severs references from the returned nodes back into this
# depgraph instance before the graph is handed out.
6911 self.break_refs(self._scheduler_graph.order)
6912 return self._scheduler_graph
# NOTE(review): the embedded numbering jumps here -- in particular the
# `for node in nodes:` loop header is missing from this excerpt; the
# visible hasattr check clearly executes once per node.
6914 def break_refs(self, nodes):
6916 Take a mergelist like that returned from self.altlist() and
6917 break any references that lead back to the depgraph. This is
6918 useful if you want to hold references to packages without
6919 also holding the depgraph on the heap.
6922 if hasattr(node, "root_config"):
6923 # The FakeVartree references the _package_cache which
6924 # references the depgraph. So that Package instances don't
6925 # hold the depgraph and FakeVartree on the heap, replace
6926 # the RootConfig that references the FakeVartree with the
6927 # original RootConfig instance which references the actual
6929 node.root_config = \
6930 self._trees_orig[node.root_config.root]["root_config"]
6932 def _resolve_conflicts(self):
6933 if not self._complete_graph():
6934 raise self._unknown_internal_error()
6936 if not self.validate_blockers():
6937 raise self._unknown_internal_error()
6939 if self._slot_collision_info:
6940 self._process_slot_conflicts()
# Purpose (from visible code): linearize self.digraph into an ordered
# task list (retlist) plus a scheduler graph -- repeatedly selecting
# mergeable leaf nodes, scheduling Uninstall tasks needed to resolve
# blockers, and progressively loosening selection criteria
# (prefer_asap / drop_satisfied) when no node can be selected.
# NOTE(review): the embedded original numbering jumps throughout this
# method, so many lines (else branches, breaks/continues, returns of the
# nested helpers, variable initializations such as asap_nodes/retlist)
# are missing from this excerpt.  Only what the visible lines show is
# documented here.
6942 def _serialize_tasks(self):
6944 if "--debug" in self.myopts:
6945 writemsg("\ndigraph:\n\n", noiselevel=-1)
6946 self.digraph.debug_print()
6947 writemsg("\n", noiselevel=-1)
6949 scheduler_graph = self.digraph.copy()
6950 mygraph=self.digraph.copy()
6951 # Prune "nomerge" root nodes if nothing depends on them, since
6952 # otherwise they slow down merge order calculation. Don't remove
6953 # non-root nodes since they help optimize merge order in some cases
6954 # such as revdep-rebuild.
6955 removed_nodes = set()
6957 for node in mygraph.root_nodes():
6958 if not isinstance(node, Package) or \
6959 node.installed or node.onlydeps:
6960 removed_nodes.add(node)
6962 self.spinner.update()
6963 mygraph.difference_update(removed_nodes)
6964 if not removed_nodes:
6966 removed_nodes.clear()
6967 self._merge_order_bias(mygraph)
# cmp_circular_bias: comparator used to order a selected group of nodes
# that participate in a circular dependency (see the sort further down).
6968 def cmp_circular_bias(n1, n2):
6970 RDEPEND is stronger than PDEPEND and this function
6971 measures such a strength bias within a circular
6972 dependency relationship.
6974 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6975 ignore_priority=priority_range.ignore_medium_soft)
6976 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6977 ignore_priority=priority_range.ignore_medium_soft)
6978 if n1_n2_medium == n2_n1_medium:
6983 myblocker_uninstalls = self._blocker_uninstalls.copy()
6985 # Contains uninstall tasks that have been scheduled to
6986 # occur after overlapping blockers have been installed.
6987 scheduled_uninstalls = set()
6988 # Contains any Uninstall tasks that have been ignored
6989 # in order to avoid the circular deps code path. These
6990 # correspond to blocker conflicts that could not be
6992 ignored_uninstall_tasks = set()
6993 have_uninstall_task = False
6994 complete = "complete" in self.myparams
6997 def get_nodes(**kwargs):
6999 Returns leaf nodes excluding Uninstall instances
7000 since those should be executed as late as possible.
7002 return [node for node in mygraph.leaf_nodes(**kwargs) \
7003 if isinstance(node, Package) and \
7004 (node.operation != "uninstall" or \
7005 node in scheduled_uninstalls)]
7007 # sys-apps/portage needs special treatment if ROOT="/"
7008 running_root = self._running_root.root
7009 from portage.const import PORTAGE_PACKAGE_ATOM
7010 runtime_deps = InternalPackageSet(
7011 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7012 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7013 PORTAGE_PACKAGE_ATOM)
7014 replacement_portage = self.mydbapi[running_root].match_pkgs(
7015 PORTAGE_PACKAGE_ATOM)
7018 running_portage = running_portage[0]
7020 running_portage = None
7022 if replacement_portage:
7023 replacement_portage = replacement_portage[0]
7025 replacement_portage = None
# A replacement identical to the running portage is not an upgrade.
7027 if replacement_portage == running_portage:
7028 replacement_portage = None
7030 if replacement_portage is not None:
7031 # update from running_portage to replacement_portage asap
7032 asap_nodes.append(replacement_portage)
7034 if running_portage is not None:
7036 portage_rdepend = self._select_atoms_highest_available(
7037 running_root, running_portage.metadata["RDEPEND"],
7038 myuse=running_portage.use.enabled,
7039 parent=running_portage, strict=False)
7040 except portage.exception.InvalidDependString, e:
7041 portage.writemsg("!!! Invalid RDEPEND in " + \
7042 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7043 (running_root, running_portage.cpv, e), noiselevel=-1)
7045 portage_rdepend = []
# Only non-blocker atoms of portage's RDEPEND are protected below.
7046 runtime_deps.update(atom for atom in portage_rdepend \
7047 if not atom.startswith("!"))
7049 def gather_deps(ignore_priority, mergeable_nodes,
7050 selected_nodes, node):
7052 Recursively gather a group of nodes that RDEPEND on
7053 eachother. This ensures that they are merged as a group
7054 and get their RDEPENDs satisfied as soon as possible.
7056 if node in selected_nodes:
7058 if node not in mergeable_nodes:
7060 if node == replacement_portage and \
7061 mygraph.child_nodes(node,
7062 ignore_priority=priority_range.ignore_medium_soft):
7063 # Make sure that portage always has all of it's
7064 # RDEPENDs installed first.
7066 selected_nodes.add(node)
7067 for child in mygraph.child_nodes(node,
7068 ignore_priority=ignore_priority):
7069 if not gather_deps(ignore_priority,
7070 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally never ignore blocker-dep edges.
7074 def ignore_uninst_or_med(priority):
7075 if priority is BlockerDepPriority.instance:
7077 return priority_range.ignore_medium(priority)
7079 def ignore_uninst_or_med_soft(priority):
7080 if priority is BlockerDepPriority.instance:
7082 return priority_range.ignore_medium_soft(priority)
7084 tree_mode = "--tree" in self.myopts
7085 # Tracks whether or not the current iteration should prefer asap_nodes
7086 # if available. This is set to False when the previous iteration
7087 # failed to select any nodes. It is reset whenever nodes are
7088 # successfully selected.
7091 # Controls whether or not the current iteration should drop edges that
7092 # are "satisfied" by installed packages, in order to solve circular
7093 # dependencies. The deep runtime dependencies of installed packages are
7094 # not checked in this case (bug #199856), so it must be avoided
7095 # whenever possible.
7096 drop_satisfied = False
7098 # State of variables for successive iterations that loosen the
7099 # criteria for node selection.
7101 # iteration prefer_asap drop_satisfied
7106 # If no nodes are selected on the last iteration, it is due to
7107 # unresolved blockers or circular dependencies.
# Main selection loop: drain mygraph one selected group at a time.
7109 while not mygraph.empty():
7110 self.spinner.update()
7111 selected_nodes = None
7112 ignore_priority = None
7113 if drop_satisfied or (prefer_asap and asap_nodes):
7114 priority_range = DepPrioritySatisfiedRange
7116 priority_range = DepPriorityNormalRange
7117 if prefer_asap and asap_nodes:
7118 # ASAP nodes are merged before their soft deps. Go ahead and
7119 # select root nodes here if necessary, since it's typical for
7120 # the parent to have been removed from the graph already.
7121 asap_nodes = [node for node in asap_nodes \
7122 if mygraph.contains(node)]
7123 for node in asap_nodes:
7124 if not mygraph.child_nodes(node,
7125 ignore_priority=priority_range.ignore_soft):
7126 selected_nodes = [node]
7127 asap_nodes.remove(node)
7129 if not selected_nodes and \
7130 not (prefer_asap and asap_nodes):
7131 for i in xrange(priority_range.NONE,
7132 priority_range.MEDIUM_SOFT + 1):
7133 ignore_priority = priority_range.ignore_priority[i]
7134 nodes = get_nodes(ignore_priority=ignore_priority)
7136 # If there is a mix of uninstall nodes with other
7137 # types, save the uninstall nodes for later since
7138 # sometimes a merge node will render an uninstall
7139 # node unnecessary (due to occupying the same slot),
7140 # and we want to avoid executing a separate uninstall
7141 # task in that case.
7143 good_uninstalls = []
7144 with_some_uninstalls_excluded = []
7146 if node.operation == "uninstall":
7147 slot_node = self.mydbapi[node.root
7148 ].match_pkgs(node.slot_atom)
7150 slot_node[0].operation == "merge":
7152 good_uninstalls.append(node)
7153 with_some_uninstalls_excluded.append(node)
7155 nodes = good_uninstalls
7156 elif with_some_uninstalls_excluded:
7157 nodes = with_some_uninstalls_excluded
7161 if ignore_priority is None and not tree_mode:
7162 # Greedily pop all of these nodes since no
7163 # relationship has been ignored. This optimization
7164 # destroys --tree output, so it's disabled in tree
7166 selected_nodes = nodes
7168 # For optimal merge order:
7169 # * Only pop one node.
7170 # * Removing a root node (node without a parent)
7171 # will not produce a leaf node, so avoid it.
7172 # * It's normal for a selected uninstall to be a
7173 # root node, so don't check them for parents.
7175 if node.operation == "uninstall" or \
7176 mygraph.parent_nodes(node):
7177 selected_nodes = [node]
7183 if not selected_nodes:
7184 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7186 mergeable_nodes = set(nodes)
7187 if prefer_asap and asap_nodes:
7189 for i in xrange(priority_range.SOFT,
7190 priority_range.MEDIUM_SOFT + 1):
7191 ignore_priority = priority_range.ignore_priority[i]
7193 if not mygraph.parent_nodes(node):
7195 selected_nodes = set()
7196 if gather_deps(ignore_priority,
7197 mergeable_nodes, selected_nodes, node):
7200 selected_nodes = None
7204 if prefer_asap and asap_nodes and not selected_nodes:
7205 # We failed to find any asap nodes to merge, so ignore
7206 # them for the next iteration.
7210 if selected_nodes and ignore_priority is not None:
7211 # Try to merge ignored medium_soft deps as soon as possible
7212 # if they're not satisfied by installed packages.
7213 for node in selected_nodes:
7214 children = set(mygraph.child_nodes(node))
7215 soft = children.difference(
7216 mygraph.child_nodes(node,
7217 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7218 medium_soft = children.difference(
7219 mygraph.child_nodes(node,
7221 DepPrioritySatisfiedRange.ignore_medium_soft))
7222 medium_soft.difference_update(soft)
7223 for child in medium_soft:
7224 if child in selected_nodes:
7226 if child in asap_nodes:
7228 asap_nodes.append(child)
# Order a multi-node selection by circular-dependency strength bias.
7230 if selected_nodes and len(selected_nodes) > 1:
7231 if not isinstance(selected_nodes, list):
7232 selected_nodes = list(selected_nodes)
7233 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7235 if not selected_nodes and not myblocker_uninstalls.is_empty():
7236 # An Uninstall task needs to be executed in order to
7237 # avoid conflict if possible.
7240 priority_range = DepPrioritySatisfiedRange
7242 priority_range = DepPriorityNormalRange
7244 mergeable_nodes = get_nodes(
7245 ignore_priority=ignore_uninst_or_med)
7247 min_parent_deps = None
7249 for task in myblocker_uninstalls.leaf_nodes():
7250 # Do some sanity checks so that system or world packages
7251 # don't get uninstalled inappropriately here (only really
7252 # necessary when --complete-graph has not been enabled).
7254 if task in ignored_uninstall_tasks:
7257 if task in scheduled_uninstalls:
7258 # It's been scheduled but it hasn't
7259 # been executed yet due to dependence
7260 # on installation of blocking packages.
7263 root_config = self.roots[task.root]
7264 inst_pkg = self._pkg_cache[
7265 ("installed", task.root, task.cpv, "nomerge")]
7267 if self.digraph.contains(inst_pkg):
7270 forbid_overlap = False
7271 heuristic_overlap = False
7272 for blocker in myblocker_uninstalls.parent_nodes(task):
# EAPI 0/1 blockers carry no explicit overlap semantics, so overlap
# handling falls back to a heuristic for them.
7273 if blocker.eapi in ("0", "1"):
7274 heuristic_overlap = True
7275 elif blocker.atom.blocker.overlap.forbid:
7276 forbid_overlap = True
7278 if forbid_overlap and running_root == task.root:
7281 if heuristic_overlap and running_root == task.root:
7282 # Never uninstall sys-apps/portage or it's essential
7283 # dependencies, except through replacement.
7285 runtime_dep_atoms = \
7286 list(runtime_deps.iterAtomsForPackage(task))
7287 except portage.exception.InvalidDependString, e:
7288 portage.writemsg("!!! Invalid PROVIDE in " + \
7289 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7290 (task.root, task.cpv, e), noiselevel=-1)
7294 # Don't uninstall a runtime dep if it appears
7295 # to be the only suitable one installed.
7297 vardb = root_config.trees["vartree"].dbapi
7298 for atom in runtime_dep_atoms:
7299 other_version = None
7300 for pkg in vardb.match_pkgs(atom):
7301 if pkg.cpv == task.cpv and \
7302 pkg.metadata["COUNTER"] == \
7303 task.metadata["COUNTER"]:
7307 if other_version is None:
7313 # For packages in the system set, don't take
7314 # any chances. If the conflict can't be resolved
7315 # by a normal replacement operation then abort.
7318 for atom in root_config.sets[
7319 "system"].iterAtomsForPackage(task):
7322 except portage.exception.InvalidDependString, e:
7323 portage.writemsg("!!! Invalid PROVIDE in " + \
7324 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7325 (task.root, task.cpv, e), noiselevel=-1)
7331 # Note that the world check isn't always
7332 # necessary since self._complete_graph() will
7333 # add all packages from the system and world sets to the
7334 # graph. This just allows unresolved conflicts to be
7335 # detected as early as possible, which makes it possible
7336 # to avoid calling self._complete_graph() when it is
7337 # unnecessary due to blockers triggering an abortion.
7339 # For packages in the world set, go ahead an uninstall
7340 # when necessary, as long as the atom will be satisfied
7341 # in the final state.
7342 graph_db = self.mydbapi[task.root]
7345 for atom in root_config.sets[
7346 "world"].iterAtomsForPackage(task):
7348 for pkg in graph_db.match_pkgs(atom):
7355 self._blocked_world_pkgs[inst_pkg] = atom
7357 except portage.exception.InvalidDependString, e:
7358 portage.writemsg("!!! Invalid PROVIDE in " + \
7359 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7360 (task.root, task.cpv, e), noiselevel=-1)
7366 # Check the deps of parent nodes to ensure that
7367 # the chosen task produces a leaf node. Maybe
7368 # this can be optimized some more to make the
7369 # best possible choice, but the current algorithm
7370 # is simple and should be near optimal for most
7372 mergeable_parent = False
7374 for parent in mygraph.parent_nodes(task):
7375 parent_deps.update(mygraph.child_nodes(parent,
7376 ignore_priority=priority_range.ignore_medium_soft))
7377 if parent in mergeable_nodes and \
7378 gather_deps(ignore_uninst_or_med_soft,
7379 mergeable_nodes, set(), parent):
7380 mergeable_parent = True
7382 if not mergeable_parent:
7385 parent_deps.remove(task)
# Prefer the candidate uninstall whose parents have the fewest
# outstanding deps (closest to producing a leaf node).
7386 if min_parent_deps is None or \
7387 len(parent_deps) < min_parent_deps:
7388 min_parent_deps = len(parent_deps)
7391 if uninst_task is not None:
7392 # The uninstall is performed only after blocking
7393 # packages have been merged on top of it. File
7394 # collisions between blocking packages are detected
7395 # and removed from the list of files to be uninstalled.
7396 scheduled_uninstalls.add(uninst_task)
7397 parent_nodes = mygraph.parent_nodes(uninst_task)
7399 # Reverse the parent -> uninstall edges since we want
7400 # to do the uninstall after blocking packages have
7401 # been merged on top of it.
7402 mygraph.remove(uninst_task)
7403 for blocked_pkg in parent_nodes:
7404 mygraph.add(blocked_pkg, uninst_task,
7405 priority=BlockerDepPriority.instance)
7406 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7407 scheduler_graph.add(blocked_pkg, uninst_task,
7408 priority=BlockerDepPriority.instance)
7410 # Reset the state variables for leaf node selection and
7411 # continue trying to select leaf nodes.
7413 drop_satisfied = False
7416 if not selected_nodes:
7417 # Only select root nodes as a last resort. This case should
7418 # only trigger when the graph is nearly empty and the only
7419 # remaining nodes are isolated (no parents or children). Since
7420 # the nodes must be isolated, ignore_priority is not needed.
7421 selected_nodes = get_nodes()
7423 if not selected_nodes and not drop_satisfied:
7424 drop_satisfied = True
7427 if not selected_nodes and not myblocker_uninstalls.is_empty():
7428 # If possible, drop an uninstall task here in order to avoid
7429 # the circular deps code path. The corresponding blocker will
7430 # still be counted as an unresolved conflict.
7432 for node in myblocker_uninstalls.leaf_nodes():
7434 mygraph.remove(node)
7439 ignored_uninstall_tasks.add(node)
7442 if uninst_task is not None:
7443 # Reset the state variables for leaf node selection and
7444 # continue trying to select leaf nodes.
7446 drop_satisfied = False
# Nothing selectable and no uninstall escape hatch: circular deps.
7449 if not selected_nodes:
7450 self._circular_deps_for_display = mygraph
7451 raise self._unknown_internal_error()
7453 # At this point, we've succeeded in selecting one or more nodes, so
7454 # reset state variables for leaf node selection.
7456 drop_satisfied = False
7458 mygraph.difference_update(selected_nodes)
7460 for node in selected_nodes:
7461 if isinstance(node, Package) and \
7462 node.operation == "nomerge":
7465 # Handle interactions between blockers
7466 # and uninstallation tasks.
7467 solved_blockers = set()
7469 if isinstance(node, Package) and \
7470 "uninstall" == node.operation:
7471 have_uninstall_task = True
7474 vardb = self.trees[node.root]["vartree"].dbapi
7475 previous_cpv = vardb.match(node.slot_atom)
7477 # The package will be replaced by this one, so remove
7478 # the corresponding Uninstall task if necessary.
7479 previous_cpv = previous_cpv[0]
7481 ("installed", node.root, previous_cpv, "uninstall")
7483 mygraph.remove(uninst_task)
7487 if uninst_task is not None and \
7488 uninst_task not in ignored_uninstall_tasks and \
7489 myblocker_uninstalls.contains(uninst_task):
7490 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7491 myblocker_uninstalls.remove(uninst_task)
7492 # Discard any blockers that this Uninstall solves.
7493 for blocker in blocker_nodes:
7494 if not myblocker_uninstalls.child_nodes(blocker):
7495 myblocker_uninstalls.remove(blocker)
7496 solved_blockers.add(blocker)
7498 retlist.append(node)
7500 if (isinstance(node, Package) and \
7501 "uninstall" == node.operation) or \
7502 (uninst_task is not None and \
7503 uninst_task in scheduled_uninstalls):
7504 # Include satisfied blockers in the merge list
7505 # since the user might be interested and also
7506 # it serves as an indicator that blocking packages
7507 # will be temporarily installed simultaneously.
7508 for blocker in solved_blockers:
7509 retlist.append(Blocker(atom=blocker.atom,
7510 root=blocker.root, eapi=blocker.eapi,
7513 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7514 for node in myblocker_uninstalls.root_nodes():
7515 unsolvable_blockers.add(node)
7517 for blocker in unsolvable_blockers:
7518 retlist.append(blocker)
7520 # If any Uninstall tasks need to be executed in order
7521 # to avoid a conflict, complete the graph with any
7522 # dependencies that may have been initially
7523 # neglected (to ensure that unsafe Uninstall tasks
7524 # are properly identified and blocked from execution).
7525 if have_uninstall_task and \
7527 not unsolvable_blockers:
7528 self.myparams.add("complete")
7529 raise self._serialize_tasks_retry("")
# On unacceptable blocker or slot conflicts, cache the partial result so
# the caller can display it, then abort via the internal error.
7531 if unsolvable_blockers and \
7532 not self._accept_blocker_conflicts():
7533 self._unsatisfied_blockers_for_display = unsolvable_blockers
7534 self._serialized_tasks_cache = retlist[:]
7535 self._scheduler_graph = scheduler_graph
7536 raise self._unknown_internal_error()
7538 if self._slot_collision_info and \
7539 not self._accept_blocker_conflicts():
7540 self._serialized_tasks_cache = retlist[:]
7541 self._scheduler_graph = scheduler_graph
7542 raise self._unknown_internal_error()
7544 return retlist, scheduler_graph
# Purpose (from visible code): diagnose a circular-dependency dead end --
# prune non-cycle root nodes, build a display order for the remaining
# cycle members, force --tree display mode, show the list and print the
# circular-dependency error plus advice about USE flags.
# NOTE(review): the embedded numbering jumps here (loop-termination lines
# and the display_order initialization are missing from this excerpt).
7546 def _show_circular_deps(self, mygraph):
7547 # No leaf nodes are available, so we have a circular
7548 # dependency panic situation. Reduce the noise level to a
7549 # minimum via repeated elimination of root nodes since they
7550 # have no parents and thus can not be part of a cycle.
7552 root_nodes = mygraph.root_nodes(
7553 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7556 mygraph.difference_update(root_nodes)
7557 # Display the USE flags that are enabled on nodes that are part
7558 # of dependency cycles in case that helps the user decide to
7559 # disable some of them.
7561 tempgraph = mygraph.copy()
7562 while not tempgraph.empty():
7563 nodes = tempgraph.leaf_nodes()
7565 node = tempgraph.order[0]
7568 display_order.append(node)
7569 tempgraph.remove(node)
7570 display_order.reverse()
# Force verbose tree output for the diagnostic display (mutates myopts).
7571 self.myopts.pop("--quiet", None)
7572 self.myopts.pop("--verbose", None)
7573 self.myopts["--tree"] = True
7574 portage.writemsg("\n\n", noiselevel=-1)
7575 self.display(display_order)
7576 prefix = colorize("BAD", " * ")
7577 portage.writemsg("\n", noiselevel=-1)
7578 portage.writemsg(prefix + "Error: circular dependencies:\n",
7580 portage.writemsg("\n", noiselevel=-1)
7581 mygraph.debug_print()
7582 portage.writemsg("\n", noiselevel=-1)
7583 portage.writemsg(prefix + "Note that circular dependencies " + \
7584 "can often be avoided by temporarily\n", noiselevel=-1)
7585 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7586 "optional dependencies.\n", noiselevel=-1)
7588 def _show_merge_list(self):
7589 if self._serialized_tasks_cache is not None and \
7590 not (self._displayed_list and \
7591 (self._displayed_list == self._serialized_tasks_cache or \
7592 self._displayed_list == \
7593 list(reversed(self._serialized_tasks_cache)))):
7594 display_list = self._serialized_tasks_cache[:]
7595 if "--tree" in self.myopts:
7596 display_list.reverse()
7597 self.display(display_list)
# Purpose (from visible code): after showing the merge list, explain
# unsatisfied blockers -- collect the conflicting packages with the
# parent atoms that pulled them in, prune noise, and write a capped,
# indented report to stderr, plus a docs link unless --quiet.
# NOTE(review): the embedded numbering jumps here, so initializations
# (conflict_pkgs, pruned_pkgs, msg, indent, max_parents, pruned_list)
# and several else/continue/break lines are missing from this excerpt.
7599 def _show_unsatisfied_blockers(self, blockers):
7600 self._show_merge_list()
7601 msg = "Error: The above package list contains " + \
7602 "packages which cannot be installed " + \
7603 "at the same time on the same system."
7604 prefix = colorize("BAD", " * ")
7605 from textwrap import wrap
7606 portage.writemsg("\n", noiselevel=-1)
7607 for line in wrap(msg, 70):
7608 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7610 # Display the conflicting packages along with the packages
7611 # that pulled them in. This is helpful for troubleshooting
7612 # cases in which blockers don't solve automatically and
7613 # the reasons are not apparent from the normal merge list
7617 for blocker in blockers:
7618 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7619 self._blocker_parents.parent_nodes(blocker)):
7620 parent_atoms = self._parent_atoms.get(pkg)
7621 if not parent_atoms:
# Packages with no recorded parent may still be protected by the
# world set; synthesize an "@world" parent entry in that case.
7622 atom = self._blocked_world_pkgs.get(pkg)
7623 if atom is not None:
7624 parent_atoms = set([("@world", atom)])
7626 conflict_pkgs[pkg] = parent_atoms
7629 # Reduce noise by pruning packages that are only
7630 # pulled in by other conflict packages.
7632 for pkg, parent_atoms in conflict_pkgs.iteritems():
7633 relevant_parent = False
7634 for parent, atom in parent_atoms:
7635 if parent not in conflict_pkgs:
7636 relevant_parent = True
7638 if not relevant_parent:
7639 pruned_pkgs.add(pkg)
7640 for pkg in pruned_pkgs:
7641 del conflict_pkgs[pkg]
7647 # Max number of parents shown, to avoid flooding the display.
7649 for pkg, parent_atoms in conflict_pkgs.iteritems():
7653 # Prefer packages that are not directly involved in a conflict.
7654 for parent_atom in parent_atoms:
7655 if len(pruned_list) >= max_parents:
7657 parent, atom = parent_atom
7658 if parent not in conflict_pkgs:
7659 pruned_list.add(parent_atom)
7661 for parent_atom in parent_atoms:
7662 if len(pruned_list) >= max_parents:
7664 pruned_list.add(parent_atom)
7666 omitted_parents = len(parent_atoms) - len(pruned_list)
7667 msg.append(indent + "%s pulled in by\n" % pkg)
7669 for parent_atom in pruned_list:
7670 parent, atom = parent_atom
7671 msg.append(2*indent)
7672 if isinstance(parent,
7673 (PackageArg, AtomArg)):
7674 # For PackageArg and AtomArg types, it's
7675 # redundant to display the atom attribute.
7676 msg.append(str(parent))
7678 # Display the specific atom from SetArg or
7680 msg.append("%s required by %s" % (atom, parent))
7684 msg.append(2*indent)
7685 msg.append("(and %d more)\n" % omitted_parents)
7689 sys.stderr.write("".join(msg))
7692 if "--quiet" not in self.myopts:
7693 show_blocker_docs_link()
7695 def display(self, mylist, favorites=[], verbosity=None):
7697 # This is used to prevent display_problems() from
7698 # redundantly displaying this exact same merge list
7699 # again via _show_merge_list().
7700 self._displayed_list = mylist
7702 if verbosity is None:
7703 verbosity = ("--quiet" in self.myopts and 1 or \
7704 "--verbose" in self.myopts and 3 or 2)
7705 favorites_set = InternalPackageSet(favorites)
7706 oneshot = "--oneshot" in self.myopts or \
7707 "--onlydeps" in self.myopts
7708 columns = "--columns" in self.myopts
7713 counters = PackageCounters()
7715 if verbosity == 1 and "--verbose" not in self.myopts:
7716 def create_use_string(*args):
7719 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7721 is_new, reinst_flags,
7722 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7723 alphabetical=("--alphabetical" in self.myopts)):
7731 cur_iuse = set(cur_iuse)
7732 enabled_flags = cur_iuse.intersection(cur_use)
7733 removed_iuse = set(old_iuse).difference(cur_iuse)
7734 any_iuse = cur_iuse.union(old_iuse)
7735 any_iuse = list(any_iuse)
7737 for flag in any_iuse:
7740 reinst_flag = reinst_flags and flag in reinst_flags
7741 if flag in enabled_flags:
7743 if is_new or flag in old_use and \
7744 (all_flags or reinst_flag):
7745 flag_str = red(flag)
7746 elif flag not in old_iuse:
7747 flag_str = yellow(flag) + "%*"
7748 elif flag not in old_use:
7749 flag_str = green(flag) + "*"
7750 elif flag in removed_iuse:
7751 if all_flags or reinst_flag:
7752 flag_str = yellow("-" + flag) + "%"
7755 flag_str = "(" + flag_str + ")"
7756 removed.append(flag_str)
7759 if is_new or flag in old_iuse and \
7760 flag not in old_use and \
7761 (all_flags or reinst_flag):
7762 flag_str = blue("-" + flag)
7763 elif flag not in old_iuse:
7764 flag_str = yellow("-" + flag)
7765 if flag not in iuse_forced:
7767 elif flag in old_use:
7768 flag_str = green("-" + flag) + "*"
7770 if flag in iuse_forced:
7771 flag_str = "(" + flag_str + ")"
7773 enabled.append(flag_str)
7775 disabled.append(flag_str)
7778 ret = " ".join(enabled)
7780 ret = " ".join(enabled + disabled + removed)
7782 ret = '%s="%s" ' % (name, ret)
7785 repo_display = RepoDisplay(self.roots)
7789 mygraph = self.digraph.copy()
7791 # If there are any Uninstall instances, add the corresponding
7792 # blockers to the digraph (useful for --tree display).
7794 executed_uninstalls = set(node for node in mylist \
7795 if isinstance(node, Package) and node.operation == "unmerge")
7797 for uninstall in self._blocker_uninstalls.leaf_nodes():
7798 uninstall_parents = \
7799 self._blocker_uninstalls.parent_nodes(uninstall)
7800 if not uninstall_parents:
7803 # Remove the corresponding "nomerge" node and substitute
7804 # the Uninstall node.
7805 inst_pkg = self._pkg_cache[
7806 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7808 mygraph.remove(inst_pkg)
7813 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7815 inst_pkg_blockers = []
7817 # Break the Package -> Uninstall edges.
7818 mygraph.remove(uninstall)
7820 # Resolution of a package's blockers
7821 # depend on it's own uninstallation.
7822 for blocker in inst_pkg_blockers:
7823 mygraph.add(uninstall, blocker)
7825 # Expand Package -> Uninstall edges into
7826 # Package -> Blocker -> Uninstall edges.
7827 for blocker in uninstall_parents:
7828 mygraph.add(uninstall, blocker)
7829 for parent in self._blocker_parents.parent_nodes(blocker):
7830 if parent != inst_pkg:
7831 mygraph.add(blocker, parent)
7833 # If the uninstall task did not need to be executed because
7834 # of an upgrade, display Blocker -> Upgrade edges since the
7835 # corresponding Blocker -> Uninstall edges will not be shown.
7837 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7838 if upgrade_node is not None and \
7839 uninstall not in executed_uninstalls:
7840 for blocker in uninstall_parents:
7841 mygraph.add(upgrade_node, blocker)
7843 unsatisfied_blockers = []
7848 if isinstance(x, Blocker) and not x.satisfied:
7849 unsatisfied_blockers.append(x)
7852 if "--tree" in self.myopts:
7853 depth = len(tree_nodes)
7854 while depth and graph_key not in \
7855 mygraph.child_nodes(tree_nodes[depth-1]):
7858 tree_nodes = tree_nodes[:depth]
7859 tree_nodes.append(graph_key)
7860 display_list.append((x, depth, True))
7861 shown_edges.add((graph_key, tree_nodes[depth-1]))
7863 traversed_nodes = set() # prevent endless circles
7864 traversed_nodes.add(graph_key)
7865 def add_parents(current_node, ordered):
7867 # Do not traverse to parents if this node is an
7868 # an argument or a direct member of a set that has
7869 # been specified as an argument (system or world).
7870 if current_node not in self._set_nodes:
7871 parent_nodes = mygraph.parent_nodes(current_node)
7873 child_nodes = set(mygraph.child_nodes(current_node))
7874 selected_parent = None
7875 # First, try to avoid a direct cycle.
7876 for node in parent_nodes:
7877 if not isinstance(node, (Blocker, Package)):
7879 if node not in traversed_nodes and \
7880 node not in child_nodes:
7881 edge = (current_node, node)
7882 if edge in shown_edges:
7884 selected_parent = node
7886 if not selected_parent:
7887 # A direct cycle is unavoidable.
7888 for node in parent_nodes:
7889 if not isinstance(node, (Blocker, Package)):
7891 if node not in traversed_nodes:
7892 edge = (current_node, node)
7893 if edge in shown_edges:
7895 selected_parent = node
7898 shown_edges.add((current_node, selected_parent))
7899 traversed_nodes.add(selected_parent)
7900 add_parents(selected_parent, False)
7901 display_list.append((current_node,
7902 len(tree_nodes), ordered))
7903 tree_nodes.append(current_node)
7905 add_parents(graph_key, True)
7907 display_list.append((x, depth, True))
7908 mylist = display_list
7909 for x in unsatisfied_blockers:
7910 mylist.append((x, 0, True))
7912 last_merge_depth = 0
7913 for i in xrange(len(mylist)-1,-1,-1):
7914 graph_key, depth, ordered = mylist[i]
7915 if not ordered and depth == 0 and i > 0 \
7916 and graph_key == mylist[i-1][0] and \
7917 mylist[i-1][1] == 0:
7918 # An ordered node got a consecutive duplicate when the tree was
7922 if ordered and graph_key[-1] != "nomerge":
7923 last_merge_depth = depth
7925 if depth >= last_merge_depth or \
7926 i < len(mylist) - 1 and \
7927 depth >= mylist[i+1][1]:
7930 from portage import flatten
7931 from portage.dep import use_reduce, paren_reduce
7932 # files to fetch list - avoids counting a same file twice
7933 # in size display (verbose mode)
7936 # Use this set to detect when all the "repoadd" strings are "[0]"
7937 # and disable the entire repo display in this case.
7940 for mylist_index in xrange(len(mylist)):
7941 x, depth, ordered = mylist[mylist_index]
7945 portdb = self.trees[myroot]["porttree"].dbapi
7946 bindb = self.trees[myroot]["bintree"].dbapi
7947 vardb = self.trees[myroot]["vartree"].dbapi
7948 vartree = self.trees[myroot]["vartree"]
7949 pkgsettings = self.pkgsettings[myroot]
7952 indent = " " * depth
7954 if isinstance(x, Blocker):
7956 blocker_style = "PKG_BLOCKER_SATISFIED"
7957 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7959 blocker_style = "PKG_BLOCKER"
7960 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7962 counters.blocks += 1
7964 counters.blocks_satisfied += 1
7965 resolved = portage.key_expand(
7966 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7967 if "--columns" in self.myopts and "--quiet" in self.myopts:
7968 addl += " " + colorize(blocker_style, resolved)
7970 addl = "[%s %s] %s%s" % \
7971 (colorize(blocker_style, "blocks"),
7972 addl, indent, colorize(blocker_style, resolved))
7973 block_parents = self._blocker_parents.parent_nodes(x)
7974 block_parents = set([pnode[2] for pnode in block_parents])
7975 block_parents = ", ".join(block_parents)
7977 addl += colorize(blocker_style,
7978 " (\"%s\" is blocking %s)") % \
7979 (str(x.atom).lstrip("!"), block_parents)
7981 addl += colorize(blocker_style,
7982 " (is blocking %s)") % block_parents
7983 if isinstance(x, Blocker) and x.satisfied:
7988 blockers.append(addl)
7991 pkg_merge = ordered and pkg_status == "merge"
7992 if not pkg_merge and pkg_status == "merge":
7993 pkg_status = "nomerge"
7994 built = pkg_type != "ebuild"
7995 installed = pkg_type == "installed"
7997 metadata = pkg.metadata
7999 repo_name = metadata["repository"]
8000 if pkg_type == "ebuild":
8001 ebuild_path = portdb.findname(pkg_key)
8002 if not ebuild_path: # shouldn't happen
8003 raise portage.exception.PackageNotFound(pkg_key)
8004 repo_path_real = os.path.dirname(os.path.dirname(
8005 os.path.dirname(ebuild_path)))
8007 repo_path_real = portdb.getRepositoryPath(repo_name)
8008 pkg_use = list(pkg.use.enabled)
8010 restrict = flatten(use_reduce(paren_reduce(
8011 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8012 except portage.exception.InvalidDependString, e:
8013 if not pkg.installed:
8014 show_invalid_depstring_notice(x,
8015 pkg.metadata["RESTRICT"], str(e))
8019 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8020 "fetch" in restrict:
8023 counters.restrict_fetch += 1
8024 if portdb.fetch_check(pkg_key, pkg_use):
8027 counters.restrict_fetch_satisfied += 1
8029 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8030 #param is used for -u, where you still *do* want to see when something is being upgraded.
8033 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8034 if vardb.cpv_exists(pkg_key):
8035 addl=" "+yellow("R")+fetch+" "
8038 counters.reinst += 1
8039 elif pkg_status == "uninstall":
8040 counters.uninst += 1
8041 # filter out old-style virtual matches
8042 elif installed_versions and \
8043 portage.cpv_getkey(installed_versions[0]) == \
8044 portage.cpv_getkey(pkg_key):
8045 myinslotlist = vardb.match(pkg.slot_atom)
8046 # If this is the first install of a new-style virtual, we
8047 # need to filter out old-style virtual matches.
8048 if myinslotlist and \
8049 portage.cpv_getkey(myinslotlist[0]) != \
8050 portage.cpv_getkey(pkg_key):
8053 myoldbest = myinslotlist[:]
8055 if not portage.dep.cpvequal(pkg_key,
8056 portage.best([pkg_key] + myoldbest)):
8058 addl += turquoise("U")+blue("D")
8060 counters.downgrades += 1
8063 addl += turquoise("U") + " "
8065 counters.upgrades += 1
8067 # New slot, mark it new.
8068 addl = " " + green("NS") + fetch + " "
8069 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8071 counters.newslot += 1
8073 if "--changelog" in self.myopts:
8074 inst_matches = vardb.match(pkg.slot_atom)
8076 changelogs.extend(self.calc_changelog(
8077 portdb.findname(pkg_key),
8078 inst_matches[0], pkg_key))
8080 addl = " " + green("N") + " " + fetch + " "
8089 forced_flags = set()
8090 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8091 forced_flags.update(pkgsettings.useforce)
8092 forced_flags.update(pkgsettings.usemask)
8094 cur_use = [flag for flag in pkg.use.enabled \
8095 if flag in pkg.iuse.all]
8096 cur_iuse = sorted(pkg.iuse.all)
8098 if myoldbest and myinslotlist:
8099 previous_cpv = myoldbest[0]
8101 previous_cpv = pkg.cpv
8102 if vardb.cpv_exists(previous_cpv):
8103 old_iuse, old_use = vardb.aux_get(
8104 previous_cpv, ["IUSE", "USE"])
8105 old_iuse = list(set(
8106 filter_iuse_defaults(old_iuse.split())))
8108 old_use = old_use.split()
8115 old_use = [flag for flag in old_use if flag in old_iuse]
8117 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8119 use_expand.reverse()
8120 use_expand_hidden = \
8121 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8123 def map_to_use_expand(myvals, forcedFlags=False,
8127 for exp in use_expand:
8130 for val in myvals[:]:
8131 if val.startswith(exp.lower()+"_"):
8132 if val in forced_flags:
8133 forced[exp].add(val[len(exp)+1:])
8134 ret[exp].append(val[len(exp)+1:])
8137 forced["USE"] = [val for val in myvals \
8138 if val in forced_flags]
8140 for exp in use_expand_hidden:
8146 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8147 # are the only thing that triggered reinstallation.
8148 reinst_flags_map = {}
8149 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8150 reinst_expand_map = None
8151 if reinstall_for_flags:
8152 reinst_flags_map = map_to_use_expand(
8153 list(reinstall_for_flags), removeHidden=False)
8154 for k in list(reinst_flags_map):
8155 if not reinst_flags_map[k]:
8156 del reinst_flags_map[k]
8157 if not reinst_flags_map.get("USE"):
8158 reinst_expand_map = reinst_flags_map.copy()
8159 reinst_expand_map.pop("USE", None)
8160 if reinst_expand_map and \
8161 not set(reinst_expand_map).difference(
8163 use_expand_hidden = \
8164 set(use_expand_hidden).difference(
8167 cur_iuse_map, iuse_forced = \
8168 map_to_use_expand(cur_iuse, forcedFlags=True)
8169 cur_use_map = map_to_use_expand(cur_use)
8170 old_iuse_map = map_to_use_expand(old_iuse)
8171 old_use_map = map_to_use_expand(old_use)
8174 use_expand.insert(0, "USE")
8176 for key in use_expand:
8177 if key in use_expand_hidden:
8179 verboseadd += create_use_string(key.upper(),
8180 cur_iuse_map[key], iuse_forced[key],
8181 cur_use_map[key], old_iuse_map[key],
8182 old_use_map[key], is_new,
8183 reinst_flags_map.get(key))
8188 if pkg_type == "ebuild" and pkg_merge:
8190 myfilesdict = portdb.getfetchsizes(pkg_key,
8191 useflags=pkg_use, debug=self.edebug)
8192 except portage.exception.InvalidDependString, e:
8193 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8194 show_invalid_depstring_notice(x, src_uri, str(e))
8197 if myfilesdict is None:
8198 myfilesdict="[empty/missing/bad digest]"
8200 for myfetchfile in myfilesdict:
8201 if myfetchfile not in myfetchlist:
8202 mysize+=myfilesdict[myfetchfile]
8203 myfetchlist.append(myfetchfile)
8205 counters.totalsize += mysize
8206 verboseadd += format_size(mysize)
8209 # assign index for a previous version in the same slot
8210 has_previous = False
8211 repo_name_prev = None
8212 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8214 slot_matches = vardb.match(slot_atom)
8217 repo_name_prev = vardb.aux_get(slot_matches[0],
8220 # now use the data to generate output
8221 if pkg.installed or not has_previous:
8222 repoadd = repo_display.repoStr(repo_path_real)
8224 repo_path_prev = None
8226 repo_path_prev = portdb.getRepositoryPath(
8228 if repo_path_prev == repo_path_real:
8229 repoadd = repo_display.repoStr(repo_path_real)
8231 repoadd = "%s=>%s" % (
8232 repo_display.repoStr(repo_path_prev),
8233 repo_display.repoStr(repo_path_real))
8235 repoadd_set.add(repoadd)
8237 xs = [portage.cpv_getkey(pkg_key)] + \
8238 list(portage.catpkgsplit(pkg_key)[2:])
8245 if "COLUMNWIDTH" in self.settings:
8247 mywidth = int(self.settings["COLUMNWIDTH"])
8248 except ValueError, e:
8249 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8251 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8252 self.settings["COLUMNWIDTH"], noiselevel=-1)
8254 oldlp = mywidth - 30
8257 # Convert myoldbest from a list to a string.
8261 for pos, key in enumerate(myoldbest):
8262 key = portage.catpkgsplit(key)[2] + \
8263 "-" + portage.catpkgsplit(key)[3]
8264 if key[-3:] == "-r0":
8266 myoldbest[pos] = key
8267 myoldbest = blue("["+", ".join(myoldbest)+"]")
8270 root_config = self.roots[myroot]
8271 system_set = root_config.sets["system"]
8272 world_set = root_config.sets["world"]
8277 pkg_system = system_set.findAtomForPackage(pkg)
8278 pkg_world = world_set.findAtomForPackage(pkg)
8279 if not (oneshot or pkg_world) and \
8280 myroot == self.target_root and \
8281 favorites_set.findAtomForPackage(pkg):
8282 # Maybe it will be added to world now.
8283 if create_world_atom(pkg, favorites_set, root_config):
8285 except portage.exception.InvalidDependString:
8286 # This is reported elsewhere if relevant.
8289 def pkgprint(pkg_str):
8292 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8294 return colorize("PKG_MERGE_WORLD", pkg_str)
8296 return colorize("PKG_MERGE", pkg_str)
8297 elif pkg_status == "uninstall":
8298 return colorize("PKG_UNINSTALL", pkg_str)
8301 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8303 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8305 return colorize("PKG_NOMERGE", pkg_str)
8308 properties = flatten(use_reduce(paren_reduce(
8309 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8310 except portage.exception.InvalidDependString, e:
8311 if not pkg.installed:
8312 show_invalid_depstring_notice(pkg,
8313 pkg.metadata["PROPERTIES"], str(e))
8317 interactive = "interactive" in properties
8318 if interactive and pkg.operation == "merge":
8319 addl = colorize("WARN", "I") + addl[1:]
8321 counters.interactive += 1
8326 if "--columns" in self.myopts:
8327 if "--quiet" in self.myopts:
8328 myprint=addl+" "+indent+pkgprint(pkg_cp)
8329 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8330 myprint=myprint+myoldbest
8331 myprint=myprint+darkgreen("to "+x[1])
8335 myprint = "[%s] %s%s" % \
8336 (pkgprint(pkg_status.ljust(13)),
8337 indent, pkgprint(pkg.cp))
8339 myprint = "[%s %s] %s%s" % \
8340 (pkgprint(pkg.type_name), addl,
8341 indent, pkgprint(pkg.cp))
8342 if (newlp-nc_len(myprint)) > 0:
8343 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8344 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8345 if (oldlp-nc_len(myprint)) > 0:
8346 myprint=myprint+" "*(oldlp-nc_len(myprint))
8347 myprint=myprint+myoldbest
8348 myprint += darkgreen("to " + pkg.root)
8351 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8353 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8354 myprint += indent + pkgprint(pkg_key) + " " + \
8355 myoldbest + darkgreen("to " + myroot)
8357 if "--columns" in self.myopts:
8358 if "--quiet" in self.myopts:
8359 myprint=addl+" "+indent+pkgprint(pkg_cp)
8360 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8361 myprint=myprint+myoldbest
8365 myprint = "[%s] %s%s" % \
8366 (pkgprint(pkg_status.ljust(13)),
8367 indent, pkgprint(pkg.cp))
8369 myprint = "[%s %s] %s%s" % \
8370 (pkgprint(pkg.type_name), addl,
8371 indent, pkgprint(pkg.cp))
8372 if (newlp-nc_len(myprint)) > 0:
8373 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8374 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8375 if (oldlp-nc_len(myprint)) > 0:
8376 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8377 myprint += myoldbest
8380 myprint = "[%s] %s%s %s" % \
8381 (pkgprint(pkg_status.ljust(13)),
8382 indent, pkgprint(pkg.cpv),
8385 myprint = "[%s %s] %s%s %s" % \
8386 (pkgprint(pkg_type), addl, indent,
8387 pkgprint(pkg.cpv), myoldbest)
8389 if columns and pkg.operation == "uninstall":
8391 p.append((myprint, verboseadd, repoadd))
8393 if "--tree" not in self.myopts and \
8394 "--quiet" not in self.myopts and \
8395 not self._opts_no_restart.intersection(self.myopts) and \
8396 pkg.root == self._running_root.root and \
8397 portage.match_from_list(
8398 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8399 not vardb.cpv_exists(pkg.cpv) and \
8400 "--quiet" not in self.myopts:
8401 if mylist_index < len(mylist) - 1:
8402 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8403 p.append(colorize("WARN", " then resume the merge."))
8406 show_repos = repoadd_set and repoadd_set != set(["0"])
8409 if isinstance(x, basestring):
8410 out.write("%s\n" % (x,))
8413 myprint, verboseadd, repoadd = x
8416 myprint += " " + verboseadd
8418 if show_repos and repoadd:
8419 myprint += " " + teal("[%s]" % repoadd)
8421 out.write("%s\n" % (myprint,))
8430 sys.stdout.write(str(repo_display))
8432 if "--changelog" in self.myopts:
8434 for revision,text in changelogs:
8435 print bold('*'+revision)
8436 sys.stdout.write(text)
    def display_problems(self):
        """
        Display problems with the dependency graph such as slot collisions.
        This is called internally by display() to show the problems _after_
        the merge list where it is most likely to be seen, but if display()
        is not going to be called then this method should be called explicitly
        to ensure that the user is notified of problems with the graph.

        All output goes to stderr, except for unsatisfied dependencies which
        go to stdout for parsing by programs such as autounmask.
        """

        # Note that show_masked_packages() sends its output to
        # stdout, and some programs such as autounmask parse the
        # output in cases when emerge bails out. However, when
        # show_masked_packages() is called for installed packages
        # here, the message is a warning that is more appropriate
        # to send to stderr, so temporarily redirect stdout to
        # stderr. TODO: Fix output code so there's a cleaner way
        # to redirect everything to stderr.
        # [elided in this view: try block and backup of the original
        #  sys.stdout before the redirect]
        sys.stdout = sys.stderr
        self._display_problems()
        # NOTE(review): the finally clause that restores sys.stdout is not
        # visible in this extraction -- confirm it is intact in the file.

        # This goes to stdout for parsing by programs like autounmask.
        for pargs, kwargs in self._unsatisfied_deps_for_display:
            self._show_unsatisfied_dep(*pargs, **kwargs)
    def _display_problems(self):
        # Emit accumulated graph problems to stderr: circular deps,
        # unsatisfied blockers / slot collisions, missing arguments,
        # package.provided conflicts, and masked installed packages.
        if self._circular_deps_for_display is not None:
            self._show_circular_deps(
                self._circular_deps_for_display)

        # The user is only notified of a slot conflict if
        # there are no unresolvable blocker conflicts.
        if self._unsatisfied_blockers_for_display is not None:
            self._show_unsatisfied_blockers(
                self._unsatisfied_blockers_for_display)
        # [elided in this view: the alternative branch guarding the
        #  slot collision notice]
            self._show_slot_collision_notice()

        # TODO: Add generic support for "set problem" handlers so that
        # the below warnings aren't special cases for world only.

        if self._missing_args:
            world_problems = False
            if "world" in self._sets:
                # Filter out indirect members of world (from nested sets)
                # since only direct members of world are desired here.
                world_set = self.roots[self.target_root].sets["world"]
                for arg, atom in self._missing_args:
                    if arg.name == "world" and atom in world_set:
                        world_problems = True
                        # [elided in this view: early loop exit]
            # [elided in this view: guard on world_problems before the
            #  warning below]
                sys.stderr.write("\n!!! Problems have been " + \
                    "detected with your world file\n")
                sys.stderr.write("!!! Please run " + \
                    green("emaint --check world")+"\n\n")

        if self._missing_args:
            sys.stderr.write("\n" + colorize("BAD", "!!!") + \
                " Ebuilds for the following packages are either all\n")
            sys.stderr.write(colorize("BAD", "!!!") + \
                " masked or don't exist:\n")
            sys.stderr.write(" ".join(str(atom) for arg, atom in \
                self._missing_args) + "\n")

        if self._pprovided_args:
            # Group offending (arg, atom) pairs and record which parent
            # sets pulled each one in.
            # [elided in this view: initialization of arg_refs]
            for arg, atom in self._pprovided_args:
                if isinstance(arg, SetArg):
                    # [elided in this view: parent assignment]
                    arg_atom = (atom, atom)
                # [elided in this view: else branch / parent assignment]
                    arg_atom = (arg.arg, atom)
                refs = arg_refs.setdefault(arg_atom, [])
                if parent not in refs:
                    # [elided in this view: refs.append(parent)]
            # [elided in this view: initialization of msg]
            msg.append(bad("\nWARNING: "))
            if len(self._pprovided_args) > 1:
                msg.append("Requested packages will not be " + \
                    "merged because they are listed in\n")
            # [elided in this view: else branch]
                msg.append("A requested package will not be " + \
                    "merged because it is listed in\n")
            msg.append("package.provided:\n\n")
            problems_sets = set()
            for (arg, atom), refs in arg_refs.iteritems():
                # [elided in this view: lines preceding the update]
                problems_sets.update(refs)
                ref_string = ", ".join(["'%s'" % name for name in refs])
                ref_string = " pulled in by " + ref_string
                msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
            # [elided in this view: intervening line]
            if "world" in problems_sets:
                msg.append("This problem can be solved in one of the following ways:\n\n")
                msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
                msg.append("  B) Uninstall offending packages (cleans them from world).\n")
                msg.append("  C) Remove offending entries from package.provided.\n\n")
                msg.append("The best course of action depends on the reason that an offending\n")
                msg.append("package.provided entry exists.\n\n")
            sys.stderr.write("".join(msg))

        masked_packages = []
        for pkg in self._masked_installed:
            root_config = pkg.root_config
            pkgsettings = self.pkgsettings[pkg.root]
            mreasons = get_masking_status(pkg, pkgsettings, root_config)
            masked_packages.append((root_config, pkgsettings,
                pkg.cpv, pkg.metadata, mreasons))
        # [elided in this view: guard on masked_packages before the warning]
            sys.stderr.write("\n" + colorize("BAD", "!!!") + \
                " The following installed packages are masked:\n")
            show_masked_packages(masked_packages)
    def calc_changelog(self,ebuildpath,current,next):
        """
        Return ChangeLog entries between the installed version (current)
        and the version about to be merged (next), as produced by
        find_changelog_tags(). NOTE(review): several early-return and
        break lines are elided from this extraction.
        """
        if ebuildpath == None or not os.path.exists(ebuildpath):
            # [elided in this view: early return]
        # Normalize both versions to "version-revision" form, dropping
        # a redundant -r0 suffix.
        current = '-'.join(portage.catpkgsplit(current)[1:])
        if current.endswith('-r0'):
            current = current[:-3]
        next = '-'.join(portage.catpkgsplit(next)[1:])
        if next.endswith('-r0'):
            # [elided in this view: stripping of the -r0 suffix]
        # The ChangeLog lives next to the ebuild.
        changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
        # [elided in this view: try statement]
            changelog = open(changelogpath).read()
        except SystemExit, e:
            raise # Needed else can't exit
        # [elided in this view: generic exception handler]
        divisions = self.find_changelog_tags(changelog)
        #print 'XX from',current,'to',next
        #for div,text in divisions: print 'XX',div
        # skip entries for all revisions above the one we are about to emerge
        for i in range(len(divisions)):
            if divisions[i][0]==next:
                divisions = divisions[i:]
                # [elided in this view: loop exit]
        # find out how many entries we are going to display
        for i in range(len(divisions)):
            if divisions[i][0]==current:
                divisions = divisions[:i]
                # [elided in this view: loop exit / else clause]
        # couldn't find the current revision in the list. display nothing
        # [elided in this view: return statements]
    def find_changelog_tags(self,changelog):
        """
        Split raw ChangeLog text into (release, text) chunks, keyed on
        the "* <version>" header lines. Trailing '.ebuild' and '-r0'
        suffixes are stripped from release names.
        """
        # [elided in this view: accumulator initialization and loop header]
        # Header lines look like "* name-1.2.3 (date)".
        match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
        # [elided in this view: no-match branch]
        if release is not None:
            divs.append((release,changelog))
        # [elided in this view: return of accumulated divisions]
        if release is not None:
            # Text up to the next header belongs to the previous release.
            divs.append((release,changelog[:match.start()]))
        changelog = changelog[match.end():]
        release = match.group(1)
        if release.endswith('.ebuild'):
            release = release[:-7]
        if release.endswith('-r0'):
            release = release[:-3]
    def saveNomergeFavorites(self):
        """Find atoms in favorites that are not in the mergelist and add them
        to the world file if necessary."""
        # Options under which the world file must not be modified.
        for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
            "--oneshot", "--onlydeps", "--pretend"):
            if x in self.myopts:
                # [elided in this view: early return]
        root_config = self.roots[self.target_root]
        world_set = root_config.sets["world"]

        world_locked = False
        if hasattr(world_set, "lock"):
            # [elided in this view: lock acquisition and flag update]

        if hasattr(world_set, "load"):
            world_set.load() # maybe it's changed on disk

        args_set = self._sets["args"]
        portdb = self.trees[self.target_root]["porttree"].dbapi
        added_favorites = set()
        for x in self._set_nodes:
            pkg_type, root, pkg_key, pkg_status = x
            if pkg_status != "nomerge":
                # [elided in this view: skip of non-nomerge nodes]

            # [elided in this view: try statement]
                myfavkey = create_world_atom(x, args_set, root_config)
                # [elided in this view: guard on myfavkey]
                    if myfavkey in added_favorites:
                        # [elided in this view: skip of duplicates]
                    added_favorites.add(myfavkey)
            except portage.exception.InvalidDependString, e:
                writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                    (pkg_key, str(e)), noiselevel=-1)
                writemsg("!!! see '%s'\n\n" % os.path.join(
                    root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
        # [elided in this view: initialization of all_added]
        for k in self._sets:
            if k in ("args", "world") or not root_config.sets[k].world_candidate:
                # [elided in this view: skip of non-candidates]
            all_added.append(SETPREFIX + k)
        all_added.extend(added_favorites)
        # [elided in this view: sort and per-entry loop header]
            print ">>> Recording %s in \"world\" favorites file..." % \
                colorize("INFORM", str(a))
        # [elided in this view: guard on all_added before the update]
            world_set.update(all_added)
        # NOTE(review): the unlock of world_set (paired with world_locked
        # above) is not visible in this extraction -- confirm it is intact.
    def loadResumeCommand(self, resume_data, skip_masked=False):
        """
        Add a resume command to the graph and validate it in the process. This
        will raise a PackageNotFound exception if a package is not available.
        """

        if not isinstance(resume_data, dict):
            # [elided in this view: failure return]

        mergelist = resume_data.get("mergelist")
        if not isinstance(mergelist, list):
            # [elided in this view: failure return]

        fakedb = self.mydbapi
        # [elided in this view: trees/masked_tasks setup]
        serialized_tasks = []
        # [elided in this view: loop header over mergelist entries]
            # Each entry must be a 4-item [type, root, cpv, action] list.
            if not (isinstance(x, list) and len(x) == 4):
                # [elided in this view: skip of malformed entries]
            pkg_type, myroot, pkg_key, action = x
            if pkg_type not in self.pkg_tree_map:
                # [elided in this view: skip of unknown types]
            if action != "merge":
                # [elided in this view: skip of non-merge actions]
            tree_type = self.pkg_tree_map[pkg_type]
            mydb = trees[myroot][tree_type].dbapi
            db_keys = list(self._trees_orig[myroot][
                tree_type].dbapi._aux_cache_keys)
            # [elided in this view: try statement]
                metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
            # [elided in this view: KeyError handler]
                # It does not exist or it is corrupt.
                if action == "uninstall":
                    # [elided in this view: skip]
                raise portage.exception.PackageNotFound(pkg_key)
            installed = action == "uninstall"
            built = pkg_type != "ebuild"
            root_config = self.roots[myroot]
            pkg = Package(built=built, cpv=pkg_key,
                installed=installed, metadata=metadata,
                operation=action, root_config=root_config,
            # [elided in this view: remaining constructor argument(s)]
            if pkg_type == "ebuild":
                pkgsettings = self.pkgsettings[myroot]
                pkgsettings.setcpv(pkg)
                pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
                pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
            self._pkg_cache[pkg] = pkg

            root_config = self.roots[pkg.root]
            if "merge" == pkg.operation and \
                not visible(root_config.settings, pkg):
                # [elided in this view: skip_masked branch selection]
                    masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
                # [elided in this view: else branch]
                    self._unsatisfied_deps_for_display.append(
                        ((pkg.root, "="+pkg.cpv), {"myparent":None}))

            fakedb[myroot].cpv_inject(pkg)
            serialized_tasks.append(pkg)
            self.spinner.update()

        if self._unsatisfied_deps_for_display:
            # [elided in this view: failure return]

        if not serialized_tasks or "--nodeps" in self.myopts:
            self._serialized_tasks_cache = serialized_tasks
            self._scheduler_graph = self.digraph
        # [elided in this view: else branch]
            self._select_package = self._select_pkg_from_graph
            self.myparams.add("selective")
            # Always traverse deep dependencies in order to account for
            # potentially unsatisfied dependencies of installed packages.
            # This is necessary for correct --keep-going or --resume operation
            # in case a package from a group of circularly dependent packages
            # fails. In this case, a package which has recently been installed
            # may have an unsatisfied circular dependency (pulled in by
            # PDEPEND, for example). So, even though a package is already
            # installed, it may not have all of it's dependencies satisfied, so
            # it may not be usable. If such a package is in the subgraph of
            # deep depenedencies of a scheduled build, that build needs to
            # be cancelled. In order for this type of situation to be
            # recognized, deep traversal of dependencies is required.
            self.myparams.add("deep")

            favorites = resume_data.get("favorites")
            args_set = self._sets["args"]
            if isinstance(favorites, list):
                args = self._load_favorites(favorites)
            # [elided in this view: else branch with empty args]

            for task in serialized_tasks:
                if isinstance(task, Package) and \
                    task.operation == "merge":
                    if not self._add_pkg(task, None):
                        # [elided in this view: failure return]

            # Packages for argument atoms need to be explicitly
            # added via _add_pkg() so that they are included in the
            # digraph (needed at least for --tree display).
            # [elided in this view: loop header over args]
                for atom in arg.set:
                    pkg, existing_node = self._select_package(
                        arg.root_config.root, atom)
                    if existing_node is None and \
                        # [elided in this view: remainder of condition]
                        if not self._add_pkg(pkg, Dependency(atom=atom,
                            root=pkg.root, parent=arg)):
                            # [elided in this view: failure return]

            # Allow unsatisfied deps here to avoid showing a masking
            # message for an unsatisfied dep that isn't necessarily
            # masked.
            if not self._create_graph(allow_unsatisfied=True):
                # [elided in this view: failure return]

            unsatisfied_deps = []
            for dep in self._unsatisfied_deps:
                if not isinstance(dep.parent, Package):
                    # [elided in this view: skip]
                if dep.parent.operation == "merge":
                    unsatisfied_deps.append(dep)
                    # [elided in this view: skip to next dep]

                # For unsatisfied deps of installed packages, only account for
                # them if they are in the subgraph of dependencies of a package
                # which is scheduled to be installed.
                unsatisfied_install = False
                # [elided in this view: traversed-set initialization]
                dep_stack = self.digraph.parent_nodes(dep.parent)
                # [elided in this view: traversal loop header]
                    node = dep_stack.pop()
                    if not isinstance(node, Package):
                        # [elided in this view: skip]
                    if node.operation == "merge":
                        unsatisfied_install = True
                        # [elided in this view: traversal exit]
                    if node in traversed:
                        # [elided in this view: skip]
                    dep_stack.extend(self.digraph.parent_nodes(node))

                if unsatisfied_install:
                    unsatisfied_deps.append(dep)

            if masked_tasks or unsatisfied_deps:
                # This probably means that a required package
                # was dropped via --skipfirst. It makes the
                # resume list invalid, so convert it to a
                # UnsatisfiedResumeDep exception.
                raise self.UnsatisfiedResumeDep(self,
                    masked_tasks + unsatisfied_deps)
            self._serialized_tasks_cache = None
            # [elided in this view: retry/return handling]
        except self._unknown_internal_error:
            # [elided in this view: failure return]
    def _load_favorites(self, favorites):
        """
        Use a list of favorites to resume state from a
        previous select_files() call. This creates similar
        DependencyArg instances to those that would have
        been created by the original select_files() call.
        This allows Package instances to be matched with
        DependencyArg instances during graph creation.
        """
        root_config = self.roots[self.target_root]
        getSetAtoms = root_config.setconfig.getSetAtoms
        sets = root_config.sets
        # [elided in this view: args initialization and loop header]
            if not isinstance(x, basestring):
                # [elided in this view: skip of non-string entries]
            if x in ("system", "world"):
                # [elided in this view: normalization to SETPREFIX form]
            if x.startswith(SETPREFIX):
                s = x[len(SETPREFIX):]
                # [elided in this view: guards skipping unknown or
                #  already-loaded sets]
                # Recursively expand sets so that containment tests in
                # self._get_parent_sets() properly match atoms in nested
                # sets (like if world contains system).
                expanded_set = InternalPackageSet(
                    initial_atoms=getSetAtoms(s))
                self._sets[s] = expanded_set
                args.append(SetArg(arg=x, set=expanded_set,
                    root_config=root_config))
            # [elided in this view: else branch]
                if not portage.isvalidatom(x):
                    # [elided in this view: skip of invalid atoms]
                args.append(AtomArg(arg=x, atom=x,
                    root_config=root_config))

        self._set_args(args)
        # [elided in this view: return of args]
    class UnsatisfiedResumeDep(portage.exception.PortageException):
        """
        A dependency of a resume list is not installed. This
        can occur when a required package is dropped from the
        merge list via --skipfirst.
        """
        def __init__(self, depgraph, value):
            portage.exception.PortageException.__init__(self, value)
            # Keep a reference to the depgraph so the handler can
            # inspect the graph that produced this failure.
            self.depgraph = depgraph
    class _internal_exception(portage.exception.PortageException):
        """Base class for exceptions raised internally by the depgraph
        (see _unknown_internal_error and _serialize_tasks_retry)."""
        def __init__(self, value=""):
            portage.exception.PortageException.__init__(self, value)
    class _unknown_internal_error(_internal_exception):
        """
        Used by the depgraph internally to terminate graph creation.
        The specific reason for the failure should have been dumped
        to stderr, unfortunately, the exact reason for the failure
        is not carried by this exception.
        """
    class _serialize_tasks_retry(_internal_exception):
        """
        This is raised by the _serialize_tasks() method when it needs to
        be called again for some reason. The only case that it's currently
        used for is when neglected dependencies need to be added to the
        graph in order to avoid making a potentially unsafe decision.
        """
class _dep_check_composite_db(portage.dbapi):
    """
    A dbapi-like interface that is optimized for use in dep_check() calls.
    This is built on top of the existing depgraph package selection logic.
    Some packages that have been added to the graph may be masked from this
    view in order to influence the atom preference selection that occurs
    during dep_check().
    """
    def __init__(self, depgraph, root):
        portage.dbapi.__init__(self)
        self._depgraph = depgraph
        # [elided in this view: storage of root as self._root]
        self._match_cache = {}
        self._cpv_pkg_map = {}

    def _clear_cache(self):
        # Drop memoized match() results and the cpv -> Package map.
        self._match_cache.clear()
        self._cpv_pkg_map.clear()

    def match(self, atom):
        # Memoized package selection keyed on the raw atom.
        ret = self._match_cache.get(atom)
        # [elided in this view: cache hit return and result-list setup]
        atom = self._dep_expand(atom)
        pkg, existing = self._depgraph._select_package(self._root, atom)
        # [elided in this view: no-match branch]
            # Return the highest available from select_package() as well as
            # any matching slots in the graph db.
            # [elided in this view: slots-set initialization]
            slots.add(pkg.metadata["SLOT"])
            atom_cp = portage.dep_getkey(atom)
            if pkg.cp.startswith("virtual/"):
                # For new-style virtual lookahead that occurs inside
                # dep_check(), examine all slots. This is needed
                # so that newer slots will not unnecessarily be pulled in
                # when a satisfying lower slot is already installed. For
                # example, if virtual/jdk-1.4 is satisfied via kaffe then
                # there's no need to pull in a newer slot to satisfy a
                # virtual/jdk dependency.
                for db, pkg_type, built, installed, db_keys in \
                    self._depgraph._filtered_trees[self._root]["dbs"]:
                    for cpv in db.match(atom):
                        if portage.cpv_getkey(cpv) != pkg.cp:
                            # [elided in this view: skip]
                        slots.add(db.aux_get(cpv, ["SLOT"])[0])

            if self._visible(pkg):
                self._cpv_pkg_map[pkg.cpv] = pkg
                # [elided in this view: result append]
            slots.remove(pkg.metadata["SLOT"])
            # [elided in this view: loop header over remaining slots]
                slot_atom = "%s:%s" % (atom_cp, slots.pop())
                pkg, existing = self._depgraph._select_package(
                    self._root, slot_atom)
                # [elided in this view: no-match skip]
                if not self._visible(pkg):
                    # [elided in this view: skip]
                self._cpv_pkg_map[pkg.cpv] = pkg
                # [elided in this view: result append and sort guard]
                self._cpv_sort_ascending(ret)
        self._match_cache[orig_atom] = ret
        # [elided in this view: return of a copy of ret]

    def _visible(self, pkg):
        # Decide whether pkg may be offered by this view.
        if pkg.installed and "selective" not in self._depgraph.myparams:
            # [elided in this view: try statement]
                arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
            except (StopIteration, portage.exception.InvalidDependString):
                # [elided in this view: handler body and subsequent guards]
                self._depgraph.pkgsettings[pkg.root], pkg):
            # [elided in this view: surrounding condition and returns]
        except portage.exception.InvalidDependString:
            # [elided in this view: handler body]
        in_graph = self._depgraph._slot_pkg_map[
            self._root].get(pkg.slot_atom)
        if in_graph is None:
            # Mask choices for packages which are not the highest visible
            # version within their slot (since they usually trigger slot
            # conflicts).
            highest_visible, in_graph = self._depgraph._select_package(
                self._root, pkg.slot_atom)
            if pkg != highest_visible:
                # [elided in this view: masked return]
        elif in_graph != pkg:
            # Mask choices for packages that would trigger a slot
            # conflict with a previously selected package.
            # [elided in this view: masked return and final visible return]

    def _dep_expand(self, atom):
        """
        This is only needed for old installed packages that may
        contain atoms that are not fully qualified with a specific
        category. Emulate the cpv_expand() function that's used by
        dbapi.match() in cases like this. If there are multiple
        matches, it's often due to a new-style virtual that has
        been added, so try to filter those out to avoid raising
        an ambiguity error.
        """
        root_config = self._depgraph.roots[self._root]
        # [elided in this view: guard preceding the expansion]
        expanded_atoms = self._depgraph._dep_expand(root_config, atom)
        if len(expanded_atoms) > 1:
            non_virtual_atoms = []
            for x in expanded_atoms:
                if not portage.dep_getkey(x).startswith("virtual/"):
                    non_virtual_atoms.append(x)
            if len(non_virtual_atoms) == 1:
                expanded_atoms = non_virtual_atoms
        if len(expanded_atoms) > 1:
            # compatible with portage.cpv_expand()
            raise portage.exception.AmbiguousPackageName(
                [portage.dep_getkey(x) for x in expanded_atoms])
        # [elided in this view: non-empty guard]
            atom = expanded_atoms[0]
        # [elided in this view: else branch]
            null_atom = insert_category_into_atom(atom, "null")
            null_cp = portage.dep_getkey(null_atom)
            cat, atom_pn = portage.catsplit(null_cp)
            virts_p = root_config.settings.get_virts_p().get(atom_pn)
            # [elided in this view: guard on virts_p]
                # Allow the resolver to choose which virtual.
                atom = insert_category_into_atom(atom, "virtual")
            # [elided in this view: else branch]
                atom = insert_category_into_atom(atom, "null")
        # [elided in this view: return of atom]

    def aux_get(self, cpv, wants):
        # Serve metadata for a cpv previously handed out by match();
        # missing keys yield empty strings.
        metadata = self._cpv_pkg_map[cpv].metadata
        return [metadata.get(x, "") for x in wants]
# NOTE(review): subsampled listing -- lines such as 9059, 9062, 9065,
# 9071, 9076, 9078, 9080-9081, 9084, 9086, 9091, 9094-9098, 9106,
# 9109, 9113 are missing; code kept byte-identical, comments only.
# Purpose: map repository paths (PORTDIR plus PORTDIR_OVERLAY entries)
# to short numeric display indices for merge-list output. repoStr()
# hands out indices lazily; the trailing method (presumably __str__,
# its def line is missing) renders the legend.
9055 class RepoDisplay(object):
9056 def __init__(self, roots):
9057 self._shown_repos = {}
9058 self._unknown_repo = False
9060 for root_config in roots.itervalues():
9061 portdir = root_config.settings.get("PORTDIR")
9063 repo_paths.add(portdir)
9064 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9066 repo_paths.update(overlays.split())
9067 repo_paths = list(repo_paths)
9068 self._repo_paths = repo_paths
# Real (symlink-resolved) paths are kept in parallel for lookups.
9069 self._repo_paths_real = [ os.path.realpath(repo_path) \
9070 for repo_path in repo_paths ]
9072 # pre-allocate index for PORTDIR so that it always has index 0.
9073 for root_config in roots.itervalues():
9074 portdb = root_config.trees["porttree"].dbapi
9075 portdir = portdb.porttree_root
9077 self.repoStr(portdir)
9079 def repoStr(self, repo_path_real):
9082 real_index = self._repo_paths_real.index(repo_path_real)
9083 if real_index == -1:
# Unknown repository: remember it so the legend can explain "[?]".
9085 self._unknown_repo = True
9087 shown_repos = self._shown_repos
9088 repo_paths = self._repo_paths
9089 repo_path = repo_paths[real_index]
9090 index = shown_repos.get(repo_path)
9092 index = len(shown_repos)
9093 shown_repos[repo_path] = index
9099 shown_repos = self._shown_repos
9100 unknown_repo = self._unknown_repo
9101 if shown_repos or self._unknown_repo:
9102 output.append("Portage tree and overlays:\n")
# Invert the path->index mapping so the legend lists paths in
# index order.
9103 show_repo_paths = list(shown_repos)
9104 for repo_path, repo_index in shown_repos.iteritems():
9105 show_repo_paths[repo_index] = repo_path
9107 for index, repo_path in enumerate(show_repo_paths):
9108 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9110 output.append(" "+teal("[?]") + \
9111 " indicates that the source repository could not be determined\n")
9112 return "".join(output)
# NOTE(review): subsampled listing -- the __init__ preamble
# (9115-9123, where upgrades/downgrades/new/newslot/reinst/uninst/
# blocks/totalsize are presumably initialized) and assorted lines
# (9125, 9129-9133, 9142, 9146-9147, 9152-9153, 9155-9157, 9159-9160,
# 9176, 9178-9179) are missing; code kept verbatim.
# Purpose: tally merge-list statistics and format the familiar
# "Total: N packages (...)" summary line, including fetch-restriction
# and blocker sub-summaries.
9114 class PackageCounters(object):
9124 self.blocks_satisfied = 0
9126 self.restrict_fetch = 0
9127 self.restrict_fetch_satisfied = 0
9128 self.interactive = 0
9131 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9134 myoutput.append("Total: %s package" % total_installs)
# Pluralize "package" and open the detail parenthesis only when
# there is something to detail.
9135 if total_installs != 1:
9136 myoutput.append("s")
9137 if total_installs != 0:
9138 myoutput.append(" (")
9139 if self.upgrades > 0:
9140 details.append("%s upgrade" % self.upgrades)
9141 if self.upgrades > 1:
9143 if self.downgrades > 0:
9144 details.append("%s downgrade" % self.downgrades)
9145 if self.downgrades > 1:
9148 details.append("%s new" % self.new)
9149 if self.newslot > 0:
9150 details.append("%s in new slot" % self.newslot)
9151 if self.newslot > 1:
9154 details.append("%s reinstall" % self.reinst)
9158 details.append("%s uninstall" % self.uninst)
9161 if self.interactive > 0:
9162 details.append("%s %s" % (self.interactive,
9163 colorize("WARN", "interactive")))
9164 myoutput.append(", ".join(details))
9165 if total_installs != 0:
9166 myoutput.append(")")
9167 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9168 if self.restrict_fetch:
9169 myoutput.append("\nFetch Restriction: %s package" % \
9170 self.restrict_fetch)
9171 if self.restrict_fetch > 1:
9172 myoutput.append("s")
9173 if self.restrict_fetch_satisfied < self.restrict_fetch:
9174 myoutput.append(bad(" (%s unsatisfied)") % \
9175 (self.restrict_fetch - self.restrict_fetch_satisfied))
9177 myoutput.append("\nConflict: %s block" % \
9180 myoutput.append("s")
9181 if self.blocks_satisfied < self.blocks:
9182 myoutput.append(bad(" (%s unsatisfied)") % \
9183 (self.blocks - self.blocks_satisfied))
9184 return "".join(myoutput)
# NOTE(review): subsampled listing -- lines 9187-9188, 9191-9193,
# 9196, 9198, 9200-9202, 9205, 9208-9210, 9213, 9217, 9219-9220,
# 9223-9227, 9231, 9235, 9242, 9244, 9247, 9249, 9252-9253 are
# missing; code kept byte-identical, comments only added.
# Purpose: emulate the select.poll interface on top of select.select
# for platforms without a working poll() (e.g. devices on Darwin).
# All registered fds are reported as POLLIN only.
9186 class PollSelectAdapter(PollConstants):
9189 Use select to emulate a poll object, for
9190 systems that don't support poll().
9194 self._registered = {}
# Cached [rlist, wlist, xlist] for select(); invalidated (set to
# None) whenever the registration set changes.
9195 self._select_args = [[], [], []]
9197 def register(self, fd, *args):
9199 Only POLLIN is currently supported!
9203 "register expected at most 2 arguments, got " + \
9204 repr(1 + len(args)))
9206 eventmask = PollConstants.POLLIN | \
9207 PollConstants.POLLPRI | PollConstants.POLLOUT
9211 self._registered[fd] = eventmask
9212 self._select_args = None
9214 def unregister(self, fd):
9215 self._select_args = None
9216 del self._registered[fd]
9218 def poll(self, *args):
9221 "poll expected at most 2 arguments, got " + \
9222 repr(1 + len(args)))
9228 select_args = self._select_args
9229 if select_args is None:
9230 select_args = [self._registered.keys(), [], []]
9232 if timeout is not None:
# Copy before appending a timeout so the cached arg list is
# not mutated.
9233 select_args = select_args[:]
9234 # Translate poll() timeout args to select() timeout args:
9236 # | units | value(s) for indefinite block
9237 # ---------|--------------|------------------------------
9238 # poll | milliseconds | omitted, negative, or None
9239 # ---------|--------------|------------------------------
9240 # select | seconds | omitted
9241 # ---------|--------------|------------------------------
9243 if timeout is not None and timeout < 0:
9245 if timeout is not None:
9246 select_args.append(timeout / 1000)
9248 select_events = select.select(*select_args)
9250 for fd in select_events[0]:
9251 poll_events.append((fd, PollConstants.POLLIN))
# NOTE(review): subsampled listing -- lines 9255, 9258, 9264-9266,
# 9269-9270, 9273-9282, 9286-9287, 9289, 9294, 9299, 9302, 9304-9305,
# 9307, 9309, 9311, 9315, 9318-9320, 9326-9328, 9331-9332, 9334 are
# missing; code kept verbatim, comments only added.
# Purpose: a FIFO queue of asynchronous tasks that starts up to
# max_jobs tasks concurrently (max_jobs is True means unlimited) and
# starts queued tasks as running ones exit.
9254 class SequentialTaskQueue(SlotObject):
9256 __slots__ = ("max_jobs", "running_tasks") + \
9257 ("_dirty", "_scheduling", "_task_queue")
9259 def __init__(self, **kwargs):
9260 SlotObject.__init__(self, **kwargs)
9261 self._task_queue = deque()
9262 self.running_tasks = set()
9263 if self.max_jobs is None:
9267 def add(self, task):
9268 self._task_queue.append(task)
9271 def addFront(self, task):
9272 self._task_queue.appendleft(task)
9283 if self._scheduling:
9284 # Ignore any recursive schedule() calls triggered via
9285 # self._task_exit().
9288 self._scheduling = True
9290 task_queue = self._task_queue
9291 running_tasks = self.running_tasks
9292 max_jobs = self.max_jobs
9293 state_changed = False
9295 while task_queue and \
9296 (max_jobs is True or len(running_tasks) < max_jobs):
9297 task = task_queue.popleft()
9298 cancelled = getattr(task, "cancelled", None)
9300 running_tasks.add(task)
# The exit listener is what removes the task from
# running_tasks when it finishes.
9301 task.addExitListener(self._task_exit)
9303 state_changed = True
9306 self._scheduling = False
9308 return state_changed
9310 def _task_exit(self, task):
9312 Since we can always rely on exit listeners being called, the set of
9313 running tasks is always pruned automatically and there is never any need
9314 to actively prune it.
9316 self.running_tasks.remove(task)
9317 if self._task_queue:
# Clear-and-detach teardown (presumably a clear() method; its
# def line is among the missing lines).
9321 self._task_queue.clear()
9322 running_tasks = self.running_tasks
9323 while running_tasks:
9324 task = running_tasks.pop()
9325 task.removeExitListener(self._task_exit)
9329 def __nonzero__(self):
9330 return bool(self._task_queue or self.running_tasks)
9333 return len(self._task_queue) + len(self.running_tasks)
# NOTE(review): subsampled listing -- lines 9336, 9338, 9341,
# 9343-9344, 9348, 9352-9353, 9355, 9358-9359, 9361, 9366-9368, 9371
# are missing (including the try/except around open() and the
# creation of the poll object bound to `p`); code kept verbatim.
# Module-level memoization cache: None = not yet probed.
9335 _can_poll_device = None
9337 def can_poll_device():
9339 Test if it's possible to use poll() on a device such as a pty. This
9340 is known to fail on Darwin.
9342 @returns: True if poll() on a device succeeds, False otherwise.
9345 global _can_poll_device
9346 if _can_poll_device is not None:
9347 return _can_poll_device
9349 if not hasattr(select, "poll"):
9350 _can_poll_device = False
9351 return _can_poll_device
9354 dev_null = open('/dev/null', 'rb')
9356 _can_poll_device = False
9357 return _can_poll_device
9360 p.register(dev_null.fileno(), PollConstants.POLLIN)
# POLLNVAL on the probe means poll() rejects device fds here.
9362 invalid_request = False
9363 for f, event in p.poll():
9364 if event & PollConstants.POLLNVAL:
9365 invalid_request = True
9369 _can_poll_device = not invalid_request
9370 return _can_poll_device
# NOTE(review): subsampled listing -- lines 9373 and 9377 (the
# docstring delimiters) are missing; code kept verbatim.
9372 def create_poll_instance():
9374 Create an instance of select.poll, or an instance of
9375 PollSelectAdapter if there is no poll() implementation or
9376 it is broken somehow.
9378 if can_poll_device():
9379 return select.poll()
9380 return PollSelectAdapter()
# NOTE(review): subsampled listing -- lines 9384-9385, 9388-9389,
# 9391, 9397-9399, 9401, 9404 are missing (including the fallback
# function's def line and its try/except framing); code kept verbatim.
# Purpose: use os.getloadavg() when available, otherwise define a
# /proc/loadavg-based replacement that raises OSError on failure,
# matching os.getloadavg()'s documented contract.
9382 getloadavg = getattr(os, "getloadavg", None)
9383 if getloadavg is None:
9386 Uses /proc/loadavg to emulate os.getloadavg().
9387 Raises OSError if the load average was unobtainable.
9390 loadavg_str = open('/proc/loadavg').readline()
9392 # getloadavg() is only supposed to raise OSError, so convert
9393 raise OSError('unknown')
9394 loadavg_split = loadavg_str.split()
9395 if len(loadavg_split) < 3:
9396 raise OSError('unknown')
9400 loadavg_floats.append(float(loadavg_split[i]))
9402 raise OSError('unknown')
9403 return tuple(loadavg_floats)
# NOTE(review): subsampled listing -- many interior lines are missing
# throughout this class (e.g. 9406, 9409-9411, 9413, 9421, 9428,
# 9430, 9432, 9434, 9436, 9438-9439, 9443, 9446-9447, 9451,
# 9453-9455, 9457-9460, 9468, 9470, 9475, 9477-9478, 9480, 9484-9485,
# 9487, 9491, 9493-9494, 9496, 9503, 9505, 9507, 9509, 9512-9513,
# 9517, 9521, 9524, 9526, 9532, 9534-9535, 9538, 9540-9542,
# 9546-9547, 9549-9550, 9552, 9554-9555, 9557-9558, 9566-9567, 9573,
# 9575, 9577-9578, 9580, 9584, 9587-9588, 9592, 9596, 9598); code is
# kept byte-identical, comments only added.
# Purpose: base class providing a poll()-driven event loop --
# registration of fd handlers, a shared poll-event queue, load/job
# throttling, and re-entrancy-safe scheduling.
9405 class PollScheduler(object):
# Lightweight bundle of bound methods handed to tasks so they can
# register/schedule/unregister without holding the scheduler itself.
9407 class _sched_iface_class(SlotObject):
9408 __slots__ = ("register", "schedule", "unregister")
9412 self._max_load = None
9414 self._poll_event_queue = []
9415 self._poll_event_handlers = {}
9416 self._poll_event_handler_ids = {}
9417 # Increment id for each new handler.
9418 self._event_handler_id = 0
9419 self._poll_obj = create_poll_instance()
9420 self._scheduling = False
9422 def _schedule(self):
9424 Calls _schedule_tasks() and automatically returns early from
9425 any recursive calls to this method that the _schedule_tasks()
9426 call might trigger. This makes _schedule() safe to call from
9427 inside exit listeners.
9429 if self._scheduling:
9431 self._scheduling = True
9433 return self._schedule_tasks()
9435 self._scheduling = False
9437 def _running_job_count(self):
9440 def _can_add_job(self):
9441 max_jobs = self._max_jobs
9442 max_load = self._max_load
9444 if self._max_jobs is not True and \
9445 self._running_job_count() >= self._max_jobs:
# Load-average throttling only applies once at least one job
# is running and parallelism is actually enabled.
9448 if max_load is not None and \
9449 (max_jobs is True or max_jobs > 1) and \
9450 self._running_job_count() >= 1:
9452 avg1, avg5, avg15 = getloadavg()
9456 if avg1 >= max_load:
9461 def _poll(self, timeout=None):
9463 All poll() calls pass through here. The poll events
9464 are added directly to self._poll_event_queue.
9465 In order to avoid endless blocking, this raises
9466 StopIteration if timeout is None and there are
9467 no file descriptors to poll.
9469 if not self._poll_event_handlers:
9471 if timeout is None and \
9472 not self._poll_event_handlers:
9473 raise StopIteration(
9474 "timeout is None and there are no poll() event handlers")
9476 # The following error is known to occur with Linux kernel versions
9479 # select.error: (4, 'Interrupted system call')
9481 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9482 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9483 # without any events.
9486 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9488 except select.error, e:
9489 writemsg_level("\n!!! select error: %s\n" % (e,),
9490 level=logging.ERROR, noiselevel=-1)
9492 if timeout is not None:
9495 def _next_poll_event(self, timeout=None):
9497 Since the _schedule_wait() loop is called by event
9498 handlers from _poll_loop(), maintain a central event
9499 queue for both of them to share events from a single
9500 poll() call. In order to avoid endless blocking, this
9501 raises StopIteration if timeout is None and there are
9502 no file descriptors to poll.
9504 if not self._poll_event_queue:
9506 return self._poll_event_queue.pop()
9508 def _poll_loop(self):
9510 event_handlers = self._poll_event_handlers
9511 event_handled = False
9514 while event_handlers:
9515 f, event = self._next_poll_event()
9516 handler, reg_id = event_handlers[f]
9518 event_handled = True
9519 except StopIteration:
9520 event_handled = True
9522 if not event_handled:
9523 raise AssertionError("tight loop")
9525 def _schedule_yield(self):
9527 Schedule for a short period of time chosen by the scheduler based
9528 on internal state. Synchronous tasks should call this periodically
9529 in order to allow the scheduler to service pending poll events. The
9530 scheduler will call poll() exactly once, without blocking, and any
9531 resulting poll events will be serviced.
9533 event_handlers = self._poll_event_handlers
9536 if not event_handlers:
9537 return bool(events_handled)
9539 if not self._poll_event_queue:
9543 while event_handlers and self._poll_event_queue:
9544 f, event = self._next_poll_event()
9545 handler, reg_id = event_handlers[f]
9548 except StopIteration:
9551 return bool(events_handled)
9553 def _register(self, f, eventmask, handler):
9556 @return: A unique registration id, for use in schedule() or
9559 if f in self._poll_event_handlers:
9560 raise AssertionError("fd %d is already registered" % f)
9561 self._event_handler_id += 1
9562 reg_id = self._event_handler_id
9563 self._poll_event_handler_ids[reg_id] = f
9564 self._poll_event_handlers[f] = (handler, reg_id)
9565 self._poll_obj.register(f, eventmask)
9568 def _unregister(self, reg_id):
9569 f = self._poll_event_handler_ids[reg_id]
9570 self._poll_obj.unregister(f)
9571 del self._poll_event_handlers[f]
9572 del self._poll_event_handler_ids[reg_id]
9574 def _schedule_wait(self, wait_ids):
9576 Schedule until wait_id is not longer registered
9579 @param wait_id: a task id to wait for
9581 event_handlers = self._poll_event_handlers
9582 handler_ids = self._poll_event_handler_ids
9583 event_handled = False
# Accept a single registration id or an iterable of them.
9585 if isinstance(wait_ids, int):
9586 wait_ids = frozenset([wait_ids])
9589 while wait_ids.intersection(handler_ids):
9590 f, event = self._next_poll_event()
9591 handler, reg_id = event_handlers[f]
9593 event_handled = True
9594 except StopIteration:
9595 event_handled = True
9597 return event_handled
# NOTE(review): subsampled listing -- lines 9600-9601, 9604-9605,
# 9608, 9610-9611, 9618-9619, 9621-9622, 9624, 9627-9629, 9631-9632,
# 9634-9635, 9637-9638, 9640-9641, 9644-9646, 9648-9649, 9651-9654,
# 9656, 9660-9661, 9663, 9666, 9671, 9673 are missing; code kept
# verbatim, comments only added.
# Purpose: drive one or more SequentialTaskQueue instances from the
# PollScheduler event loop until all of them are empty.
9599 class QueueScheduler(PollScheduler):
9602 Add instances of SequentialTaskQueue and then call run(). The
9603 run() method returns when no tasks remain.
9606 def __init__(self, max_jobs=None, max_load=None):
9607 PollScheduler.__init__(self)
9609 if max_jobs is None:
9612 self._max_jobs = max_jobs
9613 self._max_load = max_load
9614 self.sched_iface = self._sched_iface_class(
9615 register=self._register,
9616 schedule=self._schedule_wait,
9617 unregister=self._unregister)
9620 self._schedule_listeners = []
9623 self._queues.append(q)
9625 def remove(self, q):
9626 self._queues.remove(q)
# run(): keep scheduling until nothing is queued, then drain the
# still-running jobs (loop bodies are among the missing lines).
9630 while self._schedule():
9633 while self._running_job_count():
9636 def _schedule_tasks(self):
9639 @returns: True if there may be remaining tasks to schedule,
9642 while self._can_add_job():
9643 n = self._max_jobs - self._running_job_count()
9647 if not self._start_next_job(n):
9650 for q in self._queues:
9655 def _running_job_count(self):
9657 for q in self._queues:
9658 job_count += len(q.running_tasks)
9659 self._jobs = job_count
9662 def _start_next_job(self, n=1):
9664 for q in self._queues:
# Infer how many jobs a queue started by comparing its running
# count before and after (the schedule call is a missing line).
9665 initial_job_count = len(q.running_tasks)
9667 final_job_count = len(q.running_tasks)
9668 if final_job_count > initial_job_count:
9669 started_count += (final_job_count - initial_job_count)
9670 if started_count >= n:
9672 return started_count
# NOTE(review): subsampled listing -- only blank lines and the
# docstring delimiters (9675-9676, 9679-9680, 9688, 9691) appear to
# be missing; code kept verbatim.
# Purpose: convenience facade pairing one SequentialTaskQueue with a
# QueueScheduler; add tasks and call run().
9674 class TaskScheduler(object):
9677 A simple way to handle scheduling of AsynchrousTask instances. Simply
9678 add tasks and call run(). The run() method returns when no tasks remain.
9681 def __init__(self, max_jobs=None, max_load=None):
9682 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9683 self._scheduler = QueueScheduler(
9684 max_jobs=max_jobs, max_load=max_load)
9685 self.sched_iface = self._scheduler.sched_iface
# run is delegated directly to the underlying QueueScheduler.
9686 self.run = self._scheduler.run
9687 self._scheduler.add(self._queue)
9689 def add(self, task):
9690 self._queue.add(task)
# NOTE(review): subsampled listing -- many interior lines are missing
# throughout (e.g. 9693, 9696, 9700, 9702-9706, 9709-9712, 9722-9723,
# 9727, 9735, 9737, 9739, 9741-9743, 9745-9749, 9753-9756, 9758-9760,
# 9763, 9767-9768, 9771-9773, 9776, 9778, 9781, 9783, 9785-9786,
# 9789, 9791-9795, 9797, 9799, 9801, 9803-9804, 9806, 9808-9809,
# 9811-9815, 9818-9819, 9821, 9823, 9827, 9831, 9834-9835, 9837-9850,
# 9852-9854, 9857-9861, 9865, 9867, 9869-9870, 9874, 9883, 9891,
# 9896, 9900, 9902-9903, 9907, 9909-9910, 9914, 9916, 9918, 9920,
# 9923, 9930, 9932, 9934); code kept byte-identical, comments only.
# Purpose: single-line terminal status display ("Jobs: x of y ...")
# used by the parallel Scheduler; uses termcap codes when stdout is a
# tty, plain newline-terminated messages otherwise, and rate-limits
# redraws to _min_display_latency seconds.
9692 class JobStatusDisplay(object):
9694 _bound_properties = ("curval", "failed", "running")
9695 _jobs_column_width = 48
9697 # Don't update the display unless at least this much
9698 # time has passed, in units of seconds.
9699 _min_display_latency = 2
9701 _default_term_codes = {
9707 _termcap_name_map = {
9708 'carriage_return' : 'cr',
9713 def __init__(self, out=sys.stdout, quiet=False):
# object.__setattr__ is used throughout because __setattr__ is
# overridden below to trigger _property_change().
9714 object.__setattr__(self, "out", out)
9715 object.__setattr__(self, "quiet", quiet)
9716 object.__setattr__(self, "maxval", 0)
9717 object.__setattr__(self, "merges", 0)
9718 object.__setattr__(self, "_changed", False)
9719 object.__setattr__(self, "_displayed", False)
9720 object.__setattr__(self, "_last_display_time", 0)
9721 object.__setattr__(self, "width", 80)
9724 isatty = hasattr(out, "isatty") and out.isatty()
9725 object.__setattr__(self, "_isatty", isatty)
9726 if not isatty or not self._init_term():
# Fall back to hard-coded codes when termcap init fails.
9728 for k, capname in self._termcap_name_map.iteritems():
9729 term_codes[k] = self._default_term_codes[capname]
9730 object.__setattr__(self, "_term_codes", term_codes)
9731 encoding = sys.getdefaultencoding()
9732 for k, v in self._term_codes.items():
9733 if not isinstance(v, basestring):
9734 self._term_codes[k] = v.decode(encoding, 'replace')
9736 def _init_term(self):
9738 Initialize term control codes.
9740 @returns: True if term codes were successfully initialized,
9744 term_type = os.environ.get("TERM", "vt100")
9750 curses.setupterm(term_type, self.out.fileno())
9751 tigetstr = curses.tigetstr
9752 except curses.error:
9757 if tigetstr is None:
9761 for k, capname in self._termcap_name_map.iteritems():
9762 code = tigetstr(capname)
9764 code = self._default_term_codes[capname]
9765 term_codes[k] = code
9766 object.__setattr__(self, "_term_codes", term_codes)
9769 def _format_msg(self, msg):
9770 return ">>> %s" % msg
# Erase the current status line: carriage return + clear-to-eol.
9774 self._term_codes['carriage_return'] + \
9775 self._term_codes['clr_eol'])
9777 self._displayed = False
9779 def _display(self, line):
9780 self.out.write(line)
9782 self._displayed = True
9784 def _update(self, msg):
9787 if not self._isatty:
9788 out.write(self._format_msg(msg) + self._term_codes['newline'])
9790 self._displayed = True
9796 self._display(self._format_msg(msg))
9798 def displayMessage(self, msg):
9800 was_displayed = self._displayed
9802 if self._isatty and self._displayed:
9805 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9807 self._displayed = False
9810 self._changed = True
9816 for name in self._bound_properties:
9817 object.__setattr__(self, name, 0)
9820 self.out.write(self._term_codes['newline'])
9822 self._displayed = False
9824 def __setattr__(self, name, value):
9825 old_value = getattr(self, name)
9826 if value == old_value:
9828 object.__setattr__(self, name, value)
9829 if name in self._bound_properties:
9830 self._property_change(name, old_value, value)
9832 def _property_change(self, name, old_value, new_value):
9833 self._changed = True
9836 def _load_avg_str(self):
9851 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9855 Display status on stdout, but only if something has
9856 changed since the last call.
9862 current_time = time.time()
9863 time_delta = current_time - self._last_display_time
9864 if self._displayed and \
9866 if not self._isatty:
9868 if time_delta < self._min_display_latency:
9871 self._last_display_time = current_time
9872 self._changed = False
9873 self._display_status()
9875 def _display_status(self):
9876 # Don't use len(self._completed_tasks) here since that also
9877 # can include uninstall tasks.
9878 curval_str = str(self.curval)
9879 maxval_str = str(self.maxval)
9880 running_str = str(self.running)
9881 failed_str = str(self.failed)
9882 load_avg_str = self._load_avg_str()
# Render once through a style-aware writer, capturing both the
# colorized text and a plain-text shadow copy for width math.
9884 color_output = StringIO()
9885 plain_output = StringIO()
9886 style_file = portage.output.ConsoleStyleFile(color_output)
9887 style_file.write_listener = plain_output
9888 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9889 style_writer.style_listener = style_file.new_styles
9890 f = formatter.AbstractFormatter(style_writer)
9892 number_style = "INFORM"
9893 f.add_literal_data("Jobs: ")
9894 f.push_style(number_style)
9895 f.add_literal_data(curval_str)
9897 f.add_literal_data(" of ")
9898 f.push_style(number_style)
9899 f.add_literal_data(maxval_str)
9901 f.add_literal_data(" complete")
9904 f.add_literal_data(", ")
9905 f.push_style(number_style)
9906 f.add_literal_data(running_str)
9908 f.add_literal_data(" running")
9911 f.add_literal_data(", ")
9912 f.push_style(number_style)
9913 f.add_literal_data(failed_str)
9915 f.add_literal_data(" failed")
9917 padding = self._jobs_column_width - len(plain_output.getvalue())
9919 f.add_literal_data(padding * " ")
9921 f.add_literal_data("Load avg: ")
9922 f.add_literal_data(load_avg_str)
9924 # Truncate to fit width, to avoid making the terminal scroll if the
9925 # line overflows (happens when the load average is large).
9926 plain_output = plain_output.getvalue()
9927 if self._isatty and len(plain_output) > self.width:
9928 # Use plain_output here since it's easier to truncate
9929 # properly than the color output which contains console
9931 self._update(plain_output[:self.width])
9933 self._update(color_output.getvalue())
9935 xtermTitle(" ".join(plain_output.split()))
# NOTE(review): subsampled listing -- lines 9938, 9943, 9947, 9950,
# 9953, 9955, 9960-9961, 9964, 9967, 9971, 9974, 9977, 9980, 9986,
# 9989 are missing; code kept verbatim, comments only added.
# Purpose: class-level configuration for the parallel build
# Scheduler -- option sets that alter behavior, plus small SlotObject
# containers used to pass grouped state to tasks.
9937 class Scheduler(PollScheduler):
# Options under which blockers are not enforced.
9939 _opts_ignore_blockers = \
9940 frozenset(["--buildpkgonly",
9941 "--fetchonly", "--fetch-all-uri",
9942 "--nodeps", "--pretend"])
# Options that force foreground (non-background) output.
9944 _opts_no_background = \
9945 frozenset(["--pretend",
9946 "--fetchonly", "--fetch-all-uri"])
9948 _opts_no_restart = frozenset(["--buildpkgonly",
9949 "--fetchonly", "--fetch-all-uri", "--pretend"])
9951 _bad_resume_opts = set(["--ask", "--changelog",
9952 "--resume", "--skipfirst"])
9954 _fetch_log = "/var/log/emerge-fetch.log"
# Interface object handed to dblink/ebuild tasks.
9956 class _iface_class(SlotObject):
9957 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9958 "dblinkElog", "fetch", "register", "schedule",
9959 "scheduleSetup", "scheduleUnpack", "scheduleYield",
9962 class _fetch_iface_class(SlotObject):
9963 __slots__ = ("log_file", "schedule")
9965 _task_queues_class = slot_dict_class(
9966 ("merge", "jobs", "fetch", "unpack"), prefix="")
9968 class _build_opts_class(SlotObject):
9969 __slots__ = ("buildpkg", "buildpkgonly",
9970 "fetch_all_uri", "fetchonly", "pretend")
9972 class _binpkg_opts_class(SlotObject):
9973 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9975 class _pkg_count_class(SlotObject):
9976 __slots__ = ("curval", "maxval")
9978 class _emerge_log_class(SlotObject):
9979 __slots__ = ("xterm_titles",)
9981 def log(self, *pargs, **kwargs):
9982 if not self.xterm_titles:
9983 # Avoid interference with the scheduler's status display.
9984 kwargs.pop("short_msg", None)
9985 emergelog(self.xterm_titles, *pargs, **kwargs)
9987 class _failed_pkg(SlotObject):
9988 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
# NOTE(review): subsampled listing -- line 9997 is missing here
# (presumably `self._root = root`, since allocate() reads self._root;
# confirm against upstream); code kept verbatim.
9990 class _ConfigPool(object):
9991 """Interface for a task to temporarily allocate a config
9992 instance from a pool. This allows a task to be constructed
9993 long before the config instance actually becomes needed, like
9994 when prefetchers are constructed for the whole merge list."""
9995 __slots__ = ("_root", "_allocate", "_deallocate")
9996 def __init__(self, root, allocate, deallocate):
9998 self._allocate = allocate
9999 self._deallocate = deallocate
10000 def allocate(self):
10001 return self._allocate(self._root)
10002 def deallocate(self, settings):
10003 self._deallocate(settings)
# NOTE(review): subsampled listing -- lines 10006 and 10009 (the
# docstring delimiters) are missing; code kept verbatim.
10005 class _unknown_internal_error(portage.exception.PortageException):
10007 Used internally to terminate scheduling. The specific reason for
10008 the failure should have been dumped to stderr.
10010 def __init__(self, value=""):
10011 portage.exception.PortageException.__init__(self, value)
# NOTE(review): subsampled listing -- many interior lines are missing
# (e.g. 10018, 10031-10032, 10038, 10046, 10048, 10053, 10060, 10065,
# 10067, 10071, 10073, 10077, 10080, 10093, 10097, 10108, 10111,
# 10116, 10118, 10121, 10123, 10133, 10136, 10140, 10143, 10146-10147,
# 10151, 10155); code kept byte-identical, comments only added.
# Purpose: wire up the full parallel-merge state -- option parsing
# into SlotObject bundles, per-root config/blocker pools, the task
# queues, the fetch/dblink scheduler interfaces, parallel-fetch
# setup, and detection of the running portage instance.
10013 def __init__(self, settings, trees, mtimedb, myopts,
10014 spinner, mergelist, favorites, digraph):
10015 PollScheduler.__init__(self)
10016 self.settings = settings
10017 self.target_root = settings["ROOT"]
10019 self.myopts = myopts
10020 self._spinner = spinner
10021 self._mtimedb = mtimedb
10022 self._mergelist = mergelist
10023 self._favorites = favorites
10024 self._args_set = InternalPackageSet(favorites)
# Map each slot name (e.g. "fetchonly") to the presence of the
# corresponding "--fetchonly"-style command-line option.
10025 self._build_opts = self._build_opts_class()
10026 for k in self._build_opts.__slots__:
10027 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10028 self._binpkg_opts = self._binpkg_opts_class()
10029 for k in self._binpkg_opts.__slots__:
10030 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10033 self._logger = self._emerge_log_class()
10034 self._task_queues = self._task_queues_class()
10035 for k in self._task_queues.allowed_keys:
10036 setattr(self._task_queues, k,
10037 SequentialTaskQueue())
10039 # Holds merges that will wait to be executed when no builds are
10040 # executing. This is useful for system packages since dependencies
10041 # on system packages are frequently unspecified.
10042 self._merge_wait_queue = []
10043 # Holds merges that have been transfered from the merge_wait_queue to
10044 # the actual merge queue. They are removed from this list upon
10045 # completion. Other packages can start building only when this list is
10047 self._merge_wait_scheduled = []
10049 # Holds system packages and their deep runtime dependencies. Before
10050 # being merged, these packages go to merge_wait_queue, to be merged
10051 # when no other packages are building.
10052 self._deep_system_deps = set()
10054 # Holds packages to merge which will satisfy currently unsatisfied
10055 # deep runtime dependencies of system packages. If this is not empty
10056 # then no parallel builds will be spawned until it is empty. This
10057 # minimizes the possibility that a build will fail due to the system
10058 # being in a fragile state. For example, see bug #259954.
10059 self._unsatisfied_system_deps = set()
10061 self._status_display = JobStatusDisplay()
10062 self._max_load = myopts.get("--load-average")
10063 max_jobs = myopts.get("--jobs")
10064 if max_jobs is None:
10066 self._set_max_jobs(max_jobs)
10068 # The root where the currently running
10069 # portage instance is installed.
10070 self._running_root = trees["/"]["root_config"]
10072 if settings.get("PORTAGE_DEBUG", "") == "1":
10074 self.pkgsettings = {}
10075 self._config_pool = {}
10076 self._blocker_db = {}
10078 self._config_pool[root] = []
10079 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10081 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10082 schedule=self._schedule_fetch)
10083 self._sched_iface = self._iface_class(
10084 dblinkEbuildPhase=self._dblink_ebuild_phase,
10085 dblinkDisplayMerge=self._dblink_display_merge,
10086 dblinkElog=self._dblink_elog,
10087 fetch=fetch_iface, register=self._register,
10088 schedule=self._schedule_wait,
10089 scheduleSetup=self._schedule_setup,
10090 scheduleUnpack=self._schedule_unpack,
10091 scheduleYield=self._schedule_yield,
10092 unregister=self._unregister)
# Weak refs: prefetchers disappear once no task holds them.
10094 self._prefetchers = weakref.WeakValueDictionary()
10095 self._pkg_queue = []
10096 self._completed_tasks = set()
10098 self._failed_pkgs = []
10099 self._failed_pkgs_all = []
10100 self._failed_pkgs_die_msgs = []
10101 self._post_mod_echo_msgs = []
10102 self._parallel_fetch = False
10103 merge_count = len([x for x in mergelist \
10104 if isinstance(x, Package) and x.operation == "merge"])
10105 self._pkg_count = self._pkg_count_class(
10106 curval=0, maxval=merge_count)
10107 self._status_display.maxval = self._pkg_count.maxval
10109 # The load average takes some time to respond when new
10110 # jobs are added, so we need to limit the rate of adding
10112 self._job_delay_max = 10
10113 self._job_delay_factor = 1.0
10114 self._job_delay_exp = 1.5
10115 self._previous_job_start_time = None
10117 self._set_digraph(digraph)
10119 # This is used to memoize the _choose_pkg() result when
10120 # no packages can be chosen until one of the existing
10122 self._choose_pkg_return_early = False
10124 features = self.settings.features
10125 if "parallel-fetch" in features and \
10126 not ("--pretend" in self.myopts or \
10127 "--fetch-all-uri" in self.myopts or \
10128 "--fetchonly" in self.myopts):
10129 if "distlocks" not in features:
10130 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10131 portage.writemsg(red("!!!")+" parallel-fetching " + \
10132 "requires the distlocks feature enabled"+"\n",
10134 portage.writemsg(red("!!!")+" you have it disabled, " + \
10135 "thus parallel-fetching is being disabled"+"\n",
10137 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10138 elif len(mergelist) > 1:
10139 self._parallel_fetch = True
10141 if self._parallel_fetch:
10142 # clear out existing fetch log if it exists
10144 open(self._fetch_log, 'w')
10145 except EnvironmentError:
10148 self._running_portage = None
10149 portage_match = self._running_root.trees["vartree"].dbapi.match(
10150 portage.const.PORTAGE_PACKAGE_ATOM)
10152 cpv = portage_match.pop()
10153 self._running_portage = self._pkg(cpv, "installed",
10154 self._running_root, installed=True)
# NOTE(review): subsampled listing -- line 10157 is missing between
# the def and the delegation below (contents unknown; confirm against
# upstream); code kept verbatim. Delegates polling to the base class.
10156 def _poll(self, timeout=None):
10158 PollScheduler._poll(self, timeout=timeout)
# Keep the scheduler's own job limit and the jobs task-queue's limit
# in sync; max_jobs may be an int or True (unlimited).
10160 def _set_max_jobs(self, max_jobs):
10161 self._max_jobs = max_jobs
10162 self._task_queues.jobs.max_jobs = max_jobs
# NOTE(review): subsampled listing -- lines 10165, 10167-10168,
# 10170, 10174-10175, 10178, 10182, 10188, 10196, 10201, 10205-10207
# are missing (the final return -- presumably `return background` --
# is among them; confirm against upstream); code kept verbatim.
10164 def _background_mode(self):
10166 Check if background mode is enabled and adjust states as necessary.
10169 @returns: True if background mode is enabled, False otherwise.
10171 background = (self._max_jobs is True or \
10172 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10173 not bool(self._opts_no_background.intersection(self.myopts))
# Interactive packages need the terminal, so background mode is
# abandoned and parallelism is forced down to one job.
10176 interactive_tasks = self._get_interactive_tasks()
10177 if interactive_tasks:
10179 writemsg_level(">>> Sending package output to stdio due " + \
10180 "to interactive package(s):\n",
10181 level=logging.INFO, noiselevel=-1)
10183 for pkg in interactive_tasks:
10184 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10185 if pkg.root != "/":
10186 pkg_str += " for " + pkg.root
10187 msg.append(pkg_str)
10189 writemsg_level("".join("%s\n" % (l,) for l in msg),
10190 level=logging.INFO, noiselevel=-1)
10191 if self._max_jobs is True or self._max_jobs > 1:
10192 self._set_max_jobs(1)
10193 writemsg_level(">>> Setting --jobs=1 due " + \
10194 "to the above interactive package(s)\n",
10195 level=logging.INFO, noiselevel=-1)
10197 self._status_display.quiet = \
10198 not background or \
10199 ("--quiet" in self.myopts and \
10200 "--verbose" not in self.myopts)
10202 self._logger.xterm_titles = \
10203 "notitles" not in self.settings.features and \
10204 self._status_display.quiet
# NOTE(review): subsampled listing -- lines 10215-10216 (presumably
# `continue` plus the `try:`) are missing; code kept verbatim.
# Purpose: return merge tasks whose PROPERTIES contain "interactive";
# an invalid PROPERTIES string aborts scheduling via
# _unknown_internal_error after printing a notice.
10208 def _get_interactive_tasks(self):
10209 from portage import flatten
10210 from portage.dep import use_reduce, paren_reduce
10211 interactive_tasks = []
10212 for task in self._mergelist:
10213 if not (isinstance(task, Package) and \
10214 task.operation == "merge"):
10217 properties = flatten(use_reduce(paren_reduce(
10218 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10219 except portage.exception.InvalidDependString, e:
10220 show_invalid_depstring_notice(task,
10221 task.metadata["PROPERTIES"], str(e))
10222 raise self._unknown_internal_error()
10223 if "interactive" in properties:
10224 interactive_tasks.append(task)
10225 return interactive_tasks
# NOTE(review): subsampled listing -- lines 10230, 10232-10233 are
# missing (presumably the early return clearing the digraph); code
# kept verbatim. The digraph is only kept when dependencies matter
# and more than one job may run.
10227 def _set_digraph(self, digraph):
10228 if "--nodeps" in self.myopts or \
10229 (self._max_jobs is not True and self._max_jobs < 2):
10231 self._digraph = None
10234 self._digraph = digraph
10235 self._find_system_deps()
10236 self._prune_digraph()
10237 self._prevent_builddir_collisions()
# NOTE(review): subsampled listing -- lines 10240, 10244, 10251 are
# missing (likely docstring delimiters/blank); code kept verbatim.
10239 def _find_system_deps(self):
10241 Find system packages and their deep runtime dependencies. Before being
10242 merged, these packages go to merge_wait_queue, to be merged when no
10243 other packages are building.
10245 deep_system_deps = self._deep_system_deps
10246 deep_system_deps.clear()
10247 deep_system_deps.update(
10248 _find_deep_system_runtime_deps(self._digraph))
# Only packages actually being merged belong in the wait set.
10249 deep_system_deps.difference_update([pkg for pkg in \
10250 deep_system_deps if pkg.operation != "merge"])
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    # NOTE(review): the enclosing retry loop ('while True:' / 'break')
    # appears elided in this listing — confirm against upstream.
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    for node in graph.root_nodes():
        # A root node is irrelevant when it is not a Package, is an
        # installed no-op, or its task already completed.
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    if not removed_nodes:  # ('break' elided)
    removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    the main process.
    """
    # NOTE(review): the 'cpv_map = {}' initialization, a 'continue'
    # after the blocker guard, and an 'else:' before the edge loop
    # appear elided in this listing — confirm against upstream.
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
        # Serialize same-cpv builds by adding buildtime edges.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # status holds the first positional argument (conventionally an
        # exit code).  NOTE(review): an 'if pargs:' guard appears elided
        # in this listing — confirm against upstream.
        self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    """
    # The closure defers the (potentially expensive, lock-requiring)
    # blocker search until the caller actually needs it.
    def get_blockers():
        return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
    return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    # Build dblink objects for installed packages that block new_pkg.
    # NOTE(review): an early 'return None', the gc.collect() call, and
    # 'continue' statements after the slot/cpv guards appear elided in
    # this listing — confirm against upstream.
    if self._opts_ignore_blockers.intersection(self.myopts):  # ('return None' elided)

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # A package never blocks itself or its same-slot predecessor
        # (that is a normal upgrade/replacement).
        if new_pkg.slot_atom == blocking_pkg.slot_atom:  # ('continue' elided)
        if new_pkg.cpv == blocking_pkg.cpv:  # ('continue' elided)
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """
    Translate a dblink instance into the corresponding Package object
    for its tree type and root.
    """
    tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    cfg = self.trees[pkg_dblink.myroot]["root_config"]
    # Only the "installed" tree represents already-merged packages.
    return self._pkg(pkg_dblink.mycpv, tree_type, cfg,
        installed=(tree_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    # Append msg to the build log at log_path.  NOTE(review): the
    # write and close (or try/finally) lines are elided in this
    # listing — confirm against upstream.
    f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    # Forward elog messages from a dblink merge to func, directing the
    # output to the build log when running in the background.
    # NOTE(review): the 'out'/'log_file' setup, the loop over msgs, and
    # the try/finally that closes log_file are elided in this listing —
    # confirm against upstream.

    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if background and log_path is not None:
        log_file = open(log_path, 'a')

    func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)

    if log_file is not None:
        # (log_file.close() elided)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    # Route a dblink display message to the console and/or build log.
    # NOTE(review): a 'return' after the first branch and a background
    # guard ('else:') appear elided in this listing — confirm upstream.
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if log_path is None:
        # No log file: write to the console unless we are backgrounded
        # and the message is below WARN level.
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    # ('else:' branch head elided)
    portage.util.writemsg_level(msg,
        level=level, noiselevel=noiselevel)
    self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    # log_path is looked up but not used in the visible code — it is
    # presumably consumed by elided lines; confirm against upstream.
    log_path = settings.get("PORTAGE_LOG_FILE")

    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    # Block until the phase completes; the scheduler interface keeps
    # other tasks serviced while waiting.
    ebuild_phase.wait()

    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must done before parallel
    builds start.
    """
    # NOTE(review): early returns, 'continue' statements, a 'break',
    # and the writemsg_level( call heads are elided in this listing —
    # confirm against upstream.
    if '--fetchonly' in self.myopts:  # ('return os.EX_OK' elided)

    digest = '--digest' in self.myopts
    for pkgsettings in self.pkgsettings.itervalues():
        if 'digest' in pkgsettings.features:  # ('digest = True; break' elided)

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':  # ('continue' elided)
        pkgsettings = self.pkgsettings[x.root]
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:  # ('continue' elided)
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv)
        if not ebuild_path:
            # (writemsg_level( call head elided)
            "!!! Could not locate ebuild for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not portage.digestgen([], pkgsettings, myportdb=portdb):
            # (writemsg_level( call head elided)
            "!!! Unable to generate manifest for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    # NOTE(review): an early 'return os.EX_OK', 'continue' statements,
    # and the digestcheck failure handling are elided in this listing —
    # confirm against upstream.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:  # ('return os.EX_OK' elided)

    shown_verifying_msg = False
    quiet_settings = {}
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        # Clone a quiet config so digestcheck output stays terse.
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":  # ('continue' elided)

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
            # (failure handling / non-zero return elided)
def _add_prefetchers(self):
    # Spawn background fetchers for everything in the merge list.
    # NOTE(review): a 'return' after the first guard appears elided in
    # this listing (as shown, the second guard is redundant) — confirm
    # against upstream.

    if not self._parallel_fetch:  # ('return' elided)

    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")

        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts

        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # NOTE(review): the 'prefetcher = None' initialization and the
    # trailing 'return prefetcher' appear elided in this listing —
    # confirm against upstream.
    if not isinstance(pkg, Package):
        # (non-Package branch body elided)

    elif pkg.type_name == "ebuild":

        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):

        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)

    # ('return prefetcher' elided)
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.

    @returns: True if a restart is scheduled, False otherwise.
    """
    # NOTE(review): the 'return' lines are elided in this listing —
    # confirm against upstream.
    if self._opts_no_restart.intersection(self.myopts):  # ('return False' elided)

    mergelist = self._mergelist

    for i, pkg in enumerate(mergelist):
        # A restart only matters when more packages follow the
        # portage replacement in the list.
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:  # ('return True' elided)

    # ('return False' elided)
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
        requires restart, False otherwise.
    """
    # NOTE(review): the fall-through 'return' lines are elided in this
    # listing — confirm against upstream.

    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            # Only a version change of the running portage matters.
            return pkg.cpv != self._running_portage.cpv
        # ('return True' elided)
    # ('return False' elided)
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    # NOTE(review): several early 'return' lines, a mtimedb.commit()
    # call, and an 'if myarg is True:' branch head are elided in this
    # listing — confirm against upstream.
    if self._opts_no_restart.intersection(self.myopts):  # ('return' elided)

    if not self._is_restart_necessary(pkg):  # ('return' elided)

    if pkg == self._mergelist[-1]:  # ('return' elided — nothing left to merge)

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    # Drop the just-merged portage from the resume list before exec().
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            # ('if myarg is True:' branch head elided)
            mynewargv.append(myopt)
            # ('else:' branch head elided)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
# NOTE(review): interior of the Scheduler merge() method; its 'def'
# line plus a number of statements (try/finally structure, 'return'
# and 'break' lines, 'else:' heads) are elided in this listing —
# confirm against upstream before relying on control flow.

if "--resume" in self.myopts:
    # We are resuming an interrupted merge.
    portage.writemsg_stdout(
        colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
    self._logger.log(" *** Resuming merge...")

self._save_resume_list()

# ('try:' elided)
self._background = self._background_mode()
except self._unknown_internal_error:
    # (handler body — 'return 1' — elided)

for root in self.trees:
    root_config = self.trees[root]["root_config"]

    # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
    # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
    # for ensuring sane $PWD (bug #239560) and storing elog messages.
    tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
    if not tmpdir or not os.path.isdir(tmpdir):
        msg = "The directory specified in your " + \
            "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
            "does not exist. Please create this " + \
            "directory or correct your PORTAGE_TMPDIR setting."
        msg = textwrap.wrap(msg, 70)
        out = portage.output.EOutput()
        # (error display loop and 'return 1' elided)

    if self._background:
        root_config.settings.unlock()
        root_config.settings["PORTAGE_BACKGROUND"] = "1"
        root_config.settings.backup_changes("PORTAGE_BACKGROUND")
        root_config.settings.lock()

    self.pkgsettings[root] = portage.config(
        clone=root_config.settings)

rval = self._generate_digests()
if rval != os.EX_OK:  # ('return rval' elided)

rval = self._check_manifests()
if rval != os.EX_OK:  # ('return rval' elided)

keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs

# (retry loop head elided)
rval = self._merge()
if rval == os.EX_OK or fetchonly or not keep_going:  # ('break' elided)
if "resume" not in mtimedb:  # ('break' elided)
mergelist = self._mtimedb["resume"].get("mergelist")
# (empty-mergelist guard elided)
if not failed_pkgs:  # ('break' elided)
for failed_pkg in failed_pkgs:
    mergelist.remove(list(failed_pkg.pkg))
self._failed_pkgs_all.extend(failed_pkgs)
# (failed_pkgs reset elided)
if not self._calc_resume_list():  # ('break' elided)
clear_caches(self.trees)
if not self._mergelist:  # ('break' elided)
self._save_resume_list()
self._pkg_count.curval = 0
self._pkg_count.maxval = len([x for x in self._mergelist \
    if isinstance(x, Package) and x.operation == "merge"])
self._status_display.maxval = self._pkg_count.maxval

self._logger.log(" *** Finished. Cleaning up...")

self._failed_pkgs_all.extend(failed_pkgs)

background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:
    # If only one package failed then just show it's
    # whole log for easy viewing.
    failed_pkg = self._failed_pkgs_all[-1]
    build_dir = failed_pkg.build_dir
    log_paths = [failed_pkg.build_log]

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        # ('try:' around open elided)
        log_file = open(log_path)

    if log_file is not None:
        # ('try:' elided)
        for line in log_file:
            writemsg_level(line, noiselevel=-1)
        # ('finally: log_file.close()' elided)
        failure_log_shown = True

# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()

if background and not failure_log_shown and \
    self._failed_pkgs_all and \
    self._failed_pkgs_die_msgs and \
    not mod_echo_output:

    printer = portage.output.EOutput()
    for mysettings, key, logentries in self._failed_pkgs_die_msgs:
        # (root_msg default assignment elided)
        if mysettings["ROOT"] != "/":
            root_msg = " merged to %s" % mysettings["ROOT"]

        printer.einfo("Error messages for package %s%s:" % \
            (colorize("INFORM", key), root_msg))

        for phase in portage.const.EBUILD_PHASES:
            if phase not in logentries:  # ('continue' elided)
            for msgtype, msgcontent in logentries[phase]:
                if isinstance(msgcontent, basestring):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    printer.eerror(line.strip("\n"))

if self._post_mod_echo_msgs:
    for msg in self._post_mod_echo_msgs:
        # (msg() invocation elided)

if len(self._failed_pkgs_all) > 1 or \
    (self._failed_pkgs_all and "--keep-going" in self.myopts):
    if len(self._failed_pkgs_all) > 1:
        msg = "The following %d packages have " % \
            len(self._failed_pkgs_all) + \
            "failed to build or install:"
        # ('else:' branch head elided)
        msg = "The following package has " + \
            "failed to build or install:"
    prefix = bad(" * ")
    writemsg(prefix + "\n", noiselevel=-1)
    from textwrap import wrap
    for line in wrap(msg, 72):
        writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
    writemsg(prefix + "\n", noiselevel=-1)
    for failed_pkg in self._failed_pkgs_all:
        writemsg("%s\t%s\n" % (prefix,
            colorize("INFORM", str(failed_pkg.pkg))),
            # (noiselevel=-1) argument line elided)
        writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    # Collect ERROR-level elog entries so they can be displayed after
    # the merge finishes.  NOTE(review): an 'if errors:' guard appears
    # elided in this listing — confirm against upstream.
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    # Return the most useful existing log path for failed_pkg, or None.
    # NOTE(review): candidate-path guards, the try/except around
    # os.stat(), and the 'return' lines are elided in this listing —
    # confirm against upstream.

    build_dir = failed_pkg.build_dir

    log_paths = [failed_pkg.build_log]

    for log_path in log_paths:
        log_size = os.stat(log_path).st_size
def _add_packages(self):
    # Move Package entries from the merge list into the scheduling queue.
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            # Satisfied blockers need no scheduling.  (Branch body is
            # elided in this listing — presumably a no-op; confirm.)
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    """
    # NOTE(review): a 'graph is None' guard, 'return'/'continue' lines,
    # and the closure's return values are elided in this listing —
    # confirm against upstream.
    graph = self._digraph
    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    if pkg.root != '/':  # ('return' elided)

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
            # (return values elided)

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':  # ('continue' elided)
        # (self-reference guard elided)
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
    # Exit hook for merges parked in the wait queue: unregister the
    # task from the scheduled-wait list, then run the common
    # merge-exit bookkeeping.
    self._merge_wait_scheduled.remove(task)
    self._merge_exit(task)
10903 def _merge_exit(self, merge):
10904 self._do_merge_exit(merge)
10905 self._deallocate_config(merge.merge.settings)
10906 if merge.returncode == os.EX_OK and \
10907 not merge.merge.pkg.installed:
10908 self._status_display.curval += 1
10909 self._status_display.merges = len(self._task_queues.merge)
def _do_merge_exit(self, merge):
    # Record success or failure of a finished merge and keep the resume
    # list in sync.  NOTE(review): a 'pkg=pkg' argument line, an early
    # 'return' after the failure branch, and a mtimedb.commit() call
    # are elided in this listing — confirm against upstream.
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=merge.returncode))  # ('pkg=pkg,' line elided)
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

        self._status_display.failed = len(self._failed_pkgs)
        # ('return' elided)

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)

    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    # (mtimedb.commit() elided)
def _build_exit(self, build):
    # Handle completion of a build job: queue the follow-up merge on
    # success, or record the failure.  NOTE(review): 'else:' branch
    # heads, a job-count decrement, and a 'pkg=build.pkg' argument line
    # are elided in this listing — confirm against upstream.
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        # ('else:' branch head elided)
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)
        self._status_display.merges = len(self._task_queues.merge)
    # ('else:' — failed build — branch head elided)
    settings = build.settings
    build_dir = settings.get("PORTAGE_BUILDDIR")
    build_log = settings.get("PORTAGE_LOG_FILE")

    self._failed_pkgs.append(self._failed_pkg(
        build_dir=build_dir, build_log=build_log,
        returncode=build.returncode))  # ('pkg=build.pkg,' line elided)
    self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

    self._status_display.failed = len(self._failed_pkgs)
    self._deallocate_config(build.settings)
    # (job-count decrement elided)
    self._status_display.running = self._jobs
def _extract_exit(self, build):
    # Binary package extraction shares the build-exit handling.
    self._build_exit(build)
10985 def _task_complete(self, pkg):
10986 self._completed_tasks.add(pkg)
10987 self._unsatisfied_system_deps.discard(pkg)
10988 self._choose_pkg_return_early = False
# NOTE(review): interior of the Scheduler _merge() method; its 'def'
# line, the try/finally around the main loop, and the return lines
# are elided in this listing — confirm against upstream.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Quiet lock messages and capture elog output while backgrounded.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
# ('try: self._main_loop() finally:' elided)
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# ('if failed_pkgs:' guard elided)
rval = failed_pkgs[-1].returncode
# ('return rval' / success return elided)
11011 def _main_loop_cleanup(self):
11012 del self._pkg_queue[:]
11013 self._completed_tasks.clear()
11014 self._deep_system_deps.clear()
11015 self._unsatisfied_system_deps.clear()
11016 self._choose_pkg_return_early = False
11017 self._status_display.reset()
11018 self._digraph = None
11019 self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    """
    # NOTE(review): 'return None' lines, the 'chosen_pkg = None'
    # initialization, and the loop body that sets chosen_pkg are elided
    # in this listing — confirm against upstream.

    if self._choose_pkg_return_early:  # ('return None' elided)

    if self._digraph is None:
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            # ('return None' elided)
        return self._pkg_queue.pop(0)

    if not (self._jobs or self._task_queues.merge):
        # Nothing else is running, so the graph checks are unnecessary.
        return self._pkg_queue.pop(0)

    self._prune_digraph()

    # ('chosen_pkg = None' elided)
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        # (removal of pkg from 'later' elided)
        if not self._dependent_on_scheduled_merges(pkg, later):
            # ('chosen_pkg = pkg; break' elided)

    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)

    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True

    # ('return chosen_pkg' elided)
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.

    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
        merge order

    @returns: True if the package is dependent, False otherwise.
    """
    # NOTE(review): the 'while node_stack:' loop head, the 'dependent'
    # result flag, part of the compound condition, and the return
    # lines are elided in this listing — confirm against upstream.
    graph = self._digraph
    completed_tasks = self._completed_tasks

    # Depth-first traversal over direct and indirect dependencies.
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    node = node_stack.pop()
    if node in traversed_nodes:  # ('continue' elided)
    traversed_nodes.add(node)
    if not ((node.installed and node.operation == "nomerge") or \
        (node.operation == "uninstall" and \
        node not in direct_deps) or \
        node in completed_tasks or \
        # (remaining condition and 'dependent = True; break' elided)
    node_stack.extend(graph.child_nodes(node))
11100 def _allocate_config(self, root):
11102 Allocate a unique config instance for a task in order
11103 to prevent interference between parallel tasks.
11105 if self._config_pool[root]:
11106 temp_settings = self._config_pool[root].pop()
11108 temp_settings = portage.config(clone=self.pkgsettings[root])
11109 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11110 # performance reasons, call it here to make sure all settings from the
11111 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11112 temp_settings.reload()
11113 temp_settings.reset()
11114 return temp_settings
11116 def _deallocate_config(self, settings):
11117 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    # Drive scheduling until there is no more work.  NOTE(review): the
    # poll-loop calls and the drain-loop head/'break' are elided in
    # this listing — confirm against upstream.

    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    merge_queue = self._task_queues.merge

    while self._schedule():
        if self._poll_event_handlers:
            # (poll call elided)

    # (drain loop head elided)
    if not (self._jobs or merge_queue):  # ('break' elided)
    if self._poll_event_handlers:
        # (poll call elided)
11140 def _keep_scheduling(self):
11141 return bool(self._pkg_queue and \
11142 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    # One scheduling pass: flush waiting merges when idle, schedule new
    # jobs, and keep queues moving.  NOTE(review): a per-queue schedule
    # call in the loop over task queues, and possibly re-entrancy
    # guards, are elided in this listing — confirm against upstream.

    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]

    self._schedule_tasks_imp()
    self._status_display.display()

    for q in self._task_queues.values():
        # (per-queue scheduling call elided)

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()

    self._schedule_tasks_imp()
    self._status_display.display()

    return self._keep_scheduling()
def _job_delay(self):
    """
    @returns: True if job scheduling should be delayed, False otherwise.
    """
    # NOTE(review): the load-average branch and the 'return True' /
    # 'return False' lines are elided in this listing — confirm
    # against upstream.
    if self._jobs and self._max_load is not None:

        current_time = time.time()

        # Exponential back-off based on the number of running jobs.
        delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
        if delay > self._job_delay_max:
            delay = self._job_delay_max
        if (current_time - self._previous_job_start_time) < delay:
            # ('return True' elided)

    # ('return False' elided)
def _schedule_tasks_imp(self):
    """
    @returns: True if state changed, False otherwise.
    """
    # NOTE(review): the enclosing loop, the 'state_change' counter
    # initialization/updates, the job-count increment, and the branch
    # heads selecting between merge/extract/build are elided in this
    # listing — confirm against upstream.

    if not self._keep_scheduling():
        return bool(state_change)

    if self._choose_pkg_return_early or \
        self._merge_wait_scheduled or \
        (self._jobs and self._unsatisfied_system_deps) or \
        not self._can_add_job() or \
        # (final condition — likely self._job_delay() — elided)
        return bool(state_change)

    pkg = self._choose_pkg()
    # ('if pkg is None:' guard elided)
    return bool(state_change)

    # (state-change bookkeeping elided)
    if not pkg.installed:
        self._pkg_count.curval += 1

    task = self._task(pkg)

    # ('if pkg.installed:' branch head elided)
    merge = PackageMerge(merge=task)
    merge.addExitListener(self._merge_exit)
    self._task_queues.merge.add(merge)

    # ('elif pkg.built:' branch head elided)
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._extract_exit)
    self._task_queues.jobs.add(task)

    # ('else:' branch head elided)
    self._previous_job_start_time = time.time()
    self._status_display.running = self._jobs
    task.addExitListener(self._build_exit)
    self._task_queues.jobs.add(task)

    return bool(state_change)
def _task(self, pkg):
    # Build the MergeListItem task for pkg, determining which installed
    # package (if any) it replaces in the same slot.
    # NOTE(review): an 'if previous_cpv:' guard and the trailing
    # 'return task' appear elided in this listing — confirm upstream.

    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        # Take the best (last) matching version as the replaced package.
        previous_cpv = previous_cpv.pop()
        pkg_to_replace = self._pkg(previous_cpv,
            "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
        self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """
    Emit a status message describing a failed package, followed by a
    second line with its log file location when one can be found.
    """
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        # Mention the target root when it is not the default.
        msg += " %s %s" % (preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    has_log = log_path is not None
    if has_log:
        msg += ", Log file:"
    self._status_msg(msg)
    if has_log:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the responsibility of generating \r and \n control characters,
    to guarantee that lines are created or erased when necessary and
    appropriate.

    @param msg: a brief status message (no newlines allowed)
    """
    if not self._background:
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    # Only actual merge operations belong in the resume list.
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]
def _calc_resume_list(self):
    """
    Use the current resume list to calculate a new one,
    dropping any packages with unsatisfied deps.

    @returns: True if successful, False otherwise.
    """
    # NOTE(review): numerous lines ('return' statements, 'else:' and
    # 'try:' heads, spinner guards, variable initializations) are
    # elided in this listing — confirm against upstream.  Python 2
    # syntax ('print' statement, 'except X, e') throughout.
    print colorize("GOOD", "*** Resuming merge...")

    if self._show_list():
        if "--tree" in self.myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in reverse order:\n\n"))
        # ('else:' branch head elided)
        portage.writemsg_stdout("\n" + \
            darkgreen("These are the packages that " + \
            "would be merged, in order:\n\n"))

    show_spinner = "--quiet" not in self.myopts and \
        "--nodeps" not in self.myopts

    # ('if show_spinner:' guard elided)
    print "Calculating dependencies ",

    myparams = create_depgraph_params(self.myopts, None)
    # ('try:' head elided)
    success, mydepgraph, dropped_tasks = resume_depgraph(
        self.settings, self.trees, self._mtimedb, self.myopts,
        myparams, self._spinner)
    except depgraph.UnsatisfiedResumeDep, exc:
        # rename variable to avoid python-3.0 error:
        # SyntaxError: can not delete variable 'e' referenced in nested
        # scope ('e = exc' line elided)
        mydepgraph = e.depgraph
        dropped_tasks = set()

    # ('if show_spinner:' guard elided)
    print "\b\b... done!"

    # (guard selecting the unsatisfied-dep path elided)
    def unsatisfied_resume_dep_msg():
        mydepgraph.display_problems()
        out = portage.output.EOutput()
        out.eerror("One or more packages are either masked or " + \
            "have missing dependencies:")
        # (blank eerror / 'indent' setup elided)
        show_parents = set()
        for dep in e.value:
            if dep.parent in show_parents:  # ('continue' elided)
            show_parents.add(dep.parent)
            if dep.atom is None:
                out.eerror(indent + "Masked package:")
                out.eerror(2 * indent + str(dep.parent))
            # ('else:' branch head elided)
            out.eerror(indent + str(dep.atom) + " pulled in by:")
            out.eerror(2 * indent + str(dep.parent))
        msg = "The resume list contains packages " + \
            "that are either masked or have " + \
            "unsatisfied dependencies. " + \
            "Please restart/continue " + \
            "the operation manually, or use --skipfirst " + \
            "to skip the first package in the list and " + \
            "any other packages that may be " + \
            "masked or have missing dependencies."
        for line in textwrap.wrap(msg, 72):
            # (out.eerror(line) elided)
    self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
    # ('return False' elided)

    if success and self._show_list():
        mylist = mydepgraph.altlist()
        # (tree-order handling elided)
        if "--tree" in self.myopts:
            # (list reversal elided)
        mydepgraph.display(mylist, favorites=self._favorites)

    # (failure branch head elided)
    self._post_mod_echo_msgs.append(mydepgraph.display_problems)
    # ('return False' elided)
    mydepgraph.display_problems()

    mylist = mydepgraph.altlist()
    # Break references so dropped objects can be garbage collected.
    mydepgraph.break_refs(mylist)
    mydepgraph.break_refs(dropped_tasks)
    self._mergelist = mylist
    self._set_digraph(mydepgraph.schedulerGraph())

    # (msg_width initialization elided)
    for task in dropped_tasks:
        if not (isinstance(task, Package) and task.operation == "merge"):
            # ('continue' elided)
        # ('pkg = task' assignment elided)
        msg = "emerge --keep-going:" + \
            # (message continuation line elided)
        if pkg.root != "/":
            msg += " for %s" % (pkg.root,)
        msg += " dropped due to unsatisfied dependency."
        for line in textwrap.wrap(msg, msg_width):
            eerror(line, phase="other", key=pkg.cpv)
        settings = self.pkgsettings[pkg.root]
        # Ensure that log collection from $T is disabled inside
        # elog_process(), since any logs that might exist are
        # irrelevant here.
        settings.pop("T", None)
        portage.elog.elog_process(pkg.cpv, settings)
        self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))

    # ('return True' elided)
# Scheduler._show_list: return whether the pending merge list should be
# (re)displayed, based on command-line options.
# NOTE(review): this file is a fragmentary dump — the leading "114xx" tokens
# are embedded original line numbers and the gaps between them are lines
# missing from the dump (the return statements of this method are absent).
11436 def _show_list(self):
11437 myopts = self.myopts
# Show the list unless --quiet; --ask/--tree/--verbose each force display.
11438 if "--quiet" not in myopts and \
11439 ("--ask" in myopts or "--tree" in myopts or \
11440 "--verbose" in myopts):
# Scheduler._world_atom: record `pkg` in the "world" favorites file when
# appropriate; a no-op for pretend/oneshot-style operations, packages outside
# the target root, or packages not matched by the user's arguments.
# NOTE(review): fragmentary dump — leading "114xx" tokens are embedded line
# numbers; gaps (e.g. early `return` lines, lock/unlock cleanup) are missing.
11444 def _world_atom(self, pkg):
11446 Add the package to the world file, but only if
11447 it's supposed to be added. Otherwise, do nothing.
# Options under which the world file must never be modified.
11450 if set(("--buildpkgonly", "--fetchonly",
11452 "--oneshot", "--onlydeps",
11453 "--pretend")).intersection(self.myopts):
11456 if pkg.root != self.target_root:
11459 args_set = self._args_set
11460 if not args_set.findAtomForPackage(pkg):
11463 logger = self._logger
11464 pkg_count = self._pkg_count
11465 root_config = pkg.root_config
11466 world_set = root_config.sets["world"]
# Duck-typed world set: lock/load/add are all optional capabilities.
11467 world_locked = False
11468 if hasattr(world_set, "lock"):
11470 world_locked = True
11473 if hasattr(world_set, "load"):
11474 world_set.load() # maybe it's changed on disk
11476 atom = create_world_atom(pkg, args_set, root_config)
11478 if hasattr(world_set, "add"):
11479 self._status_msg(('Recording %s in "world" ' + \
11480 'favorites file...') % atom)
11481 logger.log(" === (%s of %s) Updating world file (%s)" % \
11482 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11483 world_set.add(atom)
# Presumably reached when the set is read-only — TODO confirm against
# the missing surrounding lines.
11485 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11486 (atom,), level=logging.WARN, noiselevel=-1)
# Scheduler._pkg: fetch a Package instance, reusing one already present in
# the dependency digraph when possible, otherwise constructing it from the
# appropriate tree dbapi's cached metadata.
# NOTE(review): fragmentary dump — leading "115xx" tokens are embedded line
# numbers; the `installed`-branch assignment of `operation` and the early
# `return pkg` lines fall in the gaps.
11491 def _pkg(self, cpv, type_name, root_config, installed=False):
11493 Get a package instance from the cache, or create a new
11494 one if necessary. Raises KeyError from aux_get if it
11495 failures for some reason (package does not exist or is
11498 operation = "merge"
11500 operation = "nomerge"
11502 if self._digraph is not None:
11503 # Reuse existing instance when available.
11504 pkg = self._digraph.get(
11505 (type_name, root_config.root, cpv, operation))
11506 if pkg is not None:
# Cache miss: build a new Package from the tree's aux cache keys.
11509 tree_type = depgraph.pkg_tree_map[type_name]
11510 db = root_config.trees[tree_type].dbapi
11511 db_keys = list(self.trees[root_config.root][
11512 tree_type].dbapi._aux_cache_keys)
11513 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11514 pkg = Package(cpv=cpv, metadata=metadata,
11515 root_config=root_config, installed=installed)
11516 if type_name == "ebuild":
# Ebuilds get USE/CHOST computed from the current config for this root.
11517 settings = self.pkgsettings[root_config.root]
11518 settings.setcpv(pkg)
11519 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11520 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# MetadataRegen: a PollScheduler that walks every category/package in the
# portdb and regenerates ebuild metadata cache entries in parallel, then
# discards stale ("dead") cache nodes.
# NOTE(review): fragmentary dump — leading "115xx/116xx" tokens are embedded
# line numbers; gaps include the `run()` def line, loop headers, and
# `continue`/`return` statements.
11524 class MetadataRegen(PollScheduler):
11526 def __init__(self, portdb, max_jobs=None, max_load=None):
11527 PollScheduler.__init__(self)
11528 self._portdb = portdb
# max_jobs default handling is in the missing lines (presumably 1).
11530 if max_jobs is None:
11533 self._max_jobs = max_jobs
11534 self._max_load = max_load
11535 self._sched_iface = self._sched_iface_class(
11536 register=self._register,
11537 schedule=self._schedule_wait,
11538 unregister=self._unregister)
11540 self._valid_pkgs = set()
11541 self._process_iter = self._iter_metadata_processes()
11542 self.returncode = os.EX_OK
11543 self._error_count = 0
# Generator yielding one metadata-regeneration subprocess per cpv.
11545 def _iter_metadata_processes(self):
11546 portdb = self._portdb
11547 valid_pkgs = self._valid_pkgs
# Reverse-sort so pop() walks categories in ascending order.
11548 every_cp = portdb.cp_all()
11549 every_cp.sort(reverse=True)
11552 cp = every_cp.pop()
11553 portage.writemsg_stdout("Processing %s\n" % cp)
11554 cpv_list = portdb.cp_list(cp)
11555 for cpv in cpv_list:
11556 valid_pkgs.add(cpv)
11557 ebuild_path, repo_path = portdb.findname2(cpv)
11558 metadata_process = portdb._metadata_process(
11559 cpv, ebuild_path, repo_path)
11560 if metadata_process is None:
11562 yield metadata_process
# run() body (def line missing from dump): schedule all processes, then
# prune cache entries that no longer correspond to an existing ebuild.
11566 portdb = self._portdb
11567 from portage.cache.cache_errors import CacheError
11570 for mytree in portdb.porttrees:
11572 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11573 except CacheError, e:
11574 portage.writemsg("Error listing cache entries for " + \
11575 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11580 while self._schedule():
# Every cpv that still resolves to an ebuild is not dead.
11587 for y in self._valid_pkgs:
11588 for mytree in portdb.porttrees:
11589 if portdb.findname2(y, mytree=mytree)[0]:
11590 dead_nodes[mytree].discard(y)
11592 for mytree, nodes in dead_nodes.iteritems():
11593 auxdb = portdb.auxdb[mytree]
11597 except (KeyError, CacheError):
# Start as many metadata subprocesses as the job/load limits allow.
11600 def _schedule_tasks(self):
11603 @returns: True if there may be remaining tasks to schedule,
11606 while self._can_add_job():
11608 metadata_process = self._process_iter.next()
11609 except StopIteration:
11613 metadata_process.scheduler = self._sched_iface
11614 metadata_process.addExitListener(self._metadata_exit)
11615 metadata_process.start()
# Exit listener: record failures but keep going (best-effort regen).
11618 def _metadata_exit(self, metadata_process):
11620 if metadata_process.returncode != os.EX_OK:
11621 self.returncode = 1
11622 self._error_count += 1
11623 self._valid_pkgs.discard(metadata_process.cpv)
11624 portage.writemsg("Error processing %s, continuing...\n" % \
11625 (metadata_process.cpv,))
# UninstallFailure: raised by unmerge() when portage.unmerge() returns a
# non-zero status; `status` carries that exit code.
# NOTE(review): fragmentary dump — the docstring delimiters and the guard
# around `pargs[0]` (presumably `if pargs:`) are in the missing lines.
11628 class UninstallFailure(portage.exception.PortageException):
11630 An instance of this class is raised by unmerge() when
11631 an uninstallation fails.
11634 def __init__(self, *pargs):
11635 portage.exception.PortageException.__init__(self, pargs)
11637 self.status = pargs[0]
# unmerge: the heart of `emerge --unmerge/--prune/--clean/--depclean`
# package removal.  Resolves the user's arguments against the installed-
# package database (vartree), partitions matches into selected / protected /
# omitted sets, warns about system-profile packages and packages still
# referenced by world/package sets, previews the result, and finally calls
# portage.unmerge() on each selected package.
# NOTE(review): fragmentary dump — leading "116xx-121xx" tokens are embedded
# original line numbers; the gaps between them are lines missing from the
# dump (try/finally scaffolding, `continue`/`return`/`break` statements,
# several loop headers).  Comments below describe only the visible code.
11639 def unmerge(root_config, myopts, unmerge_action,
11640 unmerge_files, ldpath_mtimes, autoclean=0,
11641 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11642 scheduler=None, writemsg_level=portage.util.writemsg_level):
11644 quiet = "--quiet" in myopts
11645 settings = root_config.settings
11646 sets = root_config.sets
11647 vartree = root_config.trees["vartree"]
11648 candidate_catpkgs=[]
11650 xterm_titles = "notitles" not in settings.features
11651 out = portage.output.EOutput()
# Local helper (def line missing): build/cache Package objects for
# installed cpvs from the vartree aux cache.
11653 db_keys = list(vartree.dbapi._aux_cache_keys)
11656 pkg = pkg_cache.get(cpv)
11658 pkg = Package(cpv=cpv, installed=True,
11659 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11660 root_config=root_config,
11661 type_name="installed")
11662 pkg_cache[cpv] = pkg
11665 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11667 # At least the parent needs to exist for the lock file.
11668 portage.util.ensure_dirs(vdb_path)
11669 except portage.exception.PortageException:
11673 if os.access(vdb_path, os.W_OK):
11674 vdb_lock = portage.locks.lockdir(vdb_path)
# Expand the system set's virtuals into concrete providers so that
# system packages can be warned about below.
11675 realsyslist = sets["system"].getAtoms()
11677 for x in realsyslist:
11678 mycp = portage.dep_getkey(x)
11679 if mycp in settings.getvirtuals():
11681 for provider in settings.getvirtuals()[mycp]:
11682 if vartree.dbapi.match(provider):
11683 providers.append(provider)
11684 if len(providers) == 1:
11685 syslist.extend(providers)
11687 syslist.append(mycp)
11689 mysettings = portage.config(clone=settings)
11691 if not unmerge_files:
11692 if unmerge_action == "unmerge":
11694 print bold("emerge unmerge") + " can only be used with specific package names"
11700 localtree = vartree
11701 # process all arguments and add all
11702 # valid db entries to candidate_catpkgs
11704 if not unmerge_files:
11705 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11707 #we've got command-line arguments
11708 if not unmerge_files:
11709 print "\nNo packages to unmerge have been provided.\n"
11711 for x in unmerge_files:
11712 arg_parts = x.split('/')
11713 if x[0] not in [".","/"] and \
11714 arg_parts[-1][-7:] != ".ebuild":
11715 #possible cat/pkg or dep; treat as such
11716 candidate_catpkgs.append(x)
11717 elif unmerge_action in ["prune","clean"]:
11718 print "\n!!! Prune and clean do not accept individual" + \
11719 " ebuilds as arguments;\n skipping.\n"
11722 # it appears that the user is specifying an installed
11723 # ebuild and we're in "unmerge" mode, so it's ok.
11724 if not os.path.exists(x):
11725 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Translate a filesystem path inside the vdb into a =cat/pkg-ver atom.
11728 absx = os.path.abspath(x)
11729 sp_absx = absx.split("/")
11730 if sp_absx[-1][-7:] == ".ebuild":
11732 absx = "/".join(sp_absx)
11734 sp_absx_len = len(sp_absx)
11736 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11737 vdb_len = len(vdb_path)
11739 sp_vdb = vdb_path.split("/")
11740 sp_vdb_len = len(sp_vdb)
11742 if not os.path.exists(absx+"/CONTENTS"):
11743 print "!!! Not a valid db dir: "+str(absx)
11746 if sp_absx_len <= sp_vdb_len:
11747 # The Path is shorter... so it can't be inside the vdb.
11750 print "\n!!!",x,"cannot be inside "+ \
11751 vdb_path+"; aborting.\n"
11754 for idx in range(0,sp_vdb_len):
11755 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11758 print "\n!!!", x, "is not inside "+\
11759 vdb_path+"; aborting.\n"
11762 print "="+"/".join(sp_absx[sp_vdb_len:])
11763 candidate_catpkgs.append(
11764 "="+"/".join(sp_absx[sp_vdb_len:]))
11767 if (not "--quiet" in myopts):
11769 if settings["ROOT"] != "/":
11770 writemsg_level(darkgreen(newline+ \
11771 ">>> Using system located in ROOT tree %s\n" % \
11774 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11775 not ("--quiet" in myopts):
11776 writemsg_level(darkgreen(newline+\
11777 ">>> These are the packages that would be unmerged:\n"))
11779 # Preservation of order is required for --depclean and --prune so
11780 # that dependencies are respected. Use all_selected to eliminate
11781 # duplicate packages since the same package may be selected by
11784 all_selected = set()
11785 for x in candidate_catpkgs:
11786 # cycle through all our candidate deps and determine
11787 # what will and will not get unmerged
11789 mymatch = vartree.dbapi.match(x)
11790 except portage.exception.AmbiguousPackageName, errpkgs:
11791 print "\n\n!!! The short ebuild name \"" + \
11792 x + "\" is ambiguous. Please specify"
11793 print "!!! one of the following fully-qualified " + \
11794 "ebuild names instead:\n"
11795 for i in errpkgs[0]:
11796 print " " + green(i)
11800 if not mymatch and x[0] not in "<>=~":
11801 mymatch = localtree.dep_match(x)
11803 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11804 (x, unmerge_action), noiselevel=-1)
# pkgmap entry per argument: protected / selected / omitted buckets.
11808 {"protected": set(), "selected": set(), "omitted": set()})
11809 mykey = len(pkgmap) - 1
11810 if unmerge_action=="unmerge":
11812 if y not in all_selected:
11813 pkgmap[mykey]["selected"].add(y)
11814 all_selected.add(y)
11815 elif unmerge_action == "prune":
11816 if len(mymatch) == 1:
# prune: keep only the "best" installed version of each package.
11818 best_version = mymatch[0]
11819 best_slot = vartree.getslot(best_version)
11820 best_counter = vartree.dbapi.cpv_counter(best_version)
11821 for mypkg in mymatch[1:]:
11822 myslot = vartree.getslot(mypkg)
11823 mycounter = vartree.dbapi.cpv_counter(mypkg)
11824 if (myslot == best_slot and mycounter > best_counter) or \
11825 mypkg == portage.best([mypkg, best_version]):
11826 if myslot == best_slot:
11827 if mycounter < best_counter:
11828 # On slot collision, keep the one with the
11829 # highest counter since it is the most
11830 # recently installed.
11832 best_version = mypkg
11834 best_counter = mycounter
11835 pkgmap[mykey]["protected"].add(best_version)
11836 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11837 if mypkg != best_version and mypkg not in all_selected)
11838 all_selected.update(pkgmap[mykey]["selected"])
11840 # unmerge_action == "clean"
11842 for mypkg in mymatch:
11843 if unmerge_action == "clean":
11844 myslot = localtree.getslot(mypkg)
11846 # since we're pruning, we don't care about slots
11847 # and put all the pkgs in together
11849 if myslot not in slotmap:
11850 slotmap[myslot] = {}
11851 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11853 for mypkg in vartree.dbapi.cp_list(
11854 portage.dep_getkey(mymatch[0])):
11855 myslot = vartree.getslot(mypkg)
11856 if myslot not in slotmap:
11857 slotmap[myslot] = {}
11858 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
# clean: per slot, protect the newest (highest counter) install and
# select the rest for removal.
11860 for myslot in slotmap:
11861 counterkeys = slotmap[myslot].keys()
11862 if not counterkeys:
11865 pkgmap[mykey]["protected"].add(
11866 slotmap[myslot][counterkeys[-1]])
11867 del counterkeys[-1]
11869 for counter in counterkeys[:]:
11870 mypkg = slotmap[myslot][counter]
11871 if mypkg not in mymatch:
11872 counterkeys.remove(counter)
11873 pkgmap[mykey]["protected"].add(
11874 slotmap[myslot][counter])
11876 #be pretty and get them in order of merge:
11877 for ckey in counterkeys:
11878 mypkg = slotmap[myslot][ckey]
11879 if mypkg not in all_selected:
11880 pkgmap[mykey]["selected"].add(mypkg)
11881 all_selected.add(mypkg)
11882 # ok, now the last-merged package
11883 # is protected, and the rest are selected
11884 numselected = len(all_selected)
11885 if global_unmerge and not numselected:
11886 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11889 if not numselected:
11890 portage.writemsg_stdout(
11891 "\n>>> No packages selected for removal by " + \
11892 unmerge_action + "\n")
11896 vartree.dbapi.flush_cache()
11897 portage.locks.unlockdir(vdb_lock)
11899 from portage.sets.base import EditablePackageSet
11901 # generate a list of package sets that are directly or indirectly listed in "world",
11902 # as there is no persistent list of "installed" sets
11903 installed_sets = ["world"]
11908 pos = len(installed_sets)
11909 for s in installed_sets[pos - 1:]:
11912 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11915 installed_sets += candidates
11916 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11919 # we don't want to unmerge packages that are still listed in user-editable package sets
11920 # listed in "world" as they would be remerged on the next update of "world" or the
11921 # relevant package sets.
11922 unknown_sets = set()
11923 for cp in xrange(len(pkgmap)):
11924 for cpv in pkgmap[cp]["selected"].copy():
11928 # It could have been uninstalled
11929 # by a concurrent process.
# Never let portage unmerge itself on the live root.
11932 if unmerge_action != "clean" and \
11933 root_config.root == "/" and \
11934 portage.match_from_list(
11935 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11936 msg = ("Not unmerging package %s since there is no valid " + \
11937 "reason for portage to unmerge itself.") % (pkg.cpv,)
11938 for line in textwrap.wrap(msg, 75):
11940 # adjust pkgmap so the display output is correct
11941 pkgmap[cp]["selected"].remove(cpv)
11942 all_selected.remove(cpv)
11943 pkgmap[cp]["protected"].add(cpv)
11947 for s in installed_sets:
11948 # skip sets that the user requested to unmerge, and skip world
11949 # unless we're unmerging a package set (as the package would be
11950 # removed from "world" later on)
11951 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11955 if s in unknown_sets:
11957 unknown_sets.add(s)
11958 out = portage.output.EOutput()
11959 out.eerror(("Unknown set '@%s' in " + \
11960 "%svar/lib/portage/world_sets") % \
11961 (s, root_config.root))
11964 # only check instances of EditablePackageSet as other classes are generally used for
11965 # special purposes and can be ignored here (and are usually generated dynamically, so the
11966 # user can't do much about them anyway)
11967 if isinstance(sets[s], EditablePackageSet):
11969 # This is derived from a snippet of code in the
11970 # depgraph._iter_atoms_for_pkg() method.
11971 for atom in sets[s].iterAtomsForPackage(pkg):
11972 inst_matches = vartree.dbapi.match(atom)
11973 inst_matches.reverse() # descending order
11975 for inst_cpv in inst_matches:
11977 inst_pkg = _pkg(inst_cpv)
11979 # It could have been uninstalled
11980 # by a concurrent process.
11983 if inst_pkg.cp != atom.cp:
11985 if pkg >= inst_pkg:
11986 # This is descending order, and we're not
11987 # interested in any versions <= pkg given.
11989 if pkg.slot_atom != inst_pkg.slot_atom:
11990 higher_slot = inst_pkg
11992 if higher_slot is None:
11996 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11997 #print colorize("WARN", "but still listed in the following package sets:")
11998 #print " %s\n" % ", ".join(parents)
11999 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12000 print colorize("WARN", "still referenced by the following package sets:")
12001 print " %s\n" % ", ".join(parents)
12002 # adjust pkgmap so the display output is correct
12003 pkgmap[cp]["selected"].remove(cpv)
12004 all_selected.remove(cpv)
12005 pkgmap[cp]["protected"].add(cpv)
12009 numselected = len(all_selected)
12010 if not numselected:
12012 "\n>>> No packages selected for removal by " + \
12013 unmerge_action + "\n"
12016 # Unmerge order only matters in some cases
# Merge per-cp buckets so the preview is grouped and sorted by cp.
12020 selected = d["selected"]
12023 cp = portage.cpv_getkey(iter(selected).next())
12024 cp_dict = unordered.get(cp)
12025 if cp_dict is None:
12027 unordered[cp] = cp_dict
12030 for k, v in d.iteritems():
12031 cp_dict[k].update(v)
12032 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview loop: print selected/protected/omitted versions per cp and
# warn when a system-profile package is about to be removed.
12034 for x in xrange(len(pkgmap)):
12035 selected = pkgmap[x]["selected"]
12038 for mytype, mylist in pkgmap[x].iteritems():
12039 if mytype == "selected":
12041 mylist.difference_update(all_selected)
12042 cp = portage.cpv_getkey(iter(selected).next())
12043 for y in localtree.dep_match(cp):
12044 if y not in pkgmap[x]["omitted"] and \
12045 y not in pkgmap[x]["selected"] and \
12046 y not in pkgmap[x]["protected"] and \
12047 y not in all_selected:
12048 pkgmap[x]["omitted"].add(y)
12049 if global_unmerge and not pkgmap[x]["selected"]:
12050 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12052 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12053 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12054 "'%s' is part of your system profile.\n" % cp),
12055 level=logging.WARNING, noiselevel=-1)
12056 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12057 "be damaging to your system.\n\n"),
12058 level=logging.WARNING, noiselevel=-1)
12059 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12060 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12061 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12063 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12065 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12066 for mytype in ["selected","protected","omitted"]:
12068 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12069 if pkgmap[x][mytype]:
12070 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12071 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12072 for pn, ver, rev in sorted_pkgs:
12076 myversion = ver + "-" + rev
12077 if mytype == "selected":
12079 colorize("UNMERGE_WARN", myversion + " "),
12083 colorize("GOOD", myversion + " "), noiselevel=-1)
12085 writemsg_level("none ", noiselevel=-1)
12087 writemsg_level("\n", noiselevel=-1)
12089 writemsg_level("\n", noiselevel=-1)
12091 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12092 " packages are slated for removal.\n")
12093 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12094 " and " + colorize("GOOD", "'omitted'") + \
12095 " packages will not be removed.\n\n")
12097 if "--pretend" in myopts:
12098 #we're done... return
12100 if "--ask" in myopts:
12101 if userquery("Would you like to unmerge these packages?")=="No":
12102 # enter pretend mode for correct formatting of results
12103 myopts["--pretend"] = True
12108 #the real unmerging begins, after a short delay....
12109 if clean_delay and not autoclean:
12110 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12112 for x in xrange(len(pkgmap)):
12113 for y in pkgmap[x]["selected"]:
12114 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12115 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12116 mysplit = y.split("/")
12118 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12119 mysettings, unmerge_action not in ["clean","prune"],
12120 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12121 scheduler=scheduler)
12123 if retval != os.EX_OK:
12124 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
# raise_on_error path; the non-raising sys.exit branch is in a gap.
12126 raise UninstallFailure(retval)
12129 if clean_world and hasattr(sets["world"], "cleanPackage"):
12130 sets["world"].cleanPackage(vartree.dbapi, y)
12131 emergelog(xterm_titles, " >>> unmerge success: "+y)
12132 if clean_world and hasattr(sets["world"], "remove"):
12133 for s in root_config.setconfig.active:
12134 sets["world"].remove(SETPREFIX+s)
# chk_updated_info_files: regenerate the GNU info "dir" index with
# /usr/bin/install-info for any info directory whose mtime changed since the
# mtimes recorded in prev_mtimes; reports a per-file error summary.
# NOTE(review): fragmentary dump — leading "121xx/122xx" tokens are embedded
# line numbers; counters (icount/badcount/errmsg init), `continue`
# statements, and several try/except frames fall in the gaps.
12137 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12139 if os.path.exists("/usr/bin/install-info"):
12140 out = portage.output.EOutput()
12145 inforoot=normpath(root+z)
12146 if os.path.isdir(inforoot):
12147 infomtime = long(os.stat(inforoot).st_mtime)
12148 if inforoot not in prev_mtimes or \
12149 prev_mtimes[inforoot] != infomtime:
12150 regen_infodirs.append(inforoot)
12152 if not regen_infodirs:
12153 portage.writemsg_stdout("\n")
12154 out.einfo("GNU info directory index is up-to-date.")
12156 portage.writemsg_stdout("\n")
12157 out.einfo("Regenerating GNU info directory index...")
# The "dir" index may exist plain or compressed.
12159 dir_extensions = ("", ".gz", ".bz2")
12163 for inforoot in regen_infodirs:
12167 if not os.path.isdir(inforoot) or \
12168 not os.access(inforoot, os.W_OK):
12171 file_list = os.listdir(inforoot)
12173 dir_file = os.path.join(inforoot, "dir")
12174 moved_old_dir = False
12175 processed_count = 0
12176 for x in file_list:
12177 if x.startswith(".") or \
12178 os.path.isdir(os.path.join(inforoot, x)):
12180 if x.startswith("dir"):
12182 for ext in dir_extensions:
12183 if x == "dir" + ext or \
12184 x == "dir" + ext + ".old":
# Before the first real info file, move any existing dir index aside
# so install-info rebuilds it from scratch.
12189 if processed_count == 0:
12190 for ext in dir_extensions:
12192 os.rename(dir_file + ext, dir_file + ext + ".old")
12193 moved_old_dir = True
12194 except EnvironmentError, e:
12195 if e.errno != errno.ENOENT:
12198 processed_count += 1
12199 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12200 existsstr="already exists, for file `"
12202 if re.search(existsstr,myso):
12203 # Already exists... Don't increment the count for this.
12205 elif myso[:44]=="install-info: warning: no info dir entry in ":
12206 # This info file doesn't contain a DIR-header: install-info produces this
12207 # (harmless) warning (the --quiet switch doesn't seem to work).
12208 # Don't increment the count for this.
12211 badcount=badcount+1
12212 errmsg += myso + "\n"
12215 if moved_old_dir and not os.path.exists(dir_file):
12216 # We didn't generate a new dir file, so put the old file
12217 # back where it was originally found.
12218 for ext in dir_extensions:
12220 os.rename(dir_file + ext + ".old", dir_file + ext)
12221 except EnvironmentError, e:
12222 if e.errno != errno.ENOENT:
12226 # Clean dir.old cruft so that they don't prevent
12227 # unmerge of otherwise empty directories.
12228 for ext in dir_extensions:
12230 os.unlink(dir_file + ext + ".old")
12231 except EnvironmentError, e:
12232 if e.errno != errno.ENOENT:
12236 #update mtime so we can potentially avoid regenerating.
12237 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12240 out.eerror("Processed %d info files; %d errors." % \
12241 (icount, badcount))
12242 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12245 out.einfo("Processed %d info files." % (icount,))
# display_news_notification: print a per-repository count of unread GLEP 42
# news items and point the user at `eselect news`.  `update` is skipped in
# --pretend mode so a dry run doesn't touch the unread list.
# NOTE(review): fragmentary dump — leading "122xx" tokens are embedded line
# numbers; the `if unreadItems:` guard and blank lines fall in the gaps.
12248 def display_news_notification(root_config, myopts):
12249 target_root = root_config.root
12250 trees = root_config.trees
12251 settings = trees["vartree"].settings
12252 portdb = trees["porttree"].dbapi
12253 vardb = trees["vartree"].dbapi
12254 NEWS_PATH = os.path.join("metadata", "news")
12255 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12256 newsReaderDisplay = False
12257 update = "--pretend" not in myopts
12259 for repo in portdb.getRepositories():
12260 unreadItems = checkUpdatedNewsItems(
12261 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12263 if not newsReaderDisplay:
12264 newsReaderDisplay = True
12266 print colorize("WARN", " * IMPORTANT:"),
12267 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12270 if newsReaderDisplay:
12271 print colorize("WARN", " *"),
12272 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# display_preserved_libs: after a merge session, list libraries kept alive by
# the preserved-libs registry, which files still consume them, and which
# installed packages own those consumers; caps output at MAX_DISPLAY entries
# per library and suggests `emerge @preserved-rebuild`.
# NOTE(review): fragmentary dump — leading "122xx/123xx" tokens are embedded
# line numbers; MAX_DISPLAY/consumer_map/samefile_map initialisation and the
# linkmap.rebuild() call fall in the gaps.
12275 def display_preserved_libs(vardbapi):
12278 # Ensure the registry is consistent with existing files.
12279 vardbapi.plib_registry.pruneNonExisting()
12281 if vardbapi.plib_registry.hasEntries():
12283 print colorize("WARN", "!!!") + " existing preserved libs:"
12284 plibdata = vardbapi.plib_registry.getPreservedLibs()
12285 linkmap = vardbapi.linkmap
12288 linkmap_broken = False
# linkmap rebuild failure (e.g. missing scanelf) degrades to a partial
# report rather than aborting.
12292 except portage.exception.CommandNotFound, e:
12293 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12294 level=logging.ERROR, noiselevel=-1)
12296 linkmap_broken = True
12298 search_for_owners = set()
12299 for cpv in plibdata:
12300 internal_plib_keys = set(linkmap._obj_key(f) \
12301 for f in plibdata[cpv])
12302 for f in plibdata[cpv]:
12303 if f in consumer_map:
12306 for c in linkmap.findConsumers(f):
12307 # Filter out any consumers that are also preserved libs
12308 # belonging to the same package as the provider.
12309 if linkmap._obj_key(c) not in internal_plib_keys:
12310 consumers.append(c)
12312 consumer_map[f] = consumers
12313 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12315 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12317 for cpv in plibdata:
12318 print colorize("WARN", ">>>") + " package: %s" % cpv
12320 for f in plibdata[cpv]:
12321 obj_key = linkmap._obj_key(f)
# Group hardlinks/symlinks that resolve to the same object.
12322 alt_paths = samefile_map.get(obj_key)
12323 if alt_paths is None:
12325 samefile_map[obj_key] = alt_paths
12328 for alt_paths in samefile_map.itervalues():
12329 alt_paths = sorted(alt_paths)
12330 for p in alt_paths:
12331 print colorize("WARN", " * ") + " - %s" % (p,)
12333 consumers = consumer_map.get(f, [])
12334 for c in consumers[:MAX_DISPLAY]:
12335 print colorize("WARN", " * ") + " used by %s (%s)" % \
12336 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12337 if len(consumers) == MAX_DISPLAY + 1:
12338 print colorize("WARN", " * ") + " used by %s (%s)" % \
12339 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12340 for x in owners.get(consumers[MAX_DISPLAY], [])))
12341 elif len(consumers) > MAX_DISPLAY:
12342 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12343 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
# _flush_elog_mod_echo: flush any queued elog mod_echo output immediately so
# later notifications (news, preserved libs) appear last on the terminal.
# NOTE(review): fragmentary dump — the `try:`/`else:` frame around the
# import is in the missing lines.
12346 def _flush_elog_mod_echo():
12348 Dump the mod_echo output now so that our other
12349 notifications are shown last.
12351 @returns: True if messages were shown, False otherwise.
12353 messages_shown = False
12355 from portage.elog import mod_echo
12356 except ImportError:
12357 pass # happens during downgrade to a version without the module
12359 messages_shown = bool(mod_echo._items)
12360 mod_echo.finalize()
12361 return messages_shown
# post_emerge: end-of-session housekeeping — regenerate settings, log the
# exit status, refresh the info index, report CONFIG_PROTECT updates, show
# news and preserved-libs notices, then exit with `retval`.
# NOTE(review): fragmentary dump — leading "123xx/124xx" tokens are embedded
# line numbers; docstring delimiters, `sys.exit(retval)` calls, and the
# vdb_lock try/finally scaffolding fall in the gaps.
12363 def post_emerge(root_config, myopts, mtimedb, retval):
12365 Misc. things to run at the end of a merge session.
12368 Update Config Files
12371 Display preserved libs warnings
12374 @param trees: A dictionary mapping each ROOT to it's package databases
12376 @param mtimedb: The mtimeDB to store data needed across merge invocations
12377 @type mtimedb: MtimeDB class instance
12378 @param retval: Emerge's return value
12382 1. Calls sys.exit(retval)
12385 target_root = root_config.root
12386 trees = { target_root : root_config.trees }
12387 vardbapi = trees[target_root]["vartree"].dbapi
12388 settings = vardbapi.settings
12389 info_mtimes = mtimedb["info"]
12391 # Load the most current variables from ${ROOT}/etc/profile.env
12394 settings.regenerate()
12397 config_protect = settings.get("CONFIG_PROTECT","").split()
12398 infodirs = settings.get("INFOPATH","").split(":") + \
12399 settings.get("INFODIR","").split(":")
12403 if retval == os.EX_OK:
12404 exit_msg = " *** exiting successfully."
12406 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12407 emergelog("notitles" not in settings.features, exit_msg)
12409 _flush_elog_mod_echo()
# If the vdb counter hash is unchanged, nothing was merged/unmerged, so
# only the news notification is needed.
12411 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12412 if "--pretend" in myopts or (counter_hash is not None and \
12413 counter_hash == vardbapi._counter_hash()):
12414 display_news_notification(root_config, myopts)
12415 # If vdb state has not changed then there's nothing else to do.
12418 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12419 portage.util.ensure_dirs(vdb_path)
12421 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12422 vdb_lock = portage.locks.lockdir(vdb_path)
12426 if "noinfo" not in settings.features:
12427 chk_updated_info_files(target_root,
12428 infodirs, info_mtimes, retval)
12432 portage.locks.unlockdir(vdb_lock)
12434 chk_updated_cfg_files(target_root, config_protect)
12436 display_news_notification(root_config, myopts)
12437 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12438 display_preserved_libs(vardbapi)
# chk_updated_cfg_files: scan each CONFIG_PROTECT path under target_root for
# pending `._cfg????_*` update files (via `find`) and tell the user how many
# config files need updating.
# NOTE(review): fragmentary dump — leading "124xx" tokens are embedded line
# numbers; the protect-file counter init, `continue` statements, and the
# os.lstat try/except frame fall in the gaps.
12443 def chk_updated_cfg_files(target_root, config_protect):
12445 #number of directories with some protect files in them
12447 for x in config_protect:
12448 x = os.path.join(target_root, x.lstrip(os.path.sep))
12449 if not os.access(x, os.W_OK):
12450 # Avoid Permission denied errors generated
12454 mymode = os.lstat(x).st_mode
12457 if stat.S_ISLNK(mymode):
12458 # We want to treat it like a directory if it
12459 # is a symlink to an existing directory.
12461 real_mode = os.stat(x).st_mode
12462 if stat.S_ISDIR(real_mode):
12466 if stat.S_ISDIR(mymode):
12467 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12469 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12470 os.path.split(x.rstrip(os.path.sep))
12471 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12472 a = commands.getstatusoutput(mycommand)
12474 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12476 # Show the error message alone, sending stdout to /dev/null.
12477 os.system(mycommand + " 1>/dev/null")
# -print0 output: NUL-separated file names.
12479 files = a[1].split('\0')
12480 # split always produces an empty string as the last element
12481 if files and not files[-1]:
12485 print "\n"+colorize("WARN", " * IMPORTANT:"),
12486 if stat.S_ISDIR(mymode):
12487 print "%d config files in '%s' need updating." % \
12490 print "config file '%s' needs updating." % x
12493 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12494 " section of the " + bold("emerge")
12495 print " "+yellow("*")+" man page to learn how to update config files."
# checkUpdatedNewsItems: thin wrapper around portage.news.NewsManager that
# returns the number of unread-but-relevant news items for one repository.
# NOTE(review): fragmentary dump — the signature continuation line
# (`update=False):`) and parts of the docstring are in the missing lines.
12497 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12500 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12501 Returns the number of unread (yet relevent) items.
12503 @param portdb: a portage tree database
12504 @type portdb: pordbapi
12505 @param vardb: an installed package database
12506 @type vardb: vardbapi
12509 @param UNREAD_PATH:
12515 1. The number of unread but relevant news items.
12518 from portage.news import NewsManager
12519 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12520 return manager.getUnreadItems( repo_id, update=update )
# insert_category_into_atom: splice "category/" into a category-less atom
# just before its first word character (preserving any <>=~ prefix).
# NOTE(review): fragmentary dump — the `if alphanum:` guard and the
# `else`/`return ret` lines are in the missing lines.
12522 def insert_category_into_atom(atom, category):
12523 alphanum = re.search(r'\w', atom)
12525 ret = atom[:alphanum.start()] + "%s/" % category + \
12526 atom[alphanum.start():]
# is_valid_package_atom: validate a user-supplied atom, temporarily inserting
# a dummy "cat/" category so category-less atoms can still be checked.
# NOTE(review): fragmentary dump — the `if "/" not in x:` style guard around
# the dummy-category insertion is in the missing lines.
12531 def is_valid_package_atom(x):
12533 alphanum = re.search(r'\w', x)
12535 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12536 return portage.isvalidatom(x)
# show_blocker_docs_link: print a pointer to the handbook section about
# blocked packages.
# NOTE(review): fragmentary dump — the blank `print` spacer lines between
# the visible lines are missing from the dump.
12538 def show_blocker_docs_link():
12540 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12541 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12543 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
# Print a pointer to the MASKED PACKAGES documentation. Output only;
# returns nothing. (Python 2 print statements.)
12546 def show_mask_docs():
12547 print "For more information, see the MASKED PACKAGES section in the emerge"
12548 print "man page or refer to the Gentoo Handbook."
# Implement `emerge --sync` / `emerge --metadata`: refresh PORTDIR via one of
# several transports (git pull, rsync://, cvs://, or metadata-only), then
# regenerate caches, apply global updates, and report whether a newer portage
# is available.
# NOTE(review): elided listing — original line numbers (12550-13045) are
# non-contiguous; many statements (`try:` headers, `else:` branches, `return`
# and `sys.exit` lines, initializations such as `spawn_kwargs`, `umask`,
# `mytimeout`, `rsync_opts`, `ips`, `retries`) are missing from this view.
# NOTE: Python 2 syntax throughout (print statements, `except X, e`, octal
# literals like 0755/0020).
12550 def action_sync(settings, trees, mtimedb, myopts, myaction):
12551 xterm_titles = "notitles" not in settings.features
12552 emergelog(xterm_titles, " === sync")
12553 myportdir = settings.get("PORTDIR", None)
12554 out = portage.output.EOutput()
12556 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
12558 if myportdir[-1]=="/":
12559 myportdir=myportdir[:-1]
12561 st = os.stat(myportdir)
12565 print ">>>",myportdir,"not found, creating it."
12566 os.makedirs(myportdir,0755)
12567 st = os.stat(myportdir)
# With FEATURES=usersync and sufficient privileges, spawned sync commands are
# run as the tree owner's uid/gid so ownership of synced files stays stable.
# (`spawn_kwargs` itself is initialized on an elided line.)
12570 spawn_kwargs["env"] = settings.environ()
12571 if 'usersync' in settings.features and \
12572 portage.data.secpass >= 2 and \
12573 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12574 st.st_gid != os.getgid() and st.st_mode & 0070):
12576 homedir = pwd.getpwuid(st.st_uid).pw_dir
12580 # Drop privileges when syncing, in order to match
12581 # existing uid/gid settings.
12582 spawn_kwargs["uid"] = st.st_uid
12583 spawn_kwargs["gid"] = st.st_gid
12584 spawn_kwargs["groups"] = [st.st_gid]
12585 spawn_kwargs["env"]["HOME"] = homedir
12587 if not st.st_mode & 0020:
12588 umask = umask | 0020
12589 spawn_kwargs["umask"] = umask
12591 syncuri = settings.get("SYNC", "").strip()
12593 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12594 noiselevel=-1, level=logging.ERROR)
12597 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12598 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
# Dispatch on sync transport: "metadata" action skips transfer entirely; a
# .git checkout wins over SYNC; otherwise the SYNC URI scheme decides.
12601 dosyncuri = syncuri
12602 updatecache_flg = False
12603 if myaction == "metadata":
12604 print "skipping sync"
12605 updatecache_flg = True
12606 elif ".git" in vcs_dirs:
12607 # Update existing git repository, and ignore the syncuri. We are
12608 # going to trust the user and assume that the user is in the branch
12609 # that he/she wants updated. We'll let the user manage branches with
12611 if portage.process.find_binary("git") is None:
12612 msg = ["Command not found: git",
12613 "Type \"emerge dev-util/git\" to enable git support."]
12615 writemsg_level("!!! %s\n" % l,
12616 level=logging.ERROR, noiselevel=-1)
12618 msg = ">>> Starting git pull in %s..." % myportdir
12619 emergelog(xterm_titles, msg )
12620 writemsg_level(msg + "\n")
12621 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12622 (portage._shell_quote(myportdir),), **spawn_kwargs)
12623 if exitcode != os.EX_OK:
12624 msg = "!!! git pull error in %s." % myportdir
12625 emergelog(xterm_titles, msg)
12626 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12628 msg = ">>> Git pull in %s successful" % myportdir
12629 emergelog(xterm_titles, msg)
12630 writemsg_level(msg + "\n")
# git does not preserve mtimes; repair them from the metadata cache so the
# cache stays valid (see git_sync_timestamps below).
12631 exitcode = git_sync_timestamps(settings, myportdir)
12632 if exitcode == os.EX_OK:
12633 updatecache_flg = True
12634 elif syncuri[:8]=="rsync://":
# Refuse to rsync over a VCS checkout — rsync --delete would destroy it.
12635 for vcs_dir in vcs_dirs:
12636 writemsg_level(("!!! %s appears to be under revision " + \
12637 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12638 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12640 if not os.path.exists("/usr/bin/rsync"):
12641 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12642 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12647 if settings["PORTAGE_RSYNC_OPTS"] == "":
12648 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12649 rsync_opts.extend([
12650 "--recursive", # Recurse directories
12651 "--links", # Consider symlinks
12652 "--safe-links", # Ignore links outside of tree
12653 "--perms", # Preserve permissions
12654 "--times", # Preserve mod times
12655 "--compress", # Compress the data transmitted
12656 "--force", # Force deletion on non-empty dirs
12657 "--whole-file", # Don't do block transfers, only entire files
12658 "--delete", # Delete files that aren't in the master tree
12659 "--stats", # Show final statistics about what was transferred
12660 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12661 "--exclude=/distfiles", # Exclude distfiles from consideration
12662 "--exclude=/local", # Exclude local from consideration
12663 "--exclude=/packages", # Exclude packages from consideration
# User-supplied PORTAGE_RSYNC_OPTS path: re-add options portage requires.
12667 # The below validation is not needed when using the above hardcoded
12670 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12672 shlex.split(settings.get("PORTAGE_RSYNC_OPTS",""))
12673 for opt in ("--recursive", "--times"):
12674 if opt not in rsync_opts:
12675 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12676 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12677 rsync_opts.append(opt)
12679 for exclude in ("distfiles", "local", "packages"):
12680 opt = "--exclude=/%s" % exclude
12681 if opt not in rsync_opts:
12682 portage.writemsg(yellow("WARNING:") + \
12683 " adding required option %s not included in " % opt + \
12684 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12685 rsync_opts.append(opt)
# Official gentoo.org mirrors get extra mandatory options.
12687 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12688 def rsync_opt_startswith(opt_prefix):
12689 for x in rsync_opts:
12690 if x.startswith(opt_prefix):
12694 if not rsync_opt_startswith("--timeout="):
12695 rsync_opts.append("--timeout=%d" % mytimeout)
12697 for opt in ("--compress", "--whole-file"):
12698 if opt not in rsync_opts:
12699 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12700 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12701 rsync_opts.append(opt)
12703 if "--quiet" in myopts:
12704 rsync_opts.append("--quiet") # Shut up a lot
12706 rsync_opts.append("--verbose") # Print filelist
12708 if "--verbose" in myopts:
12709 rsync_opts.append("--progress") # Progress meter for each file
12711 if "--debug" in myopts:
12712 rsync_opts.append("--checksum") # Force checksum on all files
12714 # Real local timestamp file.
12715 servertimestampfile = os.path.join(
12716 myportdir, "metadata", "timestamp.chk")
12718 content = portage.util.grabfile(servertimestampfile)
12722 mytimestamp = time.mktime(time.strptime(content[0],
12723 "%a, %d %b %Y %H:%M:%S +0000"))
12724 except (OverflowError, ValueError):
12729 rsync_initial_timeout = \
12730 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12732 rsync_initial_timeout = 15
12735 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12736 except SystemExit, e:
12737 raise # Needed else can't exit
12739 maxretries=3 #default number of retries
# Split "rsync://[user@]host[:port]" into its components for mirror rotation.
12742 user_name, hostname, port = re.split(
12743 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12746 if user_name is None:
12748 updatecache_flg=True
12749 all_rsync_opts = set(rsync_opts)
12750 extra_rsync_opts = shlex.split(
12751 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12752 all_rsync_opts.update(extra_rsync_opts)
# Address family defaults to IPv4; honor -4/-6 style overrides from the
# combined option set.
12753 family = socket.AF_INET
12754 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12755 family = socket.AF_INET
12756 elif socket.has_ipv6 and \
12757 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12758 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below.
12760 SERVER_OUT_OF_DATE = -1
12761 EXCEEDED_MAX_RETRIES = -2
12767 for addrinfo in socket.getaddrinfo(
12768 hostname, None, family, socket.SOCK_STREAM):
12769 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12770 # IPv6 addresses need to be enclosed in square brackets
12771 ips.append("[%s]" % addrinfo[4][0])
12773 ips.append(addrinfo[4][0])
12774 from random import shuffle
12776 except SystemExit, e:
12777 raise # Needed else can't exit
12778 except Exception, e:
12779 print "Notice:",str(e)
# Rewrite the URI to use a concrete (shuffled) mirror IP for this attempt.
12784 dosyncuri = syncuri.replace(
12785 "//" + user_name + hostname + port + "/",
12786 "//" + user_name + ips[0] + port + "/", 1)
12787 except SystemExit, e:
12788 raise # Needed else can't exit
12789 except Exception, e:
12790 print "Notice:",str(e)
12794 if "--ask" in myopts:
12795 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12800 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12801 if "--quiet" not in myopts:
12802 print ">>> Starting rsync with "+dosyncuri+"..."
12804 emergelog(xterm_titles,
12805 ">>> Starting retry %d of %d with %s" % \
12806 (retries,maxretries,dosyncuri))
12807 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12809 if mytimestamp != 0 and "--quiet" not in myopts:
12810 print ">>> Checking server timestamp ..."
12812 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12814 if "--debug" in myopts:
12817 exitcode = os.EX_OK
12818 servertimestamp = 0
12819 # Even if there's no timestamp available locally, fetch the
12820 # timestamp anyway as an initial probe to verify that the server is
12821 # responsive. This protects us from hanging indefinitely on a
12822 # connection attempt to an unresponsive server which rsync's
12823 # --timeout option does not prevent.
12825 # Temporary file for remote server timestamp comparison.
12826 from tempfile import mkstemp
12827 fd, tmpservertimestampfile = mkstemp()
12829 mycommand = rsynccommand[:]
12830 mycommand.append(dosyncuri.rstrip("/") + \
12831 "/metadata/timestamp.chk")
12832 mycommand.append(tmpservertimestampfile)
# SIGALRM-based timeout guards the initial connection attempt, which rsync's
# own --timeout does not cover.
12836 def timeout_handler(signum, frame):
12837 raise portage.exception.PortageException("timed out")
12838 signal.signal(signal.SIGALRM, timeout_handler)
12839 # Timeout here in case the server is unresponsive. The
12840 # --timeout rsync option doesn't apply to the initial
12841 # connection attempt.
12842 if rsync_initial_timeout:
12843 signal.alarm(rsync_initial_timeout)
12845 mypids.extend(portage.process.spawn(
12846 mycommand, env=settings.environ(), returnpid=True))
12847 exitcode = os.waitpid(mypids[0], 0)[1]
12848 content = portage.grabfile(tmpservertimestampfile)
12850 if rsync_initial_timeout:
12853 os.unlink(tmpservertimestampfile)
12856 except portage.exception.PortageException, e:
# On timeout, reap/terminate the probe child if it is still running.
12860 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12861 os.kill(mypids[0], signal.SIGTERM)
12862 os.waitpid(mypids[0], 0)
12863 # This is the same code rsync uses for timeout.
# Decode the raw waitpid status into a shell-style exit code.
12866 if exitcode != os.EX_OK:
12867 if exitcode & 0xff:
12868 exitcode = (exitcode & 0xff) << 8
12870 exitcode = exitcode >> 8
12872 portage.process.spawned_pids.remove(mypids[0])
12875 servertimestamp = time.mktime(time.strptime(
12876 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12877 except (OverflowError, ValueError):
12879 del mycommand, mypids, content
# Compare server vs local timestamp: equal -> already current; older ->
# server out of date; newer or unknown -> do the full transfer.
12880 if exitcode == os.EX_OK:
12881 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12882 emergelog(xterm_titles,
12883 ">>> Cancelling sync -- Already current.")
12886 print ">>> Timestamps on the server and in the local repository are the same."
12887 print ">>> Cancelling all further sync action. You are already up to date."
12889 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12893 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12894 emergelog(xterm_titles,
12895 ">>> Server out of date: %s" % dosyncuri)
12898 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12900 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12903 exitcode = SERVER_OUT_OF_DATE
12904 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12906 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12907 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12908 if exitcode in [0,1,3,4,11,14,20,21]:
12910 elif exitcode in [1,3,4,11,14,20,21]:
12913 # Code 2 indicates protocol incompatibility, which is expected
12914 # for servers with protocol < 29 that don't support
12915 # --prune-empty-directories. Retry for a server that supports
12916 # at least rsync protocol version 29 (>=rsync-2.6.4).
12921 if retries<=maxretries:
12922 print ">>> Retrying..."
12927 updatecache_flg=False
12928 exitcode = EXCEEDED_MAX_RETRIES
12932 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12933 elif exitcode == SERVER_OUT_OF_DATE:
12935 elif exitcode == EXCEEDED_MAX_RETRIES:
12937 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Human-readable diagnostics keyed on rsync's documented exit codes
# (dispatching `if`/`elif` lines are elided in this listing).
12942 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12943 msg.append("that your SYNC statement is proper.")
12944 msg.append("SYNC=" + settings["SYNC"])
12946 msg.append("Rsync has reported that there is a File IO error. Normally")
12947 msg.append("this means your disk is full, but can be caused by corruption")
12948 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12949 msg.append("and try again after the problem has been fixed.")
12950 msg.append("PORTDIR=" + settings["PORTDIR"])
12952 msg.append("Rsync was killed before it finished.")
12954 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12955 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12956 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12957 msg.append("temporary problem unless complications exist with your network")
12958 msg.append("(and possibly your system's filesystem) configuration.")
# CVS transport: initial checkout of gentoo-x86 or incremental `cvs update`.
12962 elif syncuri[:6]=="cvs://":
12963 if not os.path.exists("/usr/bin/cvs"):
12964 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12965 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12967 cvsroot=syncuri[6:]
12968 cvsdir=os.path.dirname(myportdir)
12969 if not os.path.exists(myportdir+"/CVS"):
12971 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12972 if os.path.exists(cvsdir+"/gentoo-x86"):
12973 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12976 os.rmdir(myportdir)
12978 if e.errno != errno.ENOENT:
12980 "!!! existing '%s' directory; exiting.\n" % myportdir)
12983 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12984 print "!!! cvs checkout error; exiting."
12986 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12989 print ">>> Starting cvs update with "+syncuri+"..."
12990 retval = portage.process.spawn_bash(
12991 "cd %s; cvs -z0 -q update -dP" % \
12992 (portage._shell_quote(myportdir),), **spawn_kwargs)
12993 if retval != os.EX_OK:
12995 dosyncuri = syncuri
12997 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12998 noiselevel=-1, level=logging.ERROR)
# Post-transfer: optionally regenerate the metadata cache, re-apply global
# updates, and check for a newer portage version.
13001 if updatecache_flg and \
13002 myaction != "metadata" and \
13003 "metadata-transfer" not in settings.features:
13004 updatecache_flg = False
13006 # Reload the whole config from scratch.
13007 settings, trees, mtimedb = load_emerge_config(trees=trees)
13008 root_config = trees[settings["ROOT"]]["root_config"]
13009 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13011 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13012 action_metadata(settings, portdb, myopts)
13014 if portage._global_updates(trees, mtimedb["updates"]):
13016 # Reload the whole config from scratch.
13017 settings, trees, mtimedb = load_emerge_config(trees=trees)
13018 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13019 root_config = trees[settings["ROOT"]]["root_config"]
13021 mybestpv = portdb.xmatch("bestmatch-visible",
13022 portage.const.PORTAGE_PACKAGE_ATOM)
13023 mypvs = portage.best(
13024 trees[settings["ROOT"]]["vartree"].dbapi.match(
13025 portage.const.PORTAGE_PACKAGE_ATOM))
13027 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
# Run the user's post_sync hook, if present and executable.
13029 if myaction != "metadata":
13030 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13031 retval = portage.process.spawn(
13032 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13033 dosyncuri], env=settings.environ())
13034 if retval != os.EX_OK:
13035 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13037 if(mybestpv != mypvs) and not "--quiet" in myopts:
13039 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13040 print red(" * ")+"that you update portage now, before any other packages are updated."
13042 print red(" * ")+"To update portage, run 'emerge portage' now."
13045 display_news_notification(root_config, myopts)
# Restore file mtimes after a git sync: git does not preserve timestamps, so
# ebuild/eclass mtimes are reset from the metadata cache for every file that
# is unmodified relative to HEAD, keeping the cache valid.
# NOTE(review): elided listing — docstring delimiters, `try:`/`except`/`else:`
# headers, `continue`/`return` lines, and the `rval = proc.wait()`-style
# assignment before 13085 are missing from this view.
13048 def git_sync_timestamps(settings, portdir):
13050 Since git doesn't preserve timestamps, synchronize timestamps between
13051 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13052 for a given file as long as the file in the working tree is not modified
13053 (relative to HEAD).
13055 cache_dir = os.path.join(portdir, "metadata", "cache")
13056 if not os.path.isdir(cache_dir):
13058 writemsg_level(">>> Synchronizing timestamps...\n")
13060 from portage.cache.cache_errors import CacheError
13062 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13063 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13064 except CacheError, e:
13065 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13066 level=logging.ERROR, noiselevel=-1)
13069 ec_dir = os.path.join(portdir, "eclass")
13071 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13072 if f.endswith(".eclass"))
13074 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13075 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified; those must keep their
# current mtimes.
13078 args = [portage.const.BASH_BINARY, "-c",
13079 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13080 portage._shell_quote(portdir)]
13082 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13083 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13085 if rval != os.EX_OK:
13088 modified_eclasses = set(ec for ec in ec_names \
13089 if os.path.join("eclass", ec + ".eclass") in modified_files)
13091 updated_ec_mtimes = {}
# Walk every cache entry, validating it and collecting the mtimes it records.
13093 for cpv in cache_db:
13094 cpv_split = portage.catpkgsplit(cpv)
13095 if cpv_split is None:
13096 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13097 level=logging.ERROR, noiselevel=-1)
13100 cat, pn, ver, rev = cpv_split
13101 cat, pf = portage.catsplit(cpv)
13102 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13103 if relative_eb_path in modified_files:
13107 cache_entry = cache_db[cpv]
13108 eb_mtime = cache_entry.get("_mtime_")
13109 ec_mtimes = cache_entry.get("_eclasses_")
13111 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13112 level=logging.ERROR, noiselevel=-1)
13114 except CacheError, e:
13115 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13116 (cpv, e), level=logging.ERROR, noiselevel=-1)
13119 if eb_mtime is None:
13120 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13121 level=logging.ERROR, noiselevel=-1)
13125 eb_mtime = long(eb_mtime)
13127 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13128 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13131 if ec_mtimes is None:
13132 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13133 level=logging.ERROR, noiselevel=-1)
13136 if modified_eclasses.intersection(ec_mtimes):
13139 missing_eclasses = set(ec_mtimes).difference(ec_names)
13140 if missing_eclasses:
13141 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13142 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13146 eb_path = os.path.join(portdir, relative_eb_path)
13148 current_eb_mtime = os.stat(eb_path)
13150 writemsg_level("!!! Missing ebuild: %s\n" % \
13151 (cpv,), level=logging.ERROR, noiselevel=-1)
# Skip entries whose eclass mtimes disagree with already-applied updates.
13154 inconsistent = False
13155 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13156 updated_mtime = updated_ec_mtimes.get(ec)
13157 if updated_mtime is not None and updated_mtime != ec_mtime:
13158 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13159 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13160 inconsistent = True
# Apply the cached mtimes to the working-tree ebuild and its eclasses.
13166 if current_eb_mtime != eb_mtime:
13167 os.utime(eb_path, (eb_mtime, eb_mtime))
13169 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13170 if ec in updated_ec_mtimes:
13172 ec_path = os.path.join(ec_dir, ec + ".eclass")
13173 current_mtime = long(os.stat(ec_path).st_mtime)
13174 if current_mtime != ec_mtime:
13175 os.utime(ec_path, (ec_mtime, ec_mtime))
13176 updated_ec_mtimes[ec] = ec_mtime
# Implement `emerge --metadata`: mirror the tree's pregenerated metadata/cache
# into portage's dep cache (settings.depcachedir), with an optional textual
# percentage progress display.
# NOTE(review): elided listing — `try:` headers, `else:` branches, `yield`
# lines in the generators, and the sanity-check exit after 13190 are missing
# from this view. (Python 2 `print >> sys.stderr` syntax.)
13180 def action_metadata(settings, portdb, myopts):
13181 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
13182 old_umask = os.umask(0002)
13183 cachedir = os.path.normpath(settings.depcachedir)
# Refuse obviously wrong PORTAGE_DEPCACHEDIR values that point at primary
# system directories.
13184 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13185 "/lib", "/opt", "/proc", "/root", "/sbin",
13186 "/sys", "/tmp", "/usr", "/var"]:
13187 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13188 "ROOT DIRECTORY ON YOUR SYSTEM."
13189 print >> sys.stderr, \
13190 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13192 if not os.path.exists(cachedir):
13195 ec = portage.eclass_cache.cache(portdb.porttree_root)
13196 myportdir = os.path.realpath(settings["PORTDIR"])
13197 cm = settings.load_best_module("portdbapi.metadbmodule")(
13198 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13200 from portage.cache import util
# Progress reporter: iterates all cpvs while periodically emitting a
# backspace-overwritten percentage on stdout.
13202 class percentage_noise_maker(util.quiet_mirroring):
13203 def __init__(self, dbapi):
13205 self.cp_all = dbapi.cp_all()
13206 l = len(self.cp_all)
13207 self.call_update_min = 100000000
13208 self.min_cp_all = l/100.0
13212 def __iter__(self):
13213 for x in self.cp_all:
13215 if self.count > self.min_cp_all:
13216 self.call_update_min = 0
13218 for y in self.dbapi.cp_list(x):
13220 self.call_update_mine = 0
13222 def update(self, *arg):
13223 try: self.pstr = int(self.pstr) + 1
13224 except ValueError: self.pstr = 1
13225 sys.stdout.write("%s%i%%" % \
13226 ("\b" * (len(str(self.pstr))+1), self.pstr))
13228 self.call_update_min = 10000000
13230 def finish(self, *arg):
13231 sys.stdout.write("\b\b\b\b100%\n")
# Quiet mode mirrors silently; otherwise the noise maker doubles as source.
13234 if "--quiet" in myopts:
13235 def quicky_cpv_generator(cp_all_list):
13236 for x in cp_all_list:
13237 for y in portdb.cp_list(x):
13239 source = quicky_cpv_generator(portdb.cp_all())
13240 noise_maker = portage.cache.util.quiet_mirroring()
13242 noise_maker = source = percentage_noise_maker(portdb)
13243 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13244 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the umask saved at entry.
13247 os.umask(old_umask)
# Implement `emerge --regen`: rebuild metadata cache entries from ebuilds via
# MetadataRegen, honoring --jobs/--load-average limits; returns the regen
# returncode.
# NOTE(review): elided listing — the `try:` header before 13255 and the lines
# between 13257 and 13262/13265 are missing from this view.
13249 def action_regen(settings, portdb, max_jobs, max_load):
13250 xterm_titles = "notitles" not in settings.features
13251 emergelog(xterm_titles, " === regen")
13252 #regenerate cache entries
13253 portage.writemsg_stdout("Regenerating cache entries...\n")
# stdin is closed — presumably so spawned ebuild processes cannot block on
# terminal input; confirm against the full file.
13255 os.close(sys.stdin.fileno())
13256 except SystemExit, e:
13257 raise # Needed else can't exit
13262 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13265 portage.writemsg_stdout("done!\n")
13266 return regen.returncode
# Implement `emerge --config <atom>`: resolve exactly one installed package
# (interactively disambiguating with --ask) and run its ebuild "config" phase,
# then "clean" on success.
# NOTE(review): elided listing — `sys.exit` lines, `try:` headers, `else:`
# branches, and the selection-loop scaffolding are missing from this view.
13268 def action_config(settings, trees, myopts, myfiles):
13269 if len(myfiles) != 1:
13270 print red("!!! config can only take a single package atom at this time\n")
13272 if not is_valid_package_atom(myfiles[0]):
13273 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13275 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13276 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13280 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13281 except portage.exception.AmbiguousPackageName, e:
13282 # Multiple matches thrown from cpv_expand
13285 print "No packages found.\n"
# More than one installed match: prompt with --ask, otherwise list and bail.
13287 elif len(pkgs) > 1:
13288 if "--ask" in myopts:
13290 print "Please select a package to configure:"
13294 options.append(str(idx))
13295 print options[-1]+") "+pkg
13297 options.append("X")
13298 idx = userquery("Selection?", options)
13301 pkg = pkgs[int(idx)-1]
13303 print "The following packages available:"
13306 print "\nPlease use a specific atom or the --ask option."
13312 if "--ask" in myopts:
13313 if userquery("Ready to configure "+pkg+"?") == "No":
13316 print "Configuring pkg..."
13318 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13319 mysettings = portage.config(clone=settings)
13320 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13321 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): line 13324 compares the PORTAGE_DEBUG string to int 1
# (always False), while line 13321 above compares to "1" — looks like a
# bug / inconsistency; confirm against upstream before changing.
13322 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13324 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13325 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13326 if retval == os.EX_OK:
13327 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13328 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Implement `emerge --info`: print portage/system settings (version, uname,
# tree timestamp, toolchain package versions, make.conf-style variables, USE
# flags) and, for any package atoms given, the per-package build-time settings
# that differ from the current global configuration.
# NOTE(review): elided listing — `try:`/`except`/`else:` headers, loop headers
# (e.g. `for x in myvars:`, `for pkg in mypkgs:`), and several
# initializations (`header_width`, `pkgs`, `unset_vars`, `mypkgs`,
# `global_vals`, `valuesmap`, `diff_values`, `out`, `mydb`) are missing from
# this view. (Python 2 print statements.)
13331 def action_info(settings, trees, myopts, myfiles):
13332 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13333 settings.profile_path, settings["CHOST"],
13334 trees[settings["ROOT"]]["vartree"].dbapi)
13336 header_title = "System Settings"
13338 print header_width * "="
13339 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13340 print header_width * "="
13341 print "System uname: "+platform.platform(aliased=1)
13343 lastSync = portage.grabfile(os.path.join(
13344 settings["PORTDIR"], "metadata", "timestamp.chk"))
13345 print "Timestamp of tree:",
# Probe distcc/ccache versions via the Python 2 `commands` module.
13351 output=commands.getstatusoutput("distcc --version")
13353 print str(output[1].split("\n",1)[0]),
13354 if "distcc" in settings.features:
13359 output=commands.getstatusoutput("ccache -V")
13361 print str(output[1].split("\n",1)[0]),
13362 if "ccache" in settings.features:
# Report installed versions of key toolchain packages plus any listed in
# profiles/info_pkgs.
13367 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13368 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13369 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13370 myvars = portage.util.unique_array(myvars)
13374 if portage.isvalidatom(x):
13375 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13376 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13377 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13379 for pn, ver, rev in pkg_matches:
13381 pkgs.append(ver + "-" + rev)
13385 pkgs = ", ".join(pkgs)
13386 print "%-20s %s" % (x+":", pkgs)
13388 print "%-20s %s" % (x+":", "[NOT VALID]")
13390 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Variable dump: everything with --verbose, otherwise a fixed list plus
# profiles/info_vars.
13392 if "--verbose" in myopts:
13393 myvars=settings.keys()
13395 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13396 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13397 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13398 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13400 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13402 myvars = portage.util.unique_array(myvars)
13408 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND-prefixed flags folded into their own
# variables (e.g. video_cards_* shown under VIDEO_CARDS).
13410 use = set(settings["USE"].split())
13411 use_expand = settings["USE_EXPAND"].split()
13413 for varname in use_expand:
13414 flag_prefix = varname.lower() + "_"
13415 for f in list(use):
13416 if f.startswith(flag_prefix):
13420 print 'USE="%s"' % " ".join(use),
13421 for varname in use_expand:
13422 myval = settings.get(varname)
13424 print '%s="%s"' % (varname, myval),
13427 unset_vars.append(x)
13429 print "Unset: "+", ".join(unset_vars)
13432 if "--debug" in myopts:
13433 for x in dir(portage):
13434 module = getattr(portage, x)
13435 if "cvs_id_string" in dir(module):
13436 print "%s: %s" % (str(x), str(module.cvs_id_string))
13438 # See if we can find any packages installed matching the strings
13439 # passed on the command line
13441 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13442 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13444 mypkgs.extend(vardb.match(x))
13446 # If some packages were found...
13448 # Get our global settings (we only print stuff if it varies from
13449 # the current config)
13450 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13451 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13453 pkgsettings = portage.config(clone=settings)
13455 for myvar in mydesiredvars:
13456 global_vals[myvar] = set(settings.get(myvar, "").split())
13458 # Loop through each package
13459 # Only print settings if they differ from global settings
13460 header_title = "Package Settings"
13461 print header_width * "="
13462 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13463 print header_width * "="
13464 from portage.output import EOutput
13467 # Get all package specific variables
13468 auxvalues = vardb.aux_get(pkg, auxkeys)
13470 for i in xrange(len(auxkeys)):
13471 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13473 for myvar in mydesiredvars:
13474 # If the package variable doesn't match the
13475 # current global variable, something has changed
13476 # so set diff_found so we know to print
13477 if valuesmap[myvar] != global_vals[myvar]:
13478 diff_values[myvar] = valuesmap[myvar]
13479 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13480 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13481 pkgsettings.reset()
13482 # If a matching ebuild is no longer available in the tree, maybe it
13483 # would make sense to compare against the flags for the best
13484 # available version with the same slot?
13486 if portdb.cpv_exists(pkg):
13488 pkgsettings.setcpv(pkg, mydb=mydb)
13489 if valuesmap["IUSE"].intersection(
13490 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13491 diff_values["USE"] = valuesmap["USE"]
13492 # If a difference was found, print the info for
13495 # Print package info
13496 print "%s was built with the following:" % pkg
13497 for myvar in mydesiredvars + ["USE"]:
13498 if myvar in diff_values:
13499 mylist = list(diff_values[myvar])
13501 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13503 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13504 ebuildpath = vardb.findname(pkg)
13505 if not ebuildpath or not os.path.exists(ebuildpath):
13506 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): as in action_config, `settings.get("PORTAGE_DEBUG","") == 1`
# compares a string to int 1 (always False); confirm against upstream.
13508 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13509 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13510 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Implement `emerge --search`: run each search term through a `search`
# instance configured from the command-line options and print the results.
# NOTE(review): elided listing — the empty-terms guard header before 13515
# and the `try:` header before 13523 are missing from this view.
13513 def action_search(root_config, myopts, myfiles, spinner):
13515 print "emerge: no search terms provided."
13517 searchinstance = search(root_config,
13518 spinner, "--searchdesc" in myopts,
13519 "--quiet" not in myopts, "--usepkg" in myopts,
13520 "--usepkgonly" in myopts)
13521 for mysearch in myfiles:
# Terms may be regular expressions; re.error from a bad pattern is reported
# per-term rather than aborting the whole search.
13523 searchinstance.execute(mysearch)
13524 except re.error, comment:
13525 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13527 searchinstance.output()
# action_depclean: implements both `emerge --depclean` and `emerge --prune`.
# Builds a dependency graph rooted at the system/world sets (depclean) or at
# all installed packages (prune), then removes installed packages that are
# not reachable in that graph, refusing to proceed when required deps are
# unresolved (unless specific atoms were given — see allow_missing_deps).
# NOTE(review): numbered listing with many missing original lines (gaps in
# the 135xx-141xx numbering); the control flow shown is a partial view.
# Verify against the complete emerge source before changing behavior.
13529 def action_depclean(settings, trees, ldpath_mtimes,
13530 myopts, action, myfiles, spinner):
13531 # Kill packages that aren't explicitly merged or are required as a
13532 # dependency of another package. World file is explicit.
13534 # Global depclean or prune operations are not very safe when there are
13535 # missing dependencies since it's unknown how badly incomplete
13536 # the dependency graph is, and we might accidentally remove packages
13537 # that should have been pulled into the graph. On the other hand, it's
13538 # relatively safe to ignore missing deps when only asked to remove
13539 # specific packages.
# Explicit atoms on the command line => tolerate an incomplete graph.
13540 allow_missing_deps = len(myfiles) > 0
# Safety-warning text shown before a full (no-argument) depclean.
13543 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13544 msg.append("mistakes. Packages that are part of the world set will always\n")
13545 msg.append("be kept. They can be manually added to this set with\n")
13546 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13547 msg.append("package.provided (see portage(5)) will be removed by\n")
13548 msg.append("depclean, even if they are part of the world set.\n")
13550 msg.append("As a safety measure, depclean will not remove any packages\n")
13551 msg.append("unless *all* required dependencies have been resolved. As a\n")
13552 msg.append("consequence, it is often necessary to run %s\n" % \
13553 good("`emerge --update"))
13554 msg.append(good("--newuse --deep @system @world`") + \
13555 " prior to depclean.\n")
13557 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13558 portage.writemsg_stdout("\n")
13560 portage.writemsg_stdout(colorize("WARN", " * ") + x)
# Gather the required "system" and "world" sets; everything reachable
# from these roots will be kept.
13562 xterm_titles = "notitles" not in settings.features
13563 myroot = settings["ROOT"]
13564 root_config = trees[myroot]["root_config"]
13565 getSetAtoms = root_config.setconfig.getSetAtoms
13566 vardb = trees[myroot]["vartree"].dbapi
13568 required_set_names = ("system", "world")
13572 for s in required_set_names:
13573 required_sets[s] = InternalPackageSet(
13574 initial_atoms=getSetAtoms(s))
13577 # When removing packages, use a temporary version of world
13578 # which excludes packages that are intended to be eligible for
13580 world_temp_set = required_sets["world"]
13581 system_set = required_sets["system"]
# Warn loudly (and pause, unless --pretend) when system/world are empty,
# since depclean would then consider nearly everything removable.
13583 if not system_set or not world_temp_set:
13586 writemsg_level("!!! You have no system list.\n",
13587 level=logging.ERROR, noiselevel=-1)
13589 if not world_temp_set:
13590 writemsg_level("!!! You have no world file.\n",
13591 level=logging.WARNING, noiselevel=-1)
13593 writemsg_level("!!! Proceeding is likely to " + \
13594 "break your installation.\n",
13595 level=logging.WARNING, noiselevel=-1)
13596 if "--pretend" not in myopts:
13597 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13599 if action == "depclean":
13600 emergelog(xterm_titles, " >>> depclean")
# Validate and expand the user-supplied atoms into args_set.
13603 args_set = InternalPackageSet()
13606 if not is_valid_package_atom(x):
13607 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13608 level=logging.ERROR, noiselevel=-1)
13609 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13612 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13613 except portage.exception.AmbiguousPackageName, e:
13614 msg = "The short ebuild name \"" + x + \
13615 "\" is ambiguous. Please specify " + \
13616 "one of the following " + \
13617 "fully-qualified ebuild names instead:"
13618 for line in textwrap.wrap(msg, 70):
13619 writemsg_level("!!! %s\n" % (line,),
13620 level=logging.ERROR, noiselevel=-1)
13622 writemsg_level(" %s\n" % colorize("INFORM", i),
13623 level=logging.ERROR, noiselevel=-1)
13624 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13627 matched_packages = False
13630 matched_packages = True
13632 if not matched_packages:
13633 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the keep-graph with a depgraph in "remove" mode.
13637 writemsg_level("\nCalculating dependencies ")
13638 resolver_params = create_depgraph_params(myopts, "remove")
13639 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13640 vardb = resolver.trees[myroot]["vartree"].dbapi
13642 if action == "depclean":
13645 # Pull in everything that's installed but not matched
13646 # by an argument atom since we don't want to clean any
13647 # package if something depends on it.
13649 world_temp_set.clear()
13654 if args_set.findAtomForPackage(pkg) is None:
13655 world_temp_set.add("=" + pkg.cpv)
13657 except portage.exception.InvalidDependString, e:
13658 show_invalid_depstring_notice(pkg,
13659 pkg.metadata["PROVIDE"], str(e))
# On an invalid PROVIDE string, conservatively keep the package.
13661 world_temp_set.add("=" + pkg.cpv)
13664 elif action == "prune":
13666 # Pull in everything that's installed since we don't
13667 # to prune a package if something depends on it.
13668 world_temp_set.clear()
13669 world_temp_set.update(vardb.cp_all())
13673 # Try to prune everything that's slotted.
13674 for cp in vardb.cp_all():
13675 if len(vardb.cp_list(cp)) > 1:
13678 # Remove atoms from world that match installed packages
13679 # that are also matched by argument atoms, but do not remove
13680 # them if they match the highest installed version.
13683 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13684 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13685 raise AssertionError("package expected in matches: " + \
13686 "cp = %s, cpv = %s matches = %s" % \
13687 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs results are presumably sorted ascending, so [-1] is the
# highest installed version — TODO confirm against vardbapi docs.
13689 highest_version = pkgs_for_cp[-1]
13690 if pkg == highest_version:
13691 # pkg is the highest version
13692 world_temp_set.add("=" + pkg.cpv)
13695 if len(pkgs_for_cp) <= 1:
13696 raise AssertionError("more packages expected: " + \
13697 "cp = %s, cpv = %s matches = %s" % \
13698 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13701 if args_set.findAtomForPackage(pkg) is None:
13702 world_temp_set.add("=" + pkg.cpv)
13704 except portage.exception.InvalidDependString, e:
13705 show_invalid_depstring_notice(pkg,
13706 pkg.metadata["PROVIDE"], str(e))
13708 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with every atom of the required sets, then run the
# graph to completion.
13712 for s, package_set in required_sets.iteritems():
13713 set_atom = SETPREFIX + s
13714 set_arg = SetArg(arg=set_atom, set=package_set,
13715 root_config=resolver.roots[myroot])
13716 set_args[s] = set_arg
13717 for atom in set_arg.set:
13718 resolver._dep_stack.append(
13719 Dependency(atom=atom, root=myroot, parent=set_arg))
13720 resolver.digraph.add(set_arg, None)
13722 success = resolver._complete_graph()
13723 writemsg_level("\b\b... done!\n")
13725 resolver.display_problems()
# Nested helper: returns truthy when hard (non-SOFT) deps remain
# unsatisfied and missing deps are not allowed; prints the details.
13730 def unresolved_deps():
13732 unresolvable = set()
13733 for dep in resolver._initially_unsatisfied_deps:
13734 if isinstance(dep.parent, Package) and \
13735 (dep.priority > UnmergeDepPriority.SOFT):
13736 unresolvable.add((dep.atom, dep.parent.cpv))
13738 if not unresolvable:
13741 if unresolvable and not allow_missing_deps:
13742 prefix = bad(" * ")
13744 msg.append("Dependencies could not be completely resolved due to")
13745 msg.append("the following required packages not being installed:")
13747 for atom, parent in unresolvable:
13748 msg.append(" %s pulled in by:" % (atom,))
13749 msg.append(" %s" % (parent,))
13751 msg.append("Have you forgotten to run " + \
13752 good("`emerge --update --newuse --deep @system @world`") + " prior")
13753 msg.append(("to %s? It may be necessary to manually " + \
13754 "uninstall packages that no longer") % action)
13755 msg.append("exist in the portage tree since " + \
13756 "it may not be possible to satisfy their")
13757 msg.append("dependencies. Also, be aware of " + \
13758 "the --with-bdeps option that is documented")
13759 msg.append("in " + good("`man emerge`") + ".")
13760 if action == "prune":
13762 msg.append("If you would like to ignore " + \
13763 "dependencies then use %s." % good("--nodeps"))
13764 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13765 level=logging.ERROR, noiselevel=-1)
13769 if unresolved_deps():
# Snapshot the keep-graph and count required packages for the summary.
13772 graph = resolver.digraph.copy()
13773 required_pkgs_total = 0
13775 if isinstance(node, Package):
13776 required_pkgs_total += 1
# Nested helper: print which graph parents keep child_node installed.
13778 def show_parents(child_node):
13779 parent_nodes = graph.parent_nodes(child_node)
13780 if not parent_nodes:
13781 # With --prune, the highest version can be pulled in without any
13782 # real parent since all installed packages are pulled in. In that
13783 # case there's nothing to show here.
13786 for node in parent_nodes:
13787 parent_strs.append(str(getattr(node, "cpv", node)))
13790 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13791 for parent_str in parent_strs:
13792 msg.append(" %s\n" % (parent_str,))
13794 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Nested helper: cmp-style comparator used with cmp_sort_key below.
13796 def cmp_pkg_cpv(pkg1, pkg2):
13797 """Sort Package instances by cpv."""
13798 if pkg1.cpv > pkg2.cpv:
13800 elif pkg1.cpv == pkg2.cpv:
# Nested helper: collect installed packages that are NOT in the
# keep-graph — these are the removal candidates.
13805 def create_cleanlist():
13806 pkgs_to_remove = []
13808 if action == "depclean":
13811 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13814 arg_atom = args_set.findAtomForPackage(pkg)
13815 except portage.exception.InvalidDependString:
13816 # this error has already been displayed by now
13820 if pkg not in graph:
13821 pkgs_to_remove.append(pkg)
13822 elif "--verbose" in myopts:
13826 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13827 if pkg not in graph:
13828 pkgs_to_remove.append(pkg)
13829 elif "--verbose" in myopts:
13832 elif action == "prune":
13833 # Prune really uses all installed instead of world. It's not
13834 # a real reverse dependency so don't display it as such.
13835 graph.remove(set_args["world"])
13837 for atom in args_set:
13838 for pkg in vardb.match_pkgs(atom):
13839 if pkg not in graph:
13840 pkgs_to_remove.append(pkg)
13841 elif "--verbose" in myopts:
13844 if not pkgs_to_remove:
13846 ">>> No packages selected for removal by %s\n" % action)
13847 if "--verbose" not in myopts:
13849 ">>> To see reverse dependencies, use %s\n" % \
13851 if action == "prune":
13853 ">>> To ignore dependencies, use %s\n" % \
13856 return pkgs_to_remove
13858 cleanlist = create_cleanlist()
13861 clean_set = set(cleanlist)
13863 # Check if any of these package are the sole providers of libraries
13864 # with consumers that have not been selected for removal. If so, these
13865 # packages and any dependencies need to be added to the graph.
13866 real_vardb = trees[myroot]["vartree"].dbapi
13867 linkmap = real_vardb.linkmap
13868 liblist = linkmap.listLibraryObjects()
# Memoize linkmap lookups; findConsumers/findProviders are presumably
# expensive filesystem/ELF scans — TODO confirm.
13869 consumer_cache = {}
13870 provider_cache = {}
13874 writemsg_level(">>> Checking for lib consumers...\n")
13876 for pkg in cleanlist:
13877 pkg_dblink = real_vardb._dblink(pkg.cpv)
13878 provided_libs = set()
13880 for lib in liblist:
13881 if pkg_dblink.isowner(lib, myroot):
13882 provided_libs.add(lib)
13884 if not provided_libs:
13888 for lib in provided_libs:
13889 lib_consumers = consumer_cache.get(lib)
13890 if lib_consumers is None:
13891 lib_consumers = linkmap.findConsumers(lib)
13892 consumer_cache[lib] = lib_consumers
13894 consumers[lib] = lib_consumers
# Drop consumers owned by the package itself — self-consumption does
# not block removal.
13899 for lib, lib_consumers in consumers.items():
13900 for consumer_file in list(lib_consumers):
13901 if pkg_dblink.isowner(consumer_file, myroot):
13902 lib_consumers.remove(consumer_file)
13903 if not lib_consumers:
13909 for lib, lib_consumers in consumers.iteritems():
13911 soname = soname_cache.get(lib)
13913 soname = linkmap.getSoname(lib)
13914 soname_cache[lib] = soname
13916 consumer_providers = []
13917 for lib_consumer in lib_consumers:
# NOTE(review): cache is read with key `lib` but written with key
# `lib_consumer` two lines below — looks inconsistent; in the full
# source the lookup key is `lib_consumer`. Verify before relying on
# this cache ever hitting.
13918 providers = provider_cache.get(lib)
13919 if providers is None:
13920 providers = linkmap.findProviders(lib_consumer)
13921 provider_cache[lib_consumer] = providers
13922 if soname not in providers:
13923 # Why does this happen?
13925 consumer_providers.append(
13926 (lib_consumer, providers[soname]))
13928 consumers[lib] = consumer_providers
13930 consumer_map[pkg] = consumers
# Resolve every consumer/provider file path to its owning package.
13934 search_files = set()
13935 for consumers in consumer_map.itervalues():
13936 for lib, consumer_providers in consumers.iteritems():
13937 for lib_consumer, providers in consumer_providers:
13938 search_files.add(lib_consumer)
13939 search_files.update(providers)
13941 writemsg_level(">>> Assigning files to packages...\n")
13942 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13944 for pkg, consumers in consumer_map.items():
13945 for lib, consumer_providers in consumers.items():
13946 lib_consumers = set()
13948 for lib_consumer, providers in consumer_providers:
13949 owner_set = file_owners.get(lib_consumer)
13950 provider_dblinks = set()
13951 provider_pkgs = set()
13953 if len(providers) > 1:
13954 for provider in providers:
13955 provider_set = file_owners.get(provider)
13956 if provider_set is not None:
13957 provider_dblinks.update(provider_set)
# If an alternative provider remains installed (not scheduled
# for removal), this consumer is not actually broken.
13959 if len(provider_dblinks) > 1:
13960 for provider_dblink in provider_dblinks:
13961 pkg_key = ("installed", myroot,
13962 provider_dblink.mycpv, "nomerge")
13963 if pkg_key not in clean_set:
13964 provider_pkgs.add(vardb.get(pkg_key))
13969 if owner_set is not None:
13970 lib_consumers.update(owner_set)
13972 for consumer_dblink in list(lib_consumers):
13973 if ("installed", myroot, consumer_dblink.mycpv,
13974 "nomerge") in clean_set:
13975 lib_consumers.remove(consumer_dblink)
13979 consumers[lib] = lib_consumers
13983 del consumer_map[pkg]
# Anything left in consumer_map would break link-level deps if removed:
# warn and pull the providers back into the keep-graph below.
13986 # TODO: Implement a package set for rebuilding consumer packages.
13988 msg = "In order to avoid breakage of link level " + \
13989 "dependencies, one or more packages will not be removed. " + \
13990 "This can be solved by rebuilding " + \
13991 "the packages that pulled them in."
13993 prefix = bad(" * ")
13994 from textwrap import wrap
13995 writemsg_level("".join(prefix + "%s\n" % line for \
13996 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13999 for pkg, consumers in consumer_map.iteritems():
14000 unique_consumers = set(chain(*consumers.values()))
14001 unique_consumers = sorted(consumer.mycpv \
14002 for consumer in unique_consumers)
14004 msg.append(" %s pulled in by:" % (pkg.cpv,))
14005 for consumer in unique_consumers:
14006 msg.append(" %s" % (consumer,))
14008 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14009 level=logging.WARNING, noiselevel=-1)
14011 # Add lib providers to the graph as children of lib consumers,
14012 # and also add any dependencies pulled in by the provider.
14013 writemsg_level(">>> Adding lib providers to graph...\n")
14015 for pkg, consumers in consumer_map.iteritems():
14016 for consumer_dblink in set(chain(*consumers.values())):
14017 consumer_pkg = vardb.get(("installed", myroot,
14018 consumer_dblink.mycpv, "nomerge"))
14019 if not resolver._add_pkg(pkg,
14020 Dependency(parent=consumer_pkg,
14021 priority=UnmergeDepPriority(runtime=True),
14023 resolver.display_problems()
# Re-run the resolver and recompute the cleanlist now that library
# providers have been added back.
14026 writemsg_level("\nCalculating dependencies ")
14027 success = resolver._complete_graph()
14028 writemsg_level("\b\b... done!\n")
14029 resolver.display_problems()
14032 if unresolved_deps():
14035 graph = resolver.digraph.copy()
14036 required_pkgs_total = 0
14038 if isinstance(node, Package):
14039 required_pkgs_total += 1
14040 cleanlist = create_cleanlist()
14043 clean_set = set(cleanlist)
14045 # Use a topological sort to create an unmerge order such that
14046 # each package is unmerged before it's dependencies. This is
14047 # necessary to avoid breaking things that may need to run
14048 # during pkg_prerm or pkg_postrm phases.
14050 # Create a new graph to account for dependencies between the
14051 # packages being unmerged.
14055 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14056 runtime = UnmergeDepPriority(runtime=True)
14057 runtime_post = UnmergeDepPriority(runtime_post=True)
14058 buildtime = UnmergeDepPriority(buildtime=True)
14060 "RDEPEND": runtime,
14061 "PDEPEND": runtime_post,
14062 "DEPEND": buildtime,
14065 for node in clean_set:
14066 graph.add(node, None)
14068 node_use = node.metadata["USE"].split()
14069 for dep_type in dep_keys:
14070 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking while evaluating deps of
# packages that are being removed anyway; restored immediately after.
14074 portage.dep._dep_check_strict = False
14075 success, atoms = portage.dep_check(depstr, None, settings,
14076 myuse=node_use, trees=resolver._graph_trees,
14079 portage.dep._dep_check_strict = True
14081 # Ignore invalid deps of packages that will
14082 # be uninstalled anyway.
14085 priority = priority_map[dep_type]
14087 if not isinstance(atom, portage.dep.Atom):
14088 # Ignore invalid atoms returned from dep_check().
14092 matches = vardb.match_pkgs(atom)
14095 for child_node in matches:
14096 if child_node in clean_set:
14097 graph.add(child_node, node, priority=priority)
14100 if len(graph.order) == len(graph.root_nodes()):
14101 # If there are no dependencies between packages
14102 # let unmerge() group them by cat/pn.
14104 cleanlist = [pkg.cpv for pkg in graph.order]
14106 # Order nodes from lowest to highest overall reference count for
14107 # optimal root node selection.
14108 node_refcounts = {}
14109 for node in graph.order:
14110 node_refcounts[node] = len(graph.parent_nodes(node))
14111 def cmp_reference_count(node1, node2):
14112 return node_refcounts[node1] - node_refcounts[node2]
14113 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Repeatedly peel off root nodes, relaxing ignore_priority one step at
# a time to break circular-dependency deadlocks.
14115 ignore_priority_range = [None]
14116 ignore_priority_range.extend(
14117 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14118 while not graph.empty():
14119 for ignore_priority in ignore_priority_range:
14120 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14124 raise AssertionError("no root nodes")
14125 if ignore_priority is not None:
14126 # Some deps have been dropped due to circular dependencies,
14127 # so only pop one node in order do minimize the number that
14132 cleanlist.append(node.cpv)
# Hand the ordered list to unmerge() and print the final summary.
14134 unmerge(root_config, myopts, "unmerge", cleanlist,
14135 ldpath_mtimes, ordered=ordered)
14137 if action == "prune":
14140 if not cleanlist and "--quiet" in myopts:
14143 print "Packages installed: "+str(len(vardb.cpv_all()))
14144 print "Packages in world: " + \
14145 str(len(root_config.sets["world"].getAtoms()))
14146 print "Packages in system: " + \
14147 str(len(root_config.sets["system"].getAtoms()))
14148 print "Required packages: "+str(required_pkgs_total)
14149 if "--pretend" in myopts:
14150 print "Number to remove: "+str(len(cleanlist))
14152 print "Number removed: "+str(len(cleanlist))
# resume_depgraph: rebuild a depgraph from mtimedb["resume"]["mergelist"].
# On UnsatisfiedResumeDep it iteratively drops the offending packages (and
# any merge-parents whose deps become unsatisfied as a result) from the
# mergelist and retries, collecting the dropped entries.
# Returns (success, depgraph, dropped_tasks) per the docstring fragment.
# NOTE(review): numbered listing with gaps (e.g. the enclosing retry loop,
# `try:`, and several `raise`/`continue` lines are missing); also
# `skip_masked` is used below but its assignment is not visible here.
14154 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14156 Construct a depgraph for the given resume list. This will raise
14157 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14159 @returns: (success, depgraph, dropped_tasks)
14162 skip_unsatisfied = True
14163 mergelist = mtimedb["resume"]["mergelist"]
14164 dropped_tasks = set()
14166 mydepgraph = depgraph(settings, trees,
14167 myopts, myparams, spinner)
14169 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14170 skip_masked=skip_masked)
14171 except depgraph.UnsatisfiedResumeDep, e:
# When not allowed to skip, the exception presumably propagates to the
# caller (re-raise line missing from this listing) — TODO confirm.
14172 if not skip_unsatisfied:
14175 graph = mydepgraph.digraph
14176 unsatisfied_parents = dict((dep.parent, dep.parent) \
14177 for dep in e.value)
# Walk up the graph: dropping a package can invalidate its parents'
# deps, so propagate the "unsatisfied" mark transitively.
14178 traversed_nodes = set()
14179 unsatisfied_stack = list(unsatisfied_parents)
14180 while unsatisfied_stack:
14181 pkg = unsatisfied_stack.pop()
14182 if pkg in traversed_nodes:
14184 traversed_nodes.add(pkg)
14186 # If this package was pulled in by a parent
14187 # package scheduled for merge, removing this
14188 # package may cause the the parent package's
14189 # dependency to become unsatisfied.
14190 for parent_node in graph.parent_nodes(pkg):
14191 if not isinstance(parent_node, Package) \
14192 or parent_node.operation not in ("merge", "nomerge"):
14195 graph.child_nodes(parent_node,
14196 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14197 if pkg in unsatisfied:
14198 unsatisfied_parents[parent_node] = parent_node
14199 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied entries; mergelist
# items are 4-element lists, compared here as tuples.
14201 pruned_mergelist = []
14202 for x in mergelist:
14203 if isinstance(x, list) and \
14204 tuple(x) not in unsatisfied_parents:
14205 pruned_mergelist.append(x)
14207 # If the mergelist doesn't shrink then this loop is infinite.
14208 if len(pruned_mergelist) == len(mergelist):
14209 # This happens if a package can't be dropped because
14210 # it's already installed, but it has unsatisfied PDEPEND.
14212 mergelist[:] = pruned_mergelist
14214 # Exclude installed packages that have been removed from the graph due
14215 # to failure to build/install runtime dependencies after the dependent
14216 # package has already been installed.
14217 dropped_tasks.update(pkg for pkg in \
14218 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs releases references so the old graph can be collected
# before the retry.
14219 mydepgraph.break_refs(unsatisfied_parents)
14221 del e, graph, traversed_nodes, \
14222 unsatisfied_parents, unsatisfied_stack
14226 return (success, mydepgraph, dropped_tasks)
# action_build: the main merge/fetch driver behind `emerge <targets>` and
# `emerge --resume`. Validates stale resume data, builds (or resumes) a
# depgraph, optionally displays/confirms the merge list, then hands it to
# the Scheduler; afterwards auto-cleans if AUTOCLEAN=yes.
# NOTE(review): numbered listing with many missing original lines; the
# visible code is a partial view — confirm against the full emerge source
# before changing behavior.
14228 def action_build(settings, trees, mtimedb,
14229 myopts, myaction, myfiles, spinner):
14231 # validate the state of the resume data
14232 # so that we can make assumptions later.
14233 for k in ("resume", "resume_backup"):
14234 if k not in mtimedb:
14236 resume_data = mtimedb[k]
# Any malformed field invalidates the whole resume entry (the deletion
# lines are among those missing from this listing — TODO confirm).
14237 if not isinstance(resume_data, dict):
14240 mergelist = resume_data.get("mergelist")
14241 if not isinstance(mergelist, list):
14244 for x in mergelist:
14245 if not (isinstance(x, list) and len(x) == 4):
14247 pkg_type, pkg_root, pkg_key, pkg_action = x
14248 if pkg_root not in trees:
14249 # Current $ROOT setting differs,
14250 # so the list must be stale.
14256 resume_opts = resume_data.get("myopts")
14257 if not isinstance(resume_opts, (dict, list)):
14260 favorites = resume_data.get("favorites")
14261 if not isinstance(favorites, list):
# --resume handling: fall back to resume_backup, then re-apply the
# options the interrupted command was started with.
14266 if "--resume" in myopts and \
14267 ("resume" in mtimedb or
14268 "resume_backup" in mtimedb):
14270 if "resume" not in mtimedb:
14271 mtimedb["resume"] = mtimedb["resume_backup"]
14272 del mtimedb["resume_backup"]
14274 # "myopts" is a list for backward compatibility.
14275 resume_opts = mtimedb["resume"].get("myopts", [])
14276 if isinstance(resume_opts, list):
14277 resume_opts = dict((k,True) for k in resume_opts)
# Interactive/display-only options must not be inherited on resume.
14278 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14279 resume_opts.pop(opt, None)
14280 myopts.update(resume_opts)
14282 if "--debug" in myopts:
14283 writemsg_level("myopts %s\n" % (myopts,))
14285 # Adjust config according to options of the command being resumed.
14286 for myroot in trees:
14287 mysettings = trees[myroot]["vartree"].settings
14288 mysettings.unlock()
14289 adjust_config(myopts, mysettings)
14291 del myroot, mysettings
14293 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags.
14296 buildpkgonly = "--buildpkgonly" in myopts
14297 pretend = "--pretend" in myopts
14298 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14299 ask = "--ask" in myopts
14300 nodeps = "--nodeps" in myopts
14301 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14302 tree = "--tree" in myopts
14303 if nodeps and tree:
14305 del myopts["--tree"]
14306 portage.writemsg(colorize("WARN", " * ") + \
14307 "--tree is broken with --nodeps. Disabling...\n")
14308 debug = "--debug" in myopts
14309 verbose = "--verbose" in myopts
14310 quiet = "--quiet" in myopts
14311 if pretend or fetchonly:
14312 # make the mtimedb readonly
14313 mtimedb.filename = None
14314 if '--digest' in myopts or 'digest' in settings.features:
14315 if '--digest' in myopts:
14316 msg = "The --digest option"
14318 msg = "The FEATURES=digest setting"
14320 msg += " can prevent corruption from being" + \
14321 " noticed. The `repoman manifest` command is the preferred" + \
14322 " way to generate manifests and it is capable of doing an" + \
14323 " entire repository or category at once."
14324 prefix = bad(" * ")
14325 writemsg(prefix + "\n")
14326 from textwrap import wrap
14327 for line in wrap(msg, 72):
14328 writemsg("%s%s\n" % (prefix, line))
14329 writemsg(prefix + "\n")
# Announce what kind of list is about to be shown (merge/fetch/build).
14331 if "--quiet" not in myopts and \
14332 ("--pretend" in myopts or "--ask" in myopts or \
14333 "--tree" in myopts or "--verbose" in myopts):
14335 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14337 elif "--buildpkgonly" in myopts:
14341 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14343 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14347 print darkgreen("These are the packages that would be %s, in order:") % action
14350 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14351 if not show_spinner:
14352 spinner.update = spinner.update_quiet
14355 favorites = mtimedb["resume"].get("favorites")
14356 if not isinstance(favorites, list):
# Resume path: possibly drop the first merge task (--skipfirst), then
# rebuild the graph via resume_depgraph().
14360 print "Calculating dependencies ",
14361 myparams = create_depgraph_params(myopts, myaction)
14363 resume_data = mtimedb["resume"]
14364 mergelist = resume_data["mergelist"]
14365 if mergelist and "--skipfirst" in myopts:
14366 for i, task in enumerate(mergelist):
14367 if isinstance(task, list) and \
14368 task and task[-1] == "merge":
14375 success, mydepgraph, dropped_tasks = resume_depgraph(
14376 settings, trees, mtimedb, myopts, myparams, spinner)
14377 except (portage.exception.PackageNotFound,
14378 depgraph.UnsatisfiedResumeDep), e:
14379 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14380 mydepgraph = e.depgraph
# Resume failed: explain why (invalid list, masked packages, or a
# vanished package) via EOutput.
14383 from textwrap import wrap
14384 from portage.output import EOutput
14387 resume_data = mtimedb["resume"]
14388 mergelist = resume_data.get("mergelist")
14389 if not isinstance(mergelist, list):
14391 if mergelist and debug or (verbose and not quiet):
14392 out.eerror("Invalid resume list:")
14395 for task in mergelist:
14396 if isinstance(task, list):
14397 out.eerror(indent + str(tuple(task)))
14400 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14401 out.eerror("One or more packages are either masked or " + \
14402 "have missing dependencies:")
14405 for dep in e.value:
14406 if dep.atom is None:
14407 out.eerror(indent + "Masked package:")
14408 out.eerror(2 * indent + str(dep.parent))
14411 out.eerror(indent + str(dep.atom) + " pulled in by:")
14412 out.eerror(2 * indent + str(dep.parent))
14414 msg = "The resume list contains packages " + \
14415 "that are either masked or have " + \
14416 "unsatisfied dependencies. " + \
14417 "Please restart/continue " + \
14418 "the operation manually, or use --skipfirst " + \
14419 "to skip the first package in the list and " + \
14420 "any other packages that may be " + \
14421 "masked or have missing dependencies."
14422 for line in wrap(msg, 72):
14424 elif isinstance(e, portage.exception.PackageNotFound):
14425 out.eerror("An expected package is " + \
14426 "not available: %s" % str(e))
14428 msg = "The resume list contains one or more " + \
14429 "packages that are no longer " + \
14430 "available. Please restart/continue " + \
14431 "the operation manually."
14432 for line in wrap(msg, 72):
14436 print "\b\b... done!"
# Report tasks silently dropped by resume_depgraph().
14440 portage.writemsg("!!! One or more packages have been " + \
14441 "dropped due to\n" + \
14442 "!!! masking or unsatisfied dependencies:\n\n",
14444 for task in dropped_tasks:
14445 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14446 portage.writemsg("\n", noiselevel=-1)
14449 if mydepgraph is not None:
14450 mydepgraph.display_problems()
14451 if not (ask or pretend):
14452 # delete the current list and also the backup
14453 # since it's probably stale too.
14454 for k in ("resume", "resume_backup"):
14455 mtimedb.pop(k, None)
14460 if ("--resume" in myopts):
14461 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the command-line files.
14464 myparams = create_depgraph_params(myopts, myaction)
14465 if "--quiet" not in myopts and "--nodeps" not in myopts:
14466 print "Calculating dependencies ",
14468 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14470 retval, favorites = mydepgraph.select_files(myfiles)
14471 except portage.exception.PackageNotFound, e:
14472 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14474 except portage.exception.PackageSetNotFound, e:
14475 root_config = trees[settings["ROOT"]]["root_config"]
14476 display_missing_pkg_set(root_config, e.value)
14479 print "\b\b... done!"
14481 mydepgraph.display_problems()
# Interactive/ask path: display the list and prompt before merging.
14484 if "--pretend" not in myopts and \
14485 ("--ask" in myopts or "--tree" in myopts or \
14486 "--verbose" in myopts) and \
14487 not ("--quiet" in myopts and "--ask" not in myopts):
14488 if "--resume" in myopts:
14489 mymergelist = mydepgraph.altlist()
14490 if len(mymergelist) == 0:
14491 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14493 favorites = mtimedb["resume"]["favorites"]
14494 retval = mydepgraph.display(
14495 mydepgraph.altlist(reversed=tree),
14496 favorites=favorites)
14497 mydepgraph.display_problems()
14498 if retval != os.EX_OK:
14500 prompt="Would you like to resume merging these packages?"
14502 retval = mydepgraph.display(
14503 mydepgraph.altlist(reversed=("--tree" in myopts)),
14504 favorites=favorites)
14505 mydepgraph.display_problems()
14506 if retval != os.EX_OK:
14509 for x in mydepgraph.altlist():
14510 if isinstance(x, Package) and x.operation == "merge":
# Decide which prompt to show; with --noreplace, offer to record the
# new packages in the world favorites (filtering non-world sets).
14514 sets = trees[settings["ROOT"]]["root_config"].sets
14515 world_candidates = None
14516 if "--noreplace" in myopts and \
14517 not oneshot and favorites:
14518 # Sets that are not world candidates are filtered
14519 # out here since the favorites list needs to be
14520 # complete for depgraph.loadResumeCommand() to
14521 # operate correctly.
14522 world_candidates = [x for x in favorites \
14523 if not (x.startswith(SETPREFIX) and \
14524 not sets[x[1:]].world_candidate)]
14525 if "--noreplace" in myopts and \
14526 not oneshot and world_candidates:
14528 for x in world_candidates:
14529 print " %s %s" % (good("*"), x)
14530 prompt="Would you like to add these packages to your world favorites?"
14531 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14532 prompt="Nothing to merge; would you like to auto-clean packages?"
14535 print "Nothing to merge; quitting."
14538 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14539 prompt="Would you like to fetch the source files for these packages?"
14541 prompt="Would you like to merge these packages?"
14543 if "--ask" in myopts and userquery(prompt) == "No":
14548 # Don't ask again (e.g. when auto-cleaning packages after merge)
14549 myopts.pop("--ask", None)
# Pretend path: display only, then verify --buildpkgonly constraints.
14551 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14552 if ("--resume" in myopts):
14553 mymergelist = mydepgraph.altlist()
14554 if len(mymergelist) == 0:
14555 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14557 favorites = mtimedb["resume"]["favorites"]
14558 retval = mydepgraph.display(
14559 mydepgraph.altlist(reversed=tree),
14560 favorites=favorites)
14561 mydepgraph.display_problems()
14562 if retval != os.EX_OK:
14565 retval = mydepgraph.display(
14566 mydepgraph.altlist(reversed=("--tree" in myopts)),
14567 favorites=favorites)
14568 mydepgraph.display_problems()
14569 if retval != os.EX_OK:
14571 if "--buildpkgonly" in myopts:
14572 graph_copy = mydepgraph.digraph.clone()
14573 removed_nodes = set()
14574 for node in graph_copy:
14575 if not isinstance(node, Package) or \
14576 node.operation == "nomerge":
14577 removed_nodes.add(node)
14578 graph_copy.difference_update(removed_nodes)
14579 if not graph_copy.hasallzeros(ignore_priority = \
14580 DepPrioritySatisfiedRange.ignore_medium):
14581 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14582 print "!!! You have to merge the dependencies before you can build this package.\n"
# Real-merge path: same --buildpkgonly check, then hand the list to the
# Scheduler (with resume bookkeeping in mtimedb).
14585 if "--buildpkgonly" in myopts:
14586 graph_copy = mydepgraph.digraph.clone()
14587 removed_nodes = set()
14588 for node in graph_copy:
14589 if not isinstance(node, Package) or \
14590 node.operation == "nomerge":
14591 removed_nodes.add(node)
14592 graph_copy.difference_update(removed_nodes)
14593 if not graph_copy.hasallzeros(ignore_priority = \
14594 DepPrioritySatisfiedRange.ignore_medium):
14595 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14596 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14599 if ("--resume" in myopts):
14600 favorites=mtimedb["resume"]["favorites"]
14601 mymergelist = mydepgraph.altlist()
14602 mydepgraph.break_refs(mymergelist)
14603 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14604 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14605 del mydepgraph, mymergelist
14606 clear_caches(trees)
14608 retval = mergetask.merge()
14609 merge_count = mergetask.curval
# Keep a backup of a multi-package resume list so --resume still works
# if the primary entry is consumed/corrupted.
14611 if "resume" in mtimedb and \
14612 "mergelist" in mtimedb["resume"] and \
14613 len(mtimedb["resume"]["mergelist"]) > 1:
14614 mtimedb["resume_backup"] = mtimedb["resume"]
14615 del mtimedb["resume"]
14617 mtimedb["resume"]={}
14618 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14619 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14620 # a list type for options.
14621 mtimedb["resume"]["myopts"] = myopts.copy()
14623 # Convert Atom instances to plain str.
14624 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14626 pkglist = mydepgraph.altlist()
14627 mydepgraph.saveNomergeFavorites()
14628 mydepgraph.break_refs(pkglist)
14629 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14630 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14631 del mydepgraph, pkglist
14632 clear_caches(trees)
14634 retval = mergetask.merge()
14635 merge_count = mergetask.curval
# Post-merge housekeeping: auto-clean (or warn that it's disabled) and
# prune stale preserved-libs registry entries.
14637 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14638 if "yes" == settings.get("AUTOCLEAN"):
14639 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14640 unmerge(trees[settings["ROOT"]]["root_config"],
14641 myopts, "clean", [],
14642 ldpath_mtimes, autoclean=1)
14644 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14645 + " AUTOCLEAN is disabled. This can cause serious"
14646 + " problems due to overlapping packages.\n")
14647 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report that two mutually exclusive actions were requested."""
	lines = (
		"\n!!! Multiple actions requested... Please choose one only.\n",
		"!!! '%s' or '%s'\n\n" % (action1, action2),
	)
	for line in lines:
		sys.stderr.write(line)
def insert_optional_args(args):
	"""
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.

	NOTE(review): several scaffolding lines (the while-loop header,
	the try/except pairs around the int() conversions, the result
	list initialization, and the final return) are elided from this
	view of the file.
	"""
	jobs_opts = ("-j", "--jobs")
	# Work through the arguments front-to-back by popping from the
	# end of a reversed copy.
	arg_stack = args[:]
	arg_stack.reverse()
	arg = arg_stack.pop()

	# -j may also appear bundled inside a short-option cluster
	# (e.g. "-vj3"), but never inside a long option.
	short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
	if not (short_job_opt or arg in jobs_opts):
		# Not a jobs option: pass it through untouched.
		new_args.append(arg)

	# Insert an empty placeholder in order to
	# satisfy the requirements of optparse.

	new_args.append("--jobs")

	if short_job_opt and len(arg) > 2:
		if arg[:2] == "-j":
			# "-jN" form: the count is attached directly.
			job_count = int(arg[2:])
			saved_opts = arg[2:]
			# Bundled cluster form: strip the "j" so the remaining
			# short flags can be re-appended afterwards.
			saved_opts = arg[1:].replace("j", "")

	if job_count is None and arg_stack:
		# The count may have been supplied as the next argument.
		job_count = int(arg_stack[-1])
		# Discard the job count from the stack
		# since we're consuming it here.

	if job_count is None:
		# unlimited number of jobs
		new_args.append("True")
		new_args.append(str(job_count))

	if saved_opts is not None:
		# Re-append any short options that were bundled with -j.
		new_args.append("-" + saved_opts)
14713 def parse_opts(tmpcmdline, silent=False):
14718 global actions, options, shortmapping
14720 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14721 argument_options = {
14723 "help":"specify the location for portage configuration files",
14727 "help":"enable or disable color output",
14729 "choices":("y", "n")
14734 "help" : "Specifies the number of packages to build " + \
14740 "--load-average": {
14742 "help" :"Specifies that no new builds should be started " + \
14743 "if there are other builds running and the load average " + \
14744 "is at least LOAD (a floating-point number).",
14750 "help":"include unnecessary build time dependencies",
14752 "choices":("y", "n")
14755 "help":"specify conditions to trigger package reinstallation",
14757 "choices":["changed-use"]
14761 from optparse import OptionParser
14762 parser = OptionParser()
14763 if parser.has_option("--help"):
14764 parser.remove_option("--help")
14766 for action_opt in actions:
14767 parser.add_option("--" + action_opt, action="store_true",
14768 dest=action_opt.replace("-", "_"), default=False)
14769 for myopt in options:
14770 parser.add_option(myopt, action="store_true",
14771 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14772 for shortopt, longopt in shortmapping.iteritems():
14773 parser.add_option("-" + shortopt, action="store_true",
14774 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14775 for myalias, myopt in longopt_aliases.iteritems():
14776 parser.add_option(myalias, action="store_true",
14777 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14779 for myopt, kwargs in argument_options.iteritems():
14780 parser.add_option(myopt,
14781 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14783 tmpcmdline = insert_optional_args(tmpcmdline)
14785 myoptions, myargs = parser.parse_args(args=tmpcmdline)
14789 if myoptions.jobs == "True":
14793 jobs = int(myoptions.jobs)
14797 if jobs is not True and \
14801 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14802 (myoptions.jobs,), noiselevel=-1)
14804 myoptions.jobs = jobs
14806 if myoptions.load_average:
14808 load_average = float(myoptions.load_average)
14812 if load_average <= 0.0:
14813 load_average = None
14815 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14816 (myoptions.load_average,), noiselevel=-1)
14818 myoptions.load_average = load_average
14820 for myopt in options:
14821 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14823 myopts[myopt] = True
14825 for myopt in argument_options:
14826 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14830 if myoptions.searchdesc:
14831 myoptions.search = True
14833 for action_opt in actions:
14834 v = getattr(myoptions, action_opt.replace("-", "_"))
14837 multiple_actions(myaction, action_opt)
14839 myaction = action_opt
14843 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run config validation on the vartree settings of every root."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Release per-tree metadata caches and the global dircache.

	Used to free memory once dependency calculation is finished and
	the cached data is no longer needed.
	"""
	for root_trees in trees.itervalues():
		portdb = root_trees["porttree"].dbapi
		bindb = root_trees["bintree"].dbapi
		portdb.melt()
		portdb._aux_cache.clear()
		bindb._aux_cache.clear()
		bindb._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""Build and return (settings, trees, mtimedb) for this invocation.

	NOTE(review): several interior lines are elided from this view
	(the kwargs initialization, the body of the env-var loop, and
	the header/guards of the per-root settings loop); comments below
	only describe the code that is visible.
	"""
	# Let PORTAGE_CONFIGROOT / ROOT from the environment override the
	# corresponding create_trees() keyword arguments.
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
	trees = portage.create_trees(trees=trees, **kwargs)

	# Attach a RootConfig (settings plus package-set config) to each root.
	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	settings = trees["/"]["vartree"].settings

	# presumably selects the settings of the target ROOT when it
	# differs from "/" — loop guard elided; TODO confirm
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings

	# The mtimedb always lives under the system cache path, regardless
	# of any configured ROOT.
	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	return settings, trees, mtimedb
14885 def adjust_config(myopts, settings):
14886 """Make emerge specific adjustments to the config."""
14888 # To enhance usability, make some vars case insensitive by forcing them to
14890 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14891 if myvar in settings:
14892 settings[myvar] = settings[myvar].lower()
14893 settings.backup_changes(myvar)
14896 # Kill noauto as it will break merges otherwise.
14897 if "noauto" in settings.features:
14898 while "noauto" in settings.features:
14899 settings.features.remove("noauto")
14900 settings["FEATURES"] = " ".join(settings.features)
14901 settings.backup_changes("FEATURES")
14905 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14906 except ValueError, e:
14907 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14908 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14909 settings["CLEAN_DELAY"], noiselevel=-1)
14910 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14911 settings.backup_changes("CLEAN_DELAY")
14913 EMERGE_WARNING_DELAY = 10
14915 EMERGE_WARNING_DELAY = int(settings.get(
14916 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14917 except ValueError, e:
14918 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14919 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14920 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14921 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14922 settings.backup_changes("EMERGE_WARNING_DELAY")
14924 if "--quiet" in myopts:
14925 settings["PORTAGE_QUIET"]="1"
14926 settings.backup_changes("PORTAGE_QUIET")
14928 if "--verbose" in myopts:
14929 settings["PORTAGE_VERBOSE"] = "1"
14930 settings.backup_changes("PORTAGE_VERBOSE")
14932 # Set so that configs will be merged regardless of remembered status
14933 if ("--noconfmem" in myopts):
14934 settings["NOCONFMEM"]="1"
14935 settings.backup_changes("NOCONFMEM")
14937 # Set various debug markers... They should be merged somehow.
14940 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14941 if PORTAGE_DEBUG not in (0, 1):
14942 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14943 PORTAGE_DEBUG, noiselevel=-1)
14944 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14947 except ValueError, e:
14948 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14949 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14950 settings["PORTAGE_DEBUG"], noiselevel=-1)
14952 if "--debug" in myopts:
14954 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14955 settings.backup_changes("PORTAGE_DEBUG")
14957 if settings.get("NOCOLOR") not in ("yes","true"):
14958 portage.output.havecolor = 1
14960 """The explicit --color < y | n > option overrides the NOCOLOR environment
14961 variable and stdout auto-detection."""
14962 if "--color" in myopts:
14963 if "y" == myopts["--color"]:
14964 portage.output.havecolor = 1
14965 settings["NOCOLOR"] = "false"
14967 portage.output.havecolor = 0
14968 settings["NOCOLOR"] = "true"
14969 settings.backup_changes("NOCOLOR")
14970 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14971 portage.output.havecolor = 0
14972 settings["NOCOLOR"] = "true"
14973 settings.backup_changes("NOCOLOR")
14975 def apply_priorities(settings):
def nice(settings):
	# Apply PORTAGE_NICENESS (default "0") to the current process.
	# os.nice() takes a relative increment, so this lowers (or raises)
	# priority relative to the inherited nice level.
	# NOTE(review): the opening try: line is elided from this view.
		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	# OSError: the kernel rejected the increment (e.g. insufficient
	# privileges); ValueError: the setting was not a valid integer.
	except (OSError, ValueError), e:
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Run the user-configured PORTAGE_IONICE_COMMAND for this process.

	NOTE(review): the guard checking that the command is set and the
	try: line around the spawn call are elided from this view.
	"""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	ionice_cmd = shlex.split(ionice_cmd)

	from portage.util import varexpand
	# Expand ${PID}-style references in the command with the current
	# process id so the command can target this emerge process.
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

	rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.

	# A configured command that exists but fails is worth reporting.
	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	# Report an unsatisfiable set name to the user and list every set
	# that actually exists so a valid one can be chosen.
	# NOTE(review): the msg list initialization is elided from this view.
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	for s in sorted(root_config.sets):
		msg.append("    %s" % s)
	# Emit all collected lines through the ERROR-level channel.
	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
15027 def expand_set_arguments(myfiles, myaction, root_config):
15029 setconfig = root_config.setconfig
15031 sets = setconfig.getSets()
15033 # In order to know exactly which atoms/sets should be added to the
15034 # world file, the depgraph performs set expansion later. It will get
15035 # confused about where the atoms came from if it's not allowed to
15036 # expand them itself.
15037 do_not_expand = (None, )
15040 if a in ("system", "world"):
15041 newargs.append(SETPREFIX+a)
15048 # separators for set arguments
15052 # WARNING: all operators must be of equal length
15054 DIFF_OPERATOR = "-@"
15055 UNION_OPERATOR = "+@"
15057 for i in range(0, len(myfiles)):
15058 if myfiles[i].startswith(SETPREFIX):
15061 x = myfiles[i][len(SETPREFIX):]
15064 start = x.find(ARG_START)
15065 end = x.find(ARG_END)
15066 if start > 0 and start < end:
15067 namepart = x[:start]
15068 argpart = x[start+1:end]
15070 # TODO: implement proper quoting
15071 args = argpart.split(",")
15075 k, v = a.split("=", 1)
15078 options[a] = "True"
15079 setconfig.update(namepart, options)
15080 newset += (x[:start-len(namepart)]+namepart)
15081 x = x[end+len(ARG_END):]
15085 myfiles[i] = SETPREFIX+newset
15087 sets = setconfig.getSets()
15089 # display errors that occured while loading the SetConfig instance
15090 for e in setconfig.errors:
15091 print colorize("BAD", "Error during set creation: %s" % e)
15093 # emerge relies on the existance of sets with names "world" and "system"
15094 required_sets = ("world", "system")
15097 for s in required_sets:
15099 missing_sets.append(s)
15101 if len(missing_sets) > 2:
15102 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15103 missing_sets_str += ', and "%s"' % missing_sets[-1]
15104 elif len(missing_sets) == 2:
15105 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15107 missing_sets_str = '"%s"' % missing_sets[-1]
15108 msg = ["emerge: incomplete set configuration, " + \
15109 "missing set(s): %s" % missing_sets_str]
15111 msg.append(" sets defined: %s" % ", ".join(sets))
15112 msg.append(" This usually means that '%s'" % \
15113 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15114 msg.append(" is missing or corrupt.")
15116 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15118 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15121 if a.startswith(SETPREFIX):
15122 # support simple set operations (intersection, difference and union)
15123 # on the commandline. Expressions are evaluated strictly left-to-right
15124 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15125 expression = a[len(SETPREFIX):]
15128 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15129 is_pos = expression.rfind(IS_OPERATOR)
15130 diff_pos = expression.rfind(DIFF_OPERATOR)
15131 union_pos = expression.rfind(UNION_OPERATOR)
15132 op_pos = max(is_pos, diff_pos, union_pos)
15133 s1 = expression[:op_pos]
15134 s2 = expression[op_pos+len(IS_OPERATOR):]
15135 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15137 display_missing_pkg_set(root_config, s2)
15139 expr_sets.insert(0, s2)
15140 expr_ops.insert(0, op)
15142 if not expression in sets:
15143 display_missing_pkg_set(root_config, expression)
15145 expr_sets.insert(0, expression)
15146 result = set(setconfig.getSetAtoms(expression))
15147 for i in range(0, len(expr_ops)):
15148 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15149 if expr_ops[i] == IS_OPERATOR:
15150 result.intersection_update(s2)
15151 elif expr_ops[i] == DIFF_OPERATOR:
15152 result.difference_update(s2)
15153 elif expr_ops[i] == UNION_OPERATOR:
15156 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15157 newargs.extend(result)
15159 s = a[len(SETPREFIX):]
15161 display_missing_pkg_set(root_config, s)
15163 setconfig.active.append(s)
15165 set_atoms = setconfig.getSetAtoms(s)
15166 except portage.exception.PackageSetNotFound, e:
15167 writemsg_level(("emerge: the given set '%s' " + \
15168 "contains a non-existent set named '%s'.\n") % \
15169 (s, e), level=logging.ERROR, noiselevel=-1)
15171 if myaction in unmerge_actions and \
15172 not sets[s].supportsOperation("unmerge"):
15173 sys.stderr.write("emerge: the given set '%s' does " % s + \
15174 "not support unmerge operations\n")
15176 elif not set_atoms:
15177 print "emerge: '%s' is an empty set" % s
15178 elif myaction not in do_not_expand:
15179 newargs.extend(set_atoms)
15181 newargs.append(SETPREFIX+s)
15182 for e in sets[s].errors:
15186 return (newargs, retval)
def repo_name_check(trees):
	"""Warn about repositories lacking a profiles/repo_name entry.

	Returns True when at least one repository is missing its
	repo_name, False otherwise.
	"""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start by assuming every configured tree lacks a name,
			# then discard those that have one registered.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
			# NOTE(review): the loop header iterating over repos
			# (binding r) is elided from this view.
				missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		# NOTE(review): the msg list initialization is elided here.
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	# Warn when CONFIG_PROTECT is empty for any root: configuration
	# file protection is effectively disabled in that case.
	# NOTE(review): the conditional header that qualifies the message
	# with the root path (presumably `if root != "/":`) is elided
	# from this view — TODO confirm.
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
				msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Tell the user that a short ebuild name matched several packages.

	In --quiet mode just list the fully-qualified names; otherwise
	drive the search machinery so full package details are shown.
	NOTE(review): several interior lines are elided from this view
	(the quiet branch's early return, the second argument and closing
	of the insert_category_into_atom() call, and the search add/output
	calls inside the final loop).
	"""
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		# De-duplicate the atoms down to category/package granularity.
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print "    " + colorize("INFORM", cp)

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):

	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Verify that every root has a valid profile before proceeding.

	Actions that can work without a valid profile (info, sync,
	--version, --help) are exempted.  NOTE(review): the return
	statements of the exempt branches, the loop's success path, and
	the failure-path return are elided from this view.
	"""
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
15270 global portage # NFC why this is necessary now - genone
15271 portage._disable_legacy_globals()
15272 # Disable color until we're sure that it should be enabled (after
15273 # EMERGE_DEFAULT_OPTS has been parsed).
15274 portage.output.havecolor = 0
15275 # This first pass is just for options that need to be known as early as
15276 # possible, such as --config-root. They will be parsed again later,
15277 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15278 # the value of --config-root).
15279 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15280 if "--debug" in myopts:
15281 os.environ["PORTAGE_DEBUG"] = "1"
15282 if "--config-root" in myopts:
15283 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15285 # Portage needs to ensure a sane umask for the files it creates.
15287 settings, trees, mtimedb = load_emerge_config()
15288 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15289 rval = profile_check(trees, myaction, myopts)
15290 if rval != os.EX_OK:
15293 if portage._global_updates(trees, mtimedb["updates"]):
15295 # Reload the whole config from scratch.
15296 settings, trees, mtimedb = load_emerge_config(trees=trees)
15297 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15299 xterm_titles = "notitles" not in settings.features
15302 if "--ignore-default-opts" not in myopts:
15303 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15304 tmpcmdline.extend(sys.argv[1:])
15305 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15307 if "--digest" in myopts:
15308 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15309 # Reload the whole config from scratch so that the portdbapi internal
15310 # config is updated with new FEATURES.
15311 settings, trees, mtimedb = load_emerge_config(trees=trees)
15312 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15314 for myroot in trees:
15315 mysettings = trees[myroot]["vartree"].settings
15316 mysettings.unlock()
15317 adjust_config(myopts, mysettings)
15318 if '--pretend' not in myopts and myaction in \
15319 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15320 mysettings["PORTAGE_COUNTER_HASH"] = \
15321 trees[myroot]["vartree"].dbapi._counter_hash()
15322 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15324 del myroot, mysettings
15326 apply_priorities(settings)
15328 spinner = stdout_spinner()
15329 if "candy" in settings.features:
15330 spinner.update = spinner.update_scroll
15332 if "--quiet" not in myopts:
15333 portage.deprecated_profile_check(settings=settings)
15334 repo_name_check(trees)
15335 config_protect_check(trees)
15337 eclasses_overridden = {}
15338 for mytrees in trees.itervalues():
15339 mydb = mytrees["porttree"].dbapi
15340 # Freeze the portdbapi for performance (memoize all xmatch results).
15342 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15345 if eclasses_overridden and \
15346 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15347 prefix = bad(" * ")
15348 if len(eclasses_overridden) == 1:
15349 writemsg(prefix + "Overlay eclass overrides " + \
15350 "eclass from PORTDIR:\n", noiselevel=-1)
15352 writemsg(prefix + "Overlay eclasses override " + \
15353 "eclasses from PORTDIR:\n", noiselevel=-1)
15354 writemsg(prefix + "\n", noiselevel=-1)
15355 for eclass_name in sorted(eclasses_overridden):
15356 writemsg(prefix + " '%s/%s.eclass'\n" % \
15357 (eclasses_overridden[eclass_name], eclass_name),
15359 writemsg(prefix + "\n", noiselevel=-1)
15360 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15361 "because it will trigger invalidation of cached ebuild metadata " + \
15362 "that is distributed with the portage tree. If you must " + \
15363 "override eclasses from PORTDIR then you are advised to add " + \
15364 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15365 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15366 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15367 "you would like to disable this warning."
15368 from textwrap import wrap
15369 for line in wrap(msg, 72):
15370 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15372 if "moo" in myfiles:
15375 Larry loves Gentoo (""" + platform.system() + """)
15377 _______________________
15378 < Have you mooed today? >
15379 -----------------------
15389 ext = os.path.splitext(x)[1]
15390 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15391 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15394 root_config = trees[settings["ROOT"]]["root_config"]
15395 if myaction == "list-sets":
15396 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15400 # only expand sets for actions taking package arguments
15401 oldargs = myfiles[:]
15402 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15403 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15404 if retval != os.EX_OK:
15407 # Need to handle empty sets specially, otherwise emerge will react
15408 # with the help message for empty argument lists
15409 if oldargs and not myfiles:
15410 print "emerge: no targets left after set expansion"
15413 if ("--tree" in myopts) and ("--columns" in myopts):
15414 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15417 if ("--quiet" in myopts):
15418 spinner.update = spinner.update_quiet
15419 portage.util.noiselimit = -1
15421 # Always create packages if FEATURES=buildpkg
15422 # Imply --buildpkg if --buildpkgonly
15423 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15424 if "--buildpkg" not in myopts:
15425 myopts["--buildpkg"] = True
15427 # Always try and fetch binary packages if FEATURES=getbinpkg
15428 if ("getbinpkg" in settings.features):
15429 myopts["--getbinpkg"] = True
15431 if "--buildpkgonly" in myopts:
15432 # --buildpkgonly will not merge anything, so
15433 # it cancels all binary package options.
15434 for opt in ("--getbinpkg", "--getbinpkgonly",
15435 "--usepkg", "--usepkgonly"):
15436 myopts.pop(opt, None)
15438 if "--fetch-all-uri" in myopts:
15439 myopts["--fetchonly"] = True
15441 if "--skipfirst" in myopts and "--resume" not in myopts:
15442 myopts["--resume"] = True
15444 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15445 myopts["--usepkgonly"] = True
15447 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15448 myopts["--getbinpkg"] = True
15450 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15451 myopts["--usepkg"] = True
15453 # Also allow -K to apply --usepkg/-k
15454 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15455 myopts["--usepkg"] = True
15457 # Allow -p to remove --ask
15458 if ("--pretend" in myopts) and ("--ask" in myopts):
15459 print ">>> --pretend disables --ask... removing --ask from options."
15460 del myopts["--ask"]
15462 # forbid --ask when not in a terminal
15463 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15464 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15465 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15469 if settings.get("PORTAGE_DEBUG", "") == "1":
15470 spinner.update = spinner.update_quiet
15472 if "python-trace" in settings.features:
15473 import portage.debug
15474 portage.debug.set_trace(True)
15476 if not ("--quiet" in myopts):
15477 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15478 spinner.update = spinner.update_basic
15480 if myaction == 'version':
15481 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15482 settings.profile_path, settings["CHOST"],
15483 trees[settings["ROOT"]]["vartree"].dbapi)
15485 elif "--help" in myopts:
15486 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15489 if "--debug" in myopts:
15490 print "myaction", myaction
15491 print "myopts", myopts
15493 if not myaction and not myfiles and "--resume" not in myopts:
15494 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15497 pretend = "--pretend" in myopts
15498 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15499 buildpkgonly = "--buildpkgonly" in myopts
15501 # check if root user is the current user for the actions where emerge needs this
15502 if portage.secpass < 2:
15503 # We've already allowed "--version" and "--help" above.
15504 if "--pretend" not in myopts and myaction not in ("search","info"):
15505 need_superuser = not \
15507 (buildpkgonly and secpass >= 1) or \
15508 myaction in ("metadata", "regen") or \
15509 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15510 if portage.secpass < 1 or \
15513 access_desc = "superuser"
15515 access_desc = "portage group"
15516 # Always show portage_group_warning() when only portage group
15517 # access is required but the user is not in the portage group.
15518 from portage.data import portage_group_warning
15519 if "--ask" in myopts:
15520 myopts["--pretend"] = True
15521 del myopts["--ask"]
15522 print ("%s access is required... " + \
15523 "adding --pretend to options.\n") % access_desc
15524 if portage.secpass < 1 and not need_superuser:
15525 portage_group_warning()
15527 sys.stderr.write(("emerge: %s access is " + \
15528 "required.\n\n") % access_desc)
15529 if portage.secpass < 1 and not need_superuser:
15530 portage_group_warning()
15533 disable_emergelog = False
15534 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15536 disable_emergelog = True
15538 if myaction in ("search", "info"):
15539 disable_emergelog = True
15540 if disable_emergelog:
15541 """ Disable emergelog for everything except build or unmerge
15542 operations. This helps minimize parallel emerge.log entries that can
15543 confuse log parsers. We especially want it disabled during
15544 parallel-fetch, which uses --resume --fetchonly."""
15546 def emergelog(*pargs, **kargs):
15549 if not "--pretend" in myopts:
15550 emergelog(xterm_titles, "Started emerge on: "+\
15551 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15554 myelogstr=" ".join(myopts)
15556 myelogstr+=" "+myaction
15558 myelogstr += " " + " ".join(oldargs)
15559 emergelog(xterm_titles, " *** emerge " + myelogstr)
15562 def emergeexitsig(signum, frame):
15563 signal.signal(signal.SIGINT, signal.SIG_IGN)
15564 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15565 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15566 sys.exit(100+signum)
15567 signal.signal(signal.SIGINT, emergeexitsig)
15568 signal.signal(signal.SIGTERM, emergeexitsig)
15571 """This gets out final log message in before we quit."""
15572 if "--pretend" not in myopts:
15573 emergelog(xterm_titles, " *** terminating.")
15574 if "notitles" not in settings.features:
15576 portage.atexit_register(emergeexit)
15578 if myaction in ("config", "metadata", "regen", "sync"):
15579 if "--pretend" in myopts:
15580 sys.stderr.write(("emerge: The '%s' action does " + \
15581 "not support '--pretend'.\n") % myaction)
15584 if "sync" == myaction:
15585 return action_sync(settings, trees, mtimedb, myopts, myaction)
15586 elif "metadata" == myaction:
15587 action_metadata(settings, portdb, myopts)
15588 elif myaction=="regen":
15589 validate_ebuild_environment(trees)
15590 return action_regen(settings, portdb, myopts.get("--jobs"),
15591 myopts.get("--load-average"))
15593 elif "config"==myaction:
15594 validate_ebuild_environment(trees)
15595 action_config(settings, trees, myopts, myfiles)
15598 elif "search"==myaction:
15599 validate_ebuild_environment(trees)
15600 action_search(trees[settings["ROOT"]]["root_config"],
15601 myopts, myfiles, spinner)
15602 elif myaction in ("clean", "unmerge") or \
15603 (myaction == "prune" and "--nodeps" in myopts):
15604 validate_ebuild_environment(trees)
15606 # Ensure atoms are valid before calling unmerge().
15607 # For backward compat, leading '=' is not required.
15609 if is_valid_package_atom(x) or \
15610 is_valid_package_atom("=" + x):
15613 msg.append("'%s' is not a valid package atom." % (x,))
15614 msg.append("Please check ebuild(5) for full details.")
15615 writemsg_level("".join("!!! %s\n" % line for line in msg),
15616 level=logging.ERROR, noiselevel=-1)
15619 # When given a list of atoms, unmerge
15620 # them in the order given.
15621 ordered = myaction == "unmerge"
15622 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15623 mtimedb["ldpath"], ordered=ordered):
15624 if not (buildpkgonly or fetchonly or pretend):
15625 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15627 elif myaction in ("depclean", "info", "prune"):
15629 # Ensure atoms are valid before calling unmerge().
15630 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15633 if is_valid_package_atom(x):
15635 valid_atoms.append(
15636 portage.dep_expand(x, mydb=vardb, settings=settings))
15637 except portage.exception.AmbiguousPackageName, e:
15638 msg = "The short ebuild name \"" + x + \
15639 "\" is ambiguous. Please specify " + \
15640 "one of the following " + \
15641 "fully-qualified ebuild names instead:"
15642 for line in textwrap.wrap(msg, 70):
15643 writemsg_level("!!! %s\n" % (line,),
15644 level=logging.ERROR, noiselevel=-1)
15646 writemsg_level(" %s\n" % colorize("INFORM", i),
15647 level=logging.ERROR, noiselevel=-1)
15648 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15652 msg.append("'%s' is not a valid package atom." % (x,))
15653 msg.append("Please check ebuild(5) for full details.")
15654 writemsg_level("".join("!!! %s\n" % line for line in msg),
15655 level=logging.ERROR, noiselevel=-1)
15658 if myaction == "info":
15659 return action_info(settings, trees, myopts, valid_atoms)
15661 validate_ebuild_environment(trees)
15662 action_depclean(settings, trees, mtimedb["ldpath"],
15663 myopts, myaction, valid_atoms, spinner)
15664 if not (buildpkgonly or fetchonly or pretend):
15665 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15666 # "update", "system", or just process files:
15668 validate_ebuild_environment(trees)
15669 if "--pretend" not in myopts:
15670 display_news_notification(root_config, myopts)
15671 retval = action_build(settings, trees, mtimedb,
15672 myopts, myaction, myfiles, spinner)
15673 root_config = trees[settings["ROOT"]]["root_config"]
15674 post_emerge(root_config, myopts, mtimedb, retval)