2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
# Terminal progress indicator used while long operations (dependency
# calculation, cache regeneration) run.  Supports several display modes:
# twirling cursor, scrolling message, simple dots, or quiet (no output).
# NOTE(review): this dump is line-sampled (original line numbers skip), so
# parts of each method body are missing; comments describe only visible code.
70 class stdout_spinner(object):
# Pool of fortune-cookie style messages for the "scroll" display mode; one
# is picked pseudo-randomly at construction time from the clock.
72 "Gentoo Rocks ("+platform.system()+")",
73 "Thank you for using Gentoo. :)",
74 "Are you actually trying to read this?",
75 "How many times have you stared at this?",
76 "We are generating the cache right now",
77 "You are paying too much attention.",
78 "A theory is better than its explanation.",
79 "Phasers locked on target, Captain.",
80 "Thrashing is just virtual crashing.",
81 "To be is to program.",
82 "Real Users hate Real Programmers.",
83 "When all else fails, read the instructions.",
84 "Functionality breeds Contempt.",
85 "The future lies ahead.",
86 "3.1415926535897932384626433832795028841971694",
87 "Sometimes insanity is the only alternative.",
88 "Inaccuracy saves a world of explanation.",
# Repeating character cycle for the "twirl" mode (/-\| spinner frames).
91 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default update mode is the twirl spinner; the scroll message is chosen
# here by hashing the current time into the message list.
95 self.update = self.update_twirl
96 self.scroll_sequence = self.scroll_msgs[
97 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between tty writes; see _return_early below.
99 self.min_display_latency = 0.05
101 def _return_early(self):
# (docstring) Rate-limits terminal updates.
103 Flushing output to the tty too frequently wastes cpu time. Therefore,
104 each update* method should return without doing any output when this
# Record the time of this update so the next call can measure the gap.
107 cur_time = time.time()
108 if cur_time - self.last_update < self.min_display_latency:
110 self.last_update = cur_time
113 def update_basic(self):
# "Dots" mode: advance position, bail if called too soon, and emit a dot
# every 100th position (with a leading ". " at wrap-around).
114 self.spinpos = (self.spinpos + 1) % 500
115 if self._return_early():
117 if (self.spinpos % 100) == 0:
118 if self.spinpos == 0:
119 sys.stdout.write(". ")
121 sys.stdout.write(".")
124 def update_scroll(self):
# Scrolling-message mode: the message sweeps right then back left; the
# second half of the 2*len cycle walks the string in reverse.
125 if self._return_early():
127 if(self.spinpos >= len(self.scroll_sequence)):
128 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135 def update_twirl(self):
# Twirl mode: step through the /-\| frame cycle and redraw in place.
136 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137 if self._return_early():
139 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# Quiet mode: intentionally produces no output.
142 def update_quiet(self):
# Interactive yes/no (or multi-choice) prompt helper.
# NOTE(review): source lines are elided in this dump (original numbering
# skips); the try/return plumbing between the visible lines is missing here.
145 def userquery(prompt, responses=None, colours=None):
146 """Displays a prompt and a set of responses, then waits for a response
147 which is checked against the responses and the first to match is
148 returned. An empty response will match the first value in responses. The
149 input buffer is *not* cleared prior to the prompt!
152 responses: a List of Strings.
153 colours: a List of Functions taking and returning a String, used to
154 process the responses for display. Typically these will be functions
155 like red() but could be e.g. lambda x: "DisplayString".
156 If responses is omitted, defaults to ["Yes", "No"], [green, red].
157 If only colours is omitted, defaults to [bold, ...].
159 Returns a member of the List responses. (If called without optional
160 arguments, returns "Yes" or "No".)
161 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
# Default choices: Yes/No, coloured via the user's PROMPT_CHOICE_* settings.
163 if responses is None:
164 responses = ["Yes", "No"]
166 create_color_func("PROMPT_CHOICE_DEFAULT"),
167 create_color_func("PROMPT_CHOICE_OTHER")
169 elif colours is None:
# Cycle/truncate the colour list so it is exactly as long as responses.
171 colours=(colours*len(responses))[:len(responses)]
# Prompt like "[Yes/No] " with each choice rendered in its colour.
175 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176 for key in responses:
177 # An empty response will match the first value in responses.
178 if response.upper()==key[:len(response)].upper():
# No prefix matched: complain and re-prompt (loop body elided above).
180 print "Sorry, response '%s' not understood." % response,
# Ctrl-C / EOF is converted to a clean exit per the docstring.
181 except (EOFError, KeyboardInterrupt):
# Top-level command-line tables for emerge:
#  - actions: the verbs emerge accepts (emerge <action> ...).
#  - long options (--ask, --pretend, ...) follow; the enclosing container's
#    opening line is elided in this dump.
#  - shortmapping: single-letter option -> long option translation table.
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen", "search",
189 "sync", "unmerge", "version",
192 "--ask", "--alphabetical",
193 "--buildpkg", "--buildpkgonly",
194 "--changelog", "--columns",
199 "--fetchonly", "--fetch-all-uri",
200 "--getbinpkg", "--getbinpkgonly",
201 "--help", "--ignore-default-opts",
204 "--newuse", "--nocolor",
205 "--nodeps", "--noreplace",
206 "--nospinner", "--oneshot",
207 "--onlydeps", "--pretend",
208 "--quiet", "--resume",
209 "--searchdesc", "--selective",
213 "--usepkg", "--usepkgonly",
# Short-option mapping, e.g. -p == --pretend, -v == --verbose.
220 "b":"--buildpkg", "B":"--buildpkgonly",
221 "c":"--clean", "C":"--unmerge",
222 "d":"--debug", "D":"--deep",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "k":"--usepkg", "K":"--usepkgonly",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps", "O":"--nodeps",
231 "p":"--pretend", "P":"--prune",
233 "s":"--search", "S":"--searchdesc",
236 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log, optionally mirroring a
# short form of the message to the xterm title bar.
# NOTE(review): lock/seek/close plumbing between visible lines is elided in
# this dump; errors are reported to stderr rather than raised.
239 def emergelog(xterm_titles, mystr, short_msg=None):
240 if xterm_titles and short_msg:
# Prefix the title with the hostname when available.
241 if "HOSTNAME" in os.environ:
242 short_msg = os.environ["HOSTNAME"]+": "+short_msg
243 xtermTitle(short_msg)
245 file_path = "/var/log/emerge.log"
246 mylogfile = open(file_path, "a")
# Ensure the log keeps portage-owned permissions even when written as root.
247 portage.util.apply_secpass_permissions(file_path,
248 uid=portage.portage_uid, gid=portage.portage_gid,
# Serialize concurrent emerge processes writing the same log.
252 mylock = portage.locks.lockfile(mylogfile)
253 # seek because we may have gotten held up by the lock.
254 # if so, we may not be positioned at the end of the file.
# Entry format: "<unix-time-seconds>: <message>".
256 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
260 portage.locks.unlockfile(mylock)
# Best-effort logging: failures are printed, never propagated to the caller.
262 except (IOError,OSError,portage.exception.PortageException), e:
264 print >> sys.stderr, "emergelog():",e
# Print a visible countdown (default 5s) before a destructive action so the
# user can Ctrl-C out.  The per-second sleep loop is elided in this dump.
266 def countdown(secs=5, doing="Starting"):
268 print ">>> Waiting",secs,"seconds before starting..."
269 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# Emitted once per remaining second, counting down (loop header elided).
273 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
278 # formats a size given in bytes nicely
# Returns a human-readable "N,NNN kB" style string.  Branch/return lines are
# elided in this dump; visible logic rounds up to whole kB and inserts a
# thousands separator.
279 def format_size(mysize):
# String input is handled separately (branch body elided here).
280 if isinstance(mysize, basestring):
282 if 0 != mysize % 1024:
283 # Always round up to the next kB so that it doesn't show 0 kB when
284 # some small file still needs to be fetched.
285 mysize += 1024 - mysize % 1024
286 mystr=str(mysize/1024)
# Insert a comma as a thousands separator (mycount computed on an elided line).
290 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version string ("gcc-X.Y.Z"), trying three
# strategies in order: gcc-config, the CHOST-prefixed compiler, then plain
# `gcc -dumpversion`.  Falls back to "[unavailable]" with a warning.
294 def getgccversion(chost):
297 return: the current in-use gcc version
300 gcc_ver_command = 'gcc -dumpversion'
301 gcc_ver_prefix = 'gcc-'
303 gcc_not_found_error = red(
304 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305 "!!! to update the environment of this terminal and possibly\n" +
306 "!!! other terminals also.\n"
# 1) Ask gcc-config for the current profile, e.g. "x86_64-pc-linux-gnu-4.1.2".
309 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# 2) Query the CHOST-prefixed compiler directly.
313 mystatus, myoutput = commands.getstatusoutput(
314 chost + "-" + gcc_ver_command)
315 if mystatus == os.EX_OK:
316 return gcc_ver_prefix + myoutput
# 3) Last resort: whatever `gcc` is on PATH.
318 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319 if mystatus == os.EX_OK:
320 return gcc_ver_prefix + myoutput
# Nothing worked: warn the user and return a placeholder.
322 portage.writemsg(gcc_not_found_error, noiselevel=-1)
323 return "[unavailable]"
# Build the banner string shown by `emerge --version`/--info:
# "Portage <ver> (<profile>, <gcc-ver>, <libc-ver>, <kernel> <arch>)".
# NOTE(review): several branch/loop lines are elided in this dump.
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326 profilever = "unavailable"
# Express the profile as a path relative to $PORTDIR/profiles when possible.
328 realpath = os.path.realpath(profile)
329 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
330 if realpath.startswith(basepath):
331 profilever = realpath[1 + len(basepath):]
# Fallback: show the raw symlink target, prefixed with "!".
334 profilever = "!" + os.readlink(profile)
337 del realpath, basepath
# Determine the installed libc version from virtual/libc (and the older
# virtual/glibc), de-duplicated; joined with commas if several match.
340 libclist = vardb.match("virtual/libc")
341 libclist += vardb.match("virtual/glibc")
342 libclist = portage.util.unique_array(libclist)
344 xs=portage.catpkgsplit(x)
346 libcver+=","+"-".join(xs[1:])
348 libcver="-".join(xs[1:])
350 libcver="unavailable"
352 gccver = getgccversion(chost)
353 unameout=platform.release()+" "+platform.machine()
355 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate command-line options + action into the depgraph parameter set.
# The returned set (return line elided in this dump) drives how deeply the
# resolver walks dependencies.
357 def create_depgraph_params(myopts, myaction):
358 #configure emerge engine parameters
360 # self: include _this_ package regardless of if it is merged.
361 # selective: exclude the package if it is merged
362 # recurse: go into the dependencies
363 # deep: go into the dependencies of already merged packages
364 # empty: pretend nothing is merged
365 # complete: completely account for all known dependencies
366 # remove: build graph for use in removing packages
367 myparams = set(["recurse"])
# depclean/prune build a removal graph and need complete accounting.
369 if myaction == "remove":
370 myparams.add("remove")
371 myparams.add("complete")
# These options all imply skipping already-satisfied packages.
374 if "--update" in myopts or \
375 "--newuse" in myopts or \
376 "--reinstall" in myopts or \
377 "--noreplace" in myopts:
378 myparams.add("selective")
# --emptytree overrides selective: treat nothing as installed.
379 if "--emptytree" in myopts:
380 myparams.add("empty")
381 myparams.discard("selective")
382 if "--nodeps" in myopts:
383 myparams.discard("recurse")
384 if "--deep" in myopts:
386 if "--complete-graph" in myopts:
387 myparams.add("complete")
390 # search functionality
# Implements `emerge --search` / `--searchdesc`.  Wraps the available
# databases (porttree, bintree, vartree) behind a fake portdb-like object so
# name/description matching works uniformly across ebuilds, binpkgs and
# installed packages.
# NOTE(review): this dump is line-sampled (original line numbers skip), so
# parts of every method body are missing; comments cover only visible code.
391 class search(object):
402 def __init__(self, root_config, spinner, searchdesc,
403 verbose, usepkg, usepkgonly):
404 """Searches the available and installed packages for the supplied search key.
405 The list of available and installed packages is created at object instantiation.
406 This makes successive searches faster."""
407 self.settings = root_config.settings
408 self.vartree = root_config.trees["vartree"]
409 self.spinner = spinner
410 self.verbose = verbose
411 self.searchdesc = searchdesc
412 self.root_config = root_config
413 self.setconfig = root_config.setconfig
414 self.matches = {"pkg" : []}
# Build a stand-in portdb whose dbapi-style methods dispatch to the
# _-prefixed multiplexing wrappers defined on this class.
419 self.portdb = fake_portdb
420 for attrib in ("aux_get", "cp_all",
421 "xmatch", "findname", "getFetchMap"):
422 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
426 portdb = root_config.trees["porttree"].dbapi
427 bindb = root_config.trees["bintree"].dbapi
428 vardb = root_config.trees["vartree"].dbapi
# Select which databases participate in the search, honouring
# --usepkg/--usepkgonly.
430 if not usepkgonly and portdb._have_root_eclass_dir:
431 self._dbs.append(portdb)
433 if (usepkg or usepkgonly) and bindb.cp_all():
434 self._dbs.append(bindb)
436 self._dbs.append(vardb)
437 self._portdb = portdb
# _cp_all: union of category/package names across all searched dbs, sorted.
442 cp_all.update(db.cp_all())
443 return list(sorted(cp_all))
445 def _aux_get(self, *args, **kwargs):
# Try each db in turn; return the first successful aux_get result.
448 return db.aux_get(*args, **kwargs)
453 def _findname(self, *args, **kwargs):
455 if db is not self._portdb:
456 # We don't want findname to return anything
457 # unless it's an ebuild in a portage tree.
458 # Otherwise, it's already built and we don't
461 func = getattr(db, "findname", None)
463 value = func(*args, **kwargs)
468 def _getFetchMap(self, *args, **kwargs):
# Same multiplexing pattern as _findname, for SRC_URI fetch maps.
470 func = getattr(db, "getFetchMap", None)
472 value = func(*args, **kwargs)
477 def _visible(self, db, cpv, metadata):
# Visibility check that accounts for which db the cpv came from:
# installed packages and binpkgs count as "built".
478 installed = db is self.vartree.dbapi
479 built = installed or db is not self._portdb
482 pkg_type = "installed"
485 return visible(self.settings,
486 Package(type_name=pkg_type, root_config=self.root_config,
487 cpv=cpv, built=built, installed=installed, metadata=metadata))
489 def _xmatch(self, level, atom):
# (docstring continues below)
491 This method does not expand old-style virtuals because it
492 is restricted to returning matches for a single ${CATEGORY}/${PN}
493 and old-style virtual matches unreliable for that when querying
494 multiple package databases. If necessary, old-style virtuals
495 can be performed on atoms prior to calling this method.
497 cp = portage.dep_getkey(atom)
# "match-all": union of matches from every db, restricted to cp, sorted.
498 if level == "match-all":
501 if hasattr(db, "xmatch"):
502 matches.update(db.xmatch(level, atom))
504 matches.update(db.match(atom))
505 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506 db._cpv_sort_ascending(result)
# "match-visible": as above but filtered through self._visible for dbs
# that lack a native xmatch.
507 elif level == "match-visible":
510 if hasattr(db, "xmatch"):
511 matches.update(db.xmatch(level, atom))
513 db_keys = list(db._aux_cache_keys)
514 for cpv in db.match(atom):
515 metadata = izip(db_keys,
516 db.aux_get(cpv, db_keys))
517 if not self._visible(db, cpv, metadata):
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
# "bestmatch-visible": keep the overall best visible cpv across dbs,
# compared via portage.best().
522 elif level == "bestmatch-visible":
525 if hasattr(db, "xmatch"):
526 cpv = db.xmatch("bestmatch-visible", atom)
527 if not cpv or portage.cpv_getkey(cpv) != cp:
529 if not result or cpv == portage.best([cpv, result]):
532 db_keys = Package.metadata_keys
533 # break out of this loop with highest visible
534 # match, checked in descending order
535 for cpv in reversed(db.match(atom)):
536 if portage.cpv_getkey(cpv) != cp:
538 metadata = izip(db_keys,
539 db.aux_get(cpv, db_keys))
540 if not self._visible(db, cpv, metadata):
542 if not result or cpv == portage.best([cpv, result]):
# Any other match level is unsupported here.
546 raise NotImplementedError(level)
549 def execute(self,searchkey):
550 """Performs the search for the supplied search key"""
552 self.searchkey=searchkey
553 self.packagematches = []
# Result buckets; "desc" only exists when description search is enabled.
556 self.matches = {"pkg":[], "desc":[], "set":[]}
559 self.matches = {"pkg":[], "set":[]}
560 print "Searching... ",
# Leading '%' selects regex mode; leading '@' restricts to package sets.
563 if self.searchkey.startswith('%'):
565 self.searchkey = self.searchkey[1:]
566 if self.searchkey.startswith('@'):
568 self.searchkey = self.searchkey[1:]
570 self.searchre=re.compile(self.searchkey,re.I)
572 self.searchre=re.compile(re.escape(self.searchkey), re.I)
# Scan package names (full cat/pkg or just pkg, depending on an elided flag).
573 for package in self.portdb.cp_all():
574 self.spinner.update()
577 match_string = package[:]
579 match_string = package.split("/")[-1]
582 if self.searchre.search(match_string):
# No visible version => the package is masked.
583 if not self.portdb.xmatch("match-visible", package):
585 self.matches["pkg"].append([package,masked])
586 elif self.searchdesc: # DESCRIPTION searching
587 full_package = self.portdb.xmatch("bestmatch-visible", package)
589 #no match found; we don't want to query description
590 full_package = portage.best(
591 self.portdb.xmatch("match-all", package))
597 full_desc = self.portdb.aux_get(
598 full_package, ["DESCRIPTION"])[0]
600 print "emerge: search: aux_get() failed, skipping"
602 if self.searchre.search(full_desc):
603 self.matches["desc"].append([full_package,masked])
# Also search package sets (name and, optionally, DESCRIPTION metadata).
605 self.sdict = self.setconfig.getSets()
606 for setname in self.sdict:
607 self.spinner.update()
609 match_string = setname
611 match_string = setname.split("/")[-1]
613 if self.searchre.search(match_string):
614 self.matches["set"].append([setname, False])
615 elif self.searchdesc:
616 if self.searchre.search(
617 self.sdict[setname].getMetadata("DESCRIPTION")):
618 self.matches["set"].append([setname, False])
# Sort each bucket and tally the total match count.
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
# (elided helper) direct cp lookup path used by exact-match queries.
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
# output(): pretty-print the collected matches (method def line elided).
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
# Set results: print name + set DESCRIPTION and continue.
656 print green("*")+" "+white(match)
657 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
661 desc, homepage, license = self.portdb.aux_get(
662 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664 print "emerge: search: aux_get() failed, skipping"
# Header line, flagged when the best match is masked.
667 print green("*")+" "+white(match)+" "+red("[ Masked ]")
669 print green("*")+" "+white(match)
670 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
# Verbose mode: compute download size from the Manifest / binpkg file.
674 mycat = match.split("/")[0]
675 mypkg = match.split("/")[1]
676 mycpv = match + "-" + myversion
677 myebuild = self.portdb.findname(mycpv)
679 pkgdir = os.path.dirname(myebuild)
680 from portage import manifest
681 mf = manifest.Manifest(
682 pkgdir, self.settings["DISTDIR"])
684 uri_map = self.portdb.getFetchMap(mycpv)
685 except portage.exception.InvalidDependString, e:
686 file_size_str = "Unknown (%s)" % (e,)
690 mysum[0] = mf.getDistfilesSize(uri_map)
692 file_size_str = "Unknown (missing " + \
693 "digest for %s)" % (e,)
# Binary/installed packages: use the binpkg file size instead.
698 if db is not vardb and \
699 db.cpv_exists(mycpv):
701 if not myebuild and hasattr(db, "bintree"):
702 myebuild = db.bintree.getname(mycpv)
704 mysum[0] = os.stat(myebuild).st_size
# Format the byte count as "N,NNN kB" (separator index computed on an
# elided line).
709 if myebuild and file_size_str is None:
710 mystr = str(mysum[0] / 1024)
714 mystr = mystr[:mycount] + "," + mystr[mycount:]
715 file_size_str = mystr + " kB"
719 print " ", darkgreen("Latest version available:"),myversion
720 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
723 (darkgreen("Size of files:"), file_size_str)
724 print " ", darkgreen("Homepage:")+" ",homepage
725 print " ", darkgreen("Description:")+" ",desc
726 print " ", darkgreen("License:")+" ",license
731 def getInstallationStatus(self,package):
# Return a coloured "Latest version installed: ..." line for the output.
732 installed_package = self.vartree.dep_bestmatch(package)
734 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736 result = darkgreen("Latest version installed:")+" "+version
738 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741 def getVersion(self,full_package,detail):
# Extract the version (and -rN revision when detail == VERSION_RELEASE and
# the revision is not r0) from a full cpv string.
742 if len(full_package) > 1:
743 package_parts = portage.catpkgsplit(full_package)
744 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745 result = package_parts[2]+ "-" + package_parts[3]
747 result = package_parts[2]
# Per-ROOT bundle of configuration state used by the depgraph.
752 class RootConfig(object):
753 """This is used internally by depgraph to track information about a
# Maps Package type_name values to the tree that stores that kind of
# package (ebuild sources, binary packages, installed packages).
757 "ebuild" : "porttree",
758 "binary" : "bintree",
759 "installed" : "vartree"
# Build the reverse mapping (tree name -> pkg type) from the table above.
763 for k, v in pkg_tree_map.iteritems():
766 def __init__(self, settings, trees, setconfig):
768 self.settings = settings
# Implicit IUSE is cached sorted as a tuple since it never changes.
769 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770 self.root = self.settings["ROOT"]
771 self.setconfig = setconfig
772 self.sets = self.setconfig.getSets()
# In-memory dbapi tracking which packages are visible for this root.
773 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decide what atom (if any) should be recorded in the world file for pkg.
# NOTE(review): several control-flow lines are elided in this dump; comments
# cover only the visible logic.
775 def create_world_atom(pkg, args_set, root_config):
776 """Create a new atom for the world file if one does not exist. If the
777 argument atom is precise enough to identify a specific slot then a slot
778 atom will be returned. Atoms that are in the system set may also be stored
779 in world since system atoms can only match one slot while world atoms can
780 be greedy with respect to slots. Unslotted system packages will not be
# The atom the user actually typed that matched this package.
783 arg_atom = args_set.findAtomForPackage(pkg)
786 cp = portage.dep_getkey(arg_atom)
788 sets = root_config.sets
789 portdb = root_config.trees["porttree"].dbapi
790 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" if its cp has multiple SLOTs available, or a
# single SLOT that isn't "0".
791 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792 for cpv in portdb.match(cp))
793 slotted = len(available_slots) > 1 or \
794 (len(available_slots) == 1 and "0" not in available_slots)
796 # check the vdb in case this is multislot
797 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798 for cpv in vardb.match(cp))
799 slotted = len(available_slots) > 1 or \
800 (len(available_slots) == 1 and "0" not in available_slots)
801 if slotted and arg_atom != cp:
802 # If the user gave a specific atom, store it as a
803 # slot atom in the world file.
804 slot_atom = pkg.slot_atom
806 # For USE=multislot, there are a couple of cases to
809 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810 # unknown value, so just record an unslotted atom.
812 # 2) SLOT comes from an installed package and there is no
813 # matching SLOT in the portage tree.
815 # Make sure that the slot atom is available in either the
816 # portdb or the vardb, since otherwise the user certainly
817 # doesn't want the SLOT atom recorded in the world file
818 # (case 1 above). If it's only available in the vardb,
819 # the user may be trying to prevent a USE=multislot
820 # package from being removed by --depclean (case 2 above).
823 if not portdb.match(slot_atom):
824 # SLOT seems to come from an installed multislot package
826 # If there is no installed package matching the SLOT atom,
827 # it probably changed SLOT spontaneously due to USE=multislot,
828 # so just record an unslotted atom.
829 if vardb.match(slot_atom):
830 # Now verify that the argument is precise
831 # enough to identify a specific slot.
832 matches = mydb.match(arg_atom)
833 matched_slots = set()
835 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
# Only record the slot atom if the user's atom pins exactly one slot.
836 if len(matched_slots) == 1:
837 new_world_atom = slot_atom
839 if new_world_atom == sets["world"].findAtomForPackage(pkg):
840 # Both atoms would be identical, so there's nothing to add.
843 # Unlike world atoms, system atoms are not greedy for slots, so they
844 # can't be safely excluded from world if they are slotted.
845 system_atom = sets["system"].findAtomForPackage(pkg)
847 if not portage.dep_getkey(system_atom).startswith("virtual/"):
849 # System virtuals aren't safe to exclude from world since they can
850 # match multiple old-style virtuals but only one of them will be
851 # pulled in by update or depclean.
852 providers = portdb.mysettings.getvirtuals().get(
853 portage.dep_getkey(system_atom))
854 if providers and len(providers) == 1 and providers[0] == cp:
856 return new_world_atom
# Strip the +/- default markers from IUSE flags (loop/yield lines elided in
# this dump; only the prefix test is visible).
858 def filter_iuse_defaults(iuse):
860 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: __init__ accepts keyword args matching any
# __slots__ entry declared anywhere in the MRO, and copy() clones all of
# them.  Subclasses declare data via __slots__ only.
865 class SlotObject(object):
866 __slots__ = ("__weakref__",)
868 def __init__(self, **kwargs):
# Walk the class hierarchy collecting every __slots__ declaration.
869 classes = [self.__class__]
874 classes.extend(c.__bases__)
875 slots = getattr(c, "__slots__", None)
# Missing kwargs default to None.
879 myvalue = kwargs.get(myattr, None)
880 setattr(self, myattr, myvalue)
# copy(): method def line elided in this dump; docstring follows.
884 Create a new instance and copy all attributes
885 defined from __slots__ (including those from
888 obj = self.__class__()
# Same MRO walk as __init__, copying each slot value onto the clone.
890 classes = [self.__class__]
895 classes.extend(c.__bases__)
896 slots = getattr(c, "__slots__", None)
900 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities.  All rich comparisons delegate to
# __int__ (defined by subclasses), so priorities compare like integers.
904 class AbstractDepPriority(SlotObject):
905 __slots__ = ("buildtime", "runtime", "runtime_post")
907 def __lt__(self, other):
908 return self.__int__() < other
910 def __le__(self, other):
911 return self.__int__() <= other
913 def __eq__(self, other):
914 return self.__int__() == other
916 def __ne__(self, other):
917 return self.__int__() != other
919 def __gt__(self, other):
920 return self.__int__() > other
922 def __ge__(self, other):
923 return self.__int__() >= other
# copy() (def line elided in this dump): shallow-copy is sufficient since
# slots hold only immutable values.
927 return copy.copy(self)
# Standard dependency priority; adds satisfied/optional/rebuild flags on top
# of the buildtime/runtime/runtime_post slots.  The __int__ mapping and most
# of __str__ are elided in this dump.
929 class DepPriority(AbstractDepPriority):
931 __slots__ = ("satisfied", "optional", "rebuild")
# (visible tail of __str__): label runtime_post deps.
943 if self.runtime_post:
944 return "runtime_post"
# Priority used for blocker ("!atom") dependencies; body elided in this
# dump.  A single shared instance is attached to the class.
947 class BlockerDepPriority(DepPriority):
955 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when ordering unmerges; deps at or below SOFT may be broken.
957 class UnmergeDepPriority(AbstractDepPriority):
958 __slots__ = ("optional", "satisfied",)
# (docstring) priority table for unmerge ordering:
960 Combination of properties Priority Category
965 (none of the above) -2 SOFT
# (visible fragments of __int__/__str__; surrounding lines elided in dump)
975 if self.runtime_post:
982 myvalue = self.__int__()
983 if myvalue > self.SOFT:
# Static helper: a ladder of "ignore" predicates over DepPriority, ordered
# from softest to hardest, used by the scheduler to decide which dependency
# edges may be ignored when breaking cycles.
# NOTE(review): @classmethod decorator and `return False` lines are elided
# in this dump.
987 class DepPriorityNormalRange(object):
989 DepPriority properties Index Category
993 runtime_post 2 MEDIUM_SOFT
995 (none of the above) 0 NONE
# Ignore only optional deps (non-DepPriority edges are never ignored).
1003 def _ignore_optional(cls, priority):
1004 if priority.__class__ is not DepPriority:
1006 return bool(priority.optional)
# Ignore optional and runtime_post deps.
1009 def _ignore_runtime_post(cls, priority):
1010 if priority.__class__ is not DepPriority:
1012 return bool(priority.optional or priority.runtime_post)
# Ignore everything except buildtime deps.
1015 def _ignore_runtime(cls, priority):
1016 if priority.__class__ is not DepPriority:
1018 return not priority.buildtime
# Aliases naming the ladder rungs by severity.
1020 ignore_medium = _ignore_runtime
1021 ignore_medium_soft = _ignore_runtime_post
1022 ignore_soft = _ignore_optional
# Ordered tuple of the ignore predicates, softest first, so callers can
# escalate through them by index.
1024 DepPriorityNormalRange.ignore_priority = (
1026 DepPriorityNormalRange._ignore_optional,
1027 DepPriorityNormalRange._ignore_runtime_post,
1028 DepPriorityNormalRange._ignore_runtime
# Like DepPriorityNormalRange, but with extra rungs that distinguish
# already-satisfied dependencies (which are safer to ignore).
# NOTE(review): @classmethod decorators and `return False/True` lines are
# elided in this dump.
1031 class DepPrioritySatisfiedRange(object):
1033 DepPriority Index Category
1035 not satisfied and buildtime HARD
1036 not satisfied and runtime 7 MEDIUM
1037 not satisfied and runtime_post 6 MEDIUM_SOFT
1038 satisfied and buildtime and rebuild 5 SOFT
1039 satisfied and buildtime 4 SOFT
1040 satisfied and runtime 3 SOFT
1041 satisfied and runtime_post 2 SOFT
1043 (none of the above) 0 NONE
# Rung 1: ignore only optional deps.
1051 def _ignore_optional(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional)
# Rung 2: also ignore satisfied runtime_post deps.
1057 def _ignore_satisfied_runtime_post(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 if priority.optional:
1062 if not priority.satisfied:
1064 return bool(priority.runtime_post)
# Rung 3: ignore any satisfied non-buildtime dep.
1067 def _ignore_satisfied_runtime(cls, priority):
1068 if priority.__class__ is not DepPriority:
1070 if priority.optional:
1072 if not priority.satisfied:
1074 return not priority.buildtime
# Rung 4: ignore satisfied deps, except buildtime deps marked rebuild.
1077 def _ignore_satisfied_buildtime(cls, priority):
1078 if priority.__class__ is not DepPriority:
1080 if priority.optional:
1082 if not priority.satisfied:
1084 if priority.buildtime:
1085 return not priority.rebuild
# Rung 5: ignore all satisfied deps (including rebuild buildtime ones).
1089 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090 if priority.__class__ is not DepPriority:
1092 if priority.optional:
1094 return bool(priority.satisfied)
# Rung 6: additionally ignore unsatisfied runtime_post deps.
1097 def _ignore_runtime_post(cls, priority):
1098 if priority.__class__ is not DepPriority:
1100 return bool(priority.optional or \
1101 priority.satisfied or \
1102 priority.runtime_post)
# Rung 7: ignore everything except unsatisfied buildtime deps.
1105 def _ignore_runtime(cls, priority):
1106 if priority.__class__ is not DepPriority:
1108 return bool(priority.satisfied or \
1109 not priority.buildtime)
# Severity aliases matching DepPriorityNormalRange's naming.
1111 ignore_medium = _ignore_runtime
1112 ignore_medium_soft = _ignore_runtime_post
1113 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Ordered tuple of ignore predicates, softest first, for escalation.
1115 DepPrioritySatisfiedRange.ignore_priority = (
1117 DepPrioritySatisfiedRange._ignore_optional,
1118 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122 DepPrioritySatisfiedRange._ignore_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_runtime
# Collect every Package in the graph reachable from a system-set package by
# following only runtime/runtime_post edges (uninstall nodes excluded).
# Traversal is an explicit stack-based DFS.  NOTE(review): stack init and
# several continue/return lines are elided in this dump.
1126 def _find_deep_system_runtime_deps(graph):
1127 deep_system_deps = set()
# Seed the stack with system-set packages that are not being uninstalled.
1130 if not isinstance(node, Package) or \
1131 node.operation == 'uninstall':
1133 if node.root_config.sets['system'].findAtomForPackage(node):
1134 node_stack.append(node)
1136 def ignore_priority(priority):
1138 Ignore non-runtime priorities.
# Keep runtime and runtime_post edges; filter out everything else.
1140 if isinstance(priority, DepPriority) and \
1141 (priority.runtime or priority.runtime_post):
# DFS: pop, skip already-seen nodes, record, then push runtime children.
1146 node = node_stack.pop()
1147 if node in deep_system_deps:
1149 deep_system_deps.add(node)
1150 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151 if not isinstance(child, Package) or \
1152 child.operation == 'uninstall':
1154 node_stack.append(child)
1156 return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This is implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do it's
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Intialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
def sync(self, acquire_lock=1):
	"""
	Call this method to synchronize state with the real vardb
	after one or more packages may have been installed or
	uninstalled.

	@param acquire_lock: when true and the vdb is writable, hold the
		vdb lock while scanning (released before returning).
	"""
	vdb_path = os.path.join(self.root, portage.VDB_PATH)
	# NOTE(review): a `try:` opener and the except-body are not visible
	# in this chunk — the ensure_dirs failure appears to be tolerated.
	# At least the parent needs to exist for the lock file.
	portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
	if acquire_lock and os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)
	# NOTE(review): intervening lines (the synchronization call guarded by
	# try/finally, presumably) are not visible; unlock pairs with the
	# lockdir above.
	portage.locks.unlockdir(vdb_lock)
	real_vardb = self._root_config.trees["vartree"].dbapi
	current_cpv_set = frozenset(real_vardb.cpv_all())
	pkg_vardb = self.dbapi
	aux_get_history = self._aux_get_history
	# Remove any packages that have been uninstalled.
	for pkg in list(pkg_vardb):
		if pkg.cpv not in current_cpv_set:
			pkg_vardb.cpv_remove(pkg)
			aux_get_history.discard(pkg.cpv)
	# Validate counters and timestamps.
	# NOTE(review): `slot_counters` / `root` initialization lines are not
	# visible in this chunk.
	validation_keys = ["COUNTER", "_mtime_"]
	for cpv in current_cpv_set:
		pkg_hash_key = ("installed", root, cpv, "nomerge")
		pkg = pkg_vardb.get(pkg_hash_key)
		counter, mtime = real_vardb.aux_get(cpv, validation_keys)
		counter = long(counter)
		# Evict cached entries whose counter/mtime no longer match the
		# real vardb. NOTE(review): the continuation of this condition
		# (presumably the mtime comparison) is not visible here.
		if counter != pkg.counter or \
			pkg_vardb.cpv_remove(pkg)
			aux_get_history.discard(pkg.cpv)
		# Re-create a fresh Package instance from the real vardb.
		pkg = self._pkg(cpv)
		# Track the highest counter per slot atom; skip injecting a pkg
		# that is shadowed by a newer counter in the same slot.
		other_counter = slot_counters.get(pkg.slot_atom)
		if other_counter is not None:
			if other_counter > pkg.counter:
		slot_counters[pkg.slot_atom] = pkg.counter
		pkg_vardb.cpv_inject(pkg)
	real_vardb.flush_cache()
def _pkg(self, cpv):
	"""
	Build an installed-package Package instance for `cpv`, pulling its
	metadata from the real vardb and normalizing COUNTER to a string
	representation of its integer value.
	"""
	root_config = self._root_config
	real_vardb = root_config.trees["vartree"].dbapi
	pkg = Package(cpv=cpv, installed=True,
		metadata=izip(self._db_keys,
			real_vardb.aux_get(cpv, self._db_keys)),
		root_config=root_config,
		type_name="installed")
	# NOTE(review): the try/except around this conversion (handling a
	# corrupt COUNTER) and the trailing `return pkg` are not visible in
	# this chunk.
	mycounter = long(pkg.metadata["COUNTER"])
	pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
	"""
	Read and parse all profiles/updates files under `portdir`, returning
	the accumulated list of update commands (move/slotmove).
	NOTE(review): the `try:` opener, the except-body, the initialization
	of `upd_commands`, and the final return are not visible in this chunk.
	"""
	from portage.update import grab_updates, parse_updates
	updpath = os.path.join(portdir, "profiles", "updates")
	rawupdates = grab_updates(updpath)
	except portage.exception.DirectoryNotFound:
	for mykey, mystat, mycontent in rawupdates:
		# parse_updates yields (commands, errors); errors are ignored here.
		commands, errors = parse_updates(mycontent)
		upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
	"""
	Apply the given update commands to the *DEPEND entries of `mycpv`
	in `mydb`, writing back only the entries that actually changed.
	"""
	from portage.update import update_dbentries
	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
	updates = update_dbentries(mycommands, aux_dict)
	# NOTE(review): the guard line before this write (presumably
	# `if updates:`) is not visible in this chunk.
	mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
	"""
	Check if a package is visible. This can raise an InvalidDependString
	exception if LICENSE is invalid.
	TODO: optionally generate a list of masking reasons

	@returns: True if the package is visible, False otherwise.

	NOTE(review): every masked-branch body below (presumably
	`return False`) and the final `return True` are not visible in this
	sampled chunk.
	"""
	# An empty SLOT is invalid metadata — masked.
	if not pkg.metadata["SLOT"]:
	if not pkg.installed:
		# CHOST acceptance only applies to not-yet-installed packages.
		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
	eapi = pkg.metadata["EAPI"]
	if not portage.eapi_is_supported(eapi):
	if not pkg.installed:
		if portage._eapi_is_deprecated(eapi):
	if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
	if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
	if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
	# NOTE(review): the try: opener for the license check is not visible.
	if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
	except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
	"""
	Collect masking reasons for `pkg` via portage.getmaskingstatus(),
	then append reasons that getmaskingstatus does not cover
	(CHOST mismatch for uninstalled packages, undefined SLOT).
	NOTE(review): the trailing `return mreasons` is not visible in this
	sampled chunk.
	"""
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)
	if not pkg.installed:
		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])
	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""
	Fetch metadata for `cpv` from `db` and compute its masking reasons.

	@returns: (metadata, mreasons) — metadata is None (and mreasons is
		["corruption"]) when aux_get fails.
	NOTE(review): the try/except around aux_get and the else-branch of
	the `metadata is None` check are not visible in this sampled chunk.
	"""
	metadata = dict(izip(db_keys,
		db.aux_get(cpv, db_keys)))
	if metadata and not built:
		# For ebuilds, compute USE/CHOST from the config, since the raw
		# metadata has no USE settings applied yet.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
		metadata['CHOST'] = pkgsettings.get('CHOST', '')
	if metadata is None:
		mreasons = ["corruption"]
	pkg = Package(type_name=pkg_type, root_config=root_config,
		cpv=cpv, built=built, installed=installed, metadata=metadata)
	mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""
	Print each masked package with its masking reasons, plus (once each)
	any package.mask comment and license file location involved.

	@param masked_packages: iterable of
		(root_config, pkgsettings, cpv, metadata, mreasons) tuples.
	@returns: True if any package was masked due to an unsupported EAPI.
	NOTE(review): several lines (shown_cpvs init, comment printing,
	license-message printing, the final continue/return plumbing) are not
	visible in this sampled chunk.
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
		comment, filename = None, None
		if "package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		# NOTE(review): a try: opener is not visible here.
		if not portage.eapi_is_supported(metadata["EAPI"]):
			have_eapi_mask = True
		missing_licenses = \
			pkgsettings._getMissingLicenses(
		except portage.exception.InvalidDependString:
			# This will have already been reported
			# above via mreasons.
		print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
		if comment and comment not in shown_comments:
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.") % (l, l_path)
			shown_licenses.add(l)
	return have_eapi_mask
class Task(SlotObject):
	"""
	Base class for schedulable items. Identity (equality, hashing,
	iteration, str) is entirely delegated to the tuple returned by
	_get_hash_key(), which subclasses must provide.
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses compute and cache self._hash_key; this base raises
		# if no subclass has defined one.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			raise NotImplementedError(self)
	def __eq__(self, other):
		return self._get_hash_key() == other
	def __ne__(self, other):
		return self._get_hash_key() != other
		# NOTE(review): the `def __hash__(self):` line is not visible in
		# this sampled chunk; the cached-hash logic below belongs to it.
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value
		# NOTE(review): `def __len__(self):` line not visible.
		return len(self._get_hash_key())
	def __getitem__(self, key):
		return self._get_hash_key()[key]
		# NOTE(review): `def __iter__(self):` line not visible.
		return iter(self._get_hash_key())
	def __contains__(self, key):
		return key in self._get_hash_key()
		# NOTE(review): `def __str__(self):` line not visible.
		return str(self._get_hash_key())
class Blocker(Task):
	"""
	Represents a blocker atom ("!cat/pkg") encountered during dependency
	calculation. Hash identity is ("blocks", root, atom, eapi).
	"""
	# Re-export the base hash explicitly since __eq__ is inherited.
	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# Cache the category/package key of the blocked atom.
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# NOTE(review): the `self._hash_key = \` assignment line is
			# not visible in this sampled chunk; this tuple is its value.
			("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
class Package(Task):
	"""
	Represents one package (ebuild, binary, or installed) during the
	dependency calculation. Hash identity is
	(type_name, root, cpv, operation). Metadata access goes through
	_PackageMetadataWrapper so that derived attributes (iuse, use, slot,
	counter, inherited, mtime) stay synchronized with metadata updates.
	"""
	__hash__ = Task.__hash__
	__slots__ = ("built", "cpv", "depth",
		"installed", "metadata", "onlydeps", "operation",
		"root_config", "type_name",
		"category", "counter", "cp", "cpv_split",
		"inherited", "iuse", "mtime",
		"pf", "pv_split", "root", "slot", "slot_atom", "use")

	# NOTE(review): the opening line of this list
	# (`metadata_keys = [ ...`) is not visible in this sampled chunk.
		"CHOST", "COUNTER", "DEPEND", "EAPI",
		"INHERITED", "IUSE", "KEYWORDS",
		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.root = self.root_config.root
		# Wrap raw metadata so key writes update derived attributes.
		self.metadata = _PackageMetadataWrapper(self, self.metadata)
		self.cp = portage.cpv_getkey(self.cpv)
		# NOTE(review): the slot fallback lines are not visible here.
		# Avoid an InvalidAtom exception when creating slot_atom.
		# This package instance will be masked due to empty SLOT.
		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]

	# NOTE(review): the `class _use(object):` line is not visible in
	# this sampled chunk; the following two lines belong to it.
		__slots__ = ("__weakref__", "enabled")
		def __init__(self, use):
			# Immutable set of enabled USE flags.
			self.enabled = frozenset(use)

	class _iuse(object):
		"""Holds the package's IUSE tokens with +/- prefixes stripped,
		and lazily builds a validation regex on first access."""
		__slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

		def __init__(self, tokens, iuse_implicit):
			self.tokens = tuple(tokens)
			self.iuse_implicit = iuse_implicit
			# NOTE(review): the loop over tokens classifying +x / -x /
			# plain flags is not visible in this sampled chunk.
				enabled.append(x[1:])
				disabled.append(x[1:])
			self.enabled = frozenset(enabled)
			self.disabled = frozenset(disabled)
			self.all = frozenset(chain(enabled, disabled, other))

		def __getattribute__(self, name):
			# NOTE(review): the guard (`if name == "regex":` / `try:`)
			# is not visible; the regex is built lazily on first access.
			return object.__getattribute__(self, "regex")
			except AttributeError:
				all = object.__getattribute__(self, "all")
				iuse_implicit = object.__getattribute__(self, "iuse_implicit")
				# Escape anything except ".*" which is supposed
				# to pass through from _get_implicit_iuse()
				regex = (re.escape(x) for x in chain(all, iuse_implicit))
				regex = "^(%s)$" % "|".join(regex)
				regex = regex.replace("\\.\\*", ".*")
				self.regex = re.compile(regex)
			return object.__getattribute__(self, name)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# Normalize the operation before freezing identity:
			# installed/onlydeps packages are never merged.
			if self.operation is None:
				self.operation = "merge"
				if self.onlydeps or self.installed:
					self.operation = "nomerge"
			# NOTE(review): the `self._hash_key = \` line is not visible.
			(self.type_name, self.root, self.cpv, self.operation)
		return self._hash_key

	# Rich comparisons order same-cp packages by version; the
	# `return True`/`return False` lines are not visible in this
	# sampled chunk (NOTE(review)).
	def __lt__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

	def __le__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

	def __gt__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

	def __ge__(self, other):
		if other.cp != self.cp:
		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Metadata keys retained for Package instances: every auxdb key except the
# UNUSED_ placeholders, minus the legacy CDEPEND key, plus whatever keys
# Package itself declares.
_all_metadata_keys = set(filter(
	lambda x: not x.startswith("UNUSED_"), portage.auxdbkeys))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)
from portage.cache.mappings import slot_dict_class
# Slot-based dict class restricted to the metadata keys collected above;
# serves as the base class for _PackageMetadataWrapper.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
	"""
	Detect metadata updates and synchronize Package attributes.

	Writes to any key in _wrapped_keys are mirrored onto the owning
	Package instance via the matching _set_<key>() handler.
	"""
	__slots__ = ("_pkg",)
	_wrapped_keys = frozenset(
		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

	def __init__(self, pkg, metadata):
		_PackageMetadataWrapperBase.__init__(self)
		# NOTE(review): the `self._pkg = pkg` assignment line is not
		# visible in this sampled chunk.
		self.update(metadata)

	def __setitem__(self, k, v):
		_PackageMetadataWrapperBase.__setitem__(self, k, v)
		# Dispatch to the per-key synchronization handler.
		if k in self._wrapped_keys:
			getattr(self, "_set_" + k.lower())(k, v)

	def _set_inherited(self, k, v):
		# INHERITED is stored as a whitespace-separated string.
		if isinstance(v, basestring):
			v = frozenset(v.split())
		self._pkg.inherited = v

	def _set_iuse(self, k, v):
		self._pkg.iuse = self._pkg._iuse(
			v.split(), self._pkg.root_config.iuse_implicit)

	def _set_slot(self, k, v):
		# NOTE(review): the body (presumably `self._pkg.slot = v`) is not
		# visible in this sampled chunk.
	def _set_use(self, k, v):
		self._pkg.use = self._pkg._use(v.split())

	def _set_counter(self, k, v):
		# NOTE(review): the string-to-long conversion lines are not
		# visible in this sampled chunk.
		if isinstance(v, basestring):
		self._pkg.counter = v

	def _set__mtime_(self, k, v):
		# NOTE(review): the conversion and `self._pkg.mtime = v` lines
		# are not visible in this sampled chunk.
		if isinstance(v, basestring):
class EbuildFetchonly(SlotObject):
	"""
	Synchronously fetch an ebuild's sources (the --fetchonly path).
	When RESTRICT=fetch is set, the fetch runs with a build directory so
	pkg_nofetch can be spawned; otherwise a plain doebuild fetch is used.
	"""
	__slots__ = ("fetch_all", "pkg", "pretend", "settings")

	# NOTE(review): the `def execute(self):` line is not visible in this
	# sampled chunk; the following body belongs to it.
		settings = self.settings
		# NOTE(review): `pkg = self.pkg` line not visible here.
		portdb = pkg.root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		settings.setcpv(pkg)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		use_cache = 1 # always true
		portage.doebuild_environment(ebuild_path, "fetch",
			settings["ROOT"], settings, debug, use_cache, portdb)
		restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
		# RESTRICT=fetch needs a builddir so pkg_nofetch can run.
		rval = self._execute_with_builddir()
		rval = portage.doebuild(ebuild_path, "fetch",
			settings["ROOT"], settings, debug=debug,
			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
			mydbapi=portdb, tree="porttree")
		if rval != os.EX_OK:
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
			eerror(msg, phase="unpack", key=pkg.cpv)

	def _execute_with_builddir(self):
		# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
		# ensuring sane $PWD (bug #239560) and storing elog
		# messages. Use a private temp directory, in order
		# to avoid locking the main one.
		settings = self.settings
		global_tmpdir = settings["PORTAGE_TMPDIR"]
		from tempfile import mkdtemp
		# NOTE(review): the try:/except OSError lines around mkdtemp are
		# not visible in this sampled chunk.
		private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
			if e.errno != portage.exception.PermissionDenied.errno:
			raise portage.exception.PermissionDenied(global_tmpdir)
		settings["PORTAGE_TMPDIR"] = private_tmpdir
		settings.backup_changes("PORTAGE_TMPDIR")
		# NOTE(review): the try/finally around _execute() (restoring
		# PORTAGE_TMPDIR and removing the temp dir) is partially missing.
		retval = self._execute()
		settings["PORTAGE_TMPDIR"] = global_tmpdir
		settings.backup_changes("PORTAGE_TMPDIR")
		shutil.rmtree(private_tmpdir)

	# NOTE(review): the `def _execute(self):` line is not visible in this
	# sampled chunk; the following body belongs to it.
		settings = self.settings
		# NOTE(review): `pkg = self.pkg` line not visible here.
		root_config = pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
		retval = portage.doebuild(ebuild_path, "fetch",
			self.settings["ROOT"], self.settings, debug=debug,
			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
			mydbapi=portdb, tree="porttree")
		if retval != os.EX_OK:
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
			eerror(msg, phase="unpack", key=pkg.cpv)
		# Flush any elog messages (including pkg_nofetch output).
		portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):
	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""
	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# Pull each constant from select when available, otherwise use a
	# synthesized fallback value.
	# NOTE(review): the loop scaffolding around this line (the `for k in
	# names:` loop and fallback-value counter) is not visible in this
	# sampled chunk.
	locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
	"""
	Subclasses override _wait() and _poll() so that calls
	to public methods can be wrapped for implementing
	hooks such as exit listener notification.

	Sublasses should call self.wait() to notify exit listeners after
	the task is complete and self.returncode has been set.
	"""
	__slots__ = ("background", "cancelled", "returncode") + \
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")

	# NOTE(review): the `def start(self):` and `def _start(self):` lines
	# are not visible in this sampled chunk; the fragments below belong
	# to them.
		Start an asynchronous task and then return as soon as possible.
		raise NotImplementedError(self)

	# NOTE(review): `def isAlive(self):` line not visible.
		return self.returncode is None

	# NOTE(review): `def poll(self):` / `def _poll(self):` lines not
	# visible; these fragments belong to them.
		return self.returncode
	if self.returncode is None:
		return self.returncode

	# NOTE(review): `def wait(self):` / `def _wait(self):` lines not
	# visible.
		return self.returncode

	# NOTE(review): `def cancel(self):` line not visible.
	self.cancelled = True

	def addStartListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._start_listeners is None:
			self._start_listeners = []
		self._start_listeners.append(f)

	def removeStartListener(self, f):
		if self._start_listeners is None:
			# NOTE(review): the body here (presumably `return`) is not
			# visible in this sampled chunk.
		self._start_listeners.remove(f)

	def _start_hook(self):
		# Fire-and-clear: listeners run at most once, in FIFO order.
		if self._start_listeners is not None:
			start_listeners = self._start_listeners
			self._start_listeners = None
			for f in start_listeners:
				# NOTE(review): the call line (presumably `f(self)`) is
				# not visible in this sampled chunk.

	def addExitListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._exit_listeners is None:
			self._exit_listeners = []
		self._exit_listeners.append(f)

	def removeExitListener(self, f):
		if self._exit_listeners is None:
			# Listeners may already be staged on the stack inside
			# _wait_hook(); remove from there instead.
			if self._exit_listener_stack is not None:
				self._exit_listener_stack.remove(f)
			# NOTE(review): the `return` line is not visible here.
		self._exit_listeners.remove(f)

	def _wait_hook(self):
		"""
		Call this method after the task completes, just before returning
		the returncode from wait() or poll(). This hook is
		used to trigger exit listeners when the returncode first
		becomes available.
		"""
		if self.returncode is not None and \
			self._exit_listeners is not None:

			# This prevents recursion, in case one of the
			# exit handlers triggers this method again by
			# calling wait(). Use a stack that gives
			# removeExitListener() an opportunity to consume
			# listeners from the stack, before they can get
			# called below. This is necessary because a call
			# to one exit listener may result in a call to
			# removeExitListener() for another listener on
			# the stack. That listener needs to be removed
			# from the stack since it would be inconsistent
			# to call it after it has been been passed into
			# removeExitListener().
			self._exit_listener_stack = self._exit_listeners
			self._exit_listeners = None

			self._exit_listener_stack.reverse()
			while self._exit_listener_stack:
				self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
	"""
	Base for tasks driven by the scheduler's poll() loop; defines the
	event masks used when registering file descriptors.
	NOTE(review): additional __slots__ entries and the _bufsize constant
	are not visible in this sampled chunk.
	"""
	__slots__ = ("scheduler",) + \

	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \

	def _unregister(self):
		raise NotImplementedError(self)

	def _unregister_if_appropriate(self, event):
		# Stop watching the fd on error/invalid or hangup events.
		# NOTE(review): the branch bodies (presumably _unregister() and
		# wait() calls) are not visible in this sampled chunk.
		if self._registered:
			if event & self._exceptional_events:
			elif event & PollConstants.POLLHUP:
class PipeReader(AbstractPollTask):
	"""
	Reads output from one or more files and saves it in memory,
	for retrieval via the getvalue() method. This is driven by
	the scheduler's poll() loop, so it runs entirely within the
	main thread.
	"""
	__slots__ = ("input_files",) + \
		("_read_data", "_reg_ids")

	# NOTE(review): the `def _start(self):` line is not visible in this
	# sampled chunk; the following body belongs to it.
		self._reg_ids = set()
		self._read_data = []
		for k, f in self.input_files.iteritems():
			# Switch each input fd to non-blocking mode before handing
			# it to the scheduler.
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
			self._reg_ids.add(self.scheduler.register(f.fileno(),
				self._registered_events, self._output_handler))
		self._registered = True

	# NOTE(review): `def isAlive(self):` line not visible.
		return self._registered

	# NOTE(review): `def cancel(self):` line not visible.
	if self.returncode is None:
		self.cancelled = True

	# NOTE(review): `def _wait(self):` line not visible.
	if self.returncode is not None:
		return self.returncode
	# Drive the poll loop until all inputs reach EOF.
	if self._registered:
		self.scheduler.schedule(self._reg_ids)
	self.returncode = os.EX_OK
	return self.returncode

	# NOTE(review): `def getvalue(self):` line not visible.
		"""Retrieve the entire contents"""
		if sys.hexversion >= 0x3000000:
			return bytes().join(self._read_data)
		return "".join(self._read_data)

	# NOTE(review): `def close(self):` line not visible.
		"""Free the memory buffer."""
		self._read_data = None

	def _output_handler(self, fd, event):
		if event & PollConstants.POLLIN:
			# Find the file object that matches the ready fd.
			for f in self.input_files.itervalues():
				if fd == f.fileno():
			# NOTE(review): the loop `break` and the try/EOFError lines
			# around the read are not visible in this sampled chunk.
			buf = array.array('B')
			buf.fromfile(f, self._bufsize)
			self._read_data.append(buf.tostring())
		self._unregister_if_appropriate(event)
		return self._registered

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""
		self._registered = False

		if self._reg_ids is not None:
			for reg_id in self._reg_ids:
				self.scheduler.unregister(reg_id)
			self._reg_ids = None

		if self.input_files is not None:
			for f in self.input_files.itervalues():
				# NOTE(review): the `f.close()` line is not visible here.
			self.input_files = None
class CompositeTask(AsynchronousTask):
	"""
	A task implemented as a chain of sub-tasks; exactly one sub-task
	(self._current_task) is active at a time, and exit handlers advance
	the chain.
	"""
	__slots__ = ("scheduler",) + ("_current_task",)

	# NOTE(review): `def isAlive(self):` line not visible in this
	# sampled chunk.
		return self._current_task is not None

	# NOTE(review): `def cancel(self):` line not visible.
	self.cancelled = True
	if self._current_task is not None:
		self._current_task.cancel()

	# NOTE(review): `def _poll(self):` line and its docstring delimiters
	# are not visible; the fragments below belong to it.
		This does a loop calling self._current_task.poll()
		repeatedly as long as the value of self._current_task
		keeps changing. It calls poll() a maximum of one time
		for a given self._current_task instance. This is useful
		since calling poll() on a task can trigger advance to
		the next task could eventually lead to the returncode
		being set in cases when polling only a single task would
		not have the same effect.
		task = self._current_task
		if task is None or task is prev:
			# don't poll the same task more than once
		return self.returncode

	# NOTE(review): `def _wait(self):` line not visible; the loop and
	# poll/wait plumbing lines are partially missing below.
		task = self._current_task
			# don't wait for the same task more than once
			# Before the task.wait() method returned, an exit
			# listener should have set self._current_task to either
			# a different task or None. Something is wrong.
			raise AssertionError("self._current_task has not " + \
				"changed since calling wait", self, task)
		return self.returncode

	def _assert_current(self, task):
		"""
		Raises an AssertionError if the given task is not the
		same one as self._current_task.
		"""
		if task is not self._current_task:
			raise AssertionError("Unrecognized task: %s" % (task,))

	def _default_exit(self, task):
		"""
		Calls _assert_current() on the given task and then sets the
		composite returncode attribute if task.returncode != os.EX_OK.
		If the task failed then self._current_task will be set to None.
		Subclasses can use this as a generic task exit callback.

		@returns: The task.returncode attribute.
		"""
		self._assert_current(task)
		if task.returncode != os.EX_OK:
			self.returncode = task.returncode
			self._current_task = None
		return task.returncode

	def _final_exit(self, task):
		"""
		Assumes that task is the final task of this composite task.
		Calls _default_exit() and sets self.returncode to the task's
		returncode and sets self._current_task to None.
		"""
		self._default_exit(task)
		self._current_task = None
		self.returncode = task.returncode
		return self.returncode

	def _default_final_exit(self, task):
		"""
		This calls _final_exit() and then wait().
		Subclasses can use this as a generic final task exit callback.
		NOTE(review): the trailing wait()/return lines are not visible
		in this sampled chunk.
		"""
		self._final_exit(task)

	def _start_task(self, task, exit_handler):
		"""
		Register exit handler for the given task, set it
		as self._current_task, and call task.start().

		Subclasses can use this as a generic way to start
		a task.
		NOTE(review): the `task.start()` line is not visible here.
		"""
		task.addExitListener(exit_handler)
		self._current_task = task
class TaskSequence(CompositeTask):
	"""
	A collection of tasks that executes sequentially. Each task
	must have a addExitListener() method that can be used as
	a means to trigger movement from one task to the next.
	"""
	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		# FIFO of tasks still to run.
		self._task_queue = deque()

	def add(self, task):
		self._task_queue.append(task)

	# NOTE(review): the `def _start(self):` line is not visible in this
	# sampled chunk.
		self._start_next_task()

	# NOTE(review): the `def cancel(self):` line is not visible here.
		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		self._start_task(self._task_queue.popleft(),
			self._task_exit_handler)

	def _task_exit_handler(self, task):
		# On failure, stop; otherwise run the next queued task, or finish.
		# NOTE(review): the wait() calls and `else:` line are not visible
		# in this sampled chunk.
		if self._default_exit(task) != os.EX_OK:
		elif self._task_queue:
			self._start_next_task()
			self._final_exit(task)
class SubProcess(AbstractPollTask):
	"""
	Poll-loop task that wraps a forked child process identified by
	self.pid. Exit status is harvested via waitpid() and normalized
	into self.returncode.
	"""
	__slots__ = ("pid",) + \
		("_files", "_reg_id")

	# A file descriptor is required for the scheduler to monitor changes from
	# inside a poll() loop. When logging is not enabled, create a pipe just to
	# serve this purpose alone.
	# NOTE(review): the _dummy_pipe_fd constant line is not visible here.

	# NOTE(review): the `def _poll(self):` line is not visible in this
	# sampled chunk; the following body belongs to it.
		if self.returncode is not None:
			return self.returncode
		if self.pid is None:
			return self.returncode
		if self._registered:
			return self.returncode
		# NOTE(review): the try:/except OSError lines around waitpid are
		# not visible here.
		retval = os.waitpid(self.pid, os.WNOHANG)
			if e.errno != errno.ECHILD:
			# Assume the child died if waitpid says no such child.
			retval = (self.pid, 1)
		if retval == (0, 0):
			# Still running. NOTE(review): the `return None` body is not
			# visible here.
		self._set_returncode(retval)
		return self.returncode

	# NOTE(review): `def cancel(self):` line not visible; kill + wait
	# plumbing below is partially missing.
		os.kill(self.pid, signal.SIGTERM)
			if e.errno != errno.ESRCH:
		self.cancelled = True
		if self.pid is not None:
		return self.returncode

	# NOTE(review): `def isAlive(self):` line not visible.
		return self.pid is not None and \
			self.returncode is None

	# NOTE(review): `def _wait(self):` line not visible.
		if self.returncode is not None:
			return self.returncode

		if self._registered:
			# Let the scheduler drive until the pipe reports EOF.
			self.scheduler.schedule(self._reg_id)
			if self.returncode is not None:
				return self.returncode

		# NOTE(review): try:/except OSError lines around the blocking
		# waitpid are not visible here.
		wait_retval = os.waitpid(self.pid, 0)
			if e.errno != errno.ECHILD:
			self._set_returncode((self.pid, 1))
		self._set_returncode(wait_retval)

		return self.returncode

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""
		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			# NOTE(review): the `self._reg_id = None` line is not visible.

		if self._files is not None:
			for f in self._files.itervalues():
				# NOTE(review): f.close() / self._files = None lines are
				# not visible here.

	def _set_returncode(self, wait_retval):
		# Convert the waitpid status word into a shell-style returncode:
		# signal deaths map to 128+signum-style values, normal exits to
		# the exit status.
		retval = wait_retval[1]
		if retval != os.EX_OK:
			# NOTE(review): the `if retval & 0xff:` / `else:` lines are
			# not visible in this sampled chunk.
			retval = (retval & 0xff) << 8
			retval = retval >> 8
		self.returncode = retval
class SpawnProcess(SubProcess):
	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	argument to spawn().
	"""
	# Keyword args forwarded verbatim to portage.process.spawn().
	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \

	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

	# NOTE(review): the `def _start(self):` line is not visible in this
	# sampled chunk; the following body belongs to it.
		if self.fd_pipes is None:
			# NOTE(review): `self.fd_pipes = {}` line not visible here.
		fd_pipes = self.fd_pipes
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				# NOTE(review): the flush() call lines are not visible.
			if fd == sys.stderr.fileno():

		logfile = self.logfile
		self._files = self._files_dict()
		# NOTE(review): `files = self._files` line not visible here.
		master_fd, slave_fd = self._pipe(fd_pipes)
		# Monitor the master end without blocking.
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes_orig = fd_pipes.copy()
		# NOTE(review): the branch selecting background vs foreground
		# stdin handling is partially missing here.
		# TODO: Use job control functions like tcsetpgrp() to control
		# access to stdin. Until then, use /dev/null so that any
		# attempts to read from stdin will immediately return EOF
		# instead of blocking indefinitely.
		null_input = open('/dev/null', 'rb')
		fd_pipes[0] = null_input.fileno()
		fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'rb')
		if logfile is not None:
			# Tee child stdout/stderr through the pty/pipe into the log
			# (and the real stdout when not in the background).
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, mode='ab')
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,

			if not self.background:
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

			output_handler = self._output_handler

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			# NOTE(review): the background-redirect guard lines are not
			# visible here.
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		# Collect the spawn() kwargs from the matching instance slots.
		# NOTE(review): `kwargs = {}` initialization line not visible.
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		# Parent no longer needs the slave end or the /dev/null handle.
		if null_input is not None:

		if isinstance(retval, int):
			# spawn failed
			self.returncode = retval
			# NOTE(review): _unregister()/wait() cleanup lines are not
			# visible in this sampled chunk.

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		NOTE(review): the `return os.pipe()` line is not visible in this
		sampled chunk.
		"""

	def _spawn(self, args, **kwargs):
		# Default spawn implementation; subclasses override to customize.
		return portage.process.spawn(args, **kwargs)

	def _output_handler(self, fd, event):
		# Copy available child output to stdout (unless backgrounded)
		# and to the log file.
		if event & PollConstants.POLLIN:
			# NOTE(review): the try/EOFError lines around the read, and
			# the `files = self._files` binding, are not visible here.
			buf = array.array('B')
			buf.fromfile(files.process, self._bufsize)
			if not self.background:
				buf.tofile(files.stdout)
				files.stdout.flush()
			buf.tofile(files.log)
		self._unregister_if_appropriate(event)
		return self._registered

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""
		if event & PollConstants.POLLIN:
			# Drain and discard; only EOF/HUP matters here.
			# NOTE(review): the try/EOFError lines around the read are
			# not visible in this sampled chunk.
			buf = array.array('B')
			buf.fromfile(self._files.process, self._bufsize)
		self._unregister_if_appropriate(event)
		return self._registered
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	"""
	__slots__ = ("commands", "phase", "pkg", "settings")

	# NOTE(review): the `def _start(self):` line is not visible in this
	# sampled chunk; the following body belongs to it.
		settings = self.settings
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		# Remove any stale exit-status file before spawning.
		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		# Join the command words into one shell string and run it via
		# portage.spawn() with the ebuild environment.
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Reconcile the raw process status with the phase's exit-status
		# file (presumably logging on mismatch — see
		# portage._doebuild_exit_status_check_and_log).
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
	"""
	Fetches an ebuild's sources asynchronously by spawning
	`ebuild <path> fetch` as a child process.
	NOTE(review): additional __slots__ entries (e.g. _build_dir) are on a
	continuation line not visible in this sampled chunk.
	"""
	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

	# NOTE(review): the `def _start(self):` line is not visible in this
	# sampled chunk; the following body belongs to it.
		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		settings.setcpv(self.pkg)

		# In prefetch mode, logging goes to emerge-fetch.log and the builddir
		# should not be touched since otherwise it could interfere with
		# another instance of the same cpv concurrently being built for a
		# different $ROOT (currently, builds only cooperate with prefetchers
		# that are spawned for the same $ROOT).
		if not self.prefetch:
			self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
			self._build_dir.lock()
			self._build_dir.clean()
			portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
			if self.logfile is None:
				self.logfile = settings.get("PORTAGE_LOG_FILE")

		# NOTE(review): the phase-selection lines (choosing "fetch" vs
		# "fetchall") are not visible in this sampled chunk.

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		nocolor = settings.get("NOCOLOR")
		if nocolor is not None:
			fetch_env["NOCOLOR"] = nocolor

		# Avoid renicing the fetch child a second time.
		fetch_env["PORTAGE_NICENESS"] = "0"
		# NOTE(review): the prefetch guard around this line is not
		# visible here.
		fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
		# NOTE(review): the `if debug:` guard line is not visible here.
		fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			# NOTE(review): the `return os.pipe()` line is not visible
			# in this sampled chunk.
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				# NOTE(review): `elog_out = None` initialization is not
				# visible in this sampled chunk.
				if self.logfile is not None:
					# NOTE(review): a background-mode guard is likely
					# missing here — TODO confirm.
					elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
					# NOTE(review): the elog_out.close() line is not
					# visible here.
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
				features = self._build_dir.settings.features
				if self.returncode == os.EX_OK:
					self._build_dir.clean()
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
# Build-directory manager: derives PORTAGE_BUILDDIR for a package,
# creates the category and build directories with portage group
# ownership, and provides lock()/clean()/unlock() primitives.
2586 class EbuildBuildDir(SlotObject):
2588 __slots__ = ("dir_path", "pkg", "settings",
2589 "locked", "_catdir", "_lock_obj")
2591 def __init__(self, **kwargs):
2592 SlotObject.__init__(self, **kwargs)
# lock(): acquire the build dir lock.  NOTE(review): the `def lock`
# header and docstring delimiters are elided from this excerpt.
2597 This raises an AlreadyLocked exception if lock() is called
2598 while a lock is already held. In order to avoid this, call
2599 unlock() or check whether the "locked" attribute is True
2600 or False before calling lock().
2602 if self._lock_obj is not None:
2603 raise self.AlreadyLocked((self._lock_obj,))
2605 dir_path = self.dir_path
2606 if dir_path is None:
# No explicit dir_path: compute PORTAGE_BUILDDIR by running
# doebuild_environment() for the "setup" phase on this ebuild.
2607 root_config = self.pkg.root_config
2608 portdb = root_config.trees["porttree"].dbapi
2609 ebuild_path = portdb.findname(self.pkg.cpv)
2610 settings = self.settings
2611 settings.setcpv(self.pkg)
2612 debug = settings.get("PORTAGE_DEBUG") == "1"
2613 use_cache = 1 # always true
2614 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615 self.settings, debug, use_cache, portdb)
2616 dir_path = self.settings["PORTAGE_BUILDDIR"]
2618 catdir = os.path.dirname(dir_path)
2619 self._catdir = catdir
# Hold a lock on the category dir while creating it and the build
# dir, so concurrent emerges don't race on directory creation.
2621 portage.util.ensure_dirs(os.path.dirname(catdir),
2622 gid=portage.portage_gid,
2626 catdir_lock = portage.locks.lockdir(catdir)
2627 portage.util.ensure_dirs(catdir,
2628 gid=portage.portage_gid,
2630 self._lock_obj = portage.locks.lockdir(dir_path)
2632 self.locked = self._lock_obj is not None
2633 if catdir_lock is not None:
2634 portage.locks.unlockdir(catdir_lock)
# clean(): remove the build dir directly with shutil.rmtree().
2637 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2638 by keepwork or keeptemp in FEATURES."""
2639 settings = self.settings
2640 features = settings.features
2641 if not ("keepwork" in features or "keeptemp" in features):
2643 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2644 except EnvironmentError, e:
# A missing build dir is fine; anything else propagates.
2645 if e.errno != errno.ENOENT:
# unlock(): release the build dir lock, then try to remove the (now
# possibly empty) category dir while holding its lock.
2650 if self._lock_obj is None:
2653 portage.locks.unlockdir(self._lock_obj)
2654 self._lock_obj = None
2657 catdir = self._catdir
2660 catdir_lock = portage.locks.lockdir(catdir)
# Ignore the expected rmdir failures: already gone, not empty,
# or recreated by a concurrent process.
2666 if e.errno not in (errno.ENOENT,
2667 errno.ENOTEMPTY, errno.EEXIST):
2670 portage.locks.unlockdir(catdir_lock)
# Raised by lock() when a lock is already held by this instance.
2672 class AlreadyLocked(portage.exception.PortageException):
# CompositeTask that builds a package from source: waits on any
# background prefetcher, runs the fetch, then the clean/build phases
# via EbuildExecuter, optionally creates a binary package
# (--buildpkg / FEATURES=buildsyspkg), and finally merges the result.
2675 class EbuildBuild(CompositeTask):
2677 __slots__ = ("args_set", "config_pool", "find_blockers",
2678 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2679 "prefetcher", "settings", "world_atom") + \
2680 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# _start(): prime settings for this cpv, locate the ebuild, then
# either wait on a live prefetcher or go straight to _prefetch_exit.
2684 logger = self.logger
2687 settings = self.settings
2688 world_atom = self.world_atom
2689 root_config = pkg.root_config
2692 portdb = root_config.trees[tree].dbapi
2693 settings.setcpv(pkg)
2694 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2695 ebuild_path = portdb.findname(self.pkg.cpv)
2696 self._ebuild_path = ebuild_path
2698 prefetcher = self.prefetcher
2699 if prefetcher is None:
2701 elif not prefetcher.isAlive():
2703 elif prefetcher.poll() is None:
# Prefetcher still running: print a hint (unless backgrounded)
# and resume through its exit listener.
2705 waiting_msg = "Fetching files " + \
2706 "in the background. " + \
2707 "To view fetch progress, run `tail -f " + \
2708 "/var/log/emerge-fetch.log` in another " + \
2710 msg_prefix = colorize("GOOD", " * ")
2711 from textwrap import wrap
2712 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2713 for line in wrap(waiting_msg, 65))
2714 if not self.background:
2715 writemsg(waiting_msg, noiselevel=-1)
2717 self._current_task = prefetcher
2718 prefetcher.addExitListener(self._prefetch_exit)
2721 self._prefetch_exit(prefetcher)
# _prefetch_exit(): after the prefetcher finishes, run either a
# synchronous EbuildFetchonly (pretend/fetch-only modes) or an
# asynchronous EbuildFetcher chained to _fetch_exit.
2723 def _prefetch_exit(self, prefetcher):
2727 settings = self.settings
2730 fetcher = EbuildFetchonly(
2731 fetch_all=opts.fetch_all_uri,
2732 pkg=pkg, pretend=opts.pretend,
2734 retval = fetcher.execute()
2735 self.returncode = retval
2739 fetcher = EbuildFetcher(config_pool=self.config_pool,
2740 fetchall=opts.fetch_all_uri,
2741 fetchonly=opts.fetchonly,
2742 background=self.background,
2743 pkg=pkg, scheduler=self.scheduler)
2745 self._start_task(fetcher, self._fetch_exit)
# _fetch_exit(): record fetch success/failure, preserve the fetch log
# on failure (or delete it on success), then lock the build dir and
# start the EbuildExecuter build.
2747 def _fetch_exit(self, fetcher):
2751 fetch_failed = False
2753 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2755 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2757 if fetch_failed and fetcher.logfile is not None and \
2758 os.path.exists(fetcher.logfile):
2759 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2761 if not fetch_failed and fetcher.logfile is not None:
2762 # Fetch was successful, so remove the fetch log.
2764 os.unlink(fetcher.logfile)
2768 if fetch_failed or opts.fetchonly:
2772 logger = self.logger
2774 pkg_count = self.pkg_count
2775 scheduler = self.scheduler
2776 settings = self.settings
2777 features = settings.features
2778 ebuild_path = self._ebuild_path
2779 system_set = pkg.root_config.sets["system"]
2781 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2782 self._build_dir.lock()
2784 # Cleaning is triggered before the setup
2785 # phase, in portage.doebuild().
2786 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2787 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2788 short_msg = "emerge: (%s of %s) %s Clean" % \
2789 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2790 logger.log(msg, short_msg=short_msg)
2792 #buildsyspkg: Check if we need to _force_ binary package creation
2793 self._issyspkg = "buildsyspkg" in features and \
2794 system_set.findAtomForPackage(pkg) and \
2797 if opts.buildpkg or self._issyspkg:
2799 self._buildpkg = True
2801 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2802 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2803 short_msg = "emerge: (%s of %s) %s Compile" % \
2804 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2805 logger.log(msg, short_msg=short_msg)
2808 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2809 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2810 short_msg = "emerge: (%s of %s) %s Compile" % \
2811 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2812 logger.log(msg, short_msg=short_msg)
2814 build = EbuildExecuter(background=self.background, pkg=pkg,
2815 scheduler=scheduler, settings=settings)
2816 self._start_task(build, self._build_exit)
# Flush elog messages and release the build dir lock.
2818 def _unlock_builddir(self):
2819 portage.elog.elog_process(self.pkg.cpv, self.settings)
2820 self._build_dir.unlock()
# _build_exit(): on build failure release the lock; on success either
# hand off to EbuildBinpkg (when packaging) or finish.
2822 def _build_exit(self, build):
2823 if self._default_exit(build) != os.EX_OK:
2824 self._unlock_builddir()
2829 buildpkg = self._buildpkg
2832 self._final_exit(build)
# For forced system-package builds, announce the rescue tarball in
# the build log and on stdout.
2837 msg = ">>> This is a system package, " + \
2838 "let's pack a rescue tarball.\n"
2840 log_path = self.settings.get("PORTAGE_LOG_FILE")
2841 if log_path is not None:
2842 log_file = open(log_path, 'a')
2848 if not self.background:
2849 portage.writemsg_stdout(msg, noiselevel=-1)
2851 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2852 scheduler=self.scheduler, settings=self.settings)
2854 self._start_task(packager, self._buildpkg_exit)
2856 def _buildpkg_exit(self, packager):
# NOTE(review): docstring delimiters elided in this excerpt.
2858 Released build dir lock when there is a failure or
2859 when in buildpkgonly mode. Otherwise, the lock will
2860 be released when merge() is called.
2863 if self._default_exit(packager) != os.EX_OK:
2864 self._unlock_builddir()
2868 if self.opts.buildpkgonly:
2869 # Need to call "clean" phase for buildpkgonly mode
2870 portage.elog.elog_process(self.pkg.cpv, self.settings)
2872 clean_phase = EbuildPhase(background=self.background,
2873 pkg=self.pkg, phase=phase,
2874 scheduler=self.scheduler, settings=self.settings,
2876 self._start_task(clean_phase, self._clean_exit)
2879 # Continue holding the builddir lock until
2880 # after the package has been installed.
2881 self._current_task = None
2882 self.returncode = packager.returncode
2885 def _clean_exit(self, clean_phase):
2886 if self._final_exit(clean_phase) != os.EX_OK or \
2887 self.opts.buildpkgonly:
2888 self._unlock_builddir()
# install(): merge the built image into the live filesystem.
# NOTE(review): the `def install` header and docstring delimiters are
# elided in this excerpt.
2893 Install the package and then clean up and release locks.
2894 Only call this after the build has completed successfully
2895 and neither fetchonly nor buildpkgonly mode are enabled.
2898 find_blockers = self.find_blockers
2899 ldpath_mtimes = self.ldpath_mtimes
2900 logger = self.logger
2902 pkg_count = self.pkg_count
2903 settings = self.settings
2904 world_atom = self.world_atom
2905 ebuild_path = self._ebuild_path
2908 merge = EbuildMerge(find_blockers=self.find_blockers,
2909 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2910 pkg_count=pkg_count, pkg_path=ebuild_path,
2911 scheduler=self.scheduler,
2912 settings=settings, tree=tree, world_atom=world_atom)
2914 msg = " === (%s of %s) Merging (%s::%s)" % \
2915 (pkg_count.curval, pkg_count.maxval,
2916 pkg.cpv, ebuild_path)
2917 short_msg = "emerge: (%s of %s) %s Merge" % \
2918 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2919 logger.log(msg, short_msg=short_msg)
2922 rval = merge.execute()
# Lock is released here regardless of merge outcome.
2924 self._unlock_builddir()
# CompositeTask that runs the source-build phase sequence for one
# ebuild: clean -> setup -> unpack -> (prepare, configure, compile,
# test, install), using the scheduler to serialize setup and (for
# live ebuilds) unpack.
2928 class EbuildExecuter(CompositeTask):
2930 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2932 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose unpack phase must be serialized via scheduleUnpack.
2934 _live_eclasses = frozenset([
# _start(): kick off the clean phase first.
2944 self._tree = "porttree"
2947 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2948 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2949 self._start_task(clean_phase, self._clean_phase_exit)
2951 def _clean_phase_exit(self, clean_phase):
2953 if self._default_exit(clean_phase) != os.EX_OK:
2958 scheduler = self.scheduler
2959 settings = self.settings
2962 # This initializes PORTAGE_LOG_FILE.
2963 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# Setup is scheduled (not started directly) so the scheduler can
# serialize concurrent setup phases.
2965 setup_phase = EbuildPhase(background=self.background,
2966 pkg=pkg, phase="setup", scheduler=scheduler,
2967 settings=settings, tree=self._tree)
2969 setup_phase.addExitListener(self._setup_exit)
2970 self._current_task = setup_phase
2971 self.scheduler.scheduleSetup(setup_phase)
2973 def _setup_exit(self, setup_phase):
2975 if self._default_exit(setup_phase) != os.EX_OK:
2979 unpack_phase = EbuildPhase(background=self.background,
2980 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2981 settings=self.settings, tree=self._tree)
2983 if self._live_eclasses.intersection(self.pkg.inherited):
2984 # Serialize $DISTDIR access for live ebuilds since
2985 # otherwise they can interfere with each other.
2987 unpack_phase.addExitListener(self._unpack_exit)
2988 self._current_task = unpack_phase
2989 self.scheduler.scheduleUnpack(unpack_phase)
2992 self._start_task(unpack_phase, self._unpack_exit)
# _unpack_exit(): queue the remaining phases as one TaskSequence.
2994 def _unpack_exit(self, unpack_phase):
2996 if self._default_exit(unpack_phase) != os.EX_OK:
3000 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3003 phases = self._phases
3004 eapi = pkg.metadata["EAPI"]
3005 if eapi in ("0", "1"):
3006 # skip src_prepare and src_configure
3009 for phase in phases:
3010 ebuild_phases.add(EbuildPhase(background=self.background,
3011 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3012 settings=self.settings, tree=self._tree))
3014 self._start_task(ebuild_phases, self._default_final_exit)
3016 class EbuildMetadataPhase(SubProcess):
# NOTE(review): docstring delimiters elided in this excerpt.
3019 Asynchronous interface for the ebuild "depend" phase which is
3020 used to extract metadata from the ebuild.
3023 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3024 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3027 _file_names = ("ebuild",)
3028 _files_dict = slot_dict_class(_file_names, prefix="")
# _start(): spawn `ebuild ... depend` via portage.doebuild with a
# non-blocking pipe attached, and register the read end with the
# scheduler so metadata is collected asynchronously.
3032 settings = self.settings
3034 ebuild_path = self.ebuild_path
3035 debug = settings.get("PORTAGE_DEBUG") == "1"
3039 if self.fd_pipes is not None:
3040 fd_pipes = self.fd_pipes.copy()
3044 fd_pipes.setdefault(0, sys.stdin.fileno())
3045 fd_pipes.setdefault(1, sys.stdout.fileno())
3046 fd_pipes.setdefault(2, sys.stderr.fileno())
3048 # flush any pending output
3049 for fd in fd_pipes.itervalues():
3050 if fd == sys.stdout.fileno():
3052 if fd == sys.stderr.fileno():
3055 fd_pipes_orig = fd_pipes.copy()
3056 self._files = self._files_dict()
3059 master_fd, slave_fd = os.pipe()
# Non-blocking reads so the poll loop never stalls on this fd.
3060 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3061 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3063 fd_pipes[self._metadata_fd] = slave_fd
3065 self._raw_metadata = []
3066 files.ebuild = os.fdopen(master_fd, 'r')
3067 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3068 self._registered_events, self._output_handler)
3069 self._registered = True
3071 retval = portage.doebuild(ebuild_path, "depend",
3072 settings["ROOT"], settings, debug,
3073 mydbapi=self.portdb, tree="porttree",
3074 fd_pipes=fd_pipes, returnpid=True)
3078 if isinstance(retval, int):
3079 # doebuild failed before spawning
3081 self.returncode = retval
3085 self.pid = retval[0]
# This class tracks the pid itself; remove it from the global list.
3086 portage.process.spawned_pids.remove(self.pid)
# Poll callback: accumulate raw metadata; an empty read means EOF.
3088 def _output_handler(self, fd, event):
3090 if event & PollConstants.POLLIN:
3091 self._raw_metadata.append(self._files.ebuild.read())
3092 if not self._raw_metadata[-1]:
3096 self._unregister_if_appropriate(event)
3097 return self._registered
# After exit: parse one metadata value per line, paired with
# portage.auxdbkeys, and hand the result to metadata_callback.
3099 def _set_returncode(self, wait_retval):
3100 SubProcess._set_returncode(self, wait_retval)
3101 if self.returncode == os.EX_OK:
3102 metadata_lines = "".join(self._raw_metadata).splitlines()
3103 if len(portage.auxdbkeys) != len(metadata_lines):
3104 # Don't trust bash's returncode if the
3105 # number of lines is incorrect.
3108 metadata = izip(portage.auxdbkeys, metadata_lines)
3109 self.metadata_callback(self.cpv, self.ebuild_path,
3110 self.repo_path, metadata, self.ebuild_mtime)
# SpawnProcess subclass that runs a single ebuild phase through
# portage.doebuild(), logging to PORTAGE_LOG_FILE except during the
# clean phases.
3112 class EbuildProcess(SpawnProcess):
3114 __slots__ = ("phase", "pkg", "settings", "tree")
3117 # Don't open the log file during the clean phase since the
3118 # open file can result in an nfs lock on $T/build.log which
3119 # prevents the clean phase from removing $T.
3120 if self.phase not in ("clean", "cleanrm"):
3121 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3122 SpawnProcess._start(self)
# Always use a pty/pipe pair sized to the current terminal.
3124 def _pipe(self, fd_pipes):
3125 stdout_pipe = fd_pipes.get(1)
3126 got_pty, master_fd, slave_fd = \
3127 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3128 return (master_fd, slave_fd)
# Delegate the actual spawn to portage.doebuild for this phase.
3130 def _spawn(self, args, **kwargs):
3132 root_config = self.pkg.root_config
3134 mydbapi = root_config.trees[tree].dbapi
3135 settings = self.settings
3136 ebuild_path = settings["EBUILD"]
3137 debug = settings.get("PORTAGE_DEBUG") == "1"
3139 rval = portage.doebuild(ebuild_path, self.phase,
3140 root_config.root, settings, debug,
3141 mydbapi=mydbapi, tree=tree, **kwargs)
# Post-exit: reconcile with the phase's exit-status file, honor
# FEATURES=test-fail-continue, and fix up userpriv permissions.
3145 def _set_returncode(self, wait_retval):
3146 SpawnProcess._set_returncode(self, wait_retval)
3148 if self.phase not in ("clean", "cleanrm"):
3149 self.returncode = portage._doebuild_exit_status_check_and_log(
3150 self.settings, self.phase, self.returncode)
3152 if self.phase == "test" and self.returncode != os.EX_OK and \
3153 "test-fail-continue" in self.settings.features:
3154 self.returncode = os.EX_OK
3156 portage._post_phase_userpriv_perms(self.settings)
# CompositeTask wrapping one EbuildProcess phase plus its registered
# post-phase commands (MiscFunctionsProcess).
3158 class EbuildPhase(CompositeTask):
3160 __slots__ = ("background", "pkg", "phase",
3161 "scheduler", "settings", "tree")
3163 _post_phase_cmds = portage._post_phase_cmds
3167 ebuild_process = EbuildProcess(background=self.background,
3168 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3169 settings=self.settings, tree=self.tree)
3171 self._start_task(ebuild_process, self._ebuild_exit)
# _ebuild_exit(): after install, run the build-log QA checks; then
# either chain the phase's post commands or finish with the phase's
# return code.
3173 def _ebuild_exit(self, ebuild_process):
3175 if self.phase == "install":
3177 log_path = self.settings.get("PORTAGE_LOG_FILE")
3179 if self.background and log_path is not None:
3180 log_file = open(log_path, 'a')
3183 portage._check_build_log(self.settings, out=out)
3185 if log_file is not None:
3188 if self._default_exit(ebuild_process) != os.EX_OK:
3192 settings = self.settings
3194 if self.phase == "install":
# Normalize ownership of installed files for userpriv builds.
3195 portage._post_src_install_uid_fix(settings)
3197 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3198 if post_phase_cmds is not None:
3199 post_phase = MiscFunctionsProcess(background=self.background,
3200 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3201 scheduler=self.scheduler, settings=settings)
3202 self._start_task(post_phase, self._post_phase_exit)
3205 self.returncode = ebuild_process.returncode
3206 self._current_task = None
3209 def _post_phase_exit(self, post_phase):
3210 if self._final_exit(post_phase) != os.EX_OK:
3211 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3213 self._current_task = None
# EbuildProcess specialized for the "package" phase: builds a .tbz2
# into a pid-suffixed temp file and injects it into the bintree on
# success.
3217 class EbuildBinpkg(EbuildProcess):
# NOTE(review): docstring delimiters elided in this excerpt.
3219 This assumes that src_install() has successfully completed.
3221 __slots__ = ("_binpkg_tmpfile",)
3224 self.phase = "package"
3225 self.tree = "porttree"
3227 root_config = pkg.root_config
3228 portdb = root_config.trees["porttree"].dbapi
3229 bintree = root_config.trees["bintree"]
3230 ebuild_path = portdb.findname(self.pkg.cpv)
3231 settings = self.settings
3232 debug = settings.get("PORTAGE_DEBUG") == "1"
# Ensure no stale binary package occupies this cpv's slot.
3234 bintree.prevent_collision(pkg.cpv)
# The pid suffix keeps concurrent emerges from clobbering the
# same temp file.
3235 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3236 pkg.cpv + ".tbz2." + str(os.getpid()))
3237 self._binpkg_tmpfile = binpkg_tmpfile
3238 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3239 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3242 EbuildProcess._start(self)
# The variable is only needed while the phase runs.
3244 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3246 def _set_returncode(self, wait_retval):
3247 EbuildProcess._set_returncode(self, wait_retval)
3250 bintree = pkg.root_config.trees["bintree"]
3251 binpkg_tmpfile = self._binpkg_tmpfile
3252 if self.returncode == os.EX_OK:
# Atomically move the finished tbz2 into the binary tree.
3253 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Synchronous wrapper around portage.merge() that installs the built
# image ($D) into the live filesystem, updates the world set on
# success, and logs the post-build clean/completion messages.
3255 class EbuildMerge(SlotObject):
3257 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3258 "pkg", "pkg_count", "pkg_path", "pretend",
3259 "scheduler", "settings", "tree", "world_atom")
# execute(): run the merge and return portage.merge()'s status.
3262 root_config = self.pkg.root_config
3263 settings = self.settings
3264 retval = portage.merge(settings["CATEGORY"],
3265 settings["PF"], settings["D"],
3266 os.path.join(settings["PORTAGE_BUILDDIR"],
3267 "build-info"), root_config.root, settings,
3268 myebuild=settings["EBUILD"],
3269 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3270 vartree=root_config.trees["vartree"],
3271 prev_mtimes=self.ldpath_mtimes,
3272 scheduler=self.scheduler,
3273 blockers=self.find_blockers)
3275 if retval == os.EX_OK:
# Record the package in the world file when appropriate.
3276 self.world_atom(self.pkg)
3281 def _log_success(self):
3283 pkg_count = self.pkg_count
3284 pkg_path = self.pkg_path
3285 logger = self.logger
3286 if "noclean" not in self.settings.features:
3287 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3288 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3289 logger.log((" === (%s of %s) " + \
3290 "Post-Build Cleaning (%s::%s)") % \
3291 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3292 short_msg=short_msg)
3293 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3294 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# AsynchronousTask that unmerges one installed package via the
# module-level unmerge() helper, translating UninstallFailure into a
# return code and routing messages to the log file when backgrounded.
3296 class PackageUninstall(AsynchronousTask):
3298 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3302 unmerge(self.pkg.root_config, self.opts, "unmerge",
3303 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3304 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3305 writemsg_level=self._writemsg_level)
3306 except UninstallFailure, e:
3307 self.returncode = e.status
3309 self.returncode = os.EX_OK
# Message sink handed to unmerge(): write to the terminal unless
# backgrounded with a log file, in which case append to the log
# (suppressing sub-WARNING messages on the terminal).
3312 def _writemsg_level(self, msg, level=0, noiselevel=0):
3314 log_path = self.settings.get("PORTAGE_LOG_FILE")
3315 background = self.background
3317 if log_path is None:
3318 if not (background and level < logging.WARNING):
3319 portage.util.writemsg_level(msg,
3320 level=level, noiselevel=noiselevel)
3323 portage.util.writemsg_level(msg,
3324 level=level, noiselevel=noiselevel)
3326 f = open(log_path, 'a')
# CompositeTask that installs a binary package: wait on any
# prefetcher, fetch (--getbinpkg), verify digests, run clean/setup,
# extract metadata and the image, then merge via EbuildMerge.
3332 class Binpkg(CompositeTask):
3334 __slots__ = ("find_blockers",
3335 "ldpath_mtimes", "logger", "opts",
3336 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3337 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3338 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Route progress messages to the terminal and/or the log file.
3340 def _writemsg_level(self, msg, level=0, noiselevel=0):
3342 if not self.background:
3343 portage.util.writemsg_level(msg,
3344 level=level, noiselevel=noiselevel)
3346 log_path = self.settings.get("PORTAGE_LOG_FILE")
3347 if log_path is not None:
3348 f = open(log_path, 'a')
# _start(): set up the build dir paths and ebuild environment for
# this binary package, then wait on or bypass the prefetcher.
3357 settings = self.settings
3358 settings.setcpv(pkg)
3359 self._tree = "bintree"
3360 self._bintree = self.pkg.root_config.trees[self._tree]
3361 self._verify = not self.opts.pretend
3363 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3364 "portage", pkg.category, pkg.pf)
3365 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3366 pkg=pkg, settings=settings)
3367 self._image_dir = os.path.join(dir_path, "image")
3368 self._infloc = os.path.join(dir_path, "build-info")
3369 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3370 settings["EBUILD"] = self._ebuild_path
3371 debug = settings.get("PORTAGE_DEBUG") == "1"
3372 portage.doebuild_environment(self._ebuild_path, "setup",
3373 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3374 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3376 # The prefetcher has already completed or it
3377 # could be running now. If it's running now,
3378 # wait for it to complete since it holds
3379 # a lock on the file being fetched. The
3380 # portage.locks functions are only designed
3381 # to work between separate processes. Since
3382 # the lock is held by the current process,
3383 # use the scheduler and fetcher methods to
3384 # synchronize with the fetcher.
3385 prefetcher = self.prefetcher
3386 if prefetcher is None:
3388 elif not prefetcher.isAlive():
3390 elif prefetcher.poll() is None:
3392 waiting_msg = ("Fetching '%s' " + \
3393 "in the background. " + \
3394 "To view fetch progress, run `tail -f " + \
3395 "/var/log/emerge-fetch.log` in another " + \
3396 "terminal.") % prefetcher.pkg_path
3397 msg_prefix = colorize("GOOD", " * ")
3398 from textwrap import wrap
3399 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3400 for line in wrap(waiting_msg, 65))
3401 if not self.background:
3402 writemsg(waiting_msg, noiselevel=-1)
3404 self._current_task = prefetcher
3405 prefetcher.addExitListener(self._prefetch_exit)
3408 self._prefetch_exit(prefetcher)
# _prefetch_exit(): lock the build dir, reset stale logs, and start
# a BinpkgFetcher when the package must be fetched remotely.
3410 def _prefetch_exit(self, prefetcher):
3413 pkg_count = self.pkg_count
3414 if not (self.opts.pretend or self.opts.fetchonly):
3415 self._build_dir.lock()
3416 # If necessary, discard old log so that we don't
3418 for x in ('.logid', 'temp/build.log'):
3420 os.unlink(os.path.join(self._build_dir.dir_path, x))
3423 # Initialize PORTAGE_LOG_FILE.
3424 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3425 fetcher = BinpkgFetcher(background=self.background,
3426 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3427 pretend=self.opts.pretend, scheduler=self.scheduler)
3428 pkg_path = fetcher.pkg_path
3429 self._pkg_path = pkg_path
3431 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3433 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3434 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3435 short_msg = "emerge: (%s of %s) %s Fetch" % \
3436 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3437 self.logger.log(msg, short_msg=short_msg)
3438 self._start_task(fetcher, self._fetcher_exit)
3441 self._fetcher_exit(fetcher)
# _fetcher_exit(): record whether a fetch happened, then verify the
# package digest (unless pretending).
3443 def _fetcher_exit(self, fetcher):
3445 # The fetcher only has a returncode when
3446 # --getbinpkg is enabled.
3447 if fetcher.returncode is not None:
3448 self._fetched_pkg = True
3449 if self._default_exit(fetcher) != os.EX_OK:
3450 self._unlock_builddir()
3454 if self.opts.pretend:
3455 self._current_task = None
3456 self.returncode = os.EX_OK
3464 logfile = self.settings.get("PORTAGE_LOG_FILE")
3465 verifier = BinpkgVerifier(background=self.background,
3466 logfile=logfile, pkg=self.pkg)
3467 self._start_task(verifier, self._verifier_exit)
3470 self._verifier_exit(verifier)
# _verifier_exit(): inject a freshly fetched package into the
# bintree, then run the clean phase before extraction.
3472 def _verifier_exit(self, verifier):
3473 if verifier is not None and \
3474 self._default_exit(verifier) != os.EX_OK:
3475 self._unlock_builddir()
3479 logger = self.logger
3481 pkg_count = self.pkg_count
3482 pkg_path = self._pkg_path
3484 if self._fetched_pkg:
3485 self._bintree.inject(pkg.cpv, filename=pkg_path)
3487 if self.opts.fetchonly:
3488 self._current_task = None
3489 self.returncode = os.EX_OK
3493 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3494 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3495 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3496 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3497 logger.log(msg, short_msg=short_msg)
3500 settings = self.settings
3501 ebuild_phase = EbuildPhase(background=self.background,
3502 pkg=pkg, phase=phase, scheduler=self.scheduler,
3503 settings=settings, tree=self._tree)
3505 self._start_task(ebuild_phase, self._clean_exit)
# _clean_exit(): recreate the build dirs, unpack the xpak metadata
# into build-info (writing fallbacks for any missing keys and the
# package md5), then schedule the setup phase.
3507 def _clean_exit(self, clean_phase):
3508 if self._default_exit(clean_phase) != os.EX_OK:
3509 self._unlock_builddir()
3513 dir_path = self._build_dir.dir_path
3515 infloc = self._infloc
3517 pkg_path = self._pkg_path
3520 for mydir in (dir_path, self._image_dir, infloc):
3521 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3522 gid=portage.data.portage_gid, mode=dir_mode)
3524 # This initializes PORTAGE_LOG_FILE.
3525 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3526 self._writemsg_level(">>> Extracting info\n")
3528 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3529 check_missing_metadata = ("CATEGORY", "PF")
3530 missing_metadata = set()
3531 for k in check_missing_metadata:
3532 v = pkg_xpak.getfile(k)
3534 missing_metadata.add(k)
3536 pkg_xpak.unpackinfo(infloc)
3537 for k in missing_metadata:
3545 f = open(os.path.join(infloc, k), 'wb')
3551 # Store the md5sum in the vdb.
3552 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3554 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3558 # This gives bashrc users an opportunity to do various things
3559 # such as remove binary packages after they're installed.
3560 settings = self.settings
3561 settings.setcpv(self.pkg)
3562 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3563 settings.backup_changes("PORTAGE_BINPKG_FILE")
3566 setup_phase = EbuildPhase(background=self.background,
3567 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3568 settings=settings, tree=self._tree)
3570 setup_phase.addExitListener(self._setup_exit)
3571 self._current_task = setup_phase
3572 self.scheduler.scheduleSetup(setup_phase)
# _setup_exit(): extract the package image asynchronously.
3574 def _setup_exit(self, setup_phase):
3575 if self._default_exit(setup_phase) != os.EX_OK:
3576 self._unlock_builddir()
3580 extractor = BinpkgExtractorAsync(background=self.background,
3581 image_dir=self._image_dir,
3582 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3583 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3584 self._start_task(extractor, self._extractor_exit)
3586 def _extractor_exit(self, extractor):
3587 if self._final_exit(extractor) != os.EX_OK:
3588 self._unlock_builddir()
3589 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# Flush elog messages and release the lock; no lock was taken in
# pretend/fetchonly modes.
3593 def _unlock_builddir(self):
3594 if self.opts.pretend or self.opts.fetchonly:
3596 portage.elog.elog_process(self.pkg.cpv, self.settings)
3597 self._build_dir.unlock()
# install(): merge the extracted image via EbuildMerge, keeping
# PORTAGE_BINPKG_FILE exported for bashrc hooks during the merge.
3601 # This gives bashrc users an opportunity to do various things
3602 # such as remove binary packages after they're installed.
3603 settings = self.settings
3604 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3605 settings.backup_changes("PORTAGE_BINPKG_FILE")
3607 merge = EbuildMerge(find_blockers=self.find_blockers,
3608 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3609 pkg=self.pkg, pkg_count=self.pkg_count,
3610 pkg_path=self._pkg_path, scheduler=self.scheduler,
3611 settings=settings, tree=self._tree, world_atom=self.world_atom)
3614 retval = merge.execute()
3616 settings.pop("PORTAGE_BINPKG_FILE", None)
3617 self._unlock_builddir()
# SpawnProcess that downloads one binary package with the configured
# FETCHCOMMAND/RESUMECOMMAND, supports distlocks-style file locking,
# and syncs the local mtime with the remote index entry on success.
3620 class BinpkgFetcher(SpawnProcess):
3622 __slots__ = ("pkg", "pretend",
3623 "locked", "pkg_path", "_lock_obj")
3625 def __init__(self, **kwargs):
3626 SpawnProcess.__init__(self, **kwargs)
# Destination path inside the bintree's package directory.
3628 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# _start(): compute the download URI, honor pretend mode (just print
# the URI), expand the fetch command, and spawn it.
3636 pretend = self.pretend
3637 bintree = pkg.root_config.trees["bintree"]
3638 settings = bintree.settings
3639 use_locks = "distlocks" in settings.features
3640 pkg_path = self.pkg_path
3643 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# A partial download listed in bintree.invalids may be resumed.
3646 exists = os.path.exists(pkg_path)
3647 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3648 if not (pretend or resume):
3649 # Remove existing file or broken symlink.
3655 # urljoin doesn't work correctly with
3656 # unrecognized protocols like sftp
3657 if bintree._remote_has_index:
3658 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3660 rel_uri = pkg.cpv + ".tbz2"
3661 uri = bintree._remote_base_uri.rstrip("/") + \
3662 "/" + rel_uri.lstrip("/")
3664 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3665 "/" + pkg.pf + ".tbz2"
3668 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3669 self.returncode = os.EX_OK
# Pick the protocol-specific fetch command when defined, e.g.
# FETCHCOMMAND_HTTP, falling back to the generic one.
3673 protocol = urlparse.urlparse(uri)[0]
3674 fcmd_prefix = "FETCHCOMMAND"
3676 fcmd_prefix = "RESUMECOMMAND"
3677 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3679 fcmd = settings.get(fcmd_prefix)
3682 "DISTDIR" : os.path.dirname(pkg_path),
3684 "FILE" : os.path.basename(pkg_path)
3687 fetch_env = dict(settings.iteritems())
3688 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3689 for x in shlex.split(fcmd)]
3691 if self.fd_pipes is None:
3693 fd_pipes = self.fd_pipes
3695 # Redirect all output to stdout since some fetchers like
3696 # wget pollute stderr (if portage detects a problem then it
3697 # can send its own message to stderr).
3698 fd_pipes.setdefault(0, sys.stdin.fileno())
3699 fd_pipes.setdefault(1, sys.stdout.fileno())
3700 fd_pipes.setdefault(2, sys.stdout.fileno())
3702 self.args = fetch_args
3703 self.env = fetch_env
3704 SpawnProcess._start(self)
# After a successful fetch, align the local file's mtime with the
# remote index's MTIME so timestamp-based comparisons stay valid.
3706 def _set_returncode(self, wait_retval):
3707 SpawnProcess._set_returncode(self, wait_retval)
3708 if self.returncode == os.EX_OK:
3709 # If possible, update the mtime to match the remote package if
3710 # the fetcher didn't already do it automatically.
3711 bintree = self.pkg.root_config.trees["bintree"]
3712 if bintree._remote_has_index:
3713 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3714 if remote_mtime is not None:
3716 remote_mtime = long(remote_mtime)
3721 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3725 if remote_mtime != local_mtime:
3727 os.utime(self.pkg_path,
3728 (remote_mtime, remote_mtime))
# lock(): take a lockfile on the package path.  NOTE(review): the
# `def lock` header and docstring delimiters are elided here.
3737 This raises an AlreadyLocked exception if lock() is called
3738 while a lock is already held. In order to avoid this, call
3739 unlock() or check whether the "locked" attribute is True
3740 or False before calling lock().
3742 if self._lock_obj is not None:
3743 raise self.AlreadyLocked((self._lock_obj,))
3745 self._lock_obj = portage.locks.lockfile(
3746 self.pkg_path, wantnewlockfile=1)
# Raised by lock() when a lock is already held by this instance.
3749 class AlreadyLocked(portage.exception.PortageException):
# unlock(): release the file lock if one is held.
3753 if self._lock_obj is None:
3755 portage.locks.unlockfile(self._lock_obj)
3756 self._lock_obj = None
# Synchronous digest check of a downloaded binary package; renames a
# corrupt file out of the way so a re-fetch is possible.
3759 class BinpkgVerifier(AsynchronousTask):
3760 __slots__ = ("logfile", "pkg",)
# NOTE(review): the `def _start` header and docstring delimiters are
# elided in this excerpt.
3764 Note: Unlike a normal AsynchronousTask.start() method,
3765 this one does all of its work synchronously. The returncode
3766 attribute will be set before it returns.
3770 root_config = pkg.root_config
3771 bintree = root_config.trees["bintree"]
# Temporarily redirect stdout/stderr into the log when running in
# the background, restoring them afterwards.
3773 stdout_orig = sys.stdout
3774 stderr_orig = sys.stderr
3776 if self.background and self.logfile is not None:
3777 log_file = open(self.logfile, 'a')
3779 if log_file is not None:
3780 sys.stdout = log_file
3781 sys.stderr = log_file
3783 bintree.digestCheck(pkg)
3784 except portage.exception.FileNotFound:
3785 writemsg("!!! Fetching Binary failed " + \
3786 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3788 except portage.exception.DigestException, e:
# e.value carries (filename, reason, got, expected).
3789 writemsg("\n!!! Digest verification failed:\n",
3791 writemsg("!!! %s\n" % e.value[0],
3793 writemsg("!!! Reason: %s\n" % e.value[1],
3795 writemsg("!!! Got: %s\n" % e.value[2],
3797 writemsg("!!! Expected: %s\n" % e.value[3],
3800 if rval != os.EX_OK:
# Move the bad file aside so the next attempt starts clean.
3801 pkg_path = bintree.getname(pkg.cpv)
3802 head, tail = os.path.split(pkg_path)
3803 temp_filename = portage._checksum_failure_temp_file(head, tail)
3804 writemsg("File renamed to '%s'\n" % (temp_filename,),
3807 sys.stdout = stdout_orig
3808 sys.stderr = stderr_orig
3809 if log_file is not None:
3812 self.returncode = rval
# BinpkgPrefetcher: background pipeline that fetches a binary package,
# verifies its digests, then injects it into the local binary tree:
#   BinpkgFetcher -> BinpkgVerifier -> self._bintree.inject().
# NOTE(review): gappy extraction -- the "def _start(self):" header and the
# early-return / self.wait() lines between pipeline stages are missing.
3815 class BinpkgPrefetcher(CompositeTask):
3817 __slots__ = ("pkg",) + \
3818 ("pkg_path", "_bintree",)
3821 self._bintree = self.pkg.root_config.trees["bintree"]
3822 fetcher = BinpkgFetcher(background=self.background,
3823 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3824 scheduler=self.scheduler)
3825 self.pkg_path = fetcher.pkg_path
3826 self._start_task(fetcher, self._fetcher_exit)
3828 def _fetcher_exit(self, fetcher):
3830 if self._default_exit(fetcher) != os.EX_OK:
# Fetch succeeded: verify the downloaded file's digests next.
3834 verifier = BinpkgVerifier(background=self.background,
3835 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3836 self._start_task(verifier, self._verifier_exit)
3838 def _verifier_exit(self, verifier):
3839 if self._default_exit(verifier) != os.EX_OK:
# Verified OK: make the package visible in the binary tree index.
3843 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3845 self._current_task = None
3846 self.returncode = os.EX_OK
class BinpkgExtractorAsync(SpawnProcess):
	"""Asynchronously extract a binary package (.tbz2) into image_dir by
	spawning a bash pipeline: bzip2 decompression piped into tar.

	Defect fixed: the extraction that produced this file dropped the
	"def _start(self):" header line; it is restored here so the class is
	syntactically complete.  The shell command string is unchanged.
	"""

	__slots__ = ("image_dir", "pkg", "pkg_path")

	_shell_binary = portage.const.BASH_BINARY

	def _start(self):
		# Quote both paths so arbitrary package/image paths survive the
		# shell; "--" stops bzip2 option parsing at the filename.
		quoted_pkg = portage._shell_quote(self.pkg_path)
		quoted_dir = portage._shell_quote(self.image_dir)
		self.args = [self._shell_binary, "-c",
			"bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
			(quoted_pkg, quoted_dir)]

		# Run with the package's full build environment.
		self.env = self.pkg.root_config.settings.environ()
		SpawnProcess._start(self)
# MergeListItem: one entry in the merge list.  _start() prints a status
# line and dispatches to EbuildBuild (source) or Binpkg (prebuilt);
# merge() performs the final merge/uninstall step.  poll()/wait() are
# forwarded to the underlying install task.
# NOTE(review): gappy extraction -- the docstring delimiters, the
# _start/_poll/_wait/merge method headers, the "pkg = self.pkg" bindings,
# the "(action_desc," line of the msg tuple and several blank/return
# lines are missing from this view; consult upstream before changing
# any logic here.
3864 class MergeListItem(CompositeTask):
3867 TODO: For parallel scheduling, everything here needs asynchronous
3868 execution support (start, poll, and wait methods).
3871 __slots__ = ("args_set",
3872 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3873 "find_blockers", "logger", "mtimedb", "pkg",
3874 "pkg_count", "pkg_to_replace", "prefetcher",
3875 "settings", "statusMessage", "world_atom") + \
3881 build_opts = self.build_opts
3884 # uninstall, executed by self.merge()
3885 self.returncode = os.EX_OK
3889 args_set = self.args_set
3890 find_blockers = self.find_blockers
3891 logger = self.logger
3892 mtimedb = self.mtimedb
3893 pkg_count = self.pkg_count
3894 scheduler = self.scheduler
3895 settings = self.settings
3896 world_atom = self.world_atom
3897 ldpath_mtimes = mtimedb["ldpath"]
# Build the progress line, e.g. "Emerging binary (1 of 5) cat/pkg".
3899 action_desc = "Emerging"
3901 if pkg.type_name == "binary":
3902 action_desc += " binary"
3904 if build_opts.fetchonly:
3905 action_desc = "Fetching"
3907 msg = "%s (%s of %s) %s" % \
3909 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3910 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3911 colorize("GOOD", pkg.cpv))
# Append "from <repo>" when the package's repository differs from the
# main PORTDIR repository name.
3913 portdb = pkg.root_config.trees["porttree"].dbapi
3914 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3915 if portdir_repo_name:
3916 pkg_repo_name = pkg.metadata.get("repository")
3917 if pkg_repo_name != portdir_repo_name:
3918 if not pkg_repo_name:
3919 pkg_repo_name = "unknown repo"
3920 msg += " from %s" % pkg_repo_name
3923 msg += " %s %s" % (preposition, pkg.root)
3925 if not build_opts.pretend:
3926 self.statusMessage(msg)
3927 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3928 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch on package type: build from source vs. install a binpkg.
3930 if pkg.type_name == "ebuild":
3932 build = EbuildBuild(args_set=args_set,
3933 background=self.background,
3934 config_pool=self.config_pool,
3935 find_blockers=find_blockers,
3936 ldpath_mtimes=ldpath_mtimes, logger=logger,
3937 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3938 prefetcher=self.prefetcher, scheduler=scheduler,
3939 settings=settings, world_atom=world_atom)
3941 self._install_task = build
3942 self._start_task(build, self._default_final_exit)
3945 elif pkg.type_name == "binary":
3947 binpkg = Binpkg(background=self.background,
3948 find_blockers=find_blockers,
3949 ldpath_mtimes=ldpath_mtimes, logger=logger,
3950 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3951 prefetcher=self.prefetcher, settings=settings,
3952 scheduler=scheduler, world_atom=world_atom)
3954 self._install_task = binpkg
3955 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() delegate to the active install task (headers missing
# from this extraction).
3959 self._install_task.poll()
3960 return self.returncode
3963 self._install_task.wait()
3964 return self.returncode
# merge(): perform the actual merge or uninstall (header missing).
3969 build_opts = self.build_opts
3970 find_blockers = self.find_blockers
3971 logger = self.logger
3972 mtimedb = self.mtimedb
3973 pkg_count = self.pkg_count
3974 prefetcher = self.prefetcher
3975 scheduler = self.scheduler
3976 settings = self.settings
3977 world_atom = self.world_atom
3978 ldpath_mtimes = mtimedb["ldpath"]
# Uninstall path: skipped under --buildpkgonly/--fetchonly/--pretend.
3981 if not (build_opts.buildpkgonly or \
3982 build_opts.fetchonly or build_opts.pretend):
3984 uninstall = PackageUninstall(background=self.background,
3985 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3986 pkg=pkg, scheduler=scheduler, settings=settings)
3989 retval = uninstall.wait()
3990 if retval != os.EX_OK:
3994 if build_opts.fetchonly or \
3995 build_opts.buildpkgonly:
3996 return self.returncode
3998 retval = self._install_task.install()
# PackageMerge: thin AsynchronousTask wrapper that prints a one-line
# status message and then runs self.merge.merge() synchronously.
# NOTE(review): gappy extraction -- the if/else lines selecting between
# "Uninstalling"/"Installing", the preposition = "to" assignment, the
# pkg.root condition before line 4024 and the trailing self.wait() are
# missing from this view.
4001 class PackageMerge(AsynchronousTask):
4003 TODO: Implement asynchronous merge so that the scheduler can
4004 run while a merge is executing.
4007 __slots__ = ("merge",)
4011 pkg = self.merge.pkg
4012 pkg_count = self.merge.pkg_count
4015 action_desc = "Uninstalling"
4016 preposition = "from"
4018 action_desc = "Installing"
4021 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4024 msg += " %s %s" % (preposition, pkg.root)
# Suppress the status line for fetch-only / pretend / buildpkg-only.
4026 if not self.merge.build_opts.fetchonly and \
4027 not self.merge.build_opts.pretend and \
4028 not self.merge.build_opts.buildpkgonly:
4029 self.merge.statusMessage(msg)
4031 self.returncode = self.merge.merge()
class DependencyArg(object):
	"""Base class for a dependency argument given on the command line
	(an atom, a package, or a set name).

	Defect fixed: the extraction dropped the "self.arg = arg" assignment
	and the "def __str__(self):" header.  Both are restored -- subclasses
	visibly read self.arg (e.g. SetArg slices it) and __str__ returns
	str(self.arg).
	"""

	def __init__(self, arg=None, root_config=None):
		# arg: the raw argument string as typed by the user.
		# root_config: the RootConfig of the ROOT this argument targets.
		self.arg = arg
		self.root_config = root_config

	def __str__(self):
		return str(self.arg)
class AtomArg(DependencyArg):
	"""A command-line argument that is a single dependency atom.

	Defect fixed: the extraction dropped the "self.atom = atom" line,
	leaving self.atom referenced before assignment; restored here.
	"""

	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.atom = atom
		# Normalize plain strings into portage.dep.Atom instances.
		if not isinstance(self.atom, portage.dep.Atom):
			self.atom = portage.dep.Atom(self.atom)
		# Expose a one-atom set so AtomArg can be treated like SetArg.
		self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A command-line argument that refers to one specific Package
	instance, pinned by exact version."""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# An "=<cpv>" atom matches exactly this package version.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom, )
class SetArg(DependencyArg):
	"""A command-line argument naming a package set (e.g. "@world").

	Defect fixed: the extraction dropped the "self.set = set" line, so
	the `set` parameter was silently unused; restored here.
	"""

	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.set = set
		# Strip the set prefix from the raw argument to get the bare name.
		self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
	"""A single dependency edge: `parent` needs `atom` in `root`, at a
	given graph `depth` and `priority`.

	Defect fixed: the extraction dropped the body of the final
	"if self.depth is None:" guard; the "self.depth = 0" default is
	restored so a depth-less Dependency starts at the graph root.
	"""

	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# Fill in defaults for fields the creator did not supply.
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
			self.depth = 0
# NOTE(review): gappy extraction -- docstring delimiters, try:/else:/for
# atom lines in the validation loop, the flush()/__iter__ method headers
# and several blank/continue/return lines are missing from this view;
# consult upstream before changing logic.
4073 class BlockerCache(portage.cache.mappings.MutableMapping):
4074 """This caches blockers of installed packages so that dep_check does not
4075 have to be done for every single installed package on every invocation of
4076 emerge. The cache is invalidated whenever it is detected that something
4077 has changed that might alter the results of dep_check() calls:
4078 1) the set of installed packages (including COUNTER) has changed
4079 2) the old-style virtuals have changed
4082 # Number of uncached packages to trigger cache update, since
4083 # it's wasteful to update it for every vdb change.
4084 _cache_threshold = 5
# Lightweight record: the vdb COUNTER at caching time plus the blocker
# atoms; a COUNTER mismatch invalidates the entry.
4086 class BlockerData(object):
4088 __slots__ = ("__weakref__", "atoms", "counter")
4090 def __init__(self, counter, atoms):
4091 self.counter = counter
4094 def __init__(self, myroot, vardb):
4096 self._virtuals = vardb.settings.getvirtuals()
4097 self._cache_filename = os.path.join(myroot,
4098 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4099 self._cache_version = "1"
4100 self._cache_data = None
4101 self._modified = set()
# Load the pickled cache, tolerating a missing or corrupt file.
4106 f = open(self._cache_filename, mode='rb')
4107 mypickle = pickle.Unpickler(f)
# Disable find_global to keep unpickling from instantiating
# arbitrary classes (defense against a tampered cache file).
4109 mypickle.find_global = None
4110 except AttributeError:
4111 # TODO: If py3k, override Unpickler.find_class().
4113 self._cache_data = mypickle.load()
4116 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4117 if isinstance(e, pickle.UnpicklingError):
4118 writemsg("!!! Error loading '%s': %s\n" % \
4119 (self._cache_filename, str(e)), noiselevel=-1)
4122 cache_valid = self._cache_data and \
4123 isinstance(self._cache_data, dict) and \
4124 self._cache_data.get("version") == self._cache_version and \
4125 isinstance(self._cache_data.get("blockers"), dict)
4127 # Validate all the atoms and counters so that
4128 # corruption is detected as soon as possible.
4129 invalid_items = set()
4130 for k, v in self._cache_data["blockers"].iteritems():
4131 if not isinstance(k, basestring):
4132 invalid_items.add(k)
4135 if portage.catpkgsplit(k) is None:
4136 invalid_items.add(k)
4138 except portage.exception.InvalidData:
4139 invalid_items.add(k)
4141 if not isinstance(v, tuple) or \
4143 invalid_items.add(k)
4146 if not isinstance(counter, (int, long)):
4147 invalid_items.add(k)
4149 if not isinstance(atoms, (list, tuple)):
4150 invalid_items.add(k)
4152 invalid_atom = False
4154 if not isinstance(atom, basestring):
4157 if atom[:1] != "!" or \
4158 not portage.isvalidatom(
4159 atom, allow_blockers=True):
4163 invalid_items.add(k)
# Drop every entry that failed validation above.
4166 for k in invalid_items:
4167 del self._cache_data["blockers"][k]
4168 if not self._cache_data["blockers"]:
# Cache missing or invalid: start from an empty structure.
4172 self._cache_data = {"version":self._cache_version}
4173 self._cache_data["blockers"] = {}
4174 self._cache_data["virtuals"] = self._virtuals
4175 self._modified.clear()
4178 """If the current user has permission and the internal blocker cache has
4179 been updated, save it to disk and mark it unmodified. This is called
4180 by emerge after it has processed blockers for all installed packages.
4181 Currently, the cache is only written if the user has superuser
4182 privileges (since that's required to obtain a lock), but all users
4183 have read access and benefit from faster blocker lookups (as long as
4184 the entire cache is still valid). The cache is stored as a pickled
4185 dict object with the following format:
4189 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4190 "virtuals" : vardb.settings.getvirtuals()
# Only write when enough entries changed to be worth the disk I/O.
4193 if len(self._modified) >= self._cache_threshold and \
4196 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4197 pickle.dump(self._cache_data, f, protocol=2)
4199 portage.util.apply_secpass_permissions(
4200 self._cache_filename, gid=portage.portage_gid, mode=0644)
4201 except (IOError, OSError), e:
4203 self._modified.clear()
4205 def __setitem__(self, cpv, blocker_data):
4207 Update the cache and mark it as modified for a future call to
4210 @param cpv: Package for which to cache blockers.
4212 @param blocker_data: An object with counter and atoms attributes.
4213 @type blocker_data: BlockerData
# Atoms are stored as plain strings so the pickle stays portable.
4215 self._cache_data["blockers"][cpv] = \
4216 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4217 self._modified.add(cpv)
4220 if self._cache_data is None:
4221 # triggered by python-trace
4223 return iter(self._cache_data["blockers"])
4225 def __delitem__(self, cpv):
4226 del self._cache_data["blockers"][cpv]
4228 def __getitem__(self, cpv):
4231 @returns: An object with counter and atoms attributes.
4233 return self.BlockerData(*self._cache_data["blockers"][cpv])
# BlockerDB: computes which installed packages block (or are blocked by)
# a new package, using BlockerCache to avoid re-running dep_check for
# every installed package on every invocation.
# NOTE(review): gappy extraction -- try:/finally:/if not success: lines
# around the dep_check calls, the "blocker_atoms = []" initialization and
# several blank/continue lines are missing from this view.
4235 class BlockerDB(object):
4237 def __init__(self, root_config):
4238 self._root_config = root_config
4239 self._vartree = root_config.trees["vartree"]
4240 self._portdb = root_config.trees["porttree"].dbapi
4242 self._dep_check_trees = None
4243 self._fake_vartree = None
# Lazily build (and thereafter sync) a FakeVartree plus the tree
# mapping that dep_check() expects.
4245 def _get_fake_vartree(self, acquire_lock=0):
4246 fake_vartree = self._fake_vartree
4247 if fake_vartree is None:
4248 fake_vartree = FakeVartree(self._root_config,
4249 acquire_lock=acquire_lock)
4250 self._fake_vartree = fake_vartree
4251 self._dep_check_trees = { self._vartree.root : {
4252 "porttree" : fake_vartree,
4253 "vartree" : fake_vartree,
4256 fake_vartree.sync(acquire_lock=acquire_lock)
4259 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4260 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4261 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4262 settings = self._vartree.settings
4263 stale_cache = set(blocker_cache)
4264 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4265 dep_check_trees = self._dep_check_trees
4266 vardb = fake_vartree.dbapi
4267 installed_pkgs = list(vardb)
# Refresh the blocker cache entry for every installed package.
4269 for inst_pkg in installed_pkgs:
4270 stale_cache.discard(inst_pkg.cpv)
4271 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A COUNTER mismatch means the vdb entry changed since caching.
4272 if cached_blockers is not None and \
4273 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4274 cached_blockers = None
4275 if cached_blockers is not None:
4276 blocker_atoms = cached_blockers.atoms
4278 # Use aux_get() to trigger FakeVartree global
4279 # updates on *DEPEND when appropriate.
4280 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4282 portage.dep._dep_check_strict = False
4283 success, atoms = portage.dep_check(depstr,
4284 vardb, settings, myuse=inst_pkg.use.enabled,
4285 trees=dep_check_trees, myroot=inst_pkg.root)
4287 portage.dep._dep_check_strict = True
4289 pkg_location = os.path.join(inst_pkg.root,
4290 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4291 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4292 (pkg_location, atoms), noiselevel=-1)
# Only "!"-prefixed atoms are blockers; cache them sorted.
4295 blocker_atoms = [atom for atom in atoms \
4296 if atom.startswith("!")]
4297 blocker_atoms.sort()
4298 counter = long(inst_pkg.metadata["COUNTER"])
4299 blocker_cache[inst_pkg.cpv] = \
4300 blocker_cache.BlockerData(counter, blocker_atoms)
# Evict entries for packages no longer installed.
4301 for cpv in stale_cache:
4302 del blocker_cache[cpv]
4303 blocker_cache.flush()
# Direction 1: installed packages whose blockers match new_pkg.
4305 blocker_parents = digraph()
4307 for pkg in installed_pkgs:
4308 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4309 blocker_atom = blocker_atom.lstrip("!")
4310 blocker_atoms.append(blocker_atom)
4311 blocker_parents.add(blocker_atom, pkg)
4313 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4314 blocking_pkgs = set()
4315 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4316 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4318 # Check for blockers in the other direction.
4319 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4321 portage.dep._dep_check_strict = False
4322 success, atoms = portage.dep_check(depstr,
4323 vardb, settings, myuse=new_pkg.use.enabled,
4324 trees=dep_check_trees, myroot=new_pkg.root)
4326 portage.dep._dep_check_strict = True
4328 # We should never get this far with invalid deps.
4329 show_invalid_depstring_notice(new_pkg, depstr, atoms)
# Direction 2: new_pkg's own blockers matched against installed pkgs.
4332 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4335 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4336 for inst_pkg in installed_pkgs:
# EAFP: next() raising StopIteration means no atom matched.
4338 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4339 except (portage.exception.InvalidDependString, StopIteration):
4341 blocking_pkgs.add(inst_pkg)
4343 return blocking_pkgs
# Print a detailed, wrapped error notice about an invalid dependency
# string, with remediation advice that depends on whether the offending
# package is already installed ("nomerge") or about to be installed.
# NOTE(review): the "msg = []" initialization (original line 4350) and
# the "else:" line before 4363 are missing from this extraction.
4345 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4347 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4348 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
# parent_node is a (type, root, cpv, status) tuple.
4349 p_type, p_root, p_key, p_status = parent_node
4351 if p_status == "nomerge":
4352 category, pf = portage.catsplit(p_key)
4353 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4354 msg.append("Portage is unable to process the dependencies of the ")
4355 msg.append("'%s' package. " % p_key)
4356 msg.append("In order to correct this problem, the package ")
4357 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4358 msg.append("As a temporary workaround, the --nodeps option can ")
4359 msg.append("be used to ignore all dependencies. For reference, ")
4360 msg.append("the problematic dependencies can be found in the ")
4361 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4363 msg.append("This package can not be installed. ")
4364 msg.append("Please notify the '%s' package maintainer " % p_key)
4365 msg.append("about this problem.")
# Wrap the advice at 72 columns for readable terminal output.
4367 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4368 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
# NOTE(review): gappy extraction -- docstring delimiters, several method
# headers (clear, copy, __iter__, cp_all, cpv_all), return statements and
# blank lines are missing from this view.
4370 class PackageVirtualDbapi(portage.dbapi):
4372 A dbapi-like interface class that represents the state of the installed
4373 package database as new packages are installed, replacing any packages
4374 that previously existed in the same slot. The main difference between
4375 this class and fakedbapi is that this one uses Package instances
4376 internally (passed in via cpv_inject() and cpv_remove() calls).
4378 def __init__(self, settings):
4379 portage.dbapi.__init__(self)
4380 self.settings = settings
4381 self._match_cache = {}
4387 Remove all packages.
4391 self._cp_map.clear()
4392 self._cpv_map.clear()
# copy(): deep-enough copy -- cp lists are sliced so mutations of the
# clone do not leak back into this instance.
4395 obj = PackageVirtualDbapi(self.settings)
4396 obj._match_cache = self._match_cache.copy()
4397 obj._cp_map = self._cp_map.copy()
4398 for k, v in obj._cp_map.iteritems():
4399 obj._cp_map[k] = v[:]
4400 obj._cpv_map = self._cpv_map.copy()
4404 return self._cpv_map.itervalues()
# Membership is identity-based on the exact Package instance, not just
# the cpv string.
4406 def __contains__(self, item):
4407 existing = self._cpv_map.get(item.cpv)
4408 if existing is not None and \
4413 def get(self, item, default=None):
4414 cpv = getattr(item, "cpv", None)
# Fall back to treating item as a (type, root, cpv, operation) tuple.
4418 type_name, root, cpv, operation = item
4420 existing = self._cpv_map.get(cpv)
4421 if existing is not None and \
4426 def match_pkgs(self, atom):
4427 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4429 def _clear_cache(self):
4430 if self._categories is not None:
4431 self._categories = None
4432 if self._match_cache:
4433 self._match_cache = {}
# match(): memoized atom matching on top of portage.dbapi.match().
4435 def match(self, origdep, use_cache=1):
4436 result = self._match_cache.get(origdep)
4437 if result is not None:
4439 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4440 self._match_cache[origdep] = result
4443 def cpv_exists(self, cpv):
4444 return cpv in self._cpv_map
4446 def cp_list(self, mycp, use_cache=1):
4447 cachelist = self._match_cache.get(mycp)
4448 # cp_list() doesn't expand old-style virtuals
4449 if cachelist and cachelist[0].startswith(mycp):
4451 cpv_list = self._cp_map.get(mycp)
4452 if cpv_list is None:
4455 cpv_list = [pkg.cpv for pkg in cpv_list]
4456 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for virtual/* since those may later be
# satisfied by old-style virtual expansion.
4457 if not (not cpv_list and mycp.startswith("virtual/")):
4458 self._match_cache[mycp] = cpv_list
4462 return list(self._cp_map)
4465 return list(self._cpv_map)
# cpv_inject(): add pkg, displacing any existing package in the same
# cpv or the same slot (models an in-place upgrade/replacement).
4467 def cpv_inject(self, pkg):
4468 cp_list = self._cp_map.get(pkg.cp)
4471 self._cp_map[pkg.cp] = cp_list
4472 e_pkg = self._cpv_map.get(pkg.cpv)
4473 if e_pkg is not None:
4476 self.cpv_remove(e_pkg)
4477 for e_pkg in cp_list:
4478 if e_pkg.slot_atom == pkg.slot_atom:
4481 self.cpv_remove(e_pkg)
4484 self._cpv_map[pkg.cpv] = pkg
4487 def cpv_remove(self, pkg):
4488 old_pkg = self._cpv_map.get(pkg.cpv)
4491 self._cp_map[pkg.cp].remove(pkg)
4492 del self._cpv_map[pkg.cpv]
4495 def aux_get(self, cpv, wants):
4496 metadata = self._cpv_map[cpv].metadata
# Missing keys yield "" rather than raising, matching dbapi behavior.
4497 return [metadata.get(x, "") for x in wants]
4499 def aux_update(self, cpv, values):
4500 self._cpv_map[cpv].metadata.update(values)
# depgraph: builds and validates the dependency graph for an emerge run.
# __init__ wires up, per ROOT: a FakeVartree-backed RootConfig, a
# PackageVirtualDbapi modeling post-install vdb state, "graph" and
# "filtered" tree views for dep_check(), the (db, type, built, installed,
# db_keys) search order, and the many bookkeeping containers used while
# resolving.
# NOTE(review): gappy extraction -- numerous lines are missing throughout
# (self.trees/self.roots/self.mydbapi initialization, the "for pkg in
# vardb:" loop header, graph_tree/filtered_tree helper definitions,
# "dbs = []", self._sets initialization, blank lines, etc.); consult the
# complete upstream source before changing any logic here.
4503 class depgraph(object):
4505 pkg_tree_map = RootConfig.pkg_tree_map
4507 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4509 def __init__(self, settings, trees, myopts, myparams, spinner):
4510 self.settings = settings
4511 self.target_root = settings["ROOT"]
4512 self.myopts = myopts
4513 self.myparams = myparams
4515 if settings.get("PORTAGE_DEBUG", "") == "1":
4517 self.spinner = spinner
4518 self._running_root = trees["/"]["root_config"]
4519 self._opts_no_restart = Scheduler._opts_no_restart
4520 self.pkgsettings = {}
4521 # Maps slot atom to package for each Package added to the graph.
4522 self._slot_pkg_map = {}
4523 # Maps nodes to the reasons they were selected for reinstallation.
4524 self._reinstall_nodes = {}
4527 self._trees_orig = trees
4529 # Contains a filtered view of preferred packages that are selected
4530 # from available repositories.
4531 self._filtered_trees = {}
4532 # Contains installed packages and new packages that have been added
4534 self._graph_trees = {}
4535 # All Package instances
4536 self._pkg_cache = {}
4537 for myroot in trees:
4538 self.trees[myroot] = {}
4539 # Create a RootConfig instance that references
4540 # the FakeVartree instead of the real one.
4541 self.roots[myroot] = RootConfig(
4542 trees[myroot]["vartree"].settings,
4544 trees[myroot]["root_config"].setconfig)
4545 for tree in ("porttree", "bintree"):
4546 self.trees[myroot][tree] = trees[myroot][tree]
4547 self.trees[myroot]["vartree"] = \
4548 FakeVartree(trees[myroot]["root_config"],
4549 pkg_cache=self._pkg_cache)
4550 self.pkgsettings[myroot] = portage.config(
4551 clone=self.trees[myroot]["vartree"].settings)
4552 self._slot_pkg_map[myroot] = {}
4553 vardb = self.trees[myroot]["vartree"].dbapi
4554 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4555 "--buildpkgonly" not in self.myopts
4556 # This fakedbapi instance will model the state that the vdb will
4557 # have after new packages have been installed.
4558 fakedb = PackageVirtualDbapi(vardb.settings)
4559 if preload_installed_pkgs:
4561 self.spinner.update()
4562 # This triggers metadata updates via FakeVartree.
4563 vardb.aux_get(pkg.cpv, [])
4564 fakedb.cpv_inject(pkg)
4566 # Now that the vardb state is cached in our FakeVartree,
4567 # we won't be needing the real vartree cache for awhile.
4568 # To make some room on the heap, clear the vardbapi
4570 trees[myroot]["vartree"].dbapi._clear_cache()
4573 self.mydbapi[myroot] = fakedb
4576 graph_tree.dbapi = fakedb
4577 self._graph_trees[myroot] = {}
4578 self._filtered_trees[myroot] = {}
4579 # Substitute the graph tree for the vartree in dep_check() since we
4580 # want atom selections to be consistent with package selections
4581 # have already been made.
4582 self._graph_trees[myroot]["porttree"] = graph_tree
4583 self._graph_trees[myroot]["vartree"] = graph_tree
4584 def filtered_tree():
4586 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4587 self._filtered_trees[myroot]["porttree"] = filtered_tree
4589 # Passing in graph_tree as the vartree here could lead to better
4590 # atom selections in some cases by causing atoms for packages that
4591 # have been added to the graph to be preferred over other choices.
4592 # However, it can trigger atom selections that result in
4593 # unresolvable direct circular dependencies. For example, this
4594 # happens with gwydion-dylan which depends on either itself or
4595 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4596 # gwydion-dylan-bin needs to be selected in order to avoid a
4597 # an unresolvable direct circular dependency.
4599 # To solve the problem described above, pass in "graph_db" so that
4600 # packages that have been added to the graph are distinguishable
4601 # from other available packages and installed packages. Also, pass
4602 # the parent package into self._select_atoms() calls so that
4603 # unresolvable direct circular dependencies can be detected and
4604 # avoided when possible.
4605 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4606 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4609 portdb = self.trees[myroot]["porttree"].dbapi
4610 bindb = self.trees[myroot]["bintree"].dbapi
4611 vardb = self.trees[myroot]["vartree"].dbapi
4612 # (db, pkg_type, built, installed, db_keys)
4613 if "--usepkgonly" not in self.myopts:
4614 db_keys = list(portdb._aux_cache_keys)
4615 dbs.append((portdb, "ebuild", False, False, db_keys))
4616 if "--usepkg" in self.myopts:
4617 db_keys = list(bindb._aux_cache_keys)
4618 dbs.append((bindb, "binary", True, False, db_keys))
4619 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4620 dbs.append((vardb, "installed", True, True, db_keys))
4621 self._filtered_trees[myroot]["dbs"] = dbs
4622 if "--usepkg" in self.myopts:
4623 self.trees[myroot]["bintree"].populate(
4624 "--getbinpkg" in self.myopts,
4625 "--getbinpkgonly" in self.myopts)
4628 self.digraph=portage.digraph()
4629 # contains all sets added to the graph
4631 # contains atoms given as arguments
4632 self._sets["args"] = InternalPackageSet()
4633 # contains all atoms from all sets added to the graph, including
4634 # atoms given as arguments
4635 self._set_atoms = InternalPackageSet()
4636 self._atom_arg_map = {}
4637 # contains all nodes pulled in by self._set_atoms
4638 self._set_nodes = set()
4639 # Contains only Blocker -> Uninstall edges
4640 self._blocker_uninstalls = digraph()
4641 # Contains only Package -> Blocker edges
4642 self._blocker_parents = digraph()
4643 # Contains only irrelevant Package -> Blocker edges
4644 self._irrelevant_blockers = digraph()
4645 # Contains only unsolvable Package -> Blocker edges
4646 self._unsolvable_blockers = digraph()
4647 # Contains all Blocker -> Blocked Package edges
4648 self._blocked_pkgs = digraph()
4649 # Contains world packages that have been protected from
4650 # uninstallation but may not have been added to the graph
4651 # if the graph is not complete yet.
4652 self._blocked_world_pkgs = {}
4653 self._slot_collision_info = {}
4654 # Slot collision nodes are not allowed to block other packages since
4655 # blocker validation is only able to account for one package per slot.
4656 self._slot_collision_nodes = set()
4657 self._parent_atoms = {}
4658 self._slot_conflict_parent_atoms = set()
4659 self._serialized_tasks_cache = None
4660 self._scheduler_graph = None
4661 self._displayed_list = None
4662 self._pprovided_args = []
4663 self._missing_args = []
4664 self._masked_installed = set()
4665 self._unsatisfied_deps_for_display = []
4666 self._unsatisfied_blockers_for_display = None
4667 self._circular_deps_for_display = None
4668 self._dep_stack = []
4669 self._unsatisfied_deps = []
4670 self._initially_unsatisfied_deps = []
4671 self._ignored_deps = []
4672 self._required_set_names = set(["system", "world"])
4673 self._select_atoms = self._select_atoms_highest_available
4674 self._select_package = self._select_pkg_highest_available
4675 self._highest_pkg_cache = {}
# Print a human-readable report of slot conflicts: for each conflicting
# slot, the competing package instances and (a pruned list of) the parent
# atoms that pulled each one in, followed by a generated explanation
# where one is available.
# NOTE(review): gappy extraction -- "msg = []", "pruned_list = set()",
# indent definitions, break/return lines, docstring delimiters and the
# "if explanations:" counters are missing from this view.
4677 def _show_slot_collision_notice(self):
4678 """Show an informational message advising the user to mask one of the
4679 the packages. In some cases it may be possible to resolve this
4680 automatically, but support for backtracking (removal nodes that have
4681 already been selected) will be required in order to handle all possible
4685 if not self._slot_collision_info:
4688 self._show_merge_list()
4691 msg.append("\n!!! Multiple package instances within a single " + \
4692 "package slot have been pulled\n")
4693 msg.append("!!! into the dependency graph, resulting" + \
4694 " in a slot conflict:\n\n")
4696 # Max number of parents shown, to avoid flooding the display.
4698 explanation_columns = 70
4700 for (slot_atom, root), slot_nodes \
4701 in self._slot_collision_info.iteritems():
4702 msg.append(str(slot_atom))
4705 for node in slot_nodes:
4707 msg.append(str(node))
4708 parent_atoms = self._parent_atoms.get(node)
4711 # Prefer conflict atoms over others.
4712 for parent_atom in parent_atoms:
4713 if len(pruned_list) >= max_parents:
4715 if parent_atom in self._slot_conflict_parent_atoms:
4716 pruned_list.add(parent_atom)
4718 # If this package was pulled in by conflict atoms then
4719 # show those alone since those are the most interesting.
4721 # When generating the pruned list, prefer instances
4722 # of DependencyArg over instances of Package.
4723 for parent_atom in parent_atoms:
4724 if len(pruned_list) >= max_parents:
4726 parent, atom = parent_atom
4727 if isinstance(parent, DependencyArg):
4728 pruned_list.add(parent_atom)
4729 # Prefer Packages instances that themselves have been
4730 # pulled into collision slots.
4731 for parent_atom in parent_atoms:
4732 if len(pruned_list) >= max_parents:
4734 parent, atom = parent_atom
4735 if isinstance(parent, Package) and \
4736 (parent.slot_atom, parent.root) \
4737 in self._slot_collision_info:
4738 pruned_list.add(parent_atom)
# Finally fill any remaining display slots with arbitrary parents.
4739 for parent_atom in parent_atoms:
4740 if len(pruned_list) >= max_parents:
4742 pruned_list.add(parent_atom)
4743 omitted_parents = len(parent_atoms) - len(pruned_list)
4744 parent_atoms = pruned_list
4745 msg.append(" pulled in by\n")
4746 for parent_atom in parent_atoms:
4747 parent, atom = parent_atom
4748 msg.append(2*indent)
4749 if isinstance(parent,
4750 (PackageArg, AtomArg)):
4751 # For PackageArg and AtomArg types, it's
4752 # redundant to display the atom attribute.
4753 msg.append(str(parent))
4755 # Display the specific atom from SetArg or
4757 msg.append("%s required by %s" % (atom, parent))
4760 msg.append(2*indent)
4761 msg.append("(and %d more)\n" % omitted_parents)
4763 msg.append(" (no parents)\n")
4765 explanation = self._slot_conflict_explanation(slot_nodes)
4768 msg.append(indent + "Explanation:\n\n")
4769 for line in textwrap.wrap(explanation, explanation_columns):
4770 msg.append(2*indent + line + "\n")
4773 sys.stderr.write("".join(msg))
4776 explanations_for_all = explanations == len(self._slot_collision_info)
# If every conflict already got an explanation (or --quiet), skip the
# generic masking advice below.
4778 if explanations_for_all or "--quiet" in self.myopts:
4782 msg.append("It may be possible to solve this problem ")
4783 msg.append("by using package.mask to prevent one of ")
4784 msg.append("those packages from being selected. ")
4785 msg.append("However, it is also possible that conflicting ")
4786 msg.append("dependencies exist such that they are impossible to ")
4787 msg.append("satisfy simultaneously. If such a conflict exists in ")
4788 msg.append("the dependencies of two different packages, then those ")
4789 msg.append("packages can not be installed simultaneously.")
4791 from formatter import AbstractFormatter, DumbWriter
4792 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4794 f.add_flowing_data(x)
4798 msg.append("For more information, see MASKED PACKAGES ")
4799 msg.append("section in the emerge man page or refer ")
4800 msg.append("to the Gentoo Handbook.")
4802 f.add_flowing_data(x)
# Generate a human-readable explanation string for a two-package slot
# conflict caused by USE deps, or (in the missing tail) None when no
# useful suggestion exists.
# NOTE(review): gappy extraction -- docstring delimiters, several
# return None / continue / break lines, "matched_node = None" and the
# "if atom.use is None" style checks, plus the final "return explanation"
# are missing from this view.
4806 def _slot_conflict_explanation(self, slot_nodes):
4808 When a slot conflict occurs due to USE deps, there are a few
4809 different cases to consider:
4811 1) New USE are correctly set but --newuse wasn't requested so an
4812 installed package with incorrect USE happened to get pulled
4813 into graph before the new one.
4815 2) New USE are incorrectly set but an installed package has correct
4816 USE so it got pulled into the graph, and a new instance also got
4817 pulled in due to --newuse or an upgrade.
4819 3) Multiple USE deps exist that can't be satisfied simultaneously,
4820 and multiple package instances got pulled into the same slot to
4821 satisfy the conflicting deps.
4823 Currently, explanations and suggested courses of action are generated
4824 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4827 if len(slot_nodes) != 2:
4828 # Suggestions are only implemented for
4829 # conflicts between two packages.
4832 all_conflict_atoms = self._slot_conflict_parent_atoms
4834 matched_atoms = None
4835 unmatched_node = None
# Classify the two nodes: which one is matched by conflict atoms.
4836 for node in slot_nodes:
4837 parent_atoms = self._parent_atoms.get(node)
4838 if not parent_atoms:
4839 # Normally, there are always parent atoms. If there are
4840 # none then something unexpected is happening and there's
4841 # currently no suggestion for this case.
4843 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4844 for parent_atom in conflict_atoms:
4845 parent, atom = parent_atom
4847 # Suggestions are currently only implemented for cases
4848 # in which all conflict atoms have USE deps.
4851 if matched_node is not None:
4852 # If conflict atoms match multiple nodes
4853 # then there's no suggestion.
4856 matched_atoms = conflict_atoms
4858 if unmatched_node is not None:
4859 # Neither node is matched by conflict atoms, and
4860 # there is no suggestion for this case.
4862 unmatched_node = node
4864 if matched_node is None or unmatched_node is None:
4865 # This shouldn't happen.
4868 if unmatched_node.installed and not matched_node.installed and \
4869 unmatched_node.cpv == matched_node.cpv:
4870 # If the conflicting packages are the same version then
4871 # --newuse should be all that's needed. If they are different
4872 # versions then there's some other problem.
4873 return "New USE are correctly set, but --newuse wasn't" + \
4874 " requested, so an installed package with incorrect USE " + \
4875 "happened to get pulled into the dependency graph. " + \
4876 "In order to solve " + \
4877 "this, either specify the --newuse option or explicitly " + \
4878 " reinstall '%s'." % matched_node.slot_atom
# Case 2: the installed node is the matched one -- USE is mis-set.
4880 if matched_node.installed and not unmatched_node.installed:
4881 atoms = sorted(set(atom for parent, atom in matched_atoms))
4882 explanation = ("New USE for '%s' are incorrectly set. " + \
4883 "In order to solve this, adjust USE to satisfy '%s'") % \
4884 (matched_node.slot_atom, atoms[0])
# Join additional atoms in natural-language list form.
4886 for atom in atoms[1:-1]:
4887 explanation += ", '%s'" % (atom,)
4890 explanation += " and '%s'" % (atoms[-1],)
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.
		"""
		# NOTE(review): a few flow-control lines (continue / else) appear to
		# be missing from this excerpt; nesting follows the surviving lines.
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Union of every (parent, atom) pair that pulled any of the
			# conflicting packages into this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					# No recorded parents for this package.
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
						# NOTE(review): this line presumably belongs to a lost
						# "else:" branch — an atom that fails to match pkg only
						# matches a subset of the slot's packages and therefore
						# identifies the conflict. Confirm against the full file.
						self._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return a set of flags that trigger reinstallation, or None if there
		are no such flags."""
		# NOTE(review): the "if flags: return flags" / final "return None"
		# lines that normally follow each branch appear to have been lost
		# from this excerpt.
		if "--newuse" in self.myopts:
			# Flags added to or removed from IUSE (unless forced by the
			# profile) trigger a reinstall...
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			# ...as does any change in the enabled state of a flag that is
			# present in both old and new IUSE.
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
		elif "changed-use" == self.myopts.get("--reinstall"):
			# --reinstall=changed-use: only enabled-state changes count;
			# IUSE additions/removals alone do not trigger a reinstall.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
	def _create_graph(self, allow_unsatisfied=False):
		# Drain self._dep_stack: Package entries are expanded into their
		# dependencies, plain Dependency entries are resolved and added.
		# NOTE(review): the enclosing "while dep_stack:" loop header and the
		# 0/1 return statements appear to have been lost from this excerpt;
		# indentation preserves the surviving lines' original nesting.
		dep_stack = self._dep_stack
			self.spinner.update()
			dep = dep_stack.pop()
			if isinstance(dep, Package):
				# Package entry: add its dependency edges.
				if not self._add_pkg_deps(dep,
					allow_unsatisfied=allow_unsatisfied):
			# Dependency entry: select a package for it and add it.
			if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""
		Add a single Dependency to the graph: record blockers, select a
		matching package, and hand it to self._add_pkg().

		NOTE(review): several statements (the "if dep.blocker:" /
		"if not dep_pkg:" wrappers, a "try:" header, and the early returns)
		appear to have been lost from this excerpt; indentation preserves
		the surviving lines' original nesting.
		"""
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		# Only deps near the top of the graph count as "update" deps.
		update = "--update" in self.myopts and dep.depth <= 1
			if not buildpkgonly and \
				dep.parent not in self._slot_collision_nodes:
				if dep.parent.onlydeps:
					# It's safe to ignore blockers if the
					# parent is an --onlydeps node.
				# The blocker applies to the root where
				# the parent is or will be installed.
				blocker = Blocker(atom=dep.atom,
					eapi=dep.parent.metadata["EAPI"],
					root=dep.parent.root)
				self._blocker_parents.add(blocker, dep.parent)
		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
			if dep.priority.optional:
				# This could be an unecessary build-time dep
				# pulled in by --with-bdeps=y.
			if allow_unsatisfied:
				# Defer: the caller may resolve these later.
				self._unsatisfied_deps.append(dep)
			self._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))
		# In some cases, dep_check will return deps that shouldn't
		# be proccessed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or empty or deep or update):
			if dep.root == self.target_root:
					myarg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
			self._ignored_deps.append(dep)
		if not self._add_pkg(dep_pkg, dep):
	def _add_pkg(self, pkg, dep):
		"""
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
			Think --onlydeps, we need to ignore packages in that case.
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge
		"""
		# NOTE(review): this excerpt has lost a number of lines (the dep-is-
		# None fallback, "if existing_node:" / "try:" wrappers, some call
		# continuations and several returns); indentation preserves the
		# surviving lines' original nesting.
		myparent = dep.parent
		priority = dep.priority
		if priority is None:
			priority = DepPriority()

		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self.pkgsettings[pkg.root]

		# Command line argument atoms (if any) that match this package.
			arg_atoms = list(self._iter_atoms_for_pkg(pkg))
		except portage.exception.InvalidDependString, e:
			if not pkg.installed:
				show_invalid_depstring_notice(
					pkg, pkg.metadata["PROVIDE"], str(e))

		if not pkg.onlydeps:
			if not pkg.installed and \
				"empty" not in self.myparams and \
				vardbapi.match(pkg.slot_atom):
				# Increase the priority of dependencies on packages that
				# are being rebuilt. This optimizes merge order so that
				# dependencies are rebuilt/updated as soon as possible,
				# which is needed especially when emerge is called by
				# revdep-rebuild since dependencies may be affected by ABI
				# breakage that has rendered them useless. Don't adjust
				# priority here when in "empty" mode since all packages
				# are being merged in that case.
				priority.rebuild = True

			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
			slot_collision = False
				existing_node_matches = pkg.cpv == existing_node.cpv
				if existing_node_matches and \
					pkg != existing_node and \
					dep.atom is not None:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					atom_set = InternalPackageSet(initial_atoms=[dep.atom])
					if not atom_set.findAtomForPackage(existing_node):
						existing_node_matches = False
				if existing_node_matches:
					# The existing node can be reused.
					for parent_atom in arg_atoms:
						parent, atom = parent_atom
						self.digraph.add(existing_node, parent,
						self._add_parent_atom(existing_node, parent_atom)
					# If a direct circular dependency is not an unsatisfied
					# buildtime dependency then drop it here since otherwise
					# it can skew the merge order calculation in an unwanted
					# way.
					if existing_node != myparent or \
						(priority.buildtime and not priority.satisfied):
						self.digraph.addnode(existing_node, myparent,
						if dep.atom is not None and dep.parent is not None:
							self._add_parent_atom(existing_node,
								(dep.parent, dep.atom))
					# A slot collision has occurred. Sometimes this coincides
					# with unresolvable blockers, so the slot collision will be
					# shown later if there are no unresolvable blockers.
					self._add_slot_conflict(pkg)
					slot_collision = True

			if slot_collision:
				# Now add this node to the graph so that self.display()
				# can show use flags and --tree portage.output. This node is
				# only being partially added to the graph. It must not be
				# allowed to interfere with the other nodes that have been
				# added. Do not overwrite data for existing nodes in
				# self.mydbapi since that data will be used for blocker
				# Even though the graph is now invalid, continue to process
				# dependencies so that things like --fetchonly can still
				# function despite collisions.
			elif not previously_added:
				self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
				self.mydbapi[pkg.root].cpv_inject(pkg)
				self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

			if not pkg.installed:
				# Allow this package to satisfy old-style virtuals in case it
				# doesn't already. Any pre-existing providers will be preferred
					pkgsettings.setinst(pkg.cpv, pkg.metadata)
					# For consistency, also update the global virtuals.
					settings = self.roots[pkg.root].settings
					settings.setinst(pkg.cpv, pkg.metadata)
				except portage.exception.InvalidDependString, e:
					show_invalid_depstring_notice(
						pkg, pkg.metadata["PROVIDE"], str(e))

			self._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

		# Edges from command line arguments that matched this package.
		for parent_atom in arg_atoms:
			parent, atom = parent_atom
			self.digraph.add(pkg, parent, priority=priority)
			self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		We want to go deeper on a few occasions:
		Installing package A, we need to make sure package A's deps are met.
		emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		dep_stack = self._dep_stack
		if "recurse" not in self.myparams:
		elif pkg.installed and \
			"deep" not in self.myparams:
			# Without --deep, deps of installed packages are deferred.
			dep_stack = self._ignored_deps

		self.spinner.update()

		if not previously_added:
			dep_stack.append(pkg)
5192 def _add_parent_atom(self, pkg, parent_atom):
5193 parent_atoms = self._parent_atoms.get(pkg)
5194 if parent_atoms is None:
5195 parent_atoms = set()
5196 self._parent_atoms[pkg] = parent_atoms
5197 parent_atoms.add(parent_atom)
	def _add_slot_conflict(self, pkg):
		"""
		Record pkg as a participant in a slot conflict, tracking both the
		flat conflict node set (self._slot_collision_nodes) and the
		per-(slot_atom, root) grouping in self._slot_collision_info.
		"""
		# NOTE(review): the "slot_nodes = set()" initialization inside the
		# None branch and the trailing "slot_nodes.add(pkg)" appear to have
		# been lost from this excerpt.
		self._slot_collision_nodes.add(pkg)
		slot_key = (pkg.slot_atom, pkg.root)
		slot_nodes = self._slot_collision_info.get(slot_key)
		if slot_nodes is None:
			# First conflict for this slot: seed the group with the package
			# currently occupying the slot in the graph.
			slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
			self._slot_collision_info[slot_key] = slot_nodes
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		"""
		Add pkg's DEPEND/RDEPEND/PDEPEND atoms to the graph as Dependency
		objects, with priorities reflecting each dependency class.

		NOTE(review): assorted lines (myroot/mykey/jbigkey assignments,
		"edepend={}", loop and try: headers, else: branches and the
		returns) appear to have been lost from this excerpt; indentation
		preserves the surviving lines' original nesting.
		"""
		mytype = pkg.type_name
		metadata = pkg.metadata
		myuse = pkg.use.enabled
		depth = pkg.depth + 1
		removal_action = "remove" in self.myparams

		# Raw dep strings, keyed by dependency class.
		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
			edepend[k] = metadata[k]

		if not pkg.built and \
			"--buildpkgonly" in self.myopts and \
			"deep" not in self.myparams and \
			"empty" not in self.myparams:
			# With --buildpkgonly and shallow resolution, runtime deps are
			# irrelevant since nothing is merged to the live root.
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""
		bdeps_optional = False

		if pkg.built and not removal_action:
			if self.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				bdeps_optional = True
				# built packages do not have build time dependencies.
				edepend["DEPEND"] = ""

		if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
			edepend["DEPEND"] = ""

		# (root, dep string, priority) triples: build deps apply to "/",
		# runtime deps apply to the package's own root.
			("/", edepend["DEPEND"],
				self._priority(buildtime=(not bdeps_optional),
				optional=bdeps_optional)),
			(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
			(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

		debug = "--debug" in self.myopts
		# Installed packages get lenient dep-string parsing.
		strict = mytype != "installed"
			for dep_root, dep_string, dep_priority in deps:
					print "Parent: ", jbigkey
					print "Depstring:", dep_string
					print "Priority:", dep_priority
				vardb = self.roots[dep_root].trees["vartree"].dbapi
					selected_atoms = self._select_atoms(dep_root,
						dep_string, myuse=myuse, parent=pkg, strict=strict,
						priority=dep_priority)
				except portage.exception.InvalidDependString, e:
					show_invalid_depstring_notice(jbigkey, dep_string, str(e))
					print "Candidates:", selected_atoms

				for atom in selected_atoms:
						atom = portage.dep.Atom(atom)
						mypriority = dep_priority.copy()
						# A dep already satisfied by an installed package
						# gets its priority marked satisfied.
						if not atom.blocker and vardb.match(atom):
							mypriority.satisfied = True
						if not self._add_dep(Dependency(atom=atom,
							blocker=atom.blocker, depth=depth, parent=pkg,
							priority=mypriority, root=dep_root),
							allow_unsatisfied=allow_unsatisfied):
					except portage.exception.InvalidAtom, e:
						show_invalid_depstring_notice(
							pkg, dep_string, str(e))
						if not pkg.installed:
					print "Exiting...", jbigkey
		except portage.exception.AmbiguousPackageName, e:
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
				portage.writemsg(" %s\n" % cpv, noiselevel=-1)
			portage.writemsg("\n", noiselevel=-1)
			if mytype == "binary":
					"!!! This binary package cannot be installed: '%s'\n" % \
					mykey, noiselevel=-1)
			elif mytype == "ebuild":
				portdb = self.roots[myroot].trees["porttree"].dbapi
				myebuild, mylocation = portdb.findname2(mykey)
				portage.writemsg("!!! This ebuild cannot be installed: " + \
					"'%s'\n" % myebuild, noiselevel=-1)
			portage.writemsg("!!! Please notify the package maintainer " + \
				"that atoms must be fully-qualified.\n", noiselevel=-1)
5325 def _priority(self, **kwargs):
5326 if "remove" in self.myparams:
5327 priority_constructor = UnmergeDepPriority
5329 priority_constructor = DepPriority
5330 return priority_constructor(**kwargs)
	def _dep_expand(self, root_config, atom_without_category):
		"""
		Expand an atom that lacks a category by scanning all configured
		package databases for categories containing a matching package name.

		@param root_config: a root config instance
		@type root_config: RootConfig
		@param atom_without_category: an atom without a category component
		@type atom_without_category: String
		@returns: a list of atoms containing categories (possibly empty)

		NOTE(review): the "categories" set initialization/population and
		the "deps = []" / "return deps" lines appear to have been lost
		from this excerpt.
		"""
		# Derive the bare package name by grafting a dummy "null" category
		# onto the atom and splitting it back apart.
		null_cp = portage.dep_getkey(insert_category_into_atom(
			atom_without_category, "null"))
		cat, atom_pn = portage.catsplit(null_cp)

		dbs = self._filtered_trees[root_config.root]["dbs"]
		for db, pkg_type, built, installed, db_keys in dbs:
			for cat in db.categories:
				if db.cp_list("%s/%s" % (cat, atom_pn)):
		for cat in categories:
			deps.append(insert_category_into_atom(
				atom_without_category, cat))
	def _have_new_virt(self, root, atom_cp):
		"""
		Return whether atom_cp exists in any of the filtered package
		databases for the given root (used to detect new-style virtuals).

		NOTE(review): the result variable handling and the final return
		appear to have been lost from this excerpt.
		"""
		for db, pkg_type, built, installed, db_keys in \
			self._filtered_trees[root]["dbs"]:
			if db.cp_list(atom_cp):
	def _iter_atoms_for_pkg(self, pkg):
		"""
		Generator yielding command line argument info for atoms that match
		the given package.

		NOTE(review): several continue/break statements, the higher_slot
		initialization, and the yield appear to have been lost from this
		excerpt; indentation preserves the surviving lines' nesting.
		"""
		# TODO: add multiple $ROOT support
		if pkg.root != self.target_root:
		atom_arg_map = self._atom_arg_map
		root_config = self.roots[pkg.root]
		for atom in self._set_atoms.iterAtomsForPackage(pkg):
			atom_cp = portage.dep_getkey(atom)
			# Skip the atom if a new-style virtual can satisfy it instead.
			if atom_cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom_cp):
			visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			for visible_pkg in visible_pkgs:
				if visible_pkg.cp != atom_cp:
				if pkg >= visible_pkg:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
				if pkg.slot_atom != visible_pkg.slot_atom:
					higher_slot = visible_pkg
			if higher_slot is not None:
			for arg in atom_arg_map[(atom, pkg.root)]:
				if isinstance(arg, PackageArg) and \
	def select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
		appropriate depgraph and return a favorite list."""
		# NOTE(review): many lines have been lost from this excerpt (the
		# "for x in myfiles:" loop header, list initializations, several
		# if/else and try: wrappers, call continuations and returns).
		# Indentation preserves the surviving lines' original nesting;
		# consult the complete file before modifying.
		debug = "--debug" in self.myopts
		root_config = self.roots[self.target_root]
		sets = root_config.sets
		getSetAtoms = root_config.setconfig.getSetAtoms
		myroot = self.target_root
		dbs = self._filtered_trees[myroot]["dbs"]
		vardb = self.trees[myroot]["vartree"].dbapi
		real_vardb = self._trees_orig[myroot]["vartree"].dbapi
		portdb = self.trees[myroot]["porttree"].dbapi
		bindb = self.trees[myroot]["bintree"].dbapi
		pkgsettings = self.pkgsettings[myroot]
		onlydeps = "--onlydeps" in self.myopts
			# Dispatch each argument by its file extension / shape.
			ext = os.path.splitext(x)[1]
				# Binary package argument: locate the tbz2 under PKGDIR.
				if not os.path.exists(x):
						os.path.join(pkgsettings["PKGDIR"], "All", x)):
						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
					elif os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], x)):
						x = os.path.join(pkgsettings["PKGDIR"], x)
						print "\n\n!!! Binary package '"+str(x)+"' does not exist."
						print "!!! Please ensure the tbz2 exists as specified.\n"
						return 0, myfavorites
				mytbz2=portage.xpak.tbz2(x)
				mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
				# The tbz2 must be the one the bintree would resolve to.
				if os.path.realpath(x) != \
					os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
					print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
					return 0, myfavorites
				db_keys = list(bindb._aux_cache_keys)
				metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
				pkg = Package(type_name="binary", root_config=root_config,
					cpv=mykey, built=True, metadata=metadata,
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif ext==".ebuild":
				# Raw ebuild path argument: must live in a valid tree.
				ebuild_path = portage.util.normalize_path(os.path.abspath(x))
				pkgdir = os.path.dirname(ebuild_path)
				tree_root = os.path.dirname(os.path.dirname(pkgdir))
				cp = pkgdir[len(tree_root)+1:]
				e = portage.exception.PackageNotFound(
					("%s is not in a valid portage tree " + \
					"hierarchy or does not exist") % x)
				if not portage.isvalidatom(cp):
				cat = portage.catsplit(cp)[0]
				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
				if not portage.isvalidatom("="+mykey):
				ebuild_path = portdb.findname(mykey)
					if ebuild_path != os.path.join(os.path.realpath(tree_root),
						cp, os.path.basename(ebuild_path)):
						print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
						return 0, myfavorites
					if mykey not in portdb.xmatch(
						"match-visible", portage.dep_getkey(mykey)):
						print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
						print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
						print colorize("BAD", "*** page for details.")
						countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
					raise portage.exception.PackageNotFound(
						"%s is not in a valid portage tree hierarchy or does not exist" % x)
				db_keys = list(portdb._aux_cache_keys)
				metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
				pkg = Package(type_name="ebuild", root_config=root_config,
					cpv=mykey, metadata=metadata, onlydeps=onlydeps)
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			elif x.startswith(os.path.sep):
				# Absolute path argument: resolve via file ownership below.
				if not x.startswith(myroot):
					portage.writemsg(("\n\n!!! '%s' does not start with" + \
						" $ROOT.\n") % x, noiselevel=-1)
				# Queue these up since it's most efficient to handle
				# multiple files in a single iter_owners() call.
				lookup_owners.append(x)
				if x in ("system", "world"):
				if x.startswith(SETPREFIX):
					s = x[len(SETPREFIX):]
						raise portage.exception.PackageSetNotFound(s)
					# Recursively expand sets so that containment tests in
					# self._get_parent_sets() properly match atoms in nested
					# sets (like if world contains system).
					expanded_set = InternalPackageSet(
						initial_atoms=getSetAtoms(s))
					self._sets[s] = expanded_set
					args.append(SetArg(arg=x, set=expanded_set,
						root_config=root_config))
				if not is_valid_package_atom(x):
					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
				# Don't expand categories or old-style virtuals here unless
				# necessary. Expansion of old-style virtuals here causes at
				# least the following problems:
				#   1) It's more difficult to determine which set(s) an atom
				#      came from, if any.
				#   2) It takes away freedom from the resolver to choose other
				#      possible expansions when necessary.
					args.append(AtomArg(arg=x, atom=x,
						root_config=root_config))
				expanded_atoms = self._dep_expand(root_config, x)
				installed_cp_set = set()
				for atom in expanded_atoms:
					atom_cp = portage.dep_getkey(atom)
					if vardb.cp_list(atom_cp):
						installed_cp_set.add(atom_cp)
				# Prefer the installed category when the name is otherwise
				# ambiguous across categories.
				if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
					installed_cp = iter(installed_cp_set).next()
					expanded_atoms = [atom for atom in expanded_atoms \
						if portage.dep_getkey(atom) == installed_cp]

				if len(expanded_atoms) > 1:
					ambiguous_package_name(x, expanded_atoms, root_config,
						self.spinner, self.myopts)
					return False, myfavorites
					atom = expanded_atoms[0]
					null_atom = insert_category_into_atom(x, "null")
					null_cp = portage.dep_getkey(null_atom)
					cat, atom_pn = portage.catsplit(null_cp)
					virts_p = root_config.settings.get_virts_p().get(atom_pn)
						# Allow the depgraph to choose which virtual.
						atom = insert_category_into_atom(x, "virtual")
						atom = insert_category_into_atom(x, "null")

				args.append(AtomArg(arg=x, atom=atom,
					root_config=root_config))

			# Resolve queued absolute paths to the packages that own them.
			search_for_multiple = False
			if len(lookup_owners) > 1:
				search_for_multiple = True

			for x in lookup_owners:
				if not search_for_multiple and os.path.isdir(x):
					search_for_multiple = True
				relative_paths.append(x[len(myroot):])

			for pkg, relative_path in \
				real_vardb._owners.iter_owners(relative_paths):
				owners.add(pkg.mycpv)
				if not search_for_multiple:

				portage.writemsg(("\n\n!!! '%s' is not claimed " + \
					"by any package.\n") % lookup_owners[0], noiselevel=-1)

				slot = vardb.aux_get(cpv, ["SLOT"])[0]
					# portage now masks packages with missing slot, but it's
					# possible that one was installed by an older version
					atom = portage.cpv_getkey(cpv)
					atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
				args.append(AtomArg(arg=atom, atom=atom,
					root_config=root_config))

		if "--update" in self.myopts:
			# In some cases, the greedy slots behavior can pull in a slot that
			# the user would want to uninstall due to it being blocked by a
			# newer version in a different slot. Therefore, it's necessary to
			# detect and discard any that should be uninstalled. Each time
			# that arguments are updated, package selections are repeated in
			# order to ensure consistency with the current arguments:
			#
			#  1) Initialize args
			#  2) Select packages and generate initial greedy atoms
			#  3) Update args with greedy atoms
			#  4) Select packages and generate greedy atoms again, while
			#     accounting for any blockers between selected packages
			#  5) Update args with revised greedy atoms
			self._set_args(args)
				greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom):
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))
			self._set_args(greedy_args)
			# Revise greedy atoms, accounting for any blockers
			# between selected packages.
			revised_greedy_args = []
				revised_greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
				for atom in self._greedy_slots(arg.root_config, arg.atom,
					blocker_lookahead=True):
					revised_greedy_args.append(
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))
			args = revised_greedy_args
			del revised_greedy_args

		self._set_args(args)

		# Record the favorites that will be written to the world file.
		myfavorites = set(myfavorites)
			if isinstance(arg, (AtomArg, PackageArg)):
				myfavorites.add(arg.atom)
			elif isinstance(arg, SetArg):
				myfavorites.add(arg.arg)
		myfavorites = list(myfavorites)

		pprovideddict = pkgsettings.pprovideddict
			portage.writemsg("\n", noiselevel=-1)
		# Order needs to be preserved since a feature of --nodeps
		# is to allow the user to force a specific merge order.
			for atom in arg.set:
				self.spinner.update()
				dep = Dependency(atom=atom, onlydeps=onlydeps,
					root=myroot, parent=arg)
				atom_cp = portage.dep_getkey(atom)
					pprovided = pprovideddict.get(portage.dep_getkey(atom))
					if pprovided and portage.match_from_list(atom, pprovided):
						# A provided package has been specified on the command line.
						self._pprovided_args.append((arg, atom))
					if isinstance(arg, PackageArg):
						if not self._add_pkg(arg.package, dep) or \
							not self._create_graph():
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % arg.arg)
							return 0, myfavorites
						portage.writemsg(" Arg: %s\n Atom: %s\n" % \
							(arg, atom), noiselevel=-1)
					pkg, existing_node = self._select_package(
						myroot, atom, onlydeps=onlydeps)
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							self._unsatisfied_deps_for_display.append(
								((myroot, atom), {}))
							return 0, myfavorites
						self._missing_args.append((arg, atom))
					if atom_cp != pkg.cp:
						# For old-style virtuals, we need to repeat the
						# package.provided check against the selected package.
						expanded_atom = atom.replace(atom_cp, pkg.cp)
						pprovided = pprovideddict.get(pkg.cp)
							portage.match_from_list(expanded_atom, pprovided):
							# A provided package has been
							# specified on the command line.
							self._pprovided_args.append((arg, atom))
					if pkg.installed and "selective" not in self.myparams:
						self._unsatisfied_deps_for_display.append(
							((myroot, atom), {}))
						# Previous behavior was to bail out in this case, but
						# since the dep is satisfied by the installed package,
						# it's more friendly to continue building the graph
						# and just show a warning message. Therefore, only bail
						# out here if the atom is not from either the system or
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							return 0, myfavorites

					# Add the selected package to the graph as soon as possible
					# so that later dep_check() calls can use it as feedback
					# for making more consistent atom selections.
					if not self._add_pkg(pkg, dep):
						if isinstance(arg, SetArg):
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s from %s\n") % \
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % atom)
						return 0, myfavorites

		except portage.exception.MissingSignature, e:
			portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
			portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
			portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
			portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
			portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
			return 0, myfavorites
		except portage.exception.InvalidSignature, e:
			portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
			portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
			portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
			portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
			portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
			return 0, myfavorites
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
			print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)

		# Now that the root packages have been added to the graph,
		# process the dependencies.
		if not self._create_graph():
			return 0, myfavorites

			if "--usepkgonly" in self.myopts:
				for xs in self.digraph.all_nodes():
					if not isinstance(xs, Package):
					if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
							print "Missing binary for:",xs[2]
		except self._unknown_internal_error:
			return False, myfavorites

		# We're true here unless we are missing binaries.
		return (not missing,myfavorites)
	def _set_args(self, args):
		"""
		Create the "args" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		# NOTE(review): a few lines (args_set.clear(), loop headers, the
		# duplicate-reference guards) appear to have been lost from this
		# excerpt; nesting follows the surviving lines.
		args_set = self._sets["args"]
			if not isinstance(arg, (AtomArg, PackageArg)):
			if atom in args_set:

		# Rebuild the flattened atom set from every configured set.
		self._set_atoms.clear()
		self._set_atoms.update(chain(*self._sets.itervalues()))
		# Rebuild the (atom, root) -> argument references map.
		atom_arg_map = self._atom_arg_map
		atom_arg_map.clear()
			for atom in arg.set:
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
					atom_arg_map[atom_key] = refs

		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._highest_pkg_cache.clear()
		for trees in self._filtered_trees.itervalues():
			trees["porttree"].dbapi._clear_cache()
# Compute slot atoms for installed slots other than the slot of the
# highest visible match of `atom`; used by the "greedy" SLOT handling.
# NOTE(review): some lines are elided in this excerpt (early returns,
# set initialization, try/continue bodies).
5807 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5809 Return a list of slot atoms corresponding to installed slots that
5810 differ from the slot of the highest visible match. When
5811 blocker_lookahead is True, slot atoms that would trigger a blocker
5812 conflict are automatically discarded, potentially allowing automatic
5813 uninstallation of older slots when appropriate.
5815 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5816 if highest_pkg is None:
# Collect SLOT values of installed packages matching the atom
# (the `slots` set creation is elided above this point).
5818 vardb = root_config.trees["vartree"].dbapi
5820 for cpv in vardb.match(atom):
5821 # don't mix new virtuals with old virtuals
5822 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5823 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5825 slots.add(highest_pkg.metadata["SLOT"])
5829 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep the selected package only when it is
# the same cp and a lower version than the highest visible match.
5832 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5833 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5834 if pkg is not None and \
5835 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5836 greedy_pkgs.append(pkg)
5839 if not blocker_lookahead:
5840 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: build per-package blocker sets from *DEPEND and
# drop slots that would conflict with the highest match.
5843 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5844 for pkg in greedy_pkgs + [highest_pkg]:
5845 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5847 atoms = self._select_atoms(
5848 pkg.root, dep_str, pkg.use.enabled,
5849 parent=pkg, strict=True)
5850 except portage.exception.InvalidDependString:
5852 blocker_atoms = (x for x in atoms if x.blocker)
5853 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5855 if highest_pkg not in blockers:
5858 # filter packages with invalid deps
5859 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5861 # filter packages that conflict with highest_pkg
5862 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5863 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5864 blockers[pkg].findAtomForPackage(highest_pkg))]
5869 # If two packages conflict, discard the lower version.
5870 discard_pkgs = set()
5871 greedy_pkgs.sort(reverse=True)
5872 for i in xrange(len(greedy_pkgs) - 1):
5873 pkg1 = greedy_pkgs[i]
5874 if pkg1 in discard_pkgs:
5876 for j in xrange(i + 1, len(greedy_pkgs)):
5877 pkg2 = greedy_pkgs[j]
5878 if pkg2 in discard_pkgs:
5880 if blockers[pkg1].findAtomForPackage(pkg2) or \
5881 blockers[pkg2].findAtomForPackage(pkg1):
5883 discard_pkgs.add(pkg2)
5885 return [pkg.slot_atom for pkg in greedy_pkgs \
5886 if pkg not in discard_pkgs]
# Thin wrapper: force atom selection to use the graph trees so that
# matches prefer already-graphed / installed-and-unscheduled packages.
5888 def _select_atoms_from_graph(self, *pargs, **kwargs):
5890 Prefer atoms matching packages that have already been
5891 added to the graph or those that are installed and have
5892 not been scheduled for replacement.
# Override the trees with the graph view, then delegate.
5894 kwargs["trees"] = self._graph_trees
5895 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run dep_check() over a dependency string and return the atoms it
# selected. NOTE(review): try/finally structure around dep_check is
# partially elided in this excerpt; _dep_check_strict is toggled off
# for the call and restored afterwards.
5897 def _select_atoms_highest_available(self, root, depstring,
5898 myuse=None, parent=None, strict=True, trees=None, priority=None):
5899 """This will raise InvalidDependString if necessary. If trees is
5900 None then self._filtered_trees is used."""
5901 pkgsettings = self.pkgsettings[root]
5903 trees = self._filtered_trees
5904 if not getattr(priority, "buildtime", False):
5905 # The parent should only be passed to dep_check() for buildtime
5906 # dependencies since that's the only case when it's appropriate
5907 # to trigger the circular dependency avoidance code which uses it.
5908 # It's important not to trigger the same circular dependency
5909 # avoidance code for runtime dependencies since it's not needed
5910 # and it can promote an incorrect package choice.
5914 if parent is not None:
5915 trees[root]["parent"] = parent
5917 portage.dep._dep_check_strict = False
5918 mycheck = portage.dep_check(depstring, None,
5919 pkgsettings, myuse=myuse,
5920 myroot=root, trees=trees)
# Cleanup: remove the temporary "parent" entry and restore strict
# dep checking (presumably in a finally block — elided here).
5922 if parent is not None:
5923 trees[root].pop("parent")
5924 portage.dep._dep_check_strict = True
# On failure, mycheck[1] carries the error message.
5926 raise portage.exception.InvalidDependString(mycheck[1])
5927 selected_atoms = mycheck[1]
5928 return selected_atoms
# Print a human-readable explanation of why `atom` could not be
# satisfied on `root`: masked packages, missing USE/IUSE flags, or no
# ebuilds at all; then print the dependency chain that pulled it in.
# NOTE(review): numerous lines are elided in this excerpt (list
# initializations, break/continue statements, msg assembly).
5930 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5931 atom = portage.dep.Atom(atom)
5932 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Strip USE deps (and re-attach the slot) so masking can be evaluated
# independently of USE conditionals.
5933 atom_without_use = atom
5935 atom_without_use = portage.dep.remove_slot(atom)
5937 atom_without_use += ":" + atom.slot
5938 atom_without_use = portage.dep.Atom(atom_without_use)
5939 xinfo = '"%s"' % atom
5942 # Discard null/ from failed cpv_expand category expansion.
5943 xinfo = xinfo.replace("null/", "")
5944 masked_packages = []
5946 masked_pkg_instances = set()
5947 missing_licenses = []
5948 have_eapi_mask = False
5949 pkgsettings = self.pkgsettings[root]
5950 implicit_iuse = pkgsettings._get_implicit_iuse()
5951 root_config = self.roots[root]
5952 portdb = self.roots[root].trees["porttree"].dbapi
5953 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidates and collect
# mask reasons per cpv.
5954 for db, pkg_type, built, installed, db_keys in dbs:
5958 if hasattr(db, "xmatch"):
5959 cpv_list = db.xmatch("match-all", atom_without_use)
5961 cpv_list = db.match(atom_without_use)
5964 for cpv in cpv_list:
5965 metadata, mreasons = get_mask_info(root_config, cpv,
5966 pkgsettings, db, pkg_type, built, installed, db_keys)
5967 if metadata is not None:
5968 pkg = Package(built=built, cpv=cpv,
5969 installed=installed, metadata=metadata,
5970 root_config=root_config)
5971 if pkg.cp != atom.cp:
5972 # A cpv can be returned from dbapi.match() as an
5973 # old-style virtual match even in cases when the
5974 # package does not actually PROVIDE the virtual.
5975 # Filter out any such false matches here.
5976 if not atom_set.findAtomForPackage(pkg):
5979 masked_pkg_instances.add(pkg)
5981 missing_use.append(pkg)
5984 masked_packages.append(
5985 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE-related failures: flags absent from IUSE vs. flags
# that merely need to be toggled.
5987 missing_use_reasons = []
5988 missing_iuse_reasons = []
5989 for pkg in missing_use:
5990 use = pkg.use.enabled
5991 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5992 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5994 for x in atom.use.required:
5995 if iuse_re.match(x) is None:
5996 missing_iuse.append(x)
5999 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6000 missing_iuse_reasons.append((pkg, mreasons))
6002 need_enable = sorted(atom.use.enabled.difference(use))
6003 need_disable = sorted(atom.use.disabled.intersection(use))
6004 if need_enable or need_disable:
6006 changes.extend(colorize("red", "+" + x) \
6007 for x in need_enable)
6008 changes.extend(colorize("blue", "-" + x) \
6009 for x in need_disable)
6010 mreasons.append("Change USE: %s" % " ".join(changes))
6011 missing_use_reasons.append((pkg, mreasons))
# Prefer reporting unmasked instances; masked ones fall through to
# the normal masking message.
6013 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6014 in missing_use_reasons if pkg not in masked_pkg_instances]
6016 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6017 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6019 show_missing_use = False
6020 if unmasked_use_reasons:
6021 # Only show the latest version.
6022 show_missing_use = unmasked_use_reasons[:1]
6023 elif unmasked_iuse_reasons:
6024 if missing_use_reasons:
6025 # All packages with required IUSE are masked,
6026 # so display a normal masking message.
6029 show_missing_use = unmasked_iuse_reasons
6031 if show_missing_use:
6032 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6033 print "!!! One of the following packages is required to complete your request:"
6034 for pkg, mreasons in show_missing_use:
6035 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6037 elif masked_packages:
6039 colorize("BAD", "All ebuilds that could satisfy ") + \
6040 colorize("INFORM", xinfo) + \
6041 colorize("BAD", " have been masked.")
6042 print "!!! One of the following masked packages is required to complete your request:"
6043 have_eapi_mask = show_masked_packages(masked_packages)
# When an EAPI mask was seen, tell the user to upgrade portage.
6046 msg = ("The current version of portage supports " + \
6047 "EAPI '%s'. You must upgrade to a newer version" + \
6048 " of portage before EAPI masked packages can" + \
6049 " be installed.") % portage.const.EAPI
6050 from textwrap import wrap
6051 for line in wrap(msg, 75):
6056 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6058 # Show parent nodes and the argument that pulled them in.
6059 traversed_nodes = set()
6062 while node is not None:
6063 traversed_nodes.add(node)
6064 msg.append('(dependency required by "%s" [%s])' % \
6065 (colorize('INFORM', str(node.cpv)), node.type_name))
6066 # When traversing to parents, prefer arguments over packages
6067 # since arguments are root nodes. Never traverse the same
6068 # package twice, in order to prevent an infinite loop.
6069 selected_parent = None
6070 for parent in self.digraph.parent_nodes(node):
6071 if isinstance(parent, DependencyArg):
6072 msg.append('(dependency required by "%s" [argument])' % \
6073 (colorize('INFORM', str(parent))))
6074 selected_parent = None
6076 if parent not in traversed_nodes:
6077 selected_parent = parent
6078 node = selected_parent
# Cached front-end for package selection: returns (pkg, existing_node)
# from _highest_pkg_cache when possible, otherwise computes it via
# _select_pkg_highest_available_imp and stores the result.
# NOTE(review): cache-hit unpacking and return statements are elided.
6084 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6085 cache_key = (root, atom, onlydeps)
6086 ret = self._highest_pkg_cache.get(cache_key)
# Cache hit refresh: if the pkg has since been added to the graph,
# update the cached entry to carry the existing node.
6089 if pkg and not existing:
6090 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6091 if existing and existing == pkg:
6092 # Update the cache to reflect that the
6093 # package has been added to the graph.
6095 self._highest_pkg_cache[cache_key] = ret
6097 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6098 self._highest_pkg_cache[cache_key] = ret
# Track visible packages so they can be consulted later
# (e.g. for depclean / visibility reporting).
6101 settings = pkg.root_config.settings
6102 if visible(settings, pkg) and not (pkg.installed and \
6103 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6104 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection algorithm: scan all dbs (ebuild, binary,
# installed) for the highest acceptable match of `atom`, honoring
# --usepkgonly/--noreplace/--newuse/--reinstall, visibility, USE deps,
# and existing graph nodes. Returns (selected_pkg, existing_node).
# NOTE(review): this excerpt is heavily elided (continue/break
# statements, try/except bodies, several assignments) — comments below
# describe only what the visible lines establish.
6107 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6108 root_config = self.roots[root]
6109 pkgsettings = self.pkgsettings[root]
6110 dbs = self._filtered_trees[root]["dbs"]
6111 vardb = self.roots[root].trees["vartree"].dbapi
6112 portdb = self.roots[root].trees["porttree"].dbapi
6113 # List of acceptable packages, ordered by type preference.
6114 matched_packages = []
6115 highest_version = None
6116 if not isinstance(atom, portage.dep.Atom):
6117 atom = portage.dep.Atom(atom)
6119 atom_set = InternalPackageSet(initial_atoms=(atom,))
6120 existing_node = None
6122 usepkgonly = "--usepkgonly" in self.myopts
6123 empty = "empty" in self.myparams
6124 selective = "selective" in self.myparams
6126 noreplace = "--noreplace" in self.myopts
6127 # Behavior of the "selective" parameter depends on
6128 # whether or not a package matches an argument atom.
6129 # If an installed package provides an old-style
6130 # virtual that is no longer provided by an available
6131 # package, the installed package may match an argument
6132 # atom even though none of the available packages do.
6133 # Therefore, "selective" logic does not consider
6134 # whether or not an installed package matches an
6135 # argument atom. It only considers whether or not
6136 # available packages match argument atoms, which is
6137 # represented by the found_available_arg flag.
6138 found_available_arg = False
# Two passes: first look for an existing graph node, then select fresh.
6139 for find_existing_node in True, False:
6142 for db, pkg_type, built, installed, db_keys in dbs:
6145 if installed and not find_existing_node:
6146 want_reinstall = reinstall or empty or \
6147 (found_available_arg and not selective)
6148 if want_reinstall and matched_packages:
6150 if hasattr(db, "xmatch"):
6151 cpv_list = db.xmatch("match-all", atom)
6153 cpv_list = db.match(atom)
6155 # USE=multislot can make an installed package appear as if
6156 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6157 # won't do any good as long as USE=multislot is enabled since
6158 # the newly built package still won't have the expected slot.
6159 # Therefore, assume that such SLOT dependencies are already
6160 # satisfied rather than forcing a rebuild.
6161 if installed and not cpv_list and atom.slot:
6162 for cpv in db.match(atom.cp):
6163 slot_available = False
6164 for other_db, other_type, other_built, \
6165 other_installed, other_keys in dbs:
6168 other_db.aux_get(cpv, ["SLOT"])[0]:
6169 slot_available = True
6173 if not slot_available:
6175 inst_pkg = self._pkg(cpv, "installed",
6176 root_config, installed=installed)
6177 # Remove the slot from the atom and verify that
6178 # the package matches the resulting atom.
6179 atom_without_slot = portage.dep.remove_slot(atom)
6181 atom_without_slot += str(atom.use)
6182 atom_without_slot = portage.dep.Atom(atom_without_slot)
6183 if portage.match_from_list(
6184 atom_without_slot, [inst_pkg]):
6185 cpv_list = [inst_pkg.cpv]
6190 pkg_status = "merge"
6191 if installed or onlydeps:
6192 pkg_status = "nomerge"
# Per-candidate loop: build/fetch the Package instance and apply
# the acceptance filters below.
6195 for cpv in cpv_list:
6196 # Make --noreplace take precedence over --newuse.
6197 if not installed and noreplace and \
6198 cpv in vardb.match(atom):
6199 # If the installed version is masked, it may
6200 # be necessary to look at lower versions,
6201 # in case there is a visible downgrade.
6203 reinstall_for_flags = None
6204 cache_key = (pkg_type, root, cpv, pkg_status)
6205 calculated_use = True
6206 pkg = self._pkg_cache.get(cache_key)
6208 calculated_use = False
6210 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6213 pkg = Package(built=built, cpv=cpv,
6214 installed=installed, metadata=metadata,
6215 onlydeps=onlydeps, root_config=root_config,
6217 metadata = pkg.metadata
6219 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6220 if not built and ("?" in metadata["LICENSE"] or \
6221 "?" in metadata["PROVIDE"]):
6222 # This is avoided whenever possible because
6223 # it's expensive. It only needs to be done here
6224 # if it has an effect on visibility.
6225 pkgsettings.setcpv(pkg)
6226 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6227 calculated_use = True
6228 self._pkg_cache[pkg] = pkg
6230 if not installed or (built and matched_packages):
6231 # Only enforce visibility on installed packages
6232 # if there is at least one other visible package
6233 # available. By filtering installed masked packages
6234 # here, packages that have been masked since they
6235 # were installed can be automatically downgraded
6236 # to an unmasked version.
6238 if not visible(pkgsettings, pkg):
6240 except portage.exception.InvalidDependString:
6244 # Enable upgrade or downgrade to a version
6245 # with visible KEYWORDS when the installed
6246 # version is masked by KEYWORDS, but never
6247 # reinstall the same exact version only due
6248 # to a KEYWORDS mask.
6249 if built and matched_packages:
6251 different_version = None
6252 for avail_pkg in matched_packages:
6253 if not portage.dep.cpvequal(
6254 pkg.cpv, avail_pkg.cpv):
6255 different_version = avail_pkg
6257 if different_version is not None:
6260 pkgsettings._getMissingKeywords(
6261 pkg.cpv, pkg.metadata):
6264 # If the ebuild no longer exists or it's
6265 # keywords have been dropped, reject built
6266 # instances (installed or binary).
6267 # If --usepkgonly is enabled, assume that
6268 # the ebuild status should be ignored.
6272 pkg.cpv, "ebuild", root_config)
6273 except portage.exception.PackageNotFound:
6276 if not visible(pkgsettings, pkg_eb):
6279 if not pkg.built and not calculated_use:
6280 # This is avoided whenever possible because
6282 pkgsettings.setcpv(pkg)
6283 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6285 if pkg.cp != atom.cp:
6286 # A cpv can be returned from dbapi.match() as an
6287 # old-style virtual match even in cases when the
6288 # package does not actually PROVIDE the virtual.
6289 # Filter out any such false matches here.
6290 if not atom_set.findAtomForPackage(pkg):
# Track whether an available (non-installed) package matched an
# argument atom; feeds the "selective" logic above.
6294 if root == self.target_root:
6296 # Ebuild USE must have been calculated prior
6297 # to this point, in case atoms have USE deps.
6298 myarg = self._iter_atoms_for_pkg(pkg).next()
6299 except StopIteration:
6301 except portage.exception.InvalidDependString:
6303 # masked by corruption
6305 if not installed and myarg:
6306 found_available_arg = True
# Reject candidates whose USE state cannot satisfy the atom's
# USE dependencies (unbuilt packages only).
6308 if atom.use and not pkg.built:
6309 use = pkg.use.enabled
6310 if atom.use.enabled.difference(use):
6312 if atom.use.disabled.intersection(use):
6314 if pkg.cp == atom_cp:
6315 if highest_version is None:
6316 highest_version = pkg
6317 elif pkg > highest_version:
6318 highest_version = pkg
6319 # At this point, we've found the highest visible
6320 # match from the current repo. Any lower versions
6321 # from this repo are ignored, so this so the loop
6322 # will always end with a break statement below
6324 if find_existing_node:
6325 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6328 if portage.dep.match_from_list(atom, [e_pkg]):
6329 if highest_version and \
6330 e_pkg.cp == atom_cp and \
6331 e_pkg < highest_version and \
6332 e_pkg.slot_atom != highest_version.slot_atom:
6333 # There is a higher version available in a
6334 # different slot, so this existing node is
6338 matched_packages.append(e_pkg)
6339 existing_node = e_pkg
6341 # Compare built package to current config and
6342 # reject the built package if necessary.
6343 if built and not installed and \
6344 ("--newuse" in self.myopts or \
6345 "--reinstall" in self.myopts):
6346 iuses = pkg.iuse.all
6347 old_use = pkg.use.enabled
6349 pkgsettings.setcpv(myeb)
6351 pkgsettings.setcpv(pkg)
6352 now_use = pkgsettings["PORTAGE_USE"].split()
6353 forced_flags = set()
6354 forced_flags.update(pkgsettings.useforce)
6355 forced_flags.update(pkgsettings.usemask)
6357 if myeb and not usepkgonly:
6358 cur_iuse = myeb.iuse.all
6359 if self._reinstall_for_flags(forced_flags,
6363 # Compare current config to installed package
6364 # and do not reinstall if possible.
6365 if not installed and \
6366 ("--newuse" in self.myopts or \
6367 "--reinstall" in self.myopts) and \
6368 cpv in vardb.match(atom):
6369 pkgsettings.setcpv(pkg)
6370 forced_flags = set()
6371 forced_flags.update(pkgsettings.useforce)
6372 forced_flags.update(pkgsettings.usemask)
6373 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6374 old_iuse = set(filter_iuse_defaults(
6375 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6376 cur_use = pkgsettings["PORTAGE_USE"].split()
6377 cur_iuse = pkg.iuse.all
6378 reinstall_for_flags = \
6379 self._reinstall_for_flags(
6380 forced_flags, old_use, old_iuse,
6382 if reinstall_for_flags:
6386 matched_packages.append(pkg)
6387 if reinstall_for_flags:
6388 self._reinstall_nodes[pkg] = \
6392 if not matched_packages:
6395 if "--debug" in self.myopts:
6396 for pkg in matched_packages:
6397 portage.writemsg("%s %s\n" % \
6398 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6400 # Filter out any old-style virtual matches if they are
6401 # mixed with new-style virtual matches.
6402 cp = portage.dep_getkey(atom)
6403 if len(matched_packages) > 1 and \
6404 "virtual" == portage.catsplit(cp)[0]:
6405 for pkg in matched_packages:
6408 # Got a new-style virtual, so filter
6409 # out any old-style virtuals.
6410 matched_packages = [pkg for pkg in matched_packages \
# With multiple candidates left, keep only those equal to the best
# version across all types.
6414 if len(matched_packages) > 1:
6415 bestmatch = portage.best(
6416 [pkg.cpv for pkg in matched_packages])
6417 matched_packages = [pkg for pkg in matched_packages \
6418 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6420 # ordered by type preference ("ebuild" type is the last resort)
6421 return matched_packages[-1], existing_node
# Graph-restricted selection: match `atom` only against packages
# already in the graph (or installed and not scheduled for
# replacement) and return (pkg, in_graph_node).
# NOTE(review): the empty-match early return is elided here.
6423 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6425 Select packages that have already been added to the graph or
6426 those that are installed and have not been scheduled for
6429 graph_db = self._graph_trees[root]["porttree"].dbapi
6430 matches = graph_db.match_pkgs(atom)
6433 pkg = matches[-1] # highest match
6434 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6435 return pkg, in_graph
# Pull deep dependencies of the required sets (args/system/world)
# into the graph so upgrades don't silently break initially-satisfied
# deps. Only active with --complete-graph. NOTE(review): several
# lines are elided (early returns, args list creation, continues).
6437 def _complete_graph(self):
6439 Add any deep dependencies of required sets (args, system, world) that
6440 have not been pulled into the graph yet. This ensures that the graph
6441 is consistent such that initially satisfied deep dependencies are not
6442 broken in the new graph. Initially unsatisfied dependencies are
6443 irrelevant since we only want to avoid breaking dependencies that are
6446 Since this method can consume enough time to disturb users, it is
6447 currently only enabled by the --complete-graph option.
6449 if "--buildpkgonly" in self.myopts or \
6450 "recurse" not in self.myparams:
6453 if "complete" not in self.myparams:
6454 # Skip this to avoid consuming enough time to disturb users.
6457 # Put the depgraph into a mode that causes it to only
6458 # select packages that have already been added to the
6459 # graph or those that are installed and have not been
6460 # scheduled for replacement. Also, toggle the "deep"
6461 # parameter so that all dependencies are traversed and
6463 self._select_atoms = self._select_atoms_from_graph
6464 self._select_package = self._select_pkg_from_graph
6465 already_deep = "deep" in self.myparams
6466 if not already_deep:
6467 self.myparams.add("deep")
6469 for root in self.roots:
6470 required_set_names = self._required_set_names.copy()
6471 if root == self.target_root and \
6472 (already_deep or "empty" in self.myparams):
6473 required_set_names.difference_update(self._sets)
6474 if not required_set_names and not self._ignored_deps:
6476 root_config = self.roots[root]
6477 setconfig = root_config.setconfig
6479 # Reuse existing SetArg instances when available.
6480 for arg in self.digraph.root_nodes():
6481 if not isinstance(arg, SetArg):
6483 if arg.root_config != root_config:
6485 if arg.name in required_set_names:
6487 required_set_names.remove(arg.name)
6488 # Create new SetArg instances only when necessary.
6489 for s in required_set_names:
6490 expanded_set = InternalPackageSet(
6491 initial_atoms=setconfig.getSetAtoms(s))
6492 atom = SETPREFIX + s
6493 args.append(SetArg(arg=atom, set=expanded_set,
6494 root_config=root_config))
6495 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom as a Dependency to be (re)resolved, along with
# any previously ignored deps.
6497 for atom in arg.set:
6498 self._dep_stack.append(
6499 Dependency(atom=atom, root=root, parent=arg))
6500 if self._ignored_deps:
6501 self._dep_stack.extend(self._ignored_deps)
6502 self._ignored_deps = []
6503 if not self._create_graph(allow_unsatisfied=True):
6505 # Check the unsatisfied deps to see if any initially satisfied deps
6506 # will become unsatisfied due to an upgrade. Initially unsatisfied
6507 # deps are irrelevant since we only want to avoid breaking deps
6508 # that are initially satisfied.
6509 while self._unsatisfied_deps:
6510 dep = self._unsatisfied_deps.pop()
6511 matches = vardb.match_pkgs(dep.atom)
6513 self._initially_unsatisfied_deps.append(dep)
6515 # An scheduled installation broke a deep dependency.
6516 # Add the installed package to the graph so that it
6517 # will be appropriately reported as a slot collision
6518 # (possibly solvable via backtracking).
6519 pkg = matches[-1] # highest match
6520 if not self._add_pkg(pkg, dep):
6522 if not self._create_graph(allow_unsatisfied=True):
# Fetch-or-create a Package instance for (type_name, root, cpv,
# "nomerge"), caching it in self._pkg_cache. Raises PackageNotFound
# when aux_get fails. NOTE(review): the cache-hit return and the
# try/except around aux_get are partially elided here.
6526 def _pkg(self, cpv, type_name, root_config, installed=False):
6528 Get a package instance from the cache, or create a new
6529 one if necessary. Raises KeyError from aux_get if it
6530 failures for some reason (package does not exist or is
6535 operation = "nomerge"
6536 pkg = self._pkg_cache.get(
6537 (type_name, root_config.root, cpv, operation))
6539 tree_type = self.pkg_tree_map[type_name]
6540 db = root_config.trees[tree_type].dbapi
# Use the original (unfiltered) tree's aux cache keys for metadata.
6541 db_keys = list(self._trees_orig[root_config.root][
6542 tree_type].dbapi._aux_cache_keys)
6544 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6546 raise portage.exception.PackageNotFound(cpv)
6547 pkg = Package(cpv=cpv, metadata=metadata,
6548 root_config=root_config, installed=installed)
6549 if type_name == "ebuild":
# Ebuilds need USE computed from current settings.
6550 settings = self.pkgsettings[root_config.root]
6551 settings.setcpv(pkg)
6552 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6553 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6554 self._pkg_cache[pkg] = pkg
# Validate all blocker atoms against the graph: drop irrelevant ones,
# schedule uninstalls that resolve resolvable blocks, and record
# unsolvable blockers. Uses a persistent BlockerCache keyed by cpv and
# COUNTER. NOTE(review): this excerpt is heavily elided (loop headers,
# early returns, continue/break statements, some assignments).
6557 def validate_blockers(self):
6558 """Remove any blockers from the digraph that do not match any of the
6559 packages within the graph. If necessary, create hard deps to ensure
6560 correct merge order such that mutually blocking packages are never
6561 installed simultaneously."""
6563 if "--buildpkgonly" in self.myopts or \
6564 "--nodeps" in self.myopts:
6567 #if "deep" in self.myparams:
6569 # Pull in blockers from all installed packages that haven't already
6570 # been pulled into the depgraph. This is not enabled by default
6571 # due to the performance penalty that is incurred by all the
6572 # additional dep_check calls that are required.
6574 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6575 for myroot in self.trees:
6576 vardb = self.trees[myroot]["vartree"].dbapi
6577 portdb = self.trees[myroot]["porttree"].dbapi
6578 pkgsettings = self.pkgsettings[myroot]
6579 final_db = self.mydbapi[myroot]
6581 blocker_cache = BlockerCache(myroot, vardb)
6582 stale_cache = set(blocker_cache)
# Per-installed-package loop (header elided): refresh the cache entry
# and collect blocker atoms, preferring graph data over dep_check.
6585 stale_cache.discard(cpv)
6586 pkg_in_graph = self.digraph.contains(pkg)
6588 # Check for masked installed packages. Only warn about
6589 # packages that are in the graph in order to avoid warning
6590 # about those that will be automatically uninstalled during
6591 # the merge process or by --depclean.
6593 if pkg_in_graph and not visible(pkgsettings, pkg):
6594 self._masked_installed.add(pkg)
6596 blocker_atoms = None
6602 self._blocker_parents.child_nodes(pkg))
6607 self._irrelevant_blockers.child_nodes(pkg))
6610 if blockers is not None:
6611 blockers = set(str(blocker.atom) \
6612 for blocker in blockers)
6614 # If this node has any blockers, create a "nomerge"
6615 # node for it so that they can be enforced.
6616 self.spinner.update()
# COUNTER mismatch invalidates a stale cache entry.
6617 blocker_data = blocker_cache.get(cpv)
6618 if blocker_data is not None and \
6619 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6622 # If blocker data from the graph is available, use
6623 # it to validate the cache and update the cache if
6625 if blocker_data is not None and \
6626 blockers is not None:
6627 if not blockers.symmetric_difference(
6628 blocker_data.atoms):
6632 if blocker_data is None and \
6633 blockers is not None:
6634 # Re-use the blockers from the graph.
6635 blocker_atoms = sorted(blockers)
6636 counter = long(pkg.metadata["COUNTER"])
6638 blocker_cache.BlockerData(counter, blocker_atoms)
6639 blocker_cache[pkg.cpv] = blocker_data
6643 blocker_atoms = blocker_data.atoms
6645 # Use aux_get() to trigger FakeVartree global
6646 # updates on *DEPEND when appropriate.
6647 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6648 # It is crucial to pass in final_db here in order to
6649 # optimize dep_check calls by eliminating atoms via
6650 # dep_wordreduce and dep_eval calls.
6652 portage.dep._dep_check_strict = False
6654 success, atoms = portage.dep_check(depstr,
6655 final_db, pkgsettings, myuse=pkg.use.enabled,
6656 trees=self._graph_trees, myroot=myroot)
6657 except Exception, e:
6658 if isinstance(e, SystemExit):
6660 # This is helpful, for example, if a ValueError
6661 # is thrown from cpv_expand due to multiple
6662 # matches (this can happen if an atom lacks a
6664 show_invalid_depstring_notice(
6665 pkg, depstr, str(e))
6669 portage.dep._dep_check_strict = True
# dep_check failure: tolerate it when the pkg is being replaced.
6671 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6672 if replacement_pkg and \
6673 replacement_pkg[0].operation == "merge":
6674 # This package is being replaced anyway, so
6675 # ignore invalid dependencies so as not to
6676 # annoy the user too much (otherwise they'd be
6677 # forced to manually unmerge it first).
6679 show_invalid_depstring_notice(pkg, depstr, atoms)
6681 blocker_atoms = [myatom for myatom in atoms \
6682 if myatom.startswith("!")]
6683 blocker_atoms.sort()
6684 counter = long(pkg.metadata["COUNTER"])
6685 blocker_cache[cpv] = \
6686 blocker_cache.BlockerData(counter, blocker_atoms)
6689 for atom in blocker_atoms:
6690 blocker = Blocker(atom=portage.dep.Atom(atom),
6691 eapi=pkg.metadata["EAPI"], root=myroot)
6692 self._blocker_parents.add(blocker, pkg)
6693 except portage.exception.InvalidAtom, e:
6694 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6695 show_invalid_depstring_notice(
6696 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer installed.
6698 for cpv in stale_cache:
6699 del blocker_cache[cpv]
6700 blocker_cache.flush()
6703 # Discard any "uninstall" tasks scheduled by previous calls
6704 # to this method, since those tasks may not make sense given
6705 # the current graph state.
6706 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6707 if previous_uninstall_tasks:
6708 self._blocker_uninstalls = digraph()
6709 self.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each blocker against initial and final dbs.
6711 for blocker in self._blocker_parents.leaf_nodes():
6712 self.spinner.update()
6713 root_config = self.roots[blocker.root]
6714 virtuals = root_config.settings.getvirtuals()
6715 myroot = blocker.root
6716 initial_db = self.trees[myroot]["vartree"].dbapi
6717 final_db = self.mydbapi[myroot]
6719 provider_virtual = False
6720 if blocker.cp in virtuals and \
6721 not self._have_new_virt(blocker.root, blocker.cp):
6722 provider_virtual = True
# Old-style virtual blockers expand to one atom per provider.
6724 if provider_virtual:
6726 for provider_entry in virtuals[blocker.cp]:
6728 portage.dep_getkey(provider_entry)
6729 atoms.append(blocker.atom.replace(
6730 blocker.cp, provider_cp))
6732 atoms = [blocker.atom]
6734 blocked_initial = []
6736 blocked_initial.extend(initial_db.match_pkgs(atom))
6740 blocked_final.extend(final_db.match_pkgs(atom))
6742 if not blocked_initial and not blocked_final:
6743 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6744 self._blocker_parents.remove(blocker)
6745 # Discard any parents that don't have any more blockers.
6746 for pkg in parent_pkgs:
6747 self._irrelevant_blockers.add(blocker, pkg)
6748 if not self._blocker_parents.child_nodes(pkg):
6749 self._blocker_parents.remove(pkg)
6751 for parent in self._blocker_parents.parent_nodes(blocker):
6752 unresolved_blocks = False
6753 depends_on_order = set()
6754 for pkg in blocked_initial:
6755 if pkg.slot_atom == parent.slot_atom:
6756 # TODO: Support blocks within slots in cases where it
6757 # might make sense. For example, a new version might
6758 # require that the old version be uninstalled at build
6761 if parent.installed:
6762 # Two currently installed packages conflict with
6763 # eachother. Ignore this case since the damage
6764 # is already done and this would be likely to
6765 # confuse users if displayed like a normal blocker.
6768 self._blocked_pkgs.add(pkg, blocker)
6770 if parent.operation == "merge":
6771 # Maybe the blocked package can be replaced or simply
6772 # unmerged to resolve this block.
6773 depends_on_order.add((pkg, parent))
6775 # None of the above blocker resolutions techniques apply,
6776 # so apparently this one is unresolvable.
6777 unresolved_blocks = True
6778 for pkg in blocked_final:
6779 if pkg.slot_atom == parent.slot_atom:
6780 # TODO: Support blocks within slots.
6782 if parent.operation == "nomerge" and \
6783 pkg.operation == "nomerge":
6784 # This blocker will be handled the next time that a
6785 # merge of either package is triggered.
6788 self._blocked_pkgs.add(pkg, blocker)
6790 # Maybe the blocking package can be
6791 # unmerged to resolve this block.
6792 if parent.operation == "merge" and pkg.installed:
6793 depends_on_order.add((pkg, parent))
6795 elif parent.operation == "nomerge":
6796 depends_on_order.add((parent, pkg))
6798 # None of the above blocker resolutions techniques apply,
6799 # so apparently this one is unresolvable.
6800 unresolved_blocks = True
6802 # Make sure we don't unmerge any package that have been pulled
6804 if not unresolved_blocks and depends_on_order:
6805 for inst_pkg, inst_task in depends_on_order:
6806 if self.digraph.contains(inst_pkg) and \
6807 self.digraph.parent_nodes(inst_pkg):
6808 unresolved_blocks = True
# Resolvable: schedule uninstall tasks ordered before the merges
# that depend on them.
6811 if not unresolved_blocks and depends_on_order:
6812 for inst_pkg, inst_task in depends_on_order:
6813 uninst_task = Package(built=inst_pkg.built,
6814 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6815 metadata=inst_pkg.metadata,
6816 operation="uninstall",
6817 root_config=inst_pkg.root_config,
6818 type_name=inst_pkg.type_name)
6819 self._pkg_cache[uninst_task] = uninst_task
6820 # Enforce correct merge order with a hard dep.
6821 self.digraph.addnode(uninst_task, inst_task,
6822 priority=BlockerDepPriority.instance)
6823 # Count references to this blocker so that it can be
6824 # invalidated after nodes referencing it have been
6826 self._blocker_uninstalls.addnode(uninst_task, blocker)
6827 if not unresolved_blocks and not depends_on_order:
6828 self._irrelevant_blockers.add(blocker, parent)
6829 self._blocker_parents.remove_edge(blocker, parent)
6830 if not self._blocker_parents.parent_nodes(blocker):
6831 self._blocker_parents.remove(blocker)
6832 if not self._blocker_parents.child_nodes(parent):
6833 self._blocker_parents.remove(parent)
6834 if unresolved_blocks:
6835 self._unsolvable_blockers.add(blocker, parent)
# Return whether unresolved blocker conflicts should be tolerated rather
# than treated as fatal, based on the active emerge options: options that
# skip actual merging (--buildpkgonly, --fetchonly, --fetch-all-uri,
# --nodeps) make blocker conflicts acceptable.
# NOTE(review): the lines that initialize and return the result
# (original 6840, 6844-6847) are elided from this listing — confirm the
# exact return protocol against the full source.
6839 def _accept_blocker_conflicts(self):
6841 for x in ("--buildpkgonly", "--fetchonly",
6842 "--fetch-all-uri", "--nodeps"):
6843 if x in self.myopts:
# Sort mygraph.order in place so that preferred merge candidates come
# first: uninstall operations and deep system runtime deps get special
# treatment, and remaining nodes are ordered by descending parent
# (reference) count.
6848 def _merge_order_bias(self, mygraph):
6850 For optimal leaf node selection, promote deep system runtime deps and
6851 order nodes from highest to lowest overall reference count.
# node_info maps each node to its number of parents in mygraph.
6855 for node in mygraph.order:
6856 node_info[node] = len(mygraph.parent_nodes(node))
6857 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# Comparison callable applied via cmp_sort_key() in the sort below.
6859 def cmp_merge_preference(node1, node2):
6861 if node1.operation == 'uninstall':
6862 if node2.operation == 'uninstall':
# NOTE(review): the return statements for the uninstall and
# system-dep branches (original 6863-6865, 6868-6870, 6874-6877) are
# elided from this listing.
6866 if node2.operation == 'uninstall':
6867 if node1.operation == 'uninstall':
6871 node1_sys = node1 in deep_system_deps
6872 node2_sys = node2 in deep_system_deps
6873 if node1_sys != node2_sys:
# Fall back to ordering by descending reference count.
6878 return node_info[node2] - node_info[node1]
6880 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge task list, computing and caching
# it on first use. _serialize_tasks() may raise _serialize_tasks_retry
# (caught here) after adjusting state, in which case serialization is
# attempted again on the next loop iteration.
# NOTE(review): the `try:` line, the `reversed` parameter handling and
# the final return are elided from this listing (original 6886,
# 6890-6895); the parameter name `reversed` shadows the builtin.
6882 def altlist(self, reversed=False):
6884 while self._serialized_tasks_cache is None:
6885 self._resolve_conflicts()
6887 self._serialized_tasks_cache, self._scheduler_graph = \
6888 self._serialize_tasks()
6889 except self._serialize_tasks_retry:
6892 retlist = self._serialized_tasks_cache[:]
# Lazily build and return self._scheduler_graph, breaking depgraph
# back-references in its nodes first (see docstring below).
6897 def schedulerGraph(self):
6899 The scheduler graph is identical to the normal one except that
6900 uninstall edges are reversed in specific cases that require
6901 conflicting packages to be temporarily installed simultaneously.
6902 This is intended for use by the Scheduler in its parallelization
6903 logic. It ensures that temporary simultaneous installation of
6904 conflicting packages is avoided when appropriate (especially for
6905 !!atom blockers), but allowed in specific cases that require it.
6907 Note that this method calls break_refs() which alters the state of
6908 internal Package instances such that this depgraph instance should
6909 not be used to perform any more calculations.
# The statement that populates the graph when it is still None is
# elided from this listing (original 6912) — presumably it triggers
# the serialization path; confirm against the full source.
6911 if self._scheduler_graph is None:
6913 self.break_refs(self._scheduler_graph.order)
6914 return self._scheduler_graph
6916 def break_refs(self, nodes):
6918 Take a mergelist like that returned from self.altlist() and
6919 break any references that lead back to the depgraph. This is
6920 useful if you want to hold references to packages without
6921 also holding the depgraph on the heap.
# NOTE(review): the loop header iterating over `nodes` (original 6923)
# is elided from this listing; the body below runs once per node.
6924 if hasattr(node, "root_config"):
6925 # The FakeVartree references the _package_cache which
6926 # references the depgraph. So that Package instances don't
6927 # hold the depgraph and FakeVartree on the heap, replace
6928 # the RootConfig that references the FakeVartree with the
6929 # original RootConfig instance which references the actual
# vartree (end of sentence elided at original 6930).
6931 node.root_config = \
6932 self._trees_orig[node.root_config.root]["root_config"]
6934 def _resolve_conflicts(self):
6935 if not self._complete_graph():
6936 raise self._unknown_internal_error()
6938 if not self.validate_blockers():
6939 raise self._unknown_internal_error()
6941 if self._slot_collision_info:
6942 self._process_slot_conflicts()
# Flatten self.digraph into an ordered merge list (retlist) plus a
# scheduler graph. The main loop repeatedly selects mergeable leaf
# nodes, schedules Uninstall tasks only when needed to break blocker
# conflicts, and progressively loosens selection criteria (prefer_asap,
# drop_satisfied) when nothing can be selected. Raises
# _serialize_tasks_retry when the graph must be completed and the whole
# serialization retried, and _unknown_internal_error for unresolvable
# blocker or circular-dependency situations.
# NOTE(review): this listing elides many original lines (blank lines,
# `try:`/`else:`/`break`/`continue` lines, and some initializers such
# as asap_nodes and retlist), so the block cannot be read as runnable
# code on its own — confirm details against the full source.
6944 def _serialize_tasks(self):
6946 if "--debug" in self.myopts:
6947 writemsg("\ndigraph:\n\n", noiselevel=-1)
6948 self.digraph.debug_print()
6949 writemsg("\n", noiselevel=-1)
# Work on copies: mygraph is consumed by the selection loop below,
# while scheduler_graph is returned (with uninstall edges adjusted).
6951 scheduler_graph = self.digraph.copy()
6952 mygraph=self.digraph.copy()
6953 # Prune "nomerge" root nodes if nothing depends on them, since
6954 # otherwise they slow down merge order calculation. Don't remove
6955 # non-root nodes since they help optimize merge order in some cases
6956 # such as revdep-rebuild.
6957 removed_nodes = set()
6959 for node in mygraph.root_nodes():
6960 if not isinstance(node, Package) or \
6961 node.installed or node.onlydeps:
6962 removed_nodes.add(node)
6964 self.spinner.update()
6965 mygraph.difference_update(removed_nodes)
6966 if not removed_nodes:
6968 removed_nodes.clear()
6969 self._merge_order_bias(mygraph)
6970 def cmp_circular_bias(n1, n2):
6972 RDEPEND is stronger than PDEPEND and this function
6973 measures such a strength bias within a circular
6974 dependency relationship.
6976 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6977 ignore_priority=priority_range.ignore_medium_soft)
6978 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6979 ignore_priority=priority_range.ignore_medium_soft)
6980 if n1_n2_medium == n2_n1_medium:
6985 myblocker_uninstalls = self._blocker_uninstalls.copy()
6987 # Contains uninstall tasks that have been scheduled to
6988 # occur after overlapping blockers have been installed.
6989 scheduled_uninstalls = set()
6990 # Contains any Uninstall tasks that have been ignored
6991 # in order to avoid the circular deps code path. These
6992 # correspond to blocker conflicts that could not be
6994 ignored_uninstall_tasks = set()
6995 have_uninstall_task = False
6996 complete = "complete" in self.myparams
6999 def get_nodes(**kwargs):
7001 Returns leaf nodes excluding Uninstall instances
7002 since those should be executed as late as possible.
7004 return [node for node in mygraph.leaf_nodes(**kwargs) \
7005 if isinstance(node, Package) and \
7006 (node.operation != "uninstall" or \
7007 node in scheduled_uninstalls)]
7009 # sys-apps/portage needs special treatment if ROOT="/"
7010 running_root = self._running_root.root
7011 from portage.const import PORTAGE_PACKAGE_ATOM
7012 runtime_deps = InternalPackageSet(
7013 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7014 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7015 PORTAGE_PACKAGE_ATOM)
7016 replacement_portage = self.mydbapi[running_root].match_pkgs(
7017 PORTAGE_PACKAGE_ATOM)
7020 running_portage = running_portage[0]
7022 running_portage = None
7024 if replacement_portage:
7025 replacement_portage = replacement_portage[0]
7027 replacement_portage = None
# A distinct replacement means portage itself is being upgraded.
7029 if replacement_portage == running_portage:
7030 replacement_portage = None
7032 if replacement_portage is not None:
7033 # update from running_portage to replacement_portage asap
7034 asap_nodes.append(replacement_portage)
7036 if running_portage is not None:
7038 portage_rdepend = self._select_atoms_highest_available(
7039 running_root, running_portage.metadata["RDEPEND"],
7040 myuse=running_portage.use.enabled,
7041 parent=running_portage, strict=False)
7042 except portage.exception.InvalidDependString, e:
7043 portage.writemsg("!!! Invalid RDEPEND in " + \
7044 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7045 (running_root, running_portage.cpv, e), noiselevel=-1)
7047 portage_rdepend = []
7048 runtime_deps.update(atom for atom in portage_rdepend \
7049 if not atom.startswith("!"))
7051 def gather_deps(ignore_priority, mergeable_nodes,
7052 selected_nodes, node):
7054 Recursively gather a group of nodes that RDEPEND on
7055 each other. This ensures that they are merged as a group
7056 and get their RDEPENDs satisfied as soon as possible.
7058 if node in selected_nodes:
7060 if node not in mergeable_nodes:
7062 if node == replacement_portage and \
7063 mygraph.child_nodes(node,
7064 ignore_priority=priority_range.ignore_medium_soft):
7065 # Make sure that portage always has all of its
7066 # RDEPENDs installed first.
7068 selected_nodes.add(node)
7069 for child in mygraph.child_nodes(node,
7070 ignore_priority=ignore_priority):
7071 if not gather_deps(ignore_priority,
7072 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally never ignore hard blocker deps.
7076 def ignore_uninst_or_med(priority):
7077 if priority is BlockerDepPriority.instance:
7079 return priority_range.ignore_medium(priority)
7081 def ignore_uninst_or_med_soft(priority):
7082 if priority is BlockerDepPriority.instance:
7084 return priority_range.ignore_medium_soft(priority)
7086 tree_mode = "--tree" in self.myopts
7087 # Tracks whether or not the current iteration should prefer asap_nodes
7088 # if available. This is set to False when the previous iteration
7089 # failed to select any nodes. It is reset whenever nodes are
7090 # successfully selected.
7093 # Controls whether or not the current iteration should drop edges that
7094 # are "satisfied" by installed packages, in order to solve circular
7095 # dependencies. The deep runtime dependencies of installed packages are
7096 # not checked in this case (bug #199856), so it must be avoided
7097 # whenever possible.
7098 drop_satisfied = False
7100 # State of variables for successive iterations that loosen the
7101 # criteria for node selection.
7103 # iteration prefer_asap drop_satisfied
7108 # If no nodes are selected on the last iteration, it is due to
7109 # unresolved blockers or circular dependencies.
# Main selection loop: pop nodes from mygraph until it is empty.
7111 while not mygraph.empty():
7112 self.spinner.update()
7113 selected_nodes = None
7114 ignore_priority = None
7115 if drop_satisfied or (prefer_asap and asap_nodes):
7116 priority_range = DepPrioritySatisfiedRange
7118 priority_range = DepPriorityNormalRange
7119 if prefer_asap and asap_nodes:
7120 # ASAP nodes are merged before their soft deps. Go ahead and
7121 # select root nodes here if necessary, since it's typical for
7122 # the parent to have been removed from the graph already.
7123 asap_nodes = [node for node in asap_nodes \
7124 if mygraph.contains(node)]
7125 for node in asap_nodes:
7126 if not mygraph.child_nodes(node,
7127 ignore_priority=priority_range.ignore_soft):
7128 selected_nodes = [node]
7129 asap_nodes.remove(node)
# Normal selection path: try each ignore_priority level in turn,
# from strictest (NONE) to loosest (MEDIUM_SOFT).
7131 if not selected_nodes and \
7132 not (prefer_asap and asap_nodes):
7133 for i in xrange(priority_range.NONE,
7134 priority_range.MEDIUM_SOFT + 1):
7135 ignore_priority = priority_range.ignore_priority[i]
7136 nodes = get_nodes(ignore_priority=ignore_priority)
7138 # If there is a mix of uninstall nodes with other
7139 # types, save the uninstall nodes for later since
7140 # sometimes a merge node will render an uninstall
7141 # node unnecessary (due to occupying the same slot),
7142 # and we want to avoid executing a separate uninstall
7143 # task in that case.
7145 good_uninstalls = []
7146 with_some_uninstalls_excluded = []
7148 if node.operation == "uninstall":
7149 slot_node = self.mydbapi[node.root
7150 ].match_pkgs(node.slot_atom)
7152 slot_node[0].operation == "merge":
7154 good_uninstalls.append(node)
7155 with_some_uninstalls_excluded.append(node)
7157 nodes = good_uninstalls
7158 elif with_some_uninstalls_excluded:
7159 nodes = with_some_uninstalls_excluded
7163 if ignore_priority is None and not tree_mode:
7164 # Greedily pop all of these nodes since no
7165 # relationship has been ignored. This optimization
7166 # destroys --tree output, so it's disabled in tree
7168 selected_nodes = nodes
7170 # For optimal merge order:
7171 # * Only pop one node.
7172 # * Removing a root node (node without a parent)
7173 # will not produce a leaf node, so avoid it.
7174 # * It's normal for a selected uninstall to be a
7175 # root node, so don't check them for parents.
7177 if node.operation == "uninstall" or \
7178 mygraph.parent_nodes(node):
7179 selected_nodes = [node]
# Fallback: look for a group of mutually runtime-dependent nodes
# that can be merged together (handles RDEPEND cycles).
7185 if not selected_nodes:
7186 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7188 mergeable_nodes = set(nodes)
7189 if prefer_asap and asap_nodes:
7191 for i in xrange(priority_range.SOFT,
7192 priority_range.MEDIUM_SOFT + 1):
7193 ignore_priority = priority_range.ignore_priority[i]
7195 if not mygraph.parent_nodes(node):
7197 selected_nodes = set()
7198 if gather_deps(ignore_priority,
7199 mergeable_nodes, selected_nodes, node):
7202 selected_nodes = None
7206 if prefer_asap and asap_nodes and not selected_nodes:
7207 # We failed to find any asap nodes to merge, so ignore
7208 # them for the next iteration.
7212 if selected_nodes and ignore_priority is not None:
7213 # Try to merge ignored medium_soft deps as soon as possible
7214 # if they're not satisfied by installed packages.
7215 for node in selected_nodes:
7216 children = set(mygraph.child_nodes(node))
7217 soft = children.difference(
7218 mygraph.child_nodes(node,
7219 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7220 medium_soft = children.difference(
7221 mygraph.child_nodes(node,
7223 DepPrioritySatisfiedRange.ignore_medium_soft))
7224 medium_soft.difference_update(soft)
7225 for child in medium_soft:
7226 if child in selected_nodes:
7228 if child in asap_nodes:
7230 asap_nodes.append(child)
7232 if selected_nodes and len(selected_nodes) > 1:
7233 if not isinstance(selected_nodes, list):
7234 selected_nodes = list(selected_nodes)
7235 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7237 if not selected_nodes and not myblocker_uninstalls.is_empty():
7238 # An Uninstall task needs to be executed in order to
7239 # avoid conflict if possible.
7242 priority_range = DepPrioritySatisfiedRange
7244 priority_range = DepPriorityNormalRange
7246 mergeable_nodes = get_nodes(
7247 ignore_priority=ignore_uninst_or_med)
7249 min_parent_deps = None
7251 for task in myblocker_uninstalls.leaf_nodes():
7252 # Do some sanity checks so that system or world packages
7253 # don't get uninstalled inappropriately here (only really
7254 # necessary when --complete-graph has not been enabled).
7256 if task in ignored_uninstall_tasks:
7259 if task in scheduled_uninstalls:
7260 # It's been scheduled but it hasn't
7261 # been executed yet due to dependence
7262 # on installation of blocking packages.
7265 root_config = self.roots[task.root]
7266 inst_pkg = self._pkg_cache[
7267 ("installed", task.root, task.cpv, "nomerge")]
7269 if self.digraph.contains(inst_pkg):
# Classify the blockers that involve this task: EAPI 0/1 blockers
# imply heuristic overlap handling, !!atom blockers forbid overlap.
7272 forbid_overlap = False
7273 heuristic_overlap = False
7274 for blocker in myblocker_uninstalls.parent_nodes(task):
7275 if blocker.eapi in ("0", "1"):
7276 heuristic_overlap = True
7277 elif blocker.atom.blocker.overlap.forbid:
7278 forbid_overlap = True
7280 if forbid_overlap and running_root == task.root:
7283 if heuristic_overlap and running_root == task.root:
7284 # Never uninstall sys-apps/portage or its essential
7285 # dependencies, except through replacement.
7287 runtime_dep_atoms = \
7288 list(runtime_deps.iterAtomsForPackage(task))
7289 except portage.exception.InvalidDependString, e:
7290 portage.writemsg("!!! Invalid PROVIDE in " + \
7291 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7292 (task.root, task.cpv, e), noiselevel=-1)
7296 # Don't uninstall a runtime dep if it appears
7297 # to be the only suitable one installed.
7299 vardb = root_config.trees["vartree"].dbapi
7300 for atom in runtime_dep_atoms:
7301 other_version = None
7302 for pkg in vardb.match_pkgs(atom):
7303 if pkg.cpv == task.cpv and \
7304 pkg.metadata["COUNTER"] == \
7305 task.metadata["COUNTER"]:
7309 if other_version is None:
7315 # For packages in the system set, don't take
7316 # any chances. If the conflict can't be resolved
7317 # by a normal replacement operation then abort.
7320 for atom in root_config.sets[
7321 "system"].iterAtomsForPackage(task):
7324 except portage.exception.InvalidDependString, e:
7325 portage.writemsg("!!! Invalid PROVIDE in " + \
7326 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7327 (task.root, task.cpv, e), noiselevel=-1)
7333 # Note that the world check isn't always
7334 # necessary since self._complete_graph() will
7335 # add all packages from the system and world sets to the
7336 # graph. This just allows unresolved conflicts to be
7337 # detected as early as possible, which makes it possible
7338 # to avoid calling self._complete_graph() when it is
7339 # unnecessary due to blockers triggering an abortion.
7341 # For packages in the world set, go ahead and uninstall
7342 # when necessary, as long as the atom will be satisfied
7343 # in the final state.
7344 graph_db = self.mydbapi[task.root]
7347 for atom in root_config.sets[
7348 "world"].iterAtomsForPackage(task):
7350 for pkg in graph_db.match_pkgs(atom):
7357 self._blocked_world_pkgs[inst_pkg] = atom
7359 except portage.exception.InvalidDependString, e:
7360 portage.writemsg("!!! Invalid PROVIDE in " + \
7361 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7362 (task.root, task.cpv, e), noiselevel=-1)
7368 # Check the deps of parent nodes to ensure that
7369 # the chosen task produces a leaf node. Maybe
7370 # this can be optimized some more to make the
7371 # best possible choice, but the current algorithm
7372 # is simple and should be near optimal for most
7374 mergeable_parent = False
7376 for parent in mygraph.parent_nodes(task):
7377 parent_deps.update(mygraph.child_nodes(parent,
7378 ignore_priority=priority_range.ignore_medium_soft))
7379 if parent in mergeable_nodes and \
7380 gather_deps(ignore_uninst_or_med_soft,
7381 mergeable_nodes, set(), parent):
7382 mergeable_parent = True
7384 if not mergeable_parent:
# Prefer the candidate task whose parents have the fewest
# outstanding deps (most likely to produce a leaf node).
7387 parent_deps.remove(task)
7388 if min_parent_deps is None or \
7389 len(parent_deps) < min_parent_deps:
7390 min_parent_deps = len(parent_deps)
7393 if uninst_task is not None:
7394 # The uninstall is performed only after blocking
7395 # packages have been merged on top of it. File
7396 # collisions between blocking packages are detected
7397 # and removed from the list of files to be uninstalled.
7398 scheduled_uninstalls.add(uninst_task)
7399 parent_nodes = mygraph.parent_nodes(uninst_task)
7401 # Reverse the parent -> uninstall edges since we want
7402 # to do the uninstall after blocking packages have
7403 # been merged on top of it.
7404 mygraph.remove(uninst_task)
7405 for blocked_pkg in parent_nodes:
7406 mygraph.add(blocked_pkg, uninst_task,
7407 priority=BlockerDepPriority.instance)
7408 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7409 scheduler_graph.add(blocked_pkg, uninst_task,
7410 priority=BlockerDepPriority.instance)
7412 # Reset the state variables for leaf node selection and
7413 # continue trying to select leaf nodes.
7415 drop_satisfied = False
7418 if not selected_nodes:
7419 # Only select root nodes as a last resort. This case should
7420 # only trigger when the graph is nearly empty and the only
7421 # remaining nodes are isolated (no parents or children). Since
7422 # the nodes must be isolated, ignore_priority is not needed.
7423 selected_nodes = get_nodes()
7425 if not selected_nodes and not drop_satisfied:
7426 drop_satisfied = True
7429 if not selected_nodes and not myblocker_uninstalls.is_empty():
7430 # If possible, drop an uninstall task here in order to avoid
7431 # the circular deps code path. The corresponding blocker will
7432 # still be counted as an unresolved conflict.
7434 for node in myblocker_uninstalls.leaf_nodes():
7436 mygraph.remove(node)
7441 ignored_uninstall_tasks.add(node)
7444 if uninst_task is not None:
7445 # Reset the state variables for leaf node selection and
7446 # continue trying to select leaf nodes.
7448 drop_satisfied = False
# All fallbacks exhausted: the remaining graph is circular.
7451 if not selected_nodes:
7452 self._circular_deps_for_display = mygraph
7453 raise self._unknown_internal_error()
7455 # At this point, we've succeeded in selecting one or more nodes, so
7456 # reset state variables for leaf node selection.
7458 drop_satisfied = False
7460 mygraph.difference_update(selected_nodes)
7462 for node in selected_nodes:
7463 if isinstance(node, Package) and \
7464 node.operation == "nomerge":
7467 # Handle interactions between blockers
7468 # and uninstallation tasks.
7469 solved_blockers = set()
7471 if isinstance(node, Package) and \
7472 "uninstall" == node.operation:
7473 have_uninstall_task = True
7476 vardb = self.trees[node.root]["vartree"].dbapi
7477 previous_cpv = vardb.match(node.slot_atom)
7479 # The package will be replaced by this one, so remove
7480 # the corresponding Uninstall task if necessary.
7481 previous_cpv = previous_cpv[0]
7483 ("installed", node.root, previous_cpv, "uninstall")
7485 mygraph.remove(uninst_task)
7489 if uninst_task is not None and \
7490 uninst_task not in ignored_uninstall_tasks and \
7491 myblocker_uninstalls.contains(uninst_task):
7492 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7493 myblocker_uninstalls.remove(uninst_task)
7494 # Discard any blockers that this Uninstall solves.
7495 for blocker in blocker_nodes:
7496 if not myblocker_uninstalls.child_nodes(blocker):
7497 myblocker_uninstalls.remove(blocker)
7498 solved_blockers.add(blocker)
7500 retlist.append(node)
7502 if (isinstance(node, Package) and \
7503 "uninstall" == node.operation) or \
7504 (uninst_task is not None and \
7505 uninst_task in scheduled_uninstalls):
7506 # Include satisfied blockers in the merge list
7507 # since the user might be interested and also
7508 # it serves as an indicator that blocking packages
7509 # will be temporarily installed simultaneously.
7510 for blocker in solved_blockers:
7511 retlist.append(Blocker(atom=blocker.atom,
7512 root=blocker.root, eapi=blocker.eapi,
# Any blockers still rooted in myblocker_uninstalls could not be
# solved by an uninstall; report them along with the known
# unsolvable blockers.
7515 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7516 for node in myblocker_uninstalls.root_nodes():
7517 unsolvable_blockers.add(node)
7519 for blocker in unsolvable_blockers:
7520 retlist.append(blocker)
7522 # If any Uninstall tasks need to be executed in order
7523 # to avoid a conflict, complete the graph with any
7524 # dependencies that may have been initially
7525 # neglected (to ensure that unsafe Uninstall tasks
7526 # are properly identified and blocked from execution).
7527 if have_uninstall_task and \
7529 not unsolvable_blockers:
7530 self.myparams.add("complete")
7531 raise self._serialize_tasks_retry("")
7533 if unsolvable_blockers and \
7534 not self._accept_blocker_conflicts():
7535 self._unsatisfied_blockers_for_display = unsolvable_blockers
7536 self._serialized_tasks_cache = retlist[:]
7537 self._scheduler_graph = scheduler_graph
7538 raise self._unknown_internal_error()
7540 if self._slot_collision_info and \
7541 not self._accept_blocker_conflicts():
7542 self._serialized_tasks_cache = retlist[:]
7543 self._scheduler_graph = scheduler_graph
7544 raise self._unknown_internal_error()
7546 return retlist, scheduler_graph
# Report a circular-dependency failure: repeatedly strip root nodes
# (which cannot be part of a cycle), re-display the remaining nodes with
# --tree forced on, dump the graph, and print advice about USE flags.
# NOTE(review): several lines are elided from this listing (e.g. the
# root-node pruning loop around original 7556-7557 and the
# display_order initialization at 7562) — confirm against full source.
7548 def _show_circular_deps(self, mygraph):
7549 # No leaf nodes are available, so we have a circular
7550 # dependency panic situation. Reduce the noise level to a
7551 # minimum via repeated elimination of root nodes since they
7552 # have no parents and thus can not be part of a cycle.
7554 root_nodes = mygraph.root_nodes(
7555 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7558 mygraph.difference_update(root_nodes)
7559 # Display the USE flags that are enabled on nodes that are part
7560 # of dependency cycles in case that helps the user decide to
7561 # disable some of them.
7563 tempgraph = mygraph.copy()
7564 while not tempgraph.empty():
7565 nodes = tempgraph.leaf_nodes()
7567 node = tempgraph.order[0]
7570 display_order.append(node)
7571 tempgraph.remove(node)
7572 display_order.reverse()
# Force verbose tree output so the cycle structure is visible.
7573 self.myopts.pop("--quiet", None)
7574 self.myopts.pop("--verbose", None)
7575 self.myopts["--tree"] = True
7576 portage.writemsg("\n\n", noiselevel=-1)
7577 self.display(display_order)
7578 prefix = colorize("BAD", " * ")
7579 portage.writemsg("\n", noiselevel=-1)
7580 portage.writemsg(prefix + "Error: circular dependencies:\n",
7582 portage.writemsg("\n", noiselevel=-1)
7583 mygraph.debug_print()
7584 portage.writemsg("\n", noiselevel=-1)
7585 portage.writemsg(prefix + "Note that circular dependencies " + \
7586 "can often be avoided by temporarily\n", noiselevel=-1)
7587 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7588 "optional dependencies.\n", noiselevel=-1)
7590 def _show_merge_list(self):
7591 if self._serialized_tasks_cache is not None and \
7592 not (self._displayed_list and \
7593 (self._displayed_list == self._serialized_tasks_cache or \
7594 self._displayed_list == \
7595 list(reversed(self._serialized_tasks_cache)))):
7596 display_list = self._serialized_tasks_cache[:]
7597 if "--tree" in self.myopts:
7598 display_list.reverse()
7599 self.display(display_list)
# Print an error explaining that the displayed merge list contains
# mutually blocking packages, then show each conflicting package along
# with a pruned subset of the parents (parent, atom) that pulled it in.
# NOTE(review): several initializer lines are elided from this listing
# (e.g. conflict_pkgs/pruned_pkgs, max_parents, indent, pruned_list,
# and the re-binding of `msg` to a list around original 7646) —
# confirm against the full source.
7601 def _show_unsatisfied_blockers(self, blockers):
7602 self._show_merge_list()
7603 msg = "Error: The above package list contains " + \
7604 "packages which cannot be installed " + \
7605 "at the same time on the same system."
7606 prefix = colorize("BAD", " * ")
7607 from textwrap import wrap
7608 portage.writemsg("\n", noiselevel=-1)
7609 for line in wrap(msg, 70):
7610 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7612 # Display the conflicting packages along with the packages
7613 # that pulled them in. This is helpful for troubleshooting
7614 # cases in which blockers don't solve automatically and
7615 # the reasons are not apparent from the normal merge list
7619 for blocker in blockers:
7620 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7621 self._blocker_parents.parent_nodes(blocker)):
7622 parent_atoms = self._parent_atoms.get(pkg)
7623 if not parent_atoms:
# No recorded parents: fall back to the world-set atom that
# keeps this installed package from being uninstalled.
7624 atom = self._blocked_world_pkgs.get(pkg)
7625 if atom is not None:
7626 parent_atoms = set([("@world", atom)])
7628 conflict_pkgs[pkg] = parent_atoms
7631 # Reduce noise by pruning packages that are only
7632 # pulled in by other conflict packages.
7634 for pkg, parent_atoms in conflict_pkgs.iteritems():
7635 relevant_parent = False
7636 for parent, atom in parent_atoms:
7637 if parent not in conflict_pkgs:
7638 relevant_parent = True
7640 if not relevant_parent:
7641 pruned_pkgs.add(pkg)
7642 for pkg in pruned_pkgs:
7643 del conflict_pkgs[pkg]
7649 # Max number of parents shown, to avoid flooding the display.
7651 for pkg, parent_atoms in conflict_pkgs.iteritems():
7655 # Prefer packages that are not directly involved in a conflict.
7656 for parent_atom in parent_atoms:
7657 if len(pruned_list) >= max_parents:
7659 parent, atom = parent_atom
7660 if parent not in conflict_pkgs:
7661 pruned_list.add(parent_atom)
# Second pass fills any remaining display slots with conflict
# parents as well.
7663 for parent_atom in parent_atoms:
7664 if len(pruned_list) >= max_parents:
7666 pruned_list.add(parent_atom)
7668 omitted_parents = len(parent_atoms) - len(pruned_list)
7669 msg.append(indent + "%s pulled in by\n" % pkg)
7671 for parent_atom in pruned_list:
7672 parent, atom = parent_atom
7673 msg.append(2*indent)
7674 if isinstance(parent,
7675 (PackageArg, AtomArg)):
7676 # For PackageArg and AtomArg types, it's
7677 # redundant to display the atom attribute.
7678 msg.append(str(parent))
7680 # Display the specific atom from SetArg or
7682 msg.append("%s required by %s" % (atom, parent))
7686 msg.append(2*indent)
7687 msg.append("(and %d more)\n" % omitted_parents)
7691 sys.stderr.write("".join(msg))
7694 if "--quiet" not in self.myopts:
7695 show_blocker_docs_link()
7697 def display(self, mylist, favorites=[], verbosity=None):
7699 # This is used to prevent display_problems() from
7700 # redundantly displaying this exact same merge list
7701 # again via _show_merge_list().
7702 self._displayed_list = mylist
7704 if verbosity is None:
7705 verbosity = ("--quiet" in self.myopts and 1 or \
7706 "--verbose" in self.myopts and 3 or 2)
7707 favorites_set = InternalPackageSet(favorites)
7708 oneshot = "--oneshot" in self.myopts or \
7709 "--onlydeps" in self.myopts
7710 columns = "--columns" in self.myopts
7715 counters = PackageCounters()
7717 if verbosity == 1 and "--verbose" not in self.myopts:
7718 def create_use_string(*args):
7721 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7723 is_new, reinst_flags,
7724 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7725 alphabetical=("--alphabetical" in self.myopts)):
7733 cur_iuse = set(cur_iuse)
7734 enabled_flags = cur_iuse.intersection(cur_use)
7735 removed_iuse = set(old_iuse).difference(cur_iuse)
7736 any_iuse = cur_iuse.union(old_iuse)
7737 any_iuse = list(any_iuse)
7739 for flag in any_iuse:
7742 reinst_flag = reinst_flags and flag in reinst_flags
7743 if flag in enabled_flags:
7745 if is_new or flag in old_use and \
7746 (all_flags or reinst_flag):
7747 flag_str = red(flag)
7748 elif flag not in old_iuse:
7749 flag_str = yellow(flag) + "%*"
7750 elif flag not in old_use:
7751 flag_str = green(flag) + "*"
7752 elif flag in removed_iuse:
7753 if all_flags or reinst_flag:
7754 flag_str = yellow("-" + flag) + "%"
7757 flag_str = "(" + flag_str + ")"
7758 removed.append(flag_str)
7761 if is_new or flag in old_iuse and \
7762 flag not in old_use and \
7763 (all_flags or reinst_flag):
7764 flag_str = blue("-" + flag)
7765 elif flag not in old_iuse:
7766 flag_str = yellow("-" + flag)
7767 if flag not in iuse_forced:
7769 elif flag in old_use:
7770 flag_str = green("-" + flag) + "*"
7772 if flag in iuse_forced:
7773 flag_str = "(" + flag_str + ")"
7775 enabled.append(flag_str)
7777 disabled.append(flag_str)
7780 ret = " ".join(enabled)
7782 ret = " ".join(enabled + disabled + removed)
7784 ret = '%s="%s" ' % (name, ret)
7787 repo_display = RepoDisplay(self.roots)
7791 mygraph = self.digraph.copy()
7793 # If there are any Uninstall instances, add the corresponding
7794 # blockers to the digraph (useful for --tree display).
7796 executed_uninstalls = set(node for node in mylist \
7797 if isinstance(node, Package) and node.operation == "unmerge")
7799 for uninstall in self._blocker_uninstalls.leaf_nodes():
7800 uninstall_parents = \
7801 self._blocker_uninstalls.parent_nodes(uninstall)
7802 if not uninstall_parents:
7805 # Remove the corresponding "nomerge" node and substitute
7806 # the Uninstall node.
7807 inst_pkg = self._pkg_cache[
7808 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7810 mygraph.remove(inst_pkg)
7815 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7817 inst_pkg_blockers = []
7819 # Break the Package -> Uninstall edges.
7820 mygraph.remove(uninstall)
7822 # Resolution of a package's blockers
7823 # depend on it's own uninstallation.
7824 for blocker in inst_pkg_blockers:
7825 mygraph.add(uninstall, blocker)
7827 # Expand Package -> Uninstall edges into
7828 # Package -> Blocker -> Uninstall edges.
7829 for blocker in uninstall_parents:
7830 mygraph.add(uninstall, blocker)
7831 for parent in self._blocker_parents.parent_nodes(blocker):
7832 if parent != inst_pkg:
7833 mygraph.add(blocker, parent)
7835 # If the uninstall task did not need to be executed because
7836 # of an upgrade, display Blocker -> Upgrade edges since the
7837 # corresponding Blocker -> Uninstall edges will not be shown.
7839 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7840 if upgrade_node is not None and \
7841 uninstall not in executed_uninstalls:
7842 for blocker in uninstall_parents:
7843 mygraph.add(upgrade_node, blocker)
7845 unsatisfied_blockers = []
7850 if isinstance(x, Blocker) and not x.satisfied:
7851 unsatisfied_blockers.append(x)
7854 if "--tree" in self.myopts:
7855 depth = len(tree_nodes)
7856 while depth and graph_key not in \
7857 mygraph.child_nodes(tree_nodes[depth-1]):
7860 tree_nodes = tree_nodes[:depth]
7861 tree_nodes.append(graph_key)
7862 display_list.append((x, depth, True))
7863 shown_edges.add((graph_key, tree_nodes[depth-1]))
7865 traversed_nodes = set() # prevent endless circles
7866 traversed_nodes.add(graph_key)
7867 def add_parents(current_node, ordered):
7869 # Do not traverse to parents if this node is an
7870 # an argument or a direct member of a set that has
7871 # been specified as an argument (system or world).
7872 if current_node not in self._set_nodes:
7873 parent_nodes = mygraph.parent_nodes(current_node)
7875 child_nodes = set(mygraph.child_nodes(current_node))
7876 selected_parent = None
7877 # First, try to avoid a direct cycle.
7878 for node in parent_nodes:
7879 if not isinstance(node, (Blocker, Package)):
7881 if node not in traversed_nodes and \
7882 node not in child_nodes:
7883 edge = (current_node, node)
7884 if edge in shown_edges:
7886 selected_parent = node
7888 if not selected_parent:
7889 # A direct cycle is unavoidable.
7890 for node in parent_nodes:
7891 if not isinstance(node, (Blocker, Package)):
7893 if node not in traversed_nodes:
7894 edge = (current_node, node)
7895 if edge in shown_edges:
7897 selected_parent = node
7900 shown_edges.add((current_node, selected_parent))
7901 traversed_nodes.add(selected_parent)
7902 add_parents(selected_parent, False)
7903 display_list.append((current_node,
7904 len(tree_nodes), ordered))
7905 tree_nodes.append(current_node)
7907 add_parents(graph_key, True)
7909 display_list.append((x, depth, True))
7910 mylist = display_list
7911 for x in unsatisfied_blockers:
7912 mylist.append((x, 0, True))
7914 last_merge_depth = 0
7915 for i in xrange(len(mylist)-1,-1,-1):
7916 graph_key, depth, ordered = mylist[i]
7917 if not ordered and depth == 0 and i > 0 \
7918 and graph_key == mylist[i-1][0] and \
7919 mylist[i-1][1] == 0:
7920 # An ordered node got a consecutive duplicate when the tree was
7924 if ordered and graph_key[-1] != "nomerge":
7925 last_merge_depth = depth
7927 if depth >= last_merge_depth or \
7928 i < len(mylist) - 1 and \
7929 depth >= mylist[i+1][1]:
7932 from portage import flatten
7933 from portage.dep import use_reduce, paren_reduce
7934 # files to fetch list - avoids counting a same file twice
7935 # in size display (verbose mode)
7938 # Use this set to detect when all the "repoadd" strings are "[0]"
7939 # and disable the entire repo display in this case.
7942 for mylist_index in xrange(len(mylist)):
7943 x, depth, ordered = mylist[mylist_index]
7947 portdb = self.trees[myroot]["porttree"].dbapi
7948 bindb = self.trees[myroot]["bintree"].dbapi
7949 vardb = self.trees[myroot]["vartree"].dbapi
7950 vartree = self.trees[myroot]["vartree"]
7951 pkgsettings = self.pkgsettings[myroot]
7954 indent = " " * depth
7956 if isinstance(x, Blocker):
7958 blocker_style = "PKG_BLOCKER_SATISFIED"
7959 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7961 blocker_style = "PKG_BLOCKER"
7962 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7964 counters.blocks += 1
7966 counters.blocks_satisfied += 1
7967 resolved = portage.key_expand(
7968 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7969 if "--columns" in self.myopts and "--quiet" in self.myopts:
7970 addl += " " + colorize(blocker_style, resolved)
7972 addl = "[%s %s] %s%s" % \
7973 (colorize(blocker_style, "blocks"),
7974 addl, indent, colorize(blocker_style, resolved))
7975 block_parents = self._blocker_parents.parent_nodes(x)
7976 block_parents = set([pnode[2] for pnode in block_parents])
7977 block_parents = ", ".join(block_parents)
7979 addl += colorize(blocker_style,
7980 " (\"%s\" is blocking %s)") % \
7981 (str(x.atom).lstrip("!"), block_parents)
7983 addl += colorize(blocker_style,
7984 " (is blocking %s)") % block_parents
7985 if isinstance(x, Blocker) and x.satisfied:
7990 blockers.append(addl)
7993 pkg_merge = ordered and pkg_status == "merge"
7994 if not pkg_merge and pkg_status == "merge":
7995 pkg_status = "nomerge"
7996 built = pkg_type != "ebuild"
7997 installed = pkg_type == "installed"
7999 metadata = pkg.metadata
8001 repo_name = metadata["repository"]
8002 if pkg_type == "ebuild":
8003 ebuild_path = portdb.findname(pkg_key)
8004 if not ebuild_path: # shouldn't happen
8005 raise portage.exception.PackageNotFound(pkg_key)
8006 repo_path_real = os.path.dirname(os.path.dirname(
8007 os.path.dirname(ebuild_path)))
8009 repo_path_real = portdb.getRepositoryPath(repo_name)
8010 pkg_use = list(pkg.use.enabled)
8012 restrict = flatten(use_reduce(paren_reduce(
8013 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8014 except portage.exception.InvalidDependString, e:
8015 if not pkg.installed:
8016 show_invalid_depstring_notice(x,
8017 pkg.metadata["RESTRICT"], str(e))
8021 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8022 "fetch" in restrict:
8025 counters.restrict_fetch += 1
8026 if portdb.fetch_check(pkg_key, pkg_use):
8029 counters.restrict_fetch_satisfied += 1
8031 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8032 #param is used for -u, where you still *do* want to see when something is being upgraded.
8035 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8036 if vardb.cpv_exists(pkg_key):
8037 addl=" "+yellow("R")+fetch+" "
8040 counters.reinst += 1
8041 elif pkg_status == "uninstall":
8042 counters.uninst += 1
8043 # filter out old-style virtual matches
8044 elif installed_versions and \
8045 portage.cpv_getkey(installed_versions[0]) == \
8046 portage.cpv_getkey(pkg_key):
8047 myinslotlist = vardb.match(pkg.slot_atom)
8048 # If this is the first install of a new-style virtual, we
8049 # need to filter out old-style virtual matches.
8050 if myinslotlist and \
8051 portage.cpv_getkey(myinslotlist[0]) != \
8052 portage.cpv_getkey(pkg_key):
8055 myoldbest = myinslotlist[:]
8057 if not portage.dep.cpvequal(pkg_key,
8058 portage.best([pkg_key] + myoldbest)):
8060 addl += turquoise("U")+blue("D")
8062 counters.downgrades += 1
8065 addl += turquoise("U") + " "
8067 counters.upgrades += 1
8069 # New slot, mark it new.
8070 addl = " " + green("NS") + fetch + " "
8071 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8073 counters.newslot += 1
8075 if "--changelog" in self.myopts:
8076 inst_matches = vardb.match(pkg.slot_atom)
8078 changelogs.extend(self.calc_changelog(
8079 portdb.findname(pkg_key),
8080 inst_matches[0], pkg_key))
8082 addl = " " + green("N") + " " + fetch + " "
8091 forced_flags = set()
8092 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8093 forced_flags.update(pkgsettings.useforce)
8094 forced_flags.update(pkgsettings.usemask)
8096 cur_use = [flag for flag in pkg.use.enabled \
8097 if flag in pkg.iuse.all]
8098 cur_iuse = sorted(pkg.iuse.all)
8100 if myoldbest and myinslotlist:
8101 previous_cpv = myoldbest[0]
8103 previous_cpv = pkg.cpv
8104 if vardb.cpv_exists(previous_cpv):
8105 old_iuse, old_use = vardb.aux_get(
8106 previous_cpv, ["IUSE", "USE"])
8107 old_iuse = list(set(
8108 filter_iuse_defaults(old_iuse.split())))
8110 old_use = old_use.split()
8117 old_use = [flag for flag in old_use if flag in old_iuse]
8119 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8121 use_expand.reverse()
8122 use_expand_hidden = \
8123 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8125 def map_to_use_expand(myvals, forcedFlags=False,
8129 for exp in use_expand:
8132 for val in myvals[:]:
8133 if val.startswith(exp.lower()+"_"):
8134 if val in forced_flags:
8135 forced[exp].add(val[len(exp)+1:])
8136 ret[exp].append(val[len(exp)+1:])
8139 forced["USE"] = [val for val in myvals \
8140 if val in forced_flags]
8142 for exp in use_expand_hidden:
8148 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8149 # are the only thing that triggered reinstallation.
8150 reinst_flags_map = {}
8151 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8152 reinst_expand_map = None
8153 if reinstall_for_flags:
8154 reinst_flags_map = map_to_use_expand(
8155 list(reinstall_for_flags), removeHidden=False)
8156 for k in list(reinst_flags_map):
8157 if not reinst_flags_map[k]:
8158 del reinst_flags_map[k]
8159 if not reinst_flags_map.get("USE"):
8160 reinst_expand_map = reinst_flags_map.copy()
8161 reinst_expand_map.pop("USE", None)
8162 if reinst_expand_map and \
8163 not set(reinst_expand_map).difference(
8165 use_expand_hidden = \
8166 set(use_expand_hidden).difference(
8169 cur_iuse_map, iuse_forced = \
8170 map_to_use_expand(cur_iuse, forcedFlags=True)
8171 cur_use_map = map_to_use_expand(cur_use)
8172 old_iuse_map = map_to_use_expand(old_iuse)
8173 old_use_map = map_to_use_expand(old_use)
8176 use_expand.insert(0, "USE")
8178 for key in use_expand:
8179 if key in use_expand_hidden:
8181 verboseadd += create_use_string(key.upper(),
8182 cur_iuse_map[key], iuse_forced[key],
8183 cur_use_map[key], old_iuse_map[key],
8184 old_use_map[key], is_new,
8185 reinst_flags_map.get(key))
8190 if pkg_type == "ebuild" and pkg_merge:
8192 myfilesdict = portdb.getfetchsizes(pkg_key,
8193 useflags=pkg_use, debug=self.edebug)
8194 except portage.exception.InvalidDependString, e:
8195 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8196 show_invalid_depstring_notice(x, src_uri, str(e))
8199 if myfilesdict is None:
8200 myfilesdict="[empty/missing/bad digest]"
8202 for myfetchfile in myfilesdict:
8203 if myfetchfile not in myfetchlist:
8204 mysize+=myfilesdict[myfetchfile]
8205 myfetchlist.append(myfetchfile)
8207 counters.totalsize += mysize
8208 verboseadd += format_size(mysize)
8211 # assign index for a previous version in the same slot
8212 has_previous = False
8213 repo_name_prev = None
8214 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8216 slot_matches = vardb.match(slot_atom)
8219 repo_name_prev = vardb.aux_get(slot_matches[0],
8222 # now use the data to generate output
8223 if pkg.installed or not has_previous:
8224 repoadd = repo_display.repoStr(repo_path_real)
8226 repo_path_prev = None
8228 repo_path_prev = portdb.getRepositoryPath(
8230 if repo_path_prev == repo_path_real:
8231 repoadd = repo_display.repoStr(repo_path_real)
8233 repoadd = "%s=>%s" % (
8234 repo_display.repoStr(repo_path_prev),
8235 repo_display.repoStr(repo_path_real))
8237 repoadd_set.add(repoadd)
8239 xs = [portage.cpv_getkey(pkg_key)] + \
8240 list(portage.catpkgsplit(pkg_key)[2:])
8247 if "COLUMNWIDTH" in self.settings:
8249 mywidth = int(self.settings["COLUMNWIDTH"])
8250 except ValueError, e:
8251 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8253 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8254 self.settings["COLUMNWIDTH"], noiselevel=-1)
8256 oldlp = mywidth - 30
8259 # Convert myoldbest from a list to a string.
8263 for pos, key in enumerate(myoldbest):
8264 key = portage.catpkgsplit(key)[2] + \
8265 "-" + portage.catpkgsplit(key)[3]
8266 if key[-3:] == "-r0":
8268 myoldbest[pos] = key
8269 myoldbest = blue("["+", ".join(myoldbest)+"]")
8272 root_config = self.roots[myroot]
8273 system_set = root_config.sets["system"]
8274 world_set = root_config.sets["world"]
8279 pkg_system = system_set.findAtomForPackage(pkg)
8280 pkg_world = world_set.findAtomForPackage(pkg)
8281 if not (oneshot or pkg_world) and \
8282 myroot == self.target_root and \
8283 favorites_set.findAtomForPackage(pkg):
8284 # Maybe it will be added to world now.
8285 if create_world_atom(pkg, favorites_set, root_config):
8287 except portage.exception.InvalidDependString:
8288 # This is reported elsewhere if relevant.
8291 def pkgprint(pkg_str):
8294 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8296 return colorize("PKG_MERGE_WORLD", pkg_str)
8298 return colorize("PKG_MERGE", pkg_str)
8299 elif pkg_status == "uninstall":
8300 return colorize("PKG_UNINSTALL", pkg_str)
8303 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8305 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8307 return colorize("PKG_NOMERGE", pkg_str)
8310 properties = flatten(use_reduce(paren_reduce(
8311 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8312 except portage.exception.InvalidDependString, e:
8313 if not pkg.installed:
8314 show_invalid_depstring_notice(pkg,
8315 pkg.metadata["PROPERTIES"], str(e))
8319 interactive = "interactive" in properties
8320 if interactive and pkg.operation == "merge":
8321 addl = colorize("WARN", "I") + addl[1:]
8323 counters.interactive += 1
8328 if "--columns" in self.myopts:
8329 if "--quiet" in self.myopts:
8330 myprint=addl+" "+indent+pkgprint(pkg_cp)
8331 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8332 myprint=myprint+myoldbest
8333 myprint=myprint+darkgreen("to "+x[1])
8337 myprint = "[%s] %s%s" % \
8338 (pkgprint(pkg_status.ljust(13)),
8339 indent, pkgprint(pkg.cp))
8341 myprint = "[%s %s] %s%s" % \
8342 (pkgprint(pkg.type_name), addl,
8343 indent, pkgprint(pkg.cp))
8344 if (newlp-nc_len(myprint)) > 0:
8345 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8346 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8347 if (oldlp-nc_len(myprint)) > 0:
8348 myprint=myprint+" "*(oldlp-nc_len(myprint))
8349 myprint=myprint+myoldbest
8350 myprint += darkgreen("to " + pkg.root)
8353 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8355 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8356 myprint += indent + pkgprint(pkg_key) + " " + \
8357 myoldbest + darkgreen("to " + myroot)
8359 if "--columns" in self.myopts:
8360 if "--quiet" in self.myopts:
8361 myprint=addl+" "+indent+pkgprint(pkg_cp)
8362 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8363 myprint=myprint+myoldbest
8367 myprint = "[%s] %s%s" % \
8368 (pkgprint(pkg_status.ljust(13)),
8369 indent, pkgprint(pkg.cp))
8371 myprint = "[%s %s] %s%s" % \
8372 (pkgprint(pkg.type_name), addl,
8373 indent, pkgprint(pkg.cp))
8374 if (newlp-nc_len(myprint)) > 0:
8375 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8376 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8377 if (oldlp-nc_len(myprint)) > 0:
8378 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8379 myprint += myoldbest
8382 myprint = "[%s] %s%s %s" % \
8383 (pkgprint(pkg_status.ljust(13)),
8384 indent, pkgprint(pkg.cpv),
8387 myprint = "[%s %s] %s%s %s" % \
8388 (pkgprint(pkg_type), addl, indent,
8389 pkgprint(pkg.cpv), myoldbest)
8391 if columns and pkg.operation == "uninstall":
8393 p.append((myprint, verboseadd, repoadd))
8395 if "--tree" not in self.myopts and \
8396 "--quiet" not in self.myopts and \
8397 not self._opts_no_restart.intersection(self.myopts) and \
8398 pkg.root == self._running_root.root and \
8399 portage.match_from_list(
8400 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8401 not vardb.cpv_exists(pkg.cpv) and \
8402 "--quiet" not in self.myopts:
8403 if mylist_index < len(mylist) - 1:
8404 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8405 p.append(colorize("WARN", " then resume the merge."))
8408 show_repos = repoadd_set and repoadd_set != set(["0"])
8411 if isinstance(x, basestring):
8412 out.write("%s\n" % (x,))
8415 myprint, verboseadd, repoadd = x
8418 myprint += " " + verboseadd
8420 if show_repos and repoadd:
8421 myprint += " " + teal("[%s]" % repoadd)
8423 out.write("%s\n" % (myprint,))
8432 sys.stdout.write(str(repo_display))
8434 if "--changelog" in self.myopts:
8436 for revision,text in changelogs:
8437 print bold('*'+revision)
8438 sys.stdout.write(text)
# Public entry point for reporting graph problems (slot collisions, masked
# installed packages, unsatisfied deps). NOTE(review): this listing has
# elided lines (embedded line numbers are non-contiguous); comments below
# describe only the visible code.
8443 def display_problems(self):
8445 Display problems with the dependency graph such as slot collisions.
8446 This is called internally by display() to show the problems _after_
8447 the merge list where it is most likely to be seen, but if display()
8448 is not going to be called then this method should be called explicitly
8449 to ensure that the user is notified of problems with the graph.
8451 All output goes to stderr, except for unsatisfied dependencies which
8452 go to stdout for parsing by programs such as autounmask.
8455 # Note that show_masked_packages() sends its output to
8456 # stdout, and some programs such as autounmask parse the
8457 # output in cases when emerge bails out. However, when
8458 # show_masked_packages() is called for installed packages
8459 # here, the message is a warning that is more appropriate
8460 # to send to stderr, so temporarily redirect stdout to
8461 # stderr. TODO: Fix output code so there's a cleaner way
8462 # to redirect everything to stderr.
# Redirect stdout -> stderr while _display_problems() runs.
# NOTE(review): the lines that restore sys.stdout afterwards (presumably a
# try/finally around this call) are elided from this listing — confirm the
# original restores sys.stdout before the loop below.
8467 sys.stdout = sys.stderr
8468 self._display_problems()
# Unsatisfied deps are deliberately printed to stdout (not stderr) so that
# tools like autounmask can parse them.
8474 # This goes to stdout for parsing by programs like autounmask.
8475 for pargs, kwargs in self._unsatisfied_deps_for_display:
8476 self._show_unsatisfied_dep(*pargs, **kwargs)
# Internal worker for display_problems(): emits each category of graph
# problem to stderr in turn. NOTE(review): this listing has elided lines
# (embedded line numbers are non-contiguous), so some branches (else/try
# lines, loop headers) are not visible here.
8478 def _display_problems(self):
# 1) Circular dependencies, if any were recorded during graph creation.
8479 if self._circular_deps_for_display is not None:
8480 self._show_circular_deps(
8481 self._circular_deps_for_display)
8483 # The user is only notified of a slot conflict if
8484 # there are no unresolvable blocker conflicts.
8485 if self._unsatisfied_blockers_for_display is not None:
8486 self._show_unsatisfied_blockers(
8487 self._unsatisfied_blockers_for_display)
8489 self._show_slot_collision_notice()
8491 # TODO: Add generic support for "set problem" handlers so that
8492 # the below warnings aren't special cases for world only.
# 2) Arguments for which no matching ebuild was found; warn specifically
# when the missing atom came directly from the world file.
8494 if self._missing_args:
8495 world_problems = False
8496 if "world" in self._sets:
8497 # Filter out indirect members of world (from nested sets)
8498 # since only direct members of world are desired here.
8499 world_set = self.roots[self.target_root].sets["world"]
8500 for arg, atom in self._missing_args:
8501 if arg.name == "world" and atom in world_set:
8502 world_problems = True
8506 sys.stderr.write("\n!!! Problems have been " + \
8507 "detected with your world file\n")
8508 sys.stderr.write("!!! Please run " + \
8509 green("emaint --check world")+"\n\n")
8511 if self._missing_args:
8512 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8513 " Ebuilds for the following packages are either all\n")
8514 sys.stderr.write(colorize("BAD", "!!!") + \
8515 " masked or don't exist:\n")
8516 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8517 self._missing_args) + "\n")
# 3) Requested atoms that are satisfied via package.provided and therefore
# will not actually be merged; group them by (arg, atom) with the sets
# that pulled each one in.
8519 if self._pprovided_args:
8521 for arg, atom in self._pprovided_args:
8522 if isinstance(arg, SetArg):
8524 arg_atom = (atom, atom)
8527 arg_atom = (arg.arg, atom)
8528 refs = arg_refs.setdefault(arg_atom, [])
8529 if parent not in refs:
8532 msg.append(bad("\nWARNING: "))
8533 if len(self._pprovided_args) > 1:
8534 msg.append("Requested packages will not be " + \
8535 "merged because they are listed in\n")
8537 msg.append("A requested package will not be " + \
8538 "merged because it is listed in\n")
8539 msg.append("package.provided:\n\n")
8540 problems_sets = set()
8541 for (arg, atom), refs in arg_refs.iteritems():
8544 problems_sets.update(refs)
8546 ref_string = ", ".join(["'%s'" % name for name in refs])
8547 ref_string = " pulled in by " + ref_string
8548 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
# Offer remediation advice only when the world set itself is affected.
8550 if "world" in problems_sets:
8551 msg.append("This problem can be solved in one of the following ways:\n\n")
8552 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8553 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8554 msg.append(" C) Remove offending entries from package.provided.\n\n")
8555 msg.append("The best course of action depends on the reason that an offending\n")
8556 msg.append("package.provided entry exists.\n\n")
8557 sys.stderr.write("".join(msg))
# 4) Installed packages that are now masked: collect masking reasons and
# hand off to show_masked_packages() for formatting.
8559 masked_packages = []
8560 for pkg in self._masked_installed:
8561 root_config = pkg.root_config
8562 pkgsettings = self.pkgsettings[pkg.root]
8563 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8564 masked_packages.append((root_config, pkgsettings,
8565 pkg.cpv, pkg.metadata, mreasons))
8567 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8568 " The following installed packages are masked:\n")
8569 show_masked_packages(masked_packages)
# Return the ChangeLog entries between the installed version (`current`)
# and the version about to be merged (`next`), for --changelog display.
# NOTE(review): this listing has elided lines (e.g. the early-return body
# at 8575 and the try:/except IOError handling around the file read), so
# only the visible logic is documented here.
8573 def calc_changelog(self,ebuildpath,current,next):
8574 if ebuildpath == None or not os.path.exists(ebuildpath):
# Normalize both versions to "PV-PR" form, dropping a trailing "-r0"
# since ChangeLog headers omit the zero revision.
8576 current = '-'.join(portage.catpkgsplit(current)[1:])
8577 if current.endswith('-r0'):
8578 current = current[:-3]
8579 next = '-'.join(portage.catpkgsplit(next)[1:])
8580 if next.endswith('-r0'):
# The ChangeLog lives next to the ebuild in the package directory.
8582 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8584 changelog = open(changelogpath).read()
8585 except SystemExit, e:
8586 raise # Needed else can't exit
# Split the ChangeLog into (version, text) divisions, newest first.
8589 divisions = self.find_changelog_tags(changelog)
8590 #print 'XX from',current,'to',next
8591 #for div,text in divisions: print 'XX',div
8592 # skip entries for all revisions above the one we are about to emerge
8593 for i in range(len(divisions)):
8594 if divisions[i][0]==next:
8595 divisions = divisions[i:]
8597 # find out how many entries we are going to display
8598 for i in range(len(divisions)):
8599 if divisions[i][0]==current:
8600 divisions = divisions[:i]
8603 # couldn't find the current revision in the list; display nothing
# Parse a ChangeLog body into (release, text) divisions by scanning for
# "*<version>" header lines. NOTE(review): the loop construct and the
# initialization of `divs`/`release`, plus the final return, are elided
# from this listing; only the visible per-match logic is documented.
8607 def find_changelog_tags(self,changelog):
# Match a ChangeLog header line of the form "* <pkg-version> ..." in
# multiline mode; group(1) captures the version token.
8611 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# No further header: the remaining text all belongs to the last release.
8613 if release is not None:
8614 divs.append((release,changelog))
# Text before this header belongs to the previously-seen release.
8616 if release is not None:
8617 divs.append((release,changelog[:match.start()]))
8618 changelog = changelog[match.end():]
8619 release = match.group(1)
# Normalize the captured version: strip a ".ebuild" suffix and a "-r0"
# zero-revision so it compares equal to calc_changelog()'s versions.
8620 if release.endswith('.ebuild'):
8621 release = release[:-7]
8622 if release.endswith('-r0'):
8623 release = release[:-3]
# NOTE(review): this listing has elided lines (the lock/unlock calls around
# 8637 and the loop headers near 8668/8673 are not visible); comments
# describe only what is shown.
8625 def saveNomergeFavorites(self):
8626 """Find atoms in favorites that are not in the mergelist and add them
8627 to the world file if necessary."""
# Never touch the world file for dry-run / fetch-only style invocations.
8628 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8629 "--oneshot", "--onlydeps", "--pretend"):
8630 if x in self.myopts:
8632 root_config = self.roots[self.target_root]
8633 world_set = root_config.sets["world"]
# Lock the world set if the backend supports locking, so concurrent
# emerge instances don't clobber each other's updates.
8635 world_locked = False
8636 if hasattr(world_set, "lock"):
8640 if hasattr(world_set, "load"):
8641 world_set.load() # maybe it's changed on disk
8643 args_set = self._sets["args"]
8644 portdb = self.trees[self.target_root]["porttree"].dbapi
8645 added_favorites = set()
# Collect world atoms for requested packages that ended up as "nomerge"
# nodes (already installed), deduplicating as we go.
8646 for x in self._set_nodes:
8647 pkg_type, root, pkg_key, pkg_status = x
8648 if pkg_status != "nomerge":
8652 myfavkey = create_world_atom(x, args_set, root_config)
8654 if myfavkey in added_favorites:
8656 added_favorites.add(myfavkey)
8657 except portage.exception.InvalidDependString, e:
# A broken PROVIDE in the installed package's metadata makes the
# world atom uncomputable; warn and point at the vdb file.
8658 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8659 (pkg_key, str(e)), noiselevel=-1)
8660 writemsg("!!! see '%s'\n\n" % os.path.join(
8661 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested package sets (prefixed with SETPREFIX), except the
# pseudo-sets "args"/"world" and sets not flagged as world candidates.
8664 for k in self._sets:
8665 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8670 all_added.append(SETPREFIX + k)
8671 all_added.extend(added_favorites)
8674 print ">>> Recording %s in \"world\" favorites file..." % \
8675 colorize("INFORM", str(a))
8677 world_set.update(all_added)
# Rebuild the dependency graph from saved --resume data, validating each
# mergelist entry against the current trees. NOTE(review): this listing
# has elided lines (loop headers, else/try lines, early returns), so only
# the visible logic is documented.
8682 def loadResumeCommand(self, resume_data, skip_masked=False):
8684 Add a resume command to the graph and validate it in the process. This
8685 will raise a PackageNotFound exception if a package is not available.
# Defensive validation: resume_data must be a dict carrying a list-typed
# "mergelist"; otherwise there is nothing to resume.
8688 if not isinstance(resume_data, dict):
8691 mergelist = resume_data.get("mergelist")
8692 if not isinstance(mergelist, list):
8695 fakedb = self.mydbapi
8697 serialized_tasks = []
# Each mergelist entry is a 4-tuple-like list: (pkg_type, root, cpv, action).
# Malformed entries, unknown package types, and non-merge actions are
# handled by the (elided) branches following each check.
8700 if not (isinstance(x, list) and len(x) == 4):
8702 pkg_type, myroot, pkg_key, action = x
8703 if pkg_type not in self.pkg_tree_map:
8705 if action != "merge":
8707 tree_type = self.pkg_tree_map[pkg_type]
8708 mydb = trees[myroot][tree_type].dbapi
8709 db_keys = list(self._trees_orig[myroot][
8710 tree_type].dbapi._aux_cache_keys)
# Fetch metadata lazily via izip; aux_get raising here means the entry
# is stale relative to the current tree.
8712 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8714 # It does not exist or it is corrupt.
8715 if action == "uninstall":
8717 raise portage.exception.PackageNotFound(pkg_key)
8718 installed = action == "uninstall"
8719 built = pkg_type != "ebuild"
8720 root_config = self.roots[myroot]
8721 pkg = Package(built=built, cpv=pkg_key,
8722 installed=installed, metadata=metadata,
8723 operation=action, root_config=root_config,
# For ebuilds, recompute USE/CHOST from current settings rather than
# trusting whatever was saved in the resume data.
8725 if pkg_type == "ebuild":
8726 pkgsettings = self.pkgsettings[myroot]
8727 pkgsettings.setcpv(pkg)
8728 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8729 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8730 self._pkg_cache[pkg] = pkg
8732 root_config = self.roots[pkg.root]
# A to-be-merged package that is no longer visible is either deferred
# as a masked task (skip_masked path, presumably) or queued for an
# unsatisfied-dep display -- TODO confirm against the elided branch.
8733 if "merge" == pkg.operation and \
8734 not visible(root_config.settings, pkg):
8736 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8738 self._unsatisfied_deps_for_display.append(
8739 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8741 fakedb[myroot].cpv_inject(pkg)
8742 serialized_tasks.append(pkg)
8743 self.spinner.update()
8745 if self._unsatisfied_deps_for_display:
# With --nodeps (or an empty task list) reuse the serialized order as-is
# instead of re-running dependency calculation.
8748 if not serialized_tasks or "--nodeps" in self.myopts:
8749 self._serialized_tasks_cache = serialized_tasks
8750 self._scheduler_graph = self.digraph
8752 self._select_package = self._select_pkg_from_graph
8753 self.myparams.add("selective")
8754 # Always traverse deep dependencies in order to account for
8755 # potentially unsatisfied dependencies of installed packages.
8756 # This is necessary for correct --keep-going or --resume operation
8757 # in case a package from a group of circularly dependent packages
8758 # fails. In this case, a package which has recently been installed
8759 # may have an unsatisfied circular dependency (pulled in by
8760 # PDEPEND, for example). So, even though a package is already
8761 # installed, it may not have all of it's dependencies satisfied, so
8762 # it may not be usable. If such a package is in the subgraph of
8763 # deep dependencies of a scheduled build, that build needs to
8764 # be cancelled. In order for this type of situation to be
8765 # recognized, deep traversal of dependencies is required.
8766 self.myparams.add("deep")
8768 favorites = resume_data.get("favorites")
8769 args_set = self._sets["args"]
8770 if isinstance(favorites, list):
8771 args = self._load_favorites(favorites)
# Re-add each merge task to the graph; _add_pkg() returning False aborts
# (handled by the elided branch).
8775 for task in serialized_tasks:
8776 if isinstance(task, Package) and \
8777 task.operation == "merge":
8778 if not self._add_pkg(task, None):
8781 # Packages for argument atoms need to be explicitly
8782 # added via _add_pkg() so that they are included in the
8783 # digraph (needed at least for --tree display).
8785 for atom in arg.set:
8786 pkg, existing_node = self._select_package(
8787 arg.root_config.root, atom)
8788 if existing_node is None and \
8790 if not self._add_pkg(pkg, Dependency(atom=atom,
8791 root=pkg.root, parent=arg)):
8794 # Allow unsatisfied deps here to avoid showing a masking
8795 # message for an unsatisfied dep that isn't necessarily
8797 if not self._create_graph(allow_unsatisfied=True):
# Unsatisfied deps whose parent is itself scheduled to merge always
# invalidate the resume list.
8800 unsatisfied_deps = []
8801 for dep in self._unsatisfied_deps:
8802 if not isinstance(dep.parent, Package):
8804 if dep.parent.operation == "merge":
8805 unsatisfied_deps.append(dep)
8808 # For unsatisfied deps of installed packages, only account for
8809 # them if they are in the subgraph of dependencies of a package
8810 # which is scheduled to be installed.
8811 unsatisfied_install = False
# Walk ancestors of the dep's parent looking for any node that is
# scheduled to merge; `traversed` guards against revisiting nodes.
8813 dep_stack = self.digraph.parent_nodes(dep.parent)
8815 node = dep_stack.pop()
8816 if not isinstance(node, Package):
8818 if node.operation == "merge":
8819 unsatisfied_install = True
8821 if node in traversed:
8824 dep_stack.extend(self.digraph.parent_nodes(node))
8826 if unsatisfied_install:
8827 unsatisfied_deps.append(dep)
8829 if masked_tasks or unsatisfied_deps:
8830 # This probably means that a required package
8831 # was dropped via --skipfirst. It makes the
8832 # resume list invalid, so convert it to a
8833 # UnsatisfiedResumeDep exception.
8834 raise self.UnsatisfiedResumeDep(self,
8835 masked_tasks + unsatisfied_deps)
# Force _serialize_tasks() to recompute ordering on next access.
8836 self._serialized_tasks_cache = None
8839 except self._unknown_internal_error:
# NOTE(review): this listing has elided lines (the loop header over
# `favorites`, the `args` initialization, and continue/return lines are
# not visible); comments describe only what is shown.
8844 def _load_favorites(self, favorites):
8846 Use a list of favorites to resume state from a
8847 previous select_files() call. This creates similar
8848 DependencyArg instances to those that would have
8849 been created by the original select_files() call.
8850 This allows Package instances to be matched with
8851 DependencyArg instances during graph creation.
8853 root_config = self.roots[self.target_root]
8854 getSetAtoms = root_config.setconfig.getSetAtoms
8855 sets = root_config.sets
# Skip non-string entries and the bare "system"/"world" names; entries
# starting with SETPREFIX are package sets, everything else is an atom.
8858 if not isinstance(x, basestring):
8860 if x in ("system", "world"):
8862 if x.startswith(SETPREFIX):
8863 s = x[len(SETPREFIX):]
8868 # Recursively expand sets so that containment tests in
8869 # self._get_parent_sets() properly match atoms in nested
8870 # sets (like if world contains system).
8871 expanded_set = InternalPackageSet(
8872 initial_atoms=getSetAtoms(s))
8873 self._sets[s] = expanded_set
8874 args.append(SetArg(arg=x, set=expanded_set,
8875 root_config=root_config))
# Non-set entries must be valid atoms to become AtomArg instances.
8877 if not portage.isvalidatom(x):
8879 args.append(AtomArg(arg=x, atom=x,
8880 root_config=root_config))
# Register the reconstructed args so graph creation can match packages
# back to the original request.
8882 self._set_args(args)
# Raised by loadResumeCommand() when the saved resume list references
# dependencies that are no longer satisfiable; carries the depgraph so the
# caller can inspect/report the failed deps.
8885 class UnsatisfiedResumeDep(portage.exception.PortageException):
8887 A dependency of a resume list is not installed. This
8888 can occur when a required package is dropped from the
8889 merge list via --skipfirst.
8891 def __init__(self, depgraph, value):
8892 portage.exception.PortageException.__init__(self, value)
# Keep a reference to the graph that detected the problem.
8893 self.depgraph = depgraph
# Base class for exceptions used only for internal depgraph control flow;
# defaults the message to an empty string.
8895 class _internal_exception(portage.exception.PortageException):
8896 def __init__(self, value=""):
8897 portage.exception.PortageException.__init__(self, value)
# Control-flow exception: aborts graph creation after the real error has
# already been reported on stderr; carries no machine-readable reason.
8899 class _unknown_internal_error(_internal_exception):
8901 Used by the depgraph internally to terminate graph creation.
8902 The specific reason for the failure should have been dumped
8903 to stderr, unfortunately, the exact reason for the failure
# Control-flow exception: signals that _serialize_tasks() must be re-run
# (e.g. after neglected dependencies were added to the graph).
8907 class _serialize_tasks_retry(_internal_exception):
8909 This is raised by the _serialize_tasks() method when it needs to
8910 be called again for some reason. The only case that it's currently
8911 used for is when neglected dependencies need to be added to the
8912 graph in order to avoid making a potentially unsafe decision.
# NOTE(review): this listing has elided lines (cache-hit early return in
# match(), `slots` initialization, several return statements), so comments
# describe only the visible logic.
8915 class _dep_check_composite_db(portage.dbapi):
8917 A dbapi-like interface that is optimized for use in dep_check() calls.
8918 This is built on top of the existing depgraph package selection logic.
8919 Some packages that have been added to the graph may be masked from this
8920 view in order to influence the atom preference selection that occurs
# Store the owning depgraph and per-root caches for match results.
8923 def __init__(self, depgraph, root):
8924 portage.dbapi.__init__(self)
8925 self._depgraph = depgraph
8927 self._match_cache = {}
8928 self._cpv_pkg_map = {}
# Drop all cached match results and cpv->Package mappings (called when
# the underlying graph state changes).
8930 def _clear_cache(self):
8931 self._match_cache.clear()
8932 self._cpv_pkg_map.clear()
# Return cpvs matching `atom`, memoized in self._match_cache.
8934 def match(self, atom):
8935 ret = self._match_cache.get(atom)
8940 atom = self._dep_expand(atom)
8941 pkg, existing = self._depgraph._select_package(self._root, atom)
8945 # Return the highest available from select_package() as well as
8946 # any matching slots in the graph db.
8948 slots.add(pkg.metadata["SLOT"])
8949 atom_cp = portage.dep_getkey(atom)
8950 if pkg.cp.startswith("virtual/"):
8951 # For new-style virtual lookahead that occurs inside
8952 # dep_check(), examine all slots. This is needed
8953 # so that newer slots will not unnecessarily be pulled in
8954 # when a satisfying lower slot is already installed. For
8955 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8956 # there's no need to pull in a newer slot to satisfy a
8957 # virtual/jdk dependency.
8958 for db, pkg_type, built, installed, db_keys in \
8959 self._depgraph._filtered_trees[self._root]["dbs"]:
8960 for cpv in db.match(atom):
8961 if portage.cpv_getkey(cpv) != pkg.cp:
8963 slots.add(db.aux_get(cpv, ["SLOT"])[0])
# Record the initially-selected package if it passes visibility,
# then resolve one package per remaining slot.
8965 if self._visible(pkg):
8966 self._cpv_pkg_map[pkg.cpv] = pkg
8968 slots.remove(pkg.metadata["SLOT"])
8970 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8971 pkg, existing = self._depgraph._select_package(
8972 self._root, slot_atom)
8975 if not self._visible(pkg):
8977 self._cpv_pkg_map[pkg.cpv] = pkg
# Ascending cpv order is the convention dep_check() expects; cache the
# result under the original (pre-expansion) atom.
8980 self._cpv_sort_ascending(ret)
8981 self._match_cache[orig_atom] = ret
# Decide whether `pkg` should be exposed through this composite view.
8984 def _visible(self, pkg):
# Installed packages are hidden unless "selective" mode allows reuse.
8985 if pkg.installed and "selective" not in self._depgraph.myparams:
8987 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8988 except (StopIteration, portage.exception.InvalidDependString):
8995 self._depgraph.pkgsettings[pkg.root], pkg):
8997 except portage.exception.InvalidDependString:
8999 in_graph = self._depgraph._slot_pkg_map[
9000 self._root].get(pkg.slot_atom)
9001 if in_graph is None:
9002 # Mask choices for packages which are not the highest visible
9003 # version within their slot (since they usually trigger slot
9005 highest_visible, in_graph = self._depgraph._select_package(
9006 self._root, pkg.slot_atom)
9007 if pkg != highest_visible:
9009 elif in_graph != pkg:
9010 # Mask choices for packages that would trigger a slot
9011 # conflict with a previously selected package.
# Qualify a category-less atom (legacy installed-package metadata) with
# the most plausible category, mirroring dbapi.match()'s cpv_expand().
9015 def _dep_expand(self, atom):
9017 This is only needed for old installed packages that may
9018 contain atoms that are not fully qualified with a specific
9019 category. Emulate the cpv_expand() function that's used by
9020 dbapi.match() in cases like this. If there are multiple
9021 matches, it's often due to a new-style virtual that has
9022 been added, so try to filter those out to avoid raising
9025 root_config = self._depgraph.roots[self._root]
9027 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
# Prefer the single non-virtual expansion when exactly one exists.
9028 if len(expanded_atoms) > 1:
9029 non_virtual_atoms = []
9030 for x in expanded_atoms:
9031 if not portage.dep_getkey(x).startswith("virtual/"):
9032 non_virtual_atoms.append(x)
9033 if len(non_virtual_atoms) == 1:
9034 expanded_atoms = non_virtual_atoms
9035 if len(expanded_atoms) > 1:
9036 # compatible with portage.cpv_expand()
9037 raise portage.exception.AmbiguousPackageName(
9038 [portage.dep_getkey(x) for x in expanded_atoms])
9040 atom = expanded_atoms[0]
# No expansion found: fall back to the "virtual/" category when the
# package name has provider mappings, else use the "null" category.
9042 null_atom = insert_category_into_atom(atom, "null")
9043 null_cp = portage.dep_getkey(null_atom)
9044 cat, atom_pn = portage.catsplit(null_cp)
9045 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9047 # Allow the resolver to choose which virtual.
9048 atom = insert_category_into_atom(atom, "virtual")
9050 atom = insert_category_into_atom(atom, "null")
# Serve metadata for cpvs previously returned by match(); missing keys
# yield empty strings, matching dbapi.aux_get() conventions.
9053 def aux_get(self, cpv, wants):
9054 metadata = self._cpv_pkg_map[cpv].metadata
9055 return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
	# Maps repository paths to short display indices like [0], [1], [?]
	# for merge-list output. NOTE(review): several lines are missing from
	# this extract (e.g. a repo_paths set initialization and the
	# "def __str__(self):" header); statements are kept verbatim.
	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Return/record a display index for the given realpath repository.
		# NOTE(review): list.index() raises ValueError rather than
		# returning -1, so an initialization line (real_index = -1 guarded
		# by "if repo_path_real:") is presumably missing here — confirm.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		# First sighting of this repo: allocate the next index.
		index = len(shown_repos)
		shown_repos[repo_path] = index

		# NOTE(review): the "def __str__(self):" header and "output = []"
		# for the following body appear to be missing from this extract.
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	# Accumulates per-category counts for the merge list and renders the
	# "Total: N packages (...)" summary. NOTE(review): this extract is
	# missing the "def __init__"/"def __str__" headers, most counter
	# initializations and several "s"-pluralization lines; the remaining
	# statements are kept verbatim.
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
			details.append("%s reinstall" % self.reinst)
			details.append("%s uninstall" % self.uninst)
		if self.interactive > 0:
			# Highlight interactive packages since they need a terminal.
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		myoutput.append("\nConflict: %s block" % \
			myoutput.append("s")
		if self.blocks_satisfied < self.blocks:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollSelectAdapter(PollConstants):

	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	"""
	# NOTE(review): this extract is missing the "def __init__" header, the
	# "raise TypeError" lines and parts of poll(); statements are kept
	# verbatim.
		self._registered = {}
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
			"register expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		self._registered[fd] = eventmask
		# Invalidate the cached select() argument lists.
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
			"poll expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			# Copy before appending the timeout, to avoid mutating the
			# cached argument list.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			# poll     | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			# select   | seconds      | omitted
			# ---------|--------------|------------------------------
			if timeout is not None and timeout < 0:
			if timeout is not None:
				# NOTE(review): on Python 2 this is integer division, so
				# sub-second timeouts truncate to 0 (busy poll);
				# timeout / 1000.0 would be safer — confirm intent.
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# Only POLLIN events are reported (see register() above).
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
class SequentialTaskQueue(SlotObject):
	# FIFO queue of asynchronous tasks that runs at most max_jobs tasks
	# concurrently (max_jobs is True means unlimited). NOTE(review):
	# several lines are missing from this extract (e.g. the
	# "def schedule"/"def clear"/"def __len__" headers and some early
	# returns); statements are kept verbatim.

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:

	def add(self, task):
		self._task_queue.append(task)

	def addFront(self, task):
		# Queue-jumping insert: task will start before previously queued
		# tasks.
		self._task_queue.appendleft(task)

		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			running_tasks.add(task)
			# The exit listener prunes running_tasks when the task ends.
			task.addExitListener(self._task_exit)
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:

		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)

	def __nonzero__(self):
		# Truthy while any task is queued or running.
		return bool(self._task_queue or self.running_tasks)

		return len(self._task_queue) + len(self.running_tasks)
# Cached result of can_poll_device(); None until first call.
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@rtype: bool
	@returns: True if poll() on a device succeeds, False otherwise.
	"""
	# NOTE(review): the try/except around opening /dev/null and the
	# poll-object creation appear to be missing from this extract;
	# statements are kept verbatim.
	global _can_poll_device
	if _can_poll_device is not None:
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	dev_null = open('/dev/null', 'rb')
		_can_poll_device = False
		return _can_poll_device

	p.register(dev_null.fileno(), PollConstants.POLLIN)

	# A POLLNVAL event means the platform can't poll() this device.
	invalid_request = False
	for f, event in p.poll():
		if event & PollConstants.POLLNVAL:
			invalid_request = True

	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Create an instance of select.poll, or an instance of
	PollSelectAdapter if there is no poll() implementation or
	it is broken somehow.
	"""
	if can_poll_device():
		return select.poll()
	return PollSelectAdapter()
# Use os.getloadavg() when available; otherwise fall back to reading
# /proc/loadavg (Linux-specific).
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
	# NOTE(review): the "def getloadavg():" header and the try/except and
	# loop lines appear to be missing from this extract; statements are
	# kept verbatim.
	"""
	Uses /proc/loadavg to emulate os.getloadavg().
	Raises OSError if the load average was unobtainable.
	"""
	loadavg_str = open('/proc/loadavg').readline()
	# getloadavg() is only supposed to raise OSError, so convert
	raise OSError('unknown')
	loadavg_split = loadavg_str.split()
	if len(loadavg_split) < 3:
		raise OSError('unknown')
	loadavg_floats.append(float(loadavg_split[i]))
	raise OSError('unknown')
	return tuple(loadavg_floats)
class PollScheduler(object):
	# Base class providing a poll()-driven event loop for the task
	# schedulers below. NOTE(review): numerous lines are missing from this
	# extract (try:/except bodies, return statements, some method
	# headers); statements are kept verbatim.

	class _sched_iface_class(SlotObject):
		__slots__ = ("register", "schedule", "unregister")

		self._max_load = None

		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		self._poll_obj = create_poll_instance()
		self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
		self._scheduling = True
		return self._schedule_tasks()
		self._scheduling = False

	def _running_job_count(self):

	def _can_add_job(self):
		# Gate on both the job-count limit and the load-average limit.
		max_jobs = self._max_jobs
		max_load = self._max_load

		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:

		# Only throttle on load average when running jobs in parallel.
		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
			avg1, avg5, avg15 = getloadavg()
			if avg1 >= max_load:

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
			if timeout is None and \
				not self._poll_event_handlers:
				raise StopIteration(
					"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		# (observed in the wild):
		#
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
		self._poll_event_queue.extend(self._poll_obj.poll(timeout))
		except select.error, e:
			writemsg_level("\n!!! select error: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			if timeout is not None:

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
		return self._poll_event_queue.pop()

	def _poll_loop(self):

		event_handlers = self._poll_event_handlers
		event_handled = False

		while event_handlers:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers
		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:
		# Drain only the events already queued; do not block.
		while event_handlers and self._poll_event_queue:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
		except StopIteration:

		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		@rtype: Integer
		@return: A unique registration id, for use in schedule() or
			unregister() calls.
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)

	def _unregister(self, reg_id):
		# Tear down all bookkeeping created by _register().
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is no longer registered
		as a poll() event handler.
		@param wait_id: a task id (or ids) to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		# Accept either a single id or an iterable of ids.
		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])

		while wait_ids.intersection(handler_ids):
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):

	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	"""
	# NOTE(review): several lines are missing from this extract
	# (e.g. "def add"/"def run" headers and return statements);
	# statements are kept verbatim.

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:

		self._max_jobs = max_jobs
		self._max_load = max_load
		# Expose the registration/scheduling primitives to tasks.
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		self._schedule_listeners = []

		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

		# Keep scheduling until the queues and running jobs are drained.
		while self._schedule():

		while self._running_job_count():

	def _schedule_tasks(self):
		"""
		@rtype: bool
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			if not self._start_next_job(n):

		for q in self._queues:

	def _running_job_count(self):
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count

	def _start_next_job(self, n=1):
		# Let each queue start work until n new jobs have been launched.
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
		return started_count
class TaskScheduler(object):

	"""
	A simple way to handle scheduling of AsynchronousTask instances. Simply
	add tasks and call run(). The run() method returns when no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# Compose a single task queue with a QueueScheduler that drives it.
		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
		self._scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		# Expose the scheduler's interface and run() directly on this
		# object so callers don't need to know about the composition.
		self.sched_iface = self._scheduler.sched_iface
		self.run = self._scheduler.run
		self._scheduler.add(self._queue)

	def add(self, task):
		self._queue.add(task)
class JobStatusDisplay(object):
	# Renders the one-line "Jobs: N of M complete ... Load avg: ..."
	# status display on a terminal. NOTE(review): many lines are missing
	# from this extract (term-code dict entries, several method headers,
	# returns); statements are kept verbatim.

	_bound_properties = ("curval", "failed", "running")
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {

	_termcap_name_map = {
		'carriage_return' : 'cr',

	def __init__(self, out=sys.stdout, quiet=False):
		# Attributes are set via object.__setattr__ because __setattr__ is
		# overridden below to track bound-property changes.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)

		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		if not isatty or not self._init_term():
			# Fall back to the hard-coded default control codes when
			# termcap initialization is unavailable.
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)
		encoding = sys.getdefaultencoding()
		for k, v in self._term_codes.items():
			if not isinstance(v, basestring):
				# Python 2: decode byte-string term codes to unicode.
				self._term_codes[k] = v.decode(encoding, 'replace')

	def _init_term(self):
		"""
		Initialize term control codes.
		@rtype: bool
		@returns: True if term codes were successfully initialized,
			False otherwise.
		"""
		term_type = os.environ.get("TERM", "vt100")
		curses.setupterm(term_type, self.out.fileno())
		tigetstr = curses.tigetstr
		except curses.error:
		if tigetstr is None:
		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)

	def _format_msg(self, msg):
		return ">>> %s" % msg

		# Erase the current status line: carriage return + clear-to-eol.
		# NOTE(review): the enclosing method header appears to be missing
		# from this extract.
		self._term_codes['carriage_return'] + \
			self._term_codes['clr_eol'])
		self._displayed = False

	def _display(self, line):
		self.out.write(line)
		self._displayed = True

	def _update(self, msg):
		if not self._isatty:
			# Non-tty output: append a complete line instead of redrawing.
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			self._displayed = True
		self._display(self._format_msg(msg))

	def displayMessage(self, msg):
		# Print a message line, then restore the status line afterwards.
		was_displayed = self._displayed
		if self._isatty and self._displayed:
		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		self._displayed = False
		self._changed = True

		# Reset all bound counters to zero (presumably part of a reset()
		# method whose header is missing from this extract).
		for name in self._bound_properties:
			object.__setattr__(self, name, 0)
		self.out.write(self._term_codes['newline'])
		self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		if value == old_value:
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		# Mark the display dirty; redraw happens lazily.
		self._changed = True

	def _load_avg_str(self):
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

		"""
		Display status on stdout, but only if something has
		changed since the last call.
		"""
		# Rate-limit tty redraws to _min_display_latency seconds.
		current_time = time.time()
		time_delta = current_time - self._last_display_time
		if self._displayed and \
			if not self._isatty:
			if time_delta < self._min_display_latency:
		self._last_display_time = current_time
		self._changed = False
		self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render twice: colorized for the terminal, plain for width
		# accounting and truncation.
		color_output = StringIO()
		plain_output = StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)

		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		f.add_literal_data(" complete")
			f.add_literal_data(", ")
			f.push_style(number_style)
			f.add_literal_data(running_str)
			f.add_literal_data(" running")
			f.add_literal_data(", ")
			f.push_style(number_style)
			f.add_literal_data(failed_str)
			f.add_literal_data(" failed")

		# Pad the jobs column to a fixed width before the load average.
		padding = self._jobs_column_width - len(plain_output.getvalue())
			f.add_literal_data(padding * " ")
		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			# control sequences.
			self._update(plain_output[:self.width])
			self._update(color_output.getvalue())

		xtermTitle(" ".join(plain_output.split()))
class Scheduler(PollScheduler):
	# Main emerge build/merge scheduler. NOTE(review): lines are missing
	# throughout this extract (including the closing of the _iface_class
	# __slots__ tuple); statements are kept verbatim.

	# Options that make blockers irrelevant for this run.
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground (non-background) output.
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that should not be carried into a --resume invocation.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	_fetch_log = "/var/log/emerge-fetch.log"

	class _iface_class(SlotObject):
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",

	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")

	class _build_opts_class(SlotObject):
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")

	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")

	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			# NOTE(review): "self._root = root" appears to be missing here
			# in this extract (allocate() reads self._root below).
			self._allocate = allocate
			self._deallocate = deallocate
		def allocate(self):
			return self._allocate(self._root)
		def deallocate(self, settings):
			self._deallocate(settings)

	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		# Build the complete scheduler state for an emerge merge list.
		# NOTE(review): scattered lines are missing from this extract
		# (e.g. "self.trees = trees", loop headers, try:, branch bodies);
		# statements are kept verbatim.
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)
		# Derive boolean option holders from the command-line options;
		# slot name "fetch_all_uri" maps to "--fetch-all-uri", etc.
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self._logger = self._emerge_log_class()
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		# empty.
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay()
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.pkgsettings = {}
		self._config_pool = {}
		self._blocker_db = {}
		self._config_pool[root] = []
		self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

		# Interfaces handed to subtasks so they can schedule work and
		# fetch through this scheduler.
		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			fetch=fetch_iface, register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		# new jobs.
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		self._set_digraph(digraph)

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		# jobs completes.
		self._choose_pkg_return_early = False

		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif len(mergelist) > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# clear out existing fetch log if it exists
			open(self._fetch_log, 'w')
			except EnvironmentError:

		# Identify the installed portage package on the running root, if
		# any, so a portage upgrade can trigger special handling.
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		cpv = portage_match.pop()
		self._running_portage = self._pkg(cpv, "installed",
			self._running_root, installed=True)
	def _poll(self, timeout=None):
		# Delegate to the base class poll loop. NOTE(review): a line
		# appears to be missing between the header and this call in this
		# extract — confirm against the original source.
		PollScheduler._poll(self, timeout=timeout)
10162 def _set_max_jobs(self, max_jobs):
10163 self._max_jobs = max_jobs
10164 self._task_queues.jobs.max_jobs = max_jobs
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@rtype: bool
		@returns: True if background mode is enabled, False otherwise.
		"""
		# NOTE(review): a few lines appear to be missing from this extract
		# (e.g. "background = False", "msg = []", the final return);
		# statements are kept verbatim.
		# Background mode requires parallelism or --quiet, and is vetoed
		# by any option in _opts_no_background.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		# Interactive packages need the terminal, so fall back to
		# foreground output and a single job.
		interactive_tasks = self._get_interactive_tasks()
		if interactive_tasks:
			writemsg_level(">>> Sending package output to stdio due " + \
				"to interactive package(s):\n",
				level=logging.INFO, noiselevel=-1)
			for pkg in interactive_tasks:
				pkg_str = " " + colorize("INFORM", str(pkg.cpv))
				if pkg.root != "/":
					pkg_str += " for " + pkg.root
				msg.append(pkg_str)
			writemsg_level("".join("%s\n" % (l,) for l in msg),
				level=logging.INFO, noiselevel=-1)
			if self._max_jobs is True or self._max_jobs > 1:
				self._set_max_jobs(1)
				writemsg_level(">>> Setting --jobs=1 due " + \
					"to the above interactive package(s)\n",
					level=logging.INFO, noiselevel=-1)

		# Keep the status display quiet unless it will actually be shown.
		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet
	def _get_interactive_tasks(self):
		# Collect merge tasks whose PROPERTIES metadata contains
		# "interactive". NOTE(review): a "continue" and a "try:" line
		# appear to be missing from this extract; statements kept verbatim.
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		interactive_tasks = []
		for task in self._mergelist:
			# Only packages that will actually be merged are relevant.
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
			properties = flatten(use_reduce(paren_reduce(
				task.metadata["PROPERTIES"]), uselist=task.use.enabled))
			except portage.exception.InvalidDependString, e:
				# Invalid PROPERTIES is fatal: show a notice and abort
				# scheduling via _unknown_internal_error.
				show_invalid_depstring_notice(task,
					task.metadata["PROPERTIES"], str(e))
				raise self._unknown_internal_error()
			if "interactive" in properties:
				interactive_tasks.append(task)
		return interactive_tasks
	def _set_digraph(self, digraph):
		# Keep the dependency graph only when it is useful (parallel
		# builds with dependency information); otherwise drop it to save
		# memory. NOTE(review): the early "return" after clearing the
		# graph appears to be missing from this extract.
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# Graph is not needed for a serial or --nodeps run.
			self._digraph = None

		self._digraph = digraph
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
	def _find_system_deps(self):
		"""
		Find system packages and their deep runtime dependencies. Before being
		merged, these packages go to merge_wait_queue, to be merged when no
		other packages are building.
		"""
		deep_system_deps = self._deep_system_deps
		deep_system_deps.clear()
		deep_system_deps.update(
			_find_deep_system_runtime_deps(self._digraph))
		# Only packages that will actually be merged are relevant here.
		deep_system_deps.difference_update([pkg for pkg in \
			deep_system_deps if pkg.operation != "merge"])
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    for node in graph.root_nodes():
        # A root is irrelevant if it is not a Package, is an installed
        # no-op, or its task has already completed.
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    # Removing roots can expose new irrelevant roots, so pruning stops
    # only once a pass removes nothing.
    if not removed_nodes:
    removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    the main process.
    """
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
        # Force each later instance of the same cpv to build only after
        # every earlier instance, via artificial buildtime edges.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # First positional argument carries the failing exit status.
        self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    # addFront() gives priority over background prefetchers.
    self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    """
    def get_blockers():
        # acquire_lock=0 because the caller already holds the vdb lock.
        return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
    return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """
    Return a list of dblink instances for installed packages that
    block new_pkg. Blockers are skipped entirely when an
    ignore-blockers option is in effect.
    """
    if self._opts_ignore_blockers.intersection(self.myopts):

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # A package replacing its own slot or exact cpv is an ordinary
        # upgrade/reinstall, not a blocker conflict.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
        if new_pkg.cpv == blocking_pkg.cpv:
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """
    Translate a dblink instance into the corresponding Package
    instance, resolved through self._pkg().
    """
    tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_cfg = self.trees[pkg_dblink.myroot]["root_config"]
    # Packages coming from the vartree ("installed") are flagged as such.
    return self._pkg(pkg_dblink.mycpv, tree_type, root_cfg,
        installed=(tree_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    """
    Append msg to the file at log_path.
    """
    # Append mode so existing build-log content is preserved.
    f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    """
    Relay elog messages generated by a dblink, writing them to the
    build log instead of the terminal when running in the background.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    if background and log_path is not None:
        # Background mode: divert output into the package's build log.
        log_file = open(log_path, 'a')
        func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    if log_file is not None:
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """
    Display a merge-time message from a dblink, routing it to the
    terminal and/or the build log depending on background mode.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if log_path is None:
        # No log file: print to the terminal unless backgrounded and
        # the message is below warning level.
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
        self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    @returns: the phase's exit status
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")

    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    # Block here; the scheduler's event loop keeps other tasks running.
    ebuild_phase.wait()

    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digest or FEATURES=digest.
    In order to avoid interference, this must be done before parallel
    builds start.
    """
    # Nothing to do when only fetching.
    if '--fetchonly' in self.myopts:

    digest = '--digest' in self.myopts
    for pkgsettings in self.pkgsettings.itervalues():
        if 'digest' in pkgsettings.features:

    for x in self._mergelist:
        # Only ebuilds that will actually be merged need digests.
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
        pkgsettings = self.pkgsettings[x.root]
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv)
        if not ebuild_path:
            "!!! Could not locate ebuild for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
        # digestgen() operates on the ebuild's directory via ${O}.
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not portage.digestgen([], pkgsettings, myportdb=portdb):
            "!!! Unable to generate manifest for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    # Skipped unless FEATURES=strict, and skipped in fetch-only modes.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:

    shown_verifying_msg = False
    quiet_settings = {}
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        # Use a clone with PORTAGE_QUIET so digestcheck() doesn't spam
        # per-file output.
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
    """
    Queue background fetchers for upcoming merge-list entries so their
    distfiles download while earlier packages build.
    """
    if not self._parallel_fetch:

    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")

        prefetchers = self._prefetchers
        # NOTE(review): getbinpkg is assigned but not used in the lines
        # visible here — confirm whether it is consumed below.
        getbinpkg = "--getbinpkg" in self.myopts

        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    if not isinstance(pkg, Package):

    elif pkg.type_name == "ebuild":
        # Source package: fetch distfiles in the background.
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        # Remote binary package: prefetch the tbz2.
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.
    @rtype: bool
    @returns: True if a restart is scheduled, False otherwise.
    """
    if self._opts_no_restart.intersection(self.myopts):

    mergelist = self._mergelist

    for i, pkg in enumerate(mergelist):
        # A restart only matters if packages remain after the
        # portage upgrade itself.
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @rtype: bool
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        # Restart only when the version actually changes.
        if self._running_portage:
            return pkg.cpv != self._running_portage.cpv
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):

    if not self._is_restart_necessary(pkg):

    # No restart needed when the upgrade was the last list entry.
    if pkg == self._mergelist[-1]:

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    # Drop the just-merged package from the resume list before exec.
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            # True marks a flag-style option; anything else carries a value.
            mynewargv.append(myopt)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
10648 if "--resume" in self.myopts:
10650 portage.writemsg_stdout(
10651 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10652 self._logger.log(" *** Resuming merge...")
10654 self._save_resume_list()
10657 self._background = self._background_mode()
10658 except self._unknown_internal_error:
10661 for root in self.trees:
10662 root_config = self.trees[root]["root_config"]
10664 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10665 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10666 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10667 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10668 if not tmpdir or not os.path.isdir(tmpdir):
10669 msg = "The directory specified in your " + \
10670 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10671 "does not exist. Please create this " + \
10672 "directory or correct your PORTAGE_TMPDIR setting."
10673 msg = textwrap.wrap(msg, 70)
10674 out = portage.output.EOutput()
10679 if self._background:
10680 root_config.settings.unlock()
10681 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10682 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10683 root_config.settings.lock()
10685 self.pkgsettings[root] = portage.config(
10686 clone=root_config.settings)
10688 rval = self._generate_digests()
10689 if rval != os.EX_OK:
10692 rval = self._check_manifests()
10693 if rval != os.EX_OK:
10696 keep_going = "--keep-going" in self.myopts
10697 fetchonly = self._build_opts.fetchonly
10698 mtimedb = self._mtimedb
10699 failed_pkgs = self._failed_pkgs
10702 rval = self._merge()
10703 if rval == os.EX_OK or fetchonly or not keep_going:
10705 if "resume" not in mtimedb:
10707 mergelist = self._mtimedb["resume"].get("mergelist")
10711 if not failed_pkgs:
10714 for failed_pkg in failed_pkgs:
10715 mergelist.remove(list(failed_pkg.pkg))
10717 self._failed_pkgs_all.extend(failed_pkgs)
10723 if not self._calc_resume_list():
10726 clear_caches(self.trees)
10727 if not self._mergelist:
10730 self._save_resume_list()
10731 self._pkg_count.curval = 0
10732 self._pkg_count.maxval = len([x for x in self._mergelist \
10733 if isinstance(x, Package) and x.operation == "merge"])
10734 self._status_display.maxval = self._pkg_count.maxval
10736 self._logger.log(" *** Finished. Cleaning up...")
10739 self._failed_pkgs_all.extend(failed_pkgs)
10742 background = self._background
10743 failure_log_shown = False
10744 if background and len(self._failed_pkgs_all) == 1:
10745 # If only one package failed then just show it's
10746 # whole log for easy viewing.
10747 failed_pkg = self._failed_pkgs_all[-1]
10748 build_dir = failed_pkg.build_dir
10751 log_paths = [failed_pkg.build_log]
10753 log_path = self._locate_failure_log(failed_pkg)
10754 if log_path is not None:
10756 log_file = open(log_path)
10760 if log_file is not None:
10762 for line in log_file:
10763 writemsg_level(line, noiselevel=-1)
10766 failure_log_shown = True
10768 # Dump mod_echo output now since it tends to flood the terminal.
10769 # This allows us to avoid having more important output, generated
10770 # later, from being swept away by the mod_echo output.
10771 mod_echo_output = _flush_elog_mod_echo()
10773 if background and not failure_log_shown and \
10774 self._failed_pkgs_all and \
10775 self._failed_pkgs_die_msgs and \
10776 not mod_echo_output:
10778 printer = portage.output.EOutput()
10779 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10781 if mysettings["ROOT"] != "/":
10782 root_msg = " merged to %s" % mysettings["ROOT"]
10784 printer.einfo("Error messages for package %s%s:" % \
10785 (colorize("INFORM", key), root_msg))
10787 for phase in portage.const.EBUILD_PHASES:
10788 if phase not in logentries:
10790 for msgtype, msgcontent in logentries[phase]:
10791 if isinstance(msgcontent, basestring):
10792 msgcontent = [msgcontent]
10793 for line in msgcontent:
10794 printer.eerror(line.strip("\n"))
10796 if self._post_mod_echo_msgs:
10797 for msg in self._post_mod_echo_msgs:
10800 if len(self._failed_pkgs_all) > 1 or \
10801 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10802 if len(self._failed_pkgs_all) > 1:
10803 msg = "The following %d packages have " % \
10804 len(self._failed_pkgs_all) + \
10805 "failed to build or install:"
10807 msg = "The following package has " + \
10808 "failed to build or install:"
10809 prefix = bad(" * ")
10810 writemsg(prefix + "\n", noiselevel=-1)
10811 from textwrap import wrap
10812 for line in wrap(msg, 72):
10813 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10814 writemsg(prefix + "\n", noiselevel=-1)
10815 for failed_pkg in self._failed_pkgs_all:
10816 writemsg("%s\t%s\n" % (prefix,
10817 colorize("INFORM", str(failed_pkg.pkg))),
10819 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """
    elog hook: remember ERROR-level messages so they can be replayed
    in the failure summary after the merge run.
    """
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    """
    Return the path of a usable (existing, non-empty) build log for
    the given failed package, if one can be found.
    """
    build_dir = failed_pkg.build_dir

    log_paths = [failed_pkg.build_log]

    for log_path in log_paths:
        # Only report logs that actually contain something.
        log_size = os.stat(log_path).st_size
def _add_packages(self):
    """
    Populate the scheduling queue from the merge list; Blocker
    entries are handled separately from Package entries.
    """
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    """
    graph = self._digraph

    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    if pkg.root != '/':

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
    # A merge scheduled via the merge-wait queue has finished: drop it
    # from the scheduled set, then run the common merge-exit handling.
    self._merge_wait_scheduled.remove(task)
    self._merge_exit(task)
def _merge_exit(self, merge):
    """
    Common exit handling for a PackageMerge task: bookkeeping, config
    release, and status display updates.
    """
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    # Only count successful merges of not-yet-installed packages
    # toward the progress display.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
def _do_merge_exit(self, merge):
    """
    Record the outcome of a finished merge: log failures, mark tasks
    complete, and keep the --resume list in sync.
    """
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

        self._status_display.failed = len(self._failed_pkgs)

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)

    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
def _build_exit(self, build):
    """
    Exit handler for a build task: on success queue the corresponding
    merge (deferring deep system deps until no builds run), on failure
    record the failed package.
    """
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)
            self._status_display.merges = len(self._task_queues.merge)
        settings = build.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

        self._status_display.failed = len(self._failed_pkgs)
        self._deallocate_config(build.settings)

    self._status_display.running = self._jobs
def _extract_exit(self, build):
    # Binary-package extraction finishes the same way a build does.
    self._build_exit(build)
10987 def _task_complete(self, pkg):
10988 self._completed_tasks.add(pkg)
10989 self._unsatisfied_system_deps.discard(pkg)
10990 self._choose_pkg_return_early = False
10994 self._add_prefetchers()
10995 self._add_packages()
10996 pkg_queue = self._pkg_queue
10997 failed_pkgs = self._failed_pkgs
10998 portage.locks._quiet = self._background
10999 portage.elog._emerge_elog_listener = self._elog_listener
11005 self._main_loop_cleanup()
11006 portage.locks._quiet = False
11007 portage.elog._emerge_elog_listener = None
11009 rval = failed_pkgs[-1].returncode
11013 def _main_loop_cleanup(self):
11014 del self._pkg_queue[:]
11015 self._completed_tasks.clear()
11016 self._deep_system_deps.clear()
11017 self._unsatisfied_system_deps.clear()
11018 self._choose_pkg_return_early = False
11019 self._status_display.reset()
11020 self._digraph = None
11021 self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    """
    if self._choose_pkg_return_early:

    if self._digraph is None:
        # Without a graph, only pop the next package when it is safe
        # to do so given the running jobs and --nodeps/--jobs settings.
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
        return self._pkg_queue.pop(0)

    if not (self._jobs or self._task_queues.merge):
        return self._pkg_queue.pop(0)

    self._prune_digraph()

    # Every queued package is "later" than any candidate we pick.
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        if not self._dependent_on_scheduled_merges(pkg, later):

    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)

    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.
    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
        merge order
    @rtype: bool
    @returns: True if the package is dependent, False otherwise.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks

    # Depth-first traversal of pkg's dependency subgraph.
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
        node = node_stack.pop()
        if node in traversed_nodes:
        traversed_nodes.add(node)
        # A node counts as a pending merge unless it is an installed
        # no-op, an indirect uninstall, already completed, or "later".
        if not ((node.installed and node.operation == "nomerge") or \
            (node.operation == "uninstall" and \
            node not in direct_deps) or \
            node in completed_tasks or \
            node_stack.extend(graph.child_nodes(node))
def _allocate_config(self, root):
    """
    Allocate a unique config instance for a task in order
    to prevent interference between parallel tasks.
    """
    if self._config_pool[root]:
        # Reuse a pooled instance when available (cloning is expensive).
        temp_settings = self._config_pool[root].pop()
        temp_settings = portage.config(clone=self.pkgsettings[root])
    # Since config.setcpv() isn't guaranteed to call config.reset() due to
    # performance reasons, call it here to make sure all settings from the
    # previous package get flushed out (such as PORTAGE_LOG_FILE).
    temp_settings.reload()
    temp_settings.reset()
    return temp_settings
def _deallocate_config(self, settings):
    # Return the config instance to the per-root pool for reuse.
    self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    """
    Drive scheduling and event polling until no jobs or merges remain.
    """
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    merge_queue = self._task_queues.merge

    while self._schedule():
        if self._poll_event_handlers:

    # Drain remaining events after scheduling stops.
    if not (self._jobs or merge_queue):
        if self._poll_event_handlers:
11142 def _keep_scheduling(self):
11143 return bool(self._pkg_queue and \
11144 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    """
    One scheduling pass: flush waiting merges when idle, start new
    tasks, and decide whether scheduling should continue.
    """
    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]

    self._schedule_tasks_imp()
    self._status_display.display()

    for q in self._task_queues.values():

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()

        self._schedule_tasks_imp()
        self._status_display.display()

    return self._keep_scheduling()
def _job_delay(self):
    """
    @rtype: bool
    @returns: True if job scheduling should be delayed, False otherwise.
    """
    if self._jobs and self._max_load is not None:

        current_time = time.time()

        # Exponential back-off proportional to the number of running jobs,
        # capped at _job_delay_max.
        delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
        if delay > self._job_delay_max:
            delay = self._job_delay_max
        if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
    """
    @rtype: bool
    @returns: True if state changed, False otherwise.
    """
    if not self._keep_scheduling():
        return bool(state_change)

    # Bail out when a new job cannot or should not be started yet.
    if self._choose_pkg_return_early or \
        self._merge_wait_scheduled or \
        (self._jobs and self._unsatisfied_system_deps) or \
        not self._can_add_job() or \
        return bool(state_change)

    pkg = self._choose_pkg()
        return bool(state_change)

    if not pkg.installed:
        self._pkg_count.curval += 1

    task = self._task(pkg)

        # Installed packages are merged directly without a build job.
        merge = PackageMerge(merge=task)
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)

        self._previous_job_start_time = time.time()
        self._status_display.running = self._jobs
        task.addExitListener(self._extract_exit)
        self._task_queues.jobs.add(task)

        self._previous_job_start_time = time.time()
        self._status_display.running = self._jobs
        task.addExitListener(self._build_exit)
        self._task_queues.jobs.add(task)

    return bool(state_change)
def _task(self, pkg):
    """
    Build the MergeListItem task for pkg, resolving the installed
    package (same slot) that the merge will replace, if any.
    """
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
            # The highest installed match in this slot is the one replaced.
            previous_cpv = previous_cpv.pop()
            pkg_to_replace = self._pkg(previous_cpv,
                "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
        self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """
    Emit a one-line failure notice through the status display, followed
    by the build log path when one can be located.
    """
    pkg = failed_pkg.pkg
    parts = [bad("Failed"), " to ", action, " ",
        colorize("INFORM", pkg.cpv)]
    if pkg.root != "/":
        parts.append(" %s %s" % (preposition, pkg.root))

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        parts.append(", Log file:")
    self._status_msg("".join(parts))

    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the resposibility of generating \r and \n control characters,
    to guarantee that lines are created or erased when necessary and
    appropriate.

    @type msg: str
    @param msg: a brief status message (no newlines allowed)
    """
    if not self._background:
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    # Only packages actually scheduled for merge belong in the
    # resume list.
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]
def _calc_resume_list(self):
    """
    Use the current resume list to calculate a new one,
    dropping any packages with unsatisfied deps.
    @rtype: bool
    @returns: True if successful, False otherwise.
    """
    print colorize("GOOD", "*** Resuming merge...")

    if self._show_list():
        if "--tree" in self.myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in reverse order:\n\n"))
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in order:\n\n"))

    show_spinner = "--quiet" not in self.myopts and \
        "--nodeps" not in self.myopts

        print "Calculating dependencies  ",

    myparams = create_depgraph_params(self.myopts, None)
        success, mydepgraph, dropped_tasks = resume_depgraph(
            self.settings, self.trees, self._mtimedb, self.myopts,
            myparams, self._spinner)
    except depgraph.UnsatisfiedResumeDep, exc:
        # rename variable to avoid python-3.0 error:
        # SyntaxError: can not delete variable 'e' referenced in nested
        # scope
        mydepgraph = e.depgraph
        dropped_tasks = set()

        print "\b\b... done!"

    def unsatisfied_resume_dep_msg():
        # Deferred message shown after mod_echo output is flushed.
        mydepgraph.display_problems()
        out = portage.output.EOutput()
        out.eerror("One or more packages are either masked or " + \
            "have missing dependencies:")
        show_parents = set()
        for dep in e.value:
            # Only report each parent package once.
            if dep.parent in show_parents:
            show_parents.add(dep.parent)
            if dep.atom is None:
                out.eerror(indent + "Masked package:")
                out.eerror(2 * indent + str(dep.parent))
                out.eerror(indent + str(dep.atom) + " pulled in by:")
                out.eerror(2 * indent + str(dep.parent))
        msg = "The resume list contains packages " + \
            "that are either masked or have " + \
            "unsatisfied dependencies. " + \
            "Please restart/continue " + \
            "the operation manually, or use --skipfirst " + \
            "to skip the first package in the list and " + \
            "any other packages that may be " + \
            "masked or have missing dependencies."
        for line in textwrap.wrap(msg, 72):
    self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)

    if success and self._show_list():
        mylist = mydepgraph.altlist()
        if "--tree" in self.myopts:
            mydepgraph.display(mylist, favorites=self._favorites)
        self._post_mod_echo_msgs.append(mydepgraph.display_problems)
        mydepgraph.display_problems()

    # Break references so the depgraph can be garbage collected.
    mylist = mydepgraph.altlist()
    mydepgraph.break_refs(mylist)
    mydepgraph.break_refs(dropped_tasks)
    self._mergelist = mylist
    self._set_digraph(mydepgraph.schedulerGraph())

    for task in dropped_tasks:
        if not (isinstance(task, Package) and task.operation == "merge"):
        msg = "emerge --keep-going:" + \
        if pkg.root != "/":
            msg += " for %s" % (pkg.root,)
        msg += " dropped due to unsatisfied dependency."
        for line in textwrap.wrap(msg, msg_width):
            eerror(line, phase="other", key=pkg.cpv)
        settings = self.pkgsettings[pkg.root]
        # Ensure that log collection from $T is disabled inside
        # elog_process(), since any logs that might exist are
        # not relevant here.
        settings.pop("T", None)
        portage.elog.elog_process(pkg.cpv, settings)
        self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
# Whether the pending merge list should be shown to the user: suppressed
# by --quiet, enabled by --ask, --tree, or --verbose.  The actual
# return statements fall on lines elided from this view -- presumably
# True inside the branch and False otherwise; TODO confirm.
11438 def _show_list(self):
11439 myopts = self.myopts
11440 if "--quiet" not in myopts and \
11441 ("--ask" in myopts or "--tree" in myopts or \
11442 "--verbose" in myopts):
# Record 'pkg' in the "world" favorites file, but only when appropriate.
# Early guard clauses (returns on elided lines) bail out when:
#  - an option that implies "don't modify world" is set
#    (--buildpkgonly/--fetchonly/--oneshot/--onlydeps/--pretend),
#  - the package root is not the target root,
#  - the package was not requested via a command-line argument atom.
11446 def _world_atom(self, pkg):
11448 Add the package to the world file, but only if
11449 it's supposed to be added. Otherwise, do nothing.
11452 if set(("--buildpkgonly", "--fetchonly",
11454 "--oneshot", "--onlydeps",
11455 "--pretend")).intersection(self.myopts):
11458 if pkg.root != self.target_root:
11461 args_set = self._args_set
11462 if not args_set.findAtomForPackage(pkg):
11465 logger = self._logger
11466 pkg_count = self._pkg_count
11467 root_config = pkg.root_config
11468 world_set = root_config.sets["world"]
# Lock/load are duck-typed: some world-set implementations support
# locking and on-disk reload, others do not.  The matching unlock
# (presumably guarded by world_locked in a finally) is on elided lines.
11469 world_locked = False
11470 if hasattr(world_set, "lock"):
11472 world_locked = True
11475 if hasattr(world_set, "load"):
11476 world_set.load() # maybe it's changed on disk
11478 atom = create_world_atom(pkg, args_set, root_config)
11480 if hasattr(world_set, "add"):
11481 self._status_msg(('Recording %s in "world" ' + \
11482 'favorites file...') % atom)
11483 logger.log(" === (%s of %s) Updating world file (%s)" % \
11484 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11485 world_set.add(atom)
# Fallback when the world set is not writable (no 'add' method).
11487 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11488 (atom,), level=logging.WARN, noiselevel=-1)
# Package-instance factory with caching via the dependency digraph.
11493 def _pkg(self, cpv, type_name, root_config, installed=False):
11495 Get a package instance from the cache, or create a new
11496 one if necessary. Raises KeyError from aux_get if it
11497 fails for some reason (package does not exist or is
# "merge" vs "nomerge" operation selection -- the condition itself
# (presumably based on 'installed') is on an elided line.
11500 operation = "merge"
11502 operation = "nomerge"
11504 if self._digraph is not None:
11505 # Reuse existing instance when available.
11506 pkg = self._digraph.get(
11507 (type_name, root_config.root, cpv, operation))
11508 if pkg is not None:
# Cache miss: build a fresh Package from the matching db's metadata.
11511 tree_type = depgraph.pkg_tree_map[type_name]
11512 db = root_config.trees[tree_type].dbapi
11513 db_keys = list(self.trees[root_config.root][
11514 tree_type].dbapi._aux_cache_keys)
11515 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11516 pkg = Package(cpv=cpv, metadata=metadata,
11517 root_config=root_config, installed=installed)
# Ebuilds get their USE/CHOST computed from the current config, since
# these are not fixed until the package is configured.
11518 if type_name == "ebuild":
11519 settings = self.pkgsettings[root_config.root]
11520 settings.setcpv(pkg)
11521 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11522 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# Scheduler that regenerates the ebuild metadata cache (used by
# `emerge --regen`).  Runs one _metadata_process per ebuild, bounded by
# max_jobs/max_load, via the PollScheduler event loop.
11526 class MetadataRegen(PollScheduler):
11528 def __init__(self, portdb, max_jobs=None, max_load=None):
11529 PollScheduler.__init__(self)
11530 self._portdb = portdb
# Default for max_jobs (assigned on an elided line) -- presumably 1.
11532 if max_jobs is None:
11535 self._max_jobs = max_jobs
11536 self._max_load = max_load
11537 self._sched_iface = self._sched_iface_class(
11538 register=self._register,
11539 schedule=self._schedule_wait,
11540 unregister=self._unregister)
# cpvs that were actually found in the tree; used later to decide
# which stale cache entries ("dead nodes") may be deleted.
11542 self._valid_pkgs = set()
11543 self._process_iter = self._iter_metadata_processes()
11544 self.returncode = os.EX_OK
11545 self._error_count = 0
# Generator yielding one metadata regeneration subprocess per ebuild.
# Sorted in reverse so pop() walks categories in ascending order.
11547 def _iter_metadata_processes(self):
11548 portdb = self._portdb
11549 valid_pkgs = self._valid_pkgs
11550 every_cp = portdb.cp_all()
11551 every_cp.sort(reverse=True)
11554 cp = every_cp.pop()
11555 portage.writemsg_stdout("Processing %s\n" % cp)
11556 cpv_list = portdb.cp_list(cp)
11557 for cpv in cpv_list:
11558 valid_pkgs.add(cpv)
11559 ebuild_path, repo_path = portdb.findname2(cpv)
# None means the cache entry is already valid; skip (the 'continue'
# is on an elided line).
11560 metadata_process = portdb._metadata_process(
11561 cpv, ebuild_path, repo_path)
11562 if metadata_process is None:
11564 yield metadata_process
# NOTE(review): the def line of the driver method (likely 'run') is
# elided here; the following lines are its body.  It snapshots all
# existing cache keys as "dead nodes", runs the scheduler loop, then
# prunes entries whose ebuilds no longer exist.
11568 portdb = self._portdb
11569 from portage.cache.cache_errors import CacheError
11572 for mytree in portdb.porttrees:
11574 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11575 except CacheError, e:
11576 portage.writemsg("Error listing cache entries for " + \
11577 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
# Main event loop: keep scheduling until no tasks remain.
11582 while self._schedule():
# Any cpv still backed by an ebuild is not dead; keep its cache entry.
11589 for y in self._valid_pkgs:
11590 for mytree in portdb.porttrees:
11591 if portdb.findname2(y, mytree=mytree)[0]:
11592 dead_nodes[mytree].discard(y)
11594 for mytree, nodes in dead_nodes.iteritems():
11595 auxdb = portdb.auxdb[mytree]
# Best-effort deletion: concurrent removal or cache errors are ignored.
11599 except (KeyError, CacheError):
# Start as many metadata subprocesses as the job/load limits allow.
11602 def _schedule_tasks(self):
11605 @returns: True if there may be remaining tasks to schedule,
11608 while self._can_add_job():
11610 metadata_process = self._process_iter.next()
11611 except StopIteration:
11615 metadata_process.scheduler = self._sched_iface
11616 metadata_process.addExitListener(self._metadata_exit)
11617 metadata_process.start()
# Exit callback: record failures but keep going (best-effort regen).
11620 def _metadata_exit(self, metadata_process):
11622 if metadata_process.returncode != os.EX_OK:
11623 self.returncode = 1
11624 self._error_count += 1
# Failed cpvs are dropped from _valid_pkgs so their stale cache
# entries can be pruned later.
11625 self._valid_pkgs.discard(metadata_process.cpv)
11626 portage.writemsg("Error processing %s, continuing...\n" % \
11627 (metadata_process.cpv,))
11630 class UninstallFailure(portage.exception.PortageException):
11632 An instance of this class is raised by unmerge() when
11633 an uninstallation fails.
# self.status carries the non-zero exit status returned by
# portage.unmerge().  A guard (presumably 'if pargs:') sits on the
# elided line between these two statements -- TODO confirm.
11636 def __init__(self, *pargs):
11637 portage.exception.PortageException.__init__(self, pargs)
11639 self.status = pargs[0]
# Top-level driver for `emerge --unmerge/--prune/--clean/--depclean`
# style removal.  Builds a pkgmap of {protected, selected, omitted}
# package sets per argument, protects system/world/set-referenced
# packages, displays the plan, and (unless --pretend/--ask declines)
# calls portage.unmerge() on each selected package.
# Raises UninstallFailure on a failed unmerge when raise_on_error is set.
11641 def unmerge(root_config, myopts, unmerge_action,
11642 unmerge_files, ldpath_mtimes, autoclean=0,
11643 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11644 scheduler=None, writemsg_level=portage.util.writemsg_level):
11646 quiet = "--quiet" in myopts
11647 settings = root_config.settings
11648 sets = root_config.sets
11649 vartree = root_config.trees["vartree"]
11650 candidate_catpkgs=[]
11652 xterm_titles = "notitles" not in settings.features
11653 out = portage.output.EOutput()
# Local installed-Package factory with a small cache (the enclosing
# 'def _pkg(cpv):' line is elided from this view).
11655 db_keys = list(vartree.dbapi._aux_cache_keys)
11658 pkg = pkg_cache.get(cpv)
11660 pkg = Package(cpv=cpv, installed=True,
11661 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11662 root_config=root_config,
11663 type_name="installed")
11664 pkg_cache[cpv] = pkg
# Lock the installed-package database (vdb) while we plan the unmerge.
11667 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11669 # At least the parent needs to exist for the lock file.
11670 portage.util.ensure_dirs(vdb_path)
11671 except portage.exception.PortageException:
11675 if os.access(vdb_path, os.W_OK):
11676 vdb_lock = portage.locks.lockdir(vdb_path)
# Build 'syslist': system-profile package keys, resolving virtuals to
# their single installed provider when unambiguous.
11677 realsyslist = sets["system"].getAtoms()
11679 for x in realsyslist:
11680 mycp = portage.dep_getkey(x)
11681 if mycp in settings.getvirtuals():
11683 for provider in settings.getvirtuals()[mycp]:
11684 if vartree.dbapi.match(provider):
11685 providers.append(provider)
11686 if len(providers) == 1:
11687 syslist.extend(providers)
11689 syslist.append(mycp)
11691 mysettings = portage.config(clone=settings)
# No arguments: plain "unmerge" refuses; prune/clean operate globally.
11693 if not unmerge_files:
11694 if unmerge_action == "unmerge":
11696 print bold("emerge unmerge") + " can only be used with specific package names"
11702 localtree = vartree
11703 # process all arguments and add all
11704 # valid db entries to candidate_catpkgs
11706 if not unmerge_files:
11707 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11709 #we've got command-line arguments
11710 if not unmerge_files:
11711 print "\nNo packages to unmerge have been provided.\n"
11713 for x in unmerge_files:
11714 arg_parts = x.split('/')
11715 if x[0] not in [".","/"] and \
11716 arg_parts[-1][-7:] != ".ebuild":
11717 #possible cat/pkg or dep; treat as such
11718 candidate_catpkgs.append(x)
11719 elif unmerge_action in ["prune","clean"]:
11720 print "\n!!! Prune and clean do not accept individual" + \
11721 " ebuilds as arguments;\n skipping.\n"
11724 # it appears that the user is specifying an installed
11725 # ebuild and we're in "unmerge" mode, so it's ok.
11726 if not os.path.exists(x):
11727 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Filesystem-path argument: verify it points at a vdb entry and
# translate it back into an =cat/pkg-ver candidate.
11730 absx = os.path.abspath(x)
11731 sp_absx = absx.split("/")
11732 if sp_absx[-1][-7:] == ".ebuild":
11734 absx = "/".join(sp_absx)
11736 sp_absx_len = len(sp_absx)
11738 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11739 vdb_len = len(vdb_path)
11741 sp_vdb = vdb_path.split("/")
11742 sp_vdb_len = len(sp_vdb)
11744 if not os.path.exists(absx+"/CONTENTS"):
11745 print "!!! Not a valid db dir: "+str(absx)
11748 if sp_absx_len <= sp_vdb_len:
11749 # The Path is shorter... so it can't be inside the vdb.
11752 print "\n!!!",x,"cannot be inside "+ \
11753 vdb_path+"; aborting.\n"
11756 for idx in range(0,sp_vdb_len):
11757 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11760 print "\n!!!", x, "is not inside "+\
11761 vdb_path+"; aborting.\n"
11764 print "="+"/".join(sp_absx[sp_vdb_len:])
11765 candidate_catpkgs.append(
11766 "="+"/".join(sp_absx[sp_vdb_len:]))
# Informational header (skipped when --quiet).
11769 if (not "--quiet" in myopts):
11771 if settings["ROOT"] != "/":
11772 writemsg_level(darkgreen(newline+ \
11773 ">>> Using system located in ROOT tree %s\n" % \
11776 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11777 not ("--quiet" in myopts):
11778 writemsg_level(darkgreen(newline+\
11779 ">>> These are the packages that would be unmerged:\n"))
11781 # Preservation of order is required for --depclean and --prune so
11782 # that dependencies are respected. Use all_selected to eliminate
11783 # duplicate packages since the same package may be selected by
11786 all_selected = set()
# Classify every candidate's installed versions into one pkgmap slot
# of {"protected", "selected", "omitted"} sets.
11787 for x in candidate_catpkgs:
11788 # cycle through all our candidate deps and determine
11789 # what will and will not get unmerged
11791 mymatch = vartree.dbapi.match(x)
11792 except portage.exception.AmbiguousPackageName, errpkgs:
11793 print "\n\n!!! The short ebuild name \"" + \
11794 x + "\" is ambiguous. Please specify"
11795 print "!!! one of the following fully-qualified " + \
11796 "ebuild names instead:\n"
11797 for i in errpkgs[0]:
11798 print " " + green(i)
11802 if not mymatch and x[0] not in "<>=~":
11803 mymatch = localtree.dep_match(x)
11805 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11806 (x, unmerge_action), noiselevel=-1)
11810 {"protected": set(), "selected": set(), "omitted": set()})
11811 mykey = len(pkgmap) - 1
# "unmerge": every match is selected (deduplicated via all_selected).
11812 if unmerge_action=="unmerge":
11814 if y not in all_selected:
11815 pkgmap[mykey]["selected"].add(y)
11816 all_selected.add(y)
# "prune": keep the best version (by slot collision counter or
# version comparison); select everything else.
11817 elif unmerge_action == "prune":
11818 if len(mymatch) == 1:
11820 best_version = mymatch[0]
11821 best_slot = vartree.getslot(best_version)
11822 best_counter = vartree.dbapi.cpv_counter(best_version)
11823 for mypkg in mymatch[1:]:
11824 myslot = vartree.getslot(mypkg)
11825 mycounter = vartree.dbapi.cpv_counter(mypkg)
11826 if (myslot == best_slot and mycounter > best_counter) or \
11827 mypkg == portage.best([mypkg, best_version]):
11828 if myslot == best_slot:
11829 if mycounter < best_counter:
11830 # On slot collision, keep the one with the
11831 # highest counter since it is the most
11832 # recently installed.
11834 best_version = mypkg
11836 best_counter = mycounter
11837 pkgmap[mykey]["protected"].add(best_version)
11838 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11839 if mypkg != best_version and mypkg not in all_selected)
11840 all_selected.update(pkgmap[mykey]["selected"])
11842 # unmerge_action == "clean"
# "clean": bucket installed versions by slot; the highest counter in
# each slot is protected, the rest are selected.
11844 for mypkg in mymatch:
11845 if unmerge_action == "clean":
11846 myslot = localtree.getslot(mypkg)
11848 # since we're pruning, we don't care about slots
11849 # and put all the pkgs in together
11851 if myslot not in slotmap:
11852 slotmap[myslot] = {}
11853 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11855 for mypkg in vartree.dbapi.cp_list(
11856 portage.dep_getkey(mymatch[0])):
11857 myslot = vartree.getslot(mypkg)
11858 if myslot not in slotmap:
11859 slotmap[myslot] = {}
11860 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11862 for myslot in slotmap:
11863 counterkeys = slotmap[myslot].keys()
11864 if not counterkeys:
11867 pkgmap[mykey]["protected"].add(
11868 slotmap[myslot][counterkeys[-1]])
11869 del counterkeys[-1]
11871 for counter in counterkeys[:]:
11872 mypkg = slotmap[myslot][counter]
11873 if mypkg not in mymatch:
11874 counterkeys.remove(counter)
11875 pkgmap[mykey]["protected"].add(
11876 slotmap[myslot][counter])
11878 #be pretty and get them in order of merge:
11879 for ckey in counterkeys:
11880 mypkg = slotmap[myslot][ckey]
11881 if mypkg not in all_selected:
11882 pkgmap[mykey]["selected"].add(mypkg)
11883 all_selected.add(mypkg)
11884 # ok, now the last-merged package
11885 # is protected, and the rest are selected
11886 numselected = len(all_selected)
11887 if global_unmerge and not numselected:
11888 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11891 if not numselected:
11892 portage.writemsg_stdout(
11893 "\n>>> No packages selected for removal by " + \
11894 unmerge_action + "\n")
# Nothing to do: flush the cache and release the vdb lock.
11898 vartree.dbapi.flush_cache()
11899 portage.locks.unlockdir(vdb_lock)
11901 from portage.sets.base import EditablePackageSet
11903 # generate a list of package sets that are directly or indirectly listed in "world",
11904 # as there is no persistent list of "installed" sets
11905 installed_sets = ["world"]
11910 pos = len(installed_sets)
11911 for s in installed_sets[pos - 1:]:
11914 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11917 installed_sets += candidates
11918 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11921 # we don't want to unmerge packages that are still listed in user-editable package sets
11922 # listed in "world" as they would be remerged on the next update of "world" or the
11923 # relevant package sets.
11924 unknown_sets = set()
# Protection pass: demote "selected" packages back to "protected" when
# they are portage itself or are still referenced by an installed set.
11925 for cp in xrange(len(pkgmap)):
11926 for cpv in pkgmap[cp]["selected"].copy():
11930 # It could have been uninstalled
11931 # by a concurrent process.
# Never let portage unmerge itself on the live root.
11934 if unmerge_action != "clean" and \
11935 root_config.root == "/" and \
11936 portage.match_from_list(
11937 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11938 msg = ("Not unmerging package %s since there is no valid " + \
11939 "reason for portage to unmerge itself.") % (pkg.cpv,)
11940 for line in textwrap.wrap(msg, 75):
11942 # adjust pkgmap so the display output is correct
11943 pkgmap[cp]["selected"].remove(cpv)
11944 all_selected.remove(cpv)
11945 pkgmap[cp]["protected"].add(cpv)
11949 for s in installed_sets:
11950 # skip sets that the user requested to unmerge, and skip world
11951 # unless we're unmerging a package set (as the package would be
11952 # removed from "world" later on)
11953 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11957 if s in unknown_sets:
11959 unknown_sets.add(s)
11960 out = portage.output.EOutput()
11961 out.eerror(("Unknown set '@%s' in " + \
11962 "%svar/lib/portage/world_sets") % \
11963 (s, root_config.root))
11966 # only check instances of EditablePackageSet as other classes are generally used for
11967 # special purposes and can be ignored here (and are usually generated dynamically, so the
11968 # user can't do much about them anyway)
11969 if isinstance(sets[s], EditablePackageSet):
11971 # This is derived from a snippet of code in the
11972 # depgraph._iter_atoms_for_pkg() method.
11973 for atom in sets[s].iterAtomsForPackage(pkg):
11974 inst_matches = vartree.dbapi.match(atom)
11975 inst_matches.reverse() # descending order
11977 for inst_cpv in inst_matches:
11979 inst_pkg = _pkg(inst_cpv)
11981 # It could have been uninstalled
11982 # by a concurrent process.
11985 if inst_pkg.cp != atom.cp:
11987 if pkg >= inst_pkg:
11988 # This is descending order, and we're not
11989 # interested in any versions <= pkg given.
11991 if pkg.slot_atom != inst_pkg.slot_atom:
11992 higher_slot = inst_pkg
11994 if higher_slot is None:
11998 #print colorize("WARN", "Package %s is going to be unmerged,"  % cpv)
11999 #print colorize("WARN", "but still listed in the following package sets:")
12000 #print "    %s\n" % ", ".join(parents)
12001 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12002 print colorize("WARN", "still referenced by the following package sets:")
12003 print "    %s\n" % ", ".join(parents)
12004 # adjust pkgmap so the display output is correct
12005 pkgmap[cp]["selected"].remove(cpv)
12006 all_selected.remove(cpv)
12007 pkgmap[cp]["protected"].add(cpv)
# Recount after the protection pass; everything may have been demoted.
12011 numselected = len(all_selected)
12012 if not numselected:
12014 "\n>>> No packages selected for removal by " + \
12015 unmerge_action + "\n")
12018 # Unmerge order only matters in some cases
# When order doesn't matter, regroup pkgmap entries per cp and sort
# alphabetically for a stable display.
12022 selected = d["selected"]
12025 cp = portage.cpv_getkey(iter(selected).next())
12026 cp_dict = unordered.get(cp)
12027 if cp_dict is None:
12029 unordered[cp] = cp_dict
12032 for k, v in d.iteritems():
12033 cp_dict[k].update(v)
12034 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Display pass: per cp, print selected/protected/omitted versions and
# warn (with a countdown) when a system-profile package is selected.
12036 for x in xrange(len(pkgmap)):
12037 selected = pkgmap[x]["selected"]
12040 for mytype, mylist in pkgmap[x].iteritems():
12041 if mytype == "selected":
12043 mylist.difference_update(all_selected)
12044 cp = portage.cpv_getkey(iter(selected).next())
12045 for y in localtree.dep_match(cp):
12046 if y not in pkgmap[x]["omitted"] and \
12047 y not in pkgmap[x]["selected"] and \
12048 y not in pkgmap[x]["protected"] and \
12049 y not in all_selected:
12050 pkgmap[x]["omitted"].add(y)
12051 if global_unmerge and not pkgmap[x]["selected"]:
12052 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12054 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12055 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12056 "'%s' is part of your system profile.\n" % cp),
12057 level=logging.WARNING, noiselevel=-1)
12058 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12059 "be damaging to your system.\n\n"),
12060 level=logging.WARNING, noiselevel=-1)
12061 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12062 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12063 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12065 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12067 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12068 for mytype in ["selected","protected","omitted"]:
12070 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12071 if pkgmap[x][mytype]:
12072 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12073 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12074 for pn, ver, rev in sorted_pkgs:
12078 myversion = ver + "-" + rev
12079 if mytype == "selected":
12081 colorize("UNMERGE_WARN", myversion + " "),
12085 colorize("GOOD", myversion + " "), noiselevel=-1)
12087 writemsg_level("none ", noiselevel=-1)
12089 writemsg_level("\n", noiselevel=-1)
12091 writemsg_level("\n", noiselevel=-1)
12093 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12094 " packages are slated for removal.\n")
12095 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12096 " and " + colorize("GOOD", "'omitted'") + \
12097 " packages will not be removed.\n\n")
12099 if "--pretend" in myopts:
12100 #we're done... return
12102 if "--ask" in myopts:
12103 if userquery("Would you like to unmerge these packages?")=="No":
12104 # enter pretend mode for correct formatting of results
12105 myopts["--pretend"] = True
12110 #the real unmerging begins, after a short delay....
12111 if clean_delay and not autoclean:
12112 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
# Execution pass: unmerge each selected cpv; on failure either raise
# UninstallFailure (raise_on_error path, elided guard) or log it.
12114 for x in xrange(len(pkgmap)):
12115 for y in pkgmap[x]["selected"]:
12116 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12117 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12118 mysplit = y.split("/")
12120 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12121 mysettings, unmerge_action not in ["clean","prune"],
12122 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12123 scheduler=scheduler)
12125 if retval != os.EX_OK:
12126 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12128 raise UninstallFailure(retval)
# On success, drop the package from "world" so it won't be re-pulled.
12131 if clean_world and hasattr(sets["world"], "cleanPackage"):
12132 sets["world"].cleanPackage(vartree.dbapi, y)
12133 emergelog(xterm_titles, " >>> unmerge success: "+y)
# Finally remove any user-requested sets themselves from "world".
12134 if clean_world and hasattr(sets["world"], "remove"):
12135 for s in root_config.setconfig.active:
12136 sets["world"].remove(SETPREFIX+s)
# Regenerate the GNU info directory index ("dir" files) for any info
# directory whose mtime changed since the last run (tracked in
# prev_mtimes).  Invokes the external /usr/bin/install-info tool once
# per info file and reports a per-file error summary at the end.
12139 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12141 if os.path.exists("/usr/bin/install-info"):
12142 out = portage.output.EOutput()
# Collect only the info dirs whose mtime differs from the saved one.
12147 inforoot=normpath(root+z)
12148 if os.path.isdir(inforoot):
12149 infomtime = long(os.stat(inforoot).st_mtime)
12150 if inforoot not in prev_mtimes or \
12151 prev_mtimes[inforoot] != infomtime:
12152 regen_infodirs.append(inforoot)
12154 if not regen_infodirs:
12155 portage.writemsg_stdout("\n")
12156 out.einfo("GNU info directory index is up-to-date.")
12158 portage.writemsg_stdout("\n")
12159 out.einfo("Regenerating GNU info directory index...")
# "dir" index files may also exist compressed.
12161 dir_extensions = ("", ".gz", ".bz2")
12165 for inforoot in regen_infodirs:
12169 if not os.path.isdir(inforoot) or \
12170 not os.access(inforoot, os.W_OK):
12173 file_list = os.listdir(inforoot)
12175 dir_file = os.path.join(inforoot, "dir")
12176 moved_old_dir = False
12177 processed_count = 0
12178 for x in file_list:
# Skip hidden entries, subdirectories, and the index files themselves.
12179 if x.startswith(".") or \
12180 os.path.isdir(os.path.join(inforoot, x)):
12182 if x.startswith("dir"):
12184 for ext in dir_extensions:
12185 if x == "dir" + ext or \
12186 x == "dir" + ext + ".old":
# Before processing the first info file, move any existing "dir"
# index aside so install-info regenerates it from scratch.
12191 if processed_count == 0:
12192 for ext in dir_extensions:
12194 os.rename(dir_file + ext, dir_file + ext + ".old")
12195 moved_old_dir = True
12196 except EnvironmentError, e:
12197 if e.errno != errno.ENOENT:
12200 processed_count += 1
# LANG=C/LANGUAGE=C so the warning strings matched below are stable.
12201 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12202 existsstr="already exists, for file `"
12204 if re.search(existsstr,myso):
12205 # Already exists... Don't increment the count for this.
12207 elif myso[:44]=="install-info: warning: no info dir entry in ":
12208 # This info file doesn't contain a DIR-header: install-info produces this
12209 # (harmless) warning (the --quiet switch doesn't seem to work).
12210 # Don't increment the count for this.
12213 badcount=badcount+1
12214 errmsg += myso + "\n"
12217 if moved_old_dir and not os.path.exists(dir_file):
12218 # We didn't generate a new dir file, so put the old file
12219 # back where it was originally found.
12220 for ext in dir_extensions:
12222 os.rename(dir_file + ext + ".old", dir_file + ext)
12223 except EnvironmentError, e:
12224 if e.errno != errno.ENOENT:
12228 # Clean dir.old cruft so that they don't prevent
12229 # unmerge of otherwise empty directories.
12230 for ext in dir_extensions:
12232 os.unlink(dir_file + ext + ".old")
12233 except EnvironmentError, e:
12234 if e.errno != errno.ENOENT:
12238 #update mtime so we can potentially avoid regenerating.
12239 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
# Summary: error form when badcount > 0 (guard on an elided line).
12242 out.eerror("Processed %d info files; %d errors." % \
12243 (icount, badcount))
12244 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12247 out.einfo("Processed %d info files." % (icount,))
# Print a notice about unread GLEP 42 news items, per repository.
# 'update' is disabled under --pretend so item state isn't modified.
12250 def display_news_notification(root_config, myopts):
12251 target_root = root_config.root
12252 trees = root_config.trees
12253 settings = trees["vartree"].settings
12254 portdb = trees["porttree"].dbapi
12255 vardb = trees["vartree"].dbapi
12256 NEWS_PATH = os.path.join("metadata", "news")
12257 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12258 newsReaderDisplay = False
12259 update = "--pretend" not in myopts
12261 for repo in portdb.getRepositories():
12262 unreadItems = checkUpdatedNewsItems(
12263 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
# Emit the "IMPORTANT" banner once, before the first repo line.
12265 if not newsReaderDisplay:
12266 newsReaderDisplay = True
12268 print colorize("WARN", " * IMPORTANT:"),
12269 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12272 if newsReaderDisplay:
12273 print colorize("WARN", " *"),
12274 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# Report libraries preserved by preserve-libs: for each preserved file,
# show its alternate hardlink/symlink paths and (via the linkage map)
# the binaries that still link against it, capped at MAX_DISPLAY
# entries (MAX_DISPLAY itself is defined on an elided line).
12277 def display_preserved_libs(vardbapi):
12280 # Ensure the registry is consistent with existing files.
12281 vardbapi.plib_registry.pruneNonExisting()
12283 if vardbapi.plib_registry.hasEntries():
12285 print colorize("WARN", "!!!") + " existing preserved libs:"
12286 plibdata = vardbapi.plib_registry.getPreservedLibs()
12287 linkmap = vardbapi.linkmap
# If the linkmap can't be rebuilt (e.g. scanelf missing), fall back
# to listing the libs without consumer information.
12290 linkmap_broken = False
12294 except portage.exception.CommandNotFound, e:
12295 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12296 level=logging.ERROR, noiselevel=-1)
12298 linkmap_broken = True
# First pass: compute each preserved file's consumers, excluding
# consumers that are themselves preserved libs of the same package.
12300 search_for_owners = set()
12301 for cpv in plibdata:
12302 internal_plib_keys = set(linkmap._obj_key(f) \
12303 for f in plibdata[cpv])
12304 for f in plibdata[cpv]:
12305 if f in consumer_map:
12308 for c in linkmap.findConsumers(f):
12309 # Filter out any consumers that are also preserved libs
12310 # belonging to the same package as the provider.
12311 if linkmap._obj_key(c) not in internal_plib_keys:
12312 consumers.append(c)
12314 consumer_map[f] = consumers
# +1 so we can detect "more than MAX_DISPLAY" below.
12315 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12317 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
# Second pass: print per-package, grouping same-inode paths together.
12319 for cpv in plibdata:
12320 print colorize("WARN", ">>>") + " package: %s" % cpv
12322 for f in plibdata[cpv]:
12323 obj_key = linkmap._obj_key(f)
12324 alt_paths = samefile_map.get(obj_key)
12325 if alt_paths is None:
12327 samefile_map[obj_key] = alt_paths
12330 for alt_paths in samefile_map.itervalues():
12331 alt_paths = sorted(alt_paths)
12332 for p in alt_paths:
12333 print colorize("WARN", " * ") + " - %s" % (p,)
12335 consumers = consumer_map.get(f, [])
12336 for c in consumers[:MAX_DISPLAY]:
12337 print colorize("WARN", " * ") + " used by %s (%s)" % \
12338 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
# Exactly one extra consumer: just print it rather than "1 other".
12339 if len(consumers) == MAX_DISPLAY + 1:
12340 print colorize("WARN", " * ") + " used by %s (%s)" % \
12341 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12342 for x in owners.get(consumers[MAX_DISPLAY], [])))
12343 elif len(consumers) > MAX_DISPLAY:
12344 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12345 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12348 def _flush_elog_mod_echo():
12350 Dump the mod_echo output now so that our other
12351 notifications are shown last.
12353 @returns: True if messages were shown, False otherwise.
12355 messages_shown = False
# Import guarded: mod_echo may be absent during a downgrade.
12357 from portage.elog import mod_echo
12358 except ImportError:
12359 pass # happens during downgrade to a version without the module
# _items is mod_echo's pending-message buffer; finalize() flushes it.
12361 messages_shown = bool(mod_echo._items)
12362 mod_echo.finalize()
12363 return messages_shown
# End-of-session housekeeping: log the exit status, flush mod_echo,
# refresh the GNU info index, report pending config-file updates,
# show news and preserved-libs notices.  Per the docstring, it ends by
# calling sys.exit(retval) (on a line elided from this view).
12365 def post_emerge(root_config, myopts, mtimedb, retval):
12367 Misc. things to run at the end of a merge session.
12370 Update Config Files
12373 Display preserved libs warnings
12376 @param trees: A dictionary mapping each ROOT to it's package databases
12378 @param mtimedb: The mtimeDB to store data needed across merge invocations
12379 @type mtimedb: MtimeDB class instance
12380 @param retval: Emerge's return value
12384 1. Calls sys.exit(retval)
12387 target_root = root_config.root
12388 trees = { target_root : root_config.trees }
12389 vardbapi = trees[target_root]["vartree"].dbapi
12390 settings = vardbapi.settings
12391 info_mtimes = mtimedb["info"]
12393 # Load the most current variables from ${ROOT}/etc/profile.env
12396 settings.regenerate()
12399 config_protect = settings.get("CONFIG_PROTECT","").split()
12400 infodirs = settings.get("INFOPATH","").split(":") + \
12401 settings.get("INFODIR","").split(":")
12405 if retval == os.EX_OK:
12406 exit_msg = " *** exiting successfully."
12408 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12409 emergelog("notitles" not in settings.features, exit_msg)
12411 _flush_elog_mod_echo()
# If the installed-package counter hash is unchanged, the vdb was not
# modified this session -- show the news notice and short-circuit.
12413 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12414 if "--pretend" in myopts or (counter_hash is not None and \
12415 counter_hash == vardbapi._counter_hash()):
12416 display_news_notification(root_config, myopts)
12417 # If vdb state has not changed then there's nothing else to do.
12420 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12421 portage.util.ensure_dirs(vdb_path)
# Lock the vdb while updating info indexes (unlock below).
12423 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12424 vdb_lock = portage.locks.lockdir(vdb_path)
12428 if "noinfo" not in settings.features:
12429 chk_updated_info_files(target_root,
12430 infodirs, info_mtimes, retval)
12434 portage.locks.unlockdir(vdb_lock)
12436 chk_updated_cfg_files(target_root, config_protect)
12438 display_news_notification(root_config, myopts)
12439 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12440 display_preserved_libs(vardbapi)
# Scan each CONFIG_PROTECT path for pending ._cfg????_* update files
# (as produced by the config-protection mechanism) and tell the user
# how many config files need updating.
12445 def chk_updated_cfg_files(target_root, config_protect):
12447 #number of directories with some protect files in them
12449 for x in config_protect:
12450 x = os.path.join(target_root, x.lstrip(os.path.sep))
12451 if not os.access(x, os.W_OK):
12452 # Avoid Permission denied errors generated
12456 mymode = os.lstat(x).st_mode
12459 if stat.S_ISLNK(mymode):
12460 # We want to treat it like a directory if it
12461 # is a symlink to an existing directory.
12463 real_mode = os.stat(x).st_mode
12464 if stat.S_ISDIR(real_mode):
# Directory: search recursively, pruning hidden subdirectories.
# File: look only for its own ._cfg????_<name> siblings.
12468 if stat.S_ISDIR(mymode):
12469 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12471 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12472 os.path.split(x.rstrip(os.path.sep))
12473 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12474 a = commands.getstatusoutput(mycommand)
12476 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12478 # Show the error message alone, sending stdout to /dev/null.
12479 os.system(mycommand + " 1>/dev/null")
# -print0 output: NUL-separated paths; trailing empty element dropped.
12481 files = a[1].split('\0')
12482 # split always produces an empty string as the last element
12483 if files and not files[-1]:
12487 print "\n"+colorize("WARN", " * IMPORTANT:"),
12488 if stat.S_ISDIR(mymode):
12489 print "%d config files in '%s' need updating." % \
12492 print "config file '%s' needs updating." % x
# Closing hint printed once (guard on an elided line).
12495 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12496 " section of the " + bold("emerge")
12497 print " "+yellow("*")+" man page to learn how to update config files."
# Thin wrapper around portage.news.NewsManager for a single repository.
12499 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12502 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12503 Returns the number of unread (yet relevant) items.
12505 @param portdb: a portage tree database
12506 @type portdb: pordbapi
12507 @param vardb: an installed package database
12508 @type vardb: vardbapi
12511 @param UNREAD_PATH:
12517 1. The number of unread but relevant news items.
# Imported here (not at module top) so the news subsystem is only
# loaded when a news check is actually performed.
12520 from portage.news import NewsManager
12521 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12522 return manager.getUnreadItems( repo_id, update=update )
# Insert "<category>/" into an atom just before its first word
# character, preserving any leading operator (e.g. ">=foo-1" with
# category "cat" -> ">=cat/foo-1").  The no-match branch and the
# return statement fall on elided lines.
12524 def insert_category_into_atom(atom, category):
12525 alphanum = re.search(r'\w', atom)
12527 ret = atom[:alphanum.start()] + "%s/" % category + \
12528 atom[alphanum.start():]
# True if 'x' is a valid package atom once a dummy "cat/" category is
# inserted (so bare package names validate too).
12533 def is_valid_package_atom(x):
12535 alphanum = re.search(r'\w', x)
# Splice "cat/" in before the first word character, after any
# version operator prefix such as ">=".
12537 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12538 return portage.isvalidatom(x)
# Point the user at the handbook section explaining blocked packages.
12540 def show_blocker_docs_link():
12542 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12543 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12545 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12548 def show_mask_docs():
12549 print "For more information, see the MASKED PACKAGES section in the emerge"
12550 print "man page or refer to the Gentoo Handbook."
# Entry point for `emerge --sync` (and the sync half of `--metadata`).
# Dispatches on repository type: an existing .git checkout -> `git pull`;
# SYNC=rsync:// -> timestamp-checked rsync with retry loop; SYNC=cvs:// ->
# cvs checkout/update. Afterwards: reload the emerge config, optionally run
# action_metadata, apply global updates, run the user's post_sync hook, and
# nag if a newer portage is available.
# NOTE(review): this chunk is missing many source lines (gaps in the
# embedded numbering); comments below only describe visible code.
12552 def action_sync(settings, trees, mtimedb, myopts, myaction):
12553 xterm_titles = "notitles" not in settings.features
12554 emergelog(xterm_titles, " === sync")
12555 myportdir = settings.get("PORTDIR", None)
12556 out = portage.output.EOutput()
12558 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
12560 if myportdir[-1]=="/":
12561 myportdir=myportdir[:-1]
12563 st = os.stat(myportdir)
12567 print ">>>",myportdir,"not found, creating it."
12568 os.makedirs(myportdir,0755)
12569 st = os.stat(myportdir)
# Run the sync child processes as the tree's owner when FEATURES=usersync
# and we have the privileges (secpass >= 2) to drop to that uid/gid.
12572 spawn_kwargs["env"] = settings.environ()
12573 if 'usersync' in settings.features and \
12574 portage.data.secpass >= 2 and \
12575 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12576 st.st_gid != os.getgid() and st.st_mode & 0070):
12578 homedir = pwd.getpwuid(st.st_uid).pw_dir
12582 # Drop privileges when syncing, in order to match
12583 # existing uid/gid settings.
12584 spawn_kwargs["uid"] = st.st_uid
12585 spawn_kwargs["gid"] = st.st_gid
12586 spawn_kwargs["groups"] = [st.st_gid]
12587 spawn_kwargs["env"]["HOME"] = homedir
12589 if not st.st_mode & 0020:
12590 umask = umask | 0020
12591 spawn_kwargs["umask"] = umask
12593 syncuri = settings.get("SYNC", "").strip()
12595 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12596 noiselevel=-1, level=logging.ERROR)
# Detect a VCS checkout living in PORTDIR so rsync doesn't clobber it.
12599 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12600 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12603 dosyncuri = syncuri
12604 updatecache_flg = False
12605 if myaction == "metadata":
12606 print "skipping sync"
12607 updatecache_flg = True
# --- git-based sync ---------------------------------------------------
12608 elif ".git" in vcs_dirs:
12609 # Update existing git repository, and ignore the syncuri. We are
12610 # going to trust the user and assume that the user is in the branch
12611 # that he/she wants updated. We'll let the user manage branches with
12613 if portage.process.find_binary("git") is None:
12614 msg = ["Command not found: git",
12615 "Type \"emerge dev-util/git\" to enable git support."]
12617 writemsg_level("!!! %s\n" % l,
12618 level=logging.ERROR, noiselevel=-1)
12620 msg = ">>> Starting git pull in %s..." % myportdir
12621 emergelog(xterm_titles, msg )
12622 writemsg_level(msg + "\n")
12623 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12624 (portage._shell_quote(myportdir),), **spawn_kwargs)
12625 if exitcode != os.EX_OK:
12626 msg = "!!! git pull error in %s." % myportdir
12627 emergelog(xterm_titles, msg)
12628 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12630 msg = ">>> Git pull in %s successful" % myportdir
12631 emergelog(xterm_titles, msg)
12632 writemsg_level(msg + "\n")
12633 exitcode = git_sync_timestamps(settings, myportdir)
12634 if exitcode == os.EX_OK:
12635 updatecache_flg = True
# --- rsync-based sync -------------------------------------------------
12636 elif syncuri[:8]=="rsync://":
12637 for vcs_dir in vcs_dirs:
12638 writemsg_level(("!!! %s appears to be under revision " + \
12639 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12640 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12642 if not os.path.exists("/usr/bin/rsync"):
12643 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12644 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
# Build the rsync option list: hardcoded defaults when the user did not
# set PORTAGE_RSYNC_OPTS, otherwise validate the user-supplied options.
12649 if settings["PORTAGE_RSYNC_OPTS"] == "":
12650 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12651 rsync_opts.extend([
12652 "--recursive", # Recurse directories
12653 "--links", # Consider symlinks
12654 "--safe-links", # Ignore links outside of tree
12655 "--perms", # Preserve permissions
12656 "--times", # Preserive mod times
12657 "--compress", # Compress the data transmitted
12658 "--force", # Force deletion on non-empty dirs
12659 "--whole-file", # Don't do block transfers, only entire files
12660 "--delete", # Delete files that aren't in the master tree
12661 "--stats", # Show final statistics about what was transfered
12662 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12663 "--exclude=/distfiles", # Exclude distfiles from consideration
12664 "--exclude=/local", # Exclude local from consideration
12665 "--exclude=/packages", # Exclude packages from consideration
12669 # The below validation is not needed when using the above hardcoded
12672 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12674 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12675 for opt in ("--recursive", "--times"):
12676 if opt not in rsync_opts:
12677 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12678 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12679 rsync_opts.append(opt)
12681 for exclude in ("distfiles", "local", "packages"):
12682 opt = "--exclude=/%s" % exclude
12683 if opt not in rsync_opts:
12684 portage.writemsg(yellow("WARNING:") + \
12685 " adding required option %s not included in " % opt + \
12686 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12687 rsync_opts.append(opt)
# Official gentoo.org mirrors get a few options forced on.
12689 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12690 def rsync_opt_startswith(opt_prefix):
12691 for x in rsync_opts:
12692 if x.startswith(opt_prefix):
12696 if not rsync_opt_startswith("--timeout="):
12697 rsync_opts.append("--timeout=%d" % mytimeout)
12699 for opt in ("--compress", "--whole-file"):
12700 if opt not in rsync_opts:
12701 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12702 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12703 rsync_opts.append(opt)
12705 if "--quiet" in myopts:
12706 rsync_opts.append("--quiet") # Shut up a lot
12708 rsync_opts.append("--verbose") # Print filelist
12710 if "--verbose" in myopts:
12711 rsync_opts.append("--progress") # Progress meter for each file
12713 if "--debug" in myopts:
12714 rsync_opts.append("--checksum") # Force checksum on all files
12716 # Real local timestamp file.
12717 servertimestampfile = os.path.join(
12718 myportdir, "metadata", "timestamp.chk")
12720 content = portage.util.grabfile(servertimestampfile)
12724 mytimestamp = time.mktime(time.strptime(content[0],
12725 "%a, %d %b %Y %H:%M:%S +0000"))
12726 except (OverflowError, ValueError):
12731 rsync_initial_timeout = \
12732 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12734 rsync_initial_timeout = 15
12737 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12738 except SystemExit, e:
12739 raise # Needed else can't exit
12741 maxretries=3 #default number of retries
# Split the SYNC URI into user, host and port so individual mirror IPs
# can be substituted in on each retry.
12744 user_name, hostname, port = re.split(
12745 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12748 if user_name is None:
12750 updatecache_flg=True
12751 all_rsync_opts = set(rsync_opts)
12752 extra_rsync_opts = shlex.split(
12753 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12754 all_rsync_opts.update(extra_rsync_opts)
12755 family = socket.AF_INET
12756 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12757 family = socket.AF_INET
12758 elif socket.has_ipv6 and \
12759 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12760 family = socket.AF_INET6
# Sentinel exit codes used by the retry loop below (distinct from any
# real rsync exit status, which is always >= 0).
12762 SERVER_OUT_OF_DATE = -1
12763 EXCEEDED_MAX_RETRIES = -2
12769 for addrinfo in socket.getaddrinfo(
12770 hostname, None, family, socket.SOCK_STREAM):
12771 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12772 # IPv6 addresses need to be enclosed in square brackets
12773 ips.append("[%s]" % addrinfo[4][0])
12775 ips.append(addrinfo[4][0])
12776 from random import shuffle
12778 except SystemExit, e:
12779 raise # Needed else can't exit
12780 except Exception, e:
12781 print "Notice:",str(e)
12786 dosyncuri = syncuri.replace(
12787 "//" + user_name + hostname + port + "/",
12788 "//" + user_name + ips[0] + port + "/", 1)
12789 except SystemExit, e:
12790 raise # Needed else can't exit
12791 except Exception, e:
12792 print "Notice:",str(e)
12796 if "--ask" in myopts:
12797 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12802 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12803 if "--quiet" not in myopts:
12804 print ">>> Starting rsync with "+dosyncuri+"..."
12806 emergelog(xterm_titles,
12807 ">>> Starting retry %d of %d with %s" % \
12808 (retries,maxretries,dosyncuri))
12809 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12811 if mytimestamp != 0 and "--quiet" not in myopts:
12812 print ">>> Checking server timestamp ..."
12814 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12816 if "--debug" in myopts:
12819 exitcode = os.EX_OK
12820 servertimestamp = 0
12821 # Even if there's no timestamp available locally, fetch the
12822 # timestamp anyway as an initial probe to verify that the server is
12823 # responsive. This protects us from hanging indefinitely on a
12824 # connection attempt to an unresponsive server which rsync's
12825 # --timeout option does not prevent.
12827 # Temporary file for remote server timestamp comparison.
12828 from tempfile import mkstemp
12829 fd, tmpservertimestampfile = mkstemp()
12831 mycommand = rsynccommand[:]
12832 mycommand.append(dosyncuri.rstrip("/") + \
12833 "/metadata/timestamp.chk")
12834 mycommand.append(tmpservertimestampfile)
# SIGALRM guards the initial connection, which rsync's own --timeout
# does not cover.
12838 def timeout_handler(signum, frame):
12839 raise portage.exception.PortageException("timed out")
12840 signal.signal(signal.SIGALRM, timeout_handler)
12841 # Timeout here in case the server is unresponsive. The
12842 # --timeout rsync option doesn't apply to the initial
12843 # connection attempt.
12844 if rsync_initial_timeout:
12845 signal.alarm(rsync_initial_timeout)
12847 mypids.extend(portage.process.spawn(
12848 mycommand, env=settings.environ(), returnpid=True))
12849 exitcode = os.waitpid(mypids[0], 0)[1]
12850 content = portage.grabfile(tmpservertimestampfile)
12852 if rsync_initial_timeout:
12855 os.unlink(tmpservertimestampfile)
12858 except portage.exception.PortageException, e:
# On timeout, reap the probe child if it is still running.
12862 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12863 os.kill(mypids[0], signal.SIGTERM)
12864 os.waitpid(mypids[0], 0)
12865 # This is the same code rsync uses for timeout.
# Normalize the waitpid status: move a signal-death code into the
# exit-status byte, otherwise shift the real exit status down.
12868 if exitcode != os.EX_OK:
12869 if exitcode & 0xff:
12870 exitcode = (exitcode & 0xff) << 8
12872 exitcode = exitcode >> 8
12874 portage.process.spawned_pids.remove(mypids[0])
12877 servertimestamp = time.mktime(time.strptime(
12878 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12879 except (OverflowError, ValueError):
12881 del mycommand, mypids, content
# Compare server vs. local timestamp to decide: already current, server
# stale, or proceed with the full tree transfer.
12882 if exitcode == os.EX_OK:
12883 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12884 emergelog(xterm_titles,
12885 ">>> Cancelling sync -- Already current.")
12888 print ">>> Timestamps on the server and in the local repository are the same."
12889 print ">>> Cancelling all further sync action. You are already up to date."
12891 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12895 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12896 emergelog(xterm_titles,
12897 ">>> Server out of date: %s" % dosyncuri)
12900 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12902 print ">>> In order to force sync, remove '%s'." % servertimestampfile
12905 exitcode = SERVER_OUT_OF_DATE
12906 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12908 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12909 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12910 if exitcode in [0,1,3,4,11,14,20,21]:
12912 elif exitcode in [1,3,4,11,14,20,21]:
12915 # Code 2 indicates protocol incompatibility, which is expected
12916 # for servers with protocol < 29 that don't support
12917 # --prune-empty-directories. Retry for a server that supports
12918 # at least rsync protocol version 29 (>=rsync-2.6.4).
12923 if retries<=maxretries:
12924 print ">>> Retrying..."
12929 updatecache_flg=False
12930 exitcode = EXCEEDED_MAX_RETRIES
12934 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12935 elif exitcode == SERVER_OUT_OF_DATE:
12937 elif exitcode == EXCEEDED_MAX_RETRIES:
12939 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
# Friendly diagnoses for well-known rsync failure classes (syntax error,
# file IO error, killed, other).
12944 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12945 msg.append("that your SYNC statement is proper.")
12946 msg.append("SYNC=" + settings["SYNC"])
12948 msg.append("Rsync has reported that there is a File IO error. Normally")
12949 msg.append("this means your disk is full, but can be caused by corruption")
12950 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12951 msg.append("and try again after the problem has been fixed.")
12952 msg.append("PORTDIR=" + settings["PORTDIR"])
12954 msg.append("Rsync was killed before it finished.")
12956 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12957 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12958 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12959 msg.append("temporary problem unless complications exist with your network")
12960 msg.append("(and possibly your system's filesystem) configuration.")
# --- cvs-based sync ---------------------------------------------------
12964 elif syncuri[:6]=="cvs://":
12965 if not os.path.exists("/usr/bin/cvs"):
12966 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12967 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12969 cvsroot=syncuri[6:]
12970 cvsdir=os.path.dirname(myportdir)
12971 if not os.path.exists(myportdir+"/CVS"):
12973 print ">>> Starting initial cvs checkout with "+syncuri+"..."
12974 if os.path.exists(cvsdir+"/gentoo-x86"):
12975 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12978 os.rmdir(myportdir)
12980 if e.errno != errno.ENOENT:
12982 "!!! existing '%s' directory; exiting.\n" % myportdir)
12985 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12986 print "!!! cvs checkout error; exiting."
12988 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12991 print ">>> Starting cvs update with "+syncuri+"..."
12992 retval = portage.process.spawn_bash(
12993 "cd %s; cvs -z0 -q update -dP" % \
12994 (portage._shell_quote(myportdir),), **spawn_kwargs)
12995 if retval != os.EX_OK:
12997 dosyncuri = syncuri
12999 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13000 noiselevel=-1, level=logging.ERROR)
# --- post-sync housekeeping -------------------------------------------
13003 if updatecache_flg and \
13004 myaction != "metadata" and \
13005 "metadata-transfer" not in settings.features:
13006 updatecache_flg = False
13008 # Reload the whole config from scratch.
13009 settings, trees, mtimedb = load_emerge_config(trees=trees)
13010 root_config = trees[settings["ROOT"]]["root_config"]
13011 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13013 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13014 action_metadata(settings, portdb, myopts)
13016 if portage._global_updates(trees, mtimedb["updates"]):
13018 # Reload the whole config from scratch.
13019 settings, trees, mtimedb = load_emerge_config(trees=trees)
13020 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13021 root_config = trees[settings["ROOT"]]["root_config"]
# Compare the best visible portage version against the installed one so
# we can advise the user to upgrade portage first.
13023 mybestpv = portdb.xmatch("bestmatch-visible",
13024 portage.const.PORTAGE_PACKAGE_ATOM)
13025 mypvs = portage.best(
13026 trees[settings["ROOT"]]["vartree"].dbapi.match(
13027 portage.const.PORTAGE_PACKAGE_ATOM))
13029 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13031 if myaction != "metadata":
# Run the user's executable post_sync hook, passing the sync URI used.
13032 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13033 retval = portage.process.spawn(
13034 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13035 dosyncuri], env=settings.environ())
13036 if retval != os.EX_OK:
13037 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13039 if(mybestpv != mypvs) and not "--quiet" in myopts:
13041 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13042 print red(" * ")+"that you update portage now, before any other packages are updated."
13044 print red(" * ")+"To update portage, run 'emerge portage' now."
13047 display_news_notification(root_config, myopts)
# After a git sync, restore mtimes on ebuilds and eclasses from the
# metadata cache so cache validation doesn't treat everything as stale
# (git does not preserve file timestamps). Files reported modified by
# `git diff-index` relative to HEAD are skipped.
# NOTE(review): this chunk is missing source lines (gaps in the embedded
# numbering); comments describe only visible code.
13050 def git_sync_timestamps(settings, portdir):
13052 Since git doesn't preserve timestamps, synchronize timestamps between
13053 entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13054 for a given file as long as the file in the working tree is not modified
13055 (relative to HEAD).
13057 cache_dir = os.path.join(portdir, "metadata", "cache")
13058 if not os.path.isdir(cache_dir):
13060 writemsg_level(">>> Synchronizing timestamps...\n")
13062 from portage.cache.cache_errors import CacheError
13064 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13065 portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13066 except CacheError, e:
13067 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13068 level=logging.ERROR, noiselevel=-1)
13071 ec_dir = os.path.join(portdir, "eclass")
13073 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13074 if f.endswith(".eclass"))
13076 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13077 level=logging.ERROR, noiselevel=-1)
# Ask git which tracked files are locally modified; those keep their
# working-tree mtimes.
13080 args = [portage.const.BASH_BINARY, "-c",
13081 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13082 portage._shell_quote(portdir)]
13084 proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13085 modified_files = set(l.rstrip("\n") for l in proc.stdout)
13087 if rval != os.EX_OK:
13090 modified_eclasses = set(ec for ec in ec_names \
13091 if os.path.join("eclass", ec + ".eclass") in modified_files)
# ec -> mtime already written back this run; used to detect cache entries
# that disagree with each other about an eclass mtime.
13093 updated_ec_mtimes = {}
13095 for cpv in cache_db:
13096 cpv_split = portage.catpkgsplit(cpv)
13097 if cpv_split is None:
13098 writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13099 level=logging.ERROR, noiselevel=-1)
13102 cat, pn, ver, rev = cpv_split
13103 cat, pf = portage.catsplit(cpv)
13104 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13105 if relative_eb_path in modified_files:
13109 cache_entry = cache_db[cpv]
13110 eb_mtime = cache_entry.get("_mtime_")
13111 ec_mtimes = cache_entry.get("_eclasses_")
13113 writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13114 level=logging.ERROR, noiselevel=-1)
13116 except CacheError, e:
13117 writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13118 (cpv, e), level=logging.ERROR, noiselevel=-1)
13121 if eb_mtime is None:
13122 writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13123 level=logging.ERROR, noiselevel=-1)
13127 eb_mtime = long(eb_mtime)
13129 writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13130 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13133 if ec_mtimes is None:
13134 writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13135 level=logging.ERROR, noiselevel=-1)
13138 if modified_eclasses.intersection(ec_mtimes):
13141 missing_eclasses = set(ec_mtimes).difference(ec_names)
13142 if missing_eclasses:
13143 writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13144 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13148 eb_path = os.path.join(portdir, relative_eb_path)
# NOTE(review): as visible here, `current_eb_mtime` is a full os.stat()
# result yet is compared against the integer `eb_mtime` below, which
# would never be equal; the `.st_mtime` extraction is probably on a line
# missing from this chunk — confirm against the full source.
13150 current_eb_mtime = os.stat(eb_path)
13152 writemsg_level("!!! Missing ebuild: %s\n" % \
13153 (cpv,), level=logging.ERROR, noiselevel=-1)
13156 inconsistent = False
13157 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13158 updated_mtime = updated_ec_mtimes.get(ec)
13159 if updated_mtime is not None and updated_mtime != ec_mtime:
13160 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13161 (cpv, ec), level=logging.ERROR, noiselevel=-1)
13162 inconsistent = True
13168 if current_eb_mtime != eb_mtime:
13169 os.utime(eb_path, (eb_mtime, eb_mtime))
13171 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13172 if ec in updated_ec_mtimes:
13174 ec_path = os.path.join(ec_dir, ec + ".eclass")
13175 current_mtime = long(os.stat(ec_path).st_mtime)
13176 if current_mtime != ec_mtime:
13177 os.utime(ec_path, (ec_mtime, ec_mtime))
13178 updated_ec_mtimes[ec] = ec_mtime
# Transfer the ${PORTDIR}/metadata/cache into the local depcache
# (emerge --metadata), with a percentage progress display unless --quiet.
# NOTE(review): this chunk is missing source lines (gaps in the embedded
# numbering); comments describe only visible code.
13182 def action_metadata(settings, portdb, myopts):
13183 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
13184 old_umask = os.umask(0002)
13185 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on a depcachedir pointing at a primary system
# directory — downstream cache maintenance could destroy it.
13186 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
13187 "/lib", "/opt", "/proc", "/root", "/sbin",
13188 "/sys", "/tmp", "/usr", "/var"]:
13189 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13190 "ROOT DIRECTORY ON YOUR SYSTEM."
13191 print >> sys.stderr, \
13192 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13194 if not os.path.exists(cachedir):
13197 ec = portage.eclass_cache.cache(portdb.porttree_root)
13198 myportdir = os.path.realpath(settings["PORTDIR"])
13199 cm = settings.load_best_module("portdbapi.metadbmodule")(
13200 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13202 from portage.cache import util
# Progress reporter: iterates all cpvs while printing a percentage that
# is updated roughly once per 1% of categories processed.
13204 class percentage_noise_maker(util.quiet_mirroring):
13205 def __init__(self, dbapi):
13207 self.cp_all = dbapi.cp_all()
13208 l = len(self.cp_all)
13209 self.call_update_min = 100000000
13210 self.min_cp_all = l/100.0
13214 def __iter__(self):
13215 for x in self.cp_all:
13217 if self.count > self.min_cp_all:
13218 self.call_update_min = 0
13220 for y in self.dbapi.cp_list(x):
# NOTE(review): `call_update_mine` looks like a typo for
# `call_update_min` (assigned nowhere else) — confirm.
13222 self.call_update_mine = 0
13224 def update(self, *arg):
13225 try: self.pstr = int(self.pstr) + 1
13226 except ValueError: self.pstr = 1
# Backspace over the previous percentage before rewriting it.
13227 sys.stdout.write("%s%i%%" % \
13228 ("\b" * (len(str(self.pstr))+1), self.pstr))
13230 self.call_update_min = 10000000
13232 def finish(self, *arg):
13233 sys.stdout.write("\b\b\b\b100%\n")
13236 if "--quiet" in myopts:
13237 def quicky_cpv_generator(cp_all_list):
13238 for x in cp_all_list:
13239 for y in portdb.cp_list(x):
13241 source = quicky_cpv_generator(portdb.cp_all())
13242 noise_maker = portage.cache.util.quiet_mirroring()
13244 noise_maker = source = percentage_noise_maker(portdb)
13245 portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13246 eclass_cache=ec, verbose_instance=noise_maker)
# Restore the umask saved at function entry.
13249 os.umask(old_umask)
# Regenerate metadata cache entries for the tree (emerge --regen),
# delegating the parallel work to MetadataRegen and returning its
# exit status.
# NOTE(review): lines between the visible ones are missing from this
# chunk (e.g. the try wrapper around os.close and the regen.run call).
13251 def action_regen(settings, portdb, max_jobs, max_load):
13252 xterm_titles = "notitles" not in settings.features
13253 emergelog(xterm_titles, " === regen")
13254 #regenerate cache entries
13255 portage.writemsg_stdout("Regenerating cache entries...\n")
# Close stdin so spawned ebuild processes cannot block on it.
13257 os.close(sys.stdin.fileno())
13258 except SystemExit, e:
13259 raise # Needed else can't exit
13264 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13267 portage.writemsg_stdout("done!\n")
13268 return regen.returncode
# Run the pkg_config phase of a single installed package
# (emerge --config <atom>), prompting for disambiguation when the atom
# matches several installed versions.
# NOTE(review): this chunk is missing source lines (gaps in the embedded
# numbering); comments describe only visible code.
13270 def action_config(settings, trees, myopts, myfiles):
13271 if len(myfiles) != 1:
13272 print red("!!! config can only take a single package atom at this time\n")
13274 if not is_valid_package_atom(myfiles[0]):
13275 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13277 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13278 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13282 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13283 except portage.exception.AmbiguousPackageName, e:
13284 # Multiple matches thrown from cpv_expand
13287 print "No packages found.\n"
13289 elif len(pkgs) > 1:
# With --ask, present a numbered menu of matching versions ("X" aborts);
# otherwise just list the matches and bail.
13290 if "--ask" in myopts:
13292 print "Please select a package to configure:"
13296 options.append(str(idx))
13297 print options[-1]+") "+pkg
13299 options.append("X")
13300 idx = userquery("Selection?", options)
13303 pkg = pkgs[int(idx)-1]
13305 print "The following packages available:"
13308 print "\nPlease use a specific atom or the --ask option."
13314 if "--ask" in myopts:
13315 if userquery("Ready to configure "+pkg+"?") == "No":
13318 print "Configuring pkg..."
13320 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13321 mysettings = portage.config(clone=settings)
13322 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13323 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): BUG — `settings.get("PORTAGE_DEBUG", "") == 1` compares a
# string against the int 1 and is therefore always False, while the
# correctly computed `debug` local above goes unused for this phase.
# Intended fix: pass debug=debug here (the "clean" call below already
# does). The same `== 1` pattern appears in action_info.
13324 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13326 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13327 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13328 if retval == os.EX_OK:
13329 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13330 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Print system configuration for bug reports (emerge --info): portage
# version, uname, tree timestamp, toolchain package versions, key make.conf
# variables, and — when package atoms are given — per-package build
# settings that differ from the current global configuration, plus the
# pkg_info phase output.
# NOTE(review): this chunk is missing source lines (gaps in the embedded
# numbering); comments describe only visible code.
13333 def action_info(settings, trees, myopts, myfiles):
13334 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13335 settings.profile_path, settings["CHOST"],
13336 trees[settings["ROOT"]]["vartree"].dbapi)
13338 header_title = "System Settings"
13340 print header_width * "="
13341 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13342 print header_width * "="
13343 print "System uname: "+platform.platform(aliased=1)
13345 lastSync = portage.grabfile(os.path.join(
13346 settings["PORTDIR"], "metadata", "timestamp.chk"))
13347 print "Timestamp of tree:",
# Probe optional helper tools; report their versions and whether the
# corresponding FEATURES flag is active.
13353 output=commands.getstatusoutput("distcc --version")
13355 print str(output[1].split("\n",1)[0]),
13356 if "distcc" in settings.features:
13361 output=commands.getstatusoutput("ccache -V")
13363 print str(output[1].split("\n",1)[0]),
13364 if "ccache" in settings.features:
# Report installed versions of toolchain-relevant packages (base list
# plus profiles/info_pkgs additions).
13369 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13370 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
13371 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13372 myvars = portage.util.unique_array(myvars)
13376 if portage.isvalidatom(x):
13377 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13378 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13379 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13381 for pn, ver, rev in pkg_matches:
13383 pkgs.append(ver + "-" + rev)
13387 pkgs = ", ".join(pkgs)
13388 print "%-20s %s" % (x+":", pkgs)
13390 print "%-20s %s" % (x+":", "[NOT VALID]")
13392 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# With --verbose, dump every settings key; otherwise a curated list plus
# profiles/info_vars additions.
13394 if "--verbose" in myopts:
13395 myvars=settings.keys()
13397 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13398 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13399 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13400 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13402 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13404 myvars = portage.util.unique_array(myvars)
13410 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND-derived flags split out under their own
# variables (e.g. VIDEO_CARDS) rather than inline.
13412 use = set(settings["USE"].split())
13413 use_expand = settings["USE_EXPAND"].split()
13415 for varname in use_expand:
13416 flag_prefix = varname.lower() + "_"
13417 for f in list(use):
13418 if f.startswith(flag_prefix):
13422 print 'USE="%s"' % " ".join(use),
13423 for varname in use_expand:
13424 myval = settings.get(varname)
13426 print '%s="%s"' % (varname, myval),
13429 unset_vars.append(x)
13431 print "Unset: "+", ".join(unset_vars)
13434 if "--debug" in myopts:
13435 for x in dir(portage):
13436 module = getattr(portage, x)
13437 if "cvs_id_string" in dir(module):
13438 print "%s: %s" % (str(x), str(module.cvs_id_string))
13440 # See if we can find any packages installed matching the strings
13441 # passed on the command line
13443 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13444 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13446 mypkgs.extend(vardb.match(x))
13448 # If some packages were found...
13450 # Get our global settings (we only print stuff if it varies from
13451 # the current config)
13452 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13453 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13455 pkgsettings = portage.config(clone=settings)
13457 for myvar in mydesiredvars:
13458 global_vals[myvar] = set(settings.get(myvar, "").split())
13460 # Loop through each package
13461 # Only print settings if they differ from global settings
13462 header_title = "Package Settings"
13463 print header_width * "="
13464 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13465 print header_width * "="
13466 from portage.output import EOutput
13469 # Get all package specific variables
13470 auxvalues = vardb.aux_get(pkg, auxkeys)
13472 for i in xrange(len(auxkeys)):
13473 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13475 for myvar in mydesiredvars:
13476 # If the package variable doesn't match the
13477 # current global variable, something has changed
13478 # so set diff_found so we know to print
13479 if valuesmap[myvar] != global_vals[myvar]:
13480 diff_values[myvar] = valuesmap[myvar]
13481 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13482 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13483 pkgsettings.reset()
13484 # If a matching ebuild is no longer available in the tree, maybe it
13485 # would make sense to compare against the flags for the best
13486 # available version with the same slot?
13488 if portdb.cpv_exists(pkg):
13490 pkgsettings.setcpv(pkg, mydb=mydb)
13491 if valuesmap["IUSE"].intersection(
13492 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13493 diff_values["USE"] = valuesmap["USE"]
13494 # If a difference was found, print the info for
13497 # Print package info
13498 print "%s was built with the following:" % pkg
13499 for myvar in mydesiredvars + ["USE"]:
13500 if myvar in diff_values:
13501 mylist = list(diff_values[myvar])
13503 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13505 print ">>> Attempting to run pkg_info() for '%s'" % pkg
13506 ebuildpath = vardb.findname(pkg)
13507 if not ebuildpath or not os.path.exists(ebuildpath):
13508 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): BUG — `settings.get("PORTAGE_DEBUG", "") == 1` compares a
# string against the int 1, so debug is always False here; should compare
# against "1" (see the `debug` local computed in action_config).
13510 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13511 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13512 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Run emerge --search: execute each search term against one shared search
# instance, reporting regex errors per term, then print the accumulated
# results once at the end.
# NOTE(review): guard lines (e.g. the empty-myfiles check) are missing
# from this chunk.
13515 def action_search(root_config, myopts, myfiles, spinner):
13517 print "emerge: no search terms provided."
13519 searchinstance = search(root_config,
13520 spinner, "--searchdesc" in myopts,
13521 "--quiet" not in myopts, "--usepkg" in myopts,
13522 "--usepkgonly" in myopts)
13523 for mysearch in myfiles:
# Search terms are user-supplied regexes; re.error is reported per term.
13525 searchinstance.execute(mysearch)
13526 except re.error, comment:
13527 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13529 searchinstance.output()
# Shared implementation of `emerge --depclean` and `emerge --prune`
# (`action` selects the mode).  Builds a dependency graph rooted at the
# system and world sets, then unmerges installed packages that nothing in
# that graph requires -- after checking that removal would not break
# shared-library (link level) dependencies.
# NOTE(review): this numbered listing is elided -- many original lines
# (msg = [] initializers, try:/else:/continue/return lines, etc.) are
# missing, so several statements below appear without their framing.
13531 def action_depclean(settings, trees, ldpath_mtimes,
13532 myopts, action, myfiles, spinner):
13533 # Kill packages that aren't explicitly merged or are required as a
13534 # dependency of another package. World file is explicit.
13536 # Global depclean or prune operations are not very safe when there are
13537 # missing dependencies since it's unknown how badly incomplete
13538 # the dependency graph is, and we might accidentally remove packages
13539 # that should have been pulled into the graph. On the other hand, it's
13540 # relatively safe to ignore missing deps when only asked to remove
13541 # specific packages.
13542 allow_missing_deps = len(myfiles) > 0
# Safety-warning text shown before a full (argument-less) depclean; the
# `msg = []` initializer line is elided from this listing.
13545 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13546 msg.append("mistakes. Packages that are part of the world set will always\n")
13547 msg.append("be kept. They can be manually added to this set with\n")
13548 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13549 msg.append("package.provided (see portage(5)) will be removed by\n")
13550 msg.append("depclean, even if they are part of the world set.\n")
13552 msg.append("As a safety measure, depclean will not remove any packages\n")
13553 msg.append("unless *all* required dependencies have been resolved. As a\n")
13554 msg.append("consequence, it is often necessary to run %s\n" % \
13555 good("`emerge --update"))
13556 msg.append(good("--newuse --deep @system @world`") + \
13557 " prior to depclean.\n")
# Only print the warning for a full, non-quiet depclean.
13559 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13560 portage.writemsg_stdout("\n")
13562 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13564 xterm_titles = "notitles" not in settings.features
13565 myroot = settings["ROOT"]
13566 root_config = trees[myroot]["root_config"]
13567 getSetAtoms = root_config.setconfig.getSetAtoms
13568 vardb = trees[myroot]["vartree"].dbapi
# The system and world sets form the roots of the "required" graph; the
# `required_sets = {}` initializer is elided from this listing.
13570 required_set_names = ("system", "world")
13574 for s in required_set_names:
13575 required_sets[s] = InternalPackageSet(
13576 initial_atoms=getSetAtoms(s))
13579 # When removing packages, use a temporary version of world
13580 # which excludes packages that are intended to be eligible for
13582 world_temp_set = required_sets["world"]
13583 system_set = required_sets["system"]
# Warn loudly (and count down, unless --pretend) when either root set is
# empty, since depclean without roots would remove nearly everything.
13585 if not system_set or not world_temp_set:
13588 writemsg_level("!!! You have no system list.\n",
13589 level=logging.ERROR, noiselevel=-1)
13591 if not world_temp_set:
13592 writemsg_level("!!! You have no world file.\n",
13593 level=logging.WARNING, noiselevel=-1)
13595 writemsg_level("!!! Proceeding is likely to " + \
13596 "break your installation.\n",
13597 level=logging.WARNING, noiselevel=-1)
13598 if "--pretend" not in myopts:
13599 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13601 if action == "depclean":
13602 emergelog(xterm_titles, " >>> depclean")
# Validate and expand the user-supplied atoms (myfiles) into args_set.
13605 args_set = InternalPackageSet()
13608 if not is_valid_package_atom(x):
13609 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13610 level=logging.ERROR, noiselevel=-1)
13611 writemsg_level("!!! Please check ebuild(5) for full details.\n")
# dep_expand may raise when a short name matches several categories; the
# surrounding try: line is elided.
13614 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13615 except portage.exception.AmbiguousPackageName, e:
13616 msg = "The short ebuild name \"" + x + \
13617 "\" is ambiguous. Please specify " + \
13618 "one of the following " + \
13619 "fully-qualified ebuild names instead:"
13620 for line in textwrap.wrap(msg, 70):
13621 writemsg_level("!!! %s\n" % (line,),
13622 level=logging.ERROR, noiselevel=-1)
13624 writemsg_level(" %s\n" % colorize("INFORM", i),
13625 level=logging.ERROR, noiselevel=-1)
13626 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Bail out early if none of the atoms match an installed package.
13629 matched_packages = False
13632 matched_packages = True
13634 if not matched_packages:
13635 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the resolver ("remove" mode depgraph) over the installed tree.
13639 writemsg_level("\nCalculating dependencies ")
13640 resolver_params = create_depgraph_params(myopts, "remove")
13641 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13642 vardb = resolver.trees[myroot]["vartree"].dbapi
13644 if action == "depclean":
13647 # Pull in everything that's installed but not matched
13648 # by an argument atom since we don't want to clean any
13649 # package if something depends on it.
13651 world_temp_set.clear()
13656 if args_set.findAtomForPackage(pkg) is None:
13657 world_temp_set.add("=" + pkg.cpv)
13659 except portage.exception.InvalidDependString, e:
13660 show_invalid_depstring_notice(pkg,
13661 pkg.metadata["PROVIDE"], str(e))
# On an invalid PROVIDE string, keep the package (add it to the
# temporary world) rather than risk removing it.
13663 world_temp_set.add("=" + pkg.cpv)
13666 elif action == "prune":
13668 # Pull in everything that's installed since we don't want
13669 # to prune a package if something depends on it.
13670 world_temp_set.clear()
13671 world_temp_set.update(vardb.cp_all())
13675 # Try to prune everything that's slotted.
13676 for cp in vardb.cp_all():
13677 if len(vardb.cp_list(cp)) > 1:
13680 # Remove atoms from world that match installed packages
13681 # that are also matched by argument atoms, but do not remove
13682 # them if they match the highest installed version.
13685 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13686 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13687 raise AssertionError("package expected in matches: " + \
13688 "cp = %s, cpv = %s matches = %s" % \
13689 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs() results are ordered so the last entry is the highest
# installed version -- TODO confirm against vardbapi docs.
13691 highest_version = pkgs_for_cp[-1]
13692 if pkg == highest_version:
13693 # pkg is the highest version
13694 world_temp_set.add("=" + pkg.cpv)
13697 if len(pkgs_for_cp) <= 1:
13698 raise AssertionError("more packages expected: " + \
13699 "cp = %s, cpv = %s matches = %s" % \
13700 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13703 if args_set.findAtomForPackage(pkg) is None:
13704 world_temp_set.add("=" + pkg.cpv)
13706 except portage.exception.InvalidDependString, e:
13707 show_invalid_depstring_notice(pkg,
13708 pkg.metadata["PROVIDE"], str(e))
13710 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly modified) required sets; each set
# becomes a SetArg node whose atoms are pushed on the resolver's dep stack.
# The `set_args = {}` initializer line is elided from this listing.
13714 for s, package_set in required_sets.iteritems():
13715 set_atom = SETPREFIX + s
13716 set_arg = SetArg(arg=set_atom, set=package_set,
13717 root_config=resolver.roots[myroot])
13718 set_args[s] = set_arg
13719 for atom in set_arg.set:
13720 resolver._dep_stack.append(
13721 Dependency(atom=atom, root=myroot, parent=set_arg))
13722 resolver.digraph.add(set_arg, None)
13724 success = resolver._complete_graph()
13725 writemsg_level("\b\b... done!\n")
13727 resolver.display_problems()
# Helper: report dependencies that could not be satisfied during graph
# completion; returns truthy when resolution failed badly enough to abort
# (the return statements themselves are elided from this listing).
13732 def unresolved_deps():
13734 unresolvable = set()
13735 for dep in resolver._initially_unsatisfied_deps:
13736 if isinstance(dep.parent, Package) and \
13737 (dep.priority > UnmergeDepPriority.SOFT):
13738 unresolvable.add((dep.atom, dep.parent.cpv))
13740 if not unresolvable:
13743 if unresolvable and not allow_missing_deps:
13744 prefix = bad(" * ")
13746 msg.append("Dependencies could not be completely resolved due to")
13747 msg.append("the following required packages not being installed:")
13749 for atom, parent in unresolvable:
13750 msg.append(" %s pulled in by:" % (atom,))
13751 msg.append(" %s" % (parent,))
13753 msg.append("Have you forgotten to run " + \
13754 good("`emerge --update --newuse --deep @system @world`") + " prior")
13755 msg.append(("to %s? It may be necessary to manually " + \
13756 "uninstall packages that no longer") % action)
13757 msg.append("exist in the portage tree since " + \
13758 "it may not be possible to satisfy their")
13759 msg.append("dependencies. Also, be aware of " + \
13760 "the --with-bdeps option that is documented")
13761 msg.append("in " + good("`man emerge`") + ".")
13762 if action == "prune":
13764 msg.append("If you would like to ignore " + \
13765 "dependencies then use %s." % good("--nodeps"))
13766 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13767 level=logging.ERROR, noiselevel=-1)
13771 if unresolved_deps():
# Snapshot the required-package graph and count its Package nodes for the
# summary printed at the end.
13774 graph = resolver.digraph.copy()
13775 required_pkgs_total = 0
13777 if isinstance(node, Package):
13778 required_pkgs_total += 1
# Helper for --verbose: print which parents keep child_node installed.
13780 def show_parents(child_node):
13781 parent_nodes = graph.parent_nodes(child_node)
13782 if not parent_nodes:
13783 # With --prune, the highest version can be pulled in without any
13784 # real parent since all installed packages are pulled in. In that
13785 # case there's nothing to show here.
13788 for node in parent_nodes:
13789 parent_strs.append(str(getattr(node, "cpv", node)))
13792 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13793 for parent_str in parent_strs:
13794 msg.append(" %s\n" % (parent_str,))
13796 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Comparator for sorting installed packages by cpv (used via cmp_sort_key;
# the return statements are elided from this listing).
13798 def cmp_pkg_cpv(pkg1, pkg2):
13799 """Sort Package instances by cpv."""
13800 if pkg1.cpv > pkg2.cpv:
13802 elif pkg1.cpv == pkg2.cpv:
# Helper: collect the installed packages that are NOT in the required
# graph -- these are the removal candidates.
13807 def create_cleanlist():
13808 pkgs_to_remove = []
13810 if action == "depclean":
13813 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13816 arg_atom = args_set.findAtomForPackage(pkg)
13817 except portage.exception.InvalidDependString:
13818 # this error has already been displayed by now
13822 if pkg not in graph:
13823 pkgs_to_remove.append(pkg)
13824 elif "--verbose" in myopts:
13828 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13829 if pkg not in graph:
13830 pkgs_to_remove.append(pkg)
13831 elif "--verbose" in myopts:
13834 elif action == "prune":
13835 # Prune really uses all installed instead of world. It's not
13836 # a real reverse dependency so don't display it as such.
13837 graph.remove(set_args["world"])
13839 for atom in args_set:
13840 for pkg in vardb.match_pkgs(atom):
13841 if pkg not in graph:
13842 pkgs_to_remove.append(pkg)
13843 elif "--verbose" in myopts:
13846 if not pkgs_to_remove:
13848 ">>> No packages selected for removal by %s\n" % action)
13849 if "--verbose" not in myopts:
13851 ">>> To see reverse dependencies, use %s\n" % \
13853 if action == "prune":
13855 ">>> To ignore dependencies, use %s\n" % \
13858 return pkgs_to_remove
13860 cleanlist = create_cleanlist()
13863 clean_set = set(cleanlist)
13865 # Check if any of these packages are the sole providers of libraries
13866 # with consumers that have not been selected for removal. If so, these
13867 # packages and any dependencies need to be added to the graph.
13868 real_vardb = trees[myroot]["vartree"].dbapi
13869 linkmap = real_vardb.linkmap
13870 liblist = linkmap.listLibraryObjects()
13871 consumer_cache = {}
13872 provider_cache = {}
13876 writemsg_level(">>> Checking for lib consumers...\n")
# Pass 1: for each removal candidate, find the libraries it owns and the
# files that link against them (consumer_map is built here; its
# initializer line is elided from this listing).
13878 for pkg in cleanlist:
13879 pkg_dblink = real_vardb._dblink(pkg.cpv)
13880 provided_libs = set()
13882 for lib in liblist:
13883 if pkg_dblink.isowner(lib, myroot):
13884 provided_libs.add(lib)
13886 if not provided_libs:
13890 for lib in provided_libs:
# Cache findConsumers() results since the same lib may be owned by
# several removal candidates.
13891 lib_consumers = consumer_cache.get(lib)
13892 if lib_consumers is None:
13893 lib_consumers = linkmap.findConsumers(lib)
13894 consumer_cache[lib] = lib_consumers
13896 consumers[lib] = lib_consumers
# Drop consumers that belong to the same package being removed -- a
# package may freely consume its own libraries.
13901 for lib, lib_consumers in consumers.items():
13902 for consumer_file in list(lib_consumers):
13903 if pkg_dblink.isowner(consumer_file, myroot):
13904 lib_consumers.remove(consumer_file)
13905 if not lib_consumers:
# Map each remaining consumer to the alternative providers of the
# library's soname (soname_cache initializer elided).
13911 for lib, lib_consumers in consumers.iteritems():
13913 soname = soname_cache.get(lib)
13915 soname = linkmap.getSoname(lib)
13916 soname_cache[lib] = soname
13918 consumer_providers = []
13919 for lib_consumer in lib_consumers:
13920 providers = provider_cache.get(lib)
13921 if providers is None:
13922 providers = linkmap.findProviders(lib_consumer)
13923 provider_cache[lib_consumer] = providers
13924 if soname not in providers:
13925 # Why does this happen?
13927 consumer_providers.append(
13928 (lib_consumer, providers[soname]))
13930 consumers[lib] = consumer_providers
13932 consumer_map[pkg] = consumers
# Pass 2: resolve consumer/provider file paths to owning packages in one
# batched lookup.
13936 search_files = set()
13937 for consumers in consumer_map.itervalues():
13938 for lib, consumer_providers in consumers.iteritems():
13939 for lib_consumer, providers in consumer_providers:
13940 search_files.add(lib_consumer)
13941 search_files.update(providers)
13943 writemsg_level(">>> Assigning files to packages...\n")
13944 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13946 for pkg, consumers in consumer_map.items():
13947 for lib, consumer_providers in consumers.items():
13948 lib_consumers = set()
13950 for lib_consumer, providers in consumer_providers:
13951 owner_set = file_owners.get(lib_consumer)
13952 provider_dblinks = set()
13953 provider_pkgs = set()
# If more than one file provides this soname, the lib may survive
# via another provider package that is not being removed.
13955 if len(providers) > 1:
13956 for provider in providers:
13957 provider_set = file_owners.get(provider)
13958 if provider_set is not None:
13959 provider_dblinks.update(provider_set)
13961 if len(provider_dblinks) > 1:
13962 for provider_dblink in provider_dblinks:
13963 pkg_key = ("installed", myroot,
13964 provider_dblink.mycpv, "nomerge")
13965 if pkg_key not in clean_set:
13966 provider_pkgs.add(vardb.get(pkg_key))
13971 if owner_set is not None:
13972 lib_consumers.update(owner_set)
# Consumers that are themselves scheduled for removal don't count.
13974 for consumer_dblink in list(lib_consumers):
13975 if ("installed", myroot, consumer_dblink.mycpv,
13976 "nomerge") in clean_set:
13977 lib_consumers.remove(consumer_dblink)
13981 consumers[lib] = lib_consumers
13985 del consumer_map[pkg]
# Anything left in consumer_map must be kept; warn the user and list the
# consumers that pull each package back in.
13988 # TODO: Implement a package set for rebuilding consumer packages.
13990 msg = "In order to avoid breakage of link level " + \
13991 "dependencies, one or more packages will not be removed. " + \
13992 "This can be solved by rebuilding " + \
13993 "the packages that pulled them in."
13995 prefix = bad(" * ")
13996 from textwrap import wrap
13997 writemsg_level("".join(prefix + "%s\n" % line for \
13998 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14001 for pkg, consumers in consumer_map.iteritems():
14002 unique_consumers = set(chain(*consumers.values()))
14003 unique_consumers = sorted(consumer.mycpv \
14004 for consumer in unique_consumers)
14006 msg.append(" %s pulled in by:" % (pkg.cpv,))
14007 for consumer in unique_consumers:
14008 msg.append(" %s" % (consumer,))
14010 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14011 level=logging.WARNING, noiselevel=-1)
14013 # Add lib providers to the graph as children of lib consumers,
14014 # and also add any dependencies pulled in by the provider.
14015 writemsg_level(">>> Adding lib providers to graph...\n")
14017 for pkg, consumers in consumer_map.iteritems():
14018 for consumer_dblink in set(chain(*consumers.values())):
14019 consumer_pkg = vardb.get(("installed", myroot,
14020 consumer_dblink.mycpv, "nomerge"))
14021 if not resolver._add_pkg(pkg,
14022 Dependency(parent=consumer_pkg,
14023 priority=UnmergeDepPriority(runtime=True),
14025 resolver.display_problems()
# Re-complete the graph now that providers were added, then rebuild the
# clean list from the enlarged graph.
14028 writemsg_level("\nCalculating dependencies ")
14029 success = resolver._complete_graph()
14030 writemsg_level("\b\b... done!\n")
14031 resolver.display_problems()
14034 if unresolved_deps():
14037 graph = resolver.digraph.copy()
14038 required_pkgs_total = 0
14040 if isinstance(node, Package):
14041 required_pkgs_total += 1
14042 cleanlist = create_cleanlist()
14045 clean_set = set(cleanlist)
14047 # Use a topological sort to create an unmerge order such that
14048 # each package is unmerged before its dependencies. This is
14049 # necessary to avoid breaking things that may need to run
14050 # during pkg_prerm or pkg_postrm phases.
14052 # Create a new graph to account for dependencies between the
14053 # packages being unmerged.
14057 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14058 runtime = UnmergeDepPriority(runtime=True)
14059 runtime_post = UnmergeDepPriority(runtime_post=True)
14060 buildtime = UnmergeDepPriority(buildtime=True)
# priority_map literal; its opening line is elided from this listing.
14062 "RDEPEND": runtime,
14063 "PDEPEND": runtime_post,
14064 "DEPEND": buildtime,
14067 for node in clean_set:
14068 graph.add(node, None)
14070 node_use = node.metadata["USE"].split()
14071 for dep_type in dep_keys:
14072 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking: invalid deps of packages
# being uninstalled anyway should not abort the operation.
14076 portage.dep._dep_check_strict = False
14077 success, atoms = portage.dep_check(depstr, None, settings,
14078 myuse=node_use, trees=resolver._graph_trees,
14081 portage.dep._dep_check_strict = True
14083 # Ignore invalid deps of packages that will
14084 # be uninstalled anyway.
14087 priority = priority_map[dep_type]
14089 if not isinstance(atom, portage.dep.Atom):
14090 # Ignore invalid atoms returned from dep_check().
14094 matches = vardb.match_pkgs(atom)
14097 for child_node in matches:
14098 if child_node in clean_set:
14099 graph.add(child_node, node, priority=priority)
14102 if len(graph.order) == len(graph.root_nodes()):
14103 # If there are no dependencies between packages
14104 # let unmerge() group them by cat/pn.
14106 cleanlist = [pkg.cpv for pkg in graph.order]
14108 # Order nodes from lowest to highest overall reference count for
14109 # optimal root node selection.
14110 node_refcounts = {}
14111 for node in graph.order:
14112 node_refcounts[node] = len(graph.parent_nodes(node))
14113 def cmp_reference_count(node1, node2):
14114 return node_refcounts[node1] - node_refcounts[node2]
14115 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Repeatedly pop root nodes, relaxing the ignored priority only as far
# as needed when circular deps leave no true roots.
14117 ignore_priority_range = [None]
14118 ignore_priority_range.extend(
14119 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14120 while not graph.empty():
14121 for ignore_priority in ignore_priority_range:
14122 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14126 raise AssertionError("no root nodes")
14127 if ignore_priority is not None:
14128 # Some deps have been dropped due to circular dependencies,
14129 # so only pop one node in order to minimize the number that
14134 cleanlist.append(node.cpv)
14136 unmerge(root_config, myopts, "unmerge", cleanlist,
14137 ldpath_mtimes, ordered=ordered)
14139 if action == "prune":
14142 if not cleanlist and "--quiet" in myopts:
# Final summary of package counts (Python 2 print statements).
14145 print "Packages installed: "+str(len(vardb.cpv_all()))
14146 print "Packages in world: " + \
14147 str(len(root_config.sets["world"].getAtoms()))
14148 print "Packages in system: " + \
14149 str(len(root_config.sets["system"].getAtoms()))
14150 print "Required packages: "+str(required_pkgs_total)
14151 if "--pretend" in myopts:
14152 print "Number to remove: "+str(len(cleanlist))
14154 print "Number removed: "+str(len(cleanlist))
# NOTE(review): elided numbered listing -- the docstring delimiters and
# several framing lines (try:, continue, loop heads) are missing below.
14156 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14158 Construct a depgraph for the given resume list. This will raise
14159 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14161 @returns: (success, depgraph, dropped_tasks)
14164 skip_unsatisfied = True
14165 mergelist = mtimedb["resume"]["mergelist"]
14166 dropped_tasks = set()
# Retry loop (its `while True:` head is elided): load the resume command;
# on UnsatisfiedResumeDep, prune the offending tasks and try again.
14168 mydepgraph = depgraph(settings, trees,
14169 myopts, myparams, spinner)
14171 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14172 skip_masked=skip_masked)
14173 except depgraph.UnsatisfiedResumeDep, e:
14174 if not skip_unsatisfied:
14177 graph = mydepgraph.digraph
14178 unsatisfied_parents = dict((dep.parent, dep.parent) \
14179 for dep in e.value)
14180 traversed_nodes = set()
14181 unsatisfied_stack = list(unsatisfied_parents)
# Transitively collect parents whose own deps become unsatisfied once an
# unsatisfied package is dropped.
14182 while unsatisfied_stack:
14183 pkg = unsatisfied_stack.pop()
14184 if pkg in traversed_nodes:
14186 traversed_nodes.add(pkg)
14188 # If this package was pulled in by a parent
14189 # package scheduled for merge, removing this
14190 # package may cause the parent package's
14191 # dependency to become unsatisfied.
14192 for parent_node in graph.parent_nodes(pkg):
14193 if not isinstance(parent_node, Package) \
14194 or parent_node.operation not in ("merge", "nomerge"):
14197 graph.child_nodes(parent_node,
14198 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14199 if pkg in unsatisfied:
14200 unsatisfied_parents[parent_node] = parent_node
14201 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the unsatisfied tasks.
14203 pruned_mergelist = []
14204 for x in mergelist:
14205 if isinstance(x, list) and \
14206 tuple(x) not in unsatisfied_parents:
14207 pruned_mergelist.append(x)
14209 # If the mergelist doesn't shrink then this loop is infinite.
14210 if len(pruned_mergelist) == len(mergelist):
14211 # This happens if a package can't be dropped because
14212 # it's already installed, but it has unsatisfied PDEPEND.
14214 mergelist[:] = pruned_mergelist
14216 # Exclude installed packages that have been removed from the graph due
14217 # to failure to build/install runtime dependencies after the dependent
14218 # package has already been installed.
14219 dropped_tasks.update(pkg for pkg in \
14220 unsatisfied_parents if pkg.operation != "nomerge")
14221 mydepgraph.break_refs(unsatisfied_parents)
# Drop references so the pruned graph state can be garbage collected
# before the retry.
14223 del e, graph, traversed_nodes, \
14224 unsatisfied_parents, unsatisfied_stack
14228 return (success, mydepgraph, dropped_tasks)
# Top-level handler for emerge build-style actions (merge, --resume,
# --pretend, --fetchonly, --buildpkgonly, ...): validates stale resume
# data, builds or resumes a depgraph, optionally displays/asks, then hands
# the merge list to the Scheduler and auto-cleans afterwards.
# NOTE(review): elided numbered listing -- many framing lines (del
# statements, else:/return/sys.exit lines, loop heads) are missing below.
14230 def action_build(settings, trees, mtimedb,
14231 myopts, myaction, myfiles, spinner):
14233 # validate the state of the resume data
14234 # so that we can make assumptions later.
14235 for k in ("resume", "resume_backup"):
14236 if k not in mtimedb:
14238 resume_data = mtimedb[k]
# Each malformed entry is presumably deleted from mtimedb (the deletion
# lines are elided) so later code can trust its shape.
14239 if not isinstance(resume_data, dict):
14242 mergelist = resume_data.get("mergelist")
14243 if not isinstance(mergelist, list):
14246 for x in mergelist:
14247 if not (isinstance(x, list) and len(x) == 4):
14249 pkg_type, pkg_root, pkg_key, pkg_action = x
14250 if pkg_root not in trees:
14251 # Current $ROOT setting differs,
14252 # so the list must be stale.
14258 resume_opts = resume_data.get("myopts")
14259 if not isinstance(resume_opts, (dict, list)):
14262 favorites = resume_data.get("favorites")
14263 if not isinstance(favorites, list):
# --resume: fall back to resume_backup when the primary list is gone,
# then merge the saved options into myopts (minus interactive ones).
14268 if "--resume" in myopts and \
14269 ("resume" in mtimedb or
14270 "resume_backup" in mtimedb):
14272 if "resume" not in mtimedb:
14273 mtimedb["resume"] = mtimedb["resume_backup"]
14274 del mtimedb["resume_backup"]
14276 # "myopts" is a list for backward compatibility.
14277 resume_opts = mtimedb["resume"].get("myopts", [])
14278 if isinstance(resume_opts, list):
14279 resume_opts = dict((k,True) for k in resume_opts)
14280 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14281 resume_opts.pop(opt, None)
14282 myopts.update(resume_opts)
14284 if "--debug" in myopts:
14285 writemsg_level("myopts %s\n" % (myopts,))
14287 # Adjust config according to options of the command being resumed.
14288 for myroot in trees:
14289 mysettings = trees[myroot]["vartree"].settings
14290 mysettings.unlock()
14291 adjust_config(myopts, mysettings)
14293 del myroot, mysettings
14295 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags as locals.
14298 buildpkgonly = "--buildpkgonly" in myopts
14299 pretend = "--pretend" in myopts
14300 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14301 ask = "--ask" in myopts
14302 nodeps = "--nodeps" in myopts
14303 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14304 tree = "--tree" in myopts
14305 if nodeps and tree:
14307 del myopts["--tree"]
14308 portage.writemsg(colorize("WARN", " * ") + \
14309 "--tree is broken with --nodeps. Disabling...\n")
14310 debug = "--debug" in myopts
14311 verbose = "--verbose" in myopts
14312 quiet = "--quiet" in myopts
14313 if pretend or fetchonly:
14314 # make the mtimedb readonly
14315 mtimedb.filename = None
# Warn that auto-digest regeneration can mask Manifest corruption.
14316 if '--digest' in myopts or 'digest' in settings.features:
14317 if '--digest' in myopts:
14318 msg = "The --digest option"
14320 msg = "The FEATURES=digest setting"
14322 msg += " can prevent corruption from being" + \
14323 " noticed. The `repoman manifest` command is the preferred" + \
14324 " way to generate manifests and it is capable of doing an" + \
14325 " entire repository or category at once."
14326 prefix = bad(" * ")
14327 writemsg(prefix + "\n")
14328 from textwrap import wrap
14329 for line in wrap(msg, 72):
14330 writemsg("%s%s\n" % (prefix, line))
14331 writemsg(prefix + "\n")
# Pick the wording for the "these are the packages that would be ..."
# banner; the assignments to `action` are elided from this listing.
14333 if "--quiet" not in myopts and \
14334 ("--pretend" in myopts or "--ask" in myopts or \
14335 "--tree" in myopts or "--verbose" in myopts):
14337 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14339 elif "--buildpkgonly" in myopts:
14343 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14345 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14349 print darkgreen("These are the packages that would be %s, in order:") % action
14352 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14353 if not show_spinner:
14354 spinner.update = spinner.update_quiet
# Resume path: rebuild the depgraph from the saved mergelist.
14357 favorites = mtimedb["resume"].get("favorites")
14358 if not isinstance(favorites, list):
14362 print "Calculating dependencies ",
14363 myparams = create_depgraph_params(myopts, myaction)
14365 resume_data = mtimedb["resume"]
14366 mergelist = resume_data["mergelist"]
# --skipfirst: drop the first "merge" task from the saved list (the
# slice/assignment lines are elided from this listing).
14367 if mergelist and "--skipfirst" in myopts:
14368 for i, task in enumerate(mergelist):
14369 if isinstance(task, list) and \
14370 task and task[-1] == "merge":
14377 success, mydepgraph, dropped_tasks = resume_depgraph(
14378 settings, trees, mtimedb, myopts, myparams, spinner)
14379 except (portage.exception.PackageNotFound,
14380 depgraph.UnsatisfiedResumeDep), e:
14381 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14382 mydepgraph = e.depgraph
14385 from textwrap import wrap
14386 from portage.output import EOutput
# Diagnostic output when the resume list cannot be satisfied; `out` is
# presumably an EOutput instance (its constructor line is elided).
14389 resume_data = mtimedb["resume"]
14390 mergelist = resume_data.get("mergelist")
14391 if not isinstance(mergelist, list):
14393 if mergelist and debug or (verbose and not quiet):
14394 out.eerror("Invalid resume list:")
14397 for task in mergelist:
14398 if isinstance(task, list):
14399 out.eerror(indent + str(tuple(task)))
14402 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14403 out.eerror("One or more packages are either masked or " + \
14404 "have missing dependencies:")
14407 for dep in e.value:
14408 if dep.atom is None:
14409 out.eerror(indent + "Masked package:")
14410 out.eerror(2 * indent + str(dep.parent))
14413 out.eerror(indent + str(dep.atom) + " pulled in by:")
14414 out.eerror(2 * indent + str(dep.parent))
14416 msg = "The resume list contains packages " + \
14417 "that are either masked or have " + \
14418 "unsatisfied dependencies. " + \
14419 "Please restart/continue " + \
14420 "the operation manually, or use --skipfirst " + \
14421 "to skip the first package in the list and " + \
14422 "any other packages that may be " + \
14423 "masked or have missing dependencies."
14424 for line in wrap(msg, 72):
14426 elif isinstance(e, portage.exception.PackageNotFound):
14427 out.eerror("An expected package is " + \
14428 "not available: %s" % str(e))
14430 msg = "The resume list contains one or more " + \
14431 "packages that are no longer " + \
14432 "available. Please restart/continue " + \
14433 "the operation manually."
14434 for line in wrap(msg, 72):
14438 print "\b\b... done!"
# Report tasks silently dropped by resume_depgraph().
14442 portage.writemsg("!!! One or more packages have been " + \
14443 "dropped due to\n" + \
14444 "!!! masking or unsatisfied dependencies:\n\n",
14446 for task in dropped_tasks:
14447 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14448 portage.writemsg("\n", noiselevel=-1)
14451 if mydepgraph is not None:
14452 mydepgraph.display_problems()
14453 if not (ask or pretend):
14454 # delete the current list and also the backup
14455 # since it's probably stale too.
14456 for k in ("resume", "resume_backup"):
14457 mtimedb.pop(k, None)
14462 if ("--resume" in myopts):
14463 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the command-line targets.
14466 myparams = create_depgraph_params(myopts, myaction)
14467 if "--quiet" not in myopts and "--nodeps" not in myopts:
14468 print "Calculating dependencies ",
14470 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14472 retval, favorites = mydepgraph.select_files(myfiles)
14473 except portage.exception.PackageNotFound, e:
14474 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14476 except portage.exception.PackageSetNotFound, e:
14477 root_config = trees[settings["ROOT"]]["root_config"]
14478 display_missing_pkg_set(root_config, e.value)
14481 print "\b\b... done!"
14483 mydepgraph.display_problems()
# Interactive/verbose display branch (--ask/--tree/--verbose, not quiet).
14486 if "--pretend" not in myopts and \
14487 ("--ask" in myopts or "--tree" in myopts or \
14488 "--verbose" in myopts) and \
14489 not ("--quiet" in myopts and "--ask" not in myopts):
14490 if "--resume" in myopts:
14491 mymergelist = mydepgraph.altlist()
14492 if len(mymergelist) == 0:
14493 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14495 favorites = mtimedb["resume"]["favorites"]
14496 retval = mydepgraph.display(
14497 mydepgraph.altlist(reversed=tree),
14498 favorites=favorites)
14499 mydepgraph.display_problems()
14500 if retval != os.EX_OK:
14502 prompt="Would you like to resume merging these packages?"
14504 retval = mydepgraph.display(
14505 mydepgraph.altlist(reversed=("--tree" in myopts)),
14506 favorites=favorites)
14507 mydepgraph.display_problems()
14508 if retval != os.EX_OK:
# Decide which --ask prompt applies; mergecount bookkeeping lines are
# elided from this listing.
14511 for x in mydepgraph.altlist():
14512 if isinstance(x, Package) and x.operation == "merge":
14516 sets = trees[settings["ROOT"]]["root_config"].sets
14517 world_candidates = None
14518 if "--noreplace" in myopts and \
14519 not oneshot and favorites:
14520 # Sets that are not world candidates are filtered
14521 # out here since the favorites list needs to be
14522 # complete for depgraph.loadResumeCommand() to
14523 # operate correctly.
14524 world_candidates = [x for x in favorites \
14525 if not (x.startswith(SETPREFIX) and \
14526 not sets[x[1:]].world_candidate)]
14527 if "--noreplace" in myopts and \
14528 not oneshot and world_candidates:
14530 for x in world_candidates:
14531 print " %s %s" % (good("*"), x)
14532 prompt="Would you like to add these packages to your world favorites?"
14533 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14534 prompt="Nothing to merge; would you like to auto-clean packages?"
14537 print "Nothing to merge; quitting."
14540 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14541 prompt="Would you like to fetch the source files for these packages?"
14543 prompt="Would you like to merge these packages?"
14545 if "--ask" in myopts and userquery(prompt) == "No":
14550 # Don't ask again (e.g. when auto-cleaning packages after merge)
14551 myopts.pop("--ask", None)
# Pretend-only display branch (no --ask).
14553 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14554 if ("--resume" in myopts):
14555 mymergelist = mydepgraph.altlist()
14556 if len(mymergelist) == 0:
14557 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14559 favorites = mtimedb["resume"]["favorites"]
14560 retval = mydepgraph.display(
14561 mydepgraph.altlist(reversed=tree),
14562 favorites=favorites)
14563 mydepgraph.display_problems()
14564 if retval != os.EX_OK:
14567 retval = mydepgraph.display(
14568 mydepgraph.altlist(reversed=("--tree" in myopts)),
14569 favorites=favorites)
14570 mydepgraph.display_problems()
14571 if retval != os.EX_OK:
# --buildpkgonly sanity check under --pretend: all deps must already be
# merged (only "nomerge"/non-Package nodes may remain).
14573 if "--buildpkgonly" in myopts:
14574 graph_copy = mydepgraph.digraph.clone()
14575 removed_nodes = set()
14576 for node in graph_copy:
14577 if not isinstance(node, Package) or \
14578 node.operation == "nomerge":
14579 removed_nodes.add(node)
14580 graph_copy.difference_update(removed_nodes)
14581 if not graph_copy.hasallzeros(ignore_priority = \
14582 DepPrioritySatisfiedRange.ignore_medium):
14583 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14584 print "!!! You have to merge the dependencies before you can build this package.\n"
# Same --buildpkgonly check on the real (non-pretend) path.
14587 if "--buildpkgonly" in myopts:
14588 graph_copy = mydepgraph.digraph.clone()
14589 removed_nodes = set()
14590 for node in graph_copy:
14591 if not isinstance(node, Package) or \
14592 node.operation == "nomerge":
14593 removed_nodes.add(node)
14594 graph_copy.difference_update(removed_nodes)
14595 if not graph_copy.hasallzeros(ignore_priority = \
14596 DepPrioritySatisfiedRange.ignore_medium):
14597 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14598 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Execute: hand the merge list to the Scheduler (resume vs. fresh build).
# break_refs()/clear_caches() free depgraph memory before the long merge.
14601 if ("--resume" in myopts):
14602 favorites=mtimedb["resume"]["favorites"]
14603 mymergelist = mydepgraph.altlist()
14604 mydepgraph.break_refs(mymergelist)
14605 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14606 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14607 del mydepgraph, mymergelist
14608 clear_caches(trees)
14610 retval = mergetask.merge()
14611 merge_count = mergetask.curval
# Keep a backup of a multi-package resume list so --resume can recover
# if the fresh run's own resume data gets clobbered.
14613 if "resume" in mtimedb and \
14614 "mergelist" in mtimedb["resume"] and \
14615 len(mtimedb["resume"]["mergelist"]) > 1:
14616 mtimedb["resume_backup"] = mtimedb["resume"]
14617 del mtimedb["resume"]
14619 mtimedb["resume"]={}
14620 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14621 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14622 # a list type for options.
14623 mtimedb["resume"]["myopts"] = myopts.copy()
14625 # Convert Atom instances to plain str.
14626 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14628 pkglist = mydepgraph.altlist()
14629 mydepgraph.saveNomergeFavorites()
14630 mydepgraph.break_refs(pkglist)
14631 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14632 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14633 del mydepgraph, pkglist
14634 clear_caches(trees)
14636 retval = mergetask.merge()
14637 merge_count = mergetask.curval
# Post-merge: auto-clean (AUTOCLEAN=yes) or warn, then prune the registry
# of preserved libs that no longer exist on disk.
14639 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14640 if "yes" == settings.get("AUTOCLEAN"):
14641 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14642 unmerge(trees[settings["ROOT"]]["root_config"],
14643 myopts, "clean", [],
14644 ldpath_mtimes, autoclean=1)
14646 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14647 + " AUTOCLEAN is disabled. This can cause serious"
14648 + " problems due to overlapping packages.\n")
14649 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Complain to stderr that two mutually exclusive emerge actions
	were requested on the same command line."""
	complaint = "\n!!! Multiple actions requested... Please choose one only.\n"
	complaint += "!!! '%s' or '%s'\n\n" % (action1, action2)
	sys.stderr.write(complaint)
def insert_optional_args(args):
	"""
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.
	"""
	# Only the -j/--jobs option takes an optional argument here.
	jobs_opts = ("-j", "--jobs")
	arg_stack = args[:]
	arg_stack.reverse()
		arg = arg_stack.pop()
		# A "short" jobs option is a non-"--" option cluster that
		# contains "j" (e.g. "-j4" or "-bj").
		short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
		if not (short_job_opt or arg in jobs_opts):
			new_args.append(arg)
		# Insert an empty placeholder in order to
		# satisfy the requirements of optparse.
		new_args.append("--jobs")
		if short_job_opt and len(arg) > 2:
			if arg[:2] == "-j":
				job_count = int(arg[2:])
				saved_opts = arg[2:]
				# Preserve the remaining short options from the cluster.
				saved_opts = arg[1:].replace("j", "")
		if job_count is None and arg_stack:
				# The next argument may be the job count (e.g. "-j 4").
				job_count = int(arg_stack[-1])
			# Discard the job count from the stack
			# since we're consuming it here.
		if job_count is None:
			# unlimited number of jobs
			new_args.append("True")
			new_args.append(str(job_count))
		if saved_opts is not None:
			# Re-emit the short options that were clustered with "j".
			new_args.append("-" + saved_opts)
14715 def parse_opts(tmpcmdline, silent=False):
14720 global actions, options, shortmapping
14722 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14723 argument_options = {
14725 "help":"specify the location for portage configuration files",
14729 "help":"enable or disable color output",
14731 "choices":("y", "n")
14736 "help" : "Specifies the number of packages to build " + \
14742 "--load-average": {
14744 "help" :"Specifies that no new builds should be started " + \
14745 "if there are other builds running and the load average " + \
14746 "is at least LOAD (a floating-point number).",
14752 "help":"include unnecessary build time dependencies",
14754 "choices":("y", "n")
14757 "help":"specify conditions to trigger package reinstallation",
14759 "choices":["changed-use"]
14763 from optparse import OptionParser
14764 parser = OptionParser()
14765 if parser.has_option("--help"):
14766 parser.remove_option("--help")
14768 for action_opt in actions:
14769 parser.add_option("--" + action_opt, action="store_true",
14770 dest=action_opt.replace("-", "_"), default=False)
14771 for myopt in options:
14772 parser.add_option(myopt, action="store_true",
14773 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14774 for shortopt, longopt in shortmapping.iteritems():
14775 parser.add_option("-" + shortopt, action="store_true",
14776 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14777 for myalias, myopt in longopt_aliases.iteritems():
14778 parser.add_option(myalias, action="store_true",
14779 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14781 for myopt, kwargs in argument_options.iteritems():
14782 parser.add_option(myopt,
14783 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14785 tmpcmdline = insert_optional_args(tmpcmdline)
14787 myoptions, myargs = parser.parse_args(args=tmpcmdline)
14791 if myoptions.jobs == "True":
14795 jobs = int(myoptions.jobs)
14799 if jobs is not True and \
14803 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14804 (myoptions.jobs,), noiselevel=-1)
14806 myoptions.jobs = jobs
14808 if myoptions.load_average:
14810 load_average = float(myoptions.load_average)
14814 if load_average <= 0.0:
14815 load_average = None
14817 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14818 (myoptions.load_average,), noiselevel=-1)
14820 myoptions.load_average = load_average
14822 for myopt in options:
14823 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14825 myopts[myopt] = True
14827 for myopt in argument_options:
14828 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14832 if myoptions.searchdesc:
14833 myoptions.search = True
14835 for action_opt in actions:
14836 v = getattr(myoptions, action_opt.replace("-", "_"))
14839 multiple_actions(myaction, action_opt)
14841 myaction = action_opt
14845 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run the configuration sanity check for every root's vartree settings."""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Drop cached metadata held by every root's porttree/bintree/vartree
	db objects, then flush portage's global directory cache."""
	for tree_root in trees:
		root_trees = trees[tree_root]
		port_dbapi = root_trees["porttree"].dbapi
		port_dbapi.melt()
		port_dbapi._aux_cache.clear()
		bin_dbapi = root_trees["bintree"].dbapi
		bin_dbapi._aux_cache.clear()
		bin_dbapi._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""Create (or refresh) the portage trees from the environment and
	return the tuple (settings, trees, mtimedb)."""
	# Map selected environment variables onto create_trees() keywords.
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
	trees = portage.create_trees(trees=trees, **kwargs)

	# Attach a RootConfig (settings + set configuration) to every root.
	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	# Prefer a non-"/" root's settings when one exists.
	settings = trees["/"]["vartree"].settings
	for myroot in trees:
		settings = trees[myroot]["vartree"].settings

	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config."""

	# To enhance usability, make some vars case insensitive by forcing them to
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		while "noauto" in settings.features:
			settings.features.remove("noauto")
		settings["FEATURES"] = " ".join(settings.features)
		settings.backup_changes("FEATURES")

	# Parse CLEAN_DELAY as an integer, falling back to the default on error.
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Same treatment for EMERGE_WARNING_DELAY (default: 10 seconds).
	EMERGE_WARNING_DELAY = 10
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	if "--debug" in myopts:
		settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
		settings.backup_changes("PORTAGE_DEBUG")

	# Enable color unless NOCOLOR explicitly disables it.
	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		# Not a terminal: disable color automatically.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
14977 def apply_priorities(settings):
def nice(settings):
	"""Renice the current process according to PORTAGE_NICENESS,
	reporting (but not raising) any failure."""
		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError), e:
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Run PORTAGE_IONICE_COMMAND (with ${PID} expanded to the current
	pid) to adjust I/O priority; complain if it exits non-zero."""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
		ionice_cmd = shlex.split(ionice_cmd)

	# Substitute ${PID} in each argument of the configured command.
	from portage.util import varexpand
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

		rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.

	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	"""Log an error telling the user that 'set_name' does not exist,
	followed by the list of sets that do exist for this root."""
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	for s in sorted(root_config.sets):
		msg.append("    %s" % s)
	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""Expand @set arguments in myfiles into package atoms (or keep the
	set token for actions that expand sets themselves).  Supports inline
	set options ("@set[opt=val]") and simple left-to-right set algebra
	with the @, -@ and +@ operators.  Returns (newargs, retval)."""
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
		if a in ("system", "world"):
			# Accept bare "system"/"world" as set names for convenience.
			newargs.append(SETPREFIX+a)

	# separators for set arguments

	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"

	# Parse inline "name[key=val,...]" set options and push them into the
	# set configuration before expansion.
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
				start = x.find(ARG_START)
				end = x.find(ARG_END)
				if start > 0 and start < end:
					namepart = x[:start]
					argpart = x[start+1:end]

					# TODO: implement proper quoting
					args = argpart.split(",")
							k, v = a.split("=", 1)
							options[a] = "True"
					setconfig.update(namepart, options)
					newset += (x[:start-len(namepart)]+namepart)
					x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occured while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)

	# emerge relies on the existance of sets with names "world" and "system"
	required_sets = ("world", "system")

	for s in required_sets:
			missing_sets.append(s)
	if len(missing_sets) > 2:
		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
		missing_sets_str += ', and "%s"' % missing_sets[-1]
	elif len(missing_sets) == 2:
		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
		missing_sets_str = '"%s"' % missing_sets[-1]
	msg = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % missing_sets_str]
		msg.append("        sets defined: %s" % ", ".join(sets))
	msg.append("        This usually means that '%s'" % \
		(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
	msg.append("        is missing or corrupt.")
		writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

		if a.startswith(SETPREFIX):
			# support simple set operations (intersection, difference and union)
			# on the commandline. Expressions are evaluated strictly left-to-right
			if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
				expression = a[len(SETPREFIX):]
				# Peel operators off the right end, collecting operand
				# set names and operators in left-to-right order.
				while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
					is_pos = expression.rfind(IS_OPERATOR)
					diff_pos = expression.rfind(DIFF_OPERATOR)
					union_pos = expression.rfind(UNION_OPERATOR)
					op_pos = max(is_pos, diff_pos, union_pos)
					s1 = expression[:op_pos]
					s2 = expression[op_pos+len(IS_OPERATOR):]
					op = expression[op_pos:op_pos+len(IS_OPERATOR)]
						display_missing_pkg_set(root_config, s2)
					expr_sets.insert(0, s2)
					expr_ops.insert(0, op)
				if not expression in sets:
					display_missing_pkg_set(root_config, expression)
				expr_sets.insert(0, expression)
				# Fold the collected operands left-to-right.
				result = set(setconfig.getSetAtoms(expression))
				for i in range(0, len(expr_ops)):
					s2 = setconfig.getSetAtoms(expr_sets[i+1])
					if expr_ops[i] == IS_OPERATOR:
						result.intersection_update(s2)
					elif expr_ops[i] == DIFF_OPERATOR:
						result.difference_update(s2)
					elif expr_ops[i] == UNION_OPERATOR:
						raise NotImplementedError("unknown set operator %s" % expr_ops[i])
				newargs.extend(result)
				# Plain @set argument (no operators).
				s = a[len(SETPREFIX):]
					display_missing_pkg_set(root_config, s)
				setconfig.active.append(s)
					set_atoms = setconfig.getSetAtoms(s)
				except portage.exception.PackageSetNotFound, e:
					writemsg_level(("emerge: the given set '%s' " + \
						"contains a non-existent set named '%s'.\n") % \
						(s, e), level=logging.ERROR, noiselevel=-1)
				if myaction in unmerge_actions and \
						not sets[s].supportsOperation("unmerge"):
					sys.stderr.write("emerge: the given set '%s' does " % s + \
						"not support unmerge operations\n")
				elif not set_atoms:
					print "emerge: '%s' is an empty set" % s
				elif myaction not in do_not_expand:
					newargs.extend(set_atoms)
					newargs.append(SETPREFIX+s)
				for e in sets[s].errors:
	return (newargs, retval)
def repo_name_check(trees):
	"""Warn about repositories that lack a profiles/repo_name entry.
	Returns True when at least one repository is missing one."""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start from all configured trees and discard those whose
			# repository path resolves to a named repository.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
				missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	"""Warn (per root) when CONFIG_PROTECT is unset or empty."""
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
				msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Tell the user that short name 'arg' matched several packages.
	In --quiet mode just list the fully-qualified names; otherwise run a
	search so each candidate is shown with its description."""
	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print "    " + colorize("INFORM", cp)

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	# Output search results for each matching category/package.
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Verify that every root has a valid profile configured.  The
	"info"/"sync" actions and --version/--help are always allowed, since
	they are needed to diagnose or repair a broken profile."""
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
	# --- emerge_main() body: option parsing, config setup, action dispatch ---
	global portage # NFC why this is necessary now - genone
	portage._disable_legacy_globals()
	# Disable color until we're sure that it should be enabled (after
	# EMERGE_DEFAULT_OPTS has been parsed).
	portage.output.havecolor = 0
	# This first pass is just for options that need to be known as early as
	# possible, such as --config-root. They will be parsed again later,
	# together with EMERGE_DEFAULT_OPTS (which may vary depending on the
	# the value of --config-root).
	myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
	if "--debug" in myopts:
		os.environ["PORTAGE_DEBUG"] = "1"
	if "--config-root" in myopts:
		os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]

	# Portage needs to ensure a sane umask for the files it creates.
	settings, trees, mtimedb = load_emerge_config()
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
	rval = profile_check(trees, myaction, myopts)
	if rval != os.EX_OK:

	if portage._global_updates(trees, mtimedb["updates"]):
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi

	xterm_titles = "notitles" not in settings.features

	# Second, authoritative parse: EMERGE_DEFAULT_OPTS first, then the real
	# command line so that explicit options win.
	if "--ignore-default-opts" not in myopts:
		tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
	tmpcmdline.extend(sys.argv[1:])
	myaction, myopts, myfiles = parse_opts(tmpcmdline)

	if "--digest" in myopts:
		os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
		# Reload the whole config from scratch so that the portdbapi internal
		# config is updated with new FEATURES.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi

	# Apply emerge-specific config tweaks to every root's settings.
	for myroot in trees:
		mysettings = trees[myroot]["vartree"].settings
		mysettings.unlock()
		adjust_config(myopts, mysettings)
		if '--pretend' not in myopts and myaction in \
			(None, 'clean', 'depclean', 'prune', 'unmerge'):
			mysettings["PORTAGE_COUNTER_HASH"] = \
				trees[myroot]["vartree"].dbapi._counter_hash()
			mysettings.backup_changes("PORTAGE_COUNTER_HASH")
	del myroot, mysettings

	apply_priorities(settings)

	spinner = stdout_spinner()
	if "candy" in settings.features:
		spinner.update = spinner.update_scroll

	if "--quiet" not in myopts:
		portage.deprecated_profile_check(settings=settings)
		repo_name_check(trees)
		config_protect_check(trees)

	# Collect eclasses that overlays override from PORTDIR and warn about
	# them (can invalidate distributed metadata caches).
	eclasses_overridden = {}
	for mytrees in trees.itervalues():
		mydb = mytrees["porttree"].dbapi
		# Freeze the portdbapi for performance (memoize all xmatch results).
		eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
	if eclasses_overridden and \
		settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
		prefix = bad(" * ")
		if len(eclasses_overridden) == 1:
			writemsg(prefix + "Overlay eclass overrides " + \
				"eclass from PORTDIR:\n", noiselevel=-1)
			writemsg(prefix + "Overlay eclasses override " + \
				"eclasses from PORTDIR:\n", noiselevel=-1)
		writemsg(prefix + "\n", noiselevel=-1)
		for eclass_name in sorted(eclasses_overridden):
			writemsg(prefix + "  '%s/%s.eclass'\n" % \
				(eclasses_overridden[eclass_name], eclass_name),
		writemsg(prefix + "\n", noiselevel=-1)
		msg = "It is best to avoid overriding eclasses from PORTDIR " + \
			"because it will trigger invalidation of cached ebuild metadata " + \
			"that is distributed with the portage tree. If you must " + \
			"override eclasses from PORTDIR then you are advised to add " + \
			"FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
			"`emerge --regen` after each time that you run `emerge --sync`. " + \
			"Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
			"you would like to disable this warning."
		from textwrap import wrap
		for line in wrap(msg, 72):
			writemsg("%s%s\n" % (prefix, line), noiselevel=-1)

	# Easter egg: `emerge moo`.
	if "moo" in myfiles:
  Larry loves Gentoo (""" + platform.system() + """)
 _______________________
< Have you mooed today? >
 -----------------------

		# Warn when a raw .ebuild/.tbz2 path is given as a target.
		ext = os.path.splitext(x)[1]
		if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
			print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")

	root_config = trees[settings["ROOT"]]["root_config"]
	if myaction == "list-sets":
		sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))

	# only expand sets for actions taking package arguments
	oldargs = myfiles[:]
	if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
		myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
		if retval != os.EX_OK:

		# Need to handle empty sets specially, otherwise emerge will react
		# with the help message for empty argument lists
		if oldargs and not myfiles:
			print "emerge: no targets left after set expansion"

	if ("--tree" in myopts) and ("--columns" in myopts):
		print "emerge: can't specify both of \"--tree\" and \"--columns\"."

	if ("--quiet" in myopts):
		spinner.update = spinner.update_quiet
		portage.util.noiselimit = -1

	# Always create packages if FEATURES=buildpkg
	# Imply --buildpkg if --buildpkgonly
	if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
		if "--buildpkg" not in myopts:
			myopts["--buildpkg"] = True

	# Always try and fetch binary packages if FEATURES=getbinpkg
	if ("getbinpkg" in settings.features):
		myopts["--getbinpkg"] = True

	if "--buildpkgonly" in myopts:
		# --buildpkgonly will not merge anything, so
		# it cancels all binary package options.
		for opt in ("--getbinpkg", "--getbinpkgonly",
			"--usepkg", "--usepkgonly"):
			myopts.pop(opt, None)

	if "--fetch-all-uri" in myopts:
		myopts["--fetchonly"] = True

	if "--skipfirst" in myopts and "--resume" not in myopts:
		myopts["--resume"] = True

	# Option implication chain: --getbinpkgonly => --usepkgonly/--getbinpkg,
	# --getbinpkg => --usepkg, --usepkgonly => --usepkg.
	if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
		myopts["--usepkgonly"] = True

	if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
		myopts["--getbinpkg"] = True

	if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
		myopts["--usepkg"] = True

	# Also allow -K to apply --usepkg/-k
	if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
		myopts["--usepkg"] = True

	# Allow -p to remove --ask
	if ("--pretend" in myopts) and ("--ask" in myopts):
		print ">>> --pretend disables --ask... removing --ask from options."
		del myopts["--ask"]

	# forbid --ask when not in a terminal
	# note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
	if ("--ask" in myopts) and (not sys.stdin.isatty()):
		portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",

	if settings.get("PORTAGE_DEBUG", "") == "1":
		spinner.update = spinner.update_quiet

	if "python-trace" in settings.features:
		import portage.debug
		portage.debug.set_trace(True)

	if not ("--quiet" in myopts):
		if not sys.stdout.isatty() or ("--nospinner" in myopts):
			spinner.update = spinner.update_basic

	if myaction == 'version':
		print getportageversion(settings["PORTDIR"], settings["ROOT"],
			settings.profile_path, settings["CHOST"],
			trees[settings["ROOT"]]["vartree"].dbapi)
	elif "--help" in myopts:
		_emerge.help.help(myaction, myopts, portage.output.havecolor)

	if "--debug" in myopts:
		print "myaction", myaction
		print "myopts", myopts

	if not myaction and not myfiles and "--resume" not in myopts:
		_emerge.help.help(myaction, myopts, portage.output.havecolor)

	pretend = "--pretend" in myopts
	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
	buildpkgonly = "--buildpkgonly" in myopts

	# check if root user is the current user for the actions where emerge needs this
	if portage.secpass < 2:
		# We've already allowed "--version" and "--help" above.
		if "--pretend" not in myopts and myaction not in ("search","info"):
			need_superuser = not \
				(buildpkgonly and secpass >= 1) or \
				myaction in ("metadata", "regen") or \
				(myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
			if portage.secpass < 1 or \
				access_desc = "superuser"
				access_desc = "portage group"
			# Always show portage_group_warning() when only portage group
			# access is required but the user is not in the portage group.
			from portage.data import portage_group_warning
			if "--ask" in myopts:
				# With --ask, degrade gracefully to --pretend.
				myopts["--pretend"] = True
				del myopts["--ask"]
				print ("%s access is required... " + \
					"adding --pretend to options.\n") % access_desc
				if portage.secpass < 1 and not need_superuser:
					portage_group_warning()
				sys.stderr.write(("emerge: %s access is " + \
					"required.\n\n") % access_desc)
				if portage.secpass < 1 and not need_superuser:
					portage_group_warning()

	disable_emergelog = False
	for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
			disable_emergelog = True
	if myaction in ("search", "info"):
		disable_emergelog = True
	if disable_emergelog:
		""" Disable emergelog for everything except build or unmerge
		operations. This helps minimize parallel emerge.log entries that can
		confuse log parsers. We especially want it disabled during
		parallel-fetch, which uses --resume --fetchonly."""
		def emergelog(*pargs, **kargs):

	if not "--pretend" in myopts:
		emergelog(xterm_titles, "Started emerge on: "+\
			time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
		myelogstr=" ".join(myopts)
			myelogstr+=" "+myaction
			myelogstr += " " + " ".join(oldargs)
		emergelog(xterm_titles, " *** emerge " + myelogstr)

	def emergeexitsig(signum, frame):
		# Ignore further signals while shutting down, then exit 100+signum.
		signal.signal(signal.SIGINT, signal.SIG_IGN)
		signal.signal(signal.SIGTERM, signal.SIG_IGN)
		portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
		sys.exit(100+signum)
	signal.signal(signal.SIGINT, emergeexitsig)
	signal.signal(signal.SIGTERM, emergeexitsig)

		"""This gets out final log message in before we quit."""
		if "--pretend" not in myopts:
			emergelog(xterm_titles, " *** terminating.")
		if "notitles" not in settings.features:
	portage.atexit_register(emergeexit)

	if myaction in ("config", "metadata", "regen", "sync"):
		if "--pretend" in myopts:
			sys.stderr.write(("emerge: The '%s' action does " + \
				"not support '--pretend'.\n") % myaction)

	# Action dispatch.
	if "sync" == myaction:
		return action_sync(settings, trees, mtimedb, myopts, myaction)
	elif "metadata" == myaction:
		action_metadata(settings, portdb, myopts)
	elif myaction=="regen":
		validate_ebuild_environment(trees)
		return action_regen(settings, portdb, myopts.get("--jobs"),
			myopts.get("--load-average"))

	elif "config"==myaction:
		validate_ebuild_environment(trees)
		action_config(settings, trees, myopts, myfiles)

	elif "search"==myaction:
		validate_ebuild_environment(trees)
		action_search(trees[settings["ROOT"]]["root_config"],
			myopts, myfiles, spinner)
	elif myaction in ("clean", "unmerge") or \
		(myaction == "prune" and "--nodeps" in myopts):
		validate_ebuild_environment(trees)

		# Ensure atoms are valid before calling unmerge().
		# For backward compat, leading '=' is not required.
			if is_valid_package_atom(x) or \
				is_valid_package_atom("=" + x):
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)

		# When given a list of atoms, unmerge
		# them in the order given.
		ordered = myaction == "unmerge"
		if 1 == unmerge(root_config, myopts, myaction, myfiles,
			mtimedb["ldpath"], ordered=ordered):
			if not (buildpkgonly or fetchonly or pretend):
				post_emerge(root_config, myopts, mtimedb, os.EX_OK)

	elif myaction in ("depclean", "info", "prune"):

		# Ensure atoms are valid before calling unmerge().
		vardb = trees[settings["ROOT"]]["vartree"].dbapi
			if is_valid_package_atom(x):
					valid_atoms.append(
						portage.dep_expand(x, mydb=vardb, settings=settings))
				except portage.exception.AmbiguousPackageName, e:
					msg = "The short ebuild name \"" + x + \
						"\" is ambiguous. Please specify " + \
						"one of the following " + \
						"fully-qualified ebuild names instead:"
					for line in textwrap.wrap(msg, 70):
						writemsg_level("!!! %s\n" % (line,),
							level=logging.ERROR, noiselevel=-1)
						writemsg_level("    %s\n" % colorize("INFORM", i),
							level=logging.ERROR, noiselevel=-1)
					writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
				msg.append("'%s' is not a valid package atom." % (x,))
				msg.append("Please check ebuild(5) for full details.")
				writemsg_level("".join("!!! %s\n" % line for line in msg),
					level=logging.ERROR, noiselevel=-1)

		if myaction == "info":
			return action_info(settings, trees, myopts, valid_atoms)

		validate_ebuild_environment(trees)
		action_depclean(settings, trees, mtimedb["ldpath"],
			myopts, myaction, valid_atoms, spinner)
		if not (buildpkgonly or fetchonly or pretend):
			post_emerge(root_config, myopts, mtimedb, os.EX_OK)
	# "update", "system", or just process files:
		validate_ebuild_environment(trees)
		if "--pretend" not in myopts:
			display_news_notification(root_config, myopts)
		retval = action_build(settings, trees, mtimedb,
			myopts, myaction, myfiles, spinner)
		root_config = trees[settings["ROOT"]]["root_config"]
		post_emerge(root_config, myopts, mtimedb, retval)