2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
class stdout_spinner(object):
	"""Animated progress indicator written to stdout.

	NOTE(review): this excerpt omits a number of the original lines
	(the scroll-message list opener, several return/else lines), so
	some bodies below are visibly incomplete; gaps are flagged inline.
	"""

	# Messages cycled by update_scroll(); the "scroll_msgs = [" opening
	# line and the closing bracket are elided from this excerpt.
	"Gentoo Rocks ("+platform.system()+")",
	"Thank you for using Gentoo. :)",
	"Are you actually trying to read this?",
	"How many times have you stared at this?",
	"We are generating the cache right now",
	"You are paying too much attention.",
	"A theory is better than its explanation.",
	"Phasers locked on target, Captain.",
	"Thrashing is just virtual crashing.",
	"To be is to program.",
	"Real Users hate Real Programmers.",
	"When all else fails, read the instructions.",
	"Functionality breeds Contempt.",
	"The future lies ahead.",
	"3.1415926535897932384626433832795028841971694",
	"Sometimes insanity is the only alternative.",
	"Inaccuracy saves a world of explanation.",

	# Frames cycled by update_twirl().
	twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

	# NOTE(review): the "def __init__(self):" line and part of its body
	# appear elided from this excerpt; the surviving fragment follows.
	self.update = self.update_twirl
	# Pick a scroll message pseudo-randomly from the clock.
	self.scroll_sequence = self.scroll_msgs[
		int(time.time() * 100) % len(self.scroll_msgs)]
	# Minimum seconds between tty writes; see _return_early().
	self.min_display_latency = 0.05

	def _return_early(self):
		"""
		Flushing output to the tty too frequently wastes cpu time. Therefore,
		each update* method should return without doing any output when this
		method indicates the last write was too recent (rate limited by
		min_display_latency).
		"""
		cur_time = time.time()
		if cur_time - self.last_update < self.min_display_latency:
			# [...] presumably "return True" (elided from this excerpt)
		self.last_update = cur_time
		# [...] presumably a trailing "return False" (elided)

	def update_basic(self):
		# Emit a dot every 100th position; position cycles modulo 500.
		self.spinpos = (self.spinpos + 1) % 500
		if self._return_early():
			# [...] early "return" elided from this excerpt
		if (self.spinpos % 100) == 0:
			if self.spinpos == 0:
				sys.stdout.write(". ")
				# [...] an "else:" line appears elided; the next write
				# presumably belongs to it in the original
				sys.stdout.write(".")
		# [...] flush/return lines elided from this excerpt

	def update_scroll(self):
		# Scroll the chosen message; the full period is twice its length
		# (one pass in each direction).
		if self._return_early():
			# [...] early "return" elided from this excerpt
		if(self.spinpos >= len(self.scroll_sequence)):
			sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
				len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
			# [...] an "else:" line appears elided; the next write
			# presumably belongs to it in the original
			sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
		self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

	def update_twirl(self):
		# Advance one frame of the twirl animation.
		self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
		if self._return_early():
			# [...] early "return" elided from this excerpt
		sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

	def update_quiet(self):
		# No-op spinner used when output is suppressed; its body
		# (presumably just "return") is elided from this excerpt.
def userquery(prompt, responses=None, colours=None):
	"""Displays a prompt and a set of responses, then waits for a response
	which is checked against the responses and the first to match is
	returned. An empty response will match the first value in responses. The
	input buffer is *not* cleared prior to the prompt!

	responses: a List of Strings.
	colours: a List of Functions taking and returning a String, used to
	process the responses for display. Typically these will be functions
	like red() but could be e.g. lambda x: "DisplayString".
	If responses is omitted, defaults to ["Yes", "No"], [green, red].
	If only colours is omitted, defaults to [bold, ...].

	Returns a member of the List responses. (If called without optional
	arguments, returns "Yes" or "No".)
	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
	printed.

	NOTE(review): several lines (colour-list opener, default colours,
	try/while structure, return) are elided from this excerpt.
	"""
	if responses is None:
		responses = ["Yes", "No"]
		# [...] a "colours = [" opening line appears elided here
		create_color_func("PROMPT_CHOICE_DEFAULT"),
		create_color_func("PROMPT_CHOICE_OTHER")
	elif colours is None:
		# [...] default colours assignment elided from this excerpt
	# Repeat/truncate colours so it matches the length of responses.
	colours=(colours*len(responses))[:len(responses)]
	# [...] prompt printing and surrounding try/while lines elided
	response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
	for key in responses:
		# An empty response will match the first value in responses.
		if response.upper()==key[:len(response)].upper():
			# [...] presumably "return key" (elided from this excerpt)
	print "Sorry, response '%s' not understood." % response,
	except (EOFError, KeyboardInterrupt):
		# [...] interrupt handling (message + sys.exit) elided
# Recognized emerge action names.  NOTE(review): the closing bracket
# lines (and possibly further entries) are elided from this excerpt.
actions = frozenset([
	"clean", "config", "depclean",
	"info", "list-sets", "metadata",
	"prune", "regen", "search",
	"sync", "unmerge", "version",
# Long command-line option names.  NOTE(review): the list opener
# (presumably "options=[") and several entries are elided.
	"--ask", "--alphabetical",
	"--buildpkg", "--buildpkgonly",
	"--changelog", "--columns",
	"--fetchonly", "--fetch-all-uri",
	"--getbinpkg", "--getbinpkgonly",
	"--help", "--ignore-default-opts",
	"--newuse", "--nocolor",
	"--nodeps", "--noreplace",
	"--nospinner", "--oneshot",
	"--onlydeps", "--pretend",
	"--quiet", "--resume",
	"--searchdesc", "--selective",
	"--usepkg", "--usepkgonly",
# Short-to-long option mapping.  NOTE(review): the dict opener
# (presumably "shortmapping={") and several entries are elided.
	"b":"--buildpkg", "B":"--buildpkgonly",
	"c":"--clean", "C":"--unmerge",
	"d":"--debug", "D":"--deep",
	"f":"--fetchonly", "F":"--fetch-all-uri",
	"g":"--getbinpkg", "G":"--getbinpkgonly",
	"k":"--usepkg", "K":"--usepkgonly",
	"n":"--noreplace", "N":"--newuse",
	"o":"--onlydeps", "O":"--nodeps",
	"p":"--pretend", "P":"--prune",
	"s":"--search", "S":"--searchdesc",
	"v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
	"""Append a timestamped entry to the emerge log and optionally update
	the xterm title bar.

	NOTE(review): the try: wrapper, permission mode argument, seek and
	close calls are elided from this excerpt.
	"""
	if xterm_titles and short_msg:
		# Prefix the terminal title with the host name when available.
		if "HOSTNAME" in os.environ:
			short_msg = os.environ["HOSTNAME"]+": "+short_msg
		xtermTitle(short_msg)
	# [...] "try:" appears elided from this excerpt
	file_path = "/var/log/emerge.log"
	mylogfile = open(file_path, "a")
	# Keep the log readable/writable for the portage user/group even when
	# running privileged.
	portage.util.apply_secpass_permissions(file_path,
		uid=portage.portage_uid, gid=portage.portage_gid,
		# [...] remaining keyword argument(s) elided from this excerpt
	mylock = portage.locks.lockfile(mylogfile)
	# seek because we may have gotten held up by the lock.
	# if so, we may not be positioned at the end of the file.
	# [...] the seek call itself appears elided from this excerpt
	mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
	# [...] flush lines elided
	portage.locks.unlockfile(mylock)
	# [...] file close elided
	except (IOError,OSError,portage.exception.PortageException), e:
		# [...] a guard line appears elided here
		print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
	"""Print a visible countdown so the user can Control-C before the
	action starts.

	NOTE(review): the guard and loop lines are elided from this excerpt;
	"sec" below is presumably the loop variable of the elided loop.
	"""
	print ">>> Waiting",secs,"seconds before starting..."
	print ">>> (Control-C to abort)...\n"+doing+" in: ",
	# [...] countdown loop (and per-second sleep) elided
	sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
	# Strings pass through unchanged (the return for that case appears
	# elided from this excerpt).
	if isinstance(mysize, basestring):
		# [...] presumably "return mysize" (elided)
	if 0 != mysize % 1024:
		# Always round up to the next kB so that it doesn't show 0 kB when
		# some small file still needs to be fetched.
		mysize += 1024 - mysize % 1024
	mystr=str(mysize/1024)
	# [...] thousands-separator loop setup ("mycount") elided
	mystr=mystr[:mycount]+","+mystr[mycount:]
	# [...] final return (presumably mystr + " kB") elided
def getgccversion(chost):
	"""
	rtype: C{str}
	return: the current in-use gcc version
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"
	# [...] closing ")" elided from this excerpt

	# First preference: gcc-config's notion of the active compiler.
	mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# Second: the CHOST-prefixed gcc binary.
	mystatus, myoutput = commands.getstatusoutput(
		chost + "-" + gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	# Last resort: plain "gcc" from PATH.
	mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""Build the "Portage VERSION (profile, gcc, libc, kernel/arch)"
	banner string.

	NOTE(review): several guard/else/try lines are elided from this
	excerpt; gaps are flagged inline.
	"""
	profilever = "unavailable"
	# [...] a guard (presumably "if profile:") and "try:" elided
	realpath = os.path.realpath(profile)
	basepath = os.path.realpath(os.path.join(portdir, "profiles"))
	if realpath.startswith(basepath):
		# Show the profile path relative to $PORTDIR/profiles.
		profilever = realpath[1 + len(basepath):]
		# [...] "else:" fallback appears elided; symlink form:
		profilever = "!" + os.readlink(profile)
	# [...] exception handling elided from this excerpt
	del realpath, basepath

	# Determine the installed libc version from the virtuals.
	# [...] "libcver" initialization elided from this excerpt
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
	# [...] loop header(s) over libclist elided
	xs=portage.catpkgsplit(x)
	# [...] branch header (comma-append case) elided
	libcver+=","+"-".join(xs[1:])
	# [...] "else:" (first entry) elided
	libcver="-".join(xs[1:])
	# [...] "else:" (no libc match) elided
	libcver="unavailable"

	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()

	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
	#configure emerge engine parameters
	#
	# self: include _this_ package regardless of if it is merged.
	# selective: exclude the package if it is merged
	# recurse: go into the dependencies
	# deep: go into the dependencies of already merged packages
	# empty: pretend nothing is merged
	# complete: completely account for all known dependencies
	# remove: build graph for use in removing packages
	myparams = set(["recurse"])

	if myaction == "remove":
		myparams.add("remove")
		myparams.add("complete")
		# [...] an early "return myparams" appears elided here

	if "--update" in myopts or \
		"--newuse" in myopts or \
		"--reinstall" in myopts or \
		"--noreplace" in myopts:
		myparams.add("selective")
	if "--emptytree" in myopts:
		myparams.add("empty")
		# --emptytree overrides selective behavior.
		myparams.discard("selective")
	if "--nodeps" in myopts:
		myparams.discard("recurse")
	if "--deep" in myopts:
		# [...] presumably myparams.add("deep") (elided from excerpt)
	if "--complete-graph" in myopts:
		myparams.add("complete")
	# [...] final "return myparams" elided from this excerpt
390 # search functionality
class search(object):
	"""Search functionality for emerge --search.

	NOTE(review): this excerpt omits many original lines (class
	constants such as VERSION_RELEASE used below, loop headers, else
	branches, returns); gaps are flagged inline.
	"""

	def __init__(self, root_config, spinner, searchdesc,
		verbose, usepkg, usepkgonly):
		"""Searches the available and installed packages for the supplied search key.
		The list of available and installed packages is created at object instantiation.
		This makes successive searches faster."""
		self.settings = root_config.settings
		self.vartree = root_config.trees["vartree"]
		self.spinner = spinner
		self.verbose = verbose
		self.searchdesc = searchdesc
		self.root_config = root_config
		self.setconfig = root_config.setconfig
		self.matches = {"pkg" : []}
		# [...] initialization of self.mlen/self._dbs and construction of
		# "fake_portdb" appear elided from this excerpt.
		self.portdb = fake_portdb
		# Route portdb-style calls through this object's underscore-
		# prefixed multiplexing wrappers.
		for attrib in ("aux_get", "cp_all",
			"xmatch", "findname", "getFetchMap"):
			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
		# [...] line(s) elided from this excerpt
		portdb = root_config.trees["porttree"].dbapi
		bindb = root_config.trees["bintree"].dbapi
		vardb = root_config.trees["vartree"].dbapi
		# Select the databases searched, in order of preference.
		if not usepkgonly and portdb._have_root_eclass_dir:
			self._dbs.append(portdb)
		if (usepkg or usepkgonly) and bindb.cp_all():
			self._dbs.append(bindb)
		self._dbs.append(vardb)
		self._portdb = portdb

	# NOTE(review): a "def _cp_all(self):"-style header and the set
	# construction over self._dbs appear elided; surviving fragment:
		cp_all.update(db.cp_all())
		return list(sorted(cp_all))

	def _aux_get(self, *args, **kwargs):
		# [...] loop over self._dbs and "try:" elided; presumably the
		# first db that knows the cpv wins.
		return db.aux_get(*args, **kwargs)
		# [...] KeyError fallthrough / final raise elided

	def _findname(self, *args, **kwargs):
		# [...] loop header over self._dbs elided from this excerpt
		if db is not self._portdb:
			# We don't want findname to return anything
			# unless it's an ebuild in a portage tree.
			# Otherwise, it's already built and we don't
			# [...] remainder of comment and "continue" elided
		func = getattr(db, "findname", None)
		# [...] "if func:"-style guard elided
		value = func(*args, **kwargs)
		# [...] value return / fallback return elided

	def _getFetchMap(self, *args, **kwargs):
		# [...] loop header over self._dbs elided from this excerpt
		func = getattr(db, "getFetchMap", None)
		# [...] "if func:"-style guard elided
		value = func(*args, **kwargs)
		# [...] value return / fallback return elided

	def _visible(self, db, cpv, metadata):
		# Classify where the cpv came from so visibility rules match.
		installed = db is self.vartree.dbapi
		built = installed or db is not self._portdb
		# [...] default pkg_type assignment and "if installed:" elided
		pkg_type = "installed"
		# [...] a built/"binary" branch appears elided here
		return visible(self.settings,
			Package(type_name=pkg_type, root_config=self.root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata))

	def _xmatch(self, level, atom):
		"""
		This method does not expand old-style virtuals because it
		is restricted to returning matches for a single ${CATEGORY}/${PN}
		and old-style virual matches unreliable for that when querying
		multiple package databases. If necessary, old-style virtuals
		can be performed on atoms prior to calling this method.
		"""
		cp = portage.dep_getkey(atom)
		if level == "match-all":
			# [...] matches-set initialization and db loop header elided
			if hasattr(db, "xmatch"):
				matches.update(db.xmatch(level, atom))
				# [...] "else:" elided; plain match() fallback:
				matches.update(db.match(atom))
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "match-visible":
			# [...] matches-set initialization and db loop header elided
			if hasattr(db, "xmatch"):
				matches.update(db.xmatch(level, atom))
				# [...] "else:" header elided; manual visibility filter:
				db_keys = list(db._aux_cache_keys)
				for cpv in db.match(atom):
					metadata = izip(db_keys,
						db.aux_get(cpv, db_keys))
					if not self._visible(db, cpv, metadata):
						# [...] "continue"/add-to-matches lines elided
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "bestmatch-visible":
			# [...] result initialization and db loop header elided
			if hasattr(db, "xmatch"):
				cpv = db.xmatch("bestmatch-visible", atom)
				if not cpv or portage.cpv_getkey(cpv) != cp:
					# [...] "continue" elided
				if not result or cpv == portage.best([cpv, result]):
					# [...] result assignment and "else:" elided
				db_keys = Package.metadata_keys
				# break out of this loop with highest visible
				# match, checked in descending order
				for cpv in reversed(db.match(atom)):
					if portage.cpv_getkey(cpv) != cp:
						# [...] "continue" elided
					metadata = izip(db_keys,
						db.aux_get(cpv, db_keys))
					if not self._visible(db, cpv, metadata):
						# [...] "continue" elided
					if not result or cpv == portage.best([cpv, result]):
						# [...] result assignment / "break" elided
		# [...] an "else:" header appears elided before this raise
			raise NotImplementedError(level)
		# [...] "return result" elided from this excerpt

	def execute(self,searchkey):
		"""Performs the search for the supplied search key"""
		self.searchkey=searchkey
		self.packagematches = []
		# [...] "if self.searchdesc:"-style guard elided
		self.matches = {"pkg":[], "desc":[], "set":[]}
		# [...] "else:" elided
		self.matches = {"pkg":[], "set":[]}
		print "Searching... ",

		# [...] regex-mode flag initialization elided
		if self.searchkey.startswith('%'):
			# '%' prefix: treat the key as a regular expression.
			# [...] flag assignment elided
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			# '@' prefix handling; the flag assignment is elided.
			self.searchkey = self.searchkey[1:]
		# [...] regex-vs-literal branch header elided
		self.searchre=re.compile(self.searchkey,re.I)
		# [...] "else:" elided; literal (escaped) search:
		self.searchre=re.compile(re.escape(self.searchkey), re.I)
		for package in self.portdb.cp_all():
			self.spinner.update()
			# [...] full-category branch header elided
			match_string = package[:]
			# [...] "else:" elided; match on package name only:
			match_string = package.split("/")[-1]
			# [...] "masked" initialization elided
			if self.searchre.search(match_string):
				if not self.portdb.xmatch("match-visible", package):
					# [...] masked flag assignment elided
				self.matches["pkg"].append([package,masked])
			elif self.searchdesc: # DESCRIPTION searching
				full_package = self.portdb.xmatch("bestmatch-visible", package)
				# [...] "if not full_package:"-style guard elided
					#no match found; we don't want to query description
					full_package = portage.best(
						self.portdb.xmatch("match-all", package))
				# [...] skip guard and "try:" elided
				full_desc = self.portdb.aux_get(
					full_package, ["DESCRIPTION"])[0]
				# [...] "except KeyError:" elided
				print "emerge: search: aux_get() failed, skipping"
				# [...] "continue" elided
				if self.searchre.search(full_desc):
					self.matches["desc"].append([full_package,masked])

		# Also match against configured package sets.
		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self.spinner.update()
			# [...] full-category branch header elided
			match_string = setname
			# [...] "else:" elided
			match_string = setname.split("/")[-1]

			if self.searchre.search(match_string):
				self.matches["set"].append([setname, False])
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					self.matches["set"].append([setname, False])

		# [...] "self.mlen" initialization elided
		for mtype in self.matches:
			self.matches[mtype].sort()
			self.mlen += len(self.matches[mtype])

	# NOTE(review): a method header (presumably "def addCP(self, cp):")
	# appears elided; this fragment records a single known cp.
		if not self.portdb.xmatch("match-all", cp):
			# [...] early "return" elided
		# [...] "masked" initialization elided
		if not self.portdb.xmatch("bestmatch-visible", cp):
			# [...] masked flag assignment elided
		self.matches["pkg"].append([cp, masked])
		# [...] match-count increment elided

	# NOTE(review): the "def output(self):" header appears elided.
		"""Outputs the results of the search."""
		print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
		print "[ Applications found : "+white(str(self.mlen))+" ]"
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
				# [...] per-match setup and the "pkg" branch header elided
				full_package = self.portdb.xmatch(
					"bestmatch-visible", match)
				# [...] "if not full_package:"-style guard elided
					#no match found; we don't want to query description
					# [...] masked flag assignment elided
					full_package = portage.best(
						self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					# [...] full_package assignment elided
					match = portage.cpv_getkey(match)
				# [...] a "set"-type branch header appears elided here
					print green("*")+" "+white(match)
					print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
				# [...] guard and "try:" elided before the aux_get call
				desc, homepage, license = self.portdb.aux_get(
					full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
				# [...] "except KeyError:" elided
				print "emerge: search: aux_get() failed, skipping"
				# [...] "continue" and masked branch header elided
				print green("*")+" "+white(match)+" "+red("[ Masked ]")
				# [...] "else:" elided
				print green("*")+" "+white(match)
				myversion = self.getVersion(full_package, search.VERSION_RELEASE)

				# [...] size accumulator ("mysum"/"file_size_str")
				# initialization elided from this excerpt
				mycat = match.split("/")[0]
				mypkg = match.split("/")[1]
				mycpv = match + "-" + myversion
				myebuild = self.portdb.findname(mycpv)
				# [...] "if myebuild:"-style guard elided
				pkgdir = os.path.dirname(myebuild)
				from portage import manifest
				mf = manifest.Manifest(
					pkgdir, self.settings["DISTDIR"])
				# [...] "try:" elided
				uri_map = self.portdb.getFetchMap(mycpv)
				except portage.exception.InvalidDependString, e:
					file_size_str = "Unknown (%s)" % (e,)
				# [...] "else:"/"try:" elided before the size lookup
				mysum[0] = mf.getDistfilesSize(uri_map)
				# [...] exception header (missing digest case) elided
				file_size_str = "Unknown (missing " + \
					"digest for %s)" % (e,)

				# [...] fallback loop header over self._dbs elided
				if db is not vardb and \
					db.cpv_exists(mycpv):
					# [...] line(s) elided
					if not myebuild and hasattr(db, "bintree"):
						myebuild = db.bintree.getname(mycpv)
					# [...] "try:" elided
					mysum[0] = os.stat(myebuild).st_size
					# [...] OSError handling / "break" elided

				if myebuild and file_size_str is None:
					# Format the size in kB with thousands separators.
					mystr = str(mysum[0] / 1024)
					# [...] separator loop header ("mycount") elided
					mystr = mystr[:mycount] + "," + mystr[mycount:]
					file_size_str = mystr + " kB"

				print " ", darkgreen("Latest version available:"),myversion
				print " ", self.getInstallationStatus(mycat+'/'+mypkg)
				# [...] the "print" line this tuple feeds appears elided
					(darkgreen("Size of files:"), file_size_str)
				print " ", darkgreen("Homepage:")+" ",homepage
				print " ", darkgreen("Description:")+" ",desc
				print " ", darkgreen("License:")+" ",license

	def getInstallationStatus(self,package):
		# Report the best installed version of `package`, if any.
		installed_package = self.vartree.dep_bestmatch(package)
		# [...] "if installed_package:"-style guard elided
		version = self.getVersion(installed_package,search.VERSION_RELEASE)
		# [...] non-empty version guard elided
		result = darkgreen("Latest version installed:")+" "+version
		# [...] "else:" elided
		result = darkgreen("Latest version installed:")+" [ Not Installed ]"
		# [...] "return result" elided

	def getVersion(self,full_package,detail):
		# Extract version (optionally with -rN revision) from a cpv.
		if len(full_package) > 1:
			package_parts = portage.catpkgsplit(full_package)
			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
				result = package_parts[2]+ "-" + package_parts[3]
				# [...] "else:" elided
				result = package_parts[2]
		# [...] empty-input "else:" branch and "return result" elided
class RootConfig(object):
	"""This is used internally by depgraph to track information about a
	particular configuration root (remainder of docstring elided from
	this excerpt)."""

	# Maps Package type_name -> tree name.  NOTE(review): the
	# "pkg_tree_map = {" opener and closing brace are elided.
	"ebuild" : "porttree",
	"binary" : "bintree",
	"installed" : "vartree"
	# NOTE(review): a reverse-mapping construction appears here in the
	# original; only its loop header survives below.
	for k, v in pkg_tree_map.iteritems():
		# [...] loop body elided from this excerpt

	def __init__(self, settings, trees, setconfig):
		# [...] a "self.trees = trees"-style line appears elided here
		self.settings = settings
		self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
		self.root = self.settings["ROOT"]
		self.setconfig = setconfig
		self.sets = self.setconfig.getSets()
		self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg, args_set, root_config):
	"""Create a new atom for the world file if one does not exist. If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots. Unslotted system packages will not be
	stored in world (remainder of original docstring elided from this
	excerpt)."""

	arg_atom = args_set.findAtomForPackage(pkg)
	# [...] a "not arg_atom" guard appears elided here
	cp = portage.dep_getkey(arg_atom)
	# [...] the default world-atom assignment appears elided here
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	# A package counts as slotted when more than one SLOT exists, or when
	# the single available SLOT is not "0".
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	# [...] "if not slotted:"-style guard appears elided here
	# check the vdb in case this is multislot
	available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in vardb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		# [...] comment continuation elided from this excerpt
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		# unknown value, so just record an unslotted atom.
		# 2) SLOT comes from an installed package and there is no
		# matching SLOT in the portage tree.
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above). If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).
		# [...] selection of "mydb" (used below) appears elided here
		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			# [...] line elided from this excerpt
			# If there is no installed package matching the SLOT atom,
			# it probably changed SLOT spontaneously due to USE=multislot,
			# so just record an unslotted atom.
			if vardb.match(slot_atom):
				# Now verify that the argument is precise
				# enough to identify a specific slot.
				matches = mydb.match(arg_atom)
				matched_slots = set()
				# [...] "for cpv in matches:"-style header elided
				matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
				if len(matched_slots) == 1:
					new_world_atom = slot_atom

	if new_world_atom == sets["world"].findAtomForPackage(pkg):
		# Both atoms would be identical, so there's nothing to add.
		# [...] presumably "return None" (elided)

	# Unlike world atoms, system atoms are not greedy for slots, so they
	# can't be safely excluded from world if they are slotted.
	system_atom = sets["system"].findAtomForPackage(pkg)
	# [...] "if system_atom:"-style guard elided
	if not portage.dep_getkey(system_atom).startswith("virtual/"):
		# [...] presumably "return None" (elided)
	# System virtuals aren't safe to exclude from world since they can
	# match multiple old-style virtuals but only one of them will be
	# pulled in by update or depclean.
	providers = portdb.mysettings.getvirtuals().get(
		portage.dep_getkey(system_atom))
	if providers and len(providers) == 1 and providers[0] == cp:
		# [...] presumably "return None" (elided)
	return new_world_atom
def filter_iuse_defaults(iuse):
	"""Strip IUSE default markers; flags prefixed with '+' or '-' are
	handled specially (generator details elided from this excerpt)."""
	# [...] the iteration header (presumably "for flag in iuse:") elided
	if flag.startswith("+") or flag.startswith("-"):
		# [...] yield statements for both branches elided
class SlotObject(object):
	"""Base class whose __init__ populates every name declared in any
	__slots__ along the class hierarchy from keyword arguments."""
	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		# Walk the class hierarchy manually, collecting __slots__ from
		# each class and initializing each attribute from kwargs
		# (missing keys default to None).
		classes = [self.__class__]
		# [...] the traversal loop header(s) are elided from this excerpt
		classes.extend(c.__bases__)
		slots = getattr(c, "__slots__", None)
		# [...] guard and per-attribute loop header elided
		myvalue = kwargs.get(myattr, None)
		setattr(self, myattr, myvalue)

	# NOTE(review): a "def copy(self):" header appears elided here; its
	# docstring read roughly:
	#   "Create a new instance and copy all attributes
	#    defined from __slots__ (including those from
	#    inherited classes)."
	obj = self.__class__()
	classes = [self.__class__]
	# [...] the traversal loop header(s) are elided from this excerpt
	classes.extend(c.__bases__)
	slots = getattr(c, "__slots__", None)
	# [...] guard and per-attribute loop header elided
	setattr(obj, myattr, getattr(self, myattr))
	# [...] presumably "return obj" (elided)
class AbstractDepPriority(SlotObject):
	"""Base class for dependency priorities.  All rich comparisons
	delegate to the integer value produced by __int__ (supplied by
	subclasses; not visible in this excerpt)."""
	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return self.__int__() < other

	def __le__(self, other):
		return self.__int__() <= other

	def __eq__(self, other):
		return self.__int__() == other

	def __ne__(self, other):
		return self.__int__() != other

	def __gt__(self, other):
		return self.__int__() > other

	def __ge__(self, other):
		return self.__int__() >= other

	# NOTE(review): a "def copy(self):" header appears elided here.
		return copy.copy(self)
class DepPriority(AbstractDepPriority):
	"""Concrete dependency priority.  NOTE(review): most of this class
	(its __int__/__str__ bodies) is elided from this excerpt."""

	__slots__ = ("satisfied", "optional", "rebuild")

	# Surviving fragment, presumably from __str__: the runtime_post case.
	if self.runtime_post:
		return "runtime_post"
class BlockerDepPriority(DepPriority):
	# NOTE(review): the class body (presumably __int__/__str__
	# overrides) is elided from this excerpt.

# Shared singleton instance used wherever a blocker priority is needed.
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
	__slots__ = ("optional", "satisfied",)
	"""
	Combination of properties           Priority  Category
	[... table rows elided from this excerpt ...]
	(none of the above)                   -2       SOFT
	"""
	# NOTE(review): the MAX/SOFT/MIN constants and most of __int__ /
	# __str__ are elided; surviving fragments follow.

	# Fragment, presumably from __int__: runtime_post handling.
	if self.runtime_post:
		# [...] return value elided

	# Fragment, presumably from __str__: numeric value above SOFT means
	# a hard dependency.
	myvalue = self.__int__()
	if myvalue > self.SOFT:
		# [...] return statements elided
class DepPriorityNormalRange(object):
	"""
	DepPriority properties              Index  Category
	[... higher-index rows elided from this excerpt ...]
	runtime_post                           2     MEDIUM_SOFT
	[... row(s) elided ...]
	(none of the above)                    0     NONE
	"""
	# NOTE(review): the category index constants and "@classmethod"
	# decorator lines are elided from this excerpt.

	def _ignore_optional(cls, priority):
		# Only DepPriority instances can be ignored at this level.
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		return bool(priority.optional)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		return bool(priority.optional or priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		return not priority.buildtime

	# Aliases used by callers that think in MEDIUM/SOFT categories.
	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_optional

# Tuple of ignore predicates indexed by severity.  NOTE(review): the
# leading element (presumably "None,") and the closing ")" are elided.
DepPriorityNormalRange.ignore_priority = (
	DepPriorityNormalRange._ignore_optional,
	DepPriorityNormalRange._ignore_runtime_post,
	DepPriorityNormalRange._ignore_runtime
class DepPrioritySatisfiedRange(object):
	"""
	DepPriority                         Index  Category
	not satisfied and buildtime                  HARD
	not satisfied and runtime              7     MEDIUM
	not satisfied and runtime_post         6     MEDIUM_SOFT
	satisfied and buildtime and rebuild    5     SOFT
	satisfied and buildtime                4     SOFT
	satisfied and runtime                  3     SOFT
	satisfied and runtime_post             2     SOFT
	[... row(s) elided from this excerpt ...]
	(none of the above)                    0     NONE
	"""
	# NOTE(review): the category index constants and "@classmethod"
	# decorator lines are elided from this excerpt.

	def _ignore_optional(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		return bool(priority.optional)

	def _ignore_satisfied_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		if priority.optional:
			# [...] presumably "return True" (elided)
		if not priority.satisfied:
			# [...] presumably "return False" (elided)
		return bool(priority.runtime_post)

	def _ignore_satisfied_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		if priority.optional:
			# [...] presumably "return True" (elided)
		if not priority.satisfied:
			# [...] presumably "return False" (elided)
		return not priority.buildtime

	def _ignore_satisfied_buildtime(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		if priority.optional:
			# [...] presumably "return True" (elided)
		if not priority.satisfied:
			# [...] presumably "return False" (elided)
		if priority.buildtime:
			return not priority.rebuild
		# [...] trailing return elided from this excerpt

	def _ignore_satisfied_buildtime_rebuild(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		if priority.optional:
			# [...] presumably "return True" (elided)
		return bool(priority.satisfied)

	def _ignore_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		return bool(priority.optional or \
			priority.satisfied or \
			priority.runtime_post)

	def _ignore_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
			# [...] presumably "return False" (elided)
		return bool(priority.satisfied or \
			not priority.buildtime)

	# Aliases used by callers that think in MEDIUM/SOFT categories.
	ignore_medium = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft = _ignore_satisfied_buildtime_rebuild

# Tuple of ignore predicates indexed by severity.  NOTE(review): the
# leading element (presumably "None,") and the closing ")" are elided.
DepPrioritySatisfiedRange.ignore_priority = (
	DepPrioritySatisfiedRange._ignore_optional,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
	DepPrioritySatisfiedRange._ignore_runtime_post,
	DepPrioritySatisfiedRange._ignore_runtime
def _find_deep_system_runtime_deps(graph):
	"""Collect packages belonging to the system set, plus everything
	reachable from them through runtime-priority edges of the graph.

	NOTE(review): several loop headers and return statements are elided
	from this excerpt; gaps are flagged inline.
	"""
	deep_system_deps = set()
	# [...] stack initialization and graph-iteration header elided;
	# the filter below seeds the stack with system-set packages.
	if not isinstance(node, Package) or \
		node.operation == 'uninstall':
		# [...] "continue" elided
	if node.root_config.sets['system'].findAtomForPackage(node):
		node_stack.append(node)

	def ignore_priority(priority):
		"""
		Ignore non-runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			(priority.runtime or priority.runtime_post):
			# [...] return statements elided from this excerpt

	# Depth-first traversal over runtime edges.
	# [...] "while node_stack:"-style header elided
	node = node_stack.pop()
	if node in deep_system_deps:
		# [...] "continue" elided
	deep_system_deps.add(node)
	for child in graph.child_nodes(node, ignore_priority=ignore_priority):
		if not isinstance(child, Package) or \
			child.operation == 'uninstall':
			# [...] "continue" elided
		node_stack.append(child)

	return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This is implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do it's
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Intialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
def sync(self, acquire_lock=1):
	"""
	Call this method to synchronize state with the real vardb
	after one or more packages may have been installed or
	uninstalled.
	"""
	vdb_path = os.path.join(self.root, portage.VDB_PATH)
	try:
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		pass
	vdb_lock = None
	try:
		if acquire_lock and os.access(vdb_path, os.W_OK):
			vdb_lock = portage.locks.lockdir(vdb_path)
		self._sync()
	finally:
		if vdb_lock:
			portage.locks.unlockdir(vdb_lock)

def _sync(self):
	# NOTE(review): reconstructed from a partial view — confirm
	# against upstream history.

	real_vardb = self._root_config.trees["vartree"].dbapi
	current_cpv_set = frozenset(real_vardb.cpv_all())
	pkg_vardb = self.dbapi
	aux_get_history = self._aux_get_history

	# Remove any packages that have been uninstalled.
	for pkg in list(pkg_vardb):
		if pkg.cpv not in current_cpv_set:
			pkg_vardb.cpv_remove(pkg)
			aux_get_history.discard(pkg.cpv)

	# Validate counters and timestamps.
	slot_counters = {}
	root = self.root
	validation_keys = ["COUNTER", "_mtime_"]
	for cpv in current_cpv_set:
		pkg_hash_key = ("installed", root, cpv, "nomerge")
		pkg = pkg_vardb.get(pkg_hash_key)
		if pkg is not None:
			counter, mtime = real_vardb.aux_get(cpv, validation_keys)
			try:
				counter = long(counter)
			except ValueError:
				counter = 0

			if counter != pkg.counter or \
				mtime != pkg.mtime:
				# Stale cache entry; drop it and rebuild below.
				pkg_vardb.cpv_remove(pkg)
				aux_get_history.discard(pkg.cpv)
				pkg = None

		if pkg is None:
			pkg = self._pkg(cpv)

		# Keep only the highest counter for each slot atom.
		other_counter = slot_counters.get(pkg.slot_atom)
		if other_counter is not None:
			if other_counter > pkg.counter:
				continue

		slot_counters[pkg.slot_atom] = pkg.counter
		pkg_vardb.cpv_inject(pkg)

	real_vardb.flush_cache()
def _pkg(self, cpv):
	"""
	Build a Package instance for an installed cpv from the real vardb,
	normalizing a malformed COUNTER value to 0.
	"""
	root_config = self._root_config
	real_vardb = root_config.trees["vartree"].dbapi
	pkg = Package(cpv=cpv, installed=True,
		metadata=izip(self._db_keys,
			real_vardb.aux_get(cpv, self._db_keys)),
		root_config=root_config,
		type_name="installed")
	try:
		mycounter = long(pkg.metadata["COUNTER"])
	except ValueError:
		# Corrupt COUNTER; normalize it so comparisons stay sane.
		mycounter = 0
		pkg.metadata["COUNTER"] = str(mycounter)
	return pkg
def grab_global_updates(portdir):
	"""
	Collect all profile update commands (e.g. package moves) from
	$PORTDIR/profiles/updates.  Returns an empty list when the
	updates directory does not exist.
	"""
	from portage.update import grab_updates, parse_updates
	updpath = os.path.join(portdir, "profiles", "updates")
	try:
		rawupdates = grab_updates(updpath)
	except portage.exception.DirectoryNotFound:
		rawupdates = []
	upd_commands = []
	for mykey, mystat, mycontent in rawupdates:
		# Local names chosen so they don't shadow the module-level
		# 'commands' import; parse errors are intentionally ignored
		# here (best effort).
		parsed_commands, parse_errors = parse_updates(mycontent)
		upd_commands.extend(parsed_commands)
	return upd_commands
def perform_global_updates(mycpv, mydb, mycommands):
	"""
	Apply profile update commands to the *DEPEND metadata of a single
	cpv in the given dbapi, writing back only when something changed.
	"""
	from portage.update import update_dbentries
	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
	updates = update_dbentries(mycommands, aux_dict)
	if updates:
		mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
	"""
	Check if a package is visible. This can raise an InvalidDependString
	exception if LICENSE is invalid.
	TODO: optionally generate a list of masking reasons
	@rtype: Boolean
	@returns: True if the package is visible, False otherwise.
	"""
	if not pkg.metadata["SLOT"]:
		return False
	if not pkg.installed:
		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
			return False
	eapi = pkg.metadata["EAPI"]
	if not portage.eapi_is_supported(eapi):
		return False
	if not pkg.installed:
		# Deprecated EAPIs and missing keywords only mask packages
		# that are not already installed.
		if portage._eapi_is_deprecated(eapi):
			return False
		if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
			return False
	if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
		return False
	if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
		return False
	try:
		if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
			return False
	except portage.exception.InvalidDependString:
		return False
	return True
def get_masking_status(pkg, pkgsettings, root_config):
	"""
	Return the list of masking reasons for pkg, extending the core
	portage.getmaskingstatus() result with CHOST and SLOT checks.
	"""
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)
	if not pkg.installed:
		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])
	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")
	return mreasons
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""
	Fetch metadata for cpv from db and compute its masking reasons.
	Returns (metadata, mreasons); metadata is None (and mreasons is
	["corruption"]) when the aux_get lookup fails.
	"""
	try:
		metadata = dict(izip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		metadata = None

	if metadata and not built:
		# For unbuilt ebuilds, compute USE/CHOST from the config.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
		metadata['CHOST'] = pkgsettings.get('CHOST', '')

	if metadata is None:
		mreasons = ["corruption"]
	else:
		pkg = Package(type_name=pkg_type, root_config=root_config,
			cpv=cpv, built=built, installed=installed, metadata=metadata)
		mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
1448 def show_masked_packages(masked_packages):
1449 shown_licenses = set()
1450 shown_comments = set()
1451 # Maybe there is both an ebuild and a binary. Only
1452 # show one of them to avoid redundant appearance.
1454 have_eapi_mask = False
1455 for (root_config, pkgsettings, cpv,
1456 metadata, mreasons) in masked_packages:
1457 if cpv in shown_cpvs:
1460 comment, filename = None, None
1461 if "package.mask" in mreasons:
1462 comment, filename = \
1463 portage.getmaskingreason(
1464 cpv, metadata=metadata,
1465 settings=pkgsettings,
1466 portdb=root_config.trees["porttree"].dbapi,
1467 return_location=True)
1468 missing_licenses = []
1470 if not portage.eapi_is_supported(metadata["EAPI"]):
1471 have_eapi_mask = True
1473 missing_licenses = \
1474 pkgsettings._getMissingLicenses(
1476 except portage.exception.InvalidDependString:
1477 # This will have already been reported
1478 # above via mreasons.
1481 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482 if comment and comment not in shown_comments:
1485 shown_comments.add(comment)
1486 portdb = root_config.trees["porttree"].dbapi
1487 for l in missing_licenses:
1488 l_path = portdb.findLicensePath(l)
1489 if l in shown_licenses:
1491 msg = ("A copy of the '%s' license" + \
1492 " is located at '%s'.") % (l, l_path)
1495 shown_licenses.add(l)
1496 return have_eapi_mask
class Task(SlotObject):
	"""
	Base class for scheduler tasks.  Subclasses define _hash_key (a
	tuple uniquely identifying the task); equality, hashing, length,
	indexing, iteration, containment and str() all delegate to it.
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# Subclasses must populate _hash_key lazily.
			raise NotImplementedError(self)
		return hash_key

	def __eq__(self, other):
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

	def __hash__(self):
		# Cache the hash since the key tuple is immutable once set.
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value

	def __len__(self):
		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

	def __iter__(self):
		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

	def __str__(self):
		return str(self._get_hash_key())
class Blocker(Task):
	"""
	Represents a blocker atom ("!cat/pkg") for a particular root,
	hashed as ("blocks", root, atom, eapi).
	"""

	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			self._hash_key = \
				("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
class Package(Task):
	"""
	Represents a single ebuild/binary/installed package instance,
	hashed as (type_name, root, cpv, operation).  Ordering compares
	versions only within the same cp.
	NOTE(review): reconstructed from a partial view — confirm
	against upstream history.
	"""

	__hash__ = Task.__hash__
	__slots__ = ("built", "cpv", "depth",
		"installed", "metadata", "onlydeps", "operation",
		"root_config", "type_name",
		"category", "counter", "cp", "cpv_split",
		"inherited", "iuse", "mtime",
		"pf", "pv_split", "root", "slot", "slot_atom", "use")

	# Metadata keys required when constructing a Package.
	metadata_keys = [
		"CHOST", "COUNTER", "DEPEND", "EAPI",
		"INHERITED", "IUSE", "KEYWORDS",
		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.root = self.root_config.root
		# The wrapper keeps attribute mirrors (counter, iuse, slot,
		# use, ...) synchronized with later metadata updates.
		self.metadata = _PackageMetadataWrapper(self, self.metadata)
		self.cp = portage.cpv_getkey(self.cpv)
		slot = self.slot
		if not slot:
			# Avoid an InvalidAtom exception when creating slot_atom.
			# This package instance will be masked due to empty SLOT.
			slot = '0'
		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
		self.category, self.pf = portage.catsplit(self.cpv)
		self.cpv_split = portage.catpkgsplit(self.cpv)
		self.pv_split = self.cpv_split[1:]

	class _use(object):
		# Immutable view of the enabled USE flags.

		__slots__ = ("__weakref__", "enabled")

		def __init__(self, use):
			self.enabled = frozenset(use)

	class _iuse(object):
		# Parsed IUSE: "+flag" -> enabled, "-flag" -> disabled,
		# anything else -> neither.  'regex' is built lazily.

		__slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

		def __init__(self, tokens, iuse_implicit):
			self.tokens = tuple(tokens)
			self.iuse_implicit = iuse_implicit
			enabled = []
			disabled = []
			other = []
			for x in tokens:
				prefix = x[:1]
				if prefix == "+":
					enabled.append(x[1:])
				elif prefix == "-":
					disabled.append(x[1:])
				else:
					other.append(x)
			self.enabled = frozenset(enabled)
			self.disabled = frozenset(disabled)
			self.all = frozenset(chain(enabled, disabled, other))

		def __getattribute__(self, name):
			if name == "regex":
				try:
					return object.__getattribute__(self, "regex")
				except AttributeError:
					all = object.__getattribute__(self, "all")
					iuse_implicit = object.__getattribute__(self, "iuse_implicit")
					# Escape anything except ".*" which is supposed
					# to pass through from _get_implicit_iuse()
					regex = (re.escape(x) for x in chain(all, iuse_implicit))
					regex = "^(%s)$" % "|".join(regex)
					regex = regex.replace("\\.\\*", ".*")
					self.regex = re.compile(regex)
			return object.__getattribute__(self, name)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			if self.operation is None:
				self.operation = "merge"
				if self.onlydeps or self.installed:
					self.operation = "nomerge"
			self._hash_key = \
				(self.type_name, self.root, self.cpv, self.operation)
		return self._hash_key

	def __lt__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
			return True
		return False

	def __le__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
			return True
		return False

	def __gt__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
			return True
		return False

	def __ge__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
			return True
		return False
# Every auxdb metadata key that a Package may carry: all auxdbkeys
# except the UNUSED_ placeholders, minus the long-obsolete CDEPEND,
# plus the keys Package itself requires.
_all_metadata_keys = set(x for x in portage.auxdbkeys
	if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)
from portage.cache.mappings import slot_dict_class

# Dict-like base class generated by slot_dict_class(): instances are
# restricted (via __slots__) to exactly the keys in _all_metadata_keys.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
	"""
	Detect metadata updates and synchronize Package attributes.
	"""

	__slots__ = ("_pkg",)
	# Keys whose assignment triggers a _set_<key>() attribute sync.
	_wrapped_keys = frozenset(
		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

	def __init__(self, pkg, metadata):
		_PackageMetadataWrapperBase.__init__(self)
		self._pkg = pkg
		self.update(metadata)

	def __setitem__(self, k, v):
		_PackageMetadataWrapperBase.__setitem__(self, k, v)
		if k in self._wrapped_keys:
			# Dispatch to the matching _set_* synchronizer.
			getattr(self, "_set_" + k.lower())(k, v)

	def _set_inherited(self, k, v):
		if isinstance(v, basestring):
			v = frozenset(v.split())
		self._pkg.inherited = v

	def _set_iuse(self, k, v):
		self._pkg.iuse = self._pkg._iuse(
			v.split(), self._pkg.root_config.iuse_implicit)

	def _set_slot(self, k, v):
		self._pkg.slot = v

	def _set_use(self, k, v):
		self._pkg.use = self._pkg._use(v.split())

	def _set_counter(self, k, v):
		if isinstance(v, basestring):
			try:
				v = long(v.strip())
			except ValueError:
				# Corrupt COUNTER values normalize to 0.
				v = 0
		self._pkg.counter = v

	def _set__mtime_(self, k, v):
		if isinstance(v, basestring):
			try:
				v = long(v.strip())
			except ValueError:
				v = 0
		self._pkg.mtime = v
class EbuildFetchonly(SlotObject):
	"""
	Synchronously fetch (or list, in pretend mode) an ebuild's sources.
	When the ebuild is RESTRICT=fetch, a private build directory is set
	up so pkg_nofetch can be spawned safely.
	NOTE(review): reconstructed from a partial view — confirm
	against upstream history.
	"""

	__slots__ = ("fetch_all", "pkg", "pretend", "settings")

	def execute(self):
		settings = self.settings
		pkg = self.pkg
		portdb = pkg.root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		settings.setcpv(pkg)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		use_cache = 1 # always true
		portage.doebuild_environment(ebuild_path, "fetch",
			settings["ROOT"], settings, debug, use_cache, portdb)
		restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()

		if restrict_fetch:
			rval = self._execute_with_builddir()
		else:
			rval = portage.doebuild(ebuild_path, "fetch",
				settings["ROOT"], settings, debug=debug,
				listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
				mydbapi=portdb, tree="porttree")

			if rval != os.EX_OK:
				msg = "Fetch failed for '%s'" % (pkg.cpv,)
				eerror(msg, phase="unpack", key=pkg.cpv)

		return rval

	def _execute_with_builddir(self):
		# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
		# ensuring sane $PWD (bug #239560) and storing elog
		# messages. Use a private temp directory, in order
		# to avoid locking the main one.
		settings = self.settings
		global_tmpdir = settings["PORTAGE_TMPDIR"]
		from tempfile import mkdtemp
		try:
			private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
		except OSError as e:
			if e.errno != portage.exception.PermissionDenied.errno:
				raise
			raise portage.exception.PermissionDenied(global_tmpdir)
		settings["PORTAGE_TMPDIR"] = private_tmpdir
		settings.backup_changes("PORTAGE_TMPDIR")
		try:
			retval = self._execute()
		finally:
			# Always restore the global tmpdir and discard the
			# private one.
			settings["PORTAGE_TMPDIR"] = global_tmpdir
			settings.backup_changes("PORTAGE_TMPDIR")
			shutil.rmtree(private_tmpdir)
		return retval

	def _execute(self):
		settings = self.settings
		pkg = self.pkg
		root_config = pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		portage.prepare_build_dirs(self.pkg.root, self.settings, 0)

		retval = portage.doebuild(ebuild_path, "fetch",
			self.settings["ROOT"], self.settings, debug=debug,
			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
			mydbapi=portdb, tree="porttree")

		if retval != os.EX_OK:
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
			eerror(msg, phase="unpack", key=pkg.cpv)

		portage.elog.elog_process(self.pkg.cpv, self.settings)
		return retval
class PollConstants(object):

	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""

	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	v = 1
	for k in names:
		# Use the real select-module value when present; otherwise
		# fall back to a unique power-of-two bit.
		locals()[k] = getattr(select, k, v)
		v *= 2
	del k, v
class AsynchronousTask(SlotObject):
	"""
	Subclasses override _wait() and _poll() so that calls
	to public methods can be wrapped for implementing
	hooks such as exit listener notification.

	Sublasses should call self.wait() to notify exit listeners after
	the task is complete and self.returncode has been set.
	"""

	__slots__ = ("background", "cancelled", "returncode") + \
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")

	def start(self):
		"""
		Start an asynchronous task and then return as soon as possible.
		"""
		self._start_hook()
		self._start()

	def _start(self):
		raise NotImplementedError(self)

	def isAlive(self):
		return self.returncode is None

	def poll(self):
		self._wait_hook()
		return self._poll()

	def _poll(self):
		return self.returncode

	def wait(self):
		if self.returncode is None:
			self._wait()
		self._wait_hook()
		return self.returncode

	def _wait(self):
		return self.returncode

	def cancel(self):
		self.cancelled = True
		self.wait()

	def addStartListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._start_listeners is None:
			self._start_listeners = []
		self._start_listeners.append(f)

	def removeStartListener(self, f):
		if self._start_listeners is None:
			return
		self._start_listeners.remove(f)

	def _start_hook(self):
		if self._start_listeners is not None:
			# Consume the list before firing, so listeners run once.
			start_listeners = self._start_listeners
			self._start_listeners = None

			for f in start_listeners:
				f(self)

	def addExitListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._exit_listeners is None:
			self._exit_listeners = []
		self._exit_listeners.append(f)

	def removeExitListener(self, f):
		if self._exit_listeners is None:
			if self._exit_listener_stack is not None:
				self._exit_listener_stack.remove(f)
			return
		self._exit_listeners.remove(f)

	def _wait_hook(self):
		"""
		Call this method after the task completes, just before returning
		the returncode from wait() or poll(). This hook is
		used to trigger exit listeners when the returncode first
		becomes available.
		"""
		if self.returncode is not None and \
			self._exit_listeners is not None:

			# This prevents recursion, in case one of the
			# exit handlers triggers this method again by
			# calling wait(). Use a stack that gives
			# removeExitListener() an opportunity to consume
			# listeners from the stack, before they can get
			# called below. This is necessary because a call
			# to one exit listener may result in a call to
			# removeExitListener() for another listener on
			# the stack. That listener needs to be removed
			# from the stack since it would be inconsistent
			# to call it after it has been been passed into
			# removeExitListener().
			self._exit_listener_stack = self._exit_listeners
			self._exit_listeners = None

			self._exit_listener_stack.reverse()
			while self._exit_listener_stack:
				self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
	"""
	Base class for tasks driven by a scheduler poll() loop; defines
	the event masks and the unregister-on-error/hangup policy.
	"""

	__slots__ = ("scheduler",) + \
		("_registered",)

	_bufsize = 4096
	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
		_exceptional_events

	def _unregister(self):
		raise NotImplementedError(self)

	def _unregister_if_appropriate(self, event):
		if self._registered:
			if event & self._exceptional_events:
				# Error/invalid-fd events abort the task.
				self._unregister()
				self.cancel()
			elif event & PollConstants.POLLHUP:
				# Hangup means normal EOF; finish the task.
				self._unregister()
				self.wait()
class PipeReader(AbstractPollTask):

	"""
	Reads output from one or more files and saves it in memory,
	for retrieval via the getvalue() method. This is driven by
	the scheduler's poll() loop, so it runs entirely within the
	main thread.
	NOTE(review): reconstructed from a partial view — confirm
	against upstream history.
	"""

	__slots__ = ("input_files",) + \
		("_read_data", "_reg_ids")

	def _start(self):
		self._reg_ids = set()
		self._read_data = []
		for k, f in self.input_files.iteritems():
			# Non-blocking reads, so one slow pipe can't stall the
			# poll loop.
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
			self._reg_ids.add(self.scheduler.register(f.fileno(),
				self._registered_events, self._output_handler))
		self._registered = True

	def isAlive(self):
		return self._registered

	def cancel(self):
		if self.returncode is None:
			self.returncode = 1
		self.cancelled = True
		self.wait()

	def _wait(self):
		if self.returncode is not None:
			return self.returncode

		if self._registered:
			self.scheduler.schedule(self._reg_ids)
			self._unregister()

		self.returncode = os.EX_OK
		return self.returncode

	def getvalue(self):
		"""Retrieve the entire contents"""
		if sys.hexversion >= 0x3000000:
			return bytes().join(self._read_data)
		return "".join(self._read_data)

	def close(self):
		"""Free the memory buffer."""
		self._read_data = None

	def _output_handler(self, fd, event):

		if event & PollConstants.POLLIN:

			# Find the file object matching this descriptor.
			for f in self.input_files.itervalues():
				if fd == f.fileno():
					break

			buf = array.array('B')
			try:
				buf.fromfile(f, self._bufsize)
			except EOFError:
				pass

			if buf:
				self._read_data.append(buf.tostring())
			else:
				# Empty read means EOF.
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_ids is not None:
			for reg_id in self._reg_ids:
				self.scheduler.unregister(reg_id)
			self._reg_ids = None

		if self.input_files is not None:
			for f in self.input_files.itervalues():
				f.close()
			self.input_files = None
class CompositeTask(AsynchronousTask):
	"""
	A task whose work is delegated to a chain of sub-tasks; the
	"current" sub-task advances via exit listeners.
	NOTE(review): reconstructed from a partial view — confirm
	against upstream history.
	"""

	__slots__ = ("scheduler",) + ("_current_task",)

	def isAlive(self):
		return self._current_task is not None

	def cancel(self):
		self.cancelled = True
		if self._current_task is not None:
			self._current_task.cancel()

	def _poll(self):
		"""
		This does a loop calling self._current_task.poll()
		repeatedly as long as the value of self._current_task
		keeps changing. It calls poll() a maximum of one time
		for a given self._current_task instance. This is useful
		since calling poll() on a task can trigger advance to
		the next task could eventually lead to the returncode
		being set in cases when polling only a single task would
		not have the same effect.
		"""

		prev = None
		while True:
			task = self._current_task
			if task is None or task is prev:
				# don't poll the same task more than once
				break
			task.poll()
			prev = task

		return self.returncode

	def _wait(self):

		prev = None
		while True:
			task = self._current_task
			if task is None:
				break
			if task is prev:
				# don't wait for the same task more than once
				# Before the task.wait() method returned, an exit
				# listener should have set self._current_task to either
				# a different task or None. Something is wrong.
				raise AssertionError("self._current_task has not " + \
					"changed since calling wait", self, task)
			task.wait()
			prev = task

		return self.returncode

	def _assert_current(self, task):
		"""
		Raises an AssertionError if the given task is not the
		same one as self._current_task. This can be useful
		for detecting bugs.
		"""
		if task is not self._current_task:
			raise AssertionError("Unrecognized task: %s" % (task,))

	def _default_exit(self, task):
		"""
		Calls _assert_current() on the given task and then sets the
		composite returncode attribute if task.returncode != os.EX_OK.
		If the task failed then self._current_task will be set to None.
		Subclasses can use this as a generic task exit callback.

		@rtype: int
		@returns: The task.returncode attribute.
		"""
		self._assert_current(task)
		if task.returncode != os.EX_OK:
			self.returncode = task.returncode
			self._current_task = None
		return task.returncode

	def _final_exit(self, task):
		"""
		Assumes that task is the final task of this composite task.
		Calls _default_exit() and sets self.returncode to the task's
		returncode and sets self._current_task to None.
		"""
		self._default_exit(task)
		self._current_task = None
		self.returncode = task.returncode
		return self.returncode

	def _default_final_exit(self, task):
		"""
		This calls _final_exit() and then wait().

		Subclasses can use this as a generic final task exit callback.

		"""
		self._final_exit(task)
		return self.wait()

	def _start_task(self, task, exit_handler):
		"""
		Register exit handler for the given task, set it
		as self._current_task, and call task.start().

		Subclasses can use this as a generic way to start
		a task.

		"""
		task.addExitListener(exit_handler)
		self._current_task = task
		task.start()
class TaskSequence(CompositeTask):
	"""
	A collection of tasks that executes sequentially. Each task
	must have a addExitListener() method that can be used as
	a means to trigger movement from one task to the next.
	"""

	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		self._task_queue = deque()

	def add(self, task):
		self._task_queue.append(task)

	def _start(self):
		self._start_next_task()

	def cancel(self):
		# Drop pending tasks before cancelling the current one.
		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		self._start_task(self._task_queue.popleft(),
			self._task_exit_handler)

	def _task_exit_handler(self, task):
		if self._default_exit(task) != os.EX_OK:
			# A failure stops the sequence.
			self.wait()
		elif self._task_queue:
			self._start_next_task()
		else:
			self._final_exit(task)
			self.wait()
class SubProcess(AbstractPollTask):
	"""
	Base class for tasks backed by a forked child process, monitored
	by the scheduler via a file descriptor.
	NOTE(review): reconstructed from a partial view — confirm
	against upstream history.
	"""

	__slots__ = ("pid",) + \
		("_files", "_reg_id")

	# A file descriptor is required for the scheduler to monitor changes from
	# inside a poll() loop. When logging is not enabled, create a pipe just to
	# serve this purpose alone.
	_dummy_pipe_fd = 9

	def _poll(self):
		if self.returncode is not None:
			return self.returncode
		if self.pid is None:
			return self.returncode
		if self._registered:
			return self.returncode

		try:
			retval = os.waitpid(self.pid, os.WNOHANG)
		except OSError as e:
			if e.errno != errno.ECHILD:
				raise
			del e
			# Already reaped elsewhere; synthesize a failure status.
			retval = (self.pid, 1)

		if retval == (0, 0):
			return None
		self._set_returncode(retval)
		return self.returncode

	def cancel(self):
		if self.isAlive():
			try:
				os.kill(self.pid, signal.SIGTERM)
			except OSError as e:
				if e.errno != errno.ESRCH:
					raise
				del e

		self.cancelled = True
		if self.pid is not None:
			self.wait()
		return self.returncode

	def isAlive(self):
		return self.pid is not None and \
			self.returncode is None

	def _wait(self):

		if self.returncode is not None:
			return self.returncode

		if self._registered:
			self.scheduler.schedule(self._reg_id)
			self._unregister()
			if self.returncode is not None:
				return self.returncode

		try:
			wait_retval = os.waitpid(self.pid, 0)
		except OSError as e:
			if e.errno != errno.ECHILD:
				raise
			del e
			self._set_returncode((self.pid, 1))
		else:
			self._set_returncode(wait_retval)

		return self.returncode

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			self._reg_id = None

		if self._files is not None:
			for f in self._files.itervalues():
				f.close()
			self._files = None

	def _set_returncode(self, wait_retval):

		retval = wait_retval[1]

		if retval != os.EX_OK:
			# Decode the waitpid() status word: signal deaths are
			# shifted into the high byte, exit codes out of it.
			if retval & 0xff:
				retval = (retval & 0xff) << 8
			else:
				retval = retval >> 8

		self.returncode = retval
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	argument of spawn().
	NOTE(review): reconstructed from a partial view — confirm
	against upstream history.
	"""

	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \
		_spawn_kwarg_names

	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		logfile = self.logfile
		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = self._pipe(fd_pipes)
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		null_input = None
		fd_pipes_orig = fd_pipes.copy()
		if self.background:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = open('/dev/null', 'rb')
			fd_pipes[0] = null_input.fileno()
		else:
			fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'rb')
		if logfile is not None:

			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, mode='ab')
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0o660)

			if not self.background:
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

			output_handler = self._output_handler

		else:

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			null_input.close()

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()

	def _spawn(self, args, **kwargs):
		return portage.process.spawn(args, **kwargs)

	def _output_handler(self, fd, event):

		if event & PollConstants.POLLIN:

			files = self._files
			buf = array.array('B')
			try:
				buf.fromfile(files.process, self._bufsize)
			except EOFError:
				pass

			if buf:
				if not self.background:
					buf.tofile(files.stdout)
					files.stdout.flush()
				buf.tofile(files.log)
				files.log.flush()
			else:
				# Empty read means EOF.
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""

		if event & PollConstants.POLLIN:

			buf = array.array('B')
			try:
				buf.fromfile(self._files.process, self._bufsize)
			except EOFError:
				pass

			if not buf:
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	"""

	__slots__ = ("commands", "phase", "pkg", "settings")

	def _start(self):
		settings = self.settings
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		# Remove any stale exit-status marker before spawning.
		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# The exit-status file is authoritative when the shell dies
		# in a way that the wait status alone can't convey.
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
	"""
	Asynchronously fetch an ebuild's sources by spawning the 'ebuild'
	command in fetch/fetchall phase.
	NOTE(review): reconstructed from a partial view — confirm
	against upstream history.
	"""

	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
		("_build_dir",)

	def _start(self):

		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		settings.setcpv(self.pkg)

		# In prefetch mode, logging goes to emerge-fetch.log and the builddir
		# should not be touched since otherwise it could interfere with
		# another instance of the same cpv concurrently being built for a
		# different $ROOT (currently, builds only cooperate with prefetchers
		# that are spawned for the same $ROOT).
		if not self.prefetch:
			self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
			self._build_dir.lock()
			self._build_dir.clean_log()
			portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
			if self.logfile is None:
				self.logfile = settings.get("PORTAGE_LOG_FILE")

		phase = "fetch"
		if self.fetchall:
			phase = "fetchall"

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		nocolor = settings.get("NOCOLOR")
		if nocolor is not None:
			fetch_env["NOCOLOR"] = nocolor

		fetch_env["PORTAGE_NICENESS"] = "0"
		if self.prefetch:
			fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
		if debug:
			fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			return os.pipe()
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				elog_out = None
				if self.logfile is not None:
					if self.background:
						elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
					elog_out.close()
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
			features = self._build_dir.settings.features
			if self.returncode == os.EX_OK:
				self._build_dir.clean_log()
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
# Manages the lock on a package's build directory ($PORTAGE_BUILDDIR),
# including the parent category directory lock needed while creating it.
2586 class EbuildBuildDir(SlotObject):
2588 __slots__ = ("dir_path", "pkg", "settings",
2589 "locked", "_catdir", "_lock_obj")
2591 def __init__(self, **kwargs):
2592 SlotObject.__init__(self, **kwargs)
# NOTE(review): lines 2593-2596 are elided — presumably `self.locked = False`
# and the `def lock(self):` header for the docstring below.
2597 This raises an AlreadyLocked exception if lock() is called
2598 while a lock is already held. In order to avoid this, call
2599 unlock() or check whether the "locked" attribute is True
2600 or False before calling lock().
2602 if self._lock_obj is not None:
2603 raise self.AlreadyLocked((self._lock_obj,))
# When no dir_path was given, derive PORTAGE_BUILDDIR by running
# doebuild_environment() for the setup phase.
2605 dir_path = self.dir_path
2606 if dir_path is None:
2607 root_config = self.pkg.root_config
2608 portdb = root_config.trees["porttree"].dbapi
2609 ebuild_path = portdb.findname(self.pkg.cpv)
2610 settings = self.settings
2611 settings.setcpv(self.pkg)
2612 debug = settings.get("PORTAGE_DEBUG") == "1"
2613 use_cache = 1 # always true
2614 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615 self.settings, debug, use_cache, portdb)
2616 dir_path = self.settings["PORTAGE_BUILDDIR"]
2618 catdir = os.path.dirname(dir_path)
2619 self._catdir = catdir
# Hold the category-dir lock only while creating the build dir, so that
# concurrent emerges creating sibling dirs do not race.
2621 portage.util.ensure_dirs(os.path.dirname(catdir),
2622 gid=portage.portage_gid,
2626 catdir_lock = portage.locks.lockdir(catdir)
2627 portage.util.ensure_dirs(catdir,
2628 gid=portage.portage_gid,
2630 self._lock_obj = portage.locks.lockdir(dir_path)
2632 self.locked = self._lock_obj is not None
2633 if catdir_lock is not None:
2634 portage.locks.unlockdir(catdir_lock)
2636 def clean_log(self):
2637 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2638 by keepwork or keeptemp in FEATURES."""
2639 settings = self.settings
# Remove the log-id marker and the build log itself (errors from missing
# files are presumably swallowed by elided try/except scaffolding).
2641 for x in ('.logid', 'temp/build.log'):
2643 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
# --- unlock(): release the build-dir lock and opportunistically remove the
# (now possibly empty) category directory.  def header elided from view.
2648 if self._lock_obj is None:
2651 portage.locks.unlockdir(self._lock_obj)
2652 self._lock_obj = None
2655 catdir = self._catdir
2658 catdir_lock = portage.locks.lockdir(catdir)
# Removal of the category dir may legitimately fail if it no longer
# exists, is not empty, or reappeared concurrently.
2664 if e.errno not in (errno.ENOENT,
2665 errno.ENOTEMPTY, errno.EEXIST):
2668 portage.locks.unlockdir(catdir_lock)
# Raised by lock() when the build dir is already locked by this instance.
2670 class AlreadyLocked(portage.exception.PortageException):
# Composite task that drives one ebuild from fetch through build to merge:
# wait for any background prefetcher, fetch, lock the build dir, run the
# build phases, optionally create a binary package, then merge.
2673 class EbuildBuild(CompositeTask):
2675 __slots__ = ("args_set", "config_pool", "find_blockers",
2676 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2677 "prefetcher", "settings", "world_atom") + \
2678 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
# --- _start(): def header elided from view; binds pkg metadata into
# settings and decides whether to wait on the prefetcher.
2682 logger = self.logger
2685 settings = self.settings
2686 world_atom = self.world_atom
2687 root_config = pkg.root_config
2690 portdb = root_config.trees[tree].dbapi
2691 settings.setcpv(pkg)
2692 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2693 ebuild_path = portdb.findname(self.pkg.cpv)
2694 self._ebuild_path = ebuild_path
# If a prefetcher is still running, register as its exit listener instead
# of starting a second fetch (it holds the distfile locks).
2696 prefetcher = self.prefetcher
2697 if prefetcher is None:
2699 elif not prefetcher.isAlive():
2701 elif prefetcher.poll() is None:
2703 waiting_msg = "Fetching files " + \
2704 "in the background. " + \
2705 "To view fetch progress, run `tail -f " + \
2706 "/var/log/emerge-fetch.log` in another " + \
2708 msg_prefix = colorize("GOOD", " * ")
2709 from textwrap import wrap
2710 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2711 for line in wrap(waiting_msg, 65))
2712 if not self.background:
2713 writemsg(waiting_msg, noiselevel=-1)
2715 self._current_task = prefetcher
2716 prefetcher.addExitListener(self._prefetch_exit)
# Prefetcher already finished: proceed immediately.
2719 self._prefetch_exit(prefetcher)
# Once prefetching is done: either run a synchronous pretend/fetchonly
# fetch (EbuildFetchonly) or start an asynchronous EbuildFetcher task.
2721 def _prefetch_exit(self, prefetcher):
2725 settings = self.settings
2728 fetcher = EbuildFetchonly(
2729 fetch_all=opts.fetch_all_uri,
2730 pkg=pkg, pretend=opts.pretend,
2732 retval = fetcher.execute()
2733 self.returncode = retval
2737 fetcher = EbuildFetcher(config_pool=self.config_pool,
2738 fetchall=opts.fetch_all_uri,
2739 fetchonly=opts.fetchonly,
2740 background=self.background,
2741 pkg=pkg, scheduler=self.scheduler)
2743 self._start_task(fetcher, self._fetch_exit)
# On fetch completion: record/clean the fetch log, bail on failure or
# --fetchonly, otherwise lock the build dir and start building.
2745 def _fetch_exit(self, fetcher):
2749 fetch_failed = False
2751 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2753 fetch_failed = self._default_exit(fetcher) != os.EX_OK
2755 if fetch_failed and fetcher.logfile is not None and \
2756 os.path.exists(fetcher.logfile):
2757 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2759 if not fetch_failed and fetcher.logfile is not None:
2760 # Fetch was successful, so remove the fetch log.
2762 os.unlink(fetcher.logfile)
2766 if fetch_failed or opts.fetchonly:
2770 logger = self.logger
2772 pkg_count = self.pkg_count
2773 scheduler = self.scheduler
2774 settings = self.settings
2775 features = settings.features
2776 ebuild_path = self._ebuild_path
2777 system_set = pkg.root_config.sets["system"]
2779 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2780 self._build_dir.lock()
2782 # Cleaning is triggered before the setup
2783 # phase, in portage.doebuild().
2784 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2785 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2786 short_msg = "emerge: (%s of %s) %s Clean" % \
2787 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2788 logger.log(msg, short_msg=short_msg)
2790 #buildsyspkg: Check if we need to _force_ binary package creation
2791 self._issyspkg = "buildsyspkg" in features and \
2792 system_set.findAtomForPackage(pkg) and \
2795 if opts.buildpkg or self._issyspkg:
2797 self._buildpkg = True
2799 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2800 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2801 short_msg = "emerge: (%s of %s) %s Compile" % \
2802 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2803 logger.log(msg, short_msg=short_msg)
# (else branch — elided `else:` header — logs the non-binpkg variant)
2806 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2807 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2808 short_msg = "emerge: (%s of %s) %s Compile" % \
2809 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2810 logger.log(msg, short_msg=short_msg)
2812 build = EbuildExecuter(background=self.background, pkg=pkg,
2813 scheduler=scheduler, settings=settings)
2814 self._start_task(build, self._build_exit)
# Flush elog messages and release the build-dir lock.
2816 def _unlock_builddir(self):
2817 portage.elog.elog_process(self.pkg.cpv, self.settings)
2818 self._build_dir.unlock()
# After the build phases: on success either package (buildpkg) or merge.
2820 def _build_exit(self, build):
2821 if self._default_exit(build) != os.EX_OK:
2822 self._unlock_builddir()
2827 buildpkg = self._buildpkg
2830 self._final_exit(build)
2835 msg = ">>> This is a system package, " + \
2836 "let's pack a rescue tarball.\n"
2838 log_path = self.settings.get("PORTAGE_LOG_FILE")
2839 if log_path is not None:
2840 log_file = open(log_path, 'a')
2846 if not self.background:
2847 portage.writemsg_stdout(msg, noiselevel=-1)
2849 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2850 scheduler=self.scheduler, settings=self.settings)
2852 self._start_task(packager, self._buildpkg_exit)
2854 def _buildpkg_exit(self, packager):
2856 Released build dir lock when there is a failure or
2857 when in buildpkgonly mode. Otherwise, the lock will
2858 be released when merge() is called.
2861 if self._default_exit(packager) != os.EX_OK:
2862 self._unlock_builddir()
2866 if self.opts.buildpkgonly:
2867 # Need to call "clean" phase for buildpkgonly mode
2868 portage.elog.elog_process(self.pkg.cpv, self.settings)
2870 clean_phase = EbuildPhase(background=self.background,
2871 pkg=self.pkg, phase=phase,
2872 scheduler=self.scheduler, settings=self.settings,
2874 self._start_task(clean_phase, self._clean_exit)
2877 # Continue holding the builddir lock until
2878 # after the package has been installed.
2879 self._current_task = None
2880 self.returncode = packager.returncode
2883 def _clean_exit(self, clean_phase):
2884 if self._final_exit(clean_phase) != os.EX_OK or \
2885 self.opts.buildpkgonly:
2886 self._unlock_builddir()
# --- install(): def header elided from view; docstring follows.
2891 Install the package and then clean up and release locks.
2892 Only call this after the build has completed successfully
2893 and neither fetchonly nor buildpkgonly mode are enabled.
2896 find_blockers = self.find_blockers
2897 ldpath_mtimes = self.ldpath_mtimes
2898 logger = self.logger
2900 pkg_count = self.pkg_count
2901 settings = self.settings
2902 world_atom = self.world_atom
2903 ebuild_path = self._ebuild_path
2906 merge = EbuildMerge(find_blockers=self.find_blockers,
2907 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2908 pkg_count=pkg_count, pkg_path=ebuild_path,
2909 scheduler=self.scheduler,
2910 settings=settings, tree=tree, world_atom=world_atom)
2912 msg = " === (%s of %s) Merging (%s::%s)" % \
2913 (pkg_count.curval, pkg_count.maxval,
2914 pkg.cpv, ebuild_path)
2915 short_msg = "emerge: (%s of %s) %s Merge" % \
2916 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2917 logger.log(msg, short_msg=short_msg)
# Merge synchronously, then always release the build-dir lock (the
# elided scaffolding is presumably a try/finally around execute()).
2920 rval = merge.execute()
2922 self._unlock_builddir()
# Composite task that runs the build phase sequence for one ebuild:
# clean -> setup -> unpack -> (prepare, configure, compile, test, install),
# using the scheduler's setup/unpack serialization hooks where needed.
2926 class EbuildExecuter(CompositeTask):
2928 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2930 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose ebuilds fetch at unpack time (cvs/git/svn/...); the
# members of this frozenset are elided from this view.
2932 _live_eclasses = frozenset([
# --- _start(): def header elided; kicks off the "clean" phase first.
2942 self._tree = "porttree"
2945 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2946 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2947 self._start_task(clean_phase, self._clean_phase_exit)
2949 def _clean_phase_exit(self, clean_phase):
2951 if self._default_exit(clean_phase) != os.EX_OK:
2956 scheduler = self.scheduler
2957 settings = self.settings
2960 # This initializes PORTAGE_LOG_FILE.
2961 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# Setup phases are funneled through the scheduler so only a limited
# number run concurrently.
2963 setup_phase = EbuildPhase(background=self.background,
2964 pkg=pkg, phase="setup", scheduler=scheduler,
2965 settings=settings, tree=self._tree)
2967 setup_phase.addExitListener(self._setup_exit)
2968 self._current_task = setup_phase
2969 self.scheduler.scheduleSetup(setup_phase)
2971 def _setup_exit(self, setup_phase):
2973 if self._default_exit(setup_phase) != os.EX_OK:
2977 unpack_phase = EbuildPhase(background=self.background,
2978 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2979 settings=self.settings, tree=self._tree)
2981 if self._live_eclasses.intersection(self.pkg.inherited):
2982 # Serialize $DISTDIR access for live ebuilds since
2983 # otherwise they can interfere with eachother.
2985 unpack_phase.addExitListener(self._unpack_exit)
2986 self._current_task = unpack_phase
2987 self.scheduler.scheduleUnpack(unpack_phase)
# (elided `else:`) non-live ebuilds start unpack directly.
2990 self._start_task(unpack_phase, self._unpack_exit)
2992 def _unpack_exit(self, unpack_phase):
2994 if self._default_exit(unpack_phase) != os.EX_OK:
# Queue the remaining phases as a sequence; EAPI 0/1 skip
# src_prepare/src_configure (the slicing line is elided).
2998 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3001 phases = self._phases
3002 eapi = pkg.metadata["EAPI"]
3003 if eapi in ("0", "1"):
3004 # skip src_prepare and src_configure
3007 for phase in phases:
3008 ebuild_phases.add(EbuildPhase(background=self.background,
3009 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3010 settings=self.settings, tree=self._tree))
3012 self._start_task(ebuild_phases, self._default_final_exit)
3014 class EbuildMetadataPhase(SubProcess):
3017 Asynchronous interface for the ebuild "depend" phase which is
3018 used to extract metadata from the ebuild.
3021 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3022 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3025 _file_names = ("ebuild",)
3026 _files_dict = slot_dict_class(_file_names, prefix="")
# --- _start(): def header elided; spawns `ebuild ... depend` with a pipe
# on which the child writes one metadata value per line.
3030 settings = self.settings
3032 ebuild_path = self.ebuild_path
3033 debug = settings.get("PORTAGE_DEBUG") == "1"
3037 if self.fd_pipes is not None:
3038 fd_pipes = self.fd_pipes.copy()
3042 fd_pipes.setdefault(0, sys.stdin.fileno())
3043 fd_pipes.setdefault(1, sys.stdout.fileno())
3044 fd_pipes.setdefault(2, sys.stderr.fileno())
3046 # flush any pending output
3047 for fd in fd_pipes.itervalues():
3048 if fd == sys.stdout.fileno():
3050 if fd == sys.stderr.fileno():
3053 fd_pipes_orig = fd_pipes.copy()
3054 self._files = self._files_dict()
# Non-blocking read end so the scheduler's poll loop never stalls on it.
3057 master_fd, slave_fd = os.pipe()
3058 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3059 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3061 fd_pipes[self._metadata_fd] = slave_fd
3063 self._raw_metadata = []
3064 files.ebuild = os.fdopen(master_fd, 'r')
3065 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3066 self._registered_events, self._output_handler)
3067 self._registered = True
# returnpid=True makes doebuild return the spawned pid list instead of
# waiting; an int return means it failed before spawning.
3069 retval = portage.doebuild(ebuild_path, "depend",
3070 settings["ROOT"], settings, debug,
3071 mydbapi=self.portdb, tree="porttree",
3072 fd_pipes=fd_pipes, returnpid=True)
3076 if isinstance(retval, int):
3077 # doebuild failed before spawning
3079 self.returncode = retval
# Take over ownership of the child pid from portage's global list.
3083 self.pid = retval[0]
3084 portage.process.spawned_pids.remove(self.pid)
# Poll callback: drain available metadata bytes; empty read means EOF.
3086 def _output_handler(self, fd, event):
3088 if event & PollConstants.POLLIN:
3089 self._raw_metadata.append(self._files.ebuild.read())
3090 if not self._raw_metadata[-1]:
3094 self._unregister_if_appropriate(event)
3095 return self._registered
3097 def _set_returncode(self, wait_retval):
3098 SubProcess._set_returncode(self, wait_retval)
3099 if self.returncode == os.EX_OK:
3100 metadata_lines = "".join(self._raw_metadata).splitlines()
3101 if len(portage.auxdbkeys) != len(metadata_lines):
3102 # Don't trust bash's returncode if the
3103 # number of lines is incorrect.
# (elided: presumably sets a failure returncode, then `else:`)
3106 metadata = izip(portage.auxdbkeys, metadata_lines)
3107 self.metadata_callback(self.cpv, self.ebuild_path,
3108 self.repo_path, metadata, self.ebuild_mtime)
# SpawnProcess subclass that runs a single ebuild phase via portage.doebuild.
3110 class EbuildProcess(SpawnProcess):
3112 __slots__ = ("phase", "pkg", "settings", "tree")
# --- _start(): def header elided from view.
3115 # Don't open the log file during the clean phase since the
3116 # open file can result in an nfs lock on $T/build.log which
3117 # prevents the clean phase from removing $T.
3118 if self.phase not in ("clean", "cleanrm"):
3119 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3120 SpawnProcess._start(self)
def _pipe(self, fd_pipes):
	"""Allocate the master/slave fd pair used to capture phase output.

	Delegates to portage._create_pty_or_pipe(), sizing the pty (when one
	can be allocated) from the terminal on the stdout fd.
	"""
	pty_or_pipe = portage._create_pty_or_pipe(
		copy_term_size=fd_pipes.get(1))
	# pty_or_pipe is (got_pty, master_fd, slave_fd); the got_pty flag
	# is not needed by callers of this method.
	return (pty_or_pipe[1], pty_or_pipe[2])
# Overrides SpawnProcess._spawn: instead of exec'ing args directly, invoke
# portage.doebuild() for this phase (which spawns the ebuild shell itself).
3128 def _spawn(self, args, **kwargs):
3130 root_config = self.pkg.root_config
# NOTE(review): line 3131 is elided — presumably `tree = self.tree`, since
# `tree` is used below; confirm against the full file.
3132 mydbapi = root_config.trees[tree].dbapi
3133 settings = self.settings
3134 ebuild_path = settings["EBUILD"]
3135 debug = settings.get("PORTAGE_DEBUG") == "1"
3137 rval = portage.doebuild(ebuild_path, self.phase,
3138 root_config.root, settings, debug,
3139 mydbapi=mydbapi, tree=tree, **kwargs)
# (elided: presumably `return rval` at line ~3141)
def _set_returncode(self, wait_retval):
	"""Post-process this phase's exit status after the child exits."""
	SpawnProcess._set_returncode(self, wait_retval)
	phase = self.phase
	settings = self.settings
	# The clean phases have no exit-status file to consult, so their
	# raw returncode is used as-is.
	if phase not in ("clean", "cleanrm"):
		self.returncode = portage._doebuild_exit_status_check_and_log(
			settings, phase, self.returncode)
	# FEATURES=test-fail-continue converts a failed src_test into a
	# success so the build can proceed.
	test_failed = phase == "test" and self.returncode != os.EX_OK
	if test_failed and "test-fail-continue" in settings.features:
		self.returncode = os.EX_OK
	portage._post_phase_userpriv_perms(settings)
# Composite task wrapping one EbuildProcess plus any registered post-phase
# commands (e.g. install_qa_check after src_install).
3156 class EbuildPhase(CompositeTask):
3158 __slots__ = ("background", "pkg", "phase",
3159 "scheduler", "settings", "tree")
3161 _post_phase_cmds = portage._post_phase_cmds
# --- _start(): def header elided from view.
3165 ebuild_process = EbuildProcess(background=self.background,
3166 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3167 settings=self.settings, tree=self.tree)
3169 self._start_task(ebuild_process, self._ebuild_exit)
3171 def _ebuild_exit(self, ebuild_process):
# After src_install, scan the build log for QA problems, writing the
# report to the log file when running in the background.
3173 if self.phase == "install":
3175 log_path = self.settings.get("PORTAGE_LOG_FILE")
3177 if self.background and log_path is not None:
3178 log_file = open(log_path, 'a')
3181 portage._check_build_log(self.settings, out=out)
3183 if log_file is not None:
3186 if self._default_exit(ebuild_process) != os.EX_OK:
3190 settings = self.settings
3192 if self.phase == "install":
3193 portage._post_src_install_uid_fix(settings)
# Run any misc post-phase commands registered for this phase; otherwise
# finish with the ebuild process's returncode.
3195 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3196 if post_phase_cmds is not None:
3197 post_phase = MiscFunctionsProcess(background=self.background,
3198 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3199 scheduler=self.scheduler, settings=settings)
3200 self._start_task(post_phase, self._post_phase_exit)
3203 self.returncode = ebuild_process.returncode
3204 self._current_task = None
3207 def _post_phase_exit(self, post_phase):
3208 if self._final_exit(post_phase) != os.EX_OK:
3209 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3211 self._current_task = None
# Runs the "package" phase to build a .tbz2 binary package from the staged
# image, writing to a pid-suffixed temp file and injecting it on success.
3215 class EbuildBinpkg(EbuildProcess):
3217 This assumes that src_install() has successfully completed.
3219 __slots__ = ("_binpkg_tmpfile",)
# --- _start(): def header elided from view.
3222 self.phase = "package"
3223 self.tree = "porttree"
3225 root_config = pkg.root_config
3226 portdb = root_config.trees["porttree"].dbapi
3227 bintree = root_config.trees["bintree"]
3228 ebuild_path = portdb.findname(self.pkg.cpv)
3229 settings = self.settings
3230 debug = settings.get("PORTAGE_DEBUG") == "1"
# Ensure no stale package file with this cpv can be confused with ours,
# then point the ebuild script at a unique temp file.
3232 bintree.prevent_collision(pkg.cpv)
3233 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3234 pkg.cpv + ".tbz2." + str(os.getpid()))
3235 self._binpkg_tmpfile = binpkg_tmpfile
3236 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3237 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
# The temp-file setting is removed again after spawning (elided
# scaffolding is presumably try/finally around _start).
3240 EbuildProcess._start(self)
3242 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3244 def _set_returncode(self, wait_retval):
3245 EbuildProcess._set_returncode(self, wait_retval)
# (elided line ~3247: presumably `pkg = self.pkg`, used below)
3248 bintree = pkg.root_config.trees["bintree"]
3249 binpkg_tmpfile = self._binpkg_tmpfile
3250 if self.returncode == os.EX_OK:
3251 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Thin synchronous wrapper around portage.merge() for a freshly built image;
# updates the world file via world_atom on success.
3253 class EbuildMerge(SlotObject):
3255 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3256 "pkg", "pkg_count", "pkg_path", "pretend",
3257 "scheduler", "settings", "tree", "world_atom")
# --- execute(): def header elided from view.
3260 root_config = self.pkg.root_config
3261 settings = self.settings
3262 retval = portage.merge(settings["CATEGORY"],
3263 settings["PF"], settings["D"],
3264 os.path.join(settings["PORTAGE_BUILDDIR"],
3265 "build-info"), root_config.root, settings,
3266 myebuild=settings["EBUILD"],
3267 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3268 vartree=root_config.trees["vartree"],
3269 prev_mtimes=self.ldpath_mtimes,
3270 scheduler=self.scheduler,
3271 blockers=self.find_blockers)
# Only a successful merge updates the world set / logs success.
3273 if retval == os.EX_OK:
3274 self.world_atom(self.pkg)
3279 def _log_success(self):
# (elided line ~3280: presumably `pkg = self.pkg`, used below)
3281 pkg_count = self.pkg_count
3282 pkg_path = self.pkg_path
3283 logger = self.logger
3284 if "noclean" not in self.settings.features:
3285 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3286 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3287 logger.log((" === (%s of %s) " + \
3288 "Post-Build Cleaning (%s::%s)") % \
3289 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3290 short_msg=short_msg)
3291 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3292 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Asynchronous task that unmerges one installed package via unmerge(),
# translating UninstallFailure into a returncode.
3294 class PackageUninstall(AsynchronousTask):
3296 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# --- _start(): def header and opening `try:` elided from view.
3300 unmerge(self.pkg.root_config, self.opts, "unmerge",
3301 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3302 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3303 writemsg_level=self._writemsg_level)
3304 except UninstallFailure, e:
3305 self.returncode = e.status
3307 self.returncode = os.EX_OK
# Routes unmerge output to the terminal and/or PORTAGE_LOG_FILE,
# suppressing sub-WARNING terminal noise when running in the background.
3310 def _writemsg_level(self, msg, level=0, noiselevel=0):
3312 log_path = self.settings.get("PORTAGE_LOG_FILE")
3313 background = self.background
3315 if log_path is None:
3316 if not (background and level < logging.WARNING):
3317 portage.util.writemsg_level(msg,
3318 level=level, noiselevel=noiselevel)
# (elided `else:` branch: log file exists — echo to terminal when not
# in the background, then append msg to the log file.)
3321 portage.util.writemsg_level(msg,
3322 level=level, noiselevel=noiselevel)
3324 f = open(log_path, 'a')
# Composite task that installs one binary package: optionally fetch it
# (--getbinpkg), verify digests, extract metadata and the image, run
# setup, then merge — releasing the build-dir lock at every exit path.
3330 class Binpkg(CompositeTask):
3332 __slots__ = ("find_blockers",
3333 "ldpath_mtimes", "logger", "opts",
3334 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3335 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3336 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
# Write msg to the terminal (when foreground) and append it to the log.
3338 def _writemsg_level(self, msg, level=0, noiselevel=0):
3340 if not self.background:
3341 portage.util.writemsg_level(msg,
3342 level=level, noiselevel=noiselevel)
3344 log_path = self.settings.get("PORTAGE_LOG_FILE")
3345 if log_path is not None:
3346 f = open(log_path, 'a')
# (elided: try/finally writing msg and closing f)
# --- _start(): def header elided; computes the build-dir layout and the
# synthesized .ebuild path inside build-info for this binary package.
3355 settings = self.settings
3356 settings.setcpv(pkg)
3357 self._tree = "bintree"
3358 self._bintree = self.pkg.root_config.trees[self._tree]
3359 self._verify = not self.opts.pretend
3361 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3362 "portage", pkg.category, pkg.pf)
3363 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3364 pkg=pkg, settings=settings)
3365 self._image_dir = os.path.join(dir_path, "image")
3366 self._infloc = os.path.join(dir_path, "build-info")
3367 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3368 settings["EBUILD"] = self._ebuild_path
3369 debug = settings.get("PORTAGE_DEBUG") == "1"
3370 portage.doebuild_environment(self._ebuild_path, "setup",
3371 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3372 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3374 # The prefetcher has already completed or it
3375 # could be running now. If it's running now,
3376 # wait for it to complete since it holds
3377 # a lock on the file being fetched. The
3378 # portage.locks functions are only designed
3379 # to work between separate processes. Since
3380 # the lock is held by the current process,
3381 # use the scheduler and fetcher methods to
3382 # synchronize with the fetcher.
3383 prefetcher = self.prefetcher
3384 if prefetcher is None:
3386 elif not prefetcher.isAlive():
3388 elif prefetcher.poll() is None:
3390 waiting_msg = ("Fetching '%s' " + \
3391 "in the background. " + \
3392 "To view fetch progress, run `tail -f " + \
3393 "/var/log/emerge-fetch.log` in another " + \
3394 "terminal.") % prefetcher.pkg_path
3395 msg_prefix = colorize("GOOD", " * ")
3396 from textwrap import wrap
3397 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3398 for line in wrap(waiting_msg, 65))
3399 if not self.background:
3400 writemsg(waiting_msg, noiselevel=-1)
3402 self._current_task = prefetcher
3403 prefetcher.addExitListener(self._prefetch_exit)
# Prefetcher already finished: proceed immediately.
3406 self._prefetch_exit(prefetcher)
# Lock the build dir (unless pretend/fetchonly) and start the fetcher
# when the package must come from a remote binhost.
3408 def _prefetch_exit(self, prefetcher):
3411 pkg_count = self.pkg_count
3412 if not (self.opts.pretend or self.opts.fetchonly):
3413 self._build_dir.lock()
3414 # If necessary, discard old log so that we don't
3416 self._build_dir.clean_log()
3417 # Initialze PORTAGE_LOG_FILE.
3418 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3419 fetcher = BinpkgFetcher(background=self.background,
3420 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3421 pretend=self.opts.pretend, scheduler=self.scheduler)
3422 pkg_path = fetcher.pkg_path
3423 self._pkg_path = pkg_path
3425 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3427 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3428 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3429 short_msg = "emerge: (%s of %s) %s Fetch" % \
3430 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3431 self.logger.log(msg, short_msg=short_msg)
3432 self._start_task(fetcher, self._fetcher_exit)
# Local package already present: skip straight to the fetcher-exit path.
3435 self._fetcher_exit(fetcher)
3437 def _fetcher_exit(self, fetcher):
3439 # The fetcher only has a returncode when
3440 # --getbinpkg is enabled.
3441 if fetcher.returncode is not None:
3442 self._fetched_pkg = True
3443 if self._default_exit(fetcher) != os.EX_OK:
3444 self._unlock_builddir()
3448 if self.opts.pretend:
3449 self._current_task = None
3450 self.returncode = os.EX_OK
# (elided: self.wait()/return, then the digest-verification branch)
3458 logfile = self.settings.get("PORTAGE_LOG_FILE")
3459 verifier = BinpkgVerifier(background=self.background,
3460 logfile=logfile, pkg=self.pkg)
3461 self._start_task(verifier, self._verifier_exit)
# Verification disabled: proceed with verifier=None.
3464 self._verifier_exit(verifier)
3466 def _verifier_exit(self, verifier):
3467 if verifier is not None and \
3468 self._default_exit(verifier) != os.EX_OK:
3469 self._unlock_builddir()
3473 logger = self.logger
3475 pkg_count = self.pkg_count
3476 pkg_path = self._pkg_path
# A freshly fetched package is injected into the local bintree so its
# metadata/index entries are updated.
3478 if self._fetched_pkg:
3479 self._bintree.inject(pkg.cpv, filename=pkg_path)
3481 if self.opts.fetchonly:
3482 self._current_task = None
3483 self.returncode = os.EX_OK
3487 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3488 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3489 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3490 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3491 logger.log(msg, short_msg=short_msg)
# Run the "clean" phase before unpacking into the build dir.
3494 settings = self.settings
3495 ebuild_phase = EbuildPhase(background=self.background,
3496 pkg=pkg, phase=phase, scheduler=self.scheduler,
3497 settings=settings, tree=self._tree)
3499 self._start_task(ebuild_phase, self._clean_exit)
3501 def _clean_exit(self, clean_phase):
3502 if self._default_exit(clean_phase) != os.EX_OK:
3503 self._unlock_builddir()
# Recreate the build-dir skeleton, then unpack the xpak metadata
# (build-info) from the .tbz2 into infloc.
3507 dir_path = self._build_dir.dir_path
3509 infloc = self._infloc
3511 pkg_path = self._pkg_path
3514 for mydir in (dir_path, self._image_dir, infloc):
3515 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3516 gid=portage.data.portage_gid, mode=dir_mode)
3518 # This initializes PORTAGE_LOG_FILE.
3519 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3520 self._writemsg_level(">>> Extracting info\n")
3522 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3523 check_missing_metadata = ("CATEGORY", "PF")
3524 missing_metadata = set()
3525 for k in check_missing_metadata:
3526 v = pkg_xpak.getfile(k)
3528 missing_metadata.add(k)
3530 pkg_xpak.unpackinfo(infloc)
# Backfill CATEGORY/PF files that the xpak lacked (value-writing lines
# are elided from this view).
3531 for k in missing_metadata:
3539 f = open(os.path.join(infloc, k), 'wb')
3545 # Store the md5sum in the vdb.
3546 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3548 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3552 # This gives bashrc users an opportunity to do various things
3553 # such as remove binary packages after they're installed.
3554 settings = self.settings
3555 settings.setcpv(self.pkg)
3556 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3557 settings.backup_changes("PORTAGE_BINPKG_FILE")
# Setup is funneled through the scheduler like source builds.
3560 setup_phase = EbuildPhase(background=self.background,
3561 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3562 settings=settings, tree=self._tree)
3564 setup_phase.addExitListener(self._setup_exit)
3565 self._current_task = setup_phase
3566 self.scheduler.scheduleSetup(setup_phase)
3568 def _setup_exit(self, setup_phase):
3569 if self._default_exit(setup_phase) != os.EX_OK:
3570 self._unlock_builddir()
3574 extractor = BinpkgExtractorAsync(background=self.background,
3575 image_dir=self._image_dir,
3576 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3577 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3578 self._start_task(extractor, self._extractor_exit)
3580 def _extractor_exit(self, extractor):
3581 if self._final_exit(extractor) != os.EX_OK:
3582 self._unlock_builddir()
3583 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# Release the build-dir lock (no lock was taken for pretend/fetchonly).
3587 def _unlock_builddir(self):
3588 if self.opts.pretend or self.opts.fetchonly:
3590 portage.elog.elog_process(self.pkg.cpv, self.settings)
3591 self._build_dir.unlock()
# --- install(): def header elided; merges the extracted image, then
# always drops PORTAGE_BINPKG_FILE and unlocks (elided try/finally).
3595 # This gives bashrc users an opportunity to do various things
3596 # such as remove binary packages after they're installed.
3597 settings = self.settings
3598 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3599 settings.backup_changes("PORTAGE_BINPKG_FILE")
3601 merge = EbuildMerge(find_blockers=self.find_blockers,
3602 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3603 pkg=self.pkg, pkg_count=self.pkg_count,
3604 pkg_path=self._pkg_path, scheduler=self.scheduler,
3605 settings=settings, tree=self._tree, world_atom=self.world_atom)
3608 retval = merge.execute()
3610 settings.pop("PORTAGE_BINPKG_FILE", None)
3611 self._unlock_builddir()
# Downloads one binary package from the configured binhost by spawning the
# user's FETCHCOMMAND/RESUMECOMMAND; also provides distfile-style locking.
3614 class BinpkgFetcher(SpawnProcess):
3616 __slots__ = ("pkg", "pretend",
3617 "locked", "pkg_path", "_lock_obj")
3619 def __init__(self, **kwargs):
3620 SpawnProcess.__init__(self, **kwargs)
# (elided line ~3621: presumably `pkg = self.pkg`, used below)
3622 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
# --- _start(): def header elided; builds the download URI and fetch
# command, then spawns it via SpawnProcess.
3630 pretend = self.pretend
3631 bintree = pkg.root_config.trees["bintree"]
3632 settings = bintree.settings
3633 use_locks = "distlocks" in settings.features
3634 pkg_path = self.pkg_path
3637 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when a partial file is present AND known-invalid in the
# bintree index; otherwise start clean.
3640 exists = os.path.exists(pkg_path)
3641 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3642 if not (pretend or resume):
3643 # Remove existing file or broken symlink.
3649 # urljoin doesn't work correctly with
3650 # unrecognized protocols like sftp
3651 if bintree._remote_has_index:
3652 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3654 rel_uri = pkg.cpv + ".tbz2"
3655 uri = bintree._remote_base_uri.rstrip("/") + \
3656 "/" + rel_uri.lstrip("/")
# (elided `else:`) no remote index: derive the URI from PORTAGE_BINHOST.
3658 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3659 "/" + pkg.pf + ".tbz2"
# In pretend mode just print the URI and finish successfully.
3662 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3663 self.returncode = os.EX_OK
# Choose a protocol-specific fetch command when one is configured,
# falling back to the generic FETCHCOMMAND/RESUMECOMMAND.
3667 protocol = urlparse.urlparse(uri)[0]
3668 fcmd_prefix = "FETCHCOMMAND"
3670 fcmd_prefix = "RESUMECOMMAND"
3671 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3673 fcmd = settings.get(fcmd_prefix)
3676 "DISTDIR" : os.path.dirname(pkg_path),
3678 "FILE" : os.path.basename(pkg_path)
3681 fetch_env = dict(settings.iteritems())
3682 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3683 for x in shlex.split(fcmd)]
3685 if self.fd_pipes is None:
3687 fd_pipes = self.fd_pipes
3689 # Redirect all output to stdout since some fetchers like
3690 # wget pollute stderr (if portage detects a problem then it
3691 # can send it's own message to stderr).
3692 fd_pipes.setdefault(0, sys.stdin.fileno())
3693 fd_pipes.setdefault(1, sys.stdout.fileno())
3694 fd_pipes.setdefault(2, sys.stdout.fileno())
3696 self.args = fetch_args
3697 self.env = fetch_env
3698 SpawnProcess._start(self)
3700 def _set_returncode(self, wait_retval):
3701 SpawnProcess._set_returncode(self, wait_retval)
3702 if self.returncode == os.EX_OK:
3703 # If possible, update the mtime to match the remote package if
3704 # the fetcher didn't already do it automatically.
3705 bintree = self.pkg.root_config.trees["bintree"]
3706 if bintree._remote_has_index:
3707 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3708 if remote_mtime is not None:
3710 remote_mtime = long(remote_mtime)
3715 local_mtime = long(os.stat(self.pkg_path).st_mtime)
# (elided except-clauses presumably swallow bad MTIME/stat errors)
3719 if remote_mtime != local_mtime:
3721 os.utime(self.pkg_path,
3722 (remote_mtime, remote_mtime))
# --- lock(): def header elided; docstring follows.
3731 This raises an AlreadyLocked exception if lock() is called
3732 while a lock is already held. In order to avoid this, call
3733 unlock() or check whether the "locked" attribute is True
3734 or False before calling lock().
3736 if self._lock_obj is not None:
3737 raise self.AlreadyLocked((self._lock_obj,))
3739 self._lock_obj = portage.locks.lockfile(
3740 self.pkg_path, wantnewlockfile=1)
# Raised by lock() when this fetcher already holds the file lock.
3743 class AlreadyLocked(portage.exception.PortageException):
# --- unlock(): def header elided from view.
3747 if self._lock_obj is None:
3749 portage.locks.unlockfile(self._lock_obj)
3750 self._lock_obj = None
# Synchronously digest-checks a binary package against the bintree Manifest
# data, renaming the file out of the way when verification fails.
3753 class BinpkgVerifier(AsynchronousTask):
3754 __slots__ = ("logfile", "pkg",)
# --- _start(): def header elided; docstring follows.
3758 Note: Unlike a normal AsynchronousTask.start() method,
3759 this one does all work is synchronously. The returncode
3760 attribute will be set before it returns.
3764 root_config = pkg.root_config
3765 bintree = root_config.trees["bintree"]
# Temporarily redirect stdout/stderr into the log file (background
# mode) so digestCheck's output lands in the build log.
3767 stdout_orig = sys.stdout
3768 stderr_orig = sys.stderr
3770 if self.background and self.logfile is not None:
3771 log_file = open(self.logfile, 'a')
3773 if log_file is not None:
3774 sys.stdout = log_file
3775 sys.stderr = log_file
3777 bintree.digestCheck(pkg)
3778 except portage.exception.FileNotFound:
3779 writemsg("!!! Fetching Binary failed " + \
3780 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3782 except portage.exception.DigestException, e:
3783 writemsg("\n!!! Digest verification failed:\n",
3785 writemsg("!!! %s\n" % e.value[0],
3787 writemsg("!!! Reason: %s\n" % e.value[1],
3789 writemsg("!!! Got: %s\n" % e.value[2],
3791 writemsg("!!! Expected: %s\n" % e.value[3],
# On failure, move the bad file aside so a re-fetch can start clean.
3794 if rval != os.EX_OK:
3795 pkg_path = bintree.getname(pkg.cpv)
3796 head, tail = os.path.split(pkg_path)
3797 temp_filename = portage._checksum_failure_temp_file(head, tail)
3798 writemsg("File renamed to '%s'\n" % (temp_filename,),
# Restore the original streams (elided scaffolding is presumably the
# matching `finally:` of the try above).
3801 sys.stdout = stdout_orig
3802 sys.stderr = stderr_orig
3803 if log_file is not None:
3806 self.returncode = rval
# Composite task: fetch a binary package, verify its digests, then inject it
# into the local binary tree so later merge steps can use it.
3809 class BinpkgPrefetcher(CompositeTask):
3811 __slots__ = ("pkg",) + \
3812 ("pkg_path", "_bintree",)
# (_start body; its def line is elided in this listing.)
3815 self._bintree = self.pkg.root_config.trees["bintree"]
3816 fetcher = BinpkgFetcher(background=self.background,
3817 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3818 scheduler=self.scheduler)
3819 self.pkg_path = fetcher.pkg_path
3820 self._start_task(fetcher, self._fetcher_exit)
3822 def _fetcher_exit(self, fetcher):
# Abort the chain if the fetch failed; otherwise verify the download.
3824 if self._default_exit(fetcher) != os.EX_OK:
3828 verifier = BinpkgVerifier(background=self.background,
3829 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3830 self._start_task(verifier, self._verifier_exit)
3832 def _verifier_exit(self, verifier):
3833 if self._default_exit(verifier) != os.EX_OK:
# Verified OK: register the fetched file with the binary tree.
3837 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3839 self._current_task = None
3840 self.returncode = os.EX_OK
# Spawns a shell pipeline that decompresses a binary package and untars it
# into the image directory.
3843 class BinpkgExtractorAsync(SpawnProcess):
3845 __slots__ = ("image_dir", "pkg", "pkg_path")
3847 _shell_binary = portage.const.BASH_BINARY
# (_start body; its def line is elided in this listing.)
# Paths are shell-quoted before interpolation into the bash -c command.
3850 self.args = [self._shell_binary, "-c",
3851 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3852 (portage._shell_quote(self.pkg_path),
3853 portage._shell_quote(self.image_dir))]
3855 self.env = self.pkg.root_config.settings.environ()
3856 SpawnProcess._start(self)
# One entry of the merge list: wraps either an EbuildBuild or a Binpkg task
# (or an uninstall) and forwards status/return codes to the scheduler.
3858 class MergeListItem(CompositeTask):
3861 TODO: For parallel scheduling, everything here needs asynchronous
3862 execution support (start, poll, and wait methods).
3865 __slots__ = ("args_set",
3866 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3867 "find_blockers", "logger", "mtimedb", "pkg",
3868 "pkg_count", "pkg_to_replace", "prefetcher",
3869 "settings", "statusMessage", "world_atom") + \
# (_start body begins here; its def line is elided in this listing.)
3875 build_opts = self.build_opts
3878 # uninstall, executed by self.merge()
3879 self.returncode = os.EX_OK
3883 args_set = self.args_set
3884 find_blockers = self.find_blockers
3885 logger = self.logger
3886 mtimedb = self.mtimedb
3887 pkg_count = self.pkg_count
3888 scheduler = self.scheduler
3889 settings = self.settings
3890 world_atom = self.world_atom
3891 ldpath_mtimes = mtimedb["ldpath"]
# Build the "(N of M) category/pkg-ver" progress message.
3893 action_desc = "Emerging"
3895 if pkg.type_name == "binary":
3896 action_desc += " binary"
3898 if build_opts.fetchonly:
3899 action_desc = "Fetching"
3901 msg = "%s (%s of %s) %s" % \
3903 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3904 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3905 colorize("GOOD", pkg.cpv))
# Mention the source repository when it differs from the main PORTDIR repo.
3907 portdb = pkg.root_config.trees["porttree"].dbapi
3908 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3909 if portdir_repo_name:
3910 pkg_repo_name = pkg.metadata.get("repository")
3911 if pkg_repo_name != portdir_repo_name:
3912 if not pkg_repo_name:
3913 pkg_repo_name = "unknown repo"
3914 msg += " from %s" % pkg_repo_name
3917 msg += " %s %s" % (preposition, pkg.root)
3919 if not build_opts.pretend:
3920 self.statusMessage(msg)
3921 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3922 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch on package type: source build vs. prebuilt binary.
3924 if pkg.type_name == "ebuild":
3926 build = EbuildBuild(args_set=args_set,
3927 background=self.background,
3928 config_pool=self.config_pool,
3929 find_blockers=find_blockers,
3930 ldpath_mtimes=ldpath_mtimes, logger=logger,
3931 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3932 prefetcher=self.prefetcher, scheduler=scheduler,
3933 settings=settings, world_atom=world_atom)
3935 self._install_task = build
3936 self._start_task(build, self._default_final_exit)
3939 elif pkg.type_name == "binary":
3941 binpkg = Binpkg(background=self.background,
3942 find_blockers=find_blockers,
3943 ldpath_mtimes=ldpath_mtimes, logger=logger,
3944 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3945 prefetcher=self.prefetcher, settings=settings,
3946 scheduler=scheduler, world_atom=world_atom)
3948 self._install_task = binpkg
3949 self._start_task(binpkg, self._default_final_exit)
# NOTE(review): the next two pairs of lines are presumably the bodies of
# poll() and wait() overrides — their def lines are elided in this listing.
3953 self._install_task.poll()
3954 return self.returncode
3957 self._install_task.wait()
3958 return self.returncode
# (merge() body begins here; its def line is elided in this listing.)
3963 build_opts = self.build_opts
3964 find_blockers = self.find_blockers
3965 logger = self.logger
3966 mtimedb = self.mtimedb
3967 pkg_count = self.pkg_count
3968 prefetcher = self.prefetcher
3969 scheduler = self.scheduler
3970 settings = self.settings
3971 world_atom = self.world_atom
3972 ldpath_mtimes = mtimedb["ldpath"]
# Uninstall path: only executed for real merges, not dry/fetch-only runs.
3975 if not (build_opts.buildpkgonly or \
3976 build_opts.fetchonly or build_opts.pretend):
3978 uninstall = PackageUninstall(background=self.background,
3979 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3980 pkg=pkg, scheduler=scheduler, settings=settings)
3983 retval = uninstall.wait()
3984 if retval != os.EX_OK:
3988 if build_opts.fetchonly or \
3989 build_opts.buildpkgonly:
3990 return self.returncode
3992 retval = self._install_task.install()
# Thin synchronous wrapper that runs ``self.merge.merge()`` and records the
# result as this task's returncode.
3995 class PackageMerge(AsynchronousTask):
3997 TODO: Implement asynchronous merge so that the scheduler can
3998 run while a merge is executing.
4001 __slots__ = ("merge",)
4005 pkg = self.merge.pkg
4006 pkg_count = self.merge.pkg_count
# Pick status wording by operation type (the surrounding conditional's
# header lines are elided in this listing).
4009 action_desc = "Uninstalling"
4010 preposition = "from"
4012 action_desc = "Installing"
4015 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4018 msg += " %s %s" % (preposition, pkg.root)
# Suppress the status line for fetch-only / pretend / buildpkg-only runs.
4020 if not self.merge.build_opts.fetchonly and \
4021 not self.merge.build_opts.pretend and \
4022 not self.merge.build_opts.buildpkgonly:
4023 self.merge.statusMessage(msg)
4025 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages, sets).
4028 class DependencyArg(object):
4029 def __init__(self, arg=None, root_config=None):
# NOTE(review): original line 4030 (presumably ``self.arg = arg``) and
# 4032-4033 (presumably the ``def __str__(self):`` header for the return
# below) are elided in this listing — confirm against the full file.
4031 self.root_config = root_config
4034 return str(self.arg)
# Dependency argument that is a single dependency atom; normalizes it to a
# portage.dep.Atom instance and exposes a one-atom ``set`` tuple.
4036 class AtomArg(DependencyArg):
4037 def __init__(self, atom=None, **kwargs):
4038 DependencyArg.__init__(self, **kwargs)
# NOTE(review): original line 4039 (presumably ``self.atom = atom``) is
# elided in this listing.
4040 if not isinstance(self.atom, portage.dep.Atom):
4041 self.atom = portage.dep.Atom(self.atom)
4042 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A dependency argument that wraps a concrete Package instance.

	The package is pinned by constructing an exact-version ("=cpv") atom,
	and ``set`` exposes that single atom for set-style iteration.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# Build the exact-match atom first; it depends only on the
		# package argument, not on instance state.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.package = package
		self.atom = exact_atom
		self.set = (exact_atom, )
# Dependency argument naming a package set (e.g. "@world"); ``name`` is the
# raw argument with the leading SETPREFIX stripped.
4051 class SetArg(DependencyArg):
4052 def __init__(self, set=None, **kwargs):
4053 DependencyArg.__init__(self, **kwargs)
# NOTE(review): original line 4054 (presumably ``self.set = set``) is
# elided in this listing.
4055 self.name = self.arg[len(SETPREFIX):]
# A single dependency edge: ``parent`` requires ``atom`` in ``root``.
# priority defaults to DepPriority(); depth presumably defaults to 0 on the
# elided original line 4065 — confirm against the full file.
4057 class Dependency(SlotObject):
4058 __slots__ = ("atom", "blocker", "depth",
4059 "parent", "onlydeps", "priority", "root")
4060 def __init__(self, **kwargs):
4061 SlotObject.__init__(self, **kwargs)
4062 if self.priority is None:
4063 self.priority = DepPriority()
4064 if self.depth is None:
4067 class BlockerCache(portage.cache.mappings.MutableMapping):
4068 """This caches blockers of installed packages so that dep_check does not
4069 have to be done for every single installed package on every invocation of
4070 emerge. The cache is invalidated whenever it is detected that something
4071 has changed that might alter the results of dep_check() calls:
4072 1) the set of installed packages (including COUNTER) has changed
4073 2) the old-style virtuals have changed
4076 # Number of uncached packages to trigger cache update, since
4077 # it's wasteful to update it for every vdb change.
4078 _cache_threshold = 5
# Lightweight record of one package's cached blockers: its vdb COUNTER at
# cache time plus the tuple of blocker atom strings.
4080 class BlockerData(object):
4082 __slots__ = ("__weakref__", "atoms", "counter")
4084 def __init__(self, counter, atoms):
4085 self.counter = counter
# Load the pickled cache from CACHE_PATH/vdb_blockers.pickle, validating
# version, structure, and every entry; corrupt entries are dropped.
4088 def __init__(self, myroot, vardb):
4090 self._virtuals = vardb.settings.getvirtuals()
4091 self._cache_filename = os.path.join(myroot,
4092 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4093 self._cache_version = "1"
4094 self._cache_data = None
4095 self._modified = set()
# NOTE(review): the try: header guarding the unpickle is elided here.
4100 f = open(self._cache_filename, mode='rb')
4101 mypickle = pickle.Unpickler(f)
# Disable find_global as a safety measure against loading arbitrary
# classes from an untrusted pickle (Python 2 Unpickler attribute).
4103 mypickle.find_global = None
4104 except AttributeError:
4105 # TODO: If py3k, override Unpickler.find_class().
4107 self._cache_data = mypickle.load()
4110 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4111 if isinstance(e, pickle.UnpicklingError):
4112 writemsg("!!! Error loading '%s': %s\n" % \
4113 (self._cache_filename, str(e)), noiselevel=-1)
4116 cache_valid = self._cache_data and \
4117 isinstance(self._cache_data, dict) and \
4118 self._cache_data.get("version") == self._cache_version and \
4119 isinstance(self._cache_data.get("blockers"), dict)
4121 # Validate all the atoms and counters so that
4122 # corruption is detected as soon as possible.
4123 invalid_items = set()
4124 for k, v in self._cache_data["blockers"].iteritems():
4125 if not isinstance(k, basestring):
4126 invalid_items.add(k)
4129 if portage.catpkgsplit(k) is None:
4130 invalid_items.add(k)
4132 except portage.exception.InvalidData:
4133 invalid_items.add(k)
4135 if not isinstance(v, tuple) or \
4137 invalid_items.add(k)
4140 if not isinstance(counter, (int, long)):
4141 invalid_items.add(k)
4143 if not isinstance(atoms, (list, tuple)):
4144 invalid_items.add(k)
4146 invalid_atom = False
4148 if not isinstance(atom, basestring):
4151 if atom[:1] != "!" or \
4152 not portage.isvalidatom(
4153 atom, allow_blockers=True):
4157 invalid_items.add(k)
4160 for k in invalid_items:
4161 del self._cache_data["blockers"][k]
4162 if not self._cache_data["blockers"]:
# Cache missing/invalid: start fresh with the current virtuals snapshot.
4166 self._cache_data = {"version":self._cache_version}
4167 self._cache_data["blockers"] = {}
4168 self._cache_data["virtuals"] = self._virtuals
4169 self._modified.clear()
# (flush() docstring follows; its def line is elided in this listing.)
4172 """If the current user has permission and the internal blocker cache has
4173 been updated, save it to disk and mark it unmodified. This is called
4174 by emerge after it has processed blockers for all installed packages.
4175 Currently, the cache is only written if the user has superuser
4176 privileges (since that's required to obtain a lock), but all users
4177 have read access and benefit from faster blocker lookups (as long as
4178 the entire cache is still valid). The cache is stored as a pickled
4179 dict object with the following format:
4183 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4184 "virtuals" : vardb.settings.getvirtuals()
# Only write when enough entries changed to be worth the I/O.
4187 if len(self._modified) >= self._cache_threshold and \
4190 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4191 pickle.dump(self._cache_data, f, protocol=2)
4193 portage.util.apply_secpass_permissions(
4194 self._cache_filename, gid=portage.portage_gid, mode=0644)
4195 except (IOError, OSError), e:
4197 self._modified.clear()
4199 def __setitem__(self, cpv, blocker_data):
4201 Update the cache and mark it as modified for a future call to
4204 @param cpv: Package for which to cache blockers.
4206 @param blocker_data: An object with counter and atoms attributes.
4207 @type blocker_data: BlockerData
# Atoms are stored as plain strings so the pickle stays class-free.
4209 self._cache_data["blockers"][cpv] = \
4210 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4211 self._modified.add(cpv)
# (__iter__ body; its def line is elided in this listing.)
4214 if self._cache_data is None:
4215 # triggered by python-trace
4217 return iter(self._cache_data["blockers"])
4219 def __delitem__(self, cpv):
4220 del self._cache_data["blockers"][cpv]
4222 def __getitem__(self, cpv):
4225 @returns: An object with counter and atoms attributes.
4227 return self.BlockerData(*self._cache_data["blockers"][cpv])
# Computes which installed packages block (or are blocked by) a candidate
# package, using a FakeVartree snapshot and the pickled BlockerCache.
4229 class BlockerDB(object):
4231 def __init__(self, root_config):
4232 self._root_config = root_config
4233 self._vartree = root_config.trees["vartree"]
4234 self._portdb = root_config.trees["porttree"].dbapi
4236 self._dep_check_trees = None
4237 self._fake_vartree = None
# Lazily build (then re-sync) the FakeVartree and the trees mapping that
# dep_check() expects.
4239 def _get_fake_vartree(self, acquire_lock=0):
4240 fake_vartree = self._fake_vartree
4241 if fake_vartree is None:
4242 fake_vartree = FakeVartree(self._root_config,
4243 acquire_lock=acquire_lock)
4244 self._fake_vartree = fake_vartree
4245 self._dep_check_trees = { self._vartree.root : {
4246 "porttree" : fake_vartree,
4247 "vartree" : fake_vartree,
4250 fake_vartree.sync(acquire_lock=acquire_lock)
# Returns the set of installed packages that conflict with new_pkg, in
# either direction (installed blocks new, or new blocks installed).
4253 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4254 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4255 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4256 settings = self._vartree.settings
4257 stale_cache = set(blocker_cache)
4258 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4259 dep_check_trees = self._dep_check_trees
4260 vardb = fake_vartree.dbapi
4261 installed_pkgs = list(vardb)
# Pass 1: (re)compute each installed package's blocker atoms, reusing the
# cache entry only while the vdb COUNTER still matches.
4263 for inst_pkg in installed_pkgs:
4264 stale_cache.discard(inst_pkg.cpv)
4265 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4266 if cached_blockers is not None and \
4267 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4268 cached_blockers = None
4269 if cached_blockers is not None:
4270 blocker_atoms = cached_blockers.atoms
4272 # Use aux_get() to trigger FakeVartree global
4273 # updates on *DEPEND when appropriate.
4274 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Temporarily relax strict checking; restored on the (elided) finally.
4276 portage.dep._dep_check_strict = False
4277 success, atoms = portage.dep_check(depstr,
4278 vardb, settings, myuse=inst_pkg.use.enabled,
4279 trees=dep_check_trees, myroot=inst_pkg.root)
4281 portage.dep._dep_check_strict = True
4283 pkg_location = os.path.join(inst_pkg.root,
4284 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4285 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4286 (pkg_location, atoms), noiselevel=-1)
# Only "!" atoms are blockers; sort for a stable cached representation.
4289 blocker_atoms = [atom for atom in atoms \
4290 if atom.startswith("!")]
4291 blocker_atoms.sort()
4292 counter = long(inst_pkg.metadata["COUNTER"])
4293 blocker_cache[inst_pkg.cpv] = \
4294 blocker_cache.BlockerData(counter, blocker_atoms)
# Drop entries for packages no longer installed, then persist.
4295 for cpv in stale_cache:
4296 del blocker_cache[cpv]
4297 blocker_cache.flush()
# Pass 2: installed packages whose blockers match new_pkg.
4299 blocker_parents = digraph()
4301 for pkg in installed_pkgs:
4302 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4303 blocker_atom = blocker_atom.lstrip("!")
4304 blocker_atoms.append(blocker_atom)
4305 blocker_parents.add(blocker_atom, pkg)
4307 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4308 blocking_pkgs = set()
4309 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4310 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4312 # Check for blockers in the other direction.
4313 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4315 portage.dep._dep_check_strict = False
4316 success, atoms = portage.dep_check(depstr,
4317 vardb, settings, myuse=new_pkg.use.enabled,
4318 trees=dep_check_trees, myroot=new_pkg.root)
4320 portage.dep._dep_check_strict = True
4322 # We should never get this far with invalid deps.
4323 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4326 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4329 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
# StopIteration from .next() means "no match"; any match adds the package.
4330 for inst_pkg in installed_pkgs:
4332 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4333 except (portage.exception.InvalidDependString, StopIteration):
4335 blocking_pkgs.add(inst_pkg)
4337 return blocking_pkgs
# Emit a fatal, wrapped error message explaining that a package's *DEPEND
# string could not be parsed. parent_node is a (type, root, key, status)
# tuple; the advice differs for installed ("nomerge") vs. new packages.
4339 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4341 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4342 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4343 p_type, p_root, p_key, p_status = parent_node
# NOTE(review): the initialization of the ``msg`` list (original line 4344)
# is elided in this listing.
4345 if p_status == "nomerge":
4346 category, pf = portage.catsplit(p_key)
4347 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4348 msg.append("Portage is unable to process the dependencies of the ")
4349 msg.append("'%s' package. " % p_key)
4350 msg.append("In order to correct this problem, the package ")
4351 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4352 msg.append("As a temporary workaround, the --nodeps option can ")
4353 msg.append("be used to ignore all dependencies. For reference, ")
4354 msg.append("the problematic dependencies can be found in the ")
4355 msg.append("*DEPEND files located in '%s/'." % pkg_location)
# (else branch: non-installed package; the else: line is elided here.)
4357 msg.append("This package can not be installed. ")
4358 msg.append("Please notify the '%s' package maintainer " % p_key)
4359 msg.append("about this problem.")
4361 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4362 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4364 class PackageVirtualDbapi(portage.dbapi):
4366 A dbapi-like interface class that represents the state of the installed
4367 package database as new packages are installed, replacing any packages
4368 that previously existed in the same slot. The main difference between
4369 this class and fakedbapi is that this one uses Package instances
4370 internally (passed in via cpv_inject() and cpv_remove() calls).
4372 def __init__(self, settings):
4373 portage.dbapi.__init__(self)
4374 self.settings = settings
4375 self._match_cache = {}
# (clear() — def line elided in this listing.)
4381 Remove all packages.
4385 self._cp_map.clear()
4386 self._cpv_map.clear()
# (copy() — def line elided; deep-copies the cp lists so the clone's
# mutations don't affect this instance.)
4389 obj = PackageVirtualDbapi(self.settings)
4390 obj._match_cache = self._match_cache.copy()
4391 obj._cp_map = self._cp_map.copy()
4392 for k, v in obj._cp_map.iteritems():
4393 obj._cp_map[k] = v[:]
4394 obj._cpv_map = self._cpv_map.copy()
4398 return self._cpv_map.itervalues()
4400 def __contains__(self, item):
4401 existing = self._cpv_map.get(item.cpv)
4402 if existing is not None and \
# get() accepts either a Package-like object (with cpv) or a
# (type_name, root, cpv, operation) tuple key.
4407 def get(self, item, default=None):
4408 cpv = getattr(item, "cpv", None)
4412 type_name, root, cpv, operation = item
4414 existing = self._cpv_map.get(cpv)
4415 if existing is not None and \
4420 def match_pkgs(self, atom):
4421 return [self._cpv_map[cpv] for cpv in self.match(atom)]
# Invalidate derived caches after inject/remove.
4423 def _clear_cache(self):
4424 if self._categories is not None:
4425 self._categories = None
4426 if self._match_cache:
4427 self._match_cache = {}
# match() memoizes per-dependency results in _match_cache.
4429 def match(self, origdep, use_cache=1):
4430 result = self._match_cache.get(origdep)
4431 if result is not None:
4433 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4434 self._match_cache[origdep] = result
4437 def cpv_exists(self, cpv):
4438 return cpv in self._cpv_map
4440 def cp_list(self, mycp, use_cache=1):
4441 cachelist = self._match_cache.get(mycp)
4442 # cp_list() doesn't expand old-style virtuals
4443 if cachelist and cachelist[0].startswith(mycp):
4445 cpv_list = self._cp_map.get(mycp)
4446 if cpv_list is None:
4449 cpv_list = [pkg.cpv for pkg in cpv_list]
4450 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for virtuals, since they may expand later.
4451 if not (not cpv_list and mycp.startswith("virtual/")):
4452 self._match_cache[mycp] = cpv_list
# (cp_all() / cpv_all() bodies; def lines elided in this listing.)
4456 return list(self._cp_map)
4459 return list(self._cpv_map)
# Insert pkg, displacing any existing package with the same cpv or the
# same slot atom (models in-slot replacement during install).
4461 def cpv_inject(self, pkg):
4462 cp_list = self._cp_map.get(pkg.cp)
4465 self._cp_map[pkg.cp] = cp_list
4466 e_pkg = self._cpv_map.get(pkg.cpv)
4467 if e_pkg is not None:
4470 self.cpv_remove(e_pkg)
4471 for e_pkg in cp_list:
4472 if e_pkg.slot_atom == pkg.slot_atom:
4475 self.cpv_remove(e_pkg)
4478 self._cpv_map[pkg.cpv] = pkg
4481 def cpv_remove(self, pkg):
4482 old_pkg = self._cpv_map.get(pkg.cpv)
4485 self._cp_map[pkg.cp].remove(pkg)
4486 del self._cpv_map[pkg.cpv]
4489 def aux_get(self, cpv, wants):
4490 metadata = self._cpv_map[cpv].metadata
4491 return [metadata.get(x, "") for x in wants]
4493 def aux_update(self, cpv, values):
4494 self._cpv_map[cpv].metadata.update(values)
4497 class depgraph(object):
4499 pkg_tree_map = RootConfig.pkg_tree_map
4501 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Build the dependency-graph working state: per-root fake trees, package
# selection databases, and the many bookkeeping containers used while
# resolving dependencies and blockers.
4503 def __init__(self, settings, trees, myopts, myparams, spinner):
4504 self.settings = settings
4505 self.target_root = settings["ROOT"]
4506 self.myopts = myopts
4507 self.myparams = myparams
4509 if settings.get("PORTAGE_DEBUG", "") == "1":
4511 self.spinner = spinner
4512 self._running_root = trees["/"]["root_config"]
4513 self._opts_no_restart = Scheduler._opts_no_restart
4514 self.pkgsettings = {}
4515 # Maps slot atom to package for each Package added to the graph.
4516 self._slot_pkg_map = {}
4517 # Maps nodes to the reasons they were selected for reinstallation.
4518 self._reinstall_nodes = {}
4521 self._trees_orig = trees
4523 # Contains a filtered view of preferred packages that are selected
4524 # from available repositories.
4525 self._filtered_trees = {}
4526 # Contains installed packages and new packages that have been added
4528 self._graph_trees = {}
4529 # All Package instances
4530 self._pkg_cache = {}
# Per-root setup: replace the real vartree with a FakeVartree snapshot.
4531 for myroot in trees:
4532 self.trees[myroot] = {}
4533 # Create a RootConfig instance that references
4534 # the FakeVartree instead of the real one.
4535 self.roots[myroot] = RootConfig(
4536 trees[myroot]["vartree"].settings,
4538 trees[myroot]["root_config"].setconfig)
4539 for tree in ("porttree", "bintree"):
4540 self.trees[myroot][tree] = trees[myroot][tree]
4541 self.trees[myroot]["vartree"] = \
4542 FakeVartree(trees[myroot]["root_config"],
4543 pkg_cache=self._pkg_cache)
4544 self.pkgsettings[myroot] = portage.config(
4545 clone=self.trees[myroot]["vartree"].settings)
4546 self._slot_pkg_map[myroot] = {}
4547 vardb = self.trees[myroot]["vartree"].dbapi
4548 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4549 "--buildpkgonly" not in self.myopts
4550 # This fakedbapi instance will model the state that the vdb will
4551 # have after new packages have been installed.
4552 fakedb = PackageVirtualDbapi(vardb.settings)
4553 if preload_installed_pkgs:
4555 self.spinner.update()
4556 # This triggers metadata updates via FakeVartree.
4557 vardb.aux_get(pkg.cpv, [])
4558 fakedb.cpv_inject(pkg)
4560 # Now that the vardb state is cached in our FakeVartree,
4561 # we won't be needing the real vartree cache for awhile.
4562 # To make some room on the heap, clear the vardbapi
4564 trees[myroot]["vartree"].dbapi._clear_cache()
4567 self.mydbapi[myroot] = fakedb
4570 graph_tree.dbapi = fakedb
4571 self._graph_trees[myroot] = {}
4572 self._filtered_trees[myroot] = {}
4573 # Substitute the graph tree for the vartree in dep_check() since we
4574 # want atom selections to be consistent with package selections
4575 # that have already been made.
4576 self._graph_trees[myroot]["porttree"] = graph_tree
4577 self._graph_trees[myroot]["vartree"] = graph_tree
4578 def filtered_tree():
4580 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4581 self._filtered_trees[myroot]["porttree"] = filtered_tree
4583 # Passing in graph_tree as the vartree here could lead to better
4584 # atom selections in some cases by causing atoms for packages that
4585 # have been added to the graph to be preferred over other choices.
4586 # However, it can trigger atom selections that result in
4587 # unresolvable direct circular dependencies. For example, this
4588 # happens with gwydion-dylan which depends on either itself or
4589 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4590 # gwydion-dylan-bin needs to be selected in order to avoid
4591 # an unresolvable direct circular dependency.
4593 # To solve the problem described above, pass in "graph_db" so that
4594 # packages that have been added to the graph are distinguishable
4595 # from other available packages and installed packages. Also, pass
4596 # the parent package into self._select_atoms() calls so that
4597 # unresolvable direct circular dependencies can be detected and
4598 # avoided when possible.
4599 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4600 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
# Build the ordered list of candidate databases for package selection:
# ebuilds (unless --usepkgonly), binaries (with --usepkg), then installed.
4603 portdb = self.trees[myroot]["porttree"].dbapi
4604 bindb = self.trees[myroot]["bintree"].dbapi
4605 vardb = self.trees[myroot]["vartree"].dbapi
4606 # (db, pkg_type, built, installed, db_keys)
4607 if "--usepkgonly" not in self.myopts:
4608 db_keys = list(portdb._aux_cache_keys)
4609 dbs.append((portdb, "ebuild", False, False, db_keys))
4610 if "--usepkg" in self.myopts:
4611 db_keys = list(bindb._aux_cache_keys)
4612 dbs.append((bindb, "binary", True, False, db_keys))
4613 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4614 dbs.append((vardb, "installed", True, True, db_keys))
4615 self._filtered_trees[myroot]["dbs"] = dbs
4616 if "--usepkg" in self.myopts:
4617 self.trees[myroot]["bintree"].populate(
4618 "--getbinpkg" in self.myopts,
4619 "--getbinpkgonly" in self.myopts)
# Graph-wide bookkeeping containers.
4622 self.digraph=portage.digraph()
4623 # contains all sets added to the graph
4625 # contains atoms given as arguments
4626 self._sets["args"] = InternalPackageSet()
4627 # contains all atoms from all sets added to the graph, including
4628 # atoms given as arguments
4629 self._set_atoms = InternalPackageSet()
4630 self._atom_arg_map = {}
4631 # contains all nodes pulled in by self._set_atoms
4632 self._set_nodes = set()
4633 # Contains only Blocker -> Uninstall edges
4634 self._blocker_uninstalls = digraph()
4635 # Contains only Package -> Blocker edges
4636 self._blocker_parents = digraph()
4637 # Contains only irrelevant Package -> Blocker edges
4638 self._irrelevant_blockers = digraph()
4639 # Contains only unsolvable Package -> Blocker edges
4640 self._unsolvable_blockers = digraph()
4641 # Contains all Blocker -> Blocked Package edges
4642 self._blocked_pkgs = digraph()
4643 # Contains world packages that have been protected from
4644 # uninstallation but may not have been added to the graph
4645 # if the graph is not complete yet.
4646 self._blocked_world_pkgs = {}
4647 self._slot_collision_info = {}
4648 # Slot collision nodes are not allowed to block other packages since
4649 # blocker validation is only able to account for one package per slot.
4650 self._slot_collision_nodes = set()
4651 self._parent_atoms = {}
4652 self._slot_conflict_parent_atoms = set()
4653 self._serialized_tasks_cache = None
4654 self._scheduler_graph = None
4655 self._displayed_list = None
4656 self._pprovided_args = []
4657 self._missing_args = []
4658 self._masked_installed = set()
4659 self._unsatisfied_deps_for_display = []
4660 self._unsatisfied_blockers_for_display = None
4661 self._circular_deps_for_display = None
4662 self._dep_stack = []
4663 self._unsatisfied_deps = []
4664 self._initially_unsatisfied_deps = []
4665 self._ignored_deps = []
4666 self._required_set_names = set(["system", "world"])
4667 self._select_atoms = self._select_atoms_highest_available
4668 self._select_package = self._select_pkg_highest_available
4669 self._highest_pkg_cache = {}
# Print a detailed report of every slot conflict: the colliding packages,
# a pruned list of the parents that pulled each one in, and (when possible)
# an explanation with a suggested fix.
4671 def _show_slot_collision_notice(self):
4672 """Show an informational message advising the user to mask one of
4673 the packages. In some cases it may be possible to resolve this
4674 automatically, but support for backtracking (removal nodes that have
4675 already been selected) will be required in order to handle all possible
4679 if not self._slot_collision_info:
4682 self._show_merge_list()
4685 msg.append("\n!!! Multiple package instances within a single " + \
4686 "package slot have been pulled\n")
4687 msg.append("!!! into the dependency graph, resulting" + \
4688 " in a slot conflict:\n\n")
4690 # Max number of parents shown, to avoid flooding the display.
4692 explanation_columns = 70
4694 for (slot_atom, root), slot_nodes \
4695 in self._slot_collision_info.iteritems():
4696 msg.append(str(slot_atom))
4699 for node in slot_nodes:
4701 msg.append(str(node))
4702 parent_atoms = self._parent_atoms.get(node)
# Build pruned_list in priority order, capped at max_parents:
# conflict atoms first, then DependencyArg parents, then colliding
# Package parents, then anything else.
4705 # Prefer conflict atoms over others.
4706 for parent_atom in parent_atoms:
4707 if len(pruned_list) >= max_parents:
4709 if parent_atom in self._slot_conflict_parent_atoms:
4710 pruned_list.add(parent_atom)
4712 # If this package was pulled in by conflict atoms then
4713 # show those alone since those are the most interesting.
4715 # When generating the pruned list, prefer instances
4716 # of DependencyArg over instances of Package.
4717 for parent_atom in parent_atoms:
4718 if len(pruned_list) >= max_parents:
4720 parent, atom = parent_atom
4721 if isinstance(parent, DependencyArg):
4722 pruned_list.add(parent_atom)
4723 # Prefer Packages instances that themselves have been
4724 # pulled into collision slots.
4725 for parent_atom in parent_atoms:
4726 if len(pruned_list) >= max_parents:
4728 parent, atom = parent_atom
4729 if isinstance(parent, Package) and \
4730 (parent.slot_atom, parent.root) \
4731 in self._slot_collision_info:
4732 pruned_list.add(parent_atom)
4733 for parent_atom in parent_atoms:
4734 if len(pruned_list) >= max_parents:
4736 pruned_list.add(parent_atom)
4737 omitted_parents = len(parent_atoms) - len(pruned_list)
4738 parent_atoms = pruned_list
4739 msg.append(" pulled in by\n")
4740 for parent_atom in parent_atoms:
4741 parent, atom = parent_atom
4742 msg.append(2*indent)
4743 if isinstance(parent,
4744 (PackageArg, AtomArg)):
4745 # For PackageArg and AtomArg types, it's
4746 # redundant to display the atom attribute.
4747 msg.append(str(parent))
4749 # Display the specific atom from SetArg or
4751 msg.append("%s required by %s" % (atom, parent))
4754 msg.append(2*indent)
4755 msg.append("(and %d more)\n" % omitted_parents)
4757 msg.append(" (no parents)\n")
4759 explanation = self._slot_conflict_explanation(slot_nodes)
4762 msg.append(indent + "Explanation:\n\n")
4763 for line in textwrap.wrap(explanation, explanation_columns):
4764 msg.append(2*indent + line + "\n")
4767 sys.stderr.write("".join(msg))
# Skip the generic masking advice when every conflict already got a
# specific explanation, or when --quiet was requested.
4770 explanations_for_all = explanations == len(self._slot_collision_info)
4772 if explanations_for_all or "--quiet" in self.myopts:
4776 msg.append("It may be possible to solve this problem ")
4777 msg.append("by using package.mask to prevent one of ")
4778 msg.append("those packages from being selected. ")
4779 msg.append("However, it is also possible that conflicting ")
4780 msg.append("dependencies exist such that they are impossible to ")
4781 msg.append("satisfy simultaneously. If such a conflict exists in ")
4782 msg.append("the dependencies of two different packages, then those ")
4783 msg.append("packages can not be installed simultaneously.")
4785 from formatter import AbstractFormatter, DumbWriter
4786 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4788 f.add_flowing_data(x)
4792 msg.append("For more information, see MASKED PACKAGES ")
4793 msg.append("section in the emerge man page or refer ")
4794 msg.append("to the Gentoo Handbook.")
4796 f.add_flowing_data(x)
# Try to produce a human-readable explanation string for a two-package slot
# conflict caused by USE deps; returns None (implicitly) when no suggestion
# applies.
4800 def _slot_conflict_explanation(self, slot_nodes):
4802 When a slot conflict occurs due to USE deps, there are a few
4803 different cases to consider:
4805 1) New USE are correctly set but --newuse wasn't requested so an
4806 installed package with incorrect USE happened to get pulled
4807 into graph before the new one.
4809 2) New USE are incorrectly set but an installed package has correct
4810 USE so it got pulled into the graph, and a new instance also got
4811 pulled in due to --newuse or an upgrade.
4813 3) Multiple USE deps exist that can't be satisfied simultaneously,
4814 and multiple package instances got pulled into the same slot to
4815 satisfy the conflicting deps.
4817 Currently, explanations and suggested courses of action are generated
4818 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4821 if len(slot_nodes) != 2:
4822 # Suggestions are only implemented for
4823 # conflicts between two packages.
4826 all_conflict_atoms = self._slot_conflict_parent_atoms
# Classify the two nodes: exactly one should be matched by the conflict
# atoms (matched_node/matched_atoms) and one not (unmatched_node).
4828 matched_atoms = None
4829 unmatched_node = None
4830 for node in slot_nodes:
4831 parent_atoms = self._parent_atoms.get(node)
4832 if not parent_atoms:
4833 # Normally, there are always parent atoms. If there are
4834 # none then something unexpected is happening and there's
4835 # currently no suggestion for this case.
4837 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4838 for parent_atom in conflict_atoms:
4839 parent, atom = parent_atom
4841 # Suggestions are currently only implemented for cases
4842 # in which all conflict atoms have USE deps.
4845 if matched_node is not None:
4846 # If conflict atoms match multiple nodes
4847 # then there's no suggestion.
4850 matched_atoms = conflict_atoms
4852 if unmatched_node is not None:
4853 # Neither node is matched by conflict atoms, and
4854 # there is no suggestion for this case.
4856 unmatched_node = node
4858 if matched_node is None or unmatched_node is None:
4859 # This shouldn't happen.
# Case 1: same version, installed copy has stale USE -> suggest --newuse.
4862 if unmatched_node.installed and not matched_node.installed and \
4863 unmatched_node.cpv == matched_node.cpv:
4864 # If the conflicting packages are the same version then
4865 # --newuse should be all that's needed. If they are different
4866 # versions then there's some other problem.
4867 return "New USE are correctly set, but --newuse wasn't" + \
4868 " requested, so an installed package with incorrect USE " + \
4869 "happened to get pulled into the dependency graph. " + \
4870 "In order to solve " + \
4871 "this, either specify the --newuse option or explicitly " + \
4872 " reinstall '%s'." % matched_node.slot_atom
# Case 2: new USE are wrong -> list the USE-dep atoms to satisfy.
4874 if matched_node.installed and not unmatched_node.installed:
4875 atoms = sorted(set(atom for parent, atom in matched_atoms))
4876 explanation = ("New USE for '%s' are incorrectly set. " + \
4877 "In order to solve this, adjust USE to satisfy '%s'") % \
4878 (matched_node.slot_atom, atoms[0])
4880 for atom in atoms[1:-1]:
4881 explanation += ", '%s'" % (atom,)
4884 explanation += " and '%s'" % (atoms[-1],)
# For every slot that collided, determine which (parent, atom) pairs match
# only a subset of the packages pulled into that slot -- those atoms are
# the actual cause of the conflict and are recorded in
# self._slot_conflict_parent_atoms.
4890 def _process_slot_conflicts(self):
4892 Process slot conflict data to identify specific atoms which
4893 lead to conflict. These atoms only match a subset of the
4894 packages that have been pulled into a given slot.
4896 for (slot_atom, root), slot_nodes \
4897 in self._slot_collision_info.iteritems():
# Gather every (parent, atom) pair that pulled in any of the
# colliding packages in this slot.
4899 all_parent_atoms = set()
4900 for pkg in slot_nodes:
4901 parent_atoms = self._parent_atoms.get(pkg)
4902 if not parent_atoms:
4904 all_parent_atoms.update(parent_atoms)
# For each colliding package, test every collected atom against it.
4906 for pkg in slot_nodes:
4907 parent_atoms = self._parent_atoms.get(pkg)
4908 if parent_atoms is None:
4909 parent_atoms = set()
4910 self._parent_atoms[pkg] = parent_atoms
4911 for parent_atom in all_parent_atoms:
4912 if parent_atom in parent_atoms:
4914 # Use package set for matching since it will match via
4915 # PROVIDE when necessary, while match_from_list does not.
4916 parent, atom = parent_atom
4917 atom_set = InternalPackageSet(
4918 initial_atoms=(atom,))
4919 if atom_set.findAtomForPackage(pkg):
4920 parent_atoms.add(parent_atom)
# NOTE(review): an else branch appears elided here; an atom that fails
# to match this pkg (but matched a sibling) is a conflict-causing atom.
4922 self._slot_conflict_parent_atoms.add(parent_atom)
# Decide whether a USE-flag change warrants a reinstall under the active
# options (--newuse or --reinstall=changed-use).
4924 def _reinstall_for_flags(self, forced_flags,
4925 orig_use, orig_iuse, cur_use, cur_iuse):
4926 """Return a set of flags that trigger reinstallation, or None if there
4927 are no such flags."""
4928 if "--newuse" in self.myopts:
# --newuse: any IUSE addition/removal (minus forced flags) counts,
# plus any change in the effective enabled-flag set.
4929 flags = set(orig_iuse.symmetric_difference(
4930 cur_iuse).difference(forced_flags))
4931 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4932 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes in effective enabled flags.
4935 elif "changed-use" == self.myopts.get("--reinstall"):
4936 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4937 cur_iuse.intersection(cur_use))
# NOTE(review): the "if flags: return flags" / "return None" tail of this
# method is elided from this excerpt.
# Drain the dependency stack: for each pending item, either expand a
# Package's dependencies or resolve a raw Dependency, stopping on the
# first failure.
4942 def _create_graph(self, allow_unsatisfied=False):
4943 dep_stack = self._dep_stack
# NOTE(review): the loop header that iterates while dep_stack is
# non-empty is elided from this excerpt.
4945 self.spinner.update()
4946 dep = dep_stack.pop()
4947 if isinstance(dep, Package):
4948 if not self._add_pkg_deps(dep,
4949 allow_unsatisfied=allow_unsatisfied):
4952 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Resolve a single Dependency: create a Blocker node for blocker atoms,
# select a package satisfying the atom otherwise, and feed it back into
# _add_pkg.  Returns falsy on unrecoverable resolution failure.
4956 def _add_dep(self, dep, allow_unsatisfied=False):
4957 debug = "--debug" in self.myopts
4958 buildpkgonly = "--buildpkgonly" in self.myopts
4959 nodeps = "--nodeps" in self.myopts
4960 empty = "empty" in self.myparams
4961 deep = "deep" in self.myparams
# --update only affects direct (depth <= 1) dependencies here.
4962 update = "--update" in self.myopts and dep.depth <= 1
4964 if not buildpkgonly and \
4966 dep.parent not in self._slot_collision_nodes:
4967 if dep.parent.onlydeps:
4968 # It's safe to ignore blockers if the
4969 # parent is an --onlydeps node.
4971 # The blocker applies to the root where
4972 # the parent is or will be installed.
4973 blocker = Blocker(atom=dep.atom,
4974 eapi=dep.parent.metadata["EAPI"],
4975 root=dep.parent.root)
4976 self._blocker_parents.add(blocker, dep.parent)
# Non-blocker: pick a package that satisfies the atom.
4978 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4979 onlydeps=dep.onlydeps)
4981 if dep.priority.optional:
4982 # This could be an unecessary build-time dep
4983 # pulled in by --with-bdeps=y.
4985 if allow_unsatisfied:
4986 self._unsatisfied_deps.append(dep)
# Queue the unsatisfied dep so it can be reported to the user later.
4988 self._unsatisfied_deps_for_display.append(
4989 ((dep.root, dep.atom), {"myparent":dep.parent}))
4991 # In some cases, dep_check will return deps that shouldn't
4992 # be proccessed any further, so they are identified and
4993 # discarded here. Try to discard as few as possible since
4994 # discarded dependencies reduce the amount of information
4995 # available for optimization of merge order.
4996 if dep.priority.satisfied and \
4997 not dep_pkg.installed and \
4998 not (existing_node or empty or deep or update):
5000 if dep.root == self.target_root:
5002 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5003 except StopIteration:
5005 except portage.exception.InvalidDependString:
5006 if not dep_pkg.installed:
5007 # This shouldn't happen since the package
5008 # should have been masked.
# Already-satisfied dep with no argument forcing it -> ignore for now.
5011 self._ignored_deps.append(dep)
5014 if not self._add_pkg(dep_pkg, dep):
# Add a selected package to the dependency graph, handling slot
# collisions, old-style virtual (PROVIDE) registration, and the decision
# of whether to recurse into the package's own dependencies.
5018 def _add_pkg(self, pkg, dep):
5025 myparent = dep.parent
5026 priority = dep.priority
5028 if priority is None:
5029 priority = DepPriority()
5031 Fills the digraph with nodes comprised of packages to merge.
5032 mybigkey is the package spec of the package to merge.
5033 myparent is the package depending on mybigkey ( or None )
5034 addme = Should we add this package to the digraph or are we just looking at it's deps?
5035 Think --onlydeps, we need to ignore packages in that case.
5038 #IUSE-aware emerge -> USE DEP aware depgraph
5039 #"no downgrade" emerge
5041 # Ensure that the dependencies of the same package
5042 # are never processed more than once.
5043 previously_added = pkg in self.digraph
5045 # select the correct /var database that we'll be checking against
5046 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5047 pkgsettings = self.pkgsettings[pkg.root]
# Map this package back to any command-line arguments that match it.
5052 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5053 except portage.exception.InvalidDependString, e:
5054 if not pkg.installed:
5055 show_invalid_depstring_notice(
5056 pkg, pkg.metadata["PROVIDE"], str(e))
5060 if not pkg.onlydeps:
5061 if not pkg.installed and \
5062 "empty" not in self.myparams and \
5063 vardbapi.match(pkg.slot_atom):
5064 # Increase the priority of dependencies on packages that
5065 # are being rebuilt. This optimizes merge order so that
5066 # dependencies are rebuilt/updated as soon as possible,
5067 # which is needed especially when emerge is called by
5068 # revdep-rebuild since dependencies may be affected by ABI
5069 # breakage that has rendered them useless. Don't adjust
5070 # priority here when in "empty" mode since all packages
5071 # are being merged in that case.
5072 priority.rebuild = True
# Check whether another package already occupies this slot.
5074 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5075 slot_collision = False
5077 existing_node_matches = pkg.cpv == existing_node.cpv
5078 if existing_node_matches and \
5079 pkg != existing_node and \
5080 dep.atom is not None:
5081 # Use package set for matching since it will match via
5082 # PROVIDE when necessary, while match_from_list does not.
5083 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5084 if not atom_set.findAtomForPackage(existing_node):
5085 existing_node_matches = False
5086 if existing_node_matches:
5087 # The existing node can be reused.
5089 for parent_atom in arg_atoms:
5090 parent, atom = parent_atom
5091 self.digraph.add(existing_node, parent,
5093 self._add_parent_atom(existing_node, parent_atom)
5094 # If a direct circular dependency is not an unsatisfied
5095 # buildtime dependency then drop it here since otherwise
5096 # it can skew the merge order calculation in an unwanted
5098 if existing_node != myparent or \
5099 (priority.buildtime and not priority.satisfied):
5100 self.digraph.addnode(existing_node, myparent,
5102 if dep.atom is not None and dep.parent is not None:
5103 self._add_parent_atom(existing_node,
5104 (dep.parent, dep.atom))
5108 # A slot collision has occurred. Sometimes this coincides
5109 # with unresolvable blockers, so the slot collision will be
5110 # shown later if there are no unresolvable blockers.
5111 self._add_slot_conflict(pkg)
5112 slot_collision = True
5115 # Now add this node to the graph so that self.display()
5116 # can show use flags and --tree portage.output. This node is
5117 # only being partially added to the graph. It must not be
5118 # allowed to interfere with the other nodes that have been
5119 # added. Do not overwrite data for existing nodes in
5120 # self.mydbapi since that data will be used for blocker
5122 # Even though the graph is now invalid, continue to process
5123 # dependencies so that things like --fetchonly can still
5124 # function despite collisions.
# First time this package is seen: register it in the per-root slot
# map and the fake dbapi used for dependency calculations.
5126 elif not previously_added:
5127 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5128 self.mydbapi[pkg.root].cpv_inject(pkg)
5129 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5131 if not pkg.installed:
5132 # Allow this package to satisfy old-style virtuals in case it
5133 # doesn't already. Any pre-existing providers will be preferred
5136 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5137 # For consistency, also update the global virtuals.
5138 settings = self.roots[pkg.root].settings
5140 settings.setinst(pkg.cpv, pkg.metadata)
5142 except portage.exception.InvalidDependString, e:
5143 show_invalid_depstring_notice(
5144 pkg, pkg.metadata["PROVIDE"], str(e))
5149 self._set_nodes.add(pkg)
5151 # Do this even when addme is False (--onlydeps) so that the
5152 # parent/child relationship is always known in case
5153 # self._show_slot_collision_notice() needs to be called later.
5154 self.digraph.add(pkg, myparent, priority=priority)
5155 if dep.atom is not None and dep.parent is not None:
5156 self._add_parent_atom(pkg, (dep.parent, dep.atom))
# Also link the package to any command-line arguments that selected it.
5159 for parent_atom in arg_atoms:
5160 parent, atom = parent_atom
5161 self.digraph.add(pkg, parent, priority=priority)
5162 self._add_parent_atom(pkg, parent_atom)
5164 """ This section determines whether we go deeper into dependencies or not.
5165 We want to go deeper on a few occasions:
5166 Installing package A, we need to make sure package A's deps are met.
5167 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5168 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5170 dep_stack = self._dep_stack
5171 if "recurse" not in self.myparams:
5173 elif pkg.installed and \
5174 "deep" not in self.myparams:
# Installed packages are only recursed into with --deep; otherwise
# their deps are merely recorded as ignored.
5175 dep_stack = self._ignored_deps
5177 self.spinner.update()
5182 if not previously_added:
5183 dep_stack.append(pkg)
5186 def _add_parent_atom(self, pkg, parent_atom):
5187 parent_atoms = self._parent_atoms.get(pkg)
5188 if parent_atoms is None:
5189 parent_atoms = set()
5190 self._parent_atoms[pkg] = parent_atoms
5191 parent_atoms.add(parent_atom)
# Record pkg as a participant in a slot conflict, grouping all colliding
# packages for the (slot_atom, root) key in self._slot_collision_info.
5193 def _add_slot_conflict(self, pkg):
5194 self._slot_collision_nodes.add(pkg)
5195 slot_key = (pkg.slot_atom, pkg.root)
5196 slot_nodes = self._slot_collision_info.get(slot_key)
5197 if slot_nodes is None:
# NOTE(review): the "slot_nodes = set()" initialization appears elided
# from this excerpt.  First conflict for this slot: seed the group with
# the package currently occupying the slot in _slot_pkg_map.
5199 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5200 self._slot_collision_info[slot_key] = slot_nodes
# Expand a package's DEPEND/RDEPEND/PDEPEND strings into individual
# Dependency objects and feed each one to _add_dep, applying the
# --with-bdeps and --buildpkgonly policies.
5203 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5205 mytype = pkg.type_name
5208 metadata = pkg.metadata
5209 myuse = pkg.use.enabled
5211 depth = pkg.depth + 1
5212 removal_action = "remove" in self.myparams
5215 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5217 edepend[k] = metadata[k]
5219 if not pkg.built and \
5220 "--buildpkgonly" in self.myopts and \
5221 "deep" not in self.myparams and \
5222 "empty" not in self.myparams:
# --buildpkgonly (shallow): only build-time deps matter.
5223 edepend["RDEPEND"] = ""
5224 edepend["PDEPEND"] = ""
5225 bdeps_optional = False
5227 if pkg.built and not removal_action:
5228 if self.myopts.get("--with-bdeps", "n") == "y":
5229 # Pull in build time deps as requested, but marked them as
5230 # "optional" since they are not strictly required. This allows
5231 # more freedom in the merge order calculation for solving
5232 # circular dependencies. Don't convert to PDEPEND since that
5233 # could make --with-bdeps=y less effective if it is used to
5234 # adjust merge order to prevent built_with_use() calls from
5236 bdeps_optional = True
5238 # built packages do not have build time dependencies.
5239 edepend["DEPEND"] = ""
5241 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5242 edepend["DEPEND"] = ""
# Build-time deps are resolved against "/", runtime deps against the
# package's own root.
5245 ("/", edepend["DEPEND"],
5246 self._priority(buildtime=(not bdeps_optional),
5247 optional=bdeps_optional)),
5248 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5249 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5252 debug = "--debug" in self.myopts
# Installed packages get lenient dep-string parsing.
5253 strict = mytype != "installed"
5255 for dep_root, dep_string, dep_priority in deps:
5260 print "Parent: ", jbigkey
5261 print "Depstring:", dep_string
5262 print "Priority:", dep_priority
5263 vardb = self.roots[dep_root].trees["vartree"].dbapi
5265 selected_atoms = self._select_atoms(dep_root,
5266 dep_string, myuse=myuse, parent=pkg, strict=strict,
5267 priority=dep_priority)
5268 except portage.exception.InvalidDependString, e:
5269 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5272 print "Candidates:", selected_atoms
5274 for atom in selected_atoms:
5277 atom = portage.dep.Atom(atom)
5279 mypriority = dep_priority.copy()
# Mark the dep satisfied when an installed package already matches.
5280 if not atom.blocker and vardb.match(atom):
5281 mypriority.satisfied = True
5283 if not self._add_dep(Dependency(atom=atom,
5284 blocker=atom.blocker, depth=depth, parent=pkg,
5285 priority=mypriority, root=dep_root),
5286 allow_unsatisfied=allow_unsatisfied):
5289 except portage.exception.InvalidAtom, e:
5290 show_invalid_depstring_notice(
5291 pkg, dep_string, str(e))
5293 if not pkg.installed:
5297 print "Exiting...", jbigkey
5298 except portage.exception.AmbiguousPackageName, e:
# A non-fully-qualified atom matched several packages: report all
# candidates and how to fix the offending ebuild/binary package.
5300 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5301 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5303 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5304 portage.writemsg("\n", noiselevel=-1)
5305 if mytype == "binary":
5307 "!!! This binary package cannot be installed: '%s'\n" % \
5308 mykey, noiselevel=-1)
5309 elif mytype == "ebuild":
5310 portdb = self.roots[myroot].trees["porttree"].dbapi
5311 myebuild, mylocation = portdb.findname2(mykey)
5312 portage.writemsg("!!! This ebuild cannot be installed: " + \
5313 "'%s'\n" % myebuild, noiselevel=-1)
5314 portage.writemsg("!!! Please notify the package maintainer " + \
5315 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Construct a dependency priority object from keyword arguments, using
# UnmergeDepPriority when this graph is being built for a removal action.
5319 def _priority(self, **kwargs):
5320 if "remove" in self.myparams:
5321 priority_constructor = UnmergeDepPriority
# NOTE(review): an "else:" line appears elided here in this excerpt.
5323 priority_constructor = DepPriority
5324 return priority_constructor(**kwargs)
# Expand a category-less atom (e.g. "foo") into fully-qualified atoms
# ("cat/foo") for every category in which any configured database has a
# matching package.
5326 def _dep_expand(self, root_config, atom_without_category):
5328 @param root_config: a root config instance
5329 @type root_config: RootConfig
5330 @param atom_without_category: an atom without a category component
5331 @type atom_without_category: String
5333 @returns: a list of atoms containing categories (possibly empty)
# Insert the "null" category just to extract the package-name part.
5335 null_cp = portage.dep_getkey(insert_category_into_atom(
5336 atom_without_category, "null"))
5337 cat, atom_pn = portage.catsplit(null_cp)
5339 dbs = self._filtered_trees[root_config.root]["dbs"]
# Scan every database's categories for one that contains this name.
5341 for db, pkg_type, built, installed, db_keys in dbs:
5342 for cat in db.categories:
5343 if db.cp_list("%s/%s" % (cat, atom_pn)):
5347 for cat in categories:
5348 deps.append(insert_category_into_atom(
5349 atom_without_category, cat))
# Report whether any configured database for the given root carries a
# (new-style virtual) package under atom_cp.
5352 def _have_new_virt(self, root, atom_cp):
5354 for db, pkg_type, built, installed, db_keys in \
5355 self._filtered_trees[root]["dbs"]:
5356 if db.cp_list(atom_cp):
# NOTE(review): the result assignment/return lines are elided from this
# excerpt.
# Yield the command-line argument objects whose atoms match the given
# package, skipping atoms superseded by new-style virtuals or by a
# higher-slotted visible package.
5361 def _iter_atoms_for_pkg(self, pkg):
5362 # TODO: add multiple $ROOT support
5363 if pkg.root != self.target_root:
5365 atom_arg_map = self._atom_arg_map
5366 root_config = self.roots[pkg.root]
5367 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5368 atom_cp = portage.dep_getkey(atom)
# Old-style virtual matched via PROVIDE: skip if a new-style virtual
# package exists for the same cp.
5369 if atom_cp != pkg.cp and \
5370 self._have_new_virt(pkg.root, atom_cp):
5372 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5373 visible_pkgs.reverse() # descending order
5375 for visible_pkg in visible_pkgs:
5376 if visible_pkg.cp != atom_cp:
5378 if pkg >= visible_pkg:
5379 # This is descending order, and we're not
5380 # interested in any versions <= pkg given.
5382 if pkg.slot_atom != visible_pkg.slot_atom:
5383 higher_slot = visible_pkg
5385 if higher_slot is not None:
5387 for arg in atom_arg_map[(atom, pkg.root)]:
5388 if isinstance(arg, PackageArg) and \
# Entry point for turning command-line arguments (.tbz2 files, .ebuild
# paths, set names, filesystem paths, and package atoms) into argument
# objects, seeding the dependency graph with them, and returning
# (success, favorites).
5393 def select_files(self, myfiles):
5394 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5395 appropriate depgraph and return a favorite list."""
5396 debug = "--debug" in self.myopts
5397 root_config = self.roots[self.target_root]
5398 sets = root_config.sets
5399 getSetAtoms = root_config.setconfig.getSetAtoms
5401 myroot = self.target_root
5402 dbs = self._filtered_trees[myroot]["dbs"]
5403 vardb = self.trees[myroot]["vartree"].dbapi
5404 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5405 portdb = self.trees[myroot]["porttree"].dbapi
5406 bindb = self.trees[myroot]["bintree"].dbapi
5407 pkgsettings = self.pkgsettings[myroot]
5409 onlydeps = "--onlydeps" in self.myopts
# Classify each argument by its file extension / shape.
5412 ext = os.path.splitext(x)[1]
# Binary package (.tbz2): locate it under PKGDIR if a bare name
# was given, then validate it against the bintree.
5414 if not os.path.exists(x):
5416 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5417 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5418 elif os.path.exists(
5419 os.path.join(pkgsettings["PKGDIR"], x)):
5420 x = os.path.join(pkgsettings["PKGDIR"], x)
5422 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5423 print "!!! Please ensure the tbz2 exists as specified.\n"
5424 return 0, myfavorites
5425 mytbz2=portage.xpak.tbz2(x)
5426 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5427 if os.path.realpath(x) != \
5428 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5429 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5430 return 0, myfavorites
5431 db_keys = list(bindb._aux_cache_keys)
5432 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5433 pkg = Package(type_name="binary", root_config=root_config,
5434 cpv=mykey, built=True, metadata=metadata,
5436 self._pkg_cache[pkg] = pkg
5437 args.append(PackageArg(arg=x, package=pkg,
5438 root_config=root_config))
# Raw .ebuild path: verify it lives in a valid tree and is visible.
5439 elif ext==".ebuild":
5440 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5441 pkgdir = os.path.dirname(ebuild_path)
5442 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5443 cp = pkgdir[len(tree_root)+1:]
5444 e = portage.exception.PackageNotFound(
5445 ("%s is not in a valid portage tree " + \
5446 "hierarchy or does not exist") % x)
5447 if not portage.isvalidatom(cp):
5449 cat = portage.catsplit(cp)[0]
5450 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5451 if not portage.isvalidatom("="+mykey):
5453 ebuild_path = portdb.findname(mykey)
5455 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5456 cp, os.path.basename(ebuild_path)):
5457 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5458 return 0, myfavorites
5459 if mykey not in portdb.xmatch(
5460 "match-visible", portage.dep_getkey(mykey)):
5461 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5462 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5463 print colorize("BAD", "*** page for details.")
5464 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5467 raise portage.exception.PackageNotFound(
5468 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5469 db_keys = list(portdb._aux_cache_keys)
5470 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5471 pkg = Package(type_name="ebuild", root_config=root_config,
5472 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5473 pkgsettings.setcpv(pkg)
5474 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5475 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5476 self._pkg_cache[pkg] = pkg
5477 args.append(PackageArg(arg=x, package=pkg,
5478 root_config=root_config))
# Absolute filesystem path: resolve to owning package(s) later.
5479 elif x.startswith(os.path.sep):
5480 if not x.startswith(myroot):
5481 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5482 " $ROOT.\n") % x, noiselevel=-1)
5484 # Queue these up since it's most efficient to handle
5485 # multiple files in a single iter_owners() call.
5486 lookup_owners.append(x)
5488 if x in ("system", "world"):
# Set argument (@set or system/world): expand recursively.
5490 if x.startswith(SETPREFIX):
5491 s = x[len(SETPREFIX):]
5493 raise portage.exception.PackageSetNotFound(s)
5496 # Recursively expand sets so that containment tests in
5497 # self._get_parent_sets() properly match atoms in nested
5498 # sets (like if world contains system).
5499 expanded_set = InternalPackageSet(
5500 initial_atoms=getSetAtoms(s))
5501 self._sets[s] = expanded_set
5502 args.append(SetArg(arg=x, set=expanded_set,
5503 root_config=root_config))
# Plain atom: validate, then expand category if it was omitted.
5505 if not is_valid_package_atom(x):
5506 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5508 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5509 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5511 # Don't expand categories or old-style virtuals here unless
5512 # necessary. Expansion of old-style virtuals here causes at
5513 # least the following problems:
5514 # 1) It's more difficult to determine which set(s) an atom
5515 # came from, if any.
5516 # 2) It takes away freedom from the resolver to choose other
5517 # possible expansions when necessary.
5519 args.append(AtomArg(arg=x, atom=x,
5520 root_config=root_config))
5522 expanded_atoms = self._dep_expand(root_config, x)
5523 installed_cp_set = set()
5524 for atom in expanded_atoms:
5525 atom_cp = portage.dep_getkey(atom)
5526 if vardb.cp_list(atom_cp):
5527 installed_cp_set.add(atom_cp)
# Prefer the category of an already-installed package when the
# bare name is ambiguous but only one candidate is installed.
5528 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5529 installed_cp = iter(installed_cp_set).next()
5530 expanded_atoms = [atom for atom in expanded_atoms \
5531 if portage.dep_getkey(atom) == installed_cp]
5533 if len(expanded_atoms) > 1:
5536 ambiguous_package_name(x, expanded_atoms, root_config,
5537 self.spinner, self.myopts)
5538 return False, myfavorites
5540 atom = expanded_atoms[0]
5542 null_atom = insert_category_into_atom(x, "null")
5543 null_cp = portage.dep_getkey(null_atom)
5544 cat, atom_pn = portage.catsplit(null_cp)
5545 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5547 # Allow the depgraph to choose which virtual.
5548 atom = insert_category_into_atom(x, "virtual")
5550 atom = insert_category_into_atom(x, "null")
5552 args.append(AtomArg(arg=x, atom=atom,
5553 root_config=root_config))
# Resolve queued filesystem paths to their owning packages in one
# iter_owners() pass.
5557 search_for_multiple = False
5558 if len(lookup_owners) > 1:
5559 search_for_multiple = True
5561 for x in lookup_owners:
5562 if not search_for_multiple and os.path.isdir(x):
5563 search_for_multiple = True
5564 relative_paths.append(x[len(myroot):])
5567 for pkg, relative_path in \
5568 real_vardb._owners.iter_owners(relative_paths):
5569 owners.add(pkg.mycpv)
5570 if not search_for_multiple:
5574 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5575 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5579 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5581 # portage now masks packages with missing slot, but it's
5582 # possible that one was installed by an older version
5583 atom = portage.cpv_getkey(cpv)
5585 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5586 args.append(AtomArg(arg=atom, atom=atom,
5587 root_config=root_config))
5589 if "--update" in self.myopts:
5590 # In some cases, the greedy slots behavior can pull in a slot that
5591 # the user would want to uninstall due to it being blocked by a
5592 # newer version in a different slot. Therefore, it's necessary to
5593 # detect and discard any that should be uninstalled. Each time
5594 # that arguments are updated, package selections are repeated in
5595 # order to ensure consistency with the current arguments:
5597 # 1) Initialize args
5598 # 2) Select packages and generate initial greedy atoms
5599 # 3) Update args with greedy atoms
5600 # 4) Select packages and generate greedy atoms again, while
5601 # accounting for any blockers between selected packages
5602 # 5) Update args with revised greedy atoms
5604 self._set_args(args)
5607 greedy_args.append(arg)
5608 if not isinstance(arg, AtomArg):
5610 for atom in self._greedy_slots(arg.root_config, arg.atom):
5612 AtomArg(arg=arg.arg, atom=atom,
5613 root_config=arg.root_config))
5615 self._set_args(greedy_args)
5618 # Revise greedy atoms, accounting for any blockers
5619 # between selected packages.
5620 revised_greedy_args = []
5622 revised_greedy_args.append(arg)
5623 if not isinstance(arg, AtomArg):
5625 for atom in self._greedy_slots(arg.root_config, arg.atom,
5626 blocker_lookahead=True):
5627 revised_greedy_args.append(
5628 AtomArg(arg=arg.arg, atom=atom,
5629 root_config=arg.root_config))
5630 args = revised_greedy_args
5631 del revised_greedy_args
5633 self._set_args(args)
# Record favorites: atoms for atom/package args, set names for sets.
5635 myfavorites = set(myfavorites)
5637 if isinstance(arg, (AtomArg, PackageArg)):
5638 myfavorites.add(arg.atom)
5639 elif isinstance(arg, SetArg):
5640 myfavorites.add(arg.arg)
5641 myfavorites = list(myfavorites)
5643 pprovideddict = pkgsettings.pprovideddict
5645 portage.writemsg("\n", noiselevel=-1)
5646 # Order needs to be preserved since a feature of --nodeps
5647 # is to allow the user to force a specific merge order.
# Seed the graph with a package selection for every argument atom.
5651 for atom in arg.set:
5652 self.spinner.update()
5653 dep = Dependency(atom=atom, onlydeps=onlydeps,
5654 root=myroot, parent=arg)
5655 atom_cp = portage.dep_getkey(atom)
5657 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5658 if pprovided and portage.match_from_list(atom, pprovided):
5659 # A provided package has been specified on the command line.
5660 self._pprovided_args.append((arg, atom))
5662 if isinstance(arg, PackageArg):
5663 if not self._add_pkg(arg.package, dep) or \
5664 not self._create_graph():
5665 sys.stderr.write(("\n\n!!! Problem resolving " + \
5666 "dependencies for %s\n") % arg.arg)
5667 return 0, myfavorites
5670 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5671 (arg, atom), noiselevel=-1)
5672 pkg, existing_node = self._select_package(
5673 myroot, atom, onlydeps=onlydeps)
5675 if not (isinstance(arg, SetArg) and \
5676 arg.name in ("system", "world")):
5677 self._unsatisfied_deps_for_display.append(
5678 ((myroot, atom), {}))
5679 return 0, myfavorites
5680 self._missing_args.append((arg, atom))
5682 if atom_cp != pkg.cp:
5683 # For old-style virtuals, we need to repeat the
5684 # package.provided check against the selected package.
5685 expanded_atom = atom.replace(atom_cp, pkg.cp)
5686 pprovided = pprovideddict.get(pkg.cp)
5688 portage.match_from_list(expanded_atom, pprovided):
5689 # A provided package has been
5690 # specified on the command line.
5691 self._pprovided_args.append((arg, atom))
5693 if pkg.installed and "selective" not in self.myparams:
5694 self._unsatisfied_deps_for_display.append(
5695 ((myroot, atom), {}))
5696 # Previous behavior was to bail out in this case, but
5697 # since the dep is satisfied by the installed package,
5698 # it's more friendly to continue building the graph
5699 # and just show a warning message. Therefore, only bail
5700 # out here if the atom is not from either the system or
5702 if not (isinstance(arg, SetArg) and \
5703 arg.name in ("system", "world")):
5704 return 0, myfavorites
5706 # Add the selected package to the graph as soon as possible
5707 # so that later dep_check() calls can use it as feedback
5708 # for making more consistent atom selections.
5709 if not self._add_pkg(pkg, dep):
5710 if isinstance(arg, SetArg):
5711 sys.stderr.write(("\n\n!!! Problem resolving " + \
5712 "dependencies for %s from %s\n") % \
5715 sys.stderr.write(("\n\n!!! Problem resolving " + \
5716 "dependencies for %s\n") % atom)
5717 return 0, myfavorites
# gpg-signature failures abort the whole selection with a warning.
5719 except portage.exception.MissingSignature, e:
5720 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5721 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5722 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5723 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5724 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5725 return 0, myfavorites
5726 except portage.exception.InvalidSignature, e:
5727 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5728 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5729 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5730 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5731 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5732 return 0, myfavorites
5733 except SystemExit, e:
5734 raise # Needed else can't exit
5735 except Exception, e:
5736 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5737 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5740 # Now that the root packages have been added to the graph,
5741 # process the dependencies.
5742 if not self._create_graph():
5743 return 0, myfavorites
# --usepkgonly: verify every to-be-merged node has a binary package.
5746 if "--usepkgonly" in self.myopts:
5747 for xs in self.digraph.all_nodes():
5748 if not isinstance(xs, Package):
5750 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5754 print "Missing binary for:",xs[2]
5758 except self._unknown_internal_error:
5759 return False, myfavorites
5761 # We're true here unless we are missing binaries.
5762 return (not missing,myfavorites)
# Rebuild the "args" package set and the atom->argument map from the
# current argument list; invalidates the package-selection cache since
# arguments influence selections.
5764 def _set_args(self, args):
5766 Create the "args" package set from atoms and packages given as
5767 arguments. This method can be called multiple times if necessary.
5768 The package selection cache is automatically invalidated, since
5769 arguments influence package selections.
5771 args_set = self._sets["args"]
5774 if not isinstance(arg, (AtomArg, PackageArg)):
5777 if atom in args_set:
# Merge atoms from every registered set into _set_atoms.
5781 self._set_atoms.clear()
5782 self._set_atoms.update(chain(*self._sets.itervalues()))
5783 atom_arg_map = self._atom_arg_map
5784 atom_arg_map.clear()
# Map each (atom, root) key to the argument objects that supplied it.
5786 for atom in arg.set:
5787 atom_key = (atom, arg.root_config.root)
5788 refs = atom_arg_map.get(atom_key)
5791 atom_arg_map[atom_key] = refs
5795 # Invalidate the package selection cache, since
5796 # arguments influence package selections.
5797 self._highest_pkg_cache.clear()
5798 for trees in self._filtered_trees.itervalues():
5799 trees["porttree"].dbapi._clear_cache()
# Compute extra slot atoms for installed slots that differ from the
# highest visible match of the given atom, optionally discarding slots
# that would cause blocker conflicts.
5801 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5803 Return a list of slot atoms corresponding to installed slots that
5804 differ from the slot of the highest visible match. When
5805 blocker_lookahead is True, slot atoms that would trigger a blocker
5806 conflict are automatically discarded, potentially allowing automatic
5807 uninstallation of older slots when appropriate.
5809 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5810 if highest_pkg is None:
5812 vardb = root_config.trees["vartree"].dbapi
# Collect the SLOTs of all installed instances of the same cp.
5814 for cpv in vardb.match(atom):
5815 # don't mix new virtuals with old virtuals
5816 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5817 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5819 slots.add(highest_pkg.metadata["SLOT"])
# Drop the slot that the highest visible match already covers.
5823 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, pick a visible package older than the
# highest match as a greedy candidate.
5826 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5827 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5828 if pkg is not None and \
5829 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5830 greedy_pkgs.append(pkg)
5833 if not blocker_lookahead:
5834 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: build per-package blocker sets from dep strings.
5837 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5838 for pkg in greedy_pkgs + [highest_pkg]:
5839 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5841 atoms = self._select_atoms(
5842 pkg.root, dep_str, pkg.use.enabled,
5843 parent=pkg, strict=True)
5844 except portage.exception.InvalidDependString:
5846 blocker_atoms = (x for x in atoms if x.blocker)
5847 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5849 if highest_pkg not in blockers:
5852 # filter packages with invalid deps
5853 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5855 # filter packages that conflict with highest_pkg
5856 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5857 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5858 blockers[pkg].findAtomForPackage(highest_pkg))]
5863 # If two packages conflict, discard the lower version.
5864 discard_pkgs = set()
5865 greedy_pkgs.sort(reverse=True)
5866 for i in xrange(len(greedy_pkgs) - 1):
5867 pkg1 = greedy_pkgs[i]
5868 if pkg1 in discard_pkgs:
5870 for j in xrange(i + 1, len(greedy_pkgs)):
5871 pkg2 = greedy_pkgs[j]
5872 if pkg2 in discard_pkgs:
5874 if blockers[pkg1].findAtomForPackage(pkg2) or \
5875 blockers[pkg2].findAtomForPackage(pkg1):
# pkg2 is the lower version (list is sorted descending).
5877 discard_pkgs.add(pkg2)
5879 return [pkg.slot_atom for pkg in greedy_pkgs \
5880 if pkg not in discard_pkgs]
# Thin wrapper: forces atom selection to use the graph-backed trees so
# dep_check prefers packages already in the graph (or installed and not
# scheduled for replacement).
5882 def _select_atoms_from_graph(self, *pargs, **kwargs):
5884 Prefer atoms matching packages that have already been
5885 added to the graph or those that are installed and have
5886 not been scheduled for replacement.
# Override the "trees" keyword and delegate; all other arguments pass
# through unchanged.
5888 kwargs["trees"] = self._graph_trees
5889 return self._select_atoms_highest_available(*pargs, **kwargs)
5891 def _select_atoms_highest_available(self, root, depstring,
5892 myuse=None, parent=None, strict=True, trees=None, priority=None):
5893 """This will raise InvalidDependString if necessary. If trees is
5894 None then self._filtered_trees is used."""
5895 pkgsettings = self.pkgsettings[root]
5897 trees = self._filtered_trees
5898 if not getattr(priority, "buildtime", False):
5899 # The parent should only be passed to dep_check() for buildtime
5900 # dependencies since that's the only case when it's appropriate
5901 # to trigger the circular dependency avoidance code which uses it.
5902 # It's important not to trigger the same circular dependency
5903 # avoidance code for runtime dependencies since it's not needed
5904 # and it can promote an incorrect package choice.
# Temporarily stash the parent in the tree dict so dep_check can see it,
# and relax global strict mode around the call; both are restored below.
# NOTE(review): the try/finally lines around this region are elided from
# this view — restoration presumably happens in a finally block; confirm.
5908 if parent is not None:
5909 trees[root]["parent"] = parent
5911 portage.dep._dep_check_strict = False
5912 mycheck = portage.dep_check(depstring, None,
5913 pkgsettings, myuse=myuse,
5914 myroot=root, trees=trees)
5916 if parent is not None:
5917 trees[root].pop("parent")
5918 portage.dep._dep_check_strict = True
# dep_check failure: mycheck[1] carries the error message here.
5920 raise portage.exception.InvalidDependString(mycheck[1])
# On success, mycheck[1] is the list of selected atoms.
5921 selected_atoms = mycheck[1]
5922 return selected_atoms
# Print a human-readable explanation of why the given atom could not be
# satisfied on the given root: missing USE flags, missing IUSE, masked
# packages, or no ebuilds at all, followed by the dependency chain that
# pulled the atom in.
# NOTE(review): many statements are elided from this view; comments
# describe only the visible code.
5924 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5925 atom = portage.dep.Atom(atom)
5926 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a variant of the atom with USE deps stripped (slot preserved) so
# we can find candidate packages that fail only because of USE.
5927 atom_without_use = atom
5929 atom_without_use = portage.dep.remove_slot(atom)
5931 atom_without_use += ":" + atom.slot
5932 atom_without_use = portage.dep.Atom(atom_without_use)
5933 xinfo = '"%s"' % atom
5936 # Discard null/ from failed cpv_expand category expansion.
5937 xinfo = xinfo.replace("null/", "")
5938 masked_packages = []
5940 masked_pkg_instances = set()
5941 missing_licenses = []
5942 have_eapi_mask = False
5943 pkgsettings = self.pkgsettings[root]
5944 implicit_iuse = pkgsettings._get_implicit_iuse()
5945 root_config = self.roots[root]
5946 portdb = self.roots[root].trees["porttree"].dbapi
5947 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidates and collect the
# reasons each candidate is unusable.
5948 for db, pkg_type, built, installed, db_keys in dbs:
5952 if hasattr(db, "xmatch"):
5953 cpv_list = db.xmatch("match-all", atom_without_use)
5955 cpv_list = db.match(atom_without_use)
5958 for cpv in cpv_list:
5959 metadata, mreasons = get_mask_info(root_config, cpv,
5960 pkgsettings, db, pkg_type, built, installed, db_keys)
5961 if metadata is not None:
5962 pkg = Package(built=built, cpv=cpv,
5963 installed=installed, metadata=metadata,
5964 root_config=root_config)
5965 if pkg.cp != atom.cp:
5966 # A cpv can be returned from dbapi.match() as an
5967 # old-style virtual match even in cases when the
5968 # package does not actually PROVIDE the virtual.
5969 # Filter out any such false matches here.
5970 if not atom_set.findAtomForPackage(pkg):
5973 masked_pkg_instances.add(pkg)
5975 missing_use.append(pkg)
5978 masked_packages.append(
5979 (root_config, pkgsettings, cpv, metadata, mreasons))
# For candidates that only fail USE deps, compute the exact flag changes
# (or missing IUSE entries) that would satisfy the atom.
5981 missing_use_reasons = []
5982 missing_iuse_reasons = []
5983 for pkg in missing_use:
5984 use = pkg.use.enabled
# IUSE membership is tested via a regex built from the implicit IUSE
# union plus the package's own (escaped) IUSE entries.
5985 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5986 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5988 for x in atom.use.required:
5989 if iuse_re.match(x) is None:
5990 missing_iuse.append(x)
5993 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5994 missing_iuse_reasons.append((pkg, mreasons))
5996 need_enable = sorted(atom.use.enabled.difference(use))
5997 need_disable = sorted(atom.use.disabled.intersection(use))
5998 if need_enable or need_disable:
6000 changes.extend(colorize("red", "+" + x) \
6001 for x in need_enable)
6002 changes.extend(colorize("blue", "-" + x) \
6003 for x in need_disable)
6004 mreasons.append("Change USE: %s" % " ".join(changes))
6005 missing_use_reasons.append((pkg, mreasons))
# Only unmasked candidates are worth suggesting USE changes for.
6007 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6008 in missing_use_reasons if pkg not in masked_pkg_instances]
6010 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6011 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6013 show_missing_use = False
6014 if unmasked_use_reasons:
6015 # Only show the latest version.
6016 show_missing_use = unmasked_use_reasons[:1]
6017 elif unmasked_iuse_reasons:
6018 if missing_use_reasons:
6019 # All packages with required IUSE are masked,
6020 # so display a normal masking message.
6023 show_missing_use = unmasked_iuse_reasons
# Emit the appropriate message: USE-change suggestions, masked-package
# listing, or plain "no ebuilds" notice.
6025 if show_missing_use:
6026 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6027 print "!!! One of the following packages is required to complete your request:"
6028 for pkg, mreasons in show_missing_use:
6029 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6031 elif masked_packages:
6033 colorize("BAD", "All ebuilds that could satisfy ") + \
6034 colorize("INFORM", xinfo) + \
6035 colorize("BAD", " have been masked.")
6036 print "!!! One of the following masked packages is required to complete your request:"
6037 have_eapi_mask = show_masked_packages(masked_packages)
6040 msg = ("The current version of portage supports " + \
6041 "EAPI '%s'. You must upgrade to a newer version" + \
6042 " of portage before EAPI masked packages can" + \
6043 " be installed.") % portage.const.EAPI
6044 from textwrap import wrap
6045 for line in wrap(msg, 75):
6050 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6052 # Show parent nodes and the argument that pulled them in.
6053 traversed_nodes = set()
# Walk up the digraph from the failing node, printing each parent;
# traversed_nodes guards against cycles.
6056 while node is not None:
6057 traversed_nodes.add(node)
6058 msg.append('(dependency required by "%s" [%s])' % \
6059 (colorize('INFORM', str(node.cpv)), node.type_name))
6060 # When traversing to parents, prefer arguments over packages
6061 # since arguments are root nodes. Never traverse the same
6062 # package twice, in order to prevent an infinite loop.
6063 selected_parent = None
6064 for parent in self.digraph.parent_nodes(node):
6065 if isinstance(parent, DependencyArg):
6066 msg.append('(dependency required by "%s" [argument])' % \
6067 (colorize('INFORM', str(parent))))
# Reaching an argument terminates the walk (arguments are roots).
6068 selected_parent = None
6070 if parent not in traversed_nodes:
6071 selected_parent = parent
6072 node = selected_parent
# Cached front-end for package selection: look up (root, atom, onlydeps)
# in the cache, refresh the "existing node" half of a cached hit if the
# package has since been added to the graph, otherwise compute and cache.
6078 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6079 cache_key = (root, atom, onlydeps)
6080 ret = self._highest_pkg_cache.get(cache_key)
# Cached hit with no existing node recorded: re-check the slot map in
# case the package was added to the graph after the entry was cached.
6083 if pkg and not existing:
6084 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6085 if existing and existing == pkg:
6086 # Update the cache to reflect that the
6087 # package has been added to the graph.
6089 self._highest_pkg_cache[cache_key] = ret
# Cache miss: do the real work and memoize the result.
6091 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6092 self._highest_pkg_cache[cache_key] = ret
# Track visible selections in root_config.visible_pkgs, excluding
# installed packages whose KEYWORDS are missing.
6095 settings = pkg.root_config.settings
6096 if visible(settings, pkg) and not (pkg.installed and \
6097 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6098 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: scan all configured dbs (ordered by type
# preference) for packages matching the atom, apply visibility, USE,
# --noreplace/--newuse/--reinstall and existing-graph-node rules, and
# return (best_match, existing_graph_node).
# NOTE(review): many statements are elided from this view; comments
# describe only the visible code.
6101 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6102 root_config = self.roots[root]
6103 pkgsettings = self.pkgsettings[root]
6104 dbs = self._filtered_trees[root]["dbs"]
6105 vardb = self.roots[root].trees["vartree"].dbapi
6106 portdb = self.roots[root].trees["porttree"].dbapi
6107 # List of acceptable packages, ordered by type preference.
6108 matched_packages = []
6109 highest_version = None
6110 if not isinstance(atom, portage.dep.Atom):
6111 atom = portage.dep.Atom(atom)
6113 atom_set = InternalPackageSet(initial_atoms=(atom,))
6114 existing_node = None
6116 usepkgonly = "--usepkgonly" in self.myopts
6117 empty = "empty" in self.myparams
6118 selective = "selective" in self.myparams
6120 noreplace = "--noreplace" in self.myopts
6121 # Behavior of the "selective" parameter depends on
6122 # whether or not a package matches an argument atom.
6123 # If an installed package provides an old-style
6124 # virtual that is no longer provided by an available
6125 # package, the installed package may match an argument
6126 # atom even though none of the available packages do.
6127 # Therefore, "selective" logic does not consider
6128 # whether or not an installed package matches an
6129 # argument atom. It only considers whether or not
6130 # available packages match argument atoms, which is
6131 # represented by the found_available_arg flag.
6132 found_available_arg = False
# Two passes: first prefer nodes already in the graph, then fall back to
# a fresh selection.
6133 for find_existing_node in True, False:
6136 for db, pkg_type, built, installed, db_keys in dbs:
6139 if installed and not find_existing_node:
6140 want_reinstall = reinstall or empty or \
6141 (found_available_arg and not selective)
6142 if want_reinstall and matched_packages:
6144 if hasattr(db, "xmatch"):
6145 cpv_list = db.xmatch("match-all", atom)
6147 cpv_list = db.match(atom)
6149 # USE=multislot can make an installed package appear as if
6150 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6151 # won't do any good as long as USE=multislot is enabled since
6152 # the newly built package still won't have the expected slot.
6153 # Therefore, assume that such SLOT dependencies are already
6154 # satisfied rather than forcing a rebuild.
6155 if installed and not cpv_list and atom.slot:
6156 for cpv in db.match(atom.cp):
# The requested slot is "available" if any other db offers it.
6157 slot_available = False
6158 for other_db, other_type, other_built, \
6159 other_installed, other_keys in dbs:
6162 other_db.aux_get(cpv, ["SLOT"])[0]:
6163 slot_available = True
6167 if not slot_available:
6169 inst_pkg = self._pkg(cpv, "installed",
6170 root_config, installed=installed)
6171 # Remove the slot from the atom and verify that
6172 # the package matches the resulting atom.
6173 atom_without_slot = portage.dep.remove_slot(atom)
6175 atom_without_slot += str(atom.use)
6176 atom_without_slot = portage.dep.Atom(atom_without_slot)
6177 if portage.match_from_list(
6178 atom_without_slot, [inst_pkg]):
6179 cpv_list = [inst_pkg.cpv]
# Installed or onlydeps packages will not actually be merged.
6184 pkg_status = "merge"
6185 if installed or onlydeps:
6186 pkg_status = "nomerge"
6189 for cpv in cpv_list:
6190 # Make --noreplace take precedence over --newuse.
6191 if not installed and noreplace and \
6192 cpv in vardb.match(atom):
6193 # If the installed version is masked, it may
6194 # be necessary to look at lower versions,
6195 # in case there is a visible downgrade.
6197 reinstall_for_flags = None
6198 cache_key = (pkg_type, root, cpv, pkg_status)
6199 calculated_use = True
6200 pkg = self._pkg_cache.get(cache_key)
6202 calculated_use = False
6204 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6207 pkg = Package(built=built, cpv=cpv,
6208 installed=installed, metadata=metadata,
6209 onlydeps=onlydeps, root_config=root_config,
6211 metadata = pkg.metadata
6213 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6214 if not built and ("?" in metadata["LICENSE"] or \
6215 "?" in metadata["PROVIDE"]):
6216 # This is avoided whenever possible because
6217 # it's expensive. It only needs to be done here
6218 # if it has an effect on visibility.
6219 pkgsettings.setcpv(pkg)
6220 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6221 calculated_use = True
6222 self._pkg_cache[pkg] = pkg
6224 if not installed or (built and matched_packages):
6225 # Only enforce visibility on installed packages
6226 # if there is at least one other visible package
6227 # available. By filtering installed masked packages
6228 # here, packages that have been masked since they
6229 # were installed can be automatically downgraded
6230 # to an unmasked version.
6232 if not visible(pkgsettings, pkg):
6234 except portage.exception.InvalidDependString:
6238 # Enable upgrade or downgrade to a version
6239 # with visible KEYWORDS when the installed
6240 # version is masked by KEYWORDS, but never
6241 # reinstall the same exact version only due
6242 # to a KEYWORDS mask.
6243 if built and matched_packages:
6245 different_version = None
6246 for avail_pkg in matched_packages:
6247 if not portage.dep.cpvequal(
6248 pkg.cpv, avail_pkg.cpv):
6249 different_version = avail_pkg
6251 if different_version is not None:
6254 pkgsettings._getMissingKeywords(
6255 pkg.cpv, pkg.metadata):
6258 # If the ebuild no longer exists or it's
6259 # keywords have been dropped, reject built
6260 # instances (installed or binary).
6261 # If --usepkgonly is enabled, assume that
6262 # the ebuild status should be ignored.
6266 pkg.cpv, "ebuild", root_config)
6267 except portage.exception.PackageNotFound:
6270 if not visible(pkgsettings, pkg_eb):
# Ensure USE is calculated for unbuilt packages before any atom with
# USE deps is matched against them.
6273 if not pkg.built and not calculated_use:
6274 # This is avoided whenever possible because
6276 pkgsettings.setcpv(pkg)
6277 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6279 if pkg.cp != atom.cp:
6280 # A cpv can be returned from dbapi.match() as an
6281 # old-style virtual match even in cases when the
6282 # package does not actually PROVIDE the virtual.
6283 # Filter out any such false matches here.
6284 if not atom_set.findAtomForPackage(pkg):
6288 if root == self.target_root:
6290 # Ebuild USE must have been calculated prior
6291 # to this point, in case atoms have USE deps.
# StopIteration from .next() means no argument atom matches this pkg.
6292 myarg = self._iter_atoms_for_pkg(pkg).next()
6293 except StopIteration:
6295 except portage.exception.InvalidDependString:
6297 # masked by corruption
6299 if not installed and myarg:
6300 found_available_arg = True
# Reject unbuilt candidates whose computed USE cannot satisfy the
# atom's USE dependencies.
6302 if atom.use and not pkg.built:
6303 use = pkg.use.enabled
6304 if atom.use.enabled.difference(use):
6306 if atom.use.disabled.intersection(use):
6308 if pkg.cp == atom_cp:
6309 if highest_version is None:
6310 highest_version = pkg
6311 elif pkg > highest_version:
6312 highest_version = pkg
6313 # At this point, we've found the highest visible
6314 # match from the current repo. Any lower versions
6315 # from this repo are ignored, so this so the loop
6316 # will always end with a break statement below
6318 if find_existing_node:
6319 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6322 if portage.dep.match_from_list(atom, [e_pkg]):
6323 if highest_version and \
6324 e_pkg.cp == atom_cp and \
6325 e_pkg < highest_version and \
6326 e_pkg.slot_atom != highest_version.slot_atom:
6327 # There is a higher version available in a
6328 # different slot, so this existing node is
6332 matched_packages.append(e_pkg)
6333 existing_node = e_pkg
6335 # Compare built package to current config and
6336 # reject the built package if necessary.
6337 if built and not installed and \
6338 ("--newuse" in self.myopts or \
6339 "--reinstall" in self.myopts):
6340 iuses = pkg.iuse.all
6341 old_use = pkg.use.enabled
# Prefer the corresponding ebuild's settings when available.
6343 pkgsettings.setcpv(myeb)
6345 pkgsettings.setcpv(pkg)
6346 now_use = pkgsettings["PORTAGE_USE"].split()
6347 forced_flags = set()
6348 forced_flags.update(pkgsettings.useforce)
6349 forced_flags.update(pkgsettings.usemask)
6351 if myeb and not usepkgonly:
6352 cur_iuse = myeb.iuse.all
6353 if self._reinstall_for_flags(forced_flags,
6357 # Compare current config to installed package
6358 # and do not reinstall if possible.
6359 if not installed and \
6360 ("--newuse" in self.myopts or \
6361 "--reinstall" in self.myopts) and \
6362 cpv in vardb.match(atom):
6363 pkgsettings.setcpv(pkg)
6364 forced_flags = set()
6365 forced_flags.update(pkgsettings.useforce)
6366 forced_flags.update(pkgsettings.usemask)
6367 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6368 old_iuse = set(filter_iuse_defaults(
6369 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6370 cur_use = pkgsettings["PORTAGE_USE"].split()
6371 cur_iuse = pkg.iuse.all
6372 reinstall_for_flags = \
6373 self._reinstall_for_flags(
6374 forced_flags, old_use, old_iuse,
6376 if reinstall_for_flags:
# Accept this candidate; remember any USE-flag reinstall reasons.
6380 matched_packages.append(pkg)
6381 if reinstall_for_flags:
6382 self._reinstall_nodes[pkg] = \
6386 if not matched_packages:
6389 if "--debug" in self.myopts:
6390 for pkg in matched_packages:
6391 portage.writemsg("%s %s\n" % \
6392 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6394 # Filter out any old-style virtual matches if they are
6395 # mixed with new-style virtual matches.
6396 cp = portage.dep_getkey(atom)
6397 if len(matched_packages) > 1 and \
6398 "virtual" == portage.catsplit(cp)[0]:
6399 for pkg in matched_packages:
6402 # Got a new-style virtual, so filter
6403 # out any old-style virtuals.
6404 matched_packages = [pkg for pkg in matched_packages \
6408 if len(matched_packages) > 1:
# Keep only candidates carrying the overall best version.
6409 bestmatch = portage.best(
6410 [pkg.cpv for pkg in matched_packages])
6411 matched_packages = [pkg for pkg in matched_packages \
6412 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6414 # ordered by type preference ("ebuild" type is the last resort)
6415 return matched_packages[-1], existing_node
# Graph-restricted selection: only packages already in the graph (or
# installed and not scheduled for replacement) are eligible.
6417 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6419 Select packages that have already been added to the graph or
6420 those that are installed and have not been scheduled for
# The graph-backed dbapi only knows about graph/installed packages.
6423 graph_db = self._graph_trees[root]["porttree"].dbapi
6424 matches = graph_db.match_pkgs(atom)
6427 pkg = matches[-1] # highest match
# Also report whether this slot already has a node in the graph.
6428 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6429 return pkg, in_graph
6431 def _complete_graph(self):
6433 Add any deep dependencies of required sets (args, system, world) that
6434 have not been pulled into the graph yet. This ensures that the graph
6435 is consistent such that initially satisfied deep dependencies are not
6436 broken in the new graph. Initially unsatisfied dependencies are
6437 irrelevant since we only want to avoid breaking dependencies that are
6440 Since this method can consume enough time to disturb users, it is
6441 currently only enabled by the --complete-graph option.
# Bail out when graph completion is pointless or disabled.
6443 if "--buildpkgonly" in self.myopts or \
6444 "recurse" not in self.myparams:
6447 if "complete" not in self.myparams:
6448 # Skip this to avoid consuming enough time to disturb users.
6451 # Put the depgraph into a mode that causes it to only
6452 # select packages that have already been added to the
6453 # graph or those that are installed and have not been
6454 # scheduled for replacement. Also, toggle the "deep"
6455 # parameter so that all dependencies are traversed and
# Swap in the graph-restricted selection methods for the rest of the run.
6457 self._select_atoms = self._select_atoms_from_graph
6458 self._select_package = self._select_pkg_from_graph
6459 already_deep = "deep" in self.myparams
6460 if not already_deep:
6461 self.myparams.add("deep")
6463 for root in self.roots:
6464 required_set_names = self._required_set_names.copy()
6465 if root == self.target_root and \
6466 (already_deep or "empty" in self.myparams):
6467 required_set_names.difference_update(self._sets)
6468 if not required_set_names and not self._ignored_deps:
6470 root_config = self.roots[root]
6471 setconfig = root_config.setconfig
6473 # Reuse existing SetArg instances when available.
6474 for arg in self.digraph.root_nodes():
6475 if not isinstance(arg, SetArg):
6477 if arg.root_config != root_config:
6479 if arg.name in required_set_names:
6481 required_set_names.remove(arg.name)
6482 # Create new SetArg instances only when necessary.
6483 for s in required_set_names:
6484 expanded_set = InternalPackageSet(
6485 initial_atoms=setconfig.getSetAtoms(s))
6486 atom = SETPREFIX + s
6487 args.append(SetArg(arg=atom, set=expanded_set,
6488 root_config=root_config))
6489 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom as a dependency to be (re-)resolved.
6491 for atom in arg.set:
6492 self._dep_stack.append(
6493 Dependency(atom=atom, root=root, parent=arg))
6494 if self._ignored_deps:
6495 self._dep_stack.extend(self._ignored_deps)
6496 self._ignored_deps = []
6497 if not self._create_graph(allow_unsatisfied=True):
6499 # Check the unsatisfied deps to see if any initially satisfied deps
6500 # will become unsatisfied due to an upgrade. Initially unsatisfied
6501 # deps are irrelevant since we only want to avoid breaking deps
6502 # that are initially satisfied.
6503 while self._unsatisfied_deps:
6504 dep = self._unsatisfied_deps.pop()
6505 matches = vardb.match_pkgs(dep.atom)
6507 self._initially_unsatisfied_deps.append(dep)
6509 # An scheduled installation broke a deep dependency.
6510 # Add the installed package to the graph so that it
6511 # will be appropriately reported as a slot collision
6512 # (possibly solvable via backtracking).
6513 pkg = matches[-1] # highest match
6514 if not self._add_pkg(pkg, dep):
# Re-run graph creation to pull in the newly added package's deps.
6516 if not self._create_graph(allow_unsatisfied=True):
6520 def _pkg(self, cpv, type_name, root_config, installed=False):
6522 Get a package instance from the cache, or create a new
6523 one if necessary. Raises KeyError from aux_get if it
6524 failures for some reason (package does not exist or is
# Cache key mirrors Package identity: (type, root, cpv, operation).
6529 operation = "nomerge"
6530 pkg = self._pkg_cache.get(
6531 (type_name, root_config.root, cpv, operation))
6533 tree_type = self.pkg_tree_map[type_name]
6534 db = root_config.trees[tree_type].dbapi
# Use the original (unfiltered) tree's aux-cache key list for metadata.
6535 db_keys = list(self._trees_orig[root_config.root][
6536 tree_type].dbapi._aux_cache_keys)
6538 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
# aux_get failure for this cpv is translated into PackageNotFound.
6540 raise portage.exception.PackageNotFound(cpv)
6541 pkg = Package(cpv=cpv, metadata=metadata,
6542 root_config=root_config, installed=installed)
6543 if type_name == "ebuild":
# Ebuilds need USE/CHOST computed from current settings before use.
6544 settings = self.pkgsettings[root_config.root]
6545 settings.setcpv(pkg)
6546 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6547 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6548 self._pkg_cache[pkg] = pkg
6551 def validate_blockers(self):
6552 """Remove any blockers from the digraph that do not match any of the
6553 packages within the graph. If necessary, create hard deps to ensure
6554 correct merge order such that mutually blocking packages are never
6555 installed simultaneously."""
# Blockers are irrelevant when nothing will actually be merged.
6557 if "--buildpkgonly" in self.myopts or \
6558 "--nodeps" in self.myopts:
6561 #if "deep" in self.myparams:
6563 # Pull in blockers from all installed packages that haven't already
6564 # been pulled into the depgraph. This is not enabled by default
6565 # due to the performance penalty that is incurred by all the
6566 # additional dep_check calls that are required.
6568 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6569 for myroot in self.trees:
6570 vardb = self.trees[myroot]["vartree"].dbapi
6571 portdb = self.trees[myroot]["porttree"].dbapi
6572 pkgsettings = self.pkgsettings[myroot]
6573 final_db = self.mydbapi[myroot]
# stale_cache tracks cache entries not seen this pass; leftovers are
# deleted at the end.
6575 blocker_cache = BlockerCache(myroot, vardb)
6576 stale_cache = set(blocker_cache)
6579 stale_cache.discard(cpv)
6580 pkg_in_graph = self.digraph.contains(pkg)
6582 # Check for masked installed packages. Only warn about
6583 # packages that are in the graph in order to avoid warning
6584 # about those that will be automatically uninstalled during
6585 # the merge process or by --depclean.
6587 if pkg_in_graph and not visible(pkgsettings, pkg):
6588 self._masked_installed.add(pkg)
6590 blocker_atoms = None
# Gather any blockers already recorded for this package in the graph.
6596 self._blocker_parents.child_nodes(pkg))
6601 self._irrelevant_blockers.child_nodes(pkg))
6604 if blockers is not None:
6605 blockers = set(str(blocker.atom) \
6606 for blocker in blockers)
6608 # If this node has any blockers, create a "nomerge"
6609 # node for it so that they can be enforced.
6610 self.spinner.update()
# A cache entry is only valid if its COUNTER matches the installed
# package's COUNTER (i.e. same install event).
6611 blocker_data = blocker_cache.get(cpv)
6612 if blocker_data is not None and \
6613 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6616 # If blocker data from the graph is available, use
6617 # it to validate the cache and update the cache if
6619 if blocker_data is not None and \
6620 blockers is not None:
6621 if not blockers.symmetric_difference(
6622 blocker_data.atoms):
6626 if blocker_data is None and \
6627 blockers is not None:
6628 # Re-use the blockers from the graph.
6629 blocker_atoms = sorted(blockers)
6630 counter = long(pkg.metadata["COUNTER"])
6632 blocker_cache.BlockerData(counter, blocker_atoms)
6633 blocker_cache[pkg.cpv] = blocker_data
6637 blocker_atoms = blocker_data.atoms
6639 # Use aux_get() to trigger FakeVartree global
6640 # updates on *DEPEND when appropriate.
6641 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6642 # It is crucial to pass in final_db here in order to
6643 # optimize dep_check calls by eliminating atoms via
6644 # dep_wordreduce and dep_eval calls.
# Strict mode is relaxed around dep_check and restored afterward.
6646 portage.dep._dep_check_strict = False
6648 success, atoms = portage.dep_check(depstr,
6649 final_db, pkgsettings, myuse=pkg.use.enabled,
6650 trees=self._graph_trees, myroot=myroot)
6651 except Exception, e:
6652 if isinstance(e, SystemExit):
6654 # This is helpful, for example, if a ValueError
6655 # is thrown from cpv_expand due to multiple
6656 # matches (this can happen if an atom lacks a
6658 show_invalid_depstring_notice(
6659 pkg, depstr, str(e))
6663 portage.dep._dep_check_strict = True
# If the package is scheduled for replacement anyway, a bad depstring
# is tolerated rather than reported.
6665 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6666 if replacement_pkg and \
6667 replacement_pkg[0].operation == "merge":
6668 # This package is being replaced anyway, so
6669 # ignore invalid dependencies so as not to
6670 # annoy the user too much (otherwise they'd be
6671 # forced to manually unmerge it first).
6673 show_invalid_depstring_notice(pkg, depstr, atoms)
# Blocker atoms are the "!"-prefixed atoms from the resolved deps.
6675 blocker_atoms = [myatom for myatom in atoms \
6676 if myatom.startswith("!")]
6677 blocker_atoms.sort()
6678 counter = long(pkg.metadata["COUNTER"])
6679 blocker_cache[cpv] = \
6680 blocker_cache.BlockerData(counter, blocker_atoms)
6683 for atom in blocker_atoms:
6684 blocker = Blocker(atom=portage.dep.Atom(atom),
6685 eapi=pkg.metadata["EAPI"], root=myroot)
6686 self._blocker_parents.add(blocker, pkg)
6687 except portage.exception.InvalidAtom, e:
6688 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6689 show_invalid_depstring_notice(
6690 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer installed, then persist.
6692 for cpv in stale_cache:
6693 del blocker_cache[cpv]
6694 blocker_cache.flush()
6697 # Discard any "uninstall" tasks scheduled by previous calls
6698 # to this method, since those tasks may not make sense given
6699 # the current graph state.
6700 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6701 if previous_uninstall_tasks:
6702 self._blocker_uninstalls = digraph()
6703 self.digraph.difference_update(previous_uninstall_tasks)
# Second phase: resolve each blocker against initial (installed) and
# final (post-merge) package sets.
6705 for blocker in self._blocker_parents.leaf_nodes():
6706 self.spinner.update()
6707 root_config = self.roots[blocker.root]
6708 virtuals = root_config.settings.getvirtuals()
6709 myroot = blocker.root
6710 initial_db = self.trees[myroot]["vartree"].dbapi
6711 final_db = self.mydbapi[myroot]
6713 provider_virtual = False
# Old-style virtual blockers expand to one atom per provider.
6714 if blocker.cp in virtuals and \
6715 not self._have_new_virt(blocker.root, blocker.cp):
6716 provider_virtual = True
6718 if provider_virtual:
6720 for provider_entry in virtuals[blocker.cp]:
6722 portage.dep_getkey(provider_entry)
6723 atoms.append(blocker.atom.replace(
6724 blocker.cp, provider_cp))
6726 atoms = [blocker.atom]
6728 blocked_initial = []
6730 blocked_initial.extend(initial_db.match_pkgs(atom))
6734 blocked_final.extend(final_db.match_pkgs(atom))
# A blocker matching nothing in either state is irrelevant; prune it
# and any parents left without blockers.
6736 if not blocked_initial and not blocked_final:
6737 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6738 self._blocker_parents.remove(blocker)
6739 # Discard any parents that don't have any more blockers.
6740 for pkg in parent_pkgs:
6741 self._irrelevant_blockers.add(blocker, pkg)
6742 if not self._blocker_parents.child_nodes(pkg):
6743 self._blocker_parents.remove(pkg)
6745 for parent in self._blocker_parents.parent_nodes(blocker):
6746 unresolved_blocks = False
6747 depends_on_order = set()
6748 for pkg in blocked_initial:
6749 if pkg.slot_atom == parent.slot_atom:
6750 # TODO: Support blocks within slots in cases where it
6751 # might make sense. For example, a new version might
6752 # require that the old version be uninstalled at build
6755 if parent.installed:
6756 # Two currently installed packages conflict with
6757 # eachother. Ignore this case since the damage
6758 # is already done and this would be likely to
6759 # confuse users if displayed like a normal blocker.
6762 self._blocked_pkgs.add(pkg, blocker)
6764 if parent.operation == "merge":
6765 # Maybe the blocked package can be replaced or simply
6766 # unmerged to resolve this block.
6767 depends_on_order.add((pkg, parent))
6769 # None of the above blocker resolutions techniques apply,
6770 # so apparently this one is unresolvable.
6771 unresolved_blocks = True
6772 for pkg in blocked_final:
6773 if pkg.slot_atom == parent.slot_atom:
6774 # TODO: Support blocks within slots.
6776 if parent.operation == "nomerge" and \
6777 pkg.operation == "nomerge":
6778 # This blocker will be handled the next time that a
6779 # merge of either package is triggered.
6782 self._blocked_pkgs.add(pkg, blocker)
6784 # Maybe the blocking package can be
6785 # unmerged to resolve this block.
6786 if parent.operation == "merge" and pkg.installed:
6787 depends_on_order.add((pkg, parent))
6789 elif parent.operation == "nomerge":
6790 depends_on_order.add((parent, pkg))
6792 # None of the above blocker resolutions techniques apply,
6793 # so apparently this one is unresolvable.
6794 unresolved_blocks = True
6796 # Make sure we don't unmerge any package that have been pulled
6798 if not unresolved_blocks and depends_on_order:
6799 for inst_pkg, inst_task in depends_on_order:
6800 if self.digraph.contains(inst_pkg) and \
6801 self.digraph.parent_nodes(inst_pkg):
6802 unresolved_blocks = True
# Resolvable order-dependent blocks: schedule uninstall tasks with a
# hard ordering edge so the blocked package goes first.
6805 if not unresolved_blocks and depends_on_order:
6806 for inst_pkg, inst_task in depends_on_order:
6807 uninst_task = Package(built=inst_pkg.built,
6808 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6809 metadata=inst_pkg.metadata,
6810 operation="uninstall",
6811 root_config=inst_pkg.root_config,
6812 type_name=inst_pkg.type_name)
6813 self._pkg_cache[uninst_task] = uninst_task
6814 # Enforce correct merge order with a hard dep.
6815 self.digraph.addnode(uninst_task, inst_task,
6816 priority=BlockerDepPriority.instance)
6817 # Count references to this blocker so that it can be
6818 # invalidated after nodes referencing it have been
6820 self._blocker_uninstalls.addnode(uninst_task, blocker)
6821 if not unresolved_blocks and not depends_on_order:
6822 self._irrelevant_blockers.add(blocker, parent)
6823 self._blocker_parents.remove_edge(blocker, parent)
6824 if not self._blocker_parents.parent_nodes(blocker):
6825 self._blocker_parents.remove(blocker)
6826 if not self._blocker_parents.child_nodes(parent):
6827 self._blocker_parents.remove(parent)
6828 if unresolved_blocks:
6829 self._unsolvable_blockers.add(blocker, parent)
# Decide whether unresolved blocker conflicts may be tolerated for this
# run: scans self.myopts for options under which nothing is actually
# merged to the live filesystem (so blockers cannot cause real file
# collisions).
# NOTE(review): the numbered listing omits lines 6834 and 6838+ here;
# presumably the loop returns True on a match and False otherwise --
# confirm against the complete file.
6833 def _accept_blocker_conflicts(self):
6835 for x in ("--buildpkgonly", "--fetchonly",
6836 "--fetch-all-uri", "--nodeps"):
6837 if x in self.myopts:
# Sort mygraph.order in place so that preferable merge candidates come
# first.  node_info maps each node to its parent (reference) count; the
# comparator orders uninstall operations and deep system runtime deps
# ahead of ordinary nodes, and otherwise sorts by reference count from
# highest to lowest (node_info[node2] - node_info[node1]).
# NOTE(review): this listing has gaps (e.g. 6843, 6846-6848, 6857-6859,
# 6868-6871); the comparator's early-return branches for the
# uninstall/system cases are not visible here -- confirm against the
# complete file.
6842 def _merge_order_bias(self, mygraph):
6844 For optimal leaf node selection, promote deep system runtime deps and
6845 order nodes from highest to lowest overall reference count.
6849 for node in mygraph.order:
6850 node_info[node] = len(mygraph.parent_nodes(node))
6851 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6853 def cmp_merge_preference(node1, node2):
6855 if node1.operation == 'uninstall':
6856 if node2.operation == 'uninstall':
# Both-uninstall and single-uninstall outcomes are decided in the
# omitted lines above/below.
6860 if node2.operation == 'uninstall':
6861 if node1.operation == 'uninstall':
6865 node1_sys = node1 in deep_system_deps
6866 node2_sys = node2 in deep_system_deps
6867 if node1_sys != node2_sys:
# Fallback ordering: more-referenced nodes sort earlier.
6872 return node_info[node2] - node_info[node1]
6874 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge task list, computing it on
# first use.  Loops until the cache is populated because
# _serialize_tasks() may raise _serialize_tasks_retry (after which the
# conflict resolution / serialization is attempted again).
# NOTE(review): `reversed` shadows the builtin of the same name -- a
# known wart; the lines that would use it (6887-6890, presumably
# reversing retlist before returning) are missing from this listing.
6876 def altlist(self, reversed=False):
6878 while self._serialized_tasks_cache is None:
6879 self._resolve_conflicts()
# The try: line (6880) is omitted in this listing; the assignment below
# is guarded against _serialize_tasks_retry.
6881 self._serialized_tasks_cache, self._scheduler_graph = \
6882 self._serialize_tasks()
6883 except self._serialize_tasks_retry:
# Return a defensive copy so callers cannot mutate the cache.
6886 retlist = self._serialized_tasks_cache[:]
# Accessor for the scheduler's variant of the dependency graph.
# Lazily ensures the graph exists (the omitted line 6906 presumably
# calls altlist() or similar), then strips depgraph back-references
# from its nodes via break_refs() before handing it out.
# NOTE(review): the docstring delimiters and several lines (6892, 6900,
# 6904, 6906) are missing from this listing.
6891 def schedulerGraph(self):
6893 The scheduler graph is identical to the normal one except that
6894 uninstall edges are reversed in specific cases that require
6895 conflicting packages to be temporarily installed simultaneously.
6896 This is intended for use by the Scheduler in it's parallelization
6897 logic. It ensures that temporary simultaneous installation of
6898 conflicting packages is avoided when appropriate (especially for
6899 !!atom blockers), but allowed in specific cases that require it.
6901 Note that this method calls break_refs() which alters the state of
6902 internal Package instances such that this depgraph instance should
6903 not be used to perform any more calculations.
6905 if self._scheduler_graph is None:
# After this call the depgraph must not be used for further resolution.
6907 self.break_refs(self._scheduler_graph.order)
6908 return self._scheduler_graph
# Detach the given Package nodes from this depgraph so they can be held
# without keeping the whole depgraph (and its FakeVartree) alive on the
# heap.  For each node with a root_config attribute, the FakeVartree-
# backed RootConfig is swapped for the original one from
# self._trees_orig.
# NOTE(review): the `for node in nodes:` loop header (line 6917) and
# the docstring delimiters are missing from this listing.
6910 def break_refs(self, nodes):
6912 Take a mergelist like that returned from self.altlist() and
6913 break any references that lead back to the depgraph. This is
6914 useful if you want to hold references to packages without
6915 also holding the depgraph on the heap.
6918 if hasattr(node, "root_config"):
6919 # The FakeVartree references the _package_cache which
6920 # references the depgraph. So that Package instances don't
6921 # hold the depgraph and FakeVartree on the heap, replace
6922 # the RootConfig that references the FakeVartree with the
6923 # original RootConfig instance which references the actual
6925 node.root_config = \
6926 self._trees_orig[node.root_config.root]["root_config"]
def _resolve_conflicts(self):
	"""Bring the dependency graph into a conflict-free state.

	Completes the graph and validates blockers -- a failure in either
	step is unexpected at this point and surfaces as an internal
	error -- and then processes any recorded slot conflicts.
	"""
	for step_succeeded in (self._complete_graph, self.validate_blockers):
		if not step_succeeded():
			raise self._unknown_internal_error()

	if self._slot_collision_info:
		self._process_slot_conflicts()
# Serialize the dependency graph into a linear merge order (retlist)
# and the companion scheduler_graph.  Overall flow visible here:
#   1. prune irrelevant "nomerge" root nodes from a working copy;
#   2. bias the node order via _merge_order_bias();
#   3. give sys-apps/portage special early treatment on the running
#      root (asap_nodes / runtime_deps);
#   4. repeatedly select leaf nodes, progressively loosening the
#      ignored-priority criteria and, when stuck, scheduling Uninstall
#      tasks to break blocker conflicts;
#   5. emit solved/unsolvable Blocker entries alongside the tasks and
#      raise _serialize_tasks_retry or _unknown_internal_error when the
#      result cannot be accepted.
# NOTE(review): the numbered listing is heavily gapped in this method
# (else-branches, break/continue/return lines, and some assignments
# such as asap_nodes/retlist initialization are not visible); the
# comments below only describe what the visible lines establish.
6938 def _serialize_tasks(self):
6940 if "--debug" in self.myopts:
6941 writemsg("\ndigraph:\n\n", noiselevel=-1)
6942 self.digraph.debug_print()
6943 writemsg("\n", noiselevel=-1)
6945 scheduler_graph = self.digraph.copy()
6946 mygraph=self.digraph.copy()
6947 # Prune "nomerge" root nodes if nothing depends on them, since
6948 # otherwise they slow down merge order calculation. Don't remove
6949 # non-root nodes since they help optimize merge order in some cases
6950 # such as revdep-rebuild.
6951 removed_nodes = set()
6953 for node in mygraph.root_nodes():
6954 if not isinstance(node, Package) or \
6955 node.installed or node.onlydeps:
6956 removed_nodes.add(node)
6958 self.spinner.update()
6959 mygraph.difference_update(removed_nodes)
6960 if not removed_nodes:
6962 removed_nodes.clear()
6963 self._merge_order_bias(mygraph)
6964 def cmp_circular_bias(n1, n2):
6966 RDEPEND is stronger than PDEPEND and this function
6967 measures such a strength bias within a circular
6968 dependency relationship.
6970 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6971 ignore_priority=priority_range.ignore_medium_soft)
6972 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6973 ignore_priority=priority_range.ignore_medium_soft)
6974 if n1_n2_medium == n2_n1_medium:
6979 myblocker_uninstalls = self._blocker_uninstalls.copy()
6981 # Contains uninstall tasks that have been scheduled to
6982 # occur after overlapping blockers have been installed.
6983 scheduled_uninstalls = set()
6984 # Contains any Uninstall tasks that have been ignored
6985 # in order to avoid the circular deps code path. These
6986 # correspond to blocker conflicts that could not be
6988 ignored_uninstall_tasks = set()
6989 have_uninstall_task = False
6990 complete = "complete" in self.myparams
6993 def get_nodes(**kwargs):
6995 Returns leaf nodes excluding Uninstall instances
6996 since those should be executed as late as possible.
6998 return [node for node in mygraph.leaf_nodes(**kwargs) \
6999 if isinstance(node, Package) and \
7000 (node.operation != "uninstall" or \
7001 node in scheduled_uninstalls)]
7003 # sys-apps/portage needs special treatment if ROOT="/"
7004 running_root = self._running_root.root
7005 from portage.const import PORTAGE_PACKAGE_ATOM
7006 runtime_deps = InternalPackageSet(
7007 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7008 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7009 PORTAGE_PACKAGE_ATOM)
7010 replacement_portage = self.mydbapi[running_root].match_pkgs(
7011 PORTAGE_PACKAGE_ATOM)
7014 running_portage = running_portage[0]
7016 running_portage = None
7018 if replacement_portage:
7019 replacement_portage = replacement_portage[0]
7021 replacement_portage = None
7023 if replacement_portage == running_portage:
7024 replacement_portage = None
7026 if replacement_portage is not None:
7027 # update from running_portage to replacement_portage asap
7028 asap_nodes.append(replacement_portage)
7030 if running_portage is not None:
7032 portage_rdepend = self._select_atoms_highest_available(
7033 running_root, running_portage.metadata["RDEPEND"],
7034 myuse=running_portage.use.enabled,
7035 parent=running_portage, strict=False)
7036 except portage.exception.InvalidDependString, e:
7037 portage.writemsg("!!! Invalid RDEPEND in " + \
7038 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7039 (running_root, running_portage.cpv, e), noiselevel=-1)
7041 portage_rdepend = []
7042 runtime_deps.update(atom for atom in portage_rdepend \
7043 if not atom.startswith("!"))
7045 def gather_deps(ignore_priority, mergeable_nodes,
7046 selected_nodes, node):
7048 Recursively gather a group of nodes that RDEPEND on
7049 eachother. This ensures that they are merged as a group
7050 and get their RDEPENDs satisfied as soon as possible.
7052 if node in selected_nodes:
7054 if node not in mergeable_nodes:
7056 if node == replacement_portage and \
7057 mygraph.child_nodes(node,
7058 ignore_priority=priority_range.ignore_medium_soft):
7059 # Make sure that portage always has all of it's
7060 # RDEPENDs installed first.
7062 selected_nodes.add(node)
7063 for child in mygraph.child_nodes(node,
7064 ignore_priority=ignore_priority):
7065 if not gather_deps(ignore_priority,
7066 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally always ignore blocker-uninstall
# edges (BlockerDepPriority.instance).
7070 def ignore_uninst_or_med(priority):
7071 if priority is BlockerDepPriority.instance:
7073 return priority_range.ignore_medium(priority)
7075 def ignore_uninst_or_med_soft(priority):
7076 if priority is BlockerDepPriority.instance:
7078 return priority_range.ignore_medium_soft(priority)
7080 tree_mode = "--tree" in self.myopts
7081 # Tracks whether or not the current iteration should prefer asap_nodes
7082 # if available. This is set to False when the previous iteration
7083 # failed to select any nodes. It is reset whenever nodes are
7084 # successfully selected.
7087 # Controls whether or not the current iteration should drop edges that
7088 # are "satisfied" by installed packages, in order to solve circular
7089 # dependencies. The deep runtime dependencies of installed packages are
7090 # not checked in this case (bug #199856), so it must be avoided
7091 # whenever possible.
7092 drop_satisfied = False
7094 # State of variables for successive iterations that loosen the
7095 # criteria for node selection.
7097 # iteration prefer_asap drop_satisfied
7102 # If no nodes are selected on the last iteration, it is due to
7103 # unresolved blockers or circular dependencies.
# Main selection loop: peel leaf nodes off the working graph until it
# is empty, loosening the criteria step by step when nothing can be
# selected.
7105 while not mygraph.empty():
7106 self.spinner.update()
7107 selected_nodes = None
7108 ignore_priority = None
7109 if drop_satisfied or (prefer_asap and asap_nodes):
7110 priority_range = DepPrioritySatisfiedRange
7112 priority_range = DepPriorityNormalRange
7113 if prefer_asap and asap_nodes:
7114 # ASAP nodes are merged before their soft deps. Go ahead and
7115 # select root nodes here if necessary, since it's typical for
7116 # the parent to have been removed from the graph already.
7117 asap_nodes = [node for node in asap_nodes \
7118 if mygraph.contains(node)]
7119 for node in asap_nodes:
7120 if not mygraph.child_nodes(node,
7121 ignore_priority=priority_range.ignore_soft):
7122 selected_nodes = [node]
7123 asap_nodes.remove(node)
7125 if not selected_nodes and \
7126 not (prefer_asap and asap_nodes):
7127 for i in xrange(priority_range.NONE,
7128 priority_range.MEDIUM_SOFT + 1):
7129 ignore_priority = priority_range.ignore_priority[i]
7130 nodes = get_nodes(ignore_priority=ignore_priority)
7132 # If there is a mix of uninstall nodes with other
7133 # types, save the uninstall nodes for later since
7134 # sometimes a merge node will render an uninstall
7135 # node unnecessary (due to occupying the same slot),
7136 # and we want to avoid executing a separate uninstall
7137 # task in that case.
7139 good_uninstalls = []
7140 with_some_uninstalls_excluded = []
7142 if node.operation == "uninstall":
7143 slot_node = self.mydbapi[node.root
7144 ].match_pkgs(node.slot_atom)
7146 slot_node[0].operation == "merge":
7148 good_uninstalls.append(node)
7149 with_some_uninstalls_excluded.append(node)
7151 nodes = good_uninstalls
7152 elif with_some_uninstalls_excluded:
7153 nodes = with_some_uninstalls_excluded
7157 if ignore_priority is None and not tree_mode:
7158 # Greedily pop all of these nodes since no
7159 # relationship has been ignored. This optimization
7160 # destroys --tree output, so it's disabled in tree
7162 selected_nodes = nodes
7164 # For optimal merge order:
7165 # * Only pop one node.
7166 # * Removing a root node (node without a parent)
7167 # will not produce a leaf node, so avoid it.
7168 # * It's normal for a selected uninstall to be a
7169 # root node, so don't check them for parents.
7171 if node.operation == "uninstall" or \
7172 mygraph.parent_nodes(node):
7173 selected_nodes = [node]
# Fallback: look for a mutually-RDEPENDing group that can be merged
# together (gather_deps).
7179 if not selected_nodes:
7180 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7182 mergeable_nodes = set(nodes)
7183 if prefer_asap and asap_nodes:
7185 for i in xrange(priority_range.SOFT,
7186 priority_range.MEDIUM_SOFT + 1):
7187 ignore_priority = priority_range.ignore_priority[i]
7189 if not mygraph.parent_nodes(node):
7191 selected_nodes = set()
7192 if gather_deps(ignore_priority,
7193 mergeable_nodes, selected_nodes, node):
7196 selected_nodes = None
7200 if prefer_asap and asap_nodes and not selected_nodes:
7201 # We failed to find any asap nodes to merge, so ignore
7202 # them for the next iteration.
7206 if selected_nodes and ignore_priority is not None:
7207 # Try to merge ignored medium_soft deps as soon as possible
7208 # if they're not satisfied by installed packages.
7209 for node in selected_nodes:
7210 children = set(mygraph.child_nodes(node))
7211 soft = children.difference(
7212 mygraph.child_nodes(node,
7213 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7214 medium_soft = children.difference(
7215 mygraph.child_nodes(node,
7217 DepPrioritySatisfiedRange.ignore_medium_soft))
7218 medium_soft.difference_update(soft)
7219 for child in medium_soft:
7220 if child in selected_nodes:
7222 if child in asap_nodes:
7224 asap_nodes.append(child)
7226 if selected_nodes and len(selected_nodes) > 1:
7227 if not isinstance(selected_nodes, list):
7228 selected_nodes = list(selected_nodes)
7229 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
# Still stuck: try to schedule an Uninstall task to break a blocker
# conflict, with sanity checks so system/world packages are not
# removed inappropriately.
7231 if not selected_nodes and not myblocker_uninstalls.is_empty():
7232 # An Uninstall task needs to be executed in order to
7233 # avoid conflict if possible.
7236 priority_range = DepPrioritySatisfiedRange
7238 priority_range = DepPriorityNormalRange
7240 mergeable_nodes = get_nodes(
7241 ignore_priority=ignore_uninst_or_med)
7243 min_parent_deps = None
7245 for task in myblocker_uninstalls.leaf_nodes():
7246 # Do some sanity checks so that system or world packages
7247 # don't get uninstalled inappropriately here (only really
7248 # necessary when --complete-graph has not been enabled).
7250 if task in ignored_uninstall_tasks:
7253 if task in scheduled_uninstalls:
7254 # It's been scheduled but it hasn't
7255 # been executed yet due to dependence
7256 # on installation of blocking packages.
7259 root_config = self.roots[task.root]
7260 inst_pkg = self._pkg_cache[
7261 ("installed", task.root, task.cpv, "nomerge")]
7263 if self.digraph.contains(inst_pkg):
7266 forbid_overlap = False
7267 heuristic_overlap = False
7268 for blocker in myblocker_uninstalls.parent_nodes(task):
7269 if blocker.eapi in ("0", "1"):
7270 heuristic_overlap = True
7271 elif blocker.atom.blocker.overlap.forbid:
7272 forbid_overlap = True
7274 if forbid_overlap and running_root == task.root:
7277 if heuristic_overlap and running_root == task.root:
7278 # Never uninstall sys-apps/portage or it's essential
7279 # dependencies, except through replacement.
7281 runtime_dep_atoms = \
7282 list(runtime_deps.iterAtomsForPackage(task))
7283 except portage.exception.InvalidDependString, e:
7284 portage.writemsg("!!! Invalid PROVIDE in " + \
7285 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7286 (task.root, task.cpv, e), noiselevel=-1)
7290 # Don't uninstall a runtime dep if it appears
7291 # to be the only suitable one installed.
7293 vardb = root_config.trees["vartree"].dbapi
7294 for atom in runtime_dep_atoms:
7295 other_version = None
7296 for pkg in vardb.match_pkgs(atom):
7297 if pkg.cpv == task.cpv and \
7298 pkg.metadata["COUNTER"] == \
7299 task.metadata["COUNTER"]:
7303 if other_version is None:
7309 # For packages in the system set, don't take
7310 # any chances. If the conflict can't be resolved
7311 # by a normal replacement operation then abort.
7314 for atom in root_config.sets[
7315 "system"].iterAtomsForPackage(task):
7318 except portage.exception.InvalidDependString, e:
7319 portage.writemsg("!!! Invalid PROVIDE in " + \
7320 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7321 (task.root, task.cpv, e), noiselevel=-1)
7327 # Note that the world check isn't always
7328 # necessary since self._complete_graph() will
7329 # add all packages from the system and world sets to the
7330 # graph. This just allows unresolved conflicts to be
7331 # detected as early as possible, which makes it possible
7332 # to avoid calling self._complete_graph() when it is
7333 # unnecessary due to blockers triggering an abortion.
7335 # For packages in the world set, go ahead an uninstall
7336 # when necessary, as long as the atom will be satisfied
7337 # in the final state.
7338 graph_db = self.mydbapi[task.root]
7341 for atom in root_config.sets[
7342 "world"].iterAtomsForPackage(task):
7344 for pkg in graph_db.match_pkgs(atom):
7351 self._blocked_world_pkgs[inst_pkg] = atom
7353 except portage.exception.InvalidDependString, e:
7354 portage.writemsg("!!! Invalid PROVIDE in " + \
7355 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7356 (task.root, task.cpv, e), noiselevel=-1)
7362 # Check the deps of parent nodes to ensure that
7363 # the chosen task produces a leaf node. Maybe
7364 # this can be optimized some more to make the
7365 # best possible choice, but the current algorithm
7366 # is simple and should be near optimal for most
7368 mergeable_parent = False
7370 for parent in mygraph.parent_nodes(task):
7371 parent_deps.update(mygraph.child_nodes(parent,
7372 ignore_priority=priority_range.ignore_medium_soft))
7373 if parent in mergeable_nodes and \
7374 gather_deps(ignore_uninst_or_med_soft,
7375 mergeable_nodes, set(), parent):
7376 mergeable_parent = True
7378 if not mergeable_parent:
7381 parent_deps.remove(task)
7382 if min_parent_deps is None or \
7383 len(parent_deps) < min_parent_deps:
7384 min_parent_deps = len(parent_deps)
7387 if uninst_task is not None:
7388 # The uninstall is performed only after blocking
7389 # packages have been merged on top of it. File
7390 # collisions between blocking packages are detected
7391 # and removed from the list of files to be uninstalled.
7392 scheduled_uninstalls.add(uninst_task)
7393 parent_nodes = mygraph.parent_nodes(uninst_task)
7395 # Reverse the parent -> uninstall edges since we want
7396 # to do the uninstall after blocking packages have
7397 # been merged on top of it.
7398 mygraph.remove(uninst_task)
7399 for blocked_pkg in parent_nodes:
7400 mygraph.add(blocked_pkg, uninst_task,
7401 priority=BlockerDepPriority.instance)
7402 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7403 scheduler_graph.add(blocked_pkg, uninst_task,
7404 priority=BlockerDepPriority.instance)
7406 # Reset the state variables for leaf node selection and
7407 # continue trying to select leaf nodes.
7409 drop_satisfied = False
7412 if not selected_nodes:
7413 # Only select root nodes as a last resort. This case should
7414 # only trigger when the graph is nearly empty and the only
7415 # remaining nodes are isolated (no parents or children). Since
7416 # the nodes must be isolated, ignore_priority is not needed.
7417 selected_nodes = get_nodes()
7419 if not selected_nodes and not drop_satisfied:
7420 drop_satisfied = True
7423 if not selected_nodes and not myblocker_uninstalls.is_empty():
7424 # If possible, drop an uninstall task here in order to avoid
7425 # the circular deps code path. The corresponding blocker will
7426 # still be counted as an unresolved conflict.
7428 for node in myblocker_uninstalls.leaf_nodes():
7430 mygraph.remove(node)
7435 ignored_uninstall_tasks.add(node)
7438 if uninst_task is not None:
7439 # Reset the state variables for leaf node selection and
7440 # continue trying to select leaf nodes.
7442 drop_satisfied = False
7445 if not selected_nodes:
7446 self._circular_deps_for_display = mygraph
7447 raise self._unknown_internal_error()
7449 # At this point, we've succeeded in selecting one or more nodes, so
7450 # reset state variables for leaf node selection.
7452 drop_satisfied = False
7454 mygraph.difference_update(selected_nodes)
7456 for node in selected_nodes:
7457 if isinstance(node, Package) and \
7458 node.operation == "nomerge":
7461 # Handle interactions between blockers
7462 # and uninstallation tasks.
7463 solved_blockers = set()
7465 if isinstance(node, Package) and \
7466 "uninstall" == node.operation:
7467 have_uninstall_task = True
7470 vardb = self.trees[node.root]["vartree"].dbapi
7471 previous_cpv = vardb.match(node.slot_atom)
7473 # The package will be replaced by this one, so remove
7474 # the corresponding Uninstall task if necessary.
7475 previous_cpv = previous_cpv[0]
7477 ("installed", node.root, previous_cpv, "uninstall")
7479 mygraph.remove(uninst_task)
7483 if uninst_task is not None and \
7484 uninst_task not in ignored_uninstall_tasks and \
7485 myblocker_uninstalls.contains(uninst_task):
7486 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7487 myblocker_uninstalls.remove(uninst_task)
7488 # Discard any blockers that this Uninstall solves.
7489 for blocker in blocker_nodes:
7490 if not myblocker_uninstalls.child_nodes(blocker):
7491 myblocker_uninstalls.remove(blocker)
7492 solved_blockers.add(blocker)
7494 retlist.append(node)
7496 if (isinstance(node, Package) and \
7497 "uninstall" == node.operation) or \
7498 (uninst_task is not None and \
7499 uninst_task in scheduled_uninstalls):
7500 # Include satisfied blockers in the merge list
7501 # since the user might be interested and also
7502 # it serves as an indicator that blocking packages
7503 # will be temporarily installed simultaneously.
7504 for blocker in solved_blockers:
7505 retlist.append(Blocker(atom=blocker.atom,
7506 root=blocker.root, eapi=blocker.eapi,
7509 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7510 for node in myblocker_uninstalls.root_nodes():
7511 unsolvable_blockers.add(node)
7513 for blocker in unsolvable_blockers:
7514 retlist.append(blocker)
7516 # If any Uninstall tasks need to be executed in order
7517 # to avoid a conflict, complete the graph with any
7518 # dependencies that may have been initially
7519 # neglected (to ensure that unsafe Uninstall tasks
7520 # are properly identified and blocked from execution).
7521 if have_uninstall_task and \
7523 not unsolvable_blockers:
7524 self.myparams.add("complete")
7525 raise self._serialize_tasks_retry("")
7527 if unsolvable_blockers and \
7528 not self._accept_blocker_conflicts():
7529 self._unsatisfied_blockers_for_display = unsolvable_blockers
7530 self._serialized_tasks_cache = retlist[:]
7531 self._scheduler_graph = scheduler_graph
7532 raise self._unknown_internal_error()
7534 if self._slot_collision_info and \
7535 not self._accept_blocker_conflicts():
7536 self._serialized_tasks_cache = retlist[:]
7537 self._scheduler_graph = scheduler_graph
7538 raise self._unknown_internal_error()
7540 return retlist, scheduler_graph
# Report a circular-dependency failure to the user: prune acyclic root
# nodes to reduce noise, then display the remaining cycle members in
# --tree mode (forcing --tree on, clearing --quiet/--verbose) followed
# by a debug print of the graph and advice about USE flags.
# NOTE(review): this mutates self.myopts permanently; acceptable only
# because the resolver aborts after this display.  Several lines are
# missing from the listing (e.g. the pruning loop head and the
# display_order initialization).
7542 def _show_circular_deps(self, mygraph):
7543 # No leaf nodes are available, so we have a circular
7544 # dependency panic situation. Reduce the noise level to a
7545 # minimum via repeated elimination of root nodes since they
7546 # have no parents and thus can not be part of a cycle.
7548 root_nodes = mygraph.root_nodes(
7549 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7552 mygraph.difference_update(root_nodes)
7553 # Display the USE flags that are enabled on nodes that are part
7554 # of dependency cycles in case that helps the user decide to
7555 # disable some of them.
7557 tempgraph = mygraph.copy()
7558 while not tempgraph.empty():
7559 nodes = tempgraph.leaf_nodes()
7561 node = tempgraph.order[0]
7564 display_order.append(node)
7565 tempgraph.remove(node)
7566 display_order.reverse()
# Force tree display so the cycle structure is visible.
7567 self.myopts.pop("--quiet", None)
7568 self.myopts.pop("--verbose", None)
7569 self.myopts["--tree"] = True
7570 portage.writemsg("\n\n", noiselevel=-1)
7571 self.display(display_order)
7572 prefix = colorize("BAD", " * ")
7573 portage.writemsg("\n", noiselevel=-1)
7574 portage.writemsg(prefix + "Error: circular dependencies:\n",
7576 portage.writemsg("\n", noiselevel=-1)
7577 mygraph.debug_print()
7578 portage.writemsg("\n", noiselevel=-1)
7579 portage.writemsg(prefix + "Note that circular dependencies " + \
7580 "can often be avoided by temporarily\n", noiselevel=-1)
7581 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7582 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""Display the serialized merge list, unless an identical list
	(or its exact reverse) has already been shown via display().
	"""
	cached = self._serialized_tasks_cache
	if cached is None:
		return

	shown = self._displayed_list
	if shown and (shown == cached or shown == list(reversed(cached))):
		# Already displayed in either orientation; avoid duplicate output.
		return

	display_list = cached[:]
	if "--tree" in self.myopts:
		# Tree output lists dependencies above their parents, so the
		# merge list is shown in reverse order.
		display_list.reverse()
	self.display(display_list)
# Explain unsatisfied blockers to the user: after re-showing the merge
# list, collect the conflicting packages together with the parent
# atoms that pulled them in, prune packages only referenced by other
# conflict members, and print a capped number of "pulled in by" parent
# lines per package, ending with a link to the blocker docs unless
# --quiet.
# NOTE(review): several lines are missing from this listing (e.g. the
# conflict_pkgs/msg/indent initializations, the max_parents value, and
# various break statements).
7595 def _show_unsatisfied_blockers(self, blockers):
7596 self._show_merge_list()
7597 msg = "Error: The above package list contains " + \
7598 "packages which cannot be installed " + \
7599 "at the same time on the same system."
7600 prefix = colorize("BAD", " * ")
7601 from textwrap import wrap
7602 portage.writemsg("\n", noiselevel=-1)
7603 for line in wrap(msg, 70):
7604 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7606 # Display the conflicting packages along with the packages
7607 # that pulled them in. This is helpful for troubleshooting
7608 # cases in which blockers don't solve automatically and
7609 # the reasons are not apparent from the normal merge list
7613 for blocker in blockers:
7614 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7615 self._blocker_parents.parent_nodes(blocker)):
7616 parent_atoms = self._parent_atoms.get(pkg)
7617 if not parent_atoms:
# Fall back to the world-set atom recorded when the uninstall was
# vetoed in _serialize_tasks().
7618 atom = self._blocked_world_pkgs.get(pkg)
7619 if atom is not None:
7620 parent_atoms = set([("@world", atom)])
7622 conflict_pkgs[pkg] = parent_atoms
7625 # Reduce noise by pruning packages that are only
7626 # pulled in by other conflict packages.
7628 for pkg, parent_atoms in conflict_pkgs.iteritems():
7629 relevant_parent = False
7630 for parent, atom in parent_atoms:
7631 if parent not in conflict_pkgs:
7632 relevant_parent = True
7634 if not relevant_parent:
7635 pruned_pkgs.add(pkg)
7636 for pkg in pruned_pkgs:
7637 del conflict_pkgs[pkg]
7643 # Max number of parents shown, to avoid flooding the display.
7645 for pkg, parent_atoms in conflict_pkgs.iteritems():
7649 # Prefer packages that are not directly involved in a conflict.
7650 for parent_atom in parent_atoms:
7651 if len(pruned_list) >= max_parents:
7653 parent, atom = parent_atom
7654 if parent not in conflict_pkgs:
7655 pruned_list.add(parent_atom)
# Second pass fills remaining slots with any parents.
7657 for parent_atom in parent_atoms:
7658 if len(pruned_list) >= max_parents:
7660 pruned_list.add(parent_atom)
7662 omitted_parents = len(parent_atoms) - len(pruned_list)
7663 msg.append(indent + "%s pulled in by\n" % pkg)
7665 for parent_atom in pruned_list:
7666 parent, atom = parent_atom
7667 msg.append(2*indent)
7668 if isinstance(parent,
7669 (PackageArg, AtomArg)):
7670 # For PackageArg and AtomArg types, it's
7671 # redundant to display the atom attribute.
7672 msg.append(str(parent))
7674 # Display the specific atom from SetArg or
7676 msg.append("%s required by %s" % (atom, parent))
7680 msg.append(2*indent)
7681 msg.append("(and %d more)\n" % omitted_parents)
7685 sys.stderr.write("".join(msg))
7688 if "--quiet" not in self.myopts:
7689 show_blocker_docs_link()
7691 def display(self, mylist, favorites=[], verbosity=None):
7693 # This is used to prevent display_problems() from
7694 # redundantly displaying this exact same merge list
7695 # again via _show_merge_list().
7696 self._displayed_list = mylist
7698 if verbosity is None:
7699 verbosity = ("--quiet" in self.myopts and 1 or \
7700 "--verbose" in self.myopts and 3 or 2)
7701 favorites_set = InternalPackageSet(favorites)
7702 oneshot = "--oneshot" in self.myopts or \
7703 "--onlydeps" in self.myopts
7704 columns = "--columns" in self.myopts
7709 counters = PackageCounters()
7711 if verbosity == 1 and "--verbose" not in self.myopts:
7712 def create_use_string(*args):
7715 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7717 is_new, reinst_flags,
7718 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7719 alphabetical=("--alphabetical" in self.myopts)):
7727 cur_iuse = set(cur_iuse)
7728 enabled_flags = cur_iuse.intersection(cur_use)
7729 removed_iuse = set(old_iuse).difference(cur_iuse)
7730 any_iuse = cur_iuse.union(old_iuse)
7731 any_iuse = list(any_iuse)
7733 for flag in any_iuse:
7736 reinst_flag = reinst_flags and flag in reinst_flags
7737 if flag in enabled_flags:
7739 if is_new or flag in old_use and \
7740 (all_flags or reinst_flag):
7741 flag_str = red(flag)
7742 elif flag not in old_iuse:
7743 flag_str = yellow(flag) + "%*"
7744 elif flag not in old_use:
7745 flag_str = green(flag) + "*"
7746 elif flag in removed_iuse:
7747 if all_flags or reinst_flag:
7748 flag_str = yellow("-" + flag) + "%"
7751 flag_str = "(" + flag_str + ")"
7752 removed.append(flag_str)
7755 if is_new or flag in old_iuse and \
7756 flag not in old_use and \
7757 (all_flags or reinst_flag):
7758 flag_str = blue("-" + flag)
7759 elif flag not in old_iuse:
7760 flag_str = yellow("-" + flag)
7761 if flag not in iuse_forced:
7763 elif flag in old_use:
7764 flag_str = green("-" + flag) + "*"
7766 if flag in iuse_forced:
7767 flag_str = "(" + flag_str + ")"
7769 enabled.append(flag_str)
7771 disabled.append(flag_str)
7774 ret = " ".join(enabled)
7776 ret = " ".join(enabled + disabled + removed)
7778 ret = '%s="%s" ' % (name, ret)
7781 repo_display = RepoDisplay(self.roots)
7785 mygraph = self.digraph.copy()
7787 # If there are any Uninstall instances, add the corresponding
7788 # blockers to the digraph (useful for --tree display).
7790 executed_uninstalls = set(node for node in mylist \
7791 if isinstance(node, Package) and node.operation == "unmerge")
7793 for uninstall in self._blocker_uninstalls.leaf_nodes():
7794 uninstall_parents = \
7795 self._blocker_uninstalls.parent_nodes(uninstall)
7796 if not uninstall_parents:
7799 # Remove the corresponding "nomerge" node and substitute
7800 # the Uninstall node.
7801 inst_pkg = self._pkg_cache[
7802 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7804 mygraph.remove(inst_pkg)
7809 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7811 inst_pkg_blockers = []
7813 # Break the Package -> Uninstall edges.
7814 mygraph.remove(uninstall)
7816 # Resolution of a package's blockers
7817 # depend on it's own uninstallation.
7818 for blocker in inst_pkg_blockers:
7819 mygraph.add(uninstall, blocker)
7821 # Expand Package -> Uninstall edges into
7822 # Package -> Blocker -> Uninstall edges.
7823 for blocker in uninstall_parents:
7824 mygraph.add(uninstall, blocker)
7825 for parent in self._blocker_parents.parent_nodes(blocker):
7826 if parent != inst_pkg:
7827 mygraph.add(blocker, parent)
7829 # If the uninstall task did not need to be executed because
7830 # of an upgrade, display Blocker -> Upgrade edges since the
7831 # corresponding Blocker -> Uninstall edges will not be shown.
7833 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7834 if upgrade_node is not None and \
7835 uninstall not in executed_uninstalls:
7836 for blocker in uninstall_parents:
7837 mygraph.add(upgrade_node, blocker)
7839 unsatisfied_blockers = []
7844 if isinstance(x, Blocker) and not x.satisfied:
7845 unsatisfied_blockers.append(x)
7848 if "--tree" in self.myopts:
7849 depth = len(tree_nodes)
7850 while depth and graph_key not in \
7851 mygraph.child_nodes(tree_nodes[depth-1]):
7854 tree_nodes = tree_nodes[:depth]
7855 tree_nodes.append(graph_key)
7856 display_list.append((x, depth, True))
7857 shown_edges.add((graph_key, tree_nodes[depth-1]))
7859 traversed_nodes = set() # prevent endless circles
7860 traversed_nodes.add(graph_key)
7861 def add_parents(current_node, ordered):
7863 # Do not traverse to parents if this node is an
7864 # an argument or a direct member of a set that has
7865 # been specified as an argument (system or world).
7866 if current_node not in self._set_nodes:
7867 parent_nodes = mygraph.parent_nodes(current_node)
7869 child_nodes = set(mygraph.child_nodes(current_node))
7870 selected_parent = None
7871 # First, try to avoid a direct cycle.
7872 for node in parent_nodes:
7873 if not isinstance(node, (Blocker, Package)):
7875 if node not in traversed_nodes and \
7876 node not in child_nodes:
7877 edge = (current_node, node)
7878 if edge in shown_edges:
7880 selected_parent = node
7882 if not selected_parent:
7883 # A direct cycle is unavoidable.
7884 for node in parent_nodes:
7885 if not isinstance(node, (Blocker, Package)):
7887 if node not in traversed_nodes:
7888 edge = (current_node, node)
7889 if edge in shown_edges:
7891 selected_parent = node
7894 shown_edges.add((current_node, selected_parent))
7895 traversed_nodes.add(selected_parent)
7896 add_parents(selected_parent, False)
7897 display_list.append((current_node,
7898 len(tree_nodes), ordered))
7899 tree_nodes.append(current_node)
7901 add_parents(graph_key, True)
7903 display_list.append((x, depth, True))
7904 mylist = display_list
7905 for x in unsatisfied_blockers:
7906 mylist.append((x, 0, True))
7908 last_merge_depth = 0
7909 for i in xrange(len(mylist)-1,-1,-1):
7910 graph_key, depth, ordered = mylist[i]
7911 if not ordered and depth == 0 and i > 0 \
7912 and graph_key == mylist[i-1][0] and \
7913 mylist[i-1][1] == 0:
7914 # An ordered node got a consecutive duplicate when the tree was
7918 if ordered and graph_key[-1] != "nomerge":
7919 last_merge_depth = depth
7921 if depth >= last_merge_depth or \
7922 i < len(mylist) - 1 and \
7923 depth >= mylist[i+1][1]:
7926 from portage import flatten
7927 from portage.dep import use_reduce, paren_reduce
7928 # files to fetch list - avoids counting a same file twice
7929 # in size display (verbose mode)
7932 # Use this set to detect when all the "repoadd" strings are "[0]"
7933 # and disable the entire repo display in this case.
7936 for mylist_index in xrange(len(mylist)):
7937 x, depth, ordered = mylist[mylist_index]
7941 portdb = self.trees[myroot]["porttree"].dbapi
7942 bindb = self.trees[myroot]["bintree"].dbapi
7943 vardb = self.trees[myroot]["vartree"].dbapi
7944 vartree = self.trees[myroot]["vartree"]
7945 pkgsettings = self.pkgsettings[myroot]
7948 indent = " " * depth
7950 if isinstance(x, Blocker):
7952 blocker_style = "PKG_BLOCKER_SATISFIED"
7953 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7955 blocker_style = "PKG_BLOCKER"
7956 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7958 counters.blocks += 1
7960 counters.blocks_satisfied += 1
7961 resolved = portage.key_expand(
7962 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7963 if "--columns" in self.myopts and "--quiet" in self.myopts:
7964 addl += " " + colorize(blocker_style, resolved)
7966 addl = "[%s %s] %s%s" % \
7967 (colorize(blocker_style, "blocks"),
7968 addl, indent, colorize(blocker_style, resolved))
7969 block_parents = self._blocker_parents.parent_nodes(x)
7970 block_parents = set([pnode[2] for pnode in block_parents])
7971 block_parents = ", ".join(block_parents)
7973 addl += colorize(blocker_style,
7974 " (\"%s\" is blocking %s)") % \
7975 (str(x.atom).lstrip("!"), block_parents)
7977 addl += colorize(blocker_style,
7978 " (is blocking %s)") % block_parents
7979 if isinstance(x, Blocker) and x.satisfied:
7984 blockers.append(addl)
7987 pkg_merge = ordered and pkg_status == "merge"
7988 if not pkg_merge and pkg_status == "merge":
7989 pkg_status = "nomerge"
7990 built = pkg_type != "ebuild"
7991 installed = pkg_type == "installed"
7993 metadata = pkg.metadata
7995 repo_name = metadata["repository"]
7996 if pkg_type == "ebuild":
7997 ebuild_path = portdb.findname(pkg_key)
7998 if not ebuild_path: # shouldn't happen
7999 raise portage.exception.PackageNotFound(pkg_key)
8000 repo_path_real = os.path.dirname(os.path.dirname(
8001 os.path.dirname(ebuild_path)))
8003 repo_path_real = portdb.getRepositoryPath(repo_name)
8004 pkg_use = list(pkg.use.enabled)
8006 restrict = flatten(use_reduce(paren_reduce(
8007 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8008 except portage.exception.InvalidDependString, e:
8009 if not pkg.installed:
8010 show_invalid_depstring_notice(x,
8011 pkg.metadata["RESTRICT"], str(e))
8015 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8016 "fetch" in restrict:
8019 counters.restrict_fetch += 1
8020 if portdb.fetch_check(pkg_key, pkg_use):
8023 counters.restrict_fetch_satisfied += 1
8025 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8026 #param is used for -u, where you still *do* want to see when something is being upgraded.
8029 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8030 if vardb.cpv_exists(pkg_key):
8031 addl=" "+yellow("R")+fetch+" "
8034 counters.reinst += 1
8035 elif pkg_status == "uninstall":
8036 counters.uninst += 1
8037 # filter out old-style virtual matches
8038 elif installed_versions and \
8039 portage.cpv_getkey(installed_versions[0]) == \
8040 portage.cpv_getkey(pkg_key):
8041 myinslotlist = vardb.match(pkg.slot_atom)
8042 # If this is the first install of a new-style virtual, we
8043 # need to filter out old-style virtual matches.
8044 if myinslotlist and \
8045 portage.cpv_getkey(myinslotlist[0]) != \
8046 portage.cpv_getkey(pkg_key):
8049 myoldbest = myinslotlist[:]
8051 if not portage.dep.cpvequal(pkg_key,
8052 portage.best([pkg_key] + myoldbest)):
8054 addl += turquoise("U")+blue("D")
8056 counters.downgrades += 1
8059 addl += turquoise("U") + " "
8061 counters.upgrades += 1
8063 # New slot, mark it new.
8064 addl = " " + green("NS") + fetch + " "
8065 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8067 counters.newslot += 1
8069 if "--changelog" in self.myopts:
8070 inst_matches = vardb.match(pkg.slot_atom)
8072 changelogs.extend(self.calc_changelog(
8073 portdb.findname(pkg_key),
8074 inst_matches[0], pkg_key))
8076 addl = " " + green("N") + " " + fetch + " "
8085 forced_flags = set()
8086 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8087 forced_flags.update(pkgsettings.useforce)
8088 forced_flags.update(pkgsettings.usemask)
8090 cur_use = [flag for flag in pkg.use.enabled \
8091 if flag in pkg.iuse.all]
8092 cur_iuse = sorted(pkg.iuse.all)
8094 if myoldbest and myinslotlist:
8095 previous_cpv = myoldbest[0]
8097 previous_cpv = pkg.cpv
8098 if vardb.cpv_exists(previous_cpv):
8099 old_iuse, old_use = vardb.aux_get(
8100 previous_cpv, ["IUSE", "USE"])
8101 old_iuse = list(set(
8102 filter_iuse_defaults(old_iuse.split())))
8104 old_use = old_use.split()
8111 old_use = [flag for flag in old_use if flag in old_iuse]
8113 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8115 use_expand.reverse()
8116 use_expand_hidden = \
8117 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8119 def map_to_use_expand(myvals, forcedFlags=False,
8123 for exp in use_expand:
8126 for val in myvals[:]:
8127 if val.startswith(exp.lower()+"_"):
8128 if val in forced_flags:
8129 forced[exp].add(val[len(exp)+1:])
8130 ret[exp].append(val[len(exp)+1:])
8133 forced["USE"] = [val for val in myvals \
8134 if val in forced_flags]
8136 for exp in use_expand_hidden:
8142 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8143 # are the only thing that triggered reinstallation.
8144 reinst_flags_map = {}
8145 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8146 reinst_expand_map = None
8147 if reinstall_for_flags:
8148 reinst_flags_map = map_to_use_expand(
8149 list(reinstall_for_flags), removeHidden=False)
8150 for k in list(reinst_flags_map):
8151 if not reinst_flags_map[k]:
8152 del reinst_flags_map[k]
8153 if not reinst_flags_map.get("USE"):
8154 reinst_expand_map = reinst_flags_map.copy()
8155 reinst_expand_map.pop("USE", None)
8156 if reinst_expand_map and \
8157 not set(reinst_expand_map).difference(
8159 use_expand_hidden = \
8160 set(use_expand_hidden).difference(
8163 cur_iuse_map, iuse_forced = \
8164 map_to_use_expand(cur_iuse, forcedFlags=True)
8165 cur_use_map = map_to_use_expand(cur_use)
8166 old_iuse_map = map_to_use_expand(old_iuse)
8167 old_use_map = map_to_use_expand(old_use)
8170 use_expand.insert(0, "USE")
8172 for key in use_expand:
8173 if key in use_expand_hidden:
8175 verboseadd += create_use_string(key.upper(),
8176 cur_iuse_map[key], iuse_forced[key],
8177 cur_use_map[key], old_iuse_map[key],
8178 old_use_map[key], is_new,
8179 reinst_flags_map.get(key))
8184 if pkg_type == "ebuild" and pkg_merge:
8186 myfilesdict = portdb.getfetchsizes(pkg_key,
8187 useflags=pkg_use, debug=self.edebug)
8188 except portage.exception.InvalidDependString, e:
8189 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8190 show_invalid_depstring_notice(x, src_uri, str(e))
8193 if myfilesdict is None:
8194 myfilesdict="[empty/missing/bad digest]"
8196 for myfetchfile in myfilesdict:
8197 if myfetchfile not in myfetchlist:
8198 mysize+=myfilesdict[myfetchfile]
8199 myfetchlist.append(myfetchfile)
8201 counters.totalsize += mysize
8202 verboseadd += format_size(mysize)
8205 # assign index for a previous version in the same slot
8206 has_previous = False
8207 repo_name_prev = None
8208 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8210 slot_matches = vardb.match(slot_atom)
8213 repo_name_prev = vardb.aux_get(slot_matches[0],
8216 # now use the data to generate output
8217 if pkg.installed or not has_previous:
8218 repoadd = repo_display.repoStr(repo_path_real)
8220 repo_path_prev = None
8222 repo_path_prev = portdb.getRepositoryPath(
8224 if repo_path_prev == repo_path_real:
8225 repoadd = repo_display.repoStr(repo_path_real)
8227 repoadd = "%s=>%s" % (
8228 repo_display.repoStr(repo_path_prev),
8229 repo_display.repoStr(repo_path_real))
8231 repoadd_set.add(repoadd)
8233 xs = [portage.cpv_getkey(pkg_key)] + \
8234 list(portage.catpkgsplit(pkg_key)[2:])
8241 if "COLUMNWIDTH" in self.settings:
8243 mywidth = int(self.settings["COLUMNWIDTH"])
8244 except ValueError, e:
8245 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8247 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8248 self.settings["COLUMNWIDTH"], noiselevel=-1)
8250 oldlp = mywidth - 30
8253 # Convert myoldbest from a list to a string.
8257 for pos, key in enumerate(myoldbest):
8258 key = portage.catpkgsplit(key)[2] + \
8259 "-" + portage.catpkgsplit(key)[3]
8260 if key[-3:] == "-r0":
8262 myoldbest[pos] = key
8263 myoldbest = blue("["+", ".join(myoldbest)+"]")
8266 root_config = self.roots[myroot]
8267 system_set = root_config.sets["system"]
8268 world_set = root_config.sets["world"]
8273 pkg_system = system_set.findAtomForPackage(pkg)
8274 pkg_world = world_set.findAtomForPackage(pkg)
8275 if not (oneshot or pkg_world) and \
8276 myroot == self.target_root and \
8277 favorites_set.findAtomForPackage(pkg):
8278 # Maybe it will be added to world now.
8279 if create_world_atom(pkg, favorites_set, root_config):
8281 except portage.exception.InvalidDependString:
8282 # This is reported elsewhere if relevant.
8285 def pkgprint(pkg_str):
8288 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8290 return colorize("PKG_MERGE_WORLD", pkg_str)
8292 return colorize("PKG_MERGE", pkg_str)
8293 elif pkg_status == "uninstall":
8294 return colorize("PKG_UNINSTALL", pkg_str)
8297 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8299 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8301 return colorize("PKG_NOMERGE", pkg_str)
8304 properties = flatten(use_reduce(paren_reduce(
8305 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8306 except portage.exception.InvalidDependString, e:
8307 if not pkg.installed:
8308 show_invalid_depstring_notice(pkg,
8309 pkg.metadata["PROPERTIES"], str(e))
8313 interactive = "interactive" in properties
8314 if interactive and pkg.operation == "merge":
8315 addl = colorize("WARN", "I") + addl[1:]
8317 counters.interactive += 1
8322 if "--columns" in self.myopts:
8323 if "--quiet" in self.myopts:
8324 myprint=addl+" "+indent+pkgprint(pkg_cp)
8325 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8326 myprint=myprint+myoldbest
8327 myprint=myprint+darkgreen("to "+x[1])
8331 myprint = "[%s] %s%s" % \
8332 (pkgprint(pkg_status.ljust(13)),
8333 indent, pkgprint(pkg.cp))
8335 myprint = "[%s %s] %s%s" % \
8336 (pkgprint(pkg.type_name), addl,
8337 indent, pkgprint(pkg.cp))
8338 if (newlp-nc_len(myprint)) > 0:
8339 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8340 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8341 if (oldlp-nc_len(myprint)) > 0:
8342 myprint=myprint+" "*(oldlp-nc_len(myprint))
8343 myprint=myprint+myoldbest
8344 myprint += darkgreen("to " + pkg.root)
8347 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8349 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8350 myprint += indent + pkgprint(pkg_key) + " " + \
8351 myoldbest + darkgreen("to " + myroot)
8353 if "--columns" in self.myopts:
8354 if "--quiet" in self.myopts:
8355 myprint=addl+" "+indent+pkgprint(pkg_cp)
8356 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8357 myprint=myprint+myoldbest
8361 myprint = "[%s] %s%s" % \
8362 (pkgprint(pkg_status.ljust(13)),
8363 indent, pkgprint(pkg.cp))
8365 myprint = "[%s %s] %s%s" % \
8366 (pkgprint(pkg.type_name), addl,
8367 indent, pkgprint(pkg.cp))
8368 if (newlp-nc_len(myprint)) > 0:
8369 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8370 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8371 if (oldlp-nc_len(myprint)) > 0:
8372 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8373 myprint += myoldbest
8376 myprint = "[%s] %s%s %s" % \
8377 (pkgprint(pkg_status.ljust(13)),
8378 indent, pkgprint(pkg.cpv),
8381 myprint = "[%s %s] %s%s %s" % \
8382 (pkgprint(pkg_type), addl, indent,
8383 pkgprint(pkg.cpv), myoldbest)
8385 if columns and pkg.operation == "uninstall":
8387 p.append((myprint, verboseadd, repoadd))
8389 if "--tree" not in self.myopts and \
8390 "--quiet" not in self.myopts and \
8391 not self._opts_no_restart.intersection(self.myopts) and \
8392 pkg.root == self._running_root.root and \
8393 portage.match_from_list(
8394 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8395 not vardb.cpv_exists(pkg.cpv) and \
8396 "--quiet" not in self.myopts:
8397 if mylist_index < len(mylist) - 1:
8398 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8399 p.append(colorize("WARN", " then resume the merge."))
8402 show_repos = repoadd_set and repoadd_set != set(["0"])
8405 if isinstance(x, basestring):
8406 out.write("%s\n" % (x,))
8409 myprint, verboseadd, repoadd = x
8412 myprint += " " + verboseadd
8414 if show_repos and repoadd:
8415 myprint += " " + teal("[%s]" % repoadd)
8417 out.write("%s\n" % (myprint,))
8426 sys.stdout.write(str(repo_display))
8428 if "--changelog" in self.myopts:
8430 for revision,text in changelogs:
8431 print bold('*'+revision)
8432 sys.stdout.write(text)
8437 def display_problems(self):
# NOTE(review): this view of the file is a garbled extraction -- the
# embedded numbers (8437, 8439, ...) are original line numbers and they
# jump, so lines are missing here. In particular the docstring quotes
# and the try/finally that restores sys.stdout after the redirection
# below appear to fall in the gaps; confirm against the complete file.
8439 Display problems with the dependency graph such as slot collisions.
8440 This is called internally by display() to show the problems _after_
8441 the merge list where it is most likely to be seen, but if display()
8442 is not going to be called then this method should be called explicitly
8443 to ensure that the user is notified of problems with the graph.
8445 All output goes to stderr, except for unsatisfied dependencies which
8446 go to stdout for parsing by programs such as autounmask.
8449 # Note that show_masked_packages() sends its output to
8450 # stdout, and some programs such as autounmask parse the
8451 # output in cases when emerge bails out. However, when
8452 # show_masked_packages() is called for installed packages
8453 # here, the message is a warning that is more appropriate
8454 # to send to stderr, so temporarily redirect stdout to
8455 # stderr. TODO: Fix output code so there's a cleaner way
8456 # to redirect everything to stderr.
# Redirect stdout to stderr, then delegate the actual reporting to
# _display_problems().
8461 sys.stdout = sys.stderr
8462 self._display_problems()
# Unsatisfied deps are deliberately written to the real stdout so that
# external tools (e.g. autounmask) can parse them.
8468 # This goes to stdout for parsing by programs like autounmask.
8469 for pargs, kwargs in self._unsatisfied_deps_for_display:
8470 self._show_unsatisfied_dep(*pargs, **kwargs)
8472 def _display_problems(self):
# Report every recorded graph problem to stderr: circular deps,
# unresolvable blockers (or, failing that, slot collisions), missing
# world-file entries, missing/masked ebuilds, package.provided
# conflicts, and masked installed packages.
# NOTE(review): extraction gaps -- several original lines are missing
# (e.g. the branch between the blocker check and the slot-collision
# notice, and a few break/continue lines); verify against the full file.
8473 if self._circular_deps_for_display is not None:
8474 self._show_circular_deps(
8475 self._circular_deps_for_display)
8477 # The user is only notified of a slot conflict if
8478 # there are no unresolvable blocker conflicts.
8479 if self._unsatisfied_blockers_for_display is not None:
8480 self._show_unsatisfied_blockers(
8481 self._unsatisfied_blockers_for_display)
8483 self._show_slot_collision_notice()
8485 # TODO: Add generic support for "set problem" handlers so that
8486 # the below warnings aren't special cases for world only.
8488 if self._missing_args:
8489 world_problems = False
8490 if "world" in self._sets:
8491 # Filter out indirect members of world (from nested sets)
8492 # since only direct members of world are desired here.
8493 world_set = self.roots[self.target_root].sets["world"]
8494 for arg, atom in self._missing_args:
8495 if arg.name == "world" and atom in world_set:
8496 world_problems = True
# Suggest emaint when a direct world entry could not be satisfied.
8500 sys.stderr.write("\n!!! Problems have been " + \
8501 "detected with your world file\n")
8502 sys.stderr.write("!!! Please run " + \
8503 green("emaint --check world")+"\n\n")
8505 if self._missing_args:
8506 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8507 " Ebuilds for the following packages are either all\n")
8508 sys.stderr.write(colorize("BAD", "!!!") + \
8509 " masked or don't exist:\n")
8510 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8511 self._missing_args) + "\n")
# Group package.provided conflicts by (arg, atom) and remember which
# sets pulled each one in, so the report can name them.
8513 if self._pprovided_args:
8515 for arg, atom in self._pprovided_args:
8516 if isinstance(arg, SetArg):
8518 arg_atom = (atom, atom)
8521 arg_atom = (arg.arg, atom)
8522 refs = arg_refs.setdefault(arg_atom, [])
8523 if parent not in refs:
8526 msg.append(bad("\nWARNING: "))
8527 if len(self._pprovided_args) > 1:
8528 msg.append("Requested packages will not be " + \
8529 "merged because they are listed in\n")
8531 msg.append("A requested package will not be " + \
8532 "merged because it is listed in\n")
8533 msg.append("package.provided:\n\n")
8534 problems_sets = set()
8535 for (arg, atom), refs in arg_refs.iteritems():
8538 problems_sets.update(refs)
8540 ref_string = ", ".join(["'%s'" % name for name in refs])
8541 ref_string = " pulled in by " + ref_string
8542 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
# Offer remediation advice only when world itself is implicated.
8544 if "world" in problems_sets:
8545 msg.append("This problem can be solved in one of the following ways:\n\n")
8546 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8547 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8548 msg.append(" C) Remove offending entries from package.provided.\n\n")
8549 msg.append("The best course of action depends on the reason that an offending\n")
8550 msg.append("package.provided entry exists.\n\n")
8551 sys.stderr.write("".join(msg))
# Finally, warn about installed packages that are currently masked.
8553 masked_packages = []
8554 for pkg in self._masked_installed:
8555 root_config = pkg.root_config
8556 pkgsettings = self.pkgsettings[pkg.root]
8557 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8558 masked_packages.append((root_config, pkgsettings,
8559 pkg.cpv, pkg.metadata, mreasons))
8561 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8562 " The following installed packages are masked:\n")
8563 show_masked_packages(masked_packages)
8567 def calc_changelog(self,ebuildpath,current,next):
# Collect the ChangeLog entries between the installed version
# (`current`) and the version about to be merged (`next`) for
# --changelog output. Versions are normalized by stripping the
# category and any trailing "-r0".
# NOTE(review): extraction gaps -- the early `return []`, the
# try/except around open(), and the final return lines are missing
# from this view; confirm against the complete file.
8568 if ebuildpath == None or not os.path.exists(ebuildpath):
8570 current = '-'.join(portage.catpkgsplit(current)[1:])
8571 if current.endswith('-r0'):
8572 current = current[:-3]
8573 next = '-'.join(portage.catpkgsplit(next)[1:])
8574 if next.endswith('-r0'):
# The ChangeLog lives next to the ebuild in the package directory.
8576 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8578 changelog = open(changelogpath).read()
8579 except SystemExit, e:
8580 raise # Needed else can't exit
8583 divisions = self.find_changelog_tags(changelog)
8584 #print 'XX from',current,'to',next
8585 #for div,text in divisions: print 'XX',div
8586 # skip entries for all revisions above the one we are about to emerge
8587 for i in range(len(divisions)):
8588 if divisions[i][0]==next:
8589 divisions = divisions[i:]
8591 # find out how many entries we are going to display
8592 for i in range(len(divisions)):
8593 if divisions[i][0]==current:
8594 divisions = divisions[:i]
8597 # couldn't find the current revision in the list. display nothing
8601 def find_changelog_tags(self,changelog):
# Split raw ChangeLog text into (release, text) tuples, one per
# "*<version>" header line. "<pkg>.ebuild" and "-r0" suffixes are
# stripped from the release names so they compare equal to the
# normalized versions used by calc_changelog().
# NOTE(review): extraction gaps -- the initial release/divs setup,
# the `while 1:` loop header, and the final `return divs` are missing
# from this view; confirm against the complete file.
8605 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
# No further header: the remaining text belongs to the last release.
8607 if release is not None:
8608 divs.append((release,changelog))
# Text before the matched header belongs to the previous release.
8610 if release is not None:
8611 divs.append((release,changelog[:match.start()]))
8612 changelog = changelog[match.end():]
8613 release = match.group(1)
8614 if release.endswith('.ebuild'):
8615 release = release[:-7]
8616 if release.endswith('-r0'):
8617 release = release[:-3]
8619 def saveNomergeFavorites(self):
8620 """Find atoms in favorites that are not in the mergelist and add them
8621 to the world file if necessary."""
# NOTE(review): extraction gaps -- world_set.lock()/unlock() calls, the
# try/finally protecting the lock, a few continue statements, and the
# all_added setup appear to be on lines missing from this view.
# Skip entirely for options that must not modify the world file.
8622 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8623 "--oneshot", "--onlydeps", "--pretend"):
8624 if x in self.myopts:
8626 root_config = self.roots[self.target_root]
8627 world_set = root_config.sets["world"]
8629 world_locked = False
8630 if hasattr(world_set, "lock"):
8634 if hasattr(world_set, "load"):
8635 world_set.load() # maybe it's changed on disk
8637 args_set = self._sets["args"]
8638 portdb = self.trees[self.target_root]["porttree"].dbapi
8639 added_favorites = set()
# Only "nomerge" nodes are candidates: merged nodes are recorded
# elsewhere, at merge time.
8640 for x in self._set_nodes:
8641 pkg_type, root, pkg_key, pkg_status = x
8642 if pkg_status != "nomerge":
8646 myfavkey = create_world_atom(x, args_set, root_config)
8648 if myfavkey in added_favorites:
8650 added_favorites.add(myfavkey)
8651 except portage.exception.InvalidDependString, e:
8652 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8653 (pkg_key, str(e)), noiselevel=-1)
8654 writemsg("!!! see '%s'\n\n" % os.path.join(
8655 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Requested sets (other than args/world) that are world_candidate are
# recorded by their set name, prefixed with SETPREFIX.
8658 for k in self._sets:
8659 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8664 all_added.append(SETPREFIX + k)
8665 all_added.extend(added_favorites)
8668 print ">>> Recording %s in \"world\" favorites file..." % \
8669 colorize("INFORM", str(a))
8671 world_set.update(all_added)
8676 def loadResumeCommand(self, resume_data, skip_masked=False):
8678 Add a resume command to the graph and validate it in the process. This
8679 will raise a PackageNotFound exception if a package is not available.
# NOTE(review): extraction gaps throughout -- docstring quotes, early
# `return False` lines, the mergelist `for x in mergelist:` header,
# try/except around aux_get, several continue/return statements and the
# masked_tasks setup are on lines missing from this view; confirm
# against the complete file before relying on control flow here.
# Validate the shape of the pickled resume data before trusting it.
8682 if not isinstance(resume_data, dict):
8685 mergelist = resume_data.get("mergelist")
8686 if not isinstance(mergelist, list):
8689 fakedb = self.mydbapi
8691 serialized_tasks = []
# Each mergelist entry is [pkg_type, root, cpv, action].
8694 if not (isinstance(x, list) and len(x) == 4):
8696 pkg_type, myroot, pkg_key, action = x
8697 if pkg_type not in self.pkg_tree_map:
8699 if action != "merge":
8701 tree_type = self.pkg_tree_map[pkg_type]
8702 mydb = trees[myroot][tree_type].dbapi
8703 db_keys = list(self._trees_orig[myroot][
8704 tree_type].dbapi._aux_cache_keys)
8706 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8708 # It does not exist or it is corrupt.
8709 if action == "uninstall":
8711 raise portage.exception.PackageNotFound(pkg_key)
8712 installed = action == "uninstall"
8713 built = pkg_type != "ebuild"
8714 root_config = self.roots[myroot]
8715 pkg = Package(built=built, cpv=pkg_key,
8716 installed=installed, metadata=metadata,
8717 operation=action, root_config=root_config,
# For ebuilds, recompute USE/CHOST from current settings rather than
# trusting the stale values from the resume data.
8719 if pkg_type == "ebuild":
8720 pkgsettings = self.pkgsettings[myroot]
8721 pkgsettings.setcpv(pkg)
8722 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8723 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8724 self._pkg_cache[pkg] = pkg
8726 root_config = self.roots[pkg.root]
# A package that has become masked since the resume list was saved
# either gets queued as a masked task (skip_masked) or is reported
# as an unsatisfied dep for display.
8727 if "merge" == pkg.operation and \
8728 not visible(root_config.settings, pkg):
8730 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8732 self._unsatisfied_deps_for_display.append(
8733 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8735 fakedb[myroot].cpv_inject(pkg)
8736 serialized_tasks.append(pkg)
8737 self.spinner.update()
8739 if self._unsatisfied_deps_for_display:
# With --nodeps (or an empty list) the tasks are used as-is; otherwise
# rebuild the dependency graph around them.
8742 if not serialized_tasks or "--nodeps" in self.myopts:
8743 self._serialized_tasks_cache = serialized_tasks
8744 self._scheduler_graph = self.digraph
8746 self._select_package = self._select_pkg_from_graph
8747 self.myparams.add("selective")
8748 # Always traverse deep dependencies in order to account for
8749 # potentially unsatisfied dependencies of installed packages.
8750 # This is necessary for correct --keep-going or --resume operation
8751 # in case a package from a group of circularly dependent packages
8752 # fails. In this case, a package which has recently been installed
8753 # may have an unsatisfied circular dependency (pulled in by
8754 # PDEPEND, for example). So, even though a package is already
8755 # installed, it may not have all of its dependencies satisfied, so
8756 # it may not be usable. If such a package is in the subgraph of
8757 # deep dependencies of a scheduled build, that build needs to
8758 # be cancelled. In order for this type of situation to be
8759 # recognized, deep traversal of dependencies is required.
8760 self.myparams.add("deep")
8762 favorites = resume_data.get("favorites")
8763 args_set = self._sets["args"]
8764 if isinstance(favorites, list):
8765 args = self._load_favorites(favorites)
8769 for task in serialized_tasks:
8770 if isinstance(task, Package) and \
8771 task.operation == "merge":
8772 if not self._add_pkg(task, None):
8775 # Packages for argument atoms need to be explicitly
8776 # added via _add_pkg() so that they are included in the
8777 # digraph (needed at least for --tree display).
8779 for atom in arg.set:
8780 pkg, existing_node = self._select_package(
8781 arg.root_config.root, atom)
8782 if existing_node is None and \
8784 if not self._add_pkg(pkg, Dependency(atom=atom,
8785 root=pkg.root, parent=arg)):
8788 # Allow unsatisfied deps here to avoid showing a masking
8789 # message for an unsatisfied dep that isn't necessarily
8791 if not self._create_graph(allow_unsatisfied=True):
# Deps unsatisfied on a to-be-merged parent always invalidate the
# resume list.
8794 unsatisfied_deps = []
8795 for dep in self._unsatisfied_deps:
8796 if not isinstance(dep.parent, Package):
8798 if dep.parent.operation == "merge":
8799 unsatisfied_deps.append(dep)
8802 # For unsatisfied deps of installed packages, only account for
8803 # them if they are in the subgraph of dependencies of a package
8804 # which is scheduled to be installed.
8805 unsatisfied_install = False
8807 dep_stack = self.digraph.parent_nodes(dep.parent)
8809 node = dep_stack.pop()
8810 if not isinstance(node, Package):
8812 if node.operation == "merge":
8813 unsatisfied_install = True
8815 if node in traversed:
8818 dep_stack.extend(self.digraph.parent_nodes(node))
8820 if unsatisfied_install:
8821 unsatisfied_deps.append(dep)
8823 if masked_tasks or unsatisfied_deps:
8824 # This probably means that a required package
8825 # was dropped via --skipfirst. It makes the
8826 # resume list invalid, so convert it to a
8827 # UnsatisfiedResumeDep exception.
8828 raise self.UnsatisfiedResumeDep(self,
8829 masked_tasks + unsatisfied_deps)
8830 self._serialized_tasks_cache = None
8833 except self._unknown_internal_error:
8838 def _load_favorites(self, favorites):
8840 Use a list of favorites to resume state from a
8841 previous select_files() call. This creates similar
8842 DependencyArg instances to those that would have
8843 been created by the original select_files() call.
8844 This allows Package instances to be matched with
8845 DependencyArg instances during graph creation.
# NOTE(review): extraction gaps -- the docstring quotes, the `args`
# list setup, the favorites loop header, several continue statements,
# the try/except around getSetAtoms, and the final return are on lines
# missing from this view.
8847 root_config = self.roots[self.target_root]
8848 getSetAtoms = root_config.setconfig.getSetAtoms
8849 sets = root_config.sets
# Non-string entries and plain "system"/"world" names are not
# reconstructed here.
8852 if not isinstance(x, basestring):
8854 if x in ("system", "world"):
8856 if x.startswith(SETPREFIX):
8857 s = x[len(SETPREFIX):]
8862 # Recursively expand sets so that containment tests in
8863 # self._get_parent_sets() properly match atoms in nested
8864 # sets (like if world contains system).
8865 expanded_set = InternalPackageSet(
8866 initial_atoms=getSetAtoms(s))
8867 self._sets[s] = expanded_set
8868 args.append(SetArg(arg=x, set=expanded_set,
8869 root_config=root_config))
# Anything else must be a valid atom to become an AtomArg.
8871 if not portage.isvalidatom(x):
8873 args.append(AtomArg(arg=x, atom=x,
8874 root_config=root_config))
8876 self._set_args(args)
8879 class UnsatisfiedResumeDep(portage.exception.PortageException):
# Raised by loadResumeCommand() when the resume list is invalid.
# NOTE(review): the docstring delimiters fall on lines missing from
# this extraction (8880/8884).
8881 A dependency of a resume list is not installed. This
8882 can occur when a required package is dropped from the
8883 merge list via --skipfirst.
8885 def __init__(self, depgraph, value):
8886 portage.exception.PortageException.__init__(self, value)
# Keep a reference to the depgraph so the handler can inspect it.
8887 self.depgraph = depgraph
# Base class for exceptions used internally by the depgraph for
# control flow; not intended to escape to callers.
8889 class _internal_exception(portage.exception.PortageException):
8890 def __init__(self, value=""):
8891 portage.exception.PortageException.__init__(self, value)
8893 class _unknown_internal_error(_internal_exception):
# NOTE(review): docstring delimiters and its final sentence fall on
# lines missing from this extraction (8894, 8898-8900).
8895 Used by the depgraph internally to terminate graph creation.
8896 The specific reason for the failure should have been dumped
8897 to stderr, unfortunately, the exact reason for the failure
8901 class _serialize_tasks_retry(_internal_exception):
# NOTE(review): the docstring delimiters fall on lines missing from
# this extraction (8902, 8907-8908).
8903 This is raised by the _serialize_tasks() method when it needs to
8904 be called again for some reason. The only case that it's currently
8905 used for is when neglected dependencies need to be added to the
8906 graph in order to avoid making a potentially unsafe decision.
8909 class _dep_check_composite_db(portage.dbapi):
# NOTE(review): many original lines are missing from this extraction
# (docstring quotes, `self._root = root`, cache-hit early returns,
# `slots = set()` setup, several return True/False lines, the
# `while slots:` loop, try/except bodies, etc.); verify control flow
# against the complete file before editing.
8911 A dbapi-like interface that is optimized for use in dep_check() calls.
8912 This is built on top of the existing depgraph package selection logic.
8913 Some packages that have been added to the graph may be masked from this
8914 view in order to influence the atom preference selection that occurs
8917 def __init__(self, depgraph, root):
8918 portage.dbapi.__init__(self)
8919 self._depgraph = depgraph
# Per-instance caches, invalidated via _clear_cache().
8921 self._match_cache = {}
8922 self._cpv_pkg_map = {}
# Drop both the atom->matches cache and the cpv->Package map.
8924 def _clear_cache(self):
8925 self._match_cache.clear()
8926 self._cpv_pkg_map.clear()
# Return matching cpvs for `atom`, consulting the depgraph's package
# selection and caching the result under the original atom.
8928 def match(self, atom):
8929 ret = self._match_cache.get(atom)
8934 atom = self._dep_expand(atom)
8935 pkg, existing = self._depgraph._select_package(self._root, atom)
8939 # Return the highest available from select_package() as well as
8940 # any matching slots in the graph db.
8942 slots.add(pkg.metadata["SLOT"])
8943 atom_cp = portage.dep_getkey(atom)
8944 if pkg.cp.startswith("virtual/"):
8945 # For new-style virtual lookahead that occurs inside
8946 # dep_check(), examine all slots. This is needed
8947 # so that newer slots will not unnecessarily be pulled in
8948 # when a satisfying lower slot is already installed. For
8949 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8950 # there's no need to pull in a newer slot to satisfy a
8951 # virtual/jdk dependency.
8952 for db, pkg_type, built, installed, db_keys in \
8953 self._depgraph._filtered_trees[self._root]["dbs"]:
8954 for cpv in db.match(atom):
8955 if portage.cpv_getkey(cpv) != pkg.cp:
8957 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8959 if self._visible(pkg):
8960 self._cpv_pkg_map[pkg.cpv] = pkg
8962 slots.remove(pkg.metadata["SLOT"])
# Also select a package for each remaining slot of the same cp.
8964 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8965 pkg, existing = self._depgraph._select_package(
8966 self._root, slot_atom)
8969 if not self._visible(pkg):
8971 self._cpv_pkg_map[pkg.cpv] = pkg
8974 self._cpv_sort_ascending(ret)
8975 self._match_cache[orig_atom] = ret
# Decide whether `pkg` may be offered to dep_check(); packages that
# would cause a slot conflict or are not the highest visible version
# in their slot are masked from this view.
8978 def _visible(self, pkg):
8979 if pkg.installed and "selective" not in self._depgraph.myparams:
8981 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8982 except (StopIteration, portage.exception.InvalidDependString):
8989 self._depgraph.pkgsettings[pkg.root], pkg):
8991 except portage.exception.InvalidDependString:
8993 in_graph = self._depgraph._slot_pkg_map[
8994 self._root].get(pkg.slot_atom)
8995 if in_graph is None:
8996 # Mask choices for packages which are not the highest visible
8997 # version within their slot (since they usually trigger slot
8999 highest_visible, in_graph = self._depgraph._select_package(
9000 self._root, pkg.slot_atom)
9001 if pkg != highest_visible:
9003 elif in_graph != pkg:
9004 # Mask choices for packages that would trigger a slot
9005 # conflict with a previously selected package.
# Qualify a category-less atom (old installed packages) with a
# category, emulating cpv_expand() and preferring non-virtual matches.
9009 def _dep_expand(self, atom):
9011 This is only needed for old installed packages that may
9012 contain atoms that are not fully qualified with a specific
9013 category. Emulate the cpv_expand() function that's used by
9014 dbapi.match() in cases like this. If there are multiple
9015 matches, it's often due to a new-style virtual that has
9016 been added, so try to filter those out to avoid raising
9019 root_config = self._depgraph.roots[self._root]
9021 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9022 if len(expanded_atoms) > 1:
9023 non_virtual_atoms = []
9024 for x in expanded_atoms:
9025 if not portage.dep_getkey(x).startswith("virtual/"):
9026 non_virtual_atoms.append(x)
9027 if len(non_virtual_atoms) == 1:
9028 expanded_atoms = non_virtual_atoms
9029 if len(expanded_atoms) > 1:
9030 # compatible with portage.cpv_expand()
9031 raise portage.exception.AmbiguousPackageName(
9032 [portage.dep_getkey(x) for x in expanded_atoms])
9034 atom = expanded_atoms[0]
# No expansion matched: fall back to "virtual/" when PROVIDE data
# suggests one, otherwise keep the "null" placeholder category.
9036 null_atom = insert_category_into_atom(atom, "null")
9037 null_cp = portage.dep_getkey(null_atom)
9038 cat, atom_pn = portage.catsplit(null_cp)
9039 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9041 # Allow the resolver to choose which virtual.
9042 atom = insert_category_into_atom(atom, "virtual")
9044 atom = insert_category_into_atom(atom, "null")
# Serve metadata for cpvs previously returned by match(); missing
# keys yield "".
9047 def aux_get(self, cpv, wants):
9048 metadata = self._cpv_pkg_map[cpv].metadata
9049 return [metadata.get(x, "") for x in wants]
class RepoDisplay(object):
	"""
	Maps repository locations (PORTDIR and overlays) to short numeric
	display indices for the merge list, and renders the legend printed
	after it. NOTE(review): some lines are elided from this excerpt.
	"""

	def __init__(self, roots):
		self._shown_repos = {}      # repo_path -> allocated display index
		self._unknown_repo = False  # set when a path cannot be resolved
		# [elided: presumably `repo_paths = set()`]
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			# [elided: presumably `if portdir:` guard]
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			# [elided: presumably `if overlays:` guard]
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Resolve symlinks once so later lookups match realpath inputs.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			# [elided: presumably `if portdir:` guard]
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		"""
		Return a short display token for a realpath'd repository
		location, allocating a new index on first use.
		"""
		# NOTE(review): list.index() raises ValueError when the value is
		# absent; the -1 comparison below implies an elided guard that
		# pre-assigns real_index = -1 before a conditional lookup.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			# [elided: presumably `s = "?"`]
			self._unknown_repo = True
		# [elided: presumably `else:` branch]
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		# [elided: presumably `if index is None:` guards the allocation]
		index = len(shown_repos)
		shown_repos[repo_path] = index
		# [elided: return of the display string]

	# [elided: `def __str__(self):` line and output-list setup;
	#  the body below renders the repository legend]
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
			# Invert repo_path -> index into an index-ordered list.
			show_repo_paths = list(shown_repos)
			for repo_path, repo_index in shown_repos.iteritems():
				show_repo_paths[repo_index] = repo_path
			for index, repo_path in enumerate(show_repo_paths):
				output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
			# [elided: presumably `if unknown_repo:` guard]
			output.append(" "+teal("[?]") + \
				" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""
	Accumulates per-category counts for the merge-list summary line
	("Total: N packages (...)"). NOTE(review): __init__ is mostly
	elided from this excerpt; the counters referenced in __str__
	(upgrades, downgrades, newslot, new, reinst, uninst, blocks,
	totalsize, ...) are presumably initialized to zero there.
	"""

	# [elided: `def __init__(self):` and most counter initializations]
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0

	# [elided: `def __str__(self):` line plus `myoutput`/`details`
	#  list initialization]
		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		# [elided lines]
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
				# [elided: pluralization append]
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
				# [elided lines]
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
				# [elided lines]
			details.append("%s reinstall" % self.reinst)
			# [elided lines]
			details.append("%s uninstall" % self.uninst)
		# [elided lines]
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		# [elided: presumably `if self.blocks > 0:` guard]
		myoutput.append("\nConflict: %s block" % \
			# [elided: the `self.blocks` argument and plural guard]
			myoutput.append("s")
		if self.blocks_satisfied < self.blocks:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class PollSelectAdapter(PollConstants):

	"""
	Use select to emulate a poll object, for
	systems that don't support poll().
	NOTE(review): some lines are elided from this excerpt.
	"""

	# [elided: `def __init__(self):` line]
		self._registered = {}            # fd -> eventmask
		self._select_args = [[], [], []] # cached args for select()

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
		# [elided: arity check, presumably `if len(args) > 1: raise TypeError(`]
			"register expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		# Default eventmask when the caller omits one.
		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		# [elided: handling of a caller-supplied eventmask]
		self._registered[fd] = eventmask
		# Invalidate the cached select() args.
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
		# [elided: arity check, presumably `if len(args) > 1: raise TypeError(`]
			"poll expected at most 2 arguments, got " + \
			repr(1 + len(args)))
		# [elided: timeout extraction from args]
		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			# Copy so the cached list isn't mutated by the append below.
			select_args = select_args[:]
			# Translate poll() timeout args to select() timeout args:
			#
			#          | units        | value(s) for indefinite block
			# ---------|--------------|------------------------------
			#   poll   | milliseconds | omitted, negative, or None
			# ---------|--------------|------------------------------
			#  select  | seconds      | omitted
			# ---------|--------------|------------------------------
			if timeout is not None and timeout < 0:
				# [elided: presumably `timeout = None`]
			if timeout is not None:
				select_args.append(timeout / 1000)

		select_events = select.select(*select_args)
		# [elided: presumably `poll_events = []`]
		for fd in select_events[0]:
			poll_events.append((fd, PollConstants.POLLIN))
		# [elided: presumably `return poll_events`]
class SequentialTaskQueue(SlotObject):
	"""
	A FIFO of AsynchronousTask-like objects that starts up to max_jobs
	of them concurrently. NOTE(review): some lines are elided from
	this excerpt.
	"""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:
			# [elided: presumably `self.max_jobs = 1` and dirty-flag init]

	def add(self, task):
		# Append to the back of the queue.
		self._task_queue.append(task)
		# [elided: presumably dirty-flag update / schedule]

	def addFront(self, task):
		# High-priority insertion at the front of the queue.
		self._task_queue.appendleft(task)
		# [elided lines]

	# [elided: `def schedule(self):` line and early-exit checks]
		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
			# [elided: early return]

		self._scheduling = True

		task_queue = self._task_queue
		running_tasks = self.running_tasks
		max_jobs = self.max_jobs
		state_changed = False

		# Start tasks until the queue drains or the job limit is hit
		# (max_jobs is True means unlimited).
		while task_queue and \
			(max_jobs is True or len(running_tasks) < max_jobs):
			task = task_queue.popleft()
			cancelled = getattr(task, "cancelled", None)
			# [elided: presumably skip-cancelled handling]
			running_tasks.add(task)
			task.addExitListener(self._task_exit)
			# [elided: presumably `task.start()`]
			state_changed = True

		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set of
		running tasks is always pruned automatically and there is never any need
		to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:
			# [elided: presumably dirty-flag update]

	# [elided: `def clear(self):` line]
		self._task_queue.clear()
		running_tasks = self.running_tasks
		while running_tasks:
			task = running_tasks.pop()
			task.removeExitListener(self._task_exit)
			# [elided: presumably task cancellation]

	def __nonzero__(self):
		# Truthy while anything is queued or still running.
		return bool(self._task_queue or self.running_tasks)

	# [elided: `def __len__(self):` line]
		return len(self._task_queue) + len(self.running_tasks)
# Memoized result of can_poll_device(); None until first call.
_can_poll_device = None

def can_poll_device():
	"""
	Test if it's possible to use poll() on a device such as a pty. This
	is known to fail on Darwin.
	@rtype: bool
	@returns: True if poll() on a device succeeds, False otherwise.
	NOTE(review): some lines are elided from this excerpt.
	"""
	global _can_poll_device
	if _can_poll_device is not None:
		# Return the cached result.
		return _can_poll_device

	if not hasattr(select, "poll"):
		_can_poll_device = False
		return _can_poll_device

	# [elided: presumably a `try:` around the open() call]
	dev_null = open('/dev/null', 'rb')
	# [elided: presumably `except IOError:` handler]
	_can_poll_device = False
	return _can_poll_device

	# [elided: presumably `p = select.poll()`]
	p.register(dev_null.fileno(), PollConstants.POLLIN)

	invalid_request = False
	for f, event in p.poll():
		# POLLNVAL indicates the fd is not pollable on this platform.
		if event & PollConstants.POLLNVAL:
			invalid_request = True
			# [elided lines]

	_can_poll_device = not invalid_request
	return _can_poll_device
def create_poll_instance():
	"""
	Return a poll object: select.poll() when the platform supports
	polling device files, otherwise a PollSelectAdapter fallback that
	emulates the poll interface on top of select().
	"""
	if not can_poll_device():
		# poll() is missing or broken (e.g. Darwin) — emulate it.
		return PollSelectAdapter()
	return select.poll()
# Fall back to a /proc-based implementation on platforms where the os
# module lacks getloadavg(). NOTE(review): some lines are elided from
# this excerpt, including the replacement function's `def` line.
getloadavg = getattr(os, "getloadavg", None)
if getloadavg is None:
	# [elided: `def getloadavg():` line]
		"""
		Uses /proc/loadavg to emulate os.getloadavg().
		Raises OSError if the load average was unobtainable.
		"""
		# [elided: presumably a `try:` around the open()]
		loadavg_str = open('/proc/loadavg').readline()
		# [elided: presumably `except IOError:` handler]
		# getloadavg() is only supposed to raise OSError, so convert
		raise OSError('unknown')
		loadavg_split = loadavg_str.split()
		if len(loadavg_split) < 3:
			raise OSError('unknown')
		# [elided: presumably `loadavg_floats = []` and a try/for over
		#  the first three fields]
		loadavg_floats.append(float(loadavg_split[i]))
		# [elided: presumably `except ValueError:` handler]
		raise OSError('unknown')
		return tuple(loadavg_floats)
class PollScheduler(object):
	"""
	Base class providing a poll()-driven event loop plus job-count and
	load-average throttling for subclasses that schedule tasks.
	NOTE(review): some lines are elided from this excerpt.
	"""

	class _sched_iface_class(SlotObject):
		# Narrow interface handed to tasks so they can interact with
		# the scheduler without holding a reference to it.
		__slots__ = ("register", "schedule", "unregister")

	# [elided: `def __init__(self):` line and some attribute setup]
		self._max_load = None
		# [elided lines]
		self._poll_event_queue = []
		self._poll_event_handlers = {}      # fd -> (handler, reg_id)
		self._poll_event_handler_ids = {}   # reg_id -> fd
		# Increment id for each new handler.
		self._event_handler_id = 0
		self._poll_obj = create_poll_instance()
		self._scheduling = False

	def _schedule(self):
		"""
		Calls _schedule_tasks() and automatically returns early from
		any recursive calls to this method that the _schedule_tasks()
		call might trigger. This makes _schedule() safe to call from
		inside exit listeners.
		"""
		if self._scheduling:
			# [elided: early return]
		self._scheduling = True
		# [elided: presumably `try:`]
		return self._schedule_tasks()
		# [elided: presumably `finally:`]
		self._scheduling = False

	def _running_job_count(self):
		# Abstract: subclasses report how many jobs are running.
		# [elided body]

	def _can_add_job(self):
		# Throttle on both job count and system load average.
		max_jobs = self._max_jobs
		max_load = self._max_load

		if self._max_jobs is not True and \
			self._running_job_count() >= self._max_jobs:
			# [elided: presumably `return False`]

		# Only consult the load average once at least one job runs and
		# parallelism is actually enabled.
		if max_load is not None and \
			(max_jobs is True or max_jobs > 1) and \
			self._running_job_count() >= 1:
			# [elided: presumably try/except around getloadavg()]
			avg1, avg5, avg15 = getloadavg()
			# [elided lines]
			if avg1 >= max_load:
				# [elided: presumably `return False`]

		# [elided: presumably `return True`]

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_handlers:
			# [elided lines]
			if timeout is None and \
				not self._poll_event_handlers:
				raise StopIteration(
					"timeout is None and there are no poll() event handlers")

		# The following error is known to occur with Linux kernel versions
		# [elided comment lines]
		#   select.error: (4, 'Interrupted system call')
		#
		# This error has been observed after a SIGSTOP, followed by SIGCONT.
		# Treat it similar to EAGAIN if timeout is None, otherwise just return
		# without any events.
		# [elided: presumably a retry loop and `try:`]
		self._poll_event_queue.extend(self._poll_obj.poll(timeout))
		# [elided lines]
		except select.error, e:
			writemsg_level("\n!!! select error: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			# [elided lines]
			if timeout is not None:
				# [elided: presumably break/return]

	def _next_poll_event(self, timeout=None):
		"""
		Since the _schedule_wait() loop is called by event
		handlers from _poll_loop(), maintain a central event
		queue for both of them to share events from a single
		poll() call. In order to avoid endless blocking, this
		raises StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""
		if not self._poll_event_queue:
			# [elided: presumably `self._poll(timeout)`]
		return self._poll_event_queue.pop()

	def _poll_loop(self):
		# Service events until every handler has unregistered.
		event_handlers = self._poll_event_handlers
		event_handled = False

		# [elided: presumably `try:`]
		while event_handlers:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			# [elided: presumably `handler(f, event)`]
			event_handled = True
		except StopIteration:
			event_handled = True

		if not event_handled:
			raise AssertionError("tight loop")

	def _schedule_yield(self):
		"""
		Schedule for a short period of time chosen by the scheduler based
		on internal state. Synchronous tasks should call this periodically
		in order to allow the scheduler to service pending poll events. The
		scheduler will call poll() exactly once, without blocking, and any
		resulting poll events will be serviced.
		"""
		event_handlers = self._poll_event_handlers
		# [elided: presumably `events_handled = 0`]

		if not event_handlers:
			return bool(events_handled)

		if not self._poll_event_queue:
			# Non-blocking poll to pick up any pending events.
			# [elided lines]

		# [elided: presumably `try:`]
		while event_handlers and self._poll_event_queue:
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			# [elided: presumably handler dispatch and counter update]
		except StopIteration:
			# [elided lines]

		return bool(events_handled)

	def _register(self, f, eventmask, handler):
		"""
		Register a handler for poll events on fd `f`.
		@return: A unique registration id, for use in schedule() or
			unregister() calls.
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		self._event_handler_id += 1
		reg_id = self._event_handler_id
		self._poll_event_handler_ids[reg_id] = f
		self._poll_event_handlers[f] = (handler, reg_id)
		self._poll_obj.register(f, eventmask)
		# [elided: presumably `return reg_id`]

	def _unregister(self, reg_id):
		# Tear down all bookkeeping for a registration id.
		f = self._poll_event_handler_ids[reg_id]
		self._poll_obj.unregister(f)
		del self._poll_event_handlers[f]
		del self._poll_event_handler_ids[reg_id]

	def _schedule_wait(self, wait_ids):
		"""
		Schedule until wait_id is not longer registered
		@param wait_id: a task id to wait for
		"""
		event_handlers = self._poll_event_handlers
		handler_ids = self._poll_event_handler_ids
		event_handled = False

		# Accept a single id as well as a collection of ids.
		if isinstance(wait_ids, int):
			wait_ids = frozenset([wait_ids])

		# [elided: presumably `try:`]
		while wait_ids.intersection(handler_ids):
			f, event = self._next_poll_event()
			handler, reg_id = event_handlers[f]
			# [elided: presumably `handler(f, event)`]
			event_handled = True
		except StopIteration:
			event_handled = True

		return event_handled
class QueueScheduler(PollScheduler):

	"""
	Add instances of SequentialTaskQueue and then call run(). The
	run() method returns when no tasks remain.
	NOTE(review): some lines are elided from this excerpt.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)

		if max_jobs is None:
			# [elided: presumably `max_jobs = 1`]

		self._max_jobs = max_jobs
		self._max_load = max_load
		self.sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)

		# [elided: presumably `self._queues = []`]
		self._schedule_listeners = []

	# [elided: `def add(self, q):` line]
		self._queues.append(q)

	def remove(self, q):
		self._queues.remove(q)

	# [elided: `def run(self):` line]
		# Drive scheduling until nothing is left to start, then drain
		# the running jobs.
		while self._schedule():
			# [elided: presumably `self._poll_loop()`]

		while self._running_job_count():
			# [elided: presumably `self._poll_loop()`]

	def _schedule_tasks(self):
		"""
		@rtype: bool
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			n = self._max_jobs - self._running_job_count()
			# [elided lines]
			if not self._start_next_job(n):
				# [elided: presumably `return False`]

		for q in self._queues:
			# [elided: presumably return True if any queue is non-empty]
		# [elided: presumably `return False`]

	def _running_job_count(self):
		# [elided: presumably `job_count = 0`]
		for q in self._queues:
			job_count += len(q.running_tasks)
		self._jobs = job_count
		# [elided: presumably `return job_count`]

	def _start_next_job(self, n=1):
		# Start up to n jobs across the registered queues and report
		# how many actually started.
		# [elided: presumably `started_count = 0`]
		for q in self._queues:
			initial_job_count = len(q.running_tasks)
			# [elided: presumably `q.schedule()`]
			final_job_count = len(q.running_tasks)
			if final_job_count > initial_job_count:
				started_count += (final_job_count - initial_job_count)
			if started_count >= n:
				# [elided: presumably `break`]
		return started_count
class TaskScheduler(object):
	"""
	Convenience wrapper that couples a SequentialTaskQueue to a
	QueueScheduler. Add AsynchrousTask instances with add() and then
	call run(); run() returns once no tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		queue = SequentialTaskQueue(max_jobs=max_jobs)
		scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		self._queue = queue
		self._scheduler = scheduler
		# Expose the underlying scheduler's public interface directly.
		self.sched_iface = scheduler.sched_iface
		self.run = scheduler.run
		scheduler.add(queue)

	def add(self, task):
		"""Enqueue a task to be executed during run()."""
		self._queue.add(task)
class JobStatusDisplay(object):
	"""
	Renders the single-line "Jobs: x of y complete ... Load avg: ..."
	status display, using terminal control codes when the output
	stream is a tty. NOTE(review): some lines are elided from this
	excerpt.
	"""

	# Attributes whose assignment triggers _property_change().
	_bound_properties = ("curval", "failed", "running")
	# Fixed width of the jobs column, used for padding.
	_jobs_column_width = 48

	# Don't update the display unless at least this much
	# time has passed, in units of seconds.
	_min_display_latency = 2

	_default_term_codes = {
		# [elided: fallback escape-sequence entries]
	}

	_termcap_name_map = {
		'carriage_return' : 'cr',
		# [elided: remaining logical-name -> capname entries]
	}

	def __init__(self, out=sys.stdout, quiet=False):
		# object.__setattr__ is used throughout because __setattr__ is
		# overridden below to trigger display updates.
		object.__setattr__(self, "out", out)
		object.__setattr__(self, "quiet", quiet)
		object.__setattr__(self, "maxval", 0)
		object.__setattr__(self, "merges", 0)
		object.__setattr__(self, "_changed", False)
		object.__setattr__(self, "_displayed", False)
		object.__setattr__(self, "_last_display_time", 0)
		object.__setattr__(self, "width", 80)
		# [elided lines]

		isatty = hasattr(out, "isatty") and out.isatty()
		object.__setattr__(self, "_isatty", isatty)
		if not isatty or not self._init_term():
			# Fall back to the built-in default control codes when the
			# terminal cannot be queried.
			# [elided: presumably `term_codes = {}`]
			for k, capname in self._termcap_name_map.iteritems():
				term_codes[k] = self._default_term_codes[capname]
			object.__setattr__(self, "_term_codes", term_codes)
		# Normalize all codes to unicode strings.
		encoding = sys.getdefaultencoding()
		for k, v in self._term_codes.items():
			if not isinstance(v, basestring):
				self._term_codes[k] = v.decode(encoding, 'replace')

	def _init_term(self):
		"""
		Initialize term control codes.
		@rtype: bool
		@returns: True if term codes were successfully initialized,
			False otherwise.
		"""
		term_type = os.environ.get("TERM", "vt100")
		# [elided: presumably curses import and `try:` setup]
		curses.setupterm(term_type, self.out.fileno())
		tigetstr = curses.tigetstr
		except curses.error:
			# [elided: error handling]

		if tigetstr is None:
			# [elided: presumably `return False`]

		# Query each required capability, falling back to defaults.
		# [elided: presumably `term_codes = {}`]
		for k, capname in self._termcap_name_map.iteritems():
			code = tigetstr(capname)
			# [elided: presumably `if code is None:` guard]
			code = self._default_term_codes[capname]
			term_codes[k] = code
		object.__setattr__(self, "_term_codes", term_codes)
		# [elided: presumably `return True`]

	def _format_msg(self, msg):
		return ">>> %s" % msg

	# [elided: `def _erase(self):` line and start of the write call;
	#  erases the current status line via CR + clear-to-EOL]
		self._term_codes['carriage_return'] + \
		self._term_codes['clr_eol'])
		# [elided lines]
		self._displayed = False

	def _display(self, line):
		self.out.write(line)
		# [elided lines]
		self._displayed = True

	def _update(self, msg):
		# [elided: presumably local `out` binding and quiet check]
		if not self._isatty:
			# Non-tty output: plain line-per-update.
			out.write(self._format_msg(msg) + self._term_codes['newline'])
			# [elided lines]
			self._displayed = True
			# [elided: presumably `return`]

		# [elided lines]
		self._display(self._format_msg(msg))

	def displayMessage(self, msg):
		# Print a message without clobbering the status line: erase the
		# status, print, then mark it for redisplay.
		was_displayed = self._displayed

		if self._isatty and self._displayed:
			# [elided: presumably `self._erase()`]

		self.out.write(self._format_msg(msg) + self._term_codes['newline'])
		# [elided lines]
		self._displayed = False
		# [elided: presumably `if was_displayed:` guard]
		self._changed = True
		# [elided lines]

	# [elided: `def reset(self):` line and counter resets]
		for name in self._bound_properties:
			object.__setattr__(self, name, 0)
		# [elided lines]
		self.out.write(self._term_codes['newline'])
		# [elided lines]
		self._displayed = False

	def __setattr__(self, name, value):
		old_value = getattr(self, name)
		if value == old_value:
			# [elided: presumably `return`]
		object.__setattr__(self, name, value)
		if name in self._bound_properties:
			self._property_change(name, old_value, value)

	def _property_change(self, name, old_value, new_value):
		self._changed = True
		# [elided lines]

	def _load_avg_str(self):
		# [elided: body that obtains getloadavg() values and chooses a
		#  digit count; `avg` and `digits` are bound there]
		return ", ".join(("%%.%df" % digits ) % x for x in avg)

	# [elided: `def display(self):` line]
		"""
		Display status on stdout, but only if something has
		changed since the last call.
		"""
		# [elided lines]

		# Throttle redraws via _min_display_latency.
		current_time = time.time()
		time_delta = current_time - self._last_display_time
		if self._displayed and \
			# [elided: condition continuation]
			if not self._isatty:
				# [elided lines]
			if time_delta < self._min_display_latency:
				# [elided: presumably early return]

		self._last_display_time = current_time
		self._changed = False
		self._display_status()

	def _display_status(self):
		# Don't use len(self._completed_tasks) here since that also
		# can include uninstall tasks.
		curval_str = str(self.curval)
		maxval_str = str(self.maxval)
		running_str = str(self.running)
		failed_str = str(self.failed)
		load_avg_str = self._load_avg_str()

		# Render twice: once with color codes, once plain (the plain
		# copy is used for width/padding math and the xterm title).
		color_output = StringIO()
		plain_output = StringIO()
		style_file = portage.output.ConsoleStyleFile(color_output)
		style_file.write_listener = plain_output
		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
		style_writer.style_listener = style_file.new_styles
		f = formatter.AbstractFormatter(style_writer)

		number_style = "INFORM"
		f.add_literal_data("Jobs: ")
		f.push_style(number_style)
		f.add_literal_data(curval_str)
		# [elided lines]
		f.add_literal_data(" of ")
		f.push_style(number_style)
		f.add_literal_data(maxval_str)
		# [elided lines]
		f.add_literal_data(" complete")

		# [elided: presumably `if self.running:` guard]
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(running_str)
		# [elided lines]
		f.add_literal_data(" running")

		# [elided: presumably `if self.failed:` guard]
		f.add_literal_data(", ")
		f.push_style(number_style)
		f.add_literal_data(failed_str)
		# [elided lines]
		f.add_literal_data(" failed")

		# Pad the jobs column to a fixed width.
		padding = self._jobs_column_width - len(plain_output.getvalue())
		# [elided: presumably `if padding > 0:` guard]
		f.add_literal_data(padding * " ")

		f.add_literal_data("Load avg: ")
		f.add_literal_data(load_avg_str)

		# Truncate to fit width, to avoid making the terminal scroll if the
		# line overflows (happens when the load average is large).
		plain_output = plain_output.getvalue()
		if self._isatty and len(plain_output) > self.width:
			# Use plain_output here since it's easier to truncate
			# properly than the color output which contains console
			# color codes.
			self._update(plain_output[:self.width])
		# [elided: presumably `else:`]
			self._update(color_output.getvalue())

		# [elided lines]
		xtermTitle(" ".join(plain_output.split()))
9933 class Scheduler(PollScheduler):
9935 _opts_ignore_blockers = \
9936 frozenset(["--buildpkgonly",
9937 "--fetchonly", "--fetch-all-uri",
9938 "--nodeps", "--pretend"])
9940 _opts_no_background = \
9941 frozenset(["--pretend",
9942 "--fetchonly", "--fetch-all-uri"])
9944 _opts_no_restart = frozenset(["--buildpkgonly",
9945 "--fetchonly", "--fetch-all-uri", "--pretend"])
9947 _bad_resume_opts = set(["--ask", "--changelog",
9948 "--resume", "--skipfirst"])
9950 _fetch_log = "/var/log/emerge-fetch.log"
9952 class _iface_class(SlotObject):
9953 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9954 "dblinkElog", "fetch", "register", "schedule",
9955 "scheduleSetup", "scheduleUnpack", "scheduleYield",
9958 class _fetch_iface_class(SlotObject):
9959 __slots__ = ("log_file", "schedule")
9961 _task_queues_class = slot_dict_class(
9962 ("merge", "jobs", "fetch", "unpack"), prefix="")
9964 class _build_opts_class(SlotObject):
9965 __slots__ = ("buildpkg", "buildpkgonly",
9966 "fetch_all_uri", "fetchonly", "pretend")
9968 class _binpkg_opts_class(SlotObject):
9969 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9971 class _pkg_count_class(SlotObject):
9972 __slots__ = ("curval", "maxval")
9974 class _emerge_log_class(SlotObject):
9975 __slots__ = ("xterm_titles",)
9977 def log(self, *pargs, **kwargs):
9978 if not self.xterm_titles:
9979 # Avoid interference with the scheduler's status display.
9980 kwargs.pop("short_msg", None)
9981 emergelog(self.xterm_titles, *pargs, **kwargs)
9983 class _failed_pkg(SlotObject):
9984 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9986 class _ConfigPool(object):
9987 """Interface for a task to temporarily allocate a config
9988 instance from a pool. This allows a task to be constructed
9989 long before the config instance actually becomes needed, like
9990 when prefetchers are constructed for the whole merge list."""
9991 __slots__ = ("_root", "_allocate", "_deallocate")
9992 def __init__(self, root, allocate, deallocate):
9994 self._allocate = allocate
9995 self._deallocate = deallocate
9997 return self._allocate(self._root)
9998 def deallocate(self, settings):
9999 self._deallocate(settings)
10001 class _unknown_internal_error(portage.exception.PortageException):
10003 Used internally to terminate scheduling. The specific reason for
10004 the failure should have been dumped to stderr.
10006 def __init__(self, value=""):
10007 portage.exception.PortageException.__init__(self, value)
10009 def __init__(self, settings, trees, mtimedb, myopts,
10010 spinner, mergelist, favorites, digraph):
10011 PollScheduler.__init__(self)
10012 self.settings = settings
10013 self.target_root = settings["ROOT"]
10015 self.myopts = myopts
10016 self._spinner = spinner
10017 self._mtimedb = mtimedb
10018 self._mergelist = mergelist
10019 self._favorites = favorites
10020 self._args_set = InternalPackageSet(favorites)
10021 self._build_opts = self._build_opts_class()
10022 for k in self._build_opts.__slots__:
10023 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10024 self._binpkg_opts = self._binpkg_opts_class()
10025 for k in self._binpkg_opts.__slots__:
10026 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10029 self._logger = self._emerge_log_class()
10030 self._task_queues = self._task_queues_class()
10031 for k in self._task_queues.allowed_keys:
10032 setattr(self._task_queues, k,
10033 SequentialTaskQueue())
10035 # Holds merges that will wait to be executed when no builds are
10036 # executing. This is useful for system packages since dependencies
10037 # on system packages are frequently unspecified.
10038 self._merge_wait_queue = []
10039 # Holds merges that have been transfered from the merge_wait_queue to
10040 # the actual merge queue. They are removed from this list upon
10041 # completion. Other packages can start building only when this list is
10043 self._merge_wait_scheduled = []
10045 # Holds system packages and their deep runtime dependencies. Before
10046 # being merged, these packages go to merge_wait_queue, to be merged
10047 # when no other packages are building.
10048 self._deep_system_deps = set()
10050 # Holds packages to merge which will satisfy currently unsatisfied
10051 # deep runtime dependencies of system packages. If this is not empty
10052 # then no parallel builds will be spawned until it is empty. This
10053 # minimizes the possibility that a build will fail due to the system
10054 # being in a fragile state. For example, see bug #259954.
10055 self._unsatisfied_system_deps = set()
10057 self._status_display = JobStatusDisplay()
10058 self._max_load = myopts.get("--load-average")
10059 max_jobs = myopts.get("--jobs")
10060 if max_jobs is None:
10062 self._set_max_jobs(max_jobs)
10064 # The root where the currently running
10065 # portage instance is installed.
10066 self._running_root = trees["/"]["root_config"]
10068 if settings.get("PORTAGE_DEBUG", "") == "1":
10070 self.pkgsettings = {}
10071 self._config_pool = {}
10072 self._blocker_db = {}
10074 self._config_pool[root] = []
10075 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10077 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10078 schedule=self._schedule_fetch)
10079 self._sched_iface = self._iface_class(
10080 dblinkEbuildPhase=self._dblink_ebuild_phase,
10081 dblinkDisplayMerge=self._dblink_display_merge,
10082 dblinkElog=self._dblink_elog,
10083 fetch=fetch_iface, register=self._register,
10084 schedule=self._schedule_wait,
10085 scheduleSetup=self._schedule_setup,
10086 scheduleUnpack=self._schedule_unpack,
10087 scheduleYield=self._schedule_yield,
10088 unregister=self._unregister)
10090 self._prefetchers = weakref.WeakValueDictionary()
10091 self._pkg_queue = []
10092 self._completed_tasks = set()
10094 self._failed_pkgs = []
10095 self._failed_pkgs_all = []
10096 self._failed_pkgs_die_msgs = []
10097 self._post_mod_echo_msgs = []
10098 self._parallel_fetch = False
10099 merge_count = len([x for x in mergelist \
10100 if isinstance(x, Package) and x.operation == "merge"])
10101 self._pkg_count = self._pkg_count_class(
10102 curval=0, maxval=merge_count)
10103 self._status_display.maxval = self._pkg_count.maxval
10105 # The load average takes some time to respond when new
10106 # jobs are added, so we need to limit the rate of adding
10108 self._job_delay_max = 10
10109 self._job_delay_factor = 1.0
10110 self._job_delay_exp = 1.5
10111 self._previous_job_start_time = None
10113 self._set_digraph(digraph)
10115 # This is used to memoize the _choose_pkg() result when
10116 # no packages can be chosen until one of the existing
10118 self._choose_pkg_return_early = False
10120 features = self.settings.features
10121 if "parallel-fetch" in features and \
10122 not ("--pretend" in self.myopts or \
10123 "--fetch-all-uri" in self.myopts or \
10124 "--fetchonly" in self.myopts):
10125 if "distlocks" not in features:
10126 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10127 portage.writemsg(red("!!!")+" parallel-fetching " + \
10128 "requires the distlocks feature enabled"+"\n",
10130 portage.writemsg(red("!!!")+" you have it disabled, " + \
10131 "thus parallel-fetching is being disabled"+"\n",
10133 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10134 elif len(mergelist) > 1:
10135 self._parallel_fetch = True
10137 if self._parallel_fetch:
10138 # clear out existing fetch log if it exists
10140 open(self._fetch_log, 'w')
10141 except EnvironmentError:
10144 self._running_portage = None
10145 portage_match = self._running_root.trees["vartree"].dbapi.match(
10146 portage.const.PORTAGE_PACKAGE_ATOM)
10148 cpv = portage_match.pop()
10149 self._running_portage = self._pkg(cpv, "installed",
10150 self._running_root, installed=True)
10152 def _poll(self, timeout=None):
10154 PollScheduler._poll(self, timeout=timeout)
10156 def _set_max_jobs(self, max_jobs):
10157 self._max_jobs = max_jobs
10158 self._task_queues.jobs.max_jobs = max_jobs
10160 def _background_mode(self):
10162 Check if background mode is enabled and adjust states as necessary.
10165 @returns: True if background mode is enabled, False otherwise.
10167 background = (self._max_jobs is True or \
10168 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10169 not bool(self._opts_no_background.intersection(self.myopts))
10172 interactive_tasks = self._get_interactive_tasks()
10173 if interactive_tasks:
10175 writemsg_level(">>> Sending package output to stdio due " + \
10176 "to interactive package(s):\n",
10177 level=logging.INFO, noiselevel=-1)
10179 for pkg in interactive_tasks:
10180 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10181 if pkg.root != "/":
10182 pkg_str += " for " + pkg.root
10183 msg.append(pkg_str)
10185 writemsg_level("".join("%s\n" % (l,) for l in msg),
10186 level=logging.INFO, noiselevel=-1)
10187 if self._max_jobs is True or self._max_jobs > 1:
10188 self._set_max_jobs(1)
10189 writemsg_level(">>> Setting --jobs=1 due " + \
10190 "to the above interactive package(s)\n",
10191 level=logging.INFO, noiselevel=-1)
10193 self._status_display.quiet = \
10194 not background or \
10195 ("--quiet" in self.myopts and \
10196 "--verbose" not in self.myopts)
10198 self._logger.xterm_titles = \
10199 "notitles" not in self.settings.features and \
10200 self._status_display.quiet
10204 def _get_interactive_tasks(self):
10205 from portage import flatten
10206 from portage.dep import use_reduce, paren_reduce
10207 interactive_tasks = []
10208 for task in self._mergelist:
10209 if not (isinstance(task, Package) and \
10210 task.operation == "merge"):
10213 properties = flatten(use_reduce(paren_reduce(
10214 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10215 except portage.exception.InvalidDependString, e:
10216 show_invalid_depstring_notice(task,
10217 task.metadata["PROPERTIES"], str(e))
10218 raise self._unknown_internal_error()
10219 if "interactive" in properties:
10220 interactive_tasks.append(task)
10221 return interactive_tasks
10223 def _set_digraph(self, digraph):
10224 if "--nodeps" in self.myopts or \
10225 (self._max_jobs is not True and self._max_jobs < 2):
10227 self._digraph = None
10230 self._digraph = digraph
10231 self._find_system_deps()
10232 self._prune_digraph()
10233 self._prevent_builddir_collisions()
def _find_system_deps(self):
	"""
	Find system packages and their deep runtime dependencies. Before being
	merged, these packages go to merge_wait_queue, to be merged when no
	other packages are building.
	"""
	# Rebuild the cached set from scratch on every call.
	deep_system_deps = self._deep_system_deps
	deep_system_deps.clear()
	deep_system_deps.update(
		_find_deep_system_runtime_deps(self._digraph))
	# Only packages actually scheduled for merge are relevant here.
	deep_system_deps.difference_update([pkg for pkg in \
		deep_system_deps if pkg.operation != "merge"])
def _prune_digraph(self):
	"""
	Prune any root nodes that are irrelevant.

	Repeatedly removes root nodes that are installed nomerge nodes
	or already-completed tasks, until no more can be removed (pruning
	a root may expose new prunable roots).
	"""

	graph = self._digraph
	completed_tasks = self._completed_tasks
	removed_nodes = set()
	while True:
		for node in graph.root_nodes():
			if not isinstance(node, Package) or \
				(node.installed and node.operation == "nomerge") or \
				node in completed_tasks:
				removed_nodes.add(node)
		if removed_nodes:
			graph.difference_update(removed_nodes)
		if not removed_nodes:
			# Fixed point reached: nothing left to prune.
			break
		removed_nodes.clear()
def _prevent_builddir_collisions(self):
	"""
	When building stages, sometimes the same exact cpv needs to be merged
	to both $ROOTs. Add edges to the digraph in order to avoid collisions
	in the builddir. Currently, normal file locks would be inappropriate
	for this purpose since emerge holds all of it's build dir locks from
	the main process.
	"""
	cpv_map = {}
	for pkg in self._mergelist:
		if not isinstance(pkg, Package):
			# a satisfied blocker
			continue
		if pkg.cpv not in cpv_map:
			cpv_map[pkg.cpv] = [pkg]
			continue
		# Same cpv seen before: force serialization by making this
		# package depend on every earlier instance.
		for earlier_pkg in cpv_map[pkg.cpv]:
			self._digraph.add(earlier_pkg, pkg,
				priority=DepPriority(buildtime=True))
		cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	# Default exit status when none is supplied by the caller.
	status = 1
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		if pargs:
			# The first positional argument is the exit status.
			self.status = pargs[0]
def _schedule_fetch(self, fetcher):
	"""
	Schedule a fetcher on the fetch queue, in order to
	serialize access to the fetch log.
	"""
	# addFront() gives interactive fetches priority over prefetchers.
	self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
	"""
	Schedule a setup phase on the merge queue, in order to
	serialize unsandboxed access to the live filesystem.
	"""
	self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
	"""
	Schedule an unpack phase on the unpack queue, in order
	to serialize $DISTDIR access for live ebuilds.
	"""
	self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
	"""
	Returns a callable which should be called only when
	the vdb lock has been acquired.
	"""
	# acquire_lock=0 since the caller is required to hold the lock.
	def get_blockers():
		return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
	return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
	"""
	Return a list of dblink instances for installed packages that
	block new_pkg, or None when blockers are being ignored.
	"""
	if self._opts_ignore_blockers.intersection(self.myopts):
		return None

	# Call gc.collect() here to avoid heap overflow that
	# triggers 'Cannot allocate memory' errors (reported
	# with python-2.5).
	import gc
	gc.collect()

	blocker_db = self._blocker_db[new_pkg.root]

	blocker_dblinks = []
	for blocking_pkg in blocker_db.findInstalledBlockers(
		new_pkg, acquire_lock=acquire_lock):
		# A package never blocks its own slot or an identical version;
		# those cases are handled as replacements instead.
		if new_pkg.slot_atom == blocking_pkg.slot_atom:
			continue
		if new_pkg.cpv == blocking_pkg.cpv:
			continue
		blocker_dblinks.append(portage.dblink(
			blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
			self.pkgsettings[blocking_pkg.root], treetype="vartree",
			vartree=self.trees[blocking_pkg.root]["vartree"]))

	gc.collect()

	return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
	"""
	Translate a dblink instance into the corresponding Package
	object, looked up through the package cache.
	"""
	pkg_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
	return self._pkg(pkg_dblink.mycpv, pkg_type,
		self.trees[pkg_dblink.myroot]["root_config"],
		installed=(pkg_type == "installed"))
def _append_to_log_path(self, log_path, msg):
	"""
	Append msg to the log file at log_path, guaranteeing that the
	file handle is closed even if the write raises.
	"""
	f = open(log_path, 'a')
	try:
		f.write(msg)
	finally:
		f.close()
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
	"""
	Deliver elog messages originating from a dblink instance,
	redirecting them into the build log (instead of the terminal)
	when running in background mode.
	"""

	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	log_file = None
	out = sys.stdout
	background = self._background

	if background and log_path is not None:
		log_file = open(log_path, 'a')
		out = log_file

	try:
		for msg in msgs:
			func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
	finally:
		if log_file is not None:
			log_file.close()
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
	"""
	Display a merge message from a dblink instance. In background
	mode, terminal output below WARN level is suppressed; when a
	build log exists, the message is always mirrored into it.
	"""
	log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
	background = self._background

	if log_path is None:
		if not (background and level < logging.WARN):
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
	else:
		if not background:
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)

		self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
	pkg_dblink, pkg_dbapi, ebuild_path, phase):
	"""
	Using this callback for merge phases allows the scheduler
	to run while these phases execute asynchronously, and allows
	the scheduler control output handling.
	"""

	scheduler = self._sched_iface
	settings = pkg_dblink.settings
	pkg = self._dblink_pkg(pkg_dblink)
	background = self._background
	log_path = settings.get("PORTAGE_LOG_FILE")

	ebuild_phase = EbuildPhase(background=background,
		pkg=pkg, phase=phase, scheduler=scheduler,
		settings=settings, tree=pkg_dblink.treetype)
	ebuild_phase.start()
	# Blocks until the phase finishes; the scheduler's event loop
	# keeps servicing other tasks meanwhile.
	ebuild_phase.wait()

	return ebuild_phase.returncode
def _generate_digests(self):
	"""
	Generate digests if necessary for --digest or FEATURES=digest.
	In order to avoid interference, this must done before parallel
	tasks are started.

	@returns: os.EX_OK on success, 1 on failure
	"""

	if '--fetchonly' in self.myopts:
		return os.EX_OK

	digest = '--digest' in self.myopts
	if not digest:
		for pkgsettings in self.pkgsettings.itervalues():
			if 'digest' in pkgsettings.features:
				digest = True
				break

	if not digest:
		return os.EX_OK

	for x in self._mergelist:
		if not isinstance(x, Package) or \
			x.type_name != 'ebuild' or \
			x.operation != 'merge':
			continue
		pkgsettings = self.pkgsettings[x.root]
		if '--digest' not in self.myopts and \
			'digest' not in pkgsettings.features:
			continue
		portdb = x.root_config.trees['porttree'].dbapi
		ebuild_path = portdb.findname(x.cpv)
		if not ebuild_path:
			writemsg_level(
				"!!! Could not locate ebuild for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
			return 1
		pkgsettings['O'] = os.path.dirname(ebuild_path)
		if not portage.digestgen([], pkgsettings, myportdb=portdb):
			writemsg_level(
				"!!! Unable to generate manifest for '%s'.\n" \
				% x.cpv, level=logging.ERROR, noiselevel=-1)
			return 1

	return os.EX_OK
def _check_manifests(self):
	# Verify all the manifests now so that the user is notified of failure
	# as soon as possible.
	if "strict" not in self.settings.features or \
		"--fetchonly" in self.myopts or \
		"--fetch-all-uri" in self.myopts:
		return os.EX_OK

	shown_verifying_msg = False
	quiet_settings = {}
	for myroot, pkgsettings in self.pkgsettings.iteritems():
		# Clone the config in order to make digestcheck quiet.
		quiet_config = portage.config(clone=pkgsettings)
		quiet_config["PORTAGE_QUIET"] = "1"
		quiet_config.backup_changes("PORTAGE_QUIET")
		quiet_settings[myroot] = quiet_config
		del quiet_config

	for x in self._mergelist:
		if not isinstance(x, Package) or \
			x.type_name != "ebuild":
			continue

		if not shown_verifying_msg:
			shown_verifying_msg = True
			self._status_msg("Verifying ebuild manifests")

		root_config = x.root_config
		portdb = root_config.trees["porttree"].dbapi
		quiet_config = quiet_settings[root_config.root]
		quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
		if not portage.digestcheck([], quiet_config, strict=True):
			return 1

	return os.EX_OK
def _add_prefetchers(self):
	"""
	Queue background fetch tasks for every package after the first
	so downloads overlap with builds. Does nothing unless parallel
	fetch is enabled.
	"""

	if not self._parallel_fetch:
		return

	self._status_msg("Starting parallel fetch")

	prefetchers = self._prefetchers

	# In order to avoid "waiting for lock" messages
	# at the beginning, which annoy users, never
	# spawn a prefetcher for the first package.
	for pkg in self._mergelist[1:]:
		prefetcher = self._create_prefetcher(pkg)
		if prefetcher is not None:
			self._task_queues.fetch.add(prefetcher)
			prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
	"""
	@return: a prefetcher, or None if not applicable
	"""
	prefetcher = None

	if not isinstance(pkg, Package):
		# Blockers and other non-package entries have nothing to fetch.
		pass

	elif pkg.type_name == "ebuild":
		prefetcher = EbuildFetcher(background=True,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			fetchonly=1, logfile=self._fetch_log,
			pkg=pkg, prefetch=True, scheduler=self._sched_iface)

	elif pkg.type_name == "binary" and \
		"--getbinpkg" in self.myopts and \
		pkg.root_config.trees["bintree"].isremote(pkg.cpv):
		prefetcher = BinpkgPrefetcher(background=True,
			pkg=pkg, scheduler=self._sched_iface)

	return prefetcher
def _is_restart_scheduled(self):
	"""
	Check if the merge list contains a replacement
	for the current running instance, that will result
	in restart after merge.
	@rtype: bool
	@returns: True if a restart is scheduled, False otherwise.
	"""
	if self._opts_no_restart.intersection(self.myopts):
		return False

	mergelist = self._mergelist

	for i, pkg in enumerate(mergelist):
		# The last package never triggers a restart since nothing
		# remains to be merged afterwards.
		if self._is_restart_necessary(pkg) and \
			i != len(mergelist) - 1:
			return True

	return False
def _is_restart_necessary(self, pkg):
	"""
	@return: True if merging the given package
		requires restart, False otherwise.
	"""

	# Figure out if we need a restart.
	if pkg.root == self._running_root.root and \
		portage.match_from_list(
		portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
		if self._running_portage:
			# Only a different version of portage requires restart.
			return pkg.cpv != self._running_portage.cpv
		return True
	return False
def _restart_if_necessary(self, pkg):
	"""
	Use execv() to restart emerge. This happens
	if portage upgrades itself and there are
	remaining packages in the list.
	"""

	if self._opts_no_restart.intersection(self.myopts):
		return

	if not self._is_restart_necessary(pkg):
		return

	if pkg == self._mergelist[-1]:
		# Nothing left to merge, so no restart is needed.
		return

	self._main_loop_cleanup()

	logger = self._logger
	pkg_count = self._pkg_count
	mtimedb = self._mtimedb
	bad_resume_opts = self._bad_resume_opts

	logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
		(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

	logger.log(" *** RESTARTING " + \
		"emerge via exec() after change of " + \
		"portage version.")

	mtimedb["resume"]["mergelist"].remove(list(pkg))
	mtimedb.commit()
	portage.run_exitfuncs()
	mynewargv = [sys.argv[0], "--resume"]
	resume_opts = self.myopts.copy()
	# For automatic resume, we need to prevent
	# any of bad_resume_opts from leaking in
	# via EMERGE_DEFAULT_OPTS.
	resume_opts["--ignore-default-opts"] = True
	for myopt, myarg in resume_opts.iteritems():
		if myopt not in bad_resume_opts:
			if myarg is True:
				# Flag option: no value to serialize.
				mynewargv.append(myopt)
			else:
				mynewargv.append(myopt +"="+ str(myarg))
	# priority only needs to be adjusted on the first run
	os.environ["PORTAGE_NICENESS"] = "0"
	os.execv(mynewargv[0], mynewargv)
10642 if "--resume" in self.myopts:
10644 portage.writemsg_stdout(
10645 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10646 self._logger.log(" *** Resuming merge...")
10648 self._save_resume_list()
10651 self._background = self._background_mode()
10652 except self._unknown_internal_error:
10655 for root in self.trees:
10656 root_config = self.trees[root]["root_config"]
10658 # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10659 # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10660 # for ensuring sane $PWD (bug #239560) and storing elog messages.
10661 tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10662 if not tmpdir or not os.path.isdir(tmpdir):
10663 msg = "The directory specified in your " + \
10664 "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10665 "does not exist. Please create this " + \
10666 "directory or correct your PORTAGE_TMPDIR setting."
10667 msg = textwrap.wrap(msg, 70)
10668 out = portage.output.EOutput()
10673 if self._background:
10674 root_config.settings.unlock()
10675 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10676 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10677 root_config.settings.lock()
10679 self.pkgsettings[root] = portage.config(
10680 clone=root_config.settings)
10682 rval = self._generate_digests()
10683 if rval != os.EX_OK:
10686 rval = self._check_manifests()
10687 if rval != os.EX_OK:
10690 keep_going = "--keep-going" in self.myopts
10691 fetchonly = self._build_opts.fetchonly
10692 mtimedb = self._mtimedb
10693 failed_pkgs = self._failed_pkgs
10696 rval = self._merge()
10697 if rval == os.EX_OK or fetchonly or not keep_going:
10699 if "resume" not in mtimedb:
10701 mergelist = self._mtimedb["resume"].get("mergelist")
10705 if not failed_pkgs:
10708 for failed_pkg in failed_pkgs:
10709 mergelist.remove(list(failed_pkg.pkg))
10711 self._failed_pkgs_all.extend(failed_pkgs)
10717 if not self._calc_resume_list():
10720 clear_caches(self.trees)
10721 if not self._mergelist:
10724 self._save_resume_list()
10725 self._pkg_count.curval = 0
10726 self._pkg_count.maxval = len([x for x in self._mergelist \
10727 if isinstance(x, Package) and x.operation == "merge"])
10728 self._status_display.maxval = self._pkg_count.maxval
10730 self._logger.log(" *** Finished. Cleaning up...")
10733 self._failed_pkgs_all.extend(failed_pkgs)
10736 background = self._background
10737 failure_log_shown = False
10738 if background and len(self._failed_pkgs_all) == 1:
10739 # If only one package failed then just show it's
10740 # whole log for easy viewing.
10741 failed_pkg = self._failed_pkgs_all[-1]
10742 build_dir = failed_pkg.build_dir
10745 log_paths = [failed_pkg.build_log]
10747 log_path = self._locate_failure_log(failed_pkg)
10748 if log_path is not None:
10750 log_file = open(log_path)
10754 if log_file is not None:
10756 for line in log_file:
10757 writemsg_level(line, noiselevel=-1)
10760 failure_log_shown = True
10762 # Dump mod_echo output now since it tends to flood the terminal.
10763 # This allows us to avoid having more important output, generated
10764 # later, from being swept away by the mod_echo output.
10765 mod_echo_output = _flush_elog_mod_echo()
10767 if background and not failure_log_shown and \
10768 self._failed_pkgs_all and \
10769 self._failed_pkgs_die_msgs and \
10770 not mod_echo_output:
10772 printer = portage.output.EOutput()
10773 for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10775 if mysettings["ROOT"] != "/":
10776 root_msg = " merged to %s" % mysettings["ROOT"]
10778 printer.einfo("Error messages for package %s%s:" % \
10779 (colorize("INFORM", key), root_msg))
10781 for phase in portage.const.EBUILD_PHASES:
10782 if phase not in logentries:
10784 for msgtype, msgcontent in logentries[phase]:
10785 if isinstance(msgcontent, basestring):
10786 msgcontent = [msgcontent]
10787 for line in msgcontent:
10788 printer.eerror(line.strip("\n"))
10790 if self._post_mod_echo_msgs:
10791 for msg in self._post_mod_echo_msgs:
10794 if len(self._failed_pkgs_all) > 1 or \
10795 (self._failed_pkgs_all and "--keep-going" in self.myopts):
10796 if len(self._failed_pkgs_all) > 1:
10797 msg = "The following %d packages have " % \
10798 len(self._failed_pkgs_all) + \
10799 "failed to build or install:"
10801 msg = "The following package has " + \
10802 "failed to build or install:"
10803 prefix = bad(" * ")
10804 writemsg(prefix + "\n", noiselevel=-1)
10805 from textwrap import wrap
10806 for line in wrap(msg, 72):
10807 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10808 writemsg(prefix + "\n", noiselevel=-1)
10809 for failed_pkg in self._failed_pkgs_all:
10810 writemsg("%s\t%s\n" % (prefix,
10811 colorize("INFORM", str(failed_pkg.pkg))),
10813 writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
	"""
	Collect ERROR-level elog messages from failed packages so they
	can be re-displayed after the merge run completes.
	"""
	errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
	if errors:
		self._failed_pkgs_die_msgs.append(
			(mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
	"""
	Return the path of a non-empty build log for the given failed
	package, or None when no usable log exists.
	"""

	build_dir = failed_pkg.build_dir

	log_paths = [failed_pkg.build_log]

	for log_path in log_paths:
		if not log_path:
			continue

		try:
			log_size = os.stat(log_path).st_size
		except OSError:
			# Missing or unreadable log: try the next candidate.
			continue

		if log_size == 0:
			continue

		return log_path

	return None
def _add_packages(self):
	"""
	Move Package instances from the merge list into the scheduler's
	package queue; satisfied blockers carry no work and are skipped.
	"""
	pkg_queue = self._pkg_queue
	for pkg in self._mergelist:
		if isinstance(pkg, Package):
			pkg_queue.append(pkg)
		elif isinstance(pkg, Blocker):
			pass
def _system_merge_started(self, merge):
	"""
	Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
	"""
	graph = self._digraph
	if graph is None:
		return
	pkg = merge.merge.pkg

	# Skip this if $ROOT != / since it shouldn't matter if there
	# are unsatisfied system runtime deps in this case.
	if pkg.root != '/':
		return

	completed_tasks = self._completed_tasks
	unsatisfied = self._unsatisfied_system_deps

	def ignore_non_runtime_or_satisfied(priority):
		"""
		Ignore non-runtime and satisfied runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			not priority.satisfied and \
			(priority.runtime or priority.runtime_post):
			return False
		return True

	# When checking for unsatisfied runtime deps, only check
	# direct deps since indirect deps are checked when the
	# corresponding parent is merged.
	for child in graph.child_nodes(pkg,
		ignore_priority=ignore_non_runtime_or_satisfied):

		if not isinstance(child, Package) or \
			child.operation == 'uninstall':
			continue
		if child is pkg:
			continue
		if child.operation == 'merge' and \
			child not in completed_tasks:
			unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
	# The task reached the merge queue via _merge_wait_queue, so drop
	# it from the scheduled set before normal merge-exit processing.
	self._merge_wait_scheduled.remove(task)
	self._merge_exit(task)
def _merge_exit(self, merge):
	"""
	Exit listener for PackageMerge tasks: record the result, return
	the task's config instance to the pool, refresh the display, and
	schedule more work.
	"""
	self._do_merge_exit(merge)
	self._deallocate_config(merge.merge.settings)
	if merge.returncode == os.EX_OK and \
		not merge.merge.pkg.installed:
		self._status_display.curval += 1
	self._status_display.merges = len(self._task_queues.merge)
	self._schedule()
def _do_merge_exit(self, merge):
	"""
	Handle the outcome of a completed PackageMerge task: record a
	failure, or mark the task (and any replaced package's uninstall
	task) complete and update the resume list.
	"""
	pkg = merge.merge.pkg
	if merge.returncode != os.EX_OK:
		settings = merge.merge.settings
		build_dir = settings.get("PORTAGE_BUILDDIR")
		build_log = settings.get("PORTAGE_LOG_FILE")

		self._failed_pkgs.append(self._failed_pkg(
			build_dir=build_dir, build_log=build_log,
			pkg=pkg,
			returncode=merge.returncode))
		self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

		self._status_display.failed = len(self._failed_pkgs)
		return

	self._task_complete(pkg)
	pkg_to_replace = merge.merge.pkg_to_replace
	if pkg_to_replace is not None:
		# When a package is replaced, mark it's uninstall
		# task complete (if any).
		uninst_hash_key = \
			("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
		self._task_complete(uninst_hash_key)

	if pkg.installed:
		return

	self._restart_if_necessary(pkg)

	# Call mtimedb.commit() after each merge so that
	# --resume still works after being interrupted
	# by reboot, sigkill or similar.
	mtimedb = self._mtimedb
	mtimedb["resume"]["mergelist"].remove(list(pkg))
	if not mtimedb["resume"]["mergelist"]:
		del mtimedb["resume"]
	mtimedb.commit()
def _build_exit(self, build):
	"""
	Exit listener for build jobs: on success, queue the package for
	merging (deferred for deep system deps); on failure, record the
	failed package. Always releases the job slot and reschedules.
	"""
	if build.returncode == os.EX_OK:
		merge = PackageMerge(merge=build)
		if not build.build_opts.buildpkgonly and \
			build.pkg in self._deep_system_deps:
			# Since dependencies on system packages are frequently
			# unspecified, merge them only when no builds are executing.
			self._merge_wait_queue.append(merge)
			merge.addStartListener(self._system_merge_started)
		else:
			merge.addExitListener(self._merge_exit)
			self._task_queues.merge.add(merge)
			self._status_display.merges = len(self._task_queues.merge)
	else:
		settings = build.settings
		build_dir = settings.get("PORTAGE_BUILDDIR")
		build_log = settings.get("PORTAGE_LOG_FILE")

		self._failed_pkgs.append(self._failed_pkg(
			build_dir=build_dir, build_log=build_log,
			pkg=build.pkg,
			returncode=build.returncode))
		self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

		self._status_display.failed = len(self._failed_pkgs)
		self._deallocate_config(build.settings)
	self._jobs -= 1
	self._status_display.running = self._jobs
	self._schedule()
def _extract_exit(self, build):
	# Binary package extraction shares the build-job exit handling.
	self._build_exit(build)
def _task_complete(self, pkg):
	"""
	Record that the given merge task has finished, and allow the
	scheduler to reconsider packages that were previously blocked.
	"""
	self._unsatisfied_system_deps.discard(pkg)
	self._completed_tasks.add(pkg)
	self._choose_pkg_return_early = False
10988 self._add_prefetchers()
10989 self._add_packages()
10990 pkg_queue = self._pkg_queue
10991 failed_pkgs = self._failed_pkgs
10992 portage.locks._quiet = self._background
10993 portage.elog._emerge_elog_listener = self._elog_listener
10999 self._main_loop_cleanup()
11000 portage.locks._quiet = False
11001 portage.elog._emerge_elog_listener = None
11003 rval = failed_pkgs[-1].returncode
def _main_loop_cleanup(self):
	"""
	Reset all per-run scheduler state after the main loop exits.
	"""
	self._choose_pkg_return_early = False
	self._digraph = None
	del self._pkg_queue[:]
	for tracked in (self._completed_tasks, self._deep_system_deps,
		self._unsatisfied_system_deps):
		tracked.clear()
	self._status_display.reset()
	self._task_queues.fetch.clear()
def _choose_pkg(self):
	"""
	Choose a task that has all it's dependencies satisfied.

	@returns: the chosen Package, or None when no package is
		currently eligible
	"""

	if self._choose_pkg_return_early:
		return None

	if self._digraph is None:
		if (self._jobs or self._task_queues.merge) and \
			not ("--nodeps" in self.myopts and \
			(self._max_jobs is True or self._max_jobs > 1)):
			# Without a graph we cannot prove independence, so wait
			# for running jobs to finish before choosing again.
			self._choose_pkg_return_early = True
			return None
		return self._pkg_queue.pop(0)

	if not (self._jobs or self._task_queues.merge):
		# Nothing is running, so the front of the queue is safe.
		return self._pkg_queue.pop(0)

	self._prune_digraph()

	chosen_pkg = None
	later = set(self._pkg_queue)
	for pkg in self._pkg_queue:
		later.remove(pkg)
		if not self._dependent_on_scheduled_merges(pkg, later):
			chosen_pkg = pkg
			break

	if chosen_pkg is not None:
		self._pkg_queue.remove(chosen_pkg)

	if chosen_pkg is None:
		# There's no point in searching for a package to
		# choose until at least one of the existing jobs
		# completes.
		self._choose_pkg_return_early = True

	return chosen_pkg
def _dependent_on_scheduled_merges(self, pkg, later):
	"""
	Traverse the subgraph of the given packages deep dependencies
	to see if it contains any scheduled merges.
	@param pkg: a package to check dependencies for
	@type pkg: Package
	@param later: packages for which dependence should be ignored
		since they will be merged later than pkg anyway and therefore
		delaying the merge of pkg will not result in a more optimal
		merge order in any case
	@type later: set
	@rtype: bool
	@returns: True if the package is dependent, False otherwise.
	"""

	graph = self._digraph
	completed_tasks = self._completed_tasks

	dependent = False
	traversed_nodes = set([pkg])
	direct_deps = graph.child_nodes(pkg)
	node_stack = direct_deps
	direct_deps = frozenset(direct_deps)
	while node_stack:
		node = node_stack.pop()
		if node in traversed_nodes:
			continue
		traversed_nodes.add(node)
		if not ((node.installed and node.operation == "nomerge") or \
			(node.operation == "uninstall" and \
			node not in direct_deps) or \
			node in completed_tasks or \
			node in later):
			dependent = True
			break
		node_stack.extend(graph.child_nodes(node))

	return dependent
def _allocate_config(self, root):
	"""
	Allocate a unique config instance for a task in order
	to prevent interference between parallel tasks.
	"""
	if self._config_pool[root]:
		temp_settings = self._config_pool[root].pop()
	else:
		temp_settings = portage.config(clone=self.pkgsettings[root])
	# Since config.setcpv() isn't guaranteed to call config.reset() due to
	# performance reasons, call it here to make sure all settings from the
	# previous package get flushed out (such as PORTAGE_LOG_FILE).
	temp_settings.reload()
	temp_settings.reset()
	return temp_settings
def _deallocate_config(self, settings):
	# Return the config instance to the pool for its ROOT so that
	# _allocate_config() can reuse it for a later task.
	self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
	"""
	Drive scheduling and event polling until all jobs and merges
	have completed.
	"""

	# Only allow 1 job max if a restart is scheduled
	# due to portage update.
	if self._is_restart_scheduled() or \
		self._opts_no_background.intersection(self.myopts):
		self._set_max_jobs(1)

	merge_queue = self._task_queues.merge

	while self._schedule():
		if self._poll_event_handlers:
			self._poll_loop()

	while True:
		self._schedule()
		if not (self._jobs or merge_queue):
			break
		if self._poll_event_handlers:
			self._poll_loop()
def _keep_scheduling(self):
	"""
	Return True while the package queue is non-empty and no failure
	should halt scheduling (failures are tolerated in fetchonly mode).
	"""
	if not self._pkg_queue:
		return False
	if self._failed_pkgs and not self._build_opts.fetchonly:
		return False
	return True
def _schedule_tasks(self):
	"""
	One scheduling pass: flush waiting merges when idle, start new
	jobs, run each task queue, and report whether scheduling should
	continue.
	"""

	# When the number of jobs drops to zero, process all waiting merges.
	if not self._jobs and self._merge_wait_queue:
		for task in self._merge_wait_queue:
			task.addExitListener(self._merge_wait_exit_handler)
			self._task_queues.merge.add(task)
		self._status_display.merges = len(self._task_queues.merge)
		self._merge_wait_scheduled.extend(self._merge_wait_queue)
		del self._merge_wait_queue[:]

	self._schedule_tasks_imp()
	self._status_display.display()

	state_change = 0
	for q in self._task_queues.values():
		if q.schedule():
			state_change += 1

	# Cancel prefetchers if they're the only reason
	# the main poll loop is still running.
	if self._failed_pkgs and not self._build_opts.fetchonly and \
		not (self._jobs or self._task_queues.merge) and \
		self._task_queues.fetch:
		self._task_queues.fetch.clear()
		state_change += 1

	if state_change:
		self._schedule_tasks_imp()
		self._status_display.display()

	return self._keep_scheduling()
def _job_delay(self):
	"""
	@rtype: bool
	@returns: True if job scheduling should be delayed, False otherwise.
	"""

	if self._jobs and self._max_load is not None:

		current_time = time.time()

		# Exponential backoff based on the number of running jobs,
		# capped at _job_delay_max.
		delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
		if delay > self._job_delay_max:
			delay = self._job_delay_max
		if (current_time - self._previous_job_start_time) < delay:
			return True

	return False
def _schedule_tasks_imp(self):
	"""
	Start as many new tasks as limits allow.

	@rtype: bool
	@returns: True if state changed, False otherwise.
	"""

	state_change = 0

	while True:

		if not self._keep_scheduling():
			return bool(state_change)

		if self._choose_pkg_return_early or \
			self._merge_wait_scheduled or \
			(self._jobs and self._unsatisfied_system_deps) or \
			not self._can_add_job() or \
			self._job_delay():
			return bool(state_change)

		pkg = self._choose_pkg()
		if pkg is None:
			return bool(state_change)

		state_change += 1

		if not pkg.installed:
			self._pkg_count.curval += 1

		task = self._task(pkg)

		if pkg.installed:
			# Already-installed packages only need the merge phases.
			merge = PackageMerge(merge=task)
			merge.addExitListener(self._merge_exit)
			self._task_queues.merge.add(merge)

		elif pkg.built:
			self._jobs += 1
			self._previous_job_start_time = time.time()
			self._status_display.running = self._jobs
			task.addExitListener(self._extract_exit)
			self._task_queues.jobs.add(task)

		else:
			self._jobs += 1
			self._previous_job_start_time = time.time()
			self._status_display.running = self._jobs
			task.addExitListener(self._build_exit)
			self._task_queues.jobs.add(task)

	return bool(state_change)
def _task(self, pkg):
	"""
	Create a MergeListItem task for the given package, identifying
	any installed instance in the same slot that it will replace.
	"""

	pkg_to_replace = None
	if pkg.operation != "uninstall":
		vardb = pkg.root_config.trees["vartree"].dbapi
		previous_cpv = vardb.match(pkg.slot_atom)
		if previous_cpv:
			previous_cpv = previous_cpv.pop()
			pkg_to_replace = self._pkg(previous_cpv,
				"installed", pkg.root_config, installed=True)

	task = MergeListItem(args_set=self._args_set,
		background=self._background, binpkg_opts=self._binpkg_opts,
		build_opts=self._build_opts,
		config_pool=self._ConfigPool(pkg.root,
		self._allocate_config, self._deallocate_config),
		emerge_opts=self.myopts,
		find_blockers=self._find_blockers(pkg), logger=self._logger,
		mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
		pkg_to_replace=pkg_to_replace,
		prefetcher=self._prefetchers.get(pkg),
		scheduler=self._sched_iface,
		settings=self._allocate_config(pkg.root),
		statusMessage=self._status_msg,
		world_atom=self._world_atom)

	return task
def _failed_pkg_msg(self, failed_pkg, action, preposition):
	"""
	Show a status message for a failed package, including the log
	file location when one can be located.
	"""
	pkg = failed_pkg.pkg
	msg = "%s to %s %s" % \
		(bad("Failed"), action, colorize("INFORM", pkg.cpv))
	if pkg.root != "/":
		msg += " %s %s" % (preposition, pkg.root)

	log_path = self._locate_failure_log(failed_pkg)
	if log_path is not None:
		msg += ", Log file:"
	self._status_msg(msg)

	# The path goes on its own line for readability.
	if log_path is not None:
		self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
	"""
	Display a brief status message (no newlines) in the status display.
	This is called by tasks to provide feedback to the user. This
	delegates the resposibility of generating \r and \n control characters,
	to guarantee that lines are created or erased when necessary and
	appropriately.

	@type msg: str
	@param msg: a brief status message (no newlines allowed)
	"""
	# In the foreground, emit a newline so the message does not
	# collide with the current status line.
	if not self._background:
		writemsg_level("\n")
	self._status_display.displayMessage(msg)
def _save_resume_list(self):
	"""
	Do this before verifying the ebuild Manifests since it might
	be possible for the user to use --resume --skipfirst get past
	a non-essential package with a broken digest.
	"""
	mtimedb = self._mtimedb
	mtimedb["resume"]["mergelist"] = [list(x) \
		for x in self._mergelist \
		if isinstance(x, Package) and x.operation == "merge"]

	# Persist immediately so --resume works after an interruption.
	mtimedb.commit()
11314 def _calc_resume_list(self):
11316 Use the current resume list to calculate a new one,
11317 dropping any packages with unsatisfied deps.
11319 @returns: True if successful, False otherwise.
11321 print colorize("GOOD", "*** Resuming merge...")
11323 if self._show_list():
11324 if "--tree" in self.myopts:
11325 portage.writemsg_stdout("\n" + \
11326 darkgreen("These are the packages that " + \
11327 "would be merged, in reverse order:\n\n"))
11330 portage.writemsg_stdout("\n" + \
11331 darkgreen("These are the packages that " + \
11332 "would be merged, in order:\n\n"))
11334 show_spinner = "--quiet" not in self.myopts and \
11335 "--nodeps" not in self.myopts
11338 print "Calculating dependencies ",
11340 myparams = create_depgraph_params(self.myopts, None)
11344 success, mydepgraph, dropped_tasks = resume_depgraph(
11345 self.settings, self.trees, self._mtimedb, self.myopts,
11346 myparams, self._spinner)
11347 except depgraph.UnsatisfiedResumeDep, exc:
11348 # rename variable to avoid python-3.0 error:
11349 # SyntaxError: can not delete variable 'e' referenced in nested
11352 mydepgraph = e.depgraph
11353 dropped_tasks = set()
11356 print "\b\b... done!"
11359 def unsatisfied_resume_dep_msg():
11360 mydepgraph.display_problems()
11361 out = portage.output.EOutput()
11362 out.eerror("One or more packages are either masked or " + \
11363 "have missing dependencies:")
11366 show_parents = set()
11367 for dep in e.value:
11368 if dep.parent in show_parents:
11370 show_parents.add(dep.parent)
11371 if dep.atom is None:
11372 out.eerror(indent + "Masked package:")
11373 out.eerror(2 * indent + str(dep.parent))
11376 out.eerror(indent + str(dep.atom) + " pulled in by:")
11377 out.eerror(2 * indent + str(dep.parent))
11379 msg = "The resume list contains packages " + \
11380 "that are either masked or have " + \
11381 "unsatisfied dependencies. " + \
11382 "Please restart/continue " + \
11383 "the operation manually, or use --skipfirst " + \
11384 "to skip the first package in the list and " + \
11385 "any other packages that may be " + \
11386 "masked or have missing dependencies."
11387 for line in textwrap.wrap(msg, 72):
11389 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11392 if success and self._show_list():
11393 mylist = mydepgraph.altlist()
11395 if "--tree" in self.myopts:
11397 mydepgraph.display(mylist, favorites=self._favorites)
11400 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11402 mydepgraph.display_problems()
11404 mylist = mydepgraph.altlist()
11405 mydepgraph.break_refs(mylist)
11406 mydepgraph.break_refs(dropped_tasks)
11407 self._mergelist = mylist
11408 self._set_digraph(mydepgraph.schedulerGraph())
11411 for task in dropped_tasks:
11412 if not (isinstance(task, Package) and task.operation == "merge"):
11415 msg = "emerge --keep-going:" + \
11417 if pkg.root != "/":
11418 msg += " for %s" % (pkg.root,)
11419 msg += " dropped due to unsatisfied dependency."
11420 for line in textwrap.wrap(msg, msg_width):
11421 eerror(line, phase="other", key=pkg.cpv)
11422 settings = self.pkgsettings[pkg.root]
11423 # Ensure that log collection from $T is disabled inside
11424 # elog_process(), since any logs that might exist are
11426 settings.pop("T", None)
11427 portage.elog.elog_process(pkg.cpv, settings)
11428 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
def _show_list(self):
	# Decide whether the merge list should be displayed, based on the
	# command-line options in self.myopts.
	# NOTE(review): trailing lines are elided from this view; the
	# original presumably returns True inside the "if" and False
	# otherwise -- confirm against the full file.
	myopts = self.myopts
	if "--quiet" not in myopts and \
		("--ask" in myopts or "--tree" in myopts or \
		"--verbose" in myopts):
		# (elided lines followed here)
def _world_atom(self, pkg):
	"""
	Add the package to the world file, but only if
	it's supposed to be added. Otherwise, do nothing.
	"""
	# NOTE(review): many lines are elided from this view (the early
	# "return" statements, the world_set.lock()/unlock() calls and the
	# try/finally around the update); "(elided)" comments mark the gaps.
	if set(("--buildpkgonly", "--fetchonly",
		# (elided: one more option literal appeared here)
		"--oneshot", "--onlydeps",
		"--pretend")).intersection(self.myopts):
		# (elided: presumably an early return)
	if pkg.root != self.target_root:
		# (elided: presumably an early return)
	args_set = self._args_set
	if not args_set.findAtomForPackage(pkg):
		# (elided: presumably an early return)
	logger = self._logger
	pkg_count = self._pkg_count
	root_config = pkg.root_config
	# The "world" favorites file is modeled as a package set.
	world_set = root_config.sets["world"]
	world_locked = False
	if hasattr(world_set, "lock"):
		# (elided: presumably world_set.lock())
		world_locked = True
	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk
	atom = create_world_atom(pkg, args_set, root_config)
	# (elided: presumably "if atom:")
	if hasattr(world_set, "add"):
		self._status_msg(('Recording %s in "world" ' + \
			'favorites file...') % atom)
		logger.log(" === (%s of %s) Updating world file (%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv))
		world_set.add(atom)
	# (elided: presumably an "else:" branch for sets lacking add())
		writemsg_level('\n!!! Unable to record %s in "world"\n' % \
			(atom,), level=logging.WARN, noiselevel=-1)
def _pkg(self, cpv, type_name, root_config, installed=False):
	"""
	Get a package instance from the cache, or create a new
	one if necessary. Raises KeyError from aux_get if it
	failures for some reason (package does not exist or is
	corrupt).
	"""
	# NOTE(review): lines are elided from this view; the original
	# presumably guards the "nomerge" assignment with "if installed:"
	# and ends with "return pkg" -- confirm against the full file.
	operation = "merge"
	# (elided: presumably "if installed:")
	operation = "nomerge"
	if self._digraph is not None:
		# Reuse existing instance when available.
		pkg = self._digraph.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is not None:
			# (elided: presumably "return pkg")
	tree_type = depgraph.pkg_tree_map[type_name]
	db = root_config.trees[tree_type].dbapi
	db_keys = list(self.trees[root_config.root][
		tree_type].dbapi._aux_cache_keys)
	# izip pairs each metadata key lazily with its aux_get value.
	metadata = izip(db_keys, db.aux_get(cpv, db_keys))
	pkg = Package(cpv=cpv, metadata=metadata,
		root_config=root_config, installed=installed)
	if type_name == "ebuild":
		settings = self.pkgsettings[root_config.root]
		settings.setcpv(pkg)
		pkg.metadata["USE"] = settings["PORTAGE_USE"]
		pkg.metadata['CHOST'] = settings.get('CHOST', '')
	# (elided: presumably returns pkg)
class MetadataRegen(PollScheduler):
	"""
	Poll-driven scheduler that regenerates the ebuild metadata cache
	for the portage tree, running up to max_jobs metadata processes
	in parallel.

	NOTE(review): lines are elided from this view of the source,
	including the "def" header of the method whose body begins at
	"portdb = self._portdb" below (presumably "def run(self):");
	confirm details against the full file.
	"""

	def __init__(self, portdb, max_jobs=None, max_load=None):
		PollScheduler.__init__(self)
		self._portdb = portdb
		if max_jobs is None:
			# (elided: presumably a default assignment for max_jobs)
		self._max_jobs = max_jobs
		self._max_load = max_load
		self._sched_iface = self._sched_iface_class(
			register=self._register,
			schedule=self._schedule_wait,
			unregister=self._unregister)
		# cpvs whose metadata processing has not (yet) failed
		self._valid_pkgs = set()
		self._process_iter = self._iter_metadata_processes()
		self.returncode = os.EX_OK
		self._error_count = 0

	def _iter_metadata_processes(self):
		# Generator: yield one metadata process per ebuild.
		portdb = self._portdb
		valid_pkgs = self._valid_pkgs
		every_cp = portdb.cp_all()
		# reverse sort + pop() walks the cps in ascending order
		every_cp.sort(reverse=True)
		# (elided: presumably "while every_cp:")
		cp = every_cp.pop()
		portage.writemsg_stdout("Processing %s\n" % cp)
		cpv_list = portdb.cp_list(cp)
		for cpv in cpv_list:
			valid_pkgs.add(cpv)
			ebuild_path, repo_path = portdb.findname2(cpv)
			metadata_process = portdb._metadata_process(
				cpv, ebuild_path, repo_path)
			if metadata_process is None:
				# (elided: presumably "continue")
			yield metadata_process

	# (elided: method header, presumably "def run(self):")
		portdb = self._portdb
		from portage.cache.cache_errors import CacheError
		# (elided: presumably "dead_nodes = {}")
		# Collect the full set of existing cache keys per tree so that
		# stale entries can be pruned afterwards.
		for mytree in portdb.porttrees:
			# (elided: "try:" header)
			dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
			except CacheError, e:
				portage.writemsg("Error listing cache entries for " + \
					"'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
		# Drive the scheduler until all metadata processes finish.
		while self._schedule():
			# (elided lines)
		# Any cpv that still has an ebuild is alive; the rest are stale.
		for y in self._valid_pkgs:
			for mytree in portdb.porttrees:
				if portdb.findname2(y, mytree=mytree)[0]:
					dead_nodes[mytree].discard(y)
		for mytree, nodes in dead_nodes.iteritems():
			auxdb = portdb.auxdb[mytree]
			# (elided: loop deleting each stale node from auxdb)
			except (KeyError, CacheError):
				# (elided: presumably "pass")

	def _schedule_tasks(self):
		"""
		@returns: True if there may be remaining tasks to schedule,
			False otherwise.
		"""
		while self._can_add_job():
			# (elided: "try:" header)
			metadata_process = self._process_iter.next()
			except StopIteration:
				# (elided: presumably "return False")
			metadata_process.scheduler = self._sched_iface
			metadata_process.addExitListener(self._metadata_exit)
			metadata_process.start()
		# (elided: presumably "return True")

	def _metadata_exit(self, metadata_process):
		# Exit listener: record failures but keep going.
		# (elided lines)
		if metadata_process.returncode != os.EX_OK:
			self.returncode = 1
			self._error_count += 1
			self._valid_pkgs.discard(metadata_process.cpv)
			portage.writemsg("Error processing %s, continuing...\n" % \
				(metadata_process.cpv,))
class UninstallFailure(portage.exception.PortageException):
	"""
	An instance of this class is raised by unmerge() when
	an uninstallation fails.
	"""
	# NOTE(review): lines are elided from this view; the original
	# presumably defines a default "status" class attribute and guards
	# the pargs[0] access below with "if pargs:".
	def __init__(self, *pargs):
		portage.exception.PortageException.__init__(self, pargs)
		# (elided: presumably "if pargs:")
		# status holds the failing process's exit code
		self.status = pargs[0]
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Resolve the unmerge_files arguments into installed packages,
	partition them into "selected" / "protected" / "omitted" groups
	according to unmerge_action ("unmerge", "prune" or "clean"),
	display the plan, and finally call portage.unmerge() for every
	selected package.

	NOTE(review): many lines (try/except/finally headers, early
	returns, else branches) are elided from this view of the source,
	so the body below is not contiguous; "(elided)" comments mark the
	gaps.  Confirm all details against the full file.
	"""
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features
	out = portage.output.EOutput()

	# --- memoized lookup of installed Package instances; the helper's
	# "def _pkg(cpv):" header is elided from this view ---
	db_keys = list(vartree.dbapi._aux_cache_keys)
	pkg = pkg_cache.get(cpv)
	# (elided: presumably "if pkg is None:")
	pkg = Package(cpv=cpv, installed=True,
		metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
		root_config=root_config,
		type_name="installed")
	pkg_cache[cpv] = pkg
	# (elided: presumably "return pkg")

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	# (elided: "try:" header)
	# At least the parent needs to exist for the lock file.
	portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		# (elided: presumably "pass" -- best-effort directory creation)
	# Hold a lock on the vdb while the unmerge plan is computed.
	if os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)

	# Resolve the system set's atoms (expanding single-provider
	# virtuals) into syslist, used later to warn about system packages.
	realsyslist = sets["system"].getAtoms()
	for x in realsyslist:
		mycp = portage.dep_getkey(x)
		if mycp in settings.getvirtuals():
			# (elided: presumably "providers = []")
			for provider in settings.getvirtuals()[mycp]:
				if vartree.dbapi.match(provider):
					providers.append(provider)
			if len(providers) == 1:
				syslist.extend(providers)
		# (elided: "else:" branch)
			syslist.append(mycp)

	mysettings = portage.config(clone=settings)

	if not unmerge_files:
		if unmerge_action == "unmerge":
			print bold("emerge unmerge") + " can only be used with specific package names"
		# (elided lines)
	localtree = vartree
	# process all arguments and add all
	# valid db entries to candidate_catpkgs
	if not unmerge_files:
		candidate_catpkgs.extend(vartree.dbapi.cp_all())
	#we've got command-line arguments
	if not unmerge_files:
		print "\nNo packages to unmerge have been provided.\n"
		# (elided: presumably an early return)
	for x in unmerge_files:
		arg_parts = x.split('/')
		if x[0] not in [".","/"] and \
			arg_parts[-1][-7:] != ".ebuild":
			#possible cat/pkg or dep; treat as such
			candidate_catpkgs.append(x)
		elif unmerge_action in ["prune","clean"]:
			print "\n!!! Prune and clean do not accept individual" + \
				" ebuilds as arguments;\n skipping.\n"
		# (elided: "else:" branch -- a filesystem path was given)
			# it appears that the user is specifying an installed
			# ebuild and we're in "unmerge" mode, so it's ok.
			if not os.path.exists(x):
				print "\n!!! The path '"+x+"' doesn't exist.\n"
				# (elided: presumably an early return)
			absx = os.path.abspath(x)
			sp_absx = absx.split("/")
			if sp_absx[-1][-7:] == ".ebuild":
				# (elided: presumably drops the trailing .ebuild part)
				absx = "/".join(sp_absx)
			sp_absx_len = len(sp_absx)
			vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
			vdb_len = len(vdb_path)
			sp_vdb = vdb_path.split("/")
			sp_vdb_len = len(sp_vdb)
			if not os.path.exists(absx+"/CONTENTS"):
				print "!!! Not a valid db dir: "+str(absx)
				# (elided: presumably an early return)
			if sp_absx_len <= sp_vdb_len:
				# The Path is shorter... so it can't be inside the vdb.
				print "\n!!!",x,"cannot be inside "+ \
					vdb_path+"; aborting.\n"
				# (elided: presumably an early return)
			for idx in range(0,sp_vdb_len):
				if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
					print "\n!!!", x, "is not inside "+\
						vdb_path+"; aborting.\n"
					# (elided: presumably an early return)
			# Convert the vdb directory path into an =cat/pkg-ver atom.
			print "="+"/".join(sp_absx[sp_vdb_len:])
			candidate_catpkgs.append(
				"="+"/".join(sp_absx[sp_vdb_len:]))

	if (not "--quiet" in myopts):
		# (elided lines; "newline" presumably assigned here)
	if settings["ROOT"] != "/":
		writemsg_level(darkgreen(newline+ \
			">>> Using system located in ROOT tree %s\n" % \
			# (elided: the ROOT substitution argument)
	if (("--pretend" in myopts) or ("--ask" in myopts)) and \
		not ("--quiet" in myopts):
		writemsg_level(darkgreen(newline+\
			">>> These are the packages that would be unmerged:\n"))

	# Preservation of order is required for --depclean and --prune so
	# that dependencies are respected. Use all_selected to eliminate
	# duplicate packages since the same package may be selected by
	# multiple atoms.
	all_selected = set()
	for x in candidate_catpkgs:
		# cycle through all our candidate deps and determine
		# what will and will not get unmerged
		# (elided: "try:" header)
		mymatch = vartree.dbapi.match(x)
		except portage.exception.AmbiguousPackageName, errpkgs:
			print "\n\n!!! The short ebuild name \"" + \
				x + "\" is ambiguous. Please specify"
			print "!!! one of the following fully-qualified " + \
				"ebuild names instead:\n"
			for i in errpkgs[0]:
				print "    " + green(i)
			# (elided: presumably exits here)
		if not mymatch and x[0] not in "<>=~":
			mymatch = localtree.dep_match(x)
		# (elided: presumably "if not mymatch:")
			portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
				(x, unmerge_action), noiselevel=-1)
			# (elided: presumably "continue")
		# (elided: presumably "pkgmap.append(")
			{"protected": set(), "selected": set(), "omitted": set()})
		mykey = len(pkgmap) - 1
		if unmerge_action=="unmerge":
			# (elided: presumably "for y in mymatch:")
			if y not in all_selected:
				pkgmap[mykey]["selected"].add(y)
				all_selected.add(y)
		elif unmerge_action == "prune":
			if len(mymatch) == 1:
				# (elided: presumably "continue" -- nothing to prune)
			# only one version of a slotted pkg may remain installed:
			# keep the best version per slot, select the rest
			best_version = mymatch[0]
			best_slot = vartree.getslot(best_version)
			best_counter = vartree.dbapi.cpv_counter(best_version)
			for mypkg in mymatch[1:]:
				myslot = vartree.getslot(mypkg)
				mycounter = vartree.dbapi.cpv_counter(mypkg)
				if (myslot == best_slot and mycounter > best_counter) or \
					mypkg == portage.best([mypkg, best_version]):
					if myslot == best_slot:
						if mycounter < best_counter:
							# On slot collision, keep the one with the
							# highest counter since it is the most
							# recently installed.
							# (elided: presumably "continue")
					best_version = mypkg
					# (elided: presumably "best_slot = myslot")
					best_counter = mycounter
			pkgmap[mykey]["protected"].add(best_version)
			pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
				if mypkg != best_version and mypkg not in all_selected)
			all_selected.update(pkgmap[mykey]["selected"])
		# (elided: "else:" branch)
			# unmerge_action == "clean"
			# (elided: presumably "slotmap = {}")
			for mypkg in mymatch:
				if unmerge_action == "clean":
					myslot = localtree.getslot(mypkg)
				# (elided: "else:" branch)
					# since we're pruning, we don't care about slots
					# and put all the pkgs in together
					# (elided: presumably a dummy slot assignment)
				if myslot not in slotmap:
					slotmap[myslot] = {}
				slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
			# Also index every installed version of the same cp so the
			# most recent per slot can be protected.
			for mypkg in vartree.dbapi.cp_list(
				portage.dep_getkey(mymatch[0])):
				myslot = vartree.getslot(mypkg)
				if myslot not in slotmap:
					slotmap[myslot] = {}
				slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
			for myslot in slotmap:
				counterkeys = slotmap[myslot].keys()
				if not counterkeys:
					# (elided: presumably "continue"; then a sort)
				# protect the highest counter (most recently merged)
				pkgmap[mykey]["protected"].add(
					slotmap[myslot][counterkeys[-1]])
				del counterkeys[-1]
				for counter in counterkeys[:]:
					mypkg = slotmap[myslot][counter]
					if mypkg not in mymatch:
						counterkeys.remove(counter)
						pkgmap[mykey]["protected"].add(
							slotmap[myslot][counter])
				#be pretty and get them in order of merge:
				for ckey in counterkeys:
					mypkg = slotmap[myslot][ckey]
					if mypkg not in all_selected:
						pkgmap[mykey]["selected"].add(mypkg)
						all_selected.add(mypkg)
				# ok, now the last-merged package
				# is protected, and the rest are selected
	numselected = len(all_selected)
	if global_unmerge and not numselected:
		portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
		# (elided: presumably an early return)
	if not numselected:
		portage.writemsg_stdout(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
	# (elided: "finally:" header releasing the vdb lock)
		vartree.dbapi.flush_cache()
		portage.locks.unlockdir(vdb_lock)

	from portage.sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "world",
	# as there is no persistent list of "installed" sets
	installed_sets = ["world"]
	# (elided: loop header for the transitive expansion below)
	pos = len(installed_sets)
	for s in installed_sets[pos - 1:]:
		# (elided lines)
		candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
		# (elided lines)
		installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			# (elided: try/except around the _pkg(cpv) lookup)
			# It could have been uninstalled
			# by a concurrent process.
			# (elided: presumably "continue")
			# Never let portage unmerge itself.
			if unmerge_action != "clean" and \
				root_config.root == "/" and \
				portage.match_from_list(
				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
				msg = ("Not unmerging package %s since there is no valid " + \
				"reason for portage to unmerge itself.") % (pkg.cpv,)
				for line in textwrap.wrap(msg, 75):
					# (elided: presumably out.eerror(line))
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)
				# (elided: presumably "continue")
			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# unless we're unmerging a package set (as the package would be
				# removed from "world" later on)
				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
					# (elided: presumably "continue")
				# (elided: presumably "if s not in sets:")
					if s in unknown_sets:
						# (elided: presumably "continue")
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in " + \
						"%svar/lib/portage/world_sets") % \
						(s, root_config.root))
					# (elided: presumably "continue")
				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):
					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						# (elided: presumably "higher_slot = None")
						for inst_cpv in inst_matches:
							# (elided: try/except around _pkg(inst_cpv))
							inst_pkg = _pkg(inst_cpv)
							# It could have been uninstalled
							# by a concurrent process.
							# (elided: presumably "continue")
							if inst_pkg.cp != atom.cp:
								# (elided: presumably "continue")
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
								# (elided: presumably "break")
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
								# (elided: presumably "break")
						if higher_slot is None:
							# (elided: presumably records s in parents)
			# (elided: presumably "if parents:")
				#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
				#print colorize("WARN", "but still listed in the following package sets:")
				#print "    %s\n" % ", ".join(parents)
				print colorize("WARN", "Not unmerging package %s as it is" % cpv)
				print colorize("WARN", "still referenced by the following package sets:")
				print "    %s\n" % ", ".join(parents)
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				all_selected.remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

	numselected = len(all_selected)
	if not numselected:
		# (elided: writemsg_level call header)
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		# (elided: presumably an early return)

	# Unmerge order only matters in some cases
	# (elided: presumably "if not ordered:" plus the loop header)
		selected = d["selected"]
		# (elided lines)
		cp = portage.cpv_getkey(iter(selected).next())
		cp_dict = unordered.get(cp)
		if cp_dict is None:
			# (elided: presumably creates an empty group dict)
			unordered[cp] = cp_dict
		for k, v in d.iteritems():
			cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	# --- display the plan, one cp group at a time ---
	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		# (elided lines)
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
				# (elided: presumably "continue")
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
			# (elided: presumably "continue")
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			writemsg_level(colorize("BAD","\a\n\n!!! " + \
				"'%s' is part of your system profile.\n" % cp),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		# (elided: quiet/verbose branch header)
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		# (elided: "else:" branch)
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected","protected","omitted"]:
			# (elided: quiet check)
			writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
				for pn, ver, rev in sorted_pkgs:
					# (elided: "-r0" suppression branch)
					myversion = ver + "-" + rev
					if mytype == "selected":
						# (elided: writemsg_level call header)
							colorize("UNMERGE_WARN", myversion + " "),
						# (elided: else-branch writemsg_level header)
							colorize("GOOD", myversion + " "), noiselevel=-1)
			# (elided: "else:" branch)
				writemsg_level("none ", noiselevel=-1)
			# (elided: quiet check)
			writemsg_level("\n", noiselevel=-1)
		# (elided: quiet check)
			writemsg_level("\n", noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
		# (elided: presumably "return 0")
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True
			# (elided: presumably prints and returns)

	#the real unmerging begins, after a short delay....
	if clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)
			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				# (elided: presumably "if raise_on_error:")
				raise UninstallFailure(retval)
				# (elided: presumably sys.exit(retval) otherwise)
			# (elided: "else:" success branch header)
				if clean_world and hasattr(sets["world"], "cleanPackage"):
					sets["world"].cleanPackage(vartree.dbapi, y)
				emergelog(xterm_titles, " >>> unmerge success: "+y)
	# Drop any sets the user unmerged from the world file as well.
	if clean_world and hasattr(sets["world"], "remove"):
		for s in root_config.setconfig.active:
			sets["world"].remove(SETPREFIX+s)
	# (elided: presumably "return 1")
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	# Regenerate the GNU info "dir" index for every info directory whose
	# mtime changed since the last run (tracked in prev_mtimes), by
	# invoking /usr/bin/install-info once per info file.
	# NOTE(review): lines are elided from this view (loop headers,
	# try: headers, counters); "(elided)" comments mark the gaps.
	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		# (elided: builds regen_infodirs from infodirs)
		inforoot=normpath(root+z)
		if os.path.isdir(inforoot):
			infomtime = long(os.stat(inforoot).st_mtime)
			if inforoot not in prev_mtimes or \
				prev_mtimes[inforoot] != infomtime:
				regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
		# (elided: "else:" branch)
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			# the index file may exist compressed alongside plain
			dir_extensions = ("", ".gz", ".bz2")
			# (elided: counters icount/badcount/errmsg initialized here)
			for inforoot in regen_infodirs:
				# (elided lines)
				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):
					# (elided: presumably "continue")
				file_list = os.listdir(inforoot)
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
						# (elided: presumably "continue")
					if x.startswith("dir"):
						# skip the index file itself (and its backups)
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
								# (elided: skip-flag handling)
					if processed_count == 0:
						# Move the old index aside so install-info
						# rebuilds it from scratch.
						for ext in dir_extensions:
							# (elided: "try:" header)
							os.rename(dir_file + ext, dir_file + ext + ".old")
							moved_old_dir = True
							except EnvironmentError, e:
								if e.errno != errno.ENOENT:
									# (elided: presumably "raise")
					processed_count += 1
					myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
					existsstr="already exists, for file `"
					if re.search(existsstr,myso):
						# Already exists... Don't increment the count for this.
						# (elided: presumably "pass")
					elif myso[:44]=="install-info: warning: no info dir entry in ":
						# This info file doesn't contain a DIR-header: install-info produces this
						# (harmless) warning (the --quiet switch doesn't seem to work).
						# Don't increment the count for this.
					# (elided: "else:" branch counting the error)
						badcount=badcount+1
						errmsg += myso + "\n"

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
						# (elided: "try:" header)
						os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError, e:
							if e.errno != errno.ENOENT:
								# (elided: presumably "raise")

				# Clean dir.old cruft so that they don't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
					# (elided: "try:" header)
					os.unlink(dir_file + ext + ".old")
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:
							# (elided: presumably "raise")

				#update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

			# (elided: presumably "if badcount:")
				out.eerror("Processed %d info files; %d errors." % \
					(icount, badcount))
				writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
			# (elided: "else:" branch)
				out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
	# Check each configured repository for unread GNU-style news items
	# and print a reminder to run `eselect news` when any are found.
	# In --pretend mode the unread-marker files are not updated.
	target_root = root_config.root
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	update = "--pretend" not in myopts

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
		# (elided: presumably "if unreadItems:")
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print colorize("WARN", " * IMPORTANT:"),
			print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
	# Warn about preserved libraries that are still registered, listing
	# each library's paths, its consumers, and the packages owning them.
	# NOTE(review): lines are elided from this view (MAX_DISPLAY
	# constant, several loop/try headers); "(elided)" comments below
	# mark the gaps.

	# Ensure the registry is consistent with existing files.
	vardbapi.plib_registry.pruneNonExisting()

	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		linkmap = vardbapi.linkmap
		linkmap_broken = False

		# (elided: try: linkmap.rebuild())
		except portage.exception.CommandNotFound, e:
			writemsg_level("!!! Command Not Found: %s\n" % (e,),
				level=logging.ERROR, noiselevel=-1)
			linkmap_broken = True
		# (elided: "else:" branch -- linkmap usable)
			search_for_owners = set()
			for cpv in plibdata:
				internal_plib_keys = set(linkmap._obj_key(f) \
					for f in plibdata[cpv])
				for f in plibdata[cpv]:
					if f in consumer_map:
						# (elided: presumably "continue")
					for c in linkmap.findConsumers(f):
						# Filter out any consumers that are also preserved libs
						# belonging to the same package as the provider.
						if linkmap._obj_key(c) not in internal_plib_keys:
							consumers.append(c)
					consumer_map[f] = consumers
					search_for_owners.update(consumers[:MAX_DISPLAY+1])

			owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			# Group hard-linked copies of the same object together.
			for f in plibdata[cpv]:
				obj_key = linkmap._obj_key(f)
				alt_paths = samefile_map.get(obj_key)
				if alt_paths is None:
					# (elided: presumably creates a new set)
					samefile_map[obj_key] = alt_paths
			for alt_paths in samefile_map.itervalues():
				alt_paths = sorted(alt_paths)
				for p in alt_paths:
					print colorize("WARN", " * ") + " - %s" % (p,)
				consumers = consumer_map.get(f, [])
				for c in consumers[:MAX_DISPLAY]:
					print colorize("WARN", " * ") + "     used by %s (%s)" % \
						(c, ", ".join(x.mycpv for x in owners.get(c, [])))
				if len(consumers) == MAX_DISPLAY + 1:
					print colorize("WARN", " * ") + "     used by %s (%s)" % \
						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
						for x in owners.get(consumers[MAX_DISPLAY], [])))
				elif len(consumers) > MAX_DISPLAY:
					print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.

	@returns: True if messages were shown, False otherwise.
	"""
	# NOTE(review): the try:/else: header lines around the import are
	# elided from this view; confirm against the full file.
	messages_shown = False
	# (elided: "try:" header)
	from portage.elog import mod_echo
	except ImportError:
		pass # happens during downgrade to a version without the module
	# (elided: presumably "else:")
		messages_shown = bool(mod_echo._items)
		mod_echo.finalize()
	return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Config Files
	Display preserved libs warnings

	@param trees: A dictionary mapping each ROOT to it's package databases
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value

	1. Calls sys.exit(retval)
	"""
	# NOTE(review): lines are elided from this view (the settings
	# unlock/reload/lock sequence, the sys.exit calls, the try/finally
	# around the vdb lock); "(elided)" comments mark the gaps.
	target_root = root_config.root
	trees = { target_root : root_config.trees }
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	# (elided lines)
	settings.regenerate()
	# (elided lines)

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
	# (elided: "else:" branch)
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	counter_hash = settings.get("PORTAGE_COUNTER_HASH")
	if "--pretend" in myopts or (counter_hash is not None and \
		counter_hash == vardbapi._counter_hash()):
		display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.
		# (elided: presumably sys.exit(retval))

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vdb_lock = portage.locks.lockdir(vdb_path)

	# (elided: try/finally wrapping while the lock is held)
		if "noinfo" not in settings.features:
			chk_updated_info_files(target_root,
				infodirs, info_mtimes, retval)
		# (elided: "finally:" branch releasing the lock)
			portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(root_config, myopts)
	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
		display_preserved_libs(vardbapi)
	# (elided: presumably sys.exit(retval))
def chk_updated_cfg_files(target_root, config_protect):
	# Scan every CONFIG_PROTECT path for pending ._cfg????_* update
	# files and tell the user how many config files need updating.
	# NOTE(review): lines are elided from this view (the counter init,
	# several try:/else: headers); "(elided)" comments mark the gaps.
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
			# (elided: presumably "continue" after the comment)
		mymode = os.lstat(x).st_mode
		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
			real_mode = os.stat(x).st_mode
			if stat.S_ISDIR(real_mode):
				# (elided: presumably "mymode = real_mode")
		if stat.S_ISDIR(mymode):
			mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
		# (elided: "else:" branch -- single protected file)
			mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
				os.path.split(x.rstrip(os.path.sep))
		mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
		# (elided: presumably "if a[0] != 0:")
			sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
			# Show the error message alone, sending stdout to /dev/null.
			os.system(mycommand + " 1>/dev/null")
		# (elided: "else:" success branch)
			files = a[1].split('\0')
			# split always produces an empty string as the last element
			if files and not files[-1]:
				# (elided: presumably drops the empty element)
			print "\n"+colorize("WARN", " * IMPORTANT:"),
			if stat.S_ISDIR(mymode):
				print "%d config files in '%s' need updating." % \
					# (elided: the (len(files), x) arguments)
			# (elided: "else:" branch)
				print "config file '%s' needs updating." % x

	# (elided: presumably a "any found?" guard)
		print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
			" section of the " + bold("emerge")
		print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	# (elided: the signature's continuation line, presumably
	# "update=False):" -- confirm against the full file)
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param UNREAD_PATH:

	1. The number of unread but relevant news items.
	"""
	# Thin wrapper: delegates entirely to NewsManager.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
	# Insert "category/" in front of the package-name portion of atom,
	# preserving any leading operator characters (e.g. ">=", "=").
	# NOTE(review): trailing lines are elided from this view; the
	# original presumably guards on the match and returns ret (or None
	# when atom has no word character) -- confirm against the full file.
	alphanum = re.search(r'\w', atom)
	# (elided: presumably "if alphanum:")
	ret = atom[:alphanum.start()] + "%s/" % category + \
		atom[alphanum.start():]
	# (elided: else branch / return)
def is_valid_package_atom(x):
	# Return True if x is a valid package atom; a bare package name
	# (without a category) is validated by prefixing a dummy "cat/".
	# NOTE(review): guard lines are elided from this view (presumably
	# "if '/' not in x:" and "if alphanum:") -- confirm against the
	# full file.
	alphanum = re.search(r'\w', x)
	# (elided: presumably "if alphanum:")
	x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
def show_blocker_docs_link():
	# Print a pointer to the handbook section explaining blocked
	# packages.  (Blank separator "print" lines are elided from this
	# view of the source.)
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12542 def show_mask_docs():
12543 print "For more information, see the MASKED PACKAGES section in the emerge"
12544 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
    # Synchronize the local Portage tree (git, rsync or cvs depending on
    # SYNC and the state of PORTDIR), then refresh metadata, apply global
    # updates, run the user's post_sync hook and show notifications.
    # NOTE(review): the listing reviewed here is elided; "# [...]" marks
    # points where source lines are missing from this view.
    xterm_titles = "notitles" not in settings.features
    emergelog(xterm_titles, " === sync")
    myportdir = settings.get("PORTDIR", None)
    out = portage.output.EOutput()
    # [...] (guard for an unset PORTDIR)
    sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
    # [...]
    if myportdir[-1]=="/":
        myportdir=myportdir[:-1]
    # [...] (stat PORTDIR, creating it if absent)
    st = os.stat(myportdir)
    # [...]
    print ">>>",myportdir,"not found, creating it."
    os.makedirs(myportdir,0755)
    st = os.stat(myportdir)
    # [...] (spawn_kwargs initialization elided)
    spawn_kwargs["env"] = settings.environ()
    if 'usersync' in settings.features and \
        portage.data.secpass >= 2 and \
        (st.st_uid != os.getuid() and st.st_mode & 0700 or \
        st.st_gid != os.getgid() and st.st_mode & 0070):
        # [...]
        homedir = pwd.getpwuid(st.st_uid).pw_dir
        # [...]
        # Drop privileges when syncing, in order to match
        # existing uid/gid settings.
        spawn_kwargs["uid"] = st.st_uid
        spawn_kwargs["gid"] = st.st_gid
        spawn_kwargs["groups"] = [st.st_gid]
        spawn_kwargs["env"]["HOME"] = homedir
        # [...] (umask initialization elided)
        if not st.st_mode & 0020:
            umask = umask | 0020
        spawn_kwargs["umask"] = umask
    syncuri = settings.get("SYNC", "").strip()
    # [...] (guard for an unset SYNC)
    writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
        noiselevel=-1, level=logging.ERROR)
    # [...]
    vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
    vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
    # [...]
    dosyncuri = syncuri
    updatecache_flg = False
    if myaction == "metadata":
        print "skipping sync"
        updatecache_flg = True
    elif ".git" in vcs_dirs:
        # Update existing git repository, and ignore the syncuri. We are
        # going to trust the user and assume that the user is in the branch
        # that he/she wants updated. We'll let the user manage branches with
        # [...]
        if portage.process.find_binary("git") is None:
            msg = ["Command not found: git",
            "Type \"emerge dev-util/git\" to enable git support."]
            # [...] (loop over msg lines)
            writemsg_level("!!! %s\n" % l,
                level=logging.ERROR, noiselevel=-1)
            # [...]
        msg = ">>> Starting git pull in %s..." % myportdir
        emergelog(xterm_titles, msg )
        writemsg_level(msg + "\n")
        exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
            (portage._shell_quote(myportdir),), **spawn_kwargs)
        if exitcode != os.EX_OK:
            msg = "!!! git pull error in %s." % myportdir
            emergelog(xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
            # [...]
        msg = ">>> Git pull in %s successful" % myportdir
        emergelog(xterm_titles, msg)
        writemsg_level(msg + "\n")
        exitcode = git_sync_timestamps(settings, myportdir)
        if exitcode == os.EX_OK:
            updatecache_flg = True
    elif syncuri[:8]=="rsync://":
        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
                "control (contains %s).\n!!! Aborting rsync sync.\n") % \
                (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
            # [...]
        if not os.path.exists("/usr/bin/rsync"):
            print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
            print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
            # [...]
        # [...] (mytimeout / rsync_opts initialization elided)
        if settings["PORTAGE_RSYNC_OPTS"] == "":
            portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
            rsync_opts.extend([
                "--recursive", # Recurse directories
                "--links", # Consider symlinks
                "--safe-links", # Ignore links outside of tree
                "--perms", # Preserve permissions
                "--times", # Preserive mod times
                "--compress", # Compress the data transmitted
                "--force", # Force deletion on non-empty dirs
                "--whole-file", # Don't do block transfers, only entire files
                "--delete", # Delete files that aren't in the master tree
                "--stats", # Show final statistics about what was transfered
                "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
                "--exclude=/distfiles", # Exclude distfiles from consideration
                "--exclude=/local", # Exclude local from consideration
                "--exclude=/packages", # Exclude packages from consideration
            # [...] (list close elided)
        # [...] (else: user-provided PORTAGE_RSYNC_OPTS)
            # The below validation is not needed when using the above hardcoded
            # [...]
            portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
            # [...] (rsync_opts.extend( elided)
                shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
            for opt in ("--recursive", "--times"):
                if opt not in rsync_opts:
                    portage.writemsg(yellow("WARNING:") + " adding required option " + \
                        "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                    rsync_opts.append(opt)
            # [...]
            for exclude in ("distfiles", "local", "packages"):
                opt = "--exclude=/%s" % exclude
                if opt not in rsync_opts:
                    portage.writemsg(yellow("WARNING:") + \
                        " adding required option %s not included in " % opt + \
                        "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
                    rsync_opts.append(opt)
            # [...]
            if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
                def rsync_opt_startswith(opt_prefix):
                    for x in rsync_opts:
                        if x.startswith(opt_prefix):
                            # [...] (truthy return elided)
                    # [...] (fallthrough return elided)
                if not rsync_opt_startswith("--timeout="):
                    rsync_opts.append("--timeout=%d" % mytimeout)
                # [...]
                for opt in ("--compress", "--whole-file"):
                    if opt not in rsync_opts:
                        portage.writemsg(yellow("WARNING:") + " adding required option " + \
                            "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                        rsync_opts.append(opt)
        # [...]
        if "--quiet" in myopts:
            rsync_opts.append("--quiet") # Shut up a lot
        # [...] (else)
            rsync_opts.append("--verbose") # Print filelist
        # [...]
        if "--verbose" in myopts:
            rsync_opts.append("--progress") # Progress meter for each file
        # [...]
        if "--debug" in myopts:
            rsync_opts.append("--checksum") # Force checksum on all files
        # [...]
        # Real local timestamp file.
        servertimestampfile = os.path.join(
            myportdir, "metadata", "timestamp.chk")
        # [...]
        content = portage.util.grabfile(servertimestampfile)
        # [...] (parse local timestamp inside try, tolerating bad content)
                mytimestamp = time.mktime(time.strptime(content[0],
                    "%a, %d %b %Y %H:%M:%S +0000"))
            except (OverflowError, ValueError):
                # [...]
        # [...] (read initial-timeout setting inside try)
            rsync_initial_timeout = \
                int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
        # [...] (except ValueError: fall back to the default)
            rsync_initial_timeout = 15
        # [...] (read retry count inside try)
            maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit, e:
            raise # Needed else can't exit
        # [...] (except: fall back)
            maxretries=3 #default number of retries
        # [...]
        user_name, hostname, port = re.split(
            "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
        # [...]
        if user_name is None:
            # [...] (normalize user_name/port to empty strings)
        updatecache_flg=True
        all_rsync_opts = set(rsync_opts)
        extra_rsync_opts = shlex.split(
            settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
        all_rsync_opts.update(extra_rsync_opts)
        family = socket.AF_INET
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
            ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6
        # Sentinel exit codes used by the retry logic below.
        SERVER_OUT_OF_DATE = -1
        EXCEEDED_MAX_RETRIES = -2
        # [...] (retry loop: resolve the host and pick a mirror IP)
                for addrinfo in socket.getaddrinfo(
                    hostname, None, family, socket.SOCK_STREAM):
                    if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips.append("[%s]" % addrinfo[4][0])
                    # [...] (else)
                        ips.append(addrinfo[4][0])
                from random import shuffle
                # [...] (shuffle(ips) -- randomize mirror choice)
            except SystemExit, e:
                raise # Needed else can't exit
            except Exception, e:
                print "Notice:",str(e)
                # [...]
            # [...] (substitute the chosen IP into the sync URI, in a try)
                dosyncuri = syncuri.replace(
                    "//" + user_name + hostname + port + "/",
                    "//" + user_name + ips[0] + port + "/", 1)
            except SystemExit, e:
                raise # Needed else can't exit
            except Exception, e:
                print "Notice:",str(e)
                # [...]
            # [...]
            if "--ask" in myopts:
                if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
                    # [...] (user declined; abort)
            # [...] (first attempt vs. retry banner)
            emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
            if "--quiet" not in myopts:
                print ">>> Starting rsync with "+dosyncuri+"..."
            # [...] (else: retry banner)
            emergelog(xterm_titles,
                ">>> Starting retry %d of %d with %s" % \
                (retries,maxretries,dosyncuri))
            print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
            # [...]
            if mytimestamp != 0 and "--quiet" not in myopts:
                print ">>> Checking server timestamp ..."
            # [...]
            rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
            # [...]
            if "--debug" in myopts:
                # [...] (print the command)
            exitcode = os.EX_OK
            servertimestamp = 0
            # Even if there's no timestamp available locally, fetch the
            # timestamp anyway as an initial probe to verify that the server is
            # responsive. This protects us from hanging indefinitely on a
            # connection attempt to an unresponsive server which rsync's
            # --timeout option does not prevent.
            # [...]
                # Temporary file for remote server timestamp comparison.
                from tempfile import mkstemp
                fd, tmpservertimestampfile = mkstemp()
                # [...]
                mycommand = rsynccommand[:]
                mycommand.append(dosyncuri.rstrip("/") + \
                    "/metadata/timestamp.chk")
                mycommand.append(tmpservertimestampfile)
                # [...] (try:)
                    def timeout_handler(signum, frame):
                        raise portage.exception.PortageException("timed out")
                    signal.signal(signal.SIGALRM, timeout_handler)
                    # Timeout here in case the server is unresponsive. The
                    # --timeout rsync option doesn't apply to the initial
                    # connection attempt.
                    if rsync_initial_timeout:
                        signal.alarm(rsync_initial_timeout)
                    # [...] (inner try:)
                        mypids.extend(portage.process.spawn(
                            mycommand, env=settings.environ(), returnpid=True))
                        exitcode = os.waitpid(mypids[0], 0)[1]
                        content = portage.grabfile(tmpservertimestampfile)
                        # [...] (finally: cancel the alarm, remove the temp file)
                        if rsync_initial_timeout:
                            # [...] (signal.alarm(0))
                        # [...]
                            os.unlink(tmpservertimestampfile)
                        # [...]
                except portage.exception.PortageException, e:
                    # [...] (initial-probe timeout fired; reap the child)
                    if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
                        os.kill(mypids[0], signal.SIGTERM)
                        os.waitpid(mypids[0], 0)
                    # This is the same code rsync uses for timeout.
                    # [...]
                if exitcode != os.EX_OK:
                    # Normalize a waitpid() status to an exit status.
                    if exitcode & 0xff:
                        exitcode = (exitcode & 0xff) << 8
                    # [...] (else)
                        exitcode = exitcode >> 8
                # [...]
                portage.process.spawned_pids.remove(mypids[0])
                # [...] (parse the fetched server timestamp inside a try)
                        servertimestamp = time.mktime(time.strptime(
                            content[0], "%a, %d %b %Y %H:%M:%S +0000"))
                    except (OverflowError, ValueError):
                        # [...]
                del mycommand, mypids, content
            if exitcode == os.EX_OK:
                if (servertimestamp != 0) and (servertimestamp == mytimestamp):
                    emergelog(xterm_titles,
                        ">>> Cancelling sync -- Already current.")
                    # [...]
                    print ">>> Timestamps on the server and in the local repository are the same."
                    print ">>> Cancelling all further sync action. You are already up to date."
                    # [...]
                    print ">>> In order to force sync, remove '%s'." % servertimestampfile
                    # [...]
                elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
                    emergelog(xterm_titles,
                        ">>> Server out of date: %s" % dosyncuri)
                    # [...]
                    print ">>> SERVER OUT OF DATE: %s" % dosyncuri
                    # [...]
                    print ">>> In order to force sync, remove '%s'." % servertimestampfile
                    # [...]
                    exitcode = SERVER_OUT_OF_DATE
                elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
                    # [...] (server is newer: run the real sync)
                    mycommand = rsynccommand + [dosyncuri+"/", myportdir]
                    exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
                    if exitcode in [0,1,3,4,11,14,20,21]:
                        # [...] (stop retrying on these codes)
            elif exitcode in [1,3,4,11,14,20,21]:
                # [...] (non-retryable failure)
            # Code 2 indicates protocol incompatibility, which is expected
            # for servers with protocol < 29 that don't support
            # --prune-empty-directories. Retry for a server that supports
            # at least rsync protocol version 29 (>=rsync-2.6.4).
            # [...]
            if retries<=maxretries:
                print ">>> Retrying..."
                # [...]
            # [...] (else: give up)
                updatecache_flg=False
                exitcode = EXCEEDED_MAX_RETRIES
                # [...]
        # [...] (report the final rsync outcome)
            emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
        elif exitcode == SERVER_OUT_OF_DATE:
            # [...]
        elif exitcode == EXCEEDED_MAX_RETRIES:
            # [...]
                ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
        # [...] (exit code 1: rsync syntax error)
            msg.append("Rsync has reported that there is a syntax error. Please ensure")
            msg.append("that your SYNC statement is proper.")
            msg.append("SYNC=" + settings["SYNC"])
        # [...] (exit code 11: file IO error)
            msg.append("Rsync has reported that there is a File IO error. Normally")
            msg.append("this means your disk is full, but can be caused by corruption")
            msg.append("on the filesystem that contains PORTDIR. Please investigate")
            msg.append("and try again after the problem has been fixed.")
            msg.append("PORTDIR=" + settings["PORTDIR"])
        # [...] (exit code 20: killed)
            msg.append("Rsync was killed before it finished.")
        # [...] (other failures)
            msg.append("Rsync has not successfully finished. It is recommended that you keep")
            msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
            msg.append("to use rsync due to firewall or other restrictions. This should be a")
            msg.append("temporary problem unless complications exist with your network")
            msg.append("(and possibly your system's filesystem) configuration.")
        # [...]
    elif syncuri[:6]=="cvs://":
        if not os.path.exists("/usr/bin/cvs"):
            print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
            print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
            # [...]
        cvsroot=syncuri[6:]
        cvsdir=os.path.dirname(myportdir)
        if not os.path.exists(myportdir+"/CVS"):
            # [...]
            print ">>> Starting initial cvs checkout with "+syncuri+"..."
            if os.path.exists(cvsdir+"/gentoo-x86"):
                print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
                # [...]
            # [...] (try: remove an empty PORTDIR before checkout)
                os.rmdir(myportdir)
            # [...] (except OSError as e)
                if e.errno != errno.ENOENT:
                    # [...] (sys.stderr.write( elided)
                        "!!! existing '%s' directory; exiting.\n" % myportdir)
                    # [...]
            if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
                print "!!! cvs checkout error; exiting."
                # [...]
            os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
        # [...] (else: existing checkout, just update)
            print ">>> Starting cvs update with "+syncuri+"..."
            retval = portage.process.spawn_bash(
                "cd %s; cvs -z0 -q update -dP" % \
                (portage._shell_quote(myportdir),), **spawn_kwargs)
            if retval != os.EX_OK:
                # [...] (abort on failure)
        dosyncuri = syncuri
    # [...] (else: unknown protocol)
        writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
            noiselevel=-1, level=logging.ERROR)
        # [...]
    # Cache regeneration only makes sense with FEATURES=metadata-transfer.
    if updatecache_flg and \
        myaction != "metadata" and \
        "metadata-transfer" not in settings.features:
        updatecache_flg = False
    # [...]
    # Reload the whole config from scratch.
    settings, trees, mtimedb = load_emerge_config(trees=trees)
    root_config = trees[settings["ROOT"]]["root_config"]
    portdb = trees[settings["ROOT"]]["porttree"].dbapi
    # [...]
    if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
        action_metadata(settings, portdb, myopts)
    # [...]
    if portage._global_updates(trees, mtimedb["updates"]):
        # [...]
        # Reload the whole config from scratch.
        settings, trees, mtimedb = load_emerge_config(trees=trees)
        portdb = trees[settings["ROOT"]]["porttree"].dbapi
        root_config = trees[settings["ROOT"]]["root_config"]
    # [...]
    mybestpv = portdb.xmatch("bestmatch-visible",
        portage.const.PORTAGE_PACKAGE_ATOM)
    mypvs = portage.best(
        trees[settings["ROOT"]]["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM))
    # [...]
    chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
    # [...]
    if myaction != "metadata":
        if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
            retval = portage.process.spawn(
                [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
                dosyncuri], env=settings.environ())
            if retval != os.EX_OK:
                print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
    # [...]
    if(mybestpv != mypvs) and not "--quiet" in myopts:
        # [...]
        print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
        print red(" * ")+"that you update portage now, before any other packages are updated."
        # [...]
        print red(" * ")+"To update portage, run 'emerge portage' now."
        # [...]
    display_news_notification(root_config, myopts)
def git_sync_timestamps(settings, portdir):
    """
    Since git doesn't preserve timestamps, synchronize timestamps between
    entries and ebuilds/eclasses. Assume the cache has the correct timestamp
    for a given file as long as the file in the working tree is not modified
    (relative to HEAD).
    """
    # NOTE(review): this listing is elided; "# [...]" marks missing lines.
    cache_dir = os.path.join(portdir, "metadata", "cache")
    if not os.path.isdir(cache_dir):
        # [...] (nothing to do without a metadata cache)
    writemsg_level(">>> Synchronizing timestamps...\n")
    # [...]
    from portage.cache.cache_errors import CacheError
    # [...] (open the metadata cache read-only, in a try)
        cache_db = settings.load_best_module("portdbapi.metadbmodule")(
            portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
    except CacheError, e:
        writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
            level=logging.ERROR, noiselevel=-1)
        # [...]
    ec_dir = os.path.join(portdir, "eclass")
    # [...] (try:)
        ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
            if f.endswith(".eclass"))
    # [...] (except OSError as e)
        writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
            level=logging.ERROR, noiselevel=-1)
        # [...]
    # Ask git for the set of locally-modified tracked files; cache
    # timestamps for those must not be trusted.
    args = [portage.const.BASH_BINARY, "-c",
        "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
        portage._shell_quote(portdir)]
    # [...]
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    modified_files = set(l.rstrip("\n") for l in proc.stdout)
    # [...] (rval = proc.wait())
    if rval != os.EX_OK:
        # [...]
    modified_eclasses = set(ec for ec in ec_names \
        if os.path.join("eclass", ec + ".eclass") in modified_files)
    # [...]
    updated_ec_mtimes = {}
    # [...]
    for cpv in cache_db:
        cpv_split = portage.catpkgsplit(cpv)
        if cpv_split is None:
            writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)
            # [...]
        cat, pn, ver, rev = cpv_split
        cat, pf = portage.catsplit(cpv)
        relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
        if relative_eb_path in modified_files:
            # [...] (skip locally-modified ebuilds)
        # [...] (try:)
            cache_entry = cache_db[cpv]
            eb_mtime = cache_entry.get("_mtime_")
            ec_mtimes = cache_entry.get("_eclasses_")
        # [...] (except KeyError)
            writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)
            # [...]
        except CacheError, e:
            writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
                (cpv, e), level=logging.ERROR, noiselevel=-1)
            # [...]
        if eb_mtime is None:
            writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)
            # [...]
        # [...] (try:)
            eb_mtime = long(eb_mtime)
        # [...] (except ValueError)
            writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
                (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
            # [...]
        if ec_mtimes is None:
            writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)
            # [...]
        if modified_eclasses.intersection(ec_mtimes):
            # [...] (skip entries inheriting a locally-modified eclass)
        missing_eclasses = set(ec_mtimes).difference(ec_names)
        if missing_eclasses:
            writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
                (cpv, sorted(missing_eclasses)), level=logging.ERROR,
                # [...] (noiselevel=-1) and continue)
        eb_path = os.path.join(portdir, relative_eb_path)
        # [...] (try:)
            current_eb_mtime = os.stat(eb_path)
            # NOTE(review): os.stat() yields a stat result here, yet it is
            # later compared to the long 'eb_mtime' -- confirm against the
            # full source whether ".st_mtime" was dropped by this listing.
        # [...] (except OSError)
            writemsg_level("!!! Missing ebuild: %s\n" % \
                (cpv,), level=logging.ERROR, noiselevel=-1)
            # [...]
        inconsistent = False
        for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
            updated_mtime = updated_ec_mtimes.get(ec)
            if updated_mtime is not None and updated_mtime != ec_mtime:
                writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
                    (cpv, ec), level=logging.ERROR, noiselevel=-1)
                inconsistent = True
                # [...]
        # [...] (skip inconsistent entries)
        if current_eb_mtime != eb_mtime:
            os.utime(eb_path, (eb_mtime, eb_mtime))
        # [...]
        for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
            if ec in updated_ec_mtimes:
                # [...] (already synchronized)
            ec_path = os.path.join(ec_dir, ec + ".eclass")
            current_mtime = long(os.stat(ec_path).st_mtime)
            if current_mtime != ec_mtime:
                os.utime(ec_path, (ec_mtime, ec_mtime))
            updated_ec_mtimes[ec] = ec_mtime
    # [...] (return os.EX_OK, presumably -- confirm)
def action_metadata(settings, portdb, myopts):
    # Transfer ${PORTDIR}/metadata/cache into portage's own depcache,
    # printing a percentage progress meter unless --quiet is in effect.
    # NOTE(review): this listing is elided; "# [...]" marks missing lines.
    portage.writemsg_stdout("\n>>> Updating Portage cache: ")
    old_umask = os.umask(0002)
    cachedir = os.path.normpath(settings.depcachedir)
    # Refuse to operate on a depcache dir that points at a primary system
    # directory -- a misconfiguration here could be destructive.
    if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
        "/lib", "/opt", "/proc", "/root", "/sbin",
        "/sys", "/tmp", "/usr", "/var"]:
        print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
            "ROOT DIRECTORY ON YOUR SYSTEM."
        print >> sys.stderr, \
            "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
        # [...] (abort)
    if not os.path.exists(cachedir):
        # [...] (create the cache dir)
    ec = portage.eclass_cache.cache(portdb.porttree_root)
    myportdir = os.path.realpath(settings["PORTDIR"])
    cm = settings.load_best_module("portdbapi.metadbmodule")(
        myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
    # [...]
    from portage.cache import util
    # [...]
    class percentage_noise_maker(util.quiet_mirroring):
        # Progress reporter: emits an updating "NN%" while mirroring.
        def __init__(self, dbapi):
            # [...]
            self.cp_all = dbapi.cp_all()
            l = len(self.cp_all)
            self.call_update_min = 100000000
            self.min_cp_all = l/100.0
            # [...]
        def __iter__(self):
            for x in self.cp_all:
                # [...]
                if self.count > self.min_cp_all:
                    self.call_update_min = 0
                    # [...]
                for y in self.dbapi.cp_list(x):
                    # [...] (yield y)
            self.call_update_mine = 0
            # NOTE(review): "call_update_mine" (vs. "call_update_min" used
            # above) looks like a typo in the original -- confirm.
        def update(self, *arg):
            try: self.pstr = int(self.pstr) + 1
            except ValueError: self.pstr = 1
            sys.stdout.write("%s%i%%" % \
                ("\b" * (len(str(self.pstr))+1), self.pstr))
            # [...]
            self.call_update_min = 10000000
            # [...]
        def finish(self, *arg):
            sys.stdout.write("\b\b\b\b100%\n")
            # [...]
    if "--quiet" in myopts:
        def quicky_cpv_generator(cp_all_list):
            for x in cp_all_list:
                for y in portdb.cp_list(x):
                    # [...] (yield y)
        source = quicky_cpv_generator(portdb.cp_all())
        noise_maker = portage.cache.util.quiet_mirroring()
    # [...] (else: verbose progress maker doubles as the source)
        noise_maker = source = percentage_noise_maker(portdb)
    portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
        eclass_cache=ec, verbose_instance=noise_maker)
    # [...]
    os.umask(old_umask)
def action_regen(settings, portdb, max_jobs, max_load):
    # Regenerate metadata cache entries for all ebuilds in PORTDIR,
    # delegating the parallel work to MetadataRegen.
    xterm_titles = "notitles" not in settings.features
    emergelog(xterm_titles, " === regen")
    #regenerate cache entries
    portage.writemsg_stdout("Regenerating cache entries...\n")
    # [...] (detach stdin, best effort, inside a try)
        os.close(sys.stdin.fileno())
    except SystemExit, e:
        raise # Needed else can't exit
    # [...] (other errors are ignored)
    regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
    # [...] (regen.run(), presumably -- elided)
    portage.writemsg_stdout("done!\n")
    return regen.returncode
def action_config(settings, trees, myopts, myfiles):
    # Run the pkg_config() phase for a single installed package atom,
    # prompting the user to choose when the atom matches several packages.
    # NOTE(review): this listing is elided; "# [...]" marks missing lines.
    if len(myfiles) != 1:
        print red("!!! config can only take a single package atom at this time\n")
        # [...] (abort)
    if not is_valid_package_atom(myfiles[0]):
        portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
            # [...] (noiselevel argument elided)
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
        # [...] (abort)
    # [...] (match against installed packages, in a try)
        pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
    except portage.exception.AmbiguousPackageName, e:
        # Multiple matches thrown from cpv_expand
        # [...]
    # [...] (no matches)
        print "No packages found.\n"
        # [...]
    elif len(pkgs) > 1:
        if "--ask" in myopts:
            # [...]
            print "Please select a package to configure:"
            # [...] (enumerate the matches)
                options.append(str(idx))
                print options[-1]+") "+pkg
            # [...]
            options.append("X")
            idx = userquery("Selection?", options)
            # [...] ("X" cancels)
            pkg = pkgs[int(idx)-1]
        # [...] (else: just list them and bail)
            print "The following packages available:"
            # [...]
            print "\nPlease use a specific atom or the --ask option."
            # [...]
    # [...]
    if "--ask" in myopts:
        if userquery("Ready to configure "+pkg+"?") == "No":
            # [...] (user declined)
    # [...] (else)
        print "Configuring pkg..."
    # [...]
    ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
    mysettings = portage.config(clone=settings)
    vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
    debug = mysettings.get("PORTAGE_DEBUG") == "1"
    retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
        # NOTE(review): the expression below compares a string setting to the
        # int 1, so it is always False, and the 'debug' local computed above
        # is unused here -- probably meant debug=debug. Confirm before fixing.
        debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
        mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
    if retval == os.EX_OK:
        portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
            mysettings, debug=debug, mydbapi=vardb, tree="vartree")
def action_info(settings, trees, myopts, myfiles):
    # Print the "emerge --info" report: portage/system versions, tool
    # versions, configuration variables and, for any atoms given on the
    # command line, per-package build-time settings that differ from the
    # current configuration.
    # NOTE(review): this listing is elided; "# [...]" marks missing lines.
    print getportageversion(settings["PORTDIR"], settings["ROOT"],
        settings.profile_path, settings["CHOST"],
        trees[settings["ROOT"]]["vartree"].dbapi)
    # [...]
    header_title = "System Settings"
    # [...] (header_width initialization elided)
    print header_width * "="
    print header_title.rjust(int(header_width/2 + len(header_title)/2))
    print header_width * "="
    print "System uname: "+platform.platform(aliased=1)
    # [...]
    lastSync = portage.grabfile(os.path.join(
        settings["PORTDIR"], "metadata", "timestamp.chk"))
    print "Timestamp of tree:",
    # [...] (print lastSync or "Unknown")
    # Report distcc/ccache versions when the tools are available.
    output=commands.getstatusoutput("distcc --version")
    # [...] (if the command succeeded)
        print str(output[1].split("\n",1)[0]),
        if "distcc" in settings.features:
            # [...] (mark as enabled)
    # [...]
    output=commands.getstatusoutput("ccache -V")
    # [...] (if the command succeeded)
        print str(output[1].split("\n",1)[0]),
        if "ccache" in settings.features:
            # [...] (mark as enabled)
    # [...]
    myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
        "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
    myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
    myvars = portage.util.unique_array(myvars)
    # [...] (for each listed package, print installed versions)
        if portage.isvalidatom(x):
            pkg_matches = trees["/"]["vartree"].dbapi.match(x)
            pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
            pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
            # [...]
            for pn, ver, rev in pkg_matches:
                # [...] (revision formatting branch elided)
                    pkgs.append(ver + "-" + rev)
                # [...]
            # [...]
                pkgs = ", ".join(pkgs)
                print "%-20s %s" % (x+":", pkgs)
        # [...] (else)
            print "%-20s %s" % (x+":", "[NOT VALID]")
    # [...]
    libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
    # [...]
    if "--verbose" in myopts:
        myvars=settings.keys()
    # [...] (else: the default short variable list)
        myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
            'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
            'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
            'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
        # [...]
        myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
    # [...]
    myvars = portage.util.unique_array(myvars)
    # [...] (print each variable; USE gets USE_EXPAND flags factored out)
            print '%s="%s"' % (x, settings[x])
        # [...] (else: special-case USE)
            use = set(settings["USE"].split())
            use_expand = settings["USE_EXPAND"].split()
            # [...]
            for varname in use_expand:
                flag_prefix = varname.lower() + "_"
                for f in list(use):
                    if f.startswith(flag_prefix):
                        # [...] (move expanded flags out of USE)
            # [...]
            print 'USE="%s"' % " ".join(use),
            for varname in use_expand:
                myval = settings.get(varname)
                # [...] (only when set)
                    print '%s="%s"' % (varname, myval),
        # [...] (variable not in settings)
            unset_vars.append(x)
    # [...]
        print "Unset: "+", ".join(unset_vars)
    # [...]
    if "--debug" in myopts:
        for x in dir(portage):
            module = getattr(portage, x)
            if "cvs_id_string" in dir(module):
                print "%s: %s" % (str(x), str(module.cvs_id_string))
    # [...]
    # See if we can find any packages installed matching the strings
    # passed on the command line
    # [...]
    vardb = trees[settings["ROOT"]]["vartree"].dbapi
    portdb = trees[settings["ROOT"]]["porttree"].dbapi
    # [...] (collect installed matches for each argument)
        mypkgs.extend(vardb.match(x))
    # [...]
    # If some packages were found...
    # [...]
        # Get our global settings (we only print stuff if it varies from
        # the current config)
        mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
        auxkeys = mydesiredvars + [ "USE", "IUSE"]
        # [...]
        pkgsettings = portage.config(clone=settings)
        # [...]
        for myvar in mydesiredvars:
            global_vals[myvar] = set(settings.get(myvar, "").split())
        # [...]
        # Loop through each package
        # Only print settings if they differ from global settings
        header_title = "Package Settings"
        print header_width * "="
        print header_title.rjust(int(header_width/2 + len(header_title)/2))
        print header_width * "="
        from portage.output import EOutput
        # [...] (per-package loop)
            # Get all package specific variables
            auxvalues = vardb.aux_get(pkg, auxkeys)
            # [...]
            for i in xrange(len(auxkeys)):
                valuesmap[auxkeys[i]] = set(auxvalues[i].split())
            # [...]
            for myvar in mydesiredvars:
                # If the package variable doesn't match the
                # current global variable, something has changed
                # so set diff_found so we know to print
                if valuesmap[myvar] != global_vals[myvar]:
                    diff_values[myvar] = valuesmap[myvar]
            valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
            valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
            pkgsettings.reset()
            # If a matching ebuild is no longer available in the tree, maybe it
            # would make sense to compare against the flags for the best
            # available version with the same slot?
            # [...]
            if portdb.cpv_exists(pkg):
                # [...] (mydb initialization elided)
                pkgsettings.setcpv(pkg, mydb=mydb)
                if valuesmap["IUSE"].intersection(
                    pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
                    diff_values["USE"] = valuesmap["USE"]
            # If a difference was found, print the info for
            # [...] (this package)
                # Print package info
                print "%s was built with the following:" % pkg
                for myvar in mydesiredvars + ["USE"]:
                    if myvar in diff_values:
                        mylist = list(diff_values[myvar])
                        # [...] (sort elided)
                        print "%s=\"%s\"" % (myvar, " ".join(mylist))
            # [...]
            print ">>> Attempting to run pkg_info() for '%s'" % pkg
            ebuildpath = vardb.findname(pkg)
            if not ebuildpath or not os.path.exists(ebuildpath):
                out.ewarn("No ebuild found for '%s'" % pkg)
                # [...] (skip this package)
            portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
                # NOTE(review): comparing a string setting to the int 1 is
                # always False (same pattern as action_config) -- confirm.
                pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
                mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
                # [...] (tree argument elided)
def action_search(root_config, myopts, myfiles, spinner):
    # Run the interactive search UI for each term on the command line.
    # [...] (guard: no search terms given)
        print "emerge: no search terms provided."
    # [...] (else)
        searchinstance = search(root_config,
            spinner, "--searchdesc" in myopts,
            "--quiet" not in myopts, "--usepkg" in myopts,
            "--usepkgonly" in myopts)
        for mysearch in myfiles:
            # [...] (a malformed regex in a term is reported, not fatal)
                searchinstance.execute(mysearch)
            except re.error, comment:
                print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
                # [...] (abort)
            searchinstance.output()
# Handler for `emerge --depclean` and `emerge --prune`.  Builds a dependency
# graph rooted at the system and world sets (plus any argument atoms) and
# unmerges installed packages that are not reachable from those roots.
# NOTE(review): many original source lines are elided from this excerpt
# (try:/else: openers, loop headers, returns); comments below describe only
# what the visible lines establish.
13525 def action_depclean(settings, trees, ldpath_mtimes,
13526 myopts, action, myfiles, spinner):
13527 # Kill packages that aren't explicitly merged or are required as a
13528 # dependency of another package. World file is explicit.
13530 # Global depclean or prune operations are not very safe when there are
13531 # missing dependencies since it's unknown how badly incomplete
13532 # the dependency graph is, and we might accidentally remove packages
13533 # that should have been pulled into the graph. On the other hand, it's
13534 # relatively safe to ignore missing deps when only asked to remove
13535 # specific packages.
13536 allow_missing_deps = len(myfiles) > 0
# Safety-warning text accumulated into msg and echoed below before a full
# (argument-less) depclean.  msg's initialization is elided from this view.
13539 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13540 msg.append("mistakes. Packages that are part of the world set will always\n")
13541 msg.append("be kept. They can be manually added to this set with\n")
13542 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13543 msg.append("package.provided (see portage(5)) will be removed by\n")
13544 msg.append("depclean, even if they are part of the world set.\n")
13546 msg.append("As a safety measure, depclean will not remove any packages\n")
13547 msg.append("unless *all* required dependencies have been resolved. As a\n")
13548 msg.append("consequence, it is often necessary to run %s\n" % \
13549 good("`emerge --update"))
13550 msg.append(good("--newuse --deep @system @world`") + \
13551 " prior to depclean.\n")
# Only print the warning for a full, non-quiet depclean run.
13553 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13554 portage.writemsg_stdout("\n")
13556 portage.writemsg_stdout(colorize("WARN", " * ") + x)
# Gather the handles needed below: xterm title logging preference, target
# ROOT, its configuration, set-atom resolver, and the installed-package db.
13558 xterm_titles = "notitles" not in settings.features
13559 myroot = settings["ROOT"]
13560 root_config = trees[myroot]["root_config"]
13561 getSetAtoms = root_config.setconfig.getSetAtoms
13562 vardb = trees[myroot]["vartree"].dbapi
# The graph is rooted at these two sets; each is materialized as an
# InternalPackageSet so it can be mutated locally.
13564 required_set_names = ("system", "world")
13568 for s in required_set_names:
13569 required_sets[s] = InternalPackageSet(
13570 initial_atoms=getSetAtoms(s))
13573 # When removing packages, use a temporary version of world
13574 # which excludes packages that are intended to be eligible for
13576 world_temp_set = required_sets["world"]
13577 system_set = required_sets["system"]
# Warn loudly (with a countdown unless --pretend) when system/world are
# empty, since proceeding could break the installation.
13579 if not system_set or not world_temp_set:
13582 writemsg_level("!!! You have no system list.\n",
13583 level=logging.ERROR, noiselevel=-1)
13585 if not world_temp_set:
13586 writemsg_level("!!! You have no world file.\n",
13587 level=logging.WARNING, noiselevel=-1)
13589 writemsg_level("!!! Proceeding is likely to " + \
13590 "break your installation.\n",
13591 level=logging.WARNING, noiselevel=-1)
13592 if "--pretend" not in myopts:
13593 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13595 if action == "depclean":
13596 emergelog(xterm_titles, " >>> depclean")
# Validate any argument atoms given on the command line and collect them
# into args_set (the loop header over myfiles is elided here).
13599 args_set = InternalPackageSet()
13602 if not is_valid_package_atom(x):
13603 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13604 level=logging.ERROR, noiselevel=-1)
13605 writemsg_level("!!! Please check ebuild(5) for full details.\n")
# Expand short names (e.g. "gcc") to full category/package atoms; an
# ambiguous short name is reported with all candidates.
13608 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13609 except portage.exception.AmbiguousPackageName, e:
13610 msg = "The short ebuild name \"" + x + \
13611 "\" is ambiguous. Please specify " + \
13612 "one of the following " + \
13613 "fully-qualified ebuild names instead:"
13614 for line in textwrap.wrap(msg, 70):
13615 writemsg_level("!!! %s\n" % (line,),
13616 level=logging.ERROR, noiselevel=-1)
13618 writemsg_level(" %s\n" % colorize("INFORM", i),
13619 level=logging.ERROR, noiselevel=-1)
13620 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Bail out early when the argument atoms match nothing installed.
13623 matched_packages = False
13626 matched_packages = True
13628 if not matched_packages:
13629 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build the "remove"-mode dependency graph resolver.
13633 writemsg_level("\nCalculating dependencies ")
13634 resolver_params = create_depgraph_params(myopts, "remove")
13635 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13636 vardb = resolver.trees[myroot]["vartree"].dbapi
13638 if action == "depclean":
13641 # Pull in everything that's installed but not matched
13642 # by an argument atom since we don't want to clean any
13643 # package if something depends on it.
13645 world_temp_set.clear()
13650 if args_set.findAtomForPackage(pkg) is None:
13651 world_temp_set.add("=" + pkg.cpv)
# Packages with a broken PROVIDE string are reported but still protected
# from removal by adding them to the temporary world set.
13653 except portage.exception.InvalidDependString, e:
13654 show_invalid_depstring_notice(pkg,
13655 pkg.metadata["PROVIDE"], str(e))
13657 world_temp_set.add("=" + pkg.cpv)
13660 elif action == "prune":
13662 # Pull in everything that's installed since we don't
13663 # to prune a package if something depends on it.
13664 world_temp_set.clear()
13665 world_temp_set.update(vardb.cp_all())
13669 # Try to prune everything that's slotted.
13670 for cp in vardb.cp_all():
13671 if len(vardb.cp_list(cp)) > 1:
13674 # Remove atoms from world that match installed packages
13675 # that are also matched by argument atoms, but do not remove
13676 # them if they match the highest installed version.
13679 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13680 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13681 raise AssertionError("package expected in matches: " + \
13682 "cp = %s, cpv = %s matches = %s" % \
13683 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs() results are version-sorted, so the last entry is the
# highest installed version, which prune always keeps.
13685 highest_version = pkgs_for_cp[-1]
13686 if pkg == highest_version:
13687 # pkg is the highest version
13688 world_temp_set.add("=" + pkg.cpv)
13691 if len(pkgs_for_cp) <= 1:
13692 raise AssertionError("more packages expected: " + \
13693 "cp = %s, cpv = %s matches = %s" % \
13694 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13697 if args_set.findAtomForPackage(pkg) is None:
13698 world_temp_set.add("=" + pkg.cpv)
13700 except portage.exception.InvalidDependString, e:
13701 show_invalid_depstring_notice(pkg,
13702 pkg.metadata["PROVIDE"], str(e))
13704 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the system/world sets: each set becomes a SetArg
# graph node and each of its atoms a Dependency on the resolver's stack.
13708 for s, package_set in required_sets.iteritems():
13709 set_atom = SETPREFIX + s
13710 set_arg = SetArg(arg=set_atom, set=package_set,
13711 root_config=resolver.roots[myroot])
13712 set_args[s] = set_arg
13713 for atom in set_arg.set:
13714 resolver._dep_stack.append(
13715 Dependency(atom=atom, root=myroot, parent=set_arg))
13716 resolver.digraph.add(set_arg, None)
13718 success = resolver._complete_graph()
13719 writemsg_level("\b\b... done!\n")
13721 resolver.display_problems()
# Local helper: report dependencies that could not be satisfied while
# building the graph.  Presumably returns a truthy value when resolution
# failed badly enough to abort (its return lines are elided) -- see the
# "if unresolved_deps():" call sites below.
13726 def unresolved_deps():
13728 unresolvable = set()
13729 for dep in resolver._initially_unsatisfied_deps:
# Only hard (stronger-than-SOFT) deps with a real Package parent count
# as unresolvable.
13730 if isinstance(dep.parent, Package) and \
13731 (dep.priority > UnmergeDepPriority.SOFT):
13732 unresolvable.add((dep.atom, dep.parent.cpv))
13734 if not unresolvable:
# Missing deps abort a global depclean but are tolerated when specific
# packages were named on the command line (allow_missing_deps).
13737 if unresolvable and not allow_missing_deps:
13738 prefix = bad(" * ")
13740 msg.append("Dependencies could not be completely resolved due to")
13741 msg.append("the following required packages not being installed:")
13743 for atom, parent in unresolvable:
13744 msg.append(" %s pulled in by:" % (atom,))
13745 msg.append(" %s" % (parent,))
13747 msg.append("Have you forgotten to run " + \
13748 good("`emerge --update --newuse --deep @system @world`") + " prior")
13749 msg.append(("to %s? It may be necessary to manually " + \
13750 "uninstall packages that no longer") % action)
13751 msg.append("exist in the portage tree since " + \
13752 "it may not be possible to satisfy their")
13753 msg.append("dependencies. Also, be aware of " + \
13754 "the --with-bdeps option that is documented")
13755 msg.append("in " + good("`man emerge`") + ".")
13756 if action == "prune":
13758 msg.append("If you would like to ignore " + \
13759 "dependencies then use %s." % good("--nodeps"))
13760 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13761 level=logging.ERROR, noiselevel=-1)
13765 if unresolved_deps():
# Snapshot the completed graph and count the Package nodes that must be
# kept (used for the statistics printed at the end).
13768 graph = resolver.digraph.copy()
13769 required_pkgs_total = 0
13771 if isinstance(node, Package):
13772 required_pkgs_total += 1
# Local helper for --verbose: print the reverse dependencies that keep a
# package from being cleaned.
13774 def show_parents(child_node):
13775 parent_nodes = graph.parent_nodes(child_node)
13776 if not parent_nodes:
13777 # With --prune, the highest version can be pulled in without any
13778 # real parent since all installed packages are pulled in. In that
13779 # case there's nothing to show here.
13782 for node in parent_nodes:
13783 parent_strs.append(str(getattr(node, "cpv", node)))
13786 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13787 for parent_str in parent_strs:
13788 msg.append(" %s\n" % (parent_str,))
13790 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Comparator for cmp_sort_key(): orders Package instances by cpv (the
# return statements for each branch are elided from this excerpt).
13792 def cmp_pkg_cpv(pkg1, pkg2):
13793 """Sort Package instances by cpv."""
13794 if pkg1.cpv > pkg2.cpv:
13796 elif pkg1.cpv == pkg2.cpv:
# Local helper: walk the installed-package db and select every package
# that is NOT reachable in the dependency graph for removal.
13801 def create_cleanlist():
13802 pkgs_to_remove = []
13804 if action == "depclean":
13807 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13810 arg_atom = args_set.findAtomForPackage(pkg)
13811 except portage.exception.InvalidDependString:
13812 # this error has already been displayed by now
13816 if pkg not in graph:
13817 pkgs_to_remove.append(pkg)
13818 elif "--verbose" in myopts:
13822 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13823 if pkg not in graph:
13824 pkgs_to_remove.append(pkg)
13825 elif "--verbose" in myopts:
13828 elif action == "prune":
13829 # Prune really uses all installed instead of world. It's not
13830 # a real reverse dependency so don't display it as such.
13831 graph.remove(set_args["world"])
13833 for atom in args_set:
13834 for pkg in vardb.match_pkgs(atom):
13835 if pkg not in graph:
13836 pkgs_to_remove.append(pkg)
13837 elif "--verbose" in myopts:
13840 if not pkgs_to_remove:
13842 ">>> No packages selected for removal by %s\n" % action)
13843 if "--verbose" not in myopts:
13845 ">>> To see reverse dependencies, use %s\n" % \
13847 if action == "prune":
13849 ">>> To ignore dependencies, use %s\n" % \
13852 return pkgs_to_remove
13854 cleanlist = create_cleanlist()
13857 clean_set = set(cleanlist)
13859 # Check if any of these package are the sole providers of libraries
13860 # with consumers that have not been selected for removal. If so, these
13861 # packages and any dependencies need to be added to the graph.
13862 real_vardb = trees[myroot]["vartree"].dbapi
13863 linkmap = real_vardb.linkmap
13864 liblist = linkmap.listLibraryObjects()
# Memoize the (expensive) linkmap lookups across packages.
13865 consumer_cache = {}
13866 provider_cache = {}
13870 writemsg_level(">>> Checking for lib consumers...\n")
# Pass 1: for each package slated for removal, find the shared libraries
# it owns and the files that link against them.
13872 for pkg in cleanlist:
13873 pkg_dblink = real_vardb._dblink(pkg.cpv)
13874 provided_libs = set()
13876 for lib in liblist:
13877 if pkg_dblink.isowner(lib, myroot):
13878 provided_libs.add(lib)
13880 if not provided_libs:
13884 for lib in provided_libs:
13885 lib_consumers = consumer_cache.get(lib)
13886 if lib_consumers is None:
13887 lib_consumers = linkmap.findConsumers(lib)
13888 consumer_cache[lib] = lib_consumers
13890 consumers[lib] = lib_consumers
# A package consuming its own libraries doesn't block its removal.
13895 for lib, lib_consumers in consumers.items():
13896 for consumer_file in list(lib_consumers):
13897 if pkg_dblink.isowner(consumer_file, myroot):
13898 lib_consumers.remove(consumer_file)
13899 if not lib_consumers:
# Resolve each remaining consumer to the providers of the library's
# soname, so alternative providers can be detected below.
13905 for lib, lib_consumers in consumers.iteritems():
13907 soname = soname_cache.get(lib)
13909 soname = linkmap.getSoname(lib)
13910 soname_cache[lib] = soname
13912 consumer_providers = []
13913 for lib_consumer in lib_consumers:
13914 providers = provider_cache.get(lib)
13915 if providers is None:
13916 providers = linkmap.findProviders(lib_consumer)
13917 provider_cache[lib_consumer] = providers
13918 if soname not in providers:
13919 # Why does this happen?
13921 consumer_providers.append(
13922 (lib_consumer, providers[soname]))
13924 consumers[lib] = consumer_providers
13926 consumer_map[pkg] = consumers
# Pass 2: map every consumer/provider file back to its owning package in
# one batched ownership query.
13930 search_files = set()
13931 for consumers in consumer_map.itervalues():
13932 for lib, consumer_providers in consumers.iteritems():
13933 for lib_consumer, providers in consumer_providers:
13934 search_files.add(lib_consumer)
13935 search_files.update(providers)
13937 writemsg_level(">>> Assigning files to packages...\n")
13938 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13940 for pkg, consumers in consumer_map.items():
13941 for lib, consumer_providers in consumers.items():
13942 lib_consumers = set()
13944 for lib_consumer, providers in consumer_providers:
13945 owner_set = file_owners.get(lib_consumer)
13946 provider_dblinks = set()
13947 provider_pkgs = set()
# If more than one package provides this soname and at least one
# provider survives the clean, the consumer is not endangered.
13949 if len(providers) > 1:
13950 for provider in providers:
13951 provider_set = file_owners.get(provider)
13952 if provider_set is not None:
13953 provider_dblinks.update(provider_set)
13955 if len(provider_dblinks) > 1:
13956 for provider_dblink in provider_dblinks:
13957 pkg_key = ("installed", myroot,
13958 provider_dblink.mycpv, "nomerge")
13959 if pkg_key not in clean_set:
13960 provider_pkgs.add(vardb.get(pkg_key))
13965 if owner_set is not None:
13966 lib_consumers.update(owner_set)
# Consumers that are themselves being removed don't matter.
13968 for consumer_dblink in list(lib_consumers):
13969 if ("installed", myroot, consumer_dblink.mycpv,
13970 "nomerge") in clean_set:
13971 lib_consumers.remove(consumer_dblink)
13975 consumers[lib] = lib_consumers
13979 del consumer_map[pkg]
13982 # TODO: Implement a package set for rebuilding consumer packages.
# Anything left in consumer_map is a sole library provider with live
# consumers: warn, keep it, and pull it (and its deps) back into the graph.
13984 msg = "In order to avoid breakage of link level " + \
13985 "dependencies, one or more packages will not be removed. " + \
13986 "This can be solved by rebuilding " + \
13987 "the packages that pulled them in."
13989 prefix = bad(" * ")
13990 from textwrap import wrap
13991 writemsg_level("".join(prefix + "%s\n" % line for \
13992 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13995 for pkg, consumers in consumer_map.iteritems():
13996 unique_consumers = set(chain(*consumers.values()))
13997 unique_consumers = sorted(consumer.mycpv \
13998 for consumer in unique_consumers)
14000 msg.append(" %s pulled in by:" % (pkg.cpv,))
14001 for consumer in unique_consumers:
14002 msg.append(" %s" % (consumer,))
14004 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14005 level=logging.WARNING, noiselevel=-1)
14007 # Add lib providers to the graph as children of lib consumers,
14008 # and also add any dependencies pulled in by the provider.
14009 writemsg_level(">>> Adding lib providers to graph...\n")
14011 for pkg, consumers in consumer_map.iteritems():
14012 for consumer_dblink in set(chain(*consumers.values())):
14013 consumer_pkg = vardb.get(("installed", myroot,
14014 consumer_dblink.mycpv, "nomerge"))
14015 if not resolver._add_pkg(pkg,
14016 Dependency(parent=consumer_pkg,
14017 priority=UnmergeDepPriority(runtime=True),
14019 resolver.display_problems()
# Re-complete the graph and recompute the clean list now that the
# protected providers are part of it.
14022 writemsg_level("\nCalculating dependencies ")
14023 success = resolver._complete_graph()
14024 writemsg_level("\b\b... done!\n")
14025 resolver.display_problems()
14028 if unresolved_deps():
14031 graph = resolver.digraph.copy()
14032 required_pkgs_total = 0
14034 if isinstance(node, Package):
14035 required_pkgs_total += 1
14036 cleanlist = create_cleanlist()
14039 clean_set = set(cleanlist)
14041 # Use a topological sort to create an unmerge order such that
14042 # each package is unmerged before it's dependencies. This is
14043 # necessary to avoid breaking things that may need to run
14044 # during pkg_prerm or pkg_postrm phases.
14046 # Create a new graph to account for dependencies between the
14047 # packages being unmerged.
14051 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
# Map each dependency variable to its unmerge priority (the dict literal
# opener for priority_map is elided from this excerpt).
14052 runtime = UnmergeDepPriority(runtime=True)
14053 runtime_post = UnmergeDepPriority(runtime_post=True)
14054 buildtime = UnmergeDepPriority(buildtime=True)
14056 "RDEPEND": runtime,
14057 "PDEPEND": runtime_post,
14058 "DEPEND": buildtime,
14061 for node in clean_set:
14062 graph.add(node, None)
14064 node_use = node.metadata["USE"].split()
14065 for dep_type in dep_keys:
14066 depstr = node.metadata[dep_type]
# Temporarily relax strict dep checking while evaluating the installed
# package's dep string; restored immediately afterwards.
14070 portage.dep._dep_check_strict = False
14071 success, atoms = portage.dep_check(depstr, None, settings,
14072 myuse=node_use, trees=resolver._graph_trees,
14075 portage.dep._dep_check_strict = True
14077 # Ignore invalid deps of packages that will
14078 # be uninstalled anyway.
14081 priority = priority_map[dep_type]
14083 if not isinstance(atom, portage.dep.Atom):
14084 # Ignore invalid atoms returned from dep_check().
14088 matches = vardb.match_pkgs(atom)
# Only edges between packages that are both being removed matter for
# the unmerge ordering.
14091 for child_node in matches:
14092 if child_node in clean_set:
14093 graph.add(child_node, node, priority=priority)
14096 if len(graph.order) == len(graph.root_nodes()):
14097 # If there are no dependencies between packages
14098 # let unmerge() group them by cat/pn.
14100 cleanlist = [pkg.cpv for pkg in graph.order]
14102 # Order nodes from lowest to highest overall reference count for
14103 # optimal root node selection.
14104 node_refcounts = {}
14105 for node in graph.order:
14106 node_refcounts[node] = len(graph.parent_nodes(node))
14107 def cmp_reference_count(node1, node2):
14108 return node_refcounts[node1] - node_refcounts[node2]
14109 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Repeatedly pop root nodes, progressively ignoring weaker dep priorities
# to break circular dependencies when no true root exists.
14111 ignore_priority_range = [None]
14112 ignore_priority_range.extend(
14113 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14114 while not graph.empty():
14115 for ignore_priority in ignore_priority_range:
14116 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14120 raise AssertionError("no root nodes")
14121 if ignore_priority is not None:
14122 # Some deps have been dropped due to circular dependencies,
14123 # so only pop one node in order do minimize the number that
14128 cleanlist.append(node.cpv)
14130 unmerge(root_config, myopts, "unmerge", cleanlist,
14131 ldpath_mtimes, ordered=ordered)
14133 if action == "prune":
# Final statistics, printed unless quiet with an empty clean list.
14136 if not cleanlist and "--quiet" in myopts:
14139 print "Packages installed: "+str(len(vardb.cpv_all()))
14140 print "Packages in world: " + \
14141 str(len(root_config.sets["world"].getAtoms()))
14142 print "Packages in system: " + \
14143 str(len(root_config.sets["system"].getAtoms()))
14144 print "Required packages: "+str(required_pkgs_total)
14145 if "--pretend" in myopts:
14146 print "Number to remove: "+str(len(cleanlist))
14148 print "Number removed: "+str(len(cleanlist))
# Rebuild a depgraph from the saved resume list in mtimedb, dropping
# unsatisfiable entries (and anything that depended on them) until the
# remaining mergelist loads cleanly.
# NOTE(review): the retry loop header, try: opener, and several
# continue/raise lines are elided from this excerpt.
14150 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14152 Construct a depgraph for the given resume list. This will raise
14153 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14155 @returns: (success, depgraph, dropped_tasks)
14158 skip_unsatisfied = True
14159 mergelist = mtimedb["resume"]["mergelist"]
14160 dropped_tasks = set()
14162 mydepgraph = depgraph(settings, trees,
14163 myopts, myparams, spinner)
# skip_masked is defined outside this excerpt; presumably set above --
# it controls whether masked resume entries are silently skipped.
14165 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14166 skip_masked=skip_masked)
14167 except depgraph.UnsatisfiedResumeDep, e:
14168 if not skip_unsatisfied:
# Propagate unsatisfaction upward: removing an unsatisfied package can
# in turn unsatisfy each parent that is scheduled for merge, so walk
# the graph transitively collecting every affected parent.
14171 graph = mydepgraph.digraph
14172 unsatisfied_parents = dict((dep.parent, dep.parent) \
14173 for dep in e.value)
14174 traversed_nodes = set()
14175 unsatisfied_stack = list(unsatisfied_parents)
14176 while unsatisfied_stack:
14177 pkg = unsatisfied_stack.pop()
14178 if pkg in traversed_nodes:
14180 traversed_nodes.add(pkg)
14182 # If this package was pulled in by a parent
14183 # package scheduled for merge, removing this
14184 # package may cause the the parent package's
14185 # dependency to become unsatisfied.
14186 for parent_node in graph.parent_nodes(pkg):
14187 if not isinstance(parent_node, Package) \
14188 or parent_node.operation not in ("merge", "nomerge"):
# Soft deps are ignored when deciding whether the parent still
# depends on pkg (assignment target of this call is elided).
14191 graph.child_nodes(parent_node,
14192 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14193 if pkg in unsatisfied:
14194 unsatisfied_parents[parent_node] = parent_node
14195 unsatisfied_stack.append(parent_node)
# Drop every unsatisfied parent from the saved mergelist (entries are
# 4-element lists; compared as tuples against the Package keys).
14197 pruned_mergelist = []
14198 for x in mergelist:
14199 if isinstance(x, list) and \
14200 tuple(x) not in unsatisfied_parents:
14201 pruned_mergelist.append(x)
14203 # If the mergelist doesn't shrink then this loop is infinite.
14204 if len(pruned_mergelist) == len(mergelist):
14205 # This happens if a package can't be dropped because
14206 # it's already installed, but it has unsatisfied PDEPEND.
14208 mergelist[:] = pruned_mergelist
14210 # Exclude installed packages that have been removed from the graph due
14211 # to failure to build/install runtime dependencies after the dependent
14212 # package has already been installed.
14213 dropped_tasks.update(pkg for pkg in \
14214 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs() releases references held by the dropped packages so they
# can be garbage collected before retrying.
14215 mydepgraph.break_refs(unsatisfied_parents)
14217 del e, graph, traversed_nodes, \
14218 unsatisfied_parents, unsatisfied_stack
14222 return (success, mydepgraph, dropped_tasks)
# Main handler for the merge/build actions of emerge (including --resume,
# --pretend, --ask and --fetchonly flows): validates resume state, builds
# the dependency graph, displays/prompts as requested, then hands the
# merge list to the Scheduler.
# NOTE(review): a large number of original lines are elided from this
# excerpt (try:/else:/return/sys.exit lines and several loop headers);
# comments describe only what the visible lines establish.
14224 def action_build(settings, trees, mtimedb,
14225 myopts, myaction, myfiles, spinner):
14227 # validate the state of the resume data
14228 # so that we can make assumptions later.
14229 for k in ("resume", "resume_backup"):
14230 if k not in mtimedb:
14232 resume_data = mtimedb[k]
# Each sanity check below presumably discards the stale entry when it
# fails (the deletion/continue lines are elided).
14233 if not isinstance(resume_data, dict):
14236 mergelist = resume_data.get("mergelist")
14237 if not isinstance(mergelist, list):
14240 for x in mergelist:
14241 if not (isinstance(x, list) and len(x) == 4):
14243 pkg_type, pkg_root, pkg_key, pkg_action = x
14244 if pkg_root not in trees:
14245 # Current $ROOT setting differs,
14246 # so the list must be stale.
14252 resume_opts = resume_data.get("myopts")
14253 if not isinstance(resume_opts, (dict, list)):
14256 favorites = resume_data.get("favorites")
14257 if not isinstance(favorites, list):
# When resuming, fall back to the backup resume entry if the primary one
# is gone, then merge the saved options into the current myopts.
14262 if "--resume" in myopts and \
14263 ("resume" in mtimedb or
14264 "resume_backup" in mtimedb):
14266 if "resume" not in mtimedb:
14267 mtimedb["resume"] = mtimedb["resume_backup"]
14268 del mtimedb["resume_backup"]
14270 # "myopts" is a list for backward compatibility.
14271 resume_opts = mtimedb["resume"].get("myopts", [])
14272 if isinstance(resume_opts, list):
14273 resume_opts = dict((k,True) for k in resume_opts)
# Interactive/one-shot display options must not carry over into the
# resumed run.
14274 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14275 resume_opts.pop(opt, None)
14276 myopts.update(resume_opts)
14278 if "--debug" in myopts:
14279 writemsg_level("myopts %s\n" % (myopts,))
14281 # Adjust config according to options of the command being resumed.
14282 for myroot in trees:
14283 mysettings = trees[myroot]["vartree"].settings
14284 mysettings.unlock()
14285 adjust_config(myopts, mysettings)
14287 del myroot, mysettings
14289 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags.
14292 buildpkgonly = "--buildpkgonly" in myopts
14293 pretend = "--pretend" in myopts
14294 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14295 ask = "--ask" in myopts
14296 nodeps = "--nodeps" in myopts
14297 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14298 tree = "--tree" in myopts
14299 if nodeps and tree:
14301 del myopts["--tree"]
14302 portage.writemsg(colorize("WARN", " * ") + \
14303 "--tree is broken with --nodeps. Disabling...\n")
14304 debug = "--debug" in myopts
14305 verbose = "--verbose" in myopts
14306 quiet = "--quiet" in myopts
14307 if pretend or fetchonly:
14308 # make the mtimedb readonly
14309 mtimedb.filename = None
# Warn that regenerating digests can mask distfile corruption; repoman
# is the preferred tool.
14310 if '--digest' in myopts or 'digest' in settings.features:
14311 if '--digest' in myopts:
14312 msg = "The --digest option"
14314 msg = "The FEATURES=digest setting"
14316 msg += " can prevent corruption from being" + \
14317 " noticed. The `repoman manifest` command is the preferred" + \
14318 " way to generate manifests and it is capable of doing an" + \
14319 " entire repository or category at once."
14320 prefix = bad(" * ")
14321 writemsg(prefix + "\n")
14322 from textwrap import wrap
14323 for line in wrap(msg, 72):
14324 writemsg("%s%s\n" % (prefix, line))
14325 writemsg(prefix + "\n")
# Banner describing what the displayed package list means for this action.
14327 if "--quiet" not in myopts and \
14328 ("--pretend" in myopts or "--ask" in myopts or \
14329 "--tree" in myopts or "--verbose" in myopts):
14331 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14333 elif "--buildpkgonly" in myopts:
14337 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14339 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14343 print darkgreen("These are the packages that would be %s, in order:") % action
14346 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14347 if not show_spinner:
14348 spinner.update = spinner.update_quiet
# Resume path: rebuild the graph from the saved mergelist via
# resume_depgraph(), honoring --skipfirst.
14351 favorites = mtimedb["resume"].get("favorites")
14352 if not isinstance(favorites, list):
14356 print "Calculating dependencies ",
14357 myparams = create_depgraph_params(myopts, myaction)
14359 resume_data = mtimedb["resume"]
14360 mergelist = resume_data["mergelist"]
14361 if mergelist and "--skipfirst" in myopts:
14362 for i, task in enumerate(mergelist):
14363 if isinstance(task, list) and \
14364 task and task[-1] == "merge":
14371 success, mydepgraph, dropped_tasks = resume_depgraph(
14372 settings, trees, mtimedb, myopts, myparams, spinner)
14373 except (portage.exception.PackageNotFound,
14374 depgraph.UnsatisfiedResumeDep), e:
14375 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14376 mydepgraph = e.depgraph
# Report why the resume list could not be loaded, then (below) discard
# the stale resume data.
14379 from textwrap import wrap
14380 from portage.output import EOutput
14383 resume_data = mtimedb["resume"]
14384 mergelist = resume_data.get("mergelist")
14385 if not isinstance(mergelist, list):
14387 if mergelist and debug or (verbose and not quiet):
14388 out.eerror("Invalid resume list:")
14391 for task in mergelist:
14392 if isinstance(task, list):
14393 out.eerror(indent + str(tuple(task)))
14396 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14397 out.eerror("One or more packages are either masked or " + \
14398 "have missing dependencies:")
14401 for dep in e.value:
14402 if dep.atom is None:
14403 out.eerror(indent + "Masked package:")
14404 out.eerror(2 * indent + str(dep.parent))
14407 out.eerror(indent + str(dep.atom) + " pulled in by:")
14408 out.eerror(2 * indent + str(dep.parent))
14410 msg = "The resume list contains packages " + \
14411 "that are either masked or have " + \
14412 "unsatisfied dependencies. " + \
14413 "Please restart/continue " + \
14414 "the operation manually, or use --skipfirst " + \
14415 "to skip the first package in the list and " + \
14416 "any other packages that may be " + \
14417 "masked or have missing dependencies."
14418 for line in wrap(msg, 72):
14420 elif isinstance(e, portage.exception.PackageNotFound):
14421 out.eerror("An expected package is " + \
14422 "not available: %s" % str(e))
14424 msg = "The resume list contains one or more " + \
14425 "packages that are no longer " + \
14426 "available. Please restart/continue " + \
14427 "the operation manually."
14428 for line in wrap(msg, 72):
14432 print "\b\b... done!"
14436 portage.writemsg("!!! One or more packages have been " + \
14437 "dropped due to\n" + \
14438 "!!! masking or unsatisfied dependencies:\n\n",
14440 for task in dropped_tasks:
14441 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14442 portage.writemsg("\n", noiselevel=-1)
14445 if mydepgraph is not None:
14446 mydepgraph.display_problems()
14447 if not (ask or pretend):
14448 # delete the current list and also the backup
14449 # since it's probably stale too.
14450 for k in ("resume", "resume_backup"):
14451 mtimedb.pop(k, None)
14456 if ("--resume" in myopts):
14457 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the command-line targets.
14460 myparams = create_depgraph_params(myopts, myaction)
14461 if "--quiet" not in myopts and "--nodeps" not in myopts:
14462 print "Calculating dependencies ",
14464 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14466 retval, favorites = mydepgraph.select_files(myfiles)
14467 except portage.exception.PackageNotFound, e:
14468 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14470 except portage.exception.PackageSetNotFound, e:
14471 root_config = trees[settings["ROOT"]]["root_config"]
14472 display_missing_pkg_set(root_config, e.value)
14475 print "\b\b... done!"
14477 mydepgraph.display_problems()
# --ask (and verbose/tree display) path: show the merge list and prompt
# before doing anything.
14480 if "--pretend" not in myopts and \
14481 ("--ask" in myopts or "--tree" in myopts or \
14482 "--verbose" in myopts) and \
14483 not ("--quiet" in myopts and "--ask" not in myopts):
14484 if "--resume" in myopts:
14485 mymergelist = mydepgraph.altlist()
14486 if len(mymergelist) == 0:
14487 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14489 favorites = mtimedb["resume"]["favorites"]
14490 retval = mydepgraph.display(
14491 mydepgraph.altlist(reversed=tree),
14492 favorites=favorites)
14493 mydepgraph.display_problems()
14494 if retval != os.EX_OK:
14496 prompt="Would you like to resume merging these packages?"
14498 retval = mydepgraph.display(
14499 mydepgraph.altlist(reversed=("--tree" in myopts)),
14500 favorites=favorites)
14501 mydepgraph.display_problems()
14502 if retval != os.EX_OK:
# Choose the interactive prompt text based on what will actually happen
# (merge, fetch, or auto-clean when nothing is to be merged).
14505 for x in mydepgraph.altlist():
14506 if isinstance(x, Package) and x.operation == "merge":
14510 sets = trees[settings["ROOT"]]["root_config"].sets
14511 world_candidates = None
14512 if "--noreplace" in myopts and \
14513 not oneshot and favorites:
14514 # Sets that are not world candidates are filtered
14515 # out here since the favorites list needs to be
14516 # complete for depgraph.loadResumeCommand() to
14517 # operate correctly.
14518 world_candidates = [x for x in favorites \
14519 if not (x.startswith(SETPREFIX) and \
14520 not sets[x[1:]].world_candidate)]
14521 if "--noreplace" in myopts and \
14522 not oneshot and world_candidates:
14524 for x in world_candidates:
14525 print " %s %s" % (good("*"), x)
14526 prompt="Would you like to add these packages to your world favorites?"
14527 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14528 prompt="Nothing to merge; would you like to auto-clean packages?"
14531 print "Nothing to merge; quitting."
14534 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14535 prompt="Would you like to fetch the source files for these packages?"
14537 prompt="Would you like to merge these packages?"
14539 if "--ask" in myopts and userquery(prompt) == "No":
14544 # Don't ask again (e.g. when auto-cleaning packages after merge)
14545 myopts.pop("--ask", None)
# --pretend path: display the merge list and stop (unless fetching).
14547 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14548 if ("--resume" in myopts):
14549 mymergelist = mydepgraph.altlist()
14550 if len(mymergelist) == 0:
14551 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14553 favorites = mtimedb["resume"]["favorites"]
14554 retval = mydepgraph.display(
14555 mydepgraph.altlist(reversed=tree),
14556 favorites=favorites)
14557 mydepgraph.display_problems()
14558 if retval != os.EX_OK:
14561 retval = mydepgraph.display(
14562 mydepgraph.altlist(reversed=("--tree" in myopts)),
14563 favorites=favorites)
14564 mydepgraph.display_problems()
14565 if retval != os.EX_OK:
# --buildpkgonly requires a graph with no unmerged dependencies; checked
# here for the pretend/ask display path and again below for the real run.
14567 if "--buildpkgonly" in myopts:
14568 graph_copy = mydepgraph.digraph.clone()
14569 removed_nodes = set()
14570 for node in graph_copy:
14571 if not isinstance(node, Package) or \
14572 node.operation == "nomerge":
14573 removed_nodes.add(node)
14574 graph_copy.difference_update(removed_nodes)
14575 if not graph_copy.hasallzeros(ignore_priority = \
14576 DepPrioritySatisfiedRange.ignore_medium):
14577 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14578 print "!!! You have to merge the dependencies before you can build this package.\n"
14581 if "--buildpkgonly" in myopts:
14582 graph_copy = mydepgraph.digraph.clone()
14583 removed_nodes = set()
14584 for node in graph_copy:
14585 if not isinstance(node, Package) or \
14586 node.operation == "nomerge":
14587 removed_nodes.add(node)
14588 graph_copy.difference_update(removed_nodes)
14589 if not graph_copy.hasallzeros(ignore_priority = \
14590 DepPrioritySatisfiedRange.ignore_medium):
14591 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14592 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Real merge path: hand the list to the Scheduler.  The depgraph and list
# references are dropped (break_refs/del/clear_caches) to free memory
# before the long-running merge.
14595 if ("--resume" in myopts):
14596 favorites=mtimedb["resume"]["favorites"]
14597 mymergelist = mydepgraph.altlist()
14598 mydepgraph.break_refs(mymergelist)
14599 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14600 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14601 del mydepgraph, mymergelist
14602 clear_caches(trees)
14604 retval = mergetask.merge()
14605 merge_count = mergetask.curval
# Rotate the current resume list into resume_backup before writing the
# new one, so an interrupted resume can itself be resumed.
14607 if "resume" in mtimedb and \
14608 "mergelist" in mtimedb["resume"] and \
14609 len(mtimedb["resume"]["mergelist"]) > 1:
14610 mtimedb["resume_backup"] = mtimedb["resume"]
14611 del mtimedb["resume"]
14613 mtimedb["resume"]={}
14614 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14615 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14616 # a list type for options.
14617 mtimedb["resume"]["myopts"] = myopts.copy()
14619 # Convert Atom instances to plain str.
14620 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14622 pkglist = mydepgraph.altlist()
14623 mydepgraph.saveNomergeFavorites()
14624 mydepgraph.break_refs(pkglist)
14625 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14626 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14627 del mydepgraph, pkglist
14628 clear_caches(trees)
14630 retval = mergetask.merge()
14631 merge_count = mergetask.curval
# After a fully successful real merge, auto-clean (or warn that
# AUTOCLEAN is off) and prune stale preserved-libs registry entries.
14633 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14634 if "yes" == settings.get("AUTOCLEAN"):
14635 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14636 unmerge(trees[settings["ROOT"]]["root_config"],
14637 myopts, "clean", [],
14638 ldpath_mtimes, autoclean=1)
14640 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14641 + " AUTOCLEAN is disabled. This can cause serious"
14642 + " problems due to overlapping packages.\n")
14643 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
	"""Report on stderr that two mutually exclusive actions were requested.

	NOTE(review): in this listing the function ends after the two writes;
	the full source presumably also aborts (e.g. sys.exit) -- confirm.
	"""
	lines = [
		"\n!!! Multiple actions requested... Please choose one only.\n",
		"!!! '%s' or '%s'\n\n" % (action1, action2),
	]
	for line in lines:
		sys.stderr.write(line)
def insert_optional_args(args):
	"""
	Parse optional arguments and insert a value if one has
	not been provided. This is done before feeding the args
	to the optparse parser since that parser does not support
	this feature natively.

	NOTE(review): this listing is elided -- the accumulator
	initialization (presumably new_args = []), the enclosing
	"while arg_stack:" loop header, the try/except around the
	int() conversions, and the final return do not appear here.
	Confirm against the full source before relying on structure.
	"""
	jobs_opts = ("-j", "--jobs")
	# Process arguments right-to-left via a stack (pop() takes the front).
	arg_stack = args[:]
	arg_stack.reverse()
	arg = arg_stack.pop()

	# A short -j may be bundled with other single-letter flags (e.g. "-pj"):
	# starts with a single "-", contains "j", and is not a long option.
	short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
	if not (short_job_opt or arg in jobs_opts):
		new_args.append(arg)

	# Insert an empty placeholder in order to
	# satisfy the requirements of optparse.

	new_args.append("--jobs")

	if short_job_opt and len(arg) > 2:
		if arg[:2] == "-j":
			# "-jN" carries its count inline (int() presumably in a try --
			# the try/except scaffolding is elided in this listing).
			job_count = int(arg[2:])
			saved_opts = arg[2:]
		# (else branch header elided: strip "j" from a bundled flag group)
		saved_opts = arg[1:].replace("j", "")

	if job_count is None and arg_stack:
		# The count may follow as a separate argument, e.g. "-j 3".
		job_count = int(arg_stack[-1])
		# Discard the job count from the stack
		# since we're consuming it here.

	if job_count is None:
		# unlimited number of jobs
		new_args.append("True")
	# (else branch header elided)
	new_args.append(str(job_count))

	if saved_opts is not None:
		# Re-inject the remaining bundled short flags.
		new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
	"""
	Parse the emerge command line and return (myaction, myopts, myfiles).

	NOTE(review): this listing is heavily elided -- the initializations of
	myaction/myopts/myfiles, several argument_options keys, the try/except
	scaffolding around int()/float() conversions, and various condition
	bodies do not appear here. Confirm against the full source.
	"""
	global actions, options, shortmapping

	# Legacy spellings map onto their canonical long options.
	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}

	# Options that take an argument. Only fragments of the per-option
	# kwargs dicts are visible in this listing; the option-name keys for
	# most entries are elided.
	argument_options = {
		# presumably the "--config-root" entry:
		"help":"specify the location for portage configuration files",
		# presumably the "--color" entry:
		"help":"enable or disable color output",
		"choices":("y", "n")
		# presumably the "--jobs" entry (help text truncated in listing):
		"help" : "Specifies the number of packages to build " + \
		"--load-average": {
		"help" :"Specifies that no new builds should be started " + \
		"if there are other builds running and the load average " + \
		"is at least LOAD (a floating-point number).",
		# presumably the "--with-bdeps" entry:
		"help":"include unnecessary build time dependencies",
		"choices":("y", "n")
		# presumably the "--reinstall" entry:
		"help":"specify conditions to trigger package reinstallation",
		"choices":["changed-use"]

	from optparse import OptionParser
	parser = OptionParser()
	# Drop optparse's automatic --help so emerge can handle it itself.
	if parser.has_option("--help"):
		parser.remove_option("--help")

	# Boolean action flags: dest is the option name with "-" -> "_".
	for action_opt in actions:
		parser.add_option("--" + action_opt, action="store_true",
			dest=action_opt.replace("-", "_"), default=False)
	# Boolean option flags.
	for myopt in options:
		parser.add_option(myopt, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)
	# Short aliases share the dest of their long form.
	for shortopt, longopt in shortmapping.iteritems():
		parser.add_option("-" + shortopt, action="store_true",
			dest=longopt.lstrip("--").replace("-", "_"), default=False)
	for myalias, myopt in longopt_aliases.iteritems():
		parser.add_option(myalias, action="store_true",
			dest=myopt.lstrip("--").replace("-", "_"), default=False)

	# Argument-taking options use the kwargs declared above.
	for myopt, kwargs in argument_options.iteritems():
		parser.add_option(myopt,
			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)

	# Fill in implicit values (e.g. bare --jobs) before optparse sees them.
	tmpcmdline = insert_optional_args(tmpcmdline)

	myoptions, myargs = parser.parse_args(args=tmpcmdline)

	# "True" is the placeholder inserted by insert_optional_args meaning
	# "unlimited jobs". The int() conversion is presumably in a try/except
	# (elided in this listing).
	if myoptions.jobs == "True":
		jobs = int(myoptions.jobs)
	# (remainder of the jobs validity condition elided in this listing)
	if jobs is not True and \
		writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
			(myoptions.jobs,), noiselevel=-1)
	myoptions.jobs = jobs

	if myoptions.load_average:
		# float() conversion presumably wrapped in try/except (elided).
		load_average = float(myoptions.load_average)
		# A non-positive load average is treated as unset.
		if load_average <= 0.0:
			load_average = None
		# (error-branch header elided)
		writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
			(myoptions.load_average,), noiselevel=-1)
	myoptions.load_average = load_average

	# Copy parsed boolean flags into myopts, keyed by option string.
	for myopt in options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
		# (the "if v:" guard appears elided in this listing)
		myopts[myopt] = True

	for myopt in argument_options:
		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
		# (storage of non-None argument values elided)

	# --searchdesc implies the search action.
	if myoptions.searchdesc:
		myoptions.search = True

	# Exactly one action may be requested; conflicting actions abort via
	# multiple_actions(). (Guard condition and the subsequent exit elided.)
	for action_opt in actions:
		v = getattr(myoptions, action_opt.replace("-", "_"))
		multiple_actions(myaction, action_opt)
		myaction = action_opt

	# (collection of positional args into myfiles appears elided)
	return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run configuration validation on the vartree settings of every root."""
	for root in trees:
		tree_settings = trees[root]["vartree"].settings
		tree_settings.validate()
def clear_caches(trees):
	"""Flush memoized and auxiliary caches held by the porttree, bintree
	and vartree dbapi objects of every root, then drop portage's global
	directory cache. Used to release memory between depgraph runs."""
	for tree_data in trees.itervalues():
		porttree_db = tree_data["porttree"].dbapi
		bintree_db = tree_data["bintree"].dbapi
		# melt() undoes the xmatch freeze/memoization on the porttree.
		porttree_db.melt()
		porttree_db._aux_cache.clear()
		bintree_db._aux_cache.clear()
		bintree_db._clear_cache()
		tree_data["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
def load_emerge_config(trees=None):
	"""
	Build and return (settings, trees, mtimedb) for this emerge run.

	NOTE(review): this listing is elided -- the kwargs initialization
	(presumably kwargs = {}) and parts of the loop bodies are missing;
	confirm against the full source.
	"""
	# Pass PORTAGE_CONFIGROOT/ROOT overrides from the environment through
	# to portage.create_trees() as keyword arguments.
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
		if v and v.strip():
			# (kwargs[k] = v presumably elided here)
	trees = portage.create_trees(trees=trees, **kwargs)

	# Attach a RootConfig (including the default set configuration) to
	# each root's tree dict.
	for root, root_trees in trees.iteritems():
		settings = root_trees["vartree"].settings
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	settings = trees["/"]["vartree"].settings

	for myroot in trees:
		# (the condition selecting a non-"/" root appears elided)
		settings = trees[myroot]["vartree"].settings

	# The mtime database lives under portage's cache path on the host root.
	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)

	return settings, trees, mtimedb
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config."""

	# NOTE(review): several lines of this function are elided in this
	# listing -- in particular the "try:" headers paired with the visible
	# "except ValueError" clauses and some default initializations
	# (e.g. of CLEAN_DELAY / PORTAGE_DEBUG). Confirm against full source.

	# To enhance usability, make some vars case insensitive by forcing them to
	# lower case. (Continuation of this original comment appears elided.)
	for myvar in ("AUTOCLEAN", "NOCOLOR"):
		if myvar in settings:
			settings[myvar] = settings[myvar].lower()
			settings.backup_changes(myvar)

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		while "noauto" in settings.features:
			settings.features.remove("noauto")
		settings["FEATURES"] = " ".join(settings.features)
		settings.backup_changes("FEATURES")

	# Sanitize CLEAN_DELAY to an integer; fall back to the default on
	# parse failure. (The "try:" header and default init are elided.)
	CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	EMERGE_WARNING_DELAY = 10
	# Same sanitization pattern for EMERGE_WARNING_DELAY ("try:" elided).
	EMERGE_WARNING_DELAY = int(settings.get(
		"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	if "--quiet" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
	# (default initialization and "try:" header elided in this listing)
	PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
	if PORTAGE_DEBUG not in (0, 1):
		portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
			PORTAGE_DEBUG, noiselevel=-1)
		portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError, e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)

	if "--debug" in myopts:
		# (forcing PORTAGE_DEBUG to 1 appears elided in this listing)
		settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
		settings.backup_changes("PORTAGE_DEBUG")

	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
		# (the "else:" header for --color=n appears elided)
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
		# No tty on stdout: disable color unless NOCOLOR explicitly says "no".
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
# Apply process priority settings for this emerge run. NOTE(review): the
# function body is entirely elided from this listing -- presumably it
# invokes the nice()/ionice() helpers defined below; confirm against the
# full source.
def apply_priorities(settings):
def nice(settings):
	"""Renice the current process by PORTAGE_NICENESS (default "0").

	Failures (bad value, insufficient permission) are reported via
	EOutput rather than raised.
	"""
	# (the "try:" header paired with the except below is elided here)
	os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
	except (OSError, ValueError), e:
		out = portage.output.EOutput()
		out.eerror("Failed to change nice value to '%s'" % \
			settings["PORTAGE_NICENESS"])
		out.eerror("%s\n" % str(e))
def ionice(settings):
	"""Run the user-configured PORTAGE_IONICE_COMMAND for this process.

	NOTE(review): guard conditions, the "try:" header and an early
	return appear elided in this listing -- confirm against full source.
	"""
	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
	# (guard for an unset/empty command elided)
	ionice_cmd = shlex.split(ionice_cmd)

	from portage.util import varexpand
	# Expand ${PID} in the configured command to our own process id.
	variables = {"PID" : str(os.getpid())}
	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]

	# (the "try:" header paired with the except below is elided here)
	rval = portage.process.spawn(cmd, env=os.environ)
	except portage.exception.CommandNotFound:
		# The OS kernel probably doesn't support ionice,
		# so return silently.

	if rval != os.EX_OK:
		out = portage.output.EOutput()
		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
	"""Log an ERROR listing all available sets when set_name is unknown."""
	# (initialization of the msg list appears elided in this listing)
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	# List every configured set, sorted for stable output.
	for s in sorted(root_config.sets):
		msg.append(" %s" % s)
	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
	"""
	Expand @set arguments in myfiles (including set options of the form
	@name{key=value,...} and the set operators) and return
	(newargs, retval).

	NOTE(review): this listing is very heavily elided -- initializations
	(retval, newargs, missing_sets, options, expr_sets/expr_ops), several
	loop and branch headers, ARG_START/ARG_END/IS_OPERATOR definitions,
	early returns and try: headers are all missing. Confirm any
	structural reading against the full source.
	"""
	setconfig = root_config.setconfig

	sets = setconfig.getSets()

	# In order to know exactly which atoms/sets should be added to the
	# world file, the depgraph performs set expansion later. It will get
	# confused about where the atoms came from if it's not allowed to
	# expand them itself.
	do_not_expand = (None, )
	# (newargs initialization and the loop header over myfiles elided)
	if a in ("system", "world"):
		newargs.append(SETPREFIX+a)

	# separators for set arguments
	# (ARG_START/ARG_END and the IS_OPERATOR definition appear elided)
	# WARNING: all operators must be of equal length
	DIFF_OPERATOR = "-@"
	UNION_OPERATOR = "+@"

	# Expand set-option arguments of the form @name{opt=val,...}.
	for i in range(0, len(myfiles)):
		if myfiles[i].startswith(SETPREFIX):
			x = myfiles[i][len(SETPREFIX):]
			start = x.find(ARG_START)
			end = x.find(ARG_END)
			if start > 0 and start < end:
				namepart = x[:start]
				argpart = x[start+1:end]

				# TODO: implement proper quoting
				args = argpart.split(",")
				# (options init and per-arg parsing loop partially elided;
				# "key=value" entries split below, bare keys become "True")
				k, v = a.split("=", 1)
				options[a] = "True"
				setconfig.update(namepart, options)
				newset += (x[:start-len(namepart)]+namepart)
				x = x[end+len(ARG_END):]
			myfiles[i] = SETPREFIX+newset

	sets = setconfig.getSets()

	# display errors that occured while loading the SetConfig instance
	for e in setconfig.errors:
		print colorize("BAD", "Error during set creation: %s" % e)

	# emerge relies on the existance of sets with names "world" and "system"
	required_sets = ("world", "system")

	# (missing_sets init and the membership test appear elided)
	for s in required_sets:
		missing_sets.append(s)

	# Build a human-readable list of the missing required sets.
	if len(missing_sets) > 2:
		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
		missing_sets_str += ', and "%s"' % missing_sets[-1]
	elif len(missing_sets) == 2:
		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
	# (the "else:" header for the single-set case appears elided)
	missing_sets_str = '"%s"' % missing_sets[-1]
	msg = ["emerge: incomplete set configuration, " + \
		"missing set(s): %s" % missing_sets_str]
	msg.append("  sets defined: %s" % ", ".join(sets))
	msg.append("  This usually means that '%s'" % \
		(os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
	msg.append("  is missing or corrupt.")
	# (loop header over msg lines elided)
	writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)

	unmerge_actions = ("unmerge", "prune", "clean", "depclean")

	# (iteration header over myfiles elided)
	if a.startswith(SETPREFIX):
		# support simple set operations (intersection, difference and union)
		# on the commandline. Expressions are evaluated strictly left-to-right
		if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
			expression = a[len(SETPREFIX):]
			# Peel operators off the right end; operands accumulate in
			# expr_sets/expr_ops (initializations elided).
			while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
				is_pos = expression.rfind(IS_OPERATOR)
				diff_pos = expression.rfind(DIFF_OPERATOR)
				union_pos = expression.rfind(UNION_OPERATOR)
				op_pos = max(is_pos, diff_pos, union_pos)
				s1 = expression[:op_pos]
				s2 = expression[op_pos+len(IS_OPERATOR):]
				op = expression[op_pos:op_pos+len(IS_OPERATOR)]
				# (existence check for s2 elided)
				display_missing_pkg_set(root_config, s2)
				expr_sets.insert(0, s2)
				expr_ops.insert(0, op)
				# (reassignment of expression -- presumably to s1 -- elided)
			if not expression in sets:
				display_missing_pkg_set(root_config, expression)
			expr_sets.insert(0, expression)
			result = set(setconfig.getSetAtoms(expression))
			# Apply the collected operators left-to-right.
			for i in range(0, len(expr_ops)):
				s2 = setconfig.getSetAtoms(expr_sets[i+1])
				if expr_ops[i] == IS_OPERATOR:
					result.intersection_update(s2)
				elif expr_ops[i] == DIFF_OPERATOR:
					result.difference_update(s2)
				elif expr_ops[i] == UNION_OPERATOR:
					# (union update and the trailing "else:" header elided)
					raise NotImplementedError("unknown set operator %s" % expr_ops[i])
			newargs.extend(result)
		# (else branch header for a plain @set argument appears elided)
		s = a[len(SETPREFIX):]
		# (existence check for s elided)
		display_missing_pkg_set(root_config, s)
		setconfig.active.append(s)
		# (the "try:" header paired with the except below is elided)
		set_atoms = setconfig.getSetAtoms(s)
		except portage.exception.PackageSetNotFound, e:
			writemsg_level(("emerge: the given set '%s' " + \
				"contains a non-existent set named '%s'.\n") % \
				(s, e), level=logging.ERROR, noiselevel=-1)
		if myaction in unmerge_actions and \
			not sets[s].supportsOperation("unmerge"):
			sys.stderr.write("emerge: the given set '%s' does " % s + \
				"not support unmerge operations\n")
		elif not set_atoms:
			print "emerge: '%s' is an empty set" % s
		elif myaction not in do_not_expand:
			newargs.extend(set_atoms)
		# (else branch header elided: keep the @set token unexpanded)
		newargs.append(SETPREFIX+s)
		for e in sets[s].errors:
			# (error output statement elided)
	return (newargs, retval)
def repo_name_check(trees):
	"""Warn about repositories lacking profiles/repo_name; return True if
	any are missing.

	NOTE(review): the loop header over repos and the msg-list
	initialization appear elided in this listing.
	"""
	missing_repo_names = set()
	for root, root_trees in trees.iteritems():
		if "porttree" in root_trees:
			portdb = root_trees["porttree"].dbapi
			# Start by assuming every tree is missing its name, then
			# discard those that have a registered repository path.
			missing_repo_names.update(portdb.porttrees)
			repos = portdb.getRepositories()
			# (loop header "for r in repos:" elided)
			missing_repo_names.discard(portdb.getRepositoryPath(r))
			if portdb.porttree_root in missing_repo_names and \
				not os.path.exists(os.path.join(
				portdb.porttree_root, "profiles")):
				# This is normal if $PORTDIR happens to be empty,
				# so don't warn about it.
				missing_repo_names.remove(portdb.porttree_root)

	if missing_repo_names:
		# (msg list initialization elided)
		msg.append("WARNING: One or more repositories " + \
			"have missing repo_name entries:")
		for p in missing_repo_names:
			msg.append("\t%s/profiles/repo_name" % (p,))
		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
			"should be a plain text file containing a unique " + \
			"name for the repository on the first line.", 70))
		writemsg_level("".join("%s\n" % l for l in msg),
			level=logging.WARNING, noiselevel=-1)

	return bool(missing_repo_names)
def config_protect_check(trees):
	"""Warn, per root, when CONFIG_PROTECT is unset or empty."""
	for root, root_trees in trees.iteritems():
		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
			msg = "!!! CONFIG_PROTECT is empty"
			# (condition restricting the root suffix -- presumably to
			# non-"/" roots -- appears elided in this listing)
			msg += " for '%s'" % root
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""Tell the user that short name `arg` matches several packages and
	list the fully-qualified candidates.

	NOTE(review): the early return after the quiet branch, the
	continuation of the insert_category_into_atom() call and the search
	output invocations appear elided in this listing.
	"""
	if "--quiet" in myopts:
		# Quiet mode: just print the candidate category/package names.
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print " " + colorize("INFORM", cp)
		# (early return appears elided)

	# Verbose mode: reuse the search machinery to show full entries.
	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	null_cp = portage.dep_getkey(insert_category_into_atom(
	# (continuation/closing of the call above is elided in this listing)
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
		# (per-candidate search invocation appears elided)
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
	"""Verify a usable profile is configured for every root.

	Presumably returns os.EX_OK on success and a nonzero value otherwise
	(the caller compares the result against os.EX_OK); the return
	statements themselves are elided in this listing -- confirm.
	"""
	# info/sync (and --version/--help) must keep working even with a
	# broken profile, so they skip the check. (Return statements elided.)
	if myaction in ("info", "sync"):
	elif "--version" in myopts or "--help" in myopts:
	for root, root_trees in trees.iteritems():
		if root_trees["root_config"].settings.profiles:
			# (skip/continue for the valid-profile case elided)
		# generate some profile related warning messages
		validate_ebuild_environment(trees)
		msg = "If you have just changed your profile configuration, you " + \
			"should revert back to the previous configuration. Due to " + \
			"your current profile being invalid, allowed actions are " + \
			"limited to --help, --info, --sync, and --version."
		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
			level=logging.ERROR, noiselevel=-1)
15266 global portage # NFC why this is necessary now - genone
15267 portage._disable_legacy_globals()
15268 # Disable color until we're sure that it should be enabled (after
15269 # EMERGE_DEFAULT_OPTS has been parsed).
15270 portage.output.havecolor = 0
15271 # This first pass is just for options that need to be known as early as
15272 # possible, such as --config-root. They will be parsed again later,
15273 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15274 # the value of --config-root).
15275 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15276 if "--debug" in myopts:
15277 os.environ["PORTAGE_DEBUG"] = "1"
15278 if "--config-root" in myopts:
15279 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15281 # Portage needs to ensure a sane umask for the files it creates.
15283 settings, trees, mtimedb = load_emerge_config()
15284 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15285 rval = profile_check(trees, myaction, myopts)
15286 if rval != os.EX_OK:
15289 if portage._global_updates(trees, mtimedb["updates"]):
15291 # Reload the whole config from scratch.
15292 settings, trees, mtimedb = load_emerge_config(trees=trees)
15293 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15295 xterm_titles = "notitles" not in settings.features
15298 if "--ignore-default-opts" not in myopts:
15299 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15300 tmpcmdline.extend(sys.argv[1:])
15301 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15303 if "--digest" in myopts:
15304 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15305 # Reload the whole config from scratch so that the portdbapi internal
15306 # config is updated with new FEATURES.
15307 settings, trees, mtimedb = load_emerge_config(trees=trees)
15308 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15310 for myroot in trees:
15311 mysettings = trees[myroot]["vartree"].settings
15312 mysettings.unlock()
15313 adjust_config(myopts, mysettings)
15314 if '--pretend' not in myopts and myaction in \
15315 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15316 mysettings["PORTAGE_COUNTER_HASH"] = \
15317 trees[myroot]["vartree"].dbapi._counter_hash()
15318 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15320 del myroot, mysettings
15322 apply_priorities(settings)
15324 spinner = stdout_spinner()
15325 if "candy" in settings.features:
15326 spinner.update = spinner.update_scroll
15328 if "--quiet" not in myopts:
15329 portage.deprecated_profile_check(settings=settings)
15330 repo_name_check(trees)
15331 config_protect_check(trees)
15333 eclasses_overridden = {}
15334 for mytrees in trees.itervalues():
15335 mydb = mytrees["porttree"].dbapi
15336 # Freeze the portdbapi for performance (memoize all xmatch results).
15338 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15341 if eclasses_overridden and \
15342 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15343 prefix = bad(" * ")
15344 if len(eclasses_overridden) == 1:
15345 writemsg(prefix + "Overlay eclass overrides " + \
15346 "eclass from PORTDIR:\n", noiselevel=-1)
15348 writemsg(prefix + "Overlay eclasses override " + \
15349 "eclasses from PORTDIR:\n", noiselevel=-1)
15350 writemsg(prefix + "\n", noiselevel=-1)
15351 for eclass_name in sorted(eclasses_overridden):
15352 writemsg(prefix + " '%s/%s.eclass'\n" % \
15353 (eclasses_overridden[eclass_name], eclass_name),
15355 writemsg(prefix + "\n", noiselevel=-1)
15356 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15357 "because it will trigger invalidation of cached ebuild metadata " + \
15358 "that is distributed with the portage tree. If you must " + \
15359 "override eclasses from PORTDIR then you are advised to add " + \
15360 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15361 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15362 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15363 "you would like to disable this warning."
15364 from textwrap import wrap
15365 for line in wrap(msg, 72):
15366 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15368 if "moo" in myfiles:
15371 Larry loves Gentoo (""" + platform.system() + """)
15373 _______________________
15374 < Have you mooed today? >
15375 -----------------------
15385 ext = os.path.splitext(x)[1]
15386 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15387 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15390 root_config = trees[settings["ROOT"]]["root_config"]
15391 if myaction == "list-sets":
15392 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15396 # only expand sets for actions taking package arguments
15397 oldargs = myfiles[:]
15398 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15399 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15400 if retval != os.EX_OK:
15403 # Need to handle empty sets specially, otherwise emerge will react
15404 # with the help message for empty argument lists
15405 if oldargs and not myfiles:
15406 print "emerge: no targets left after set expansion"
15409 if ("--tree" in myopts) and ("--columns" in myopts):
15410 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15413 if ("--quiet" in myopts):
15414 spinner.update = spinner.update_quiet
15415 portage.util.noiselimit = -1
15417 # Always create packages if FEATURES=buildpkg
15418 # Imply --buildpkg if --buildpkgonly
15419 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15420 if "--buildpkg" not in myopts:
15421 myopts["--buildpkg"] = True
15423 # Always try and fetch binary packages if FEATURES=getbinpkg
15424 if ("getbinpkg" in settings.features):
15425 myopts["--getbinpkg"] = True
15427 if "--buildpkgonly" in myopts:
15428 # --buildpkgonly will not merge anything, so
15429 # it cancels all binary package options.
15430 for opt in ("--getbinpkg", "--getbinpkgonly",
15431 "--usepkg", "--usepkgonly"):
15432 myopts.pop(opt, None)
15434 if "--fetch-all-uri" in myopts:
15435 myopts["--fetchonly"] = True
15437 if "--skipfirst" in myopts and "--resume" not in myopts:
15438 myopts["--resume"] = True
15440 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15441 myopts["--usepkgonly"] = True
15443 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15444 myopts["--getbinpkg"] = True
15446 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15447 myopts["--usepkg"] = True
15449 # Also allow -K to apply --usepkg/-k
15450 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15451 myopts["--usepkg"] = True
15453 # Allow -p to remove --ask
15454 if ("--pretend" in myopts) and ("--ask" in myopts):
15455 print ">>> --pretend disables --ask... removing --ask from options."
15456 del myopts["--ask"]
15458 # forbid --ask when not in a terminal
15459 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15460 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15461 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15465 if settings.get("PORTAGE_DEBUG", "") == "1":
15466 spinner.update = spinner.update_quiet
15468 if "python-trace" in settings.features:
15469 import portage.debug
15470 portage.debug.set_trace(True)
15472 if not ("--quiet" in myopts):
15473 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15474 spinner.update = spinner.update_basic
15476 if myaction == 'version':
15477 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15478 settings.profile_path, settings["CHOST"],
15479 trees[settings["ROOT"]]["vartree"].dbapi)
15481 elif "--help" in myopts:
15482 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15485 if "--debug" in myopts:
15486 print "myaction", myaction
15487 print "myopts", myopts
15489 if not myaction and not myfiles and "--resume" not in myopts:
15490 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15493 pretend = "--pretend" in myopts
15494 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15495 buildpkgonly = "--buildpkgonly" in myopts
15497 # check if root user is the current user for the actions where emerge needs this
15498 if portage.secpass < 2:
15499 # We've already allowed "--version" and "--help" above.
15500 if "--pretend" not in myopts and myaction not in ("search","info"):
15501 need_superuser = not \
15503 (buildpkgonly and secpass >= 1) or \
15504 myaction in ("metadata", "regen") or \
15505 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15506 if portage.secpass < 1 or \
15509 access_desc = "superuser"
15511 access_desc = "portage group"
15512 # Always show portage_group_warning() when only portage group
15513 # access is required but the user is not in the portage group.
15514 from portage.data import portage_group_warning
15515 if "--ask" in myopts:
15516 myopts["--pretend"] = True
15517 del myopts["--ask"]
15518 print ("%s access is required... " + \
15519 "adding --pretend to options.\n") % access_desc
15520 if portage.secpass < 1 and not need_superuser:
15521 portage_group_warning()
15523 sys.stderr.write(("emerge: %s access is " + \
15524 "required.\n\n") % access_desc)
15525 if portage.secpass < 1 and not need_superuser:
15526 portage_group_warning()
15529 disable_emergelog = False
15530 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15532 disable_emergelog = True
15534 if myaction in ("search", "info"):
15535 disable_emergelog = True
15536 if disable_emergelog:
15537 """ Disable emergelog for everything except build or unmerge
15538 operations. This helps minimize parallel emerge.log entries that can
15539 confuse log parsers. We especially want it disabled during
15540 parallel-fetch, which uses --resume --fetchonly."""
15542 def emergelog(*pargs, **kargs):
15545 if not "--pretend" in myopts:
15546 emergelog(xterm_titles, "Started emerge on: "+\
15547 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15550 myelogstr=" ".join(myopts)
15552 myelogstr+=" "+myaction
15554 myelogstr += " " + " ".join(oldargs)
15555 emergelog(xterm_titles, " *** emerge " + myelogstr)
15558 def emergeexitsig(signum, frame):
15559 signal.signal(signal.SIGINT, signal.SIG_IGN)
15560 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15561 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15562 sys.exit(100+signum)
15563 signal.signal(signal.SIGINT, emergeexitsig)
15564 signal.signal(signal.SIGTERM, emergeexitsig)
15567 """This gets out final log message in before we quit."""
15568 if "--pretend" not in myopts:
15569 emergelog(xterm_titles, " *** terminating.")
15570 if "notitles" not in settings.features:
15572 portage.atexit_register(emergeexit)
15574 if myaction in ("config", "metadata", "regen", "sync"):
15575 if "--pretend" in myopts:
15576 sys.stderr.write(("emerge: The '%s' action does " + \
15577 "not support '--pretend'.\n") % myaction)
15580 if "sync" == myaction:
15581 return action_sync(settings, trees, mtimedb, myopts, myaction)
15582 elif "metadata" == myaction:
15583 action_metadata(settings, portdb, myopts)
15584 elif myaction=="regen":
15585 validate_ebuild_environment(trees)
15586 return action_regen(settings, portdb, myopts.get("--jobs"),
15587 myopts.get("--load-average"))
15589 elif "config"==myaction:
15590 validate_ebuild_environment(trees)
15591 action_config(settings, trees, myopts, myfiles)
15594 elif "search"==myaction:
15595 validate_ebuild_environment(trees)
15596 action_search(trees[settings["ROOT"]]["root_config"],
15597 myopts, myfiles, spinner)
15598 elif myaction in ("clean", "unmerge") or \
15599 (myaction == "prune" and "--nodeps" in myopts):
15600 validate_ebuild_environment(trees)
15602 # Ensure atoms are valid before calling unmerge().
15603 # For backward compat, leading '=' is not required.
15605 if is_valid_package_atom(x) or \
15606 is_valid_package_atom("=" + x):
15609 msg.append("'%s' is not a valid package atom." % (x,))
15610 msg.append("Please check ebuild(5) for full details.")
15611 writemsg_level("".join("!!! %s\n" % line for line in msg),
15612 level=logging.ERROR, noiselevel=-1)
15615 # When given a list of atoms, unmerge
15616 # them in the order given.
15617 ordered = myaction == "unmerge"
15618 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15619 mtimedb["ldpath"], ordered=ordered):
15620 if not (buildpkgonly or fetchonly or pretend):
15621 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15623 elif myaction in ("depclean", "info", "prune"):
15625 # Ensure atoms are valid before calling unmerge().
15626 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15629 if is_valid_package_atom(x):
15631 valid_atoms.append(
15632 portage.dep_expand(x, mydb=vardb, settings=settings))
15633 except portage.exception.AmbiguousPackageName, e:
15634 msg = "The short ebuild name \"" + x + \
15635 "\" is ambiguous. Please specify " + \
15636 "one of the following " + \
15637 "fully-qualified ebuild names instead:"
15638 for line in textwrap.wrap(msg, 70):
15639 writemsg_level("!!! %s\n" % (line,),
15640 level=logging.ERROR, noiselevel=-1)
15642 writemsg_level(" %s\n" % colorize("INFORM", i),
15643 level=logging.ERROR, noiselevel=-1)
15644 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15648 msg.append("'%s' is not a valid package atom." % (x,))
15649 msg.append("Please check ebuild(5) for full details.")
15650 writemsg_level("".join("!!! %s\n" % line for line in msg),
15651 level=logging.ERROR, noiselevel=-1)
15654 if myaction == "info":
15655 return action_info(settings, trees, myopts, valid_atoms)
15657 validate_ebuild_environment(trees)
15658 action_depclean(settings, trees, mtimedb["ldpath"],
15659 myopts, myaction, valid_atoms, spinner)
15660 if not (buildpkgonly or fetchonly or pretend):
15661 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15662 # "update", "system", or just process files:
15664 validate_ebuild_environment(trees)
15665 if "--pretend" not in myopts:
15666 display_news_notification(root_config, myopts)
15667 retval = action_build(settings, trees, mtimedb,
15668 myopts, myaction, myfiles, spinner)
15669 root_config = trees[settings["ROOT"]]["root_config"]
15670 post_emerge(root_config, myopts, mtimedb, retval)