2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
# Progress spinner written to the controlling tty. Three cosmetic modes
# (basic dots, scrolling message, twirling bar) plus a quiet no-op mode.
# NOTE(review): this listing is missing interior lines (the embedded line
# numbers skip), so method bodies are incomplete here; code kept byte-identical.
70 class stdout_spinner(object):
# scroll_msgs: pool of strings shown by update_scroll(); one is picked
# pseudo-randomly from the clock in the constructor.
72 "Gentoo Rocks ("+platform.system()+")",
73 "Thank you for using Gentoo. :)",
74 "Are you actually trying to read this?",
75 "How many times have you stared at this?",
76 "We are generating the cache right now",
77 "You are paying too much attention.",
78 "A theory is better than its explanation.",
79 "Phasers locked on target, Captain.",
80 "Thrashing is just virtual crashing.",
81 "To be is to program.",
82 "Real Users hate Real Programmers.",
83 "When all else fails, read the instructions.",
84 "Functionality breeds Contempt.",
85 "The future lies ahead.",
86 "3.1415926535897932384626433832795028841971694",
87 "Sometimes insanity is the only alternative.",
88 "Inaccuracy saves a world of explanation.",
# Repeating cycle of characters for the twirl animation.
91 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default update mode is the twirl; the scroll message is selected by
# hashing the current time into the message list.
95 self.update = self.update_twirl
96 self.scroll_sequence = self.scroll_msgs[
97 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (rate limiter).
99 self.min_display_latency = 0.05
101 def _return_early(self):
# Docstring (typo "ouput" corrected):
103 Flushing output to the tty too frequently wastes cpu time. Therefore,
104 each update* method should return without doing any output when this
107 cur_time = time.time()
108 if cur_time - self.last_update < self.min_display_latency:
110 self.last_update = cur_time
113 def update_basic(self):
# Emit a dot every 100th call; spinpos wraps at 500.
114 self.spinpos = (self.spinpos + 1) % 500
115 if self._return_early():
117 if (self.spinpos % 100) == 0:
118 if self.spinpos == 0:
119 sys.stdout.write(". ")
121 sys.stdout.write(".")
124 def update_scroll(self):
125 if self._return_early():
# First half of the cycle scrolls forward (green), second half
# scrolls the message back out (darkgreen, with backspaces).
127 if(self.spinpos >= len(self.scroll_sequence)):
128 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135 def update_twirl(self):
136 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137 if self._return_early():
139 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# Quiet mode: body not visible in this listing; presumably a no-op.
142 def update_quiet(self):
# Interactive yes/no (or arbitrary-choice) prompt helper.
# NOTE(review): interior lines are missing from this listing; kept byte-identical.
145 def userquery(prompt, responses=None, colours=None):
146 """Displays a prompt and a set of responses, then waits for a response
147 which is checked against the responses and the first to match is
148 returned. An empty response will match the first value in responses. The
149 input buffer is *not* cleared prior to the prompt!
152 responses: a List of Strings.
153 colours: a List of Functions taking and returning a String, used to
154 process the responses for display. Typically these will be functions
155 like red() but could be e.g. lambda x: "DisplayString".
156 If responses is omitted, defaults to ["Yes", "No"], [green, red].
157 If only colours is omitted, defaults to [bold, ...].
159 Returns a member of the List responses. (If called without optional
160 arguments, returns "Yes" or "No".)
161 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163 if responses is None:
164 responses = ["Yes", "No"]
# Default colours come from the user's PROMPT_CHOICE_* colour classes.
166 create_color_func("PROMPT_CHOICE_DEFAULT"),
167 create_color_func("PROMPT_CHOICE_OTHER")
169 elif colours is None:
# Cycle the supplied colours so there is one per response.
171 colours=(colours*len(responses))[:len(responses)]
175 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176 for key in responses:
177 # An empty response will match the first value in responses.
178 if response.upper()==key[:len(response)].upper():
180 print "Sorry, response '%s' not understood." % response,
181 except (EOFError, KeyboardInterrupt):
# Recognized emerge action words (non-option positional arguments).
# NOTE(review): several entries/closing brackets are missing from this listing.
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen", "search",
189 "sync", "unmerge", "version",
# Long-form command-line options accepted by emerge.
192 "--ask", "--alphabetical",
193 "--buildpkg", "--buildpkgonly",
194 "--changelog", "--columns",
199 "--fetchonly", "--fetch-all-uri",
200 "--getbinpkg", "--getbinpkgonly",
201 "--help", "--ignore-default-opts",
204 "--newuse", "--nocolor",
205 "--nodeps", "--noreplace",
206 "--nospinner", "--oneshot",
207 "--onlydeps", "--pretend",
208 "--quiet", "--resume",
209 "--searchdesc", "--selective",
213 "--usepkg", "--usepkgonly",
# Single-letter short option -> long option mapping.
220 "b":"--buildpkg", "B":"--buildpkgonly",
221 "c":"--clean", "C":"--unmerge",
222 "d":"--debug", "D":"--deep",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "k":"--usepkg", "K":"--usepkgonly",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps", "O":"--nodeps",
231 "p":"--pretend", "P":"--prune",
233 "s":"--search", "S":"--searchdesc",
236 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log and optionally set the
# xterm title to a short status message. Errors are reported to stderr
# rather than raised. NOTE(review): try/finally and seek lines are missing
# from this listing; kept byte-identical.
239 def emergelog(xterm_titles, mystr, short_msg=None):
240 if xterm_titles and short_msg:
241 if "HOSTNAME" in os.environ:
# Prefix the short message with the host name for multi-host setups.
242 short_msg = os.environ["HOSTNAME"]+": "+short_msg
243 xtermTitle(short_msg)
245 file_path = "/var/log/emerge.log"
246 mylogfile = open(file_path, "a")
# Keep the log readable/writable by the portage user/group.
247 portage.util.apply_secpass_permissions(file_path,
248 uid=portage.portage_uid, gid=portage.portage_gid,
# Lock the file so concurrent emerge processes don't interleave writes.
252 mylock = portage.locks.lockfile(mylogfile)
253 # seek because we may have gotten held up by the lock.
254 # if so, we may not be positioned at the end of the file.
256 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
260 portage.locks.unlockfile(mylock)
# Best-effort logging: swallow filesystem/portage errors, print to stderr.
262 except (IOError,OSError,portage.exception.PortageException), e:
264 print >> sys.stderr, "emergelog():",e
# Print a visible countdown of `secs` seconds before an action starts,
# giving the user a chance to Control-C. The loop body is missing from
# this listing; code kept byte-identical.
266 def countdown(secs=5, doing="Starting"):
268 print ">>> Waiting",secs,"seconds before starting..."
269 print ">>> (Control-C to abort)...\n"+doing+" in: ",
273 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
278 # formats a size given in bytes nicely
279 def format_size(mysize):
# Non-numeric input path (handling lines missing from this listing).
280 if isinstance(mysize, basestring):
281 if 0 != mysize % 1024:
282 # Always round up to the next kB so that it doesn't show 0 kB when
283 # some small file still needs to be fetched.
285 mysize += 1024 - mysize % 1024
286 mystr=str(mysize/1024)
# Insert a thousands separator; surrounding loop lines not visible here.
290 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version for the given CHOST, trying (in order):
# gcc-config, ${CHOST}-gcc -dumpversion, then plain gcc -dumpversion.
# Returns a "gcc-X.Y.Z" style string, or "[unavailable]" after printing
# a warning if nothing works.
294 def getgccversion(chost):
297 return: the current in-use gcc version
300 gcc_ver_command = 'gcc -dumpversion'
301 gcc_ver_prefix = 'gcc-'
303 gcc_not_found_error = red(
304 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305 "!!! to update the environment of this terminal and possibly\n" +
306 "!!! other terminals also.\n"
# Attempt 1: gcc-config reports the selected profile as "<chost>-<ver>".
309 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Attempt 2: ask the cross/native compiler binary directly.
313 mystatus, myoutput = commands.getstatusoutput(
314 chost + "-" + gcc_ver_command)
315 if mystatus == os.EX_OK:
316 return gcc_ver_prefix + myoutput
# Attempt 3: fall back to whatever "gcc" is on PATH.
318 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319 if mystatus == os.EX_OK:
320 return gcc_ver_prefix + myoutput
322 portage.writemsg(gcc_not_found_error, noiselevel=-1)
323 return "[unavailable]"
# Build the one-line banner string "Portage <ver> (<profile>, <gcc>, <libc>,
# <kernel/arch>)" shown by emerge --version / --info.
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326 profilever = "unavailable"
# Express the profile path relative to ${PORTDIR}/profiles when possible.
328 realpath = os.path.realpath(profile)
329 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
330 if realpath.startswith(basepath):
331 profilever = realpath[1 + len(basepath):]
# Fallback: show the raw symlink target, marked with "!".
334 profilever = "!" + os.readlink(profile)
337 del realpath, basepath
# Collect installed libc versions from both virtual/libc and virtual/glibc.
340 libclist = vardb.match("virtual/libc")
341 libclist += vardb.match("virtual/glibc")
342 libclist = portage.util.unique_array(libclist)
344 xs=portage.catpkgsplit(x)
# Comma-join when multiple libc providers are installed.
346 libcver+=","+"-".join(xs[1:])
348 libcver="-".join(xs[1:])
350 libcver="unavailable"
352 gccver = getgccversion(chost)
353 unameout=platform.release()+" "+platform.machine()
355 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate command-line options / the action into the set of depgraph
# parameter flags. Returns a set of parameter-name strings (return line
# not visible in this listing).
357 def create_depgraph_params(myopts, myaction):
358 #configure emerge engine parameters
360 # self: include _this_ package regardless of if it is merged.
361 # selective: exclude the package if it is merged
362 # recurse: go into the dependencies
363 # deep: go into the dependencies of already merged packages
364 # empty: pretend nothing is merged
365 # complete: completely account for all known dependencies
366 # remove: build graph for use in removing packages
367 myparams = set(["recurse"])
369 if myaction == "remove":
370 myparams.add("remove")
371 myparams.add("complete")
# Options that make the calculation skip already-satisfied packages.
374 if "--update" in myopts or \
375 "--newuse" in myopts or \
376 "--reinstall" in myopts or \
377 "--noreplace" in myopts:
378 myparams.add("selective")
# --emptytree overrides selectivity: rebuild everything.
379 if "--emptytree" in myopts:
380 myparams.add("empty")
381 myparams.discard("selective")
382 if "--nodeps" in myopts:
383 myparams.discard("recurse")
384 if "--deep" in myopts:
386 if "--complete-graph" in myopts:
387 myparams.add("complete")
390 # search functionality
# Package-search engine behind `emerge --search` / `--searchdesc`.
# Aggregates matches across the ebuild tree (portdb), binary packages
# (bindb) and installed packages (vardb) through a fake portdb facade.
# NOTE(review): this listing omits many interior lines (class attributes
# such as VERSION_RELEASE, try/except arms, returns); code byte-identical.
391 class search(object):
402 def __init__(self, root_config, spinner, searchdesc,
403 verbose, usepkg, usepkgonly):
404 """Searches the available and installed packages for the supplied search key.
405 The list of available and installed packages is created at object instantiation.
406 This makes successive searches faster."""
407 self.settings = root_config.settings
408 self.vartree = root_config.trees["vartree"]
409 self.spinner = spinner
410 self.verbose = verbose
411 self.searchdesc = searchdesc
412 self.root_config = root_config
413 self.setconfig = root_config.setconfig
414 self.matches = {"pkg" : []}
# Facade object whose dbapi-like methods delegate to our _-prefixed
# multiplexing implementations below.
419 self.portdb = fake_portdb
420 for attrib in ("aux_get", "cp_all",
421 "xmatch", "findname", "getFetchMap"):
422 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
426 portdb = root_config.trees["porttree"].dbapi
427 bindb = root_config.trees["bintree"].dbapi
428 vardb = root_config.trees["vartree"].dbapi
# Which databases get searched depends on --usepkg/--usepkgonly.
430 if not usepkgonly and portdb._have_root_eclass_dir:
431 self._dbs.append(portdb)
433 if (usepkg or usepkgonly) and bindb.cp_all():
434 self._dbs.append(bindb)
436 self._dbs.append(vardb)
437 self._portdb = portdb
# _cp_all: union of category/package names across all searched dbs.
442 cp_all.update(db.cp_all())
443 return list(sorted(cp_all))
445 def _aux_get(self, *args, **kwargs):
# Delegates to the first db that can answer (loop lines not visible).
448 return db.aux_get(*args, **kwargs)
453 def _findname(self, *args, **kwargs):
455 if db is not self._portdb:
456 # We don't want findname to return anything
457 # unless it's an ebuild in a portage tree.
458 # Otherwise, it's already built and we don't
461 func = getattr(db, "findname", None)
463 value = func(*args, **kwargs)
468 def _getFetchMap(self, *args, **kwargs):
470 func = getattr(db, "getFetchMap", None)
472 value = func(*args, **kwargs)
477 def _visible(self, db, cpv, metadata):
# Visibility check honouring where the package comes from.
478 installed = db is self.vartree.dbapi
479 built = installed or db is not self._portdb
482 pkg_type = "installed"
485 return visible(self.settings,
486 Package(type_name=pkg_type, root_config=self.root_config,
487 cpv=cpv, built=built, installed=installed, metadata=metadata))
489 def _xmatch(self, level, atom):
# Docstring (typo "virual" corrected):
491 This method does not expand old-style virtuals because it
492 is restricted to returning matches for a single ${CATEGORY}/${PN}
493 and old-style virtual matches are unreliable for that when querying
494 multiple package databases. If necessary, old-style virtuals
495 can be performed on atoms prior to calling this method.
497 cp = portage.dep_getkey(atom)
498 if level == "match-all":
# Union of every match from every db, filtered back to this cp.
501 if hasattr(db, "xmatch"):
502 matches.update(db.xmatch(level, atom))
504 matches.update(db.match(atom))
505 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506 db._cpv_sort_ascending(result)
507 elif level == "match-visible":
510 if hasattr(db, "xmatch"):
511 matches.update(db.xmatch(level, atom))
# dbs without xmatch: filter plain matches through _visible().
513 db_keys = list(db._aux_cache_keys)
514 for cpv in db.match(atom):
515 metadata = izip(db_keys,
516 db.aux_get(cpv, db_keys))
517 if not self._visible(db, cpv, metadata):
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
522 elif level == "bestmatch-visible":
# Keep the overall best visible cpv across all dbs.
525 if hasattr(db, "xmatch"):
526 cpv = db.xmatch("bestmatch-visible", atom)
527 if not cpv or portage.cpv_getkey(cpv) != cp:
529 if not result or cpv == portage.best([cpv, result]):
532 db_keys = Package.metadata_keys
533 # break out of this loop with highest visible
534 # match, checked in descending order
535 for cpv in reversed(db.match(atom)):
536 if portage.cpv_getkey(cpv) != cp:
538 metadata = izip(db_keys,
539 db.aux_get(cpv, db_keys))
540 if not self._visible(db, cpv, metadata):
542 if not result or cpv == portage.best([cpv, result]):
# Any other match level is unsupported by this facade.
546 raise NotImplementedError(level)
549 def execute(self,searchkey):
550 """Performs the search for the supplied search key"""
552 self.searchkey=searchkey
553 self.packagematches = []
# With --searchdesc, descriptions are searched too ("desc" bucket).
556 self.matches = {"pkg":[], "desc":[], "set":[]}
559 self.matches = {"pkg":[], "set":[]}
560 print "Searching... ",
# Leading '%' selects regex mode, leading '@' restricts to full
# category/package matching (flag assignments not visible here).
563 if self.searchkey.startswith('%'):
565 self.searchkey = self.searchkey[1:]
566 if self.searchkey.startswith('@'):
568 self.searchkey = self.searchkey[1:]
570 self.searchre=re.compile(self.searchkey,re.I)
572 self.searchre=re.compile(re.escape(self.searchkey), re.I)
573 for package in self.portdb.cp_all():
574 self.spinner.update()
577 match_string = package[:]
579 match_string = package.split("/")[-1]
582 if self.searchre.search(match_string):
# No visible version at all => the package is masked.
583 if not self.portdb.xmatch("match-visible", package):
585 self.matches["pkg"].append([package,masked])
586 elif self.searchdesc: # DESCRIPTION searching
587 full_package = self.portdb.xmatch("bestmatch-visible", package)
589 #no match found; we don't want to query description
590 full_package = portage.best(
591 self.portdb.xmatch("match-all", package))
597 full_desc = self.portdb.aux_get(
598 full_package, ["DESCRIPTION"])[0]
600 print "emerge: search: aux_get() failed, skipping"
602 if self.searchre.search(full_desc):
603 self.matches["desc"].append([full_package,masked])
# Also search package sets (names and, optionally, descriptions).
605 self.sdict = self.setconfig.getSets()
606 for setname in self.sdict:
607 self.spinner.update()
609 match_string = setname
611 match_string = setname.split("/")[-1]
613 if self.searchre.search(match_string):
614 self.matches["set"].append([setname, False])
615 elif self.searchdesc:
616 if self.searchre.search(
617 self.sdict[setname].getMetadata("DESCRIPTION")):
618 self.matches["set"].append([setname, False])
# Sort each bucket and total the match count for output().
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
# Helper path used in '@' (full-cp) mode, presumably addCP — the def
# line is not visible in this listing.
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
# Set matches: print name plus set DESCRIPTION metadata.
656 print green("*")+" "+white(match)
657 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
661 desc, homepage, license = self.portdb.aux_get(
662 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664 print "emerge: search: aux_get() failed, skipping"
667 print green("*")+" "+white(match)+" "+red("[ Masked ]")
669 print green("*")+" "+white(match)
670 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
674 mycat = match.split("/")[0]
675 mypkg = match.split("/")[1]
676 mycpv = match + "-" + myversion
677 myebuild = self.portdb.findname(mycpv)
# Ebuild available: compute total distfile size from the Manifest.
679 pkgdir = os.path.dirname(myebuild)
680 from portage import manifest
681 mf = manifest.Manifest(
682 pkgdir, self.settings["DISTDIR"])
684 uri_map = self.portdb.getFetchMap(mycpv)
685 except portage.exception.InvalidDependString, e:
686 file_size_str = "Unknown (%s)" % (e,)
690 mysum[0] = mf.getDistfilesSize(uri_map)
692 file_size_str = "Unknown (missing " + \
693 "digest for %s)" % (e,)
# No ebuild: fall back to the size of the binary package file.
698 if db is not vardb and \
699 db.cpv_exists(mycpv):
701 if not myebuild and hasattr(db, "bintree"):
702 myebuild = db.bintree.getname(mycpv)
704 mysum[0] = os.stat(myebuild).st_size
# Format byte count as "N,NNN kB" with thousands separators.
709 if myebuild and file_size_str is None:
710 mystr = str(mysum[0] / 1024)
714 mystr = mystr[:mycount] + "," + mystr[mycount:]
715 file_size_str = mystr + " kB"
719 print " ", darkgreen("Latest version available:"),myversion
720 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
723 (darkgreen("Size of files:"), file_size_str)
724 print " ", darkgreen("Homepage:")+" ",homepage
725 print " ", darkgreen("Description:")+" ",desc
726 print " ", darkgreen("License:")+" ",license
731 def getInstallationStatus(self,package):
# Report the best installed version of `package`, or "[ Not Installed ]".
732 installed_package = self.vartree.dep_bestmatch(package)
734 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736 result = darkgreen("Latest version installed:")+" "+version
738 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741 def getVersion(self,full_package,detail):
# Extract "PV" or "PV-rN" from a cpv; -r0 revisions are suppressed.
742 if len(full_package) > 1:
743 package_parts = portage.catpkgsplit(full_package)
744 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745 result = package_parts[2]+ "-" + package_parts[3]
747 result = package_parts[2]
# Per-ROOT configuration bundle used internally by depgraph: settings,
# trees, sets and a virtual dbapi of visible packages.
752 class RootConfig(object):
753 """This is used internally by depgraph to track information about a
# pkg_tree_map: package type name -> tree name (reverse map built below).
757 "ebuild" : "porttree",
758 "binary" : "bintree",
759 "installed" : "vartree"
763 for k, v in pkg_tree_map.iteritems():
766 def __init__(self, settings, trees, setconfig):
768 self.settings = settings
# Implicit IUSE is frozen at construction time for stable comparisons.
769 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770 self.root = self.settings["ROOT"]
771 self.setconfig = setconfig
772 self.sets = self.setconfig.getSets()
773 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decide which atom (if any) should be recorded in the world file for a
# newly merged package. Returns a slot atom when slotting makes it
# necessary, the plain atom otherwise, or None when the package is
# adequately covered by the system set (return paths partially outside
# this listing).
775 def create_world_atom(pkg, args_set, root_config):
776 """Create a new atom for the world file if one does not exist. If the
777 argument atom is precise enough to identify a specific slot then a slot
778 atom will be returned. Atoms that are in the system set may also be stored
779 in world since system atoms can only match one slot while world atoms can
780 be greedy with respect to slots. Unslotted system packages will not be
783 arg_atom = args_set.findAtomForPackage(pkg)
786 cp = portage.dep_getkey(arg_atom)
788 sets = root_config.sets
789 portdb = root_config.trees["porttree"].dbapi
790 vardb = root_config.trees["vartree"].dbapi
# Slotted if more than one SLOT exists, or the single SLOT isn't "0".
791 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792 for cpv in portdb.match(cp))
793 slotted = len(available_slots) > 1 or \
794 (len(available_slots) == 1 and "0" not in available_slots)
796 # check the vdb in case this is multislot
797 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798 for cpv in vardb.match(cp))
799 slotted = len(available_slots) > 1 or \
800 (len(available_slots) == 1 and "0" not in available_slots)
801 if slotted and arg_atom != cp:
802 # If the user gave a specific atom, store it as a
803 # slot atom in the world file.
804 slot_atom = pkg.slot_atom
806 # For USE=multislot, there are a couple of cases to
809 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810 # unknown value, so just record an unslotted atom.
812 # 2) SLOT comes from an installed package and there is no
813 # matching SLOT in the portage tree.
815 # Make sure that the slot atom is available in either the
816 # portdb or the vardb, since otherwise the user certainly
817 # doesn't want the SLOT atom recorded in the world file
818 # (case 1 above). If it's only available in the vardb,
819 # the user may be trying to prevent a USE=multislot
820 # package from being removed by --depclean (case 2 above).
823 if not portdb.match(slot_atom):
824 # SLOT seems to come from an installed multislot package
826 # If there is no installed package matching the SLOT atom,
827 # it probably changed SLOT spontaneously due to USE=multislot,
828 # so just record an unslotted atom.
829 if vardb.match(slot_atom):
830 # Now verify that the argument is precise
831 # enough to identify a specific slot.
832 matches = mydb.match(arg_atom)
833 matched_slots = set()
835 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836 if len(matched_slots) == 1:
837 new_world_atom = slot_atom
839 if new_world_atom == sets["world"].findAtomForPackage(pkg):
840 # Both atoms would be identical, so there's nothing to add.
843 # Unlike world atoms, system atoms are not greedy for slots, so they
844 # can't be safely excluded from world if they are slotted.
845 system_atom = sets["system"].findAtomForPackage(pkg)
847 if not portage.dep_getkey(system_atom).startswith("virtual/"):
849 # System virtuals aren't safe to exclude from world since they can
850 # match multiple old-style virtuals but only one of them will be
851 # pulled in by update or depclean.
852 providers = portdb.mysettings.getvirtuals().get(
853 portage.dep_getkey(system_atom))
854 if providers and len(providers) == 1 and providers[0] == cp:
856 return new_world_atom
# Strip the leading +/- default markers from IUSE flags (generator/loop
# body partially outside this listing).
858 def filter_iuse_defaults(iuse):
860 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: initializes every name declared in __slots__
# (walking the full class hierarchy) from keyword arguments, and supports
# a slot-aware copy. Missing kwargs default to None.
866 class SlotObject(object):
867 __slots__ = ("__weakref__",)
869 def __init__(self, **kwargs):
# Walk self's class and all its bases, collecting __slots__ entries.
870 classes = [self.__class__]
875 classes.extend(c.__bases__)
876 slots = getattr(c, "__slots__", None)
880 myvalue = kwargs.get(myattr, None)
881 setattr(self, myattr, myvalue)
# copy() — method def line not visible in this listing.
885 Create a new instance and copy all attributes
886 defined from __slots__ (including those from
889 obj = self.__class__()
891 classes = [self.__class__]
896 classes.extend(c.__bases__)
897 slots = getattr(c, "__slots__", None)
901 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities: comparisons delegate to the
# subclass-defined __int__() so priorities compare like integers.
904 class AbstractDepPriority(SlotObject):
905 __slots__ = ("buildtime", "runtime", "runtime_post")
907 def __lt__(self, other):
908 return self.__int__() < other
910 def __le__(self, other):
911 return self.__int__() <= other
913 def __eq__(self, other):
914 return self.__int__() == other
916 def __ne__(self, other):
917 return self.__int__() != other
919 def __gt__(self, other):
920 return self.__int__() > other
922 def __ge__(self, other):
923 return self.__int__() >= other
# copy() — def line not visible; shallow copy suffices for slot data.
927 return copy.copy(self)
# Normal dependency priority; adds satisfied/optional/rebuild flags.
# __int__ and most of __str__ are outside this listing.
929 class DepPriority(AbstractDepPriority):
931 __slots__ = ("satisfied", "optional", "rebuild")
943 if self.runtime_post:
944 return "runtime_post"
# Priority used for blocker edges; a shared singleton instance is created
# immediately below (class body outside this listing).
947 class BlockerDepPriority(DepPriority):
955 BlockerDepPriority.instance = BlockerDepPriority()
# Priority for unmerge-order edges; the table documents the int mapping.
957 class UnmergeDepPriority(AbstractDepPriority):
958 __slots__ = ("optional", "satisfied",)
960 Combination of properties Priority Category
965 (none of the above) -2 SOFT
975 if self.runtime_post:
# __str__: anything above SOFT renders as "hard" (lines partially missing).
982 myvalue = self.__int__()
983 if myvalue > self.SOFT:
# Namespace of classmethod predicates used as ignore_priority callbacks
# when relaxing dependency edges during graph ordering. Each predicate
# returns True when the given priority may be ignored at that level.
987 class DepPriorityNormalRange(object):
989 DepPriority properties Index Category
993 runtime_post 2 MEDIUM_SOFT
995 (none of the above) 0 NONE
# Non-DepPriority instances are handled by the (not shown) early return.
1003 def _ignore_optional(cls, priority):
1004 if priority.__class__ is not DepPriority:
1006 return bool(priority.optional)
1009 def _ignore_runtime_post(cls, priority):
1010 if priority.__class__ is not DepPriority:
1012 return bool(priority.optional or priority.runtime_post)
1015 def _ignore_runtime(cls, priority):
1016 if priority.__class__ is not DepPriority:
1018 return not priority.buildtime
# Aliases giving the severity names used by callers.
1020 ignore_medium = _ignore_runtime
1021 ignore_medium_soft = _ignore_runtime_post
1022 ignore_soft = _ignore_optional
# Ordered tuple of the predicates, indexed by relaxation level (first
# entry, index 0, is outside this listing — presumably None).
1024 DepPriorityNormalRange.ignore_priority = (
1026 DepPriorityNormalRange._ignore_optional,
1027 DepPriorityNormalRange._ignore_runtime_post,
1028 DepPriorityNormalRange._ignore_runtime
# Like DepPriorityNormalRange, but with extra relaxation levels that
# distinguish satisfied dependencies (already-installed providers) from
# unsatisfied ones — see the index table in the class docstring.
1031 class DepPrioritySatisfiedRange(object):
1033 DepPriority Index Category
1035 not satisfied and buildtime HARD
1036 not satisfied and runtime 7 MEDIUM
1037 not satisfied and runtime_post 6 MEDIUM_SOFT
1038 satisfied and buildtime and rebuild 5 SOFT
1039 satisfied and buildtime 4 SOFT
1040 satisfied and runtime 3 SOFT
1041 satisfied and runtime_post 2 SOFT
1043 (none of the above) 0 NONE
# Each classmethod predicate returns True when the priority can be
# ignored at its level; early returns for non-DepPriority instances are
# outside this listing.
1051 def _ignore_optional(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional)
1057 def _ignore_satisfied_runtime_post(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 if priority.optional:
1062 if not priority.satisfied:
1064 return bool(priority.runtime_post)
1067 def _ignore_satisfied_runtime(cls, priority):
1068 if priority.__class__ is not DepPriority:
1070 if priority.optional:
1072 if not priority.satisfied:
1074 return not priority.buildtime
1077 def _ignore_satisfied_buildtime(cls, priority):
1078 if priority.__class__ is not DepPriority:
1080 if priority.optional:
1082 if not priority.satisfied:
1084 if priority.buildtime:
1085 return not priority.rebuild
1089 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090 if priority.__class__ is not DepPriority:
1092 if priority.optional:
1094 return bool(priority.satisfied)
1097 def _ignore_runtime_post(cls, priority):
1098 if priority.__class__ is not DepPriority:
1100 return bool(priority.optional or \
1101 priority.satisfied or \
1102 priority.runtime_post)
1105 def _ignore_runtime(cls, priority):
1106 if priority.__class__ is not DepPriority:
1108 return bool(priority.satisfied or \
1109 not priority.buildtime)
# Severity aliases used by callers.
1111 ignore_medium = _ignore_runtime
1112 ignore_medium_soft = _ignore_runtime_post
1113 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Ordered predicate tuple, indexed by relaxation level (index 0 entry is
# outside this listing — presumably None).
1115 DepPrioritySatisfiedRange.ignore_priority = (
1117 DepPrioritySatisfiedRange._ignore_optional,
1118 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122 DepPrioritySatisfiedRange._ignore_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_runtime
# Traverse the dependency graph from system-set packages, following only
# runtime/runtime_post edges, and return the set of Package nodes reached
# (the deep runtime closure of the system set). Uninstall nodes and
# non-Package nodes are skipped.
1126 def _find_deep_system_runtime_deps(graph):
1127 deep_system_deps = set()
# Seed the stack with system-set packages present in the graph.
1130 if not isinstance(node, Package) or \
1131 node.operation == 'uninstall':
1133 if node.root_config.sets['system'].findAtomForPackage(node):
1134 node_stack.append(node)
1136 def ignore_priority(priority):
1138 Ignore non-runtime priorities.
1140 if isinstance(priority, DepPriority) and \
1141 (priority.runtime or priority.runtime_post):
# Iterative DFS over runtime edges.
1146 node = node_stack.pop()
1147 if node in deep_system_deps:
1149 deep_system_deps.add(node)
1150 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151 if not isinstance(child, Package) or \
1152 child.operation == 'uninstall':
1154 node_stack.append(child)
1156 return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This is implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do it's
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Intialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
1274 def sync(self, acquire_lock=1):
# Re-synchronize this fake/cached vardb with the on-disk vardb: drop
# uninstalled packages, invalidate entries whose COUNTER/_mtime_ changed,
# and inject newly installed packages.  NOTE(review): many interior lines
# are elided (try/finally around the lock, the except clauses in the
# validation loop, slot_counters initialization) -- comments below only
# describe what is visible.
1276 Call this method to synchronize state with the real vardb
1277 after one or more packages may have been installed or
1280 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1282 # At least the parent needs to exist for the lock file.
1283 portage.util.ensure_dirs(vdb_path)
1284 except portage.exception.PortageException:
# Only lock when requested and the vdb directory is writable.
1288 if acquire_lock and os.access(vdb_path, os.W_OK):
1289 vdb_lock = portage.locks.lockdir(vdb_path)
1293 portage.locks.unlockdir(vdb_lock)
1297 real_vardb = self._root_config.trees["vartree"].dbapi
1298 current_cpv_set = frozenset(real_vardb.cpv_all())
1299 pkg_vardb = self.dbapi
1300 aux_get_history = self._aux_get_history
1302 # Remove any packages that have been uninstalled.
1303 for pkg in list(pkg_vardb):
1304 if pkg.cpv not in current_cpv_set:
1305 pkg_vardb.cpv_remove(pkg)
1306 aux_get_history.discard(pkg.cpv)
# Drop cached entries whose on-disk COUNTER or mtime no longer matches,
# then re-create them from the real vardb via self._pkg().
1308 # Validate counters and timestamps.
1311 validation_keys = ["COUNTER", "_mtime_"]
1312 for cpv in current_cpv_set:
1314 pkg_hash_key = ("installed", root, cpv, "nomerge")
1315 pkg = pkg_vardb.get(pkg_hash_key)
1317 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1319 counter = long(counter)
1323 if counter != pkg.counter or \
1325 pkg_vardb.cpv_remove(pkg)
1326 aux_get_history.discard(pkg.cpv)
1330 pkg = self._pkg(cpv)
# Track the highest counter seen per slot atom (presumably so the most
# recently merged package in a slot wins -- elided lines, confirm).
1332 other_counter = slot_counters.get(pkg.slot_atom)
1333 if other_counter is not None:
1334 if other_counter > pkg.counter:
1337 slot_counters[pkg.slot_atom] = pkg.counter
1338 pkg_vardb.cpv_inject(pkg)
1340 real_vardb.flush_cache()
1342 def _pkg(self, cpv):
# Build an installed-type Package instance for `cpv` from the real vardb's
# metadata (self._db_keys).  The COUNTER value is normalized through
# long() -> str().  NOTE(review): elided lines here likely include the
# try/except around the long() conversion and the return statement.
1343 root_config = self._root_config
1344 real_vardb = root_config.trees["vartree"].dbapi
1345 pkg = Package(cpv=cpv, installed=True,
1346 metadata=izip(self._db_keys,
1347 real_vardb.aux_get(cpv, self._db_keys)),
1348 root_config=root_config,
1349 type_name="installed")
1352 mycounter = long(pkg.metadata["COUNTER"])
1355 pkg.metadata["COUNTER"] = str(mycounter)
1359 def grab_global_updates(portdir):
# Read the profiles/updates entries under `portdir` and parse them into a
# flat list of update commands (package moves / slotmoves) accumulated in
# upd_commands.  NOTE(review): elided lines include the try:, the
# DirectoryNotFound handler body, upd_commands initialization and the
# return statement.
1360 from portage.update import grab_updates, parse_updates
1361 updpath = os.path.join(portdir, "profiles", "updates")
1363 rawupdates = grab_updates(updpath)
1364 except portage.exception.DirectoryNotFound:
1367 for mykey, mystat, mycontent in rawupdates:
1368 commands, errors = parse_updates(mycontent)
1369 upd_commands.extend(commands)
1372 def perform_global_updates(mycpv, mydb, mycommands):
# Apply parsed profiles/updates commands to the dependency metadata
# (DEPEND/RDEPEND/PDEPEND) of a single cpv in `mydb`.  NOTE(review): one
# elided line before aux_update, presumably a guard that skips the write
# when update_dbentries produced no changes -- confirm upstream.
1373 from portage.update import update_dbentries
1374 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1375 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1376 updates = update_dbentries(mycommands, aux_dict)
1378 mydb.aux_update(mycpv, updates)
1380 def visible(pkgsettings, pkg):
# Visibility check: each guard below (missing SLOT, unaccepted CHOST,
# unsupported/deprecated EAPI, missing keywords, mask atoms, profile masks,
# missing licenses) leads to an elided `return False`; the elided tail
# presumably returns True.  Installed packages skip the CHOST and
# deprecated-EAPI checks.
1382 Check if a package is visible. This can raise an InvalidDependString
1383 exception if LICENSE is invalid.
1384 TODO: optionally generate a list of masking reasons
1386 @returns: True if the package is visible, False otherwise.
1388 if not pkg.metadata["SLOT"]:
1390 if not pkg.installed:
1391 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1393 eapi = pkg.metadata["EAPI"]
1394 if not portage.eapi_is_supported(eapi):
1396 if not pkg.installed:
1397 if portage._eapi_is_deprecated(eapi):
1399 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1401 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1403 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
# The license check can raise InvalidDependString, handled below.
1406 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1408 except portage.exception.InvalidDependString:
1412 def get_masking_status(pkg, pkgsettings, root_config):
# Collect human-readable masking reasons for `pkg`: start from
# portage.getmaskingstatus() and append CHOST-rejection and undefined-SLOT
# reasons.  NOTE(review): elided lines include the return statement and the
# surrounding conditional structure.
1414 mreasons = portage.getmaskingstatus(
1415 pkg, settings=pkgsettings,
1416 portdb=root_config.trees["porttree"].dbapi)
1418 if not pkg.installed:
1419 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1420 mreasons.append("CHOST: %s" % \
1421 pkg.metadata["CHOST"])
1423 if not pkg.metadata["SLOT"]:
1424 mreasons.append("invalid: SLOT is undefined")
1428 def get_mask_info(root_config, cpv, pkgsettings,
1429 db, pkg_type, built, installed, db_keys):
# Fetch metadata for `cpv` from `db` and compute its masking reasons.
# Returns (metadata, mreasons); metadata is None (and mreasons a
# ["corruption"] marker) when aux_get failed -- the try/except around the
# aux_get call is elided in this extract.
1432 metadata = dict(izip(db_keys,
1433 db.aux_get(cpv, db_keys)))
# For unbuilt (ebuild) packages, compute USE/CHOST through the config so
# the Package below reflects the effective settings.
1436 if metadata and not built:
1437 pkgsettings.setcpv(cpv, mydb=metadata)
1438 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1439 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1440 if metadata is None:
1441 mreasons = ["corruption"]
1443 pkg = Package(type_name=pkg_type, root_config=root_config,
1444 cpv=cpv, built=built, installed=installed, metadata=metadata)
1445 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1446 return metadata, mreasons
1448 def show_masked_packages(masked_packages):
# Print a human-readable report of masked packages: the masking reasons per
# cpv, the package.mask comment (once per distinct comment), and the
# location of each missing license file (once per license).  Returns True
# if any package was masked because of an unsupported EAPI.
# NOTE(review): elided lines include shown_cpvs initialization, several
# print statements and `continue`s -- only visible lines are annotated.
1449 shown_licenses = set()
1450 shown_comments = set()
1451 # Maybe there is both an ebuild and a binary. Only
1452 # show one of them to avoid redundant appearance.
1454 have_eapi_mask = False
1455 for (root_config, pkgsettings, cpv,
1456 metadata, mreasons) in masked_packages:
1457 if cpv in shown_cpvs:
1460 comment, filename = None, None
1461 if "package.mask" in mreasons:
1462 comment, filename = \
1463 portage.getmaskingreason(
1464 cpv, metadata=metadata,
1465 settings=pkgsettings,
1466 portdb=root_config.trees["porttree"].dbapi,
1467 return_location=True)
1468 missing_licenses = []
1470 if not portage.eapi_is_supported(metadata["EAPI"]):
1471 have_eapi_mask = True
1473 missing_licenses = \
1474 pkgsettings._getMissingLicenses(
1476 except portage.exception.InvalidDependString:
1477 # This will have already been reported
1478 # above via mreasons.
1481 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1482 if comment and comment not in shown_comments:
1485 shown_comments.add(comment)
1486 portdb = root_config.trees["porttree"].dbapi
1487 for l in missing_licenses:
1488 l_path = portdb.findLicensePath(l)
1489 if l in shown_licenses:
1491 msg = ("A copy of the '%s' license" + \
1492 " is located at '%s'.") % (l, l_path)
1495 shown_licenses.add(l)
1496 return have_eapi_mask
1498 class Task(SlotObject):
# Base class whose identity is defined entirely by a hash key tuple
# supplied by subclasses via _get_hash_key().  Equality, hashing, length,
# indexing, iteration, containment and str() all delegate to that key, so
# a Task compares equal to its raw tuple form.
1499 __slots__ = ("_hash_key", "_hash_value")
1501 def _get_hash_key(self):
# Subclasses must assign self._hash_key (lazily) before this is usable.
1502 hash_key = getattr(self, "_hash_key", None)
1503 if hash_key is None:
1504 raise NotImplementedError(self)
1507 def __eq__(self, other):
1508 return self._get_hash_key() == other
1510 def __ne__(self, other):
1511 return self._get_hash_key() != other
# __hash__ (def line elided): caches hash(hash_key) in self._hash_value.
1514 hash_value = getattr(self, "_hash_value", None)
1515 if hash_value is None:
1516 self._hash_value = hash(self._get_hash_key())
1517 return self._hash_value
# __len__ (def line elided in this extract).
1520 return len(self._get_hash_key())
1522 def __getitem__(self, key):
1523 return self._get_hash_key()[key]
# __iter__ (def line elided in this extract).
1526 return iter(self._get_hash_key())
1528 def __contains__(self, key):
1529 return key in self._get_hash_key()
# __str__ (def line elided in this extract).
1532 return str(self._get_hash_key())
1534 class Blocker(Task):
# Represents a blocker atom (!cat/pkg) for a given root.  Hash key is the
# ("blocks", root, atom, eapi) tuple, so blockers dedupe naturally in sets.
1536 __hash__ = Task.__hash__
1537 __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1539 def __init__(self, **kwargs):
1540 Task.__init__(self, **kwargs)
# Derive the category/package key from the blocker atom.
1541 self.cp = portage.dep_getkey(self.atom)
1543 def _get_hash_key(self):
# Lazily build and cache the identity tuple (assignment line elided).
1544 hash_key = getattr(self, "_hash_key", None)
1545 if hash_key is None:
1547 ("blocks", self.root, self.atom, self.eapi)
1548 return self._hash_key
1550 class Package(Task):
# A package instance (ebuild, binary or installed) identified by the tuple
# (type_name, root, cpv, operation).  Wraps its metadata dict in
# _PackageMetadataWrapper so attribute mirrors (iuse, use, counter, ...)
# stay synchronized with metadata updates.  NOTE(review): several interior
# lines are elided throughout (metadata_keys header, _use class header,
# the _iuse token-classification loop, comparison return statements).
1552 __hash__ = Task.__hash__
1553 __slots__ = ("built", "cpv", "depth",
1554 "installed", "metadata", "onlydeps", "operation",
1555 "root_config", "type_name",
1556 "category", "counter", "cp", "cpv_split",
1557 "inherited", "iuse", "mtime",
1558 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# metadata_keys list (opening line elided): the aux keys a Package carries.
1561 "CHOST", "COUNTER", "DEPEND", "EAPI",
1562 "INHERITED", "IUSE", "KEYWORDS",
1563 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1564 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1566 def __init__(self, **kwargs):
1567 Task.__init__(self, **kwargs)
1568 self.root = self.root_config.root
# Install the synchronizing wrapper over the raw metadata mapping.
1569 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1570 self.cp = portage.cpv_getkey(self.cpv)
1573 # Avoid an InvalidAtom exception when creating slot_atom.
1574 # This package instance will be masked due to empty SLOT.
1576 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1577 self.category, self.pf = portage.catsplit(self.cpv)
1578 self.cpv_split = portage.catpkgsplit(self.cpv)
1579 self.pv_split = self.cpv_split[1:]
# Nested _use helper (class header elided): frozenset of enabled flags.
1583 __slots__ = ("__weakref__", "enabled")
1585 def __init__(self, use):
1586 self.enabled = frozenset(use)
1588 class _iuse(object):
# Classifies IUSE tokens into enabled (+flag), disabled (-flag) and plain,
# and lazily builds a validation regex over all flags + implicit IUSE.
1590 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1592 def __init__(self, tokens, iuse_implicit):
1593 self.tokens = tuple(tokens)
1594 self.iuse_implicit = iuse_implicit
# Token-classification loop (header lines elided): strip +/- prefixes.
1601 enabled.append(x[1:])
1603 disabled.append(x[1:])
1606 self.enabled = frozenset(enabled)
1607 self.disabled = frozenset(disabled)
1608 self.all = frozenset(chain(enabled, disabled, other))
1610 def __getattribute__(self, name):
# Lazily compile self.regex on first access; other attributes fall
# through to the default lookup at the bottom.
1613 return object.__getattribute__(self, "regex")
1614 except AttributeError:
1615 all = object.__getattribute__(self, "all")
1616 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1617 # Escape anything except ".*" which is supposed
1618 # to pass through from _get_implicit_iuse()
1619 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1620 regex = "^(%s)$" % "|".join(regex)
1621 regex = regex.replace("\\.\\*", ".*")
1622 self.regex = re.compile(regex)
1623 return object.__getattribute__(self, name)
1625 def _get_hash_key(self):
# Identity tuple; operation defaults to "merge" but is forced to
# "nomerge" for onlydeps/installed packages.
1626 hash_key = getattr(self, "_hash_key", None)
1627 if hash_key is None:
1628 if self.operation is None:
1629 self.operation = "merge"
1630 if self.onlydeps or self.installed:
1631 self.operation = "nomerge"
1633 (self.type_name, self.root, self.cpv, self.operation)
1634 return self._hash_key
# Rich comparisons delegate to portage.pkgcmp on the version split; the
# return statements (and the different-cp behavior) are elided here.
1636 def __lt__(self, other):
1637 if other.cp != self.cp:
1639 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1643 def __le__(self, other):
1644 if other.cp != self.cp:
1646 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1650 def __gt__(self, other):
1651 if other.cp != self.cp:
1653 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1657 def __ge__(self, other):
1658 if other.cp != self.cp:
1660 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Module-level setup: the full metadata key set (all auxdb keys minus the
# UNUSED_*/CDEPEND legacy keys, plus Package.metadata_keys) used to build
# the slotted dict base class for _PackageMetadataWrapper.
1664 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1665 if not x.startswith("UNUSED_"))
1666 _all_metadata_keys.discard("CDEPEND")
1667 _all_metadata_keys.update(Package.metadata_keys)
1669 from portage.cache.mappings import slot_dict_class
1670 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1672 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1674 Detect metadata updates and synchronize Package attributes.
# For every key in _wrapped_keys, __setitem__ dispatches to the matching
# _set_<key>() hook, which mirrors the value onto the owning Package.
1677 __slots__ = ("_pkg",)
1678 _wrapped_keys = frozenset(
1679 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1681 def __init__(self, pkg, metadata):
1682 _PackageMetadataWrapperBase.__init__(self)
# (self._pkg assignment elided in this extract.)
1684 self.update(metadata)
1686 def __setitem__(self, k, v):
1687 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1688 if k in self._wrapped_keys:
# Dynamic dispatch: "IUSE" -> self._set_iuse(k, v), etc.
1689 getattr(self, "_set_" + k.lower())(k, v)
1691 def _set_inherited(self, k, v):
1692 if isinstance(v, basestring):
1693 v = frozenset(v.split())
1694 self._pkg.inherited = v
1696 def _set_iuse(self, k, v):
1697 self._pkg.iuse = self._pkg._iuse(
1698 v.split(), self._pkg.root_config.iuse_implicit)
1700 def _set_slot(self, k, v):
# (body elided: presumably mirrors v onto self._pkg.slot -- confirm.)
1703 def _set_use(self, k, v):
1704 self._pkg.use = self._pkg._use(v.split())
1706 def _set_counter(self, k, v):
# Coerce string counters to a number (conversion lines elided).
1707 if isinstance(v, basestring):
1712 self._pkg.counter = v
1714 def _set__mtime_(self, k, v):
# Coerce string mtimes similarly (remaining lines elided).
1715 if isinstance(v, basestring):
1722 class EbuildFetchonly(SlotObject):
# Synchronous fetch-only driver for a single ebuild: runs the "fetch"
# phase via portage.doebuild.  When fetch is RESTRICTed it uses a private
# build directory so pkg_nofetch can run (see _execute_with_builddir).
# NOTE(review): method def lines for execute()/_execute() and several
# try/finally lines are elided in this extract.
1724 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
# execute() (def line elided): set up the doebuild environment and either
# run fetch directly or route through the private-builddir path.
1727 settings = self.settings
1729 portdb = pkg.root_config.trees["porttree"].dbapi
1730 ebuild_path = portdb.findname(pkg.cpv)
1731 settings.setcpv(pkg)
1732 debug = settings.get("PORTAGE_DEBUG") == "1"
1733 use_cache = 1 # always true
1734 portage.doebuild_environment(ebuild_path, "fetch",
1735 settings["ROOT"], settings, debug, use_cache, portdb)
1736 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1739 rval = self._execute_with_builddir()
1741 rval = portage.doebuild(ebuild_path, "fetch",
1742 settings["ROOT"], settings, debug=debug,
1743 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1744 mydbapi=portdb, tree="porttree")
1746 if rval != os.EX_OK:
1747 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1748 eerror(msg, phase="unpack", key=pkg.cpv)
1752 def _execute_with_builddir(self):
1753 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1754 # ensuring sane $PWD (bug #239560) and storing elog
1755 # messages. Use a private temp directory, in order
1756 # to avoid locking the main one.
1757 settings = self.settings
1758 global_tmpdir = settings["PORTAGE_TMPDIR"]
1759 from tempfile import mkdtemp
# mkdtemp can fail with EPERM; translated to PermissionDenied below
# (the try: and raise lines around this are elided).
1761 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1763 if e.errno != portage.exception.PermissionDenied.errno:
1765 raise portage.exception.PermissionDenied(global_tmpdir)
# Temporarily point PORTAGE_TMPDIR at the private dir, run, then restore
# and clean up (try/finally lines elided).
1766 settings["PORTAGE_TMPDIR"] = private_tmpdir
1767 settings.backup_changes("PORTAGE_TMPDIR")
1769 retval = self._execute()
1771 settings["PORTAGE_TMPDIR"] = global_tmpdir
1772 settings.backup_changes("PORTAGE_TMPDIR")
1773 shutil.rmtree(private_tmpdir)
# _execute() (def line elided): run the fetch phase inside prepared build
# dirs and flush elog messages afterwards.
1777 settings = self.settings
1779 root_config = pkg.root_config
1780 portdb = root_config.trees["porttree"].dbapi
1781 ebuild_path = portdb.findname(pkg.cpv)
1782 debug = settings.get("PORTAGE_DEBUG") == "1"
1783 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1785 retval = portage.doebuild(ebuild_path, "fetch",
1786 self.settings["ROOT"], self.settings, debug=debug,
1787 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1788 mydbapi=portdb, tree="porttree")
1790 if retval != os.EX_OK:
1791 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1792 eerror(msg, phase="unpack", key=pkg.cpv)
1794 portage.elog.elog_process(self.pkg.cpv, self.settings)
1797 class PollConstants(object):
1800 Provides POLL* constants that are equivalent to those from the
1801 select module, for use by PollSelectAdapter.
# Each POLL* name is pulled from the select module when available, with a
# fallback default value (the enumeration loop header is elided here).
1804 names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1807 locals()[k] = getattr(select, k, v)
1811 class AsynchronousTask(SlotObject):
1813 Subclasses override _wait() and _poll() so that calls
1814 to public methods can be wrapped for implementing
1815 hooks such as exit listener notification.
1817 Subclasses should call self.wait() to notify exit listeners after
1818 the task is complete and self.returncode has been set.
# NOTE(review): public method def lines (start, isAlive, poll, wait,
# cancel) are elided from this extract; only their bodies are visible.
1821 __slots__ = ("background", "cancelled", "returncode") + \
1822 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
# start() (def line elided): fire _start_hook after _start.
1826 Start an asynchronous task and then return as soon as possible.
# _start() (def line elided): abstract -- subclasses must implement.
1832 raise NotImplementedError(self)
# isAlive() (def line elided): alive while no returncode is set.
1835 return self.returncode is None
# poll() (def line elided): non-blocking returncode check via _poll().
1842 return self.returncode
# wait() (def line elided): block in _wait() until returncode is set,
# then run _wait_hook() to notify exit listeners.
1845 if self.returncode is None:
1848 return self.returncode
1851 return self.returncode
# cancel() (def line elided): mark cancelled; subclass _cancel elided.
1854 self.cancelled = True
1857 def addStartListener(self, f):
1859 The function will be called with one argument, a reference to self.
1861 if self._start_listeners is None:
1862 self._start_listeners = []
1863 self._start_listeners.append(f)
1865 def removeStartListener(self, f):
1866 if self._start_listeners is None:
1868 self._start_listeners.remove(f)
1870 def _start_hook(self):
# Fire-and-clear: listeners run once, on start.
1871 if self._start_listeners is not None:
1872 start_listeners = self._start_listeners
1873 self._start_listeners = None
1875 for f in start_listeners:
1878 def addExitListener(self, f):
1880 The function will be called with one argument, a reference to self.
1882 if self._exit_listeners is None:
1883 self._exit_listeners = []
1884 self._exit_listeners.append(f)
1886 def removeExitListener(self, f):
# A listener may live either in the pending list or, while _wait_hook is
# running, in the in-flight stack; remove it from whichever holds it.
1887 if self._exit_listeners is None:
1888 if self._exit_listener_stack is not None:
1889 self._exit_listener_stack.remove(f)
1891 self._exit_listeners.remove(f)
1893 def _wait_hook(self):
1895 Call this method after the task completes, just before returning
1896 the returncode from wait() or poll(). This hook is
1897 used to trigger exit listeners when the returncode first
1900 if self.returncode is not None and \
1901 self._exit_listeners is not None:
1903 # This prevents recursion, in case one of the
1904 # exit handlers triggers this method again by
1905 # calling wait(). Use a stack that gives
1906 # removeExitListener() an opportunity to consume
1907 # listeners from the stack, before they can get
1908 # called below. This is necessary because a call
1909 # to one exit listener may result in a call to
1910 # removeExitListener() for another listener on
1911 # the stack. That listener needs to be removed
1912 # from the stack since it would be inconsistent
1913 # to call it after it has been passed into
1914 # removeExitListener().
1915 self._exit_listener_stack = self._exit_listeners
1916 self._exit_listeners = None
1918 self._exit_listener_stack.reverse()
1919 while self._exit_listener_stack:
1920 self._exit_listener_stack.pop()(self)
1922 class AbstractPollTask(AsynchronousTask):
# Base for tasks driven by the scheduler's poll() loop.  Defines which
# poll events are considered exceptional vs. registered, and the
# unregister-on-error/HUP policy.  NOTE(review): the __slots__ and
# _registered_events continuations, and the branch bodies in
# _unregister_if_appropriate, are elided from this extract.
1924 __slots__ = ("scheduler",) + \
1928 _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1929 _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1932 def _unregister(self):
# Abstract: subclasses release their scheduler registrations here.
1933 raise NotImplementedError(self)
1935 def _unregister_if_appropriate(self, event):
# Tear down on error/invalid events or hangup (elided bodies presumably
# call self._unregister()/self.wait() -- confirm upstream).
1936 if self._registered:
1937 if event & self._exceptional_events:
1940 elif event & PollConstants.POLLHUP:
1944 class PipeReader(AbstractPollTask):
1947 Reads output from one or more files and saves it in memory,
1948 for retrieval via the getvalue() method. This is driven by
1949 the scheduler's poll() loop, so it runs entirely within the
# NOTE(review): def lines for _start, isAlive, _cancel, _wait, getvalue,
# close and several exception-handling lines are elided in this extract.
1953 __slots__ = ("input_files",) + \
1954 ("_read_data", "_reg_ids")
# _start() (def line elided): put every input fd into non-blocking mode
# and register each with the scheduler, routed to _output_handler.
1957 self._reg_ids = set()
1958 self._read_data = []
1959 for k, f in self.input_files.iteritems():
1960 fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1961 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1962 self._reg_ids.add(self.scheduler.register(f.fileno(),
1963 self._registered_events, self._output_handler))
1964 self._registered = True
# isAlive() (def line elided): alive while still registered.
1967 return self._registered
# _cancel() (def line elided): mark cancelled if not yet finished.
1970 if self.returncode is None:
1972 self.cancelled = True
# _wait() (def line elided): pump the scheduler until unregistered,
# then report success.
1976 if self.returncode is not None:
1977 return self.returncode
1979 if self._registered:
1980 self.scheduler.schedule(self._reg_ids)
1983 self.returncode = os.EX_OK
1984 return self.returncode
# getvalue() (def line elided): join the accumulated chunks.
1987 """Retrieve the entire contents"""
1988 if sys.hexversion >= 0x3000000:
1989 return bytes().join(self._read_data)
1990 return "".join(self._read_data)
# close() (def line elided): drop the buffer.
1993 """Free the memory buffer."""
1994 self._read_data = None
1996 def _output_handler(self, fd, event):
# Poll callback: on POLLIN, read a chunk from the matching input file
# into an unsigned-byte array and append it (EOF/exception handling
# lines are elided); always apply the unregister policy before
# returning whether we remain registered.
1998 if event & PollConstants.POLLIN:
2000 for f in self.input_files.itervalues():
2001 if fd == f.fileno():
2004 buf = array.array('B')
2006 buf.fromfile(f, self._bufsize)
2011 self._read_data.append(buf.tostring())
2016 self._unregister_if_appropriate(event)
2017 return self._registered
2019 def _unregister(self):
2021 Unregister from the scheduler and close open files.
2024 self._registered = False
2026 if self._reg_ids is not None:
2027 for reg_id in self._reg_ids:
2028 self.scheduler.unregister(reg_id)
2029 self._reg_ids = None
2031 if self.input_files is not None:
2032 for f in self.input_files.itervalues():
2034 self.input_files = None
2036 class CompositeTask(AsynchronousTask):
# A task implemented as a chain of sub-tasks; _current_task always points
# at the sub-task in flight.  NOTE(review): def lines for isAlive, _cancel,
# _poll, _wait and several loop/branch lines are elided in this extract.
2038 __slots__ = ("scheduler",) + ("_current_task",)
# isAlive() (def line elided): alive while a sub-task is in flight.
2041 return self._current_task is not None
# _cancel() (def line elided): propagate cancellation to the sub-task.
2044 self.cancelled = True
2045 if self._current_task is not None:
2046 self._current_task.cancel()
# _poll() (def line elided):
2050 This does a loop calling self._current_task.poll()
2051 repeatedly as long as the value of self._current_task
2052 keeps changing. It calls poll() a maximum of one time
2053 for a given self._current_task instance. This is useful
2054 since calling poll() on a task can trigger an advance to
2055 the next task, which could eventually lead to the returncode
2056 being set in cases when polling only a single task would
2057 not have the same effect.
2062 task = self._current_task
2063 if task is None or task is prev:
2064 # don't poll the same task more than once
2069 return self.returncode
# _wait() (def line elided): wait on sub-tasks until the chain settles;
# an exit listener must advance _current_task between iterations.
2075 task = self._current_task
2077 # don't wait for the same task more than once
2080 # Before the task.wait() method returned, an exit
2081 # listener should have set self._current_task to either
2082 # a different task or None. Something is wrong.
2083 raise AssertionError("self._current_task has not " + \
2084 "changed since calling wait", self, task)
2088 return self.returncode
2090 def _assert_current(self, task):
2092 Raises an AssertionError if the given task is not the
2093 same one as self._current_task. This can be useful
2096 if task is not self._current_task:
2097 raise AssertionError("Unrecognized task: %s" % (task,))
2099 def _default_exit(self, task):
2101 Calls _assert_current() on the given task and then sets the
2102 composite returncode attribute if task.returncode != os.EX_OK.
2103 If the task failed then self._current_task will be set to None.
2104 Subclasses can use this as a generic task exit callback.
2107 @returns: The task.returncode attribute.
2109 self._assert_current(task)
2110 if task.returncode != os.EX_OK:
2111 self.returncode = task.returncode
2112 self._current_task = None
2113 return task.returncode
2115 def _final_exit(self, task):
2117 Assumes that task is the final task of this composite task.
2118 Calls _default_exit() and sets self.returncode to the task's
2119 returncode and sets self._current_task to None.
2121 self._default_exit(task)
2122 self._current_task = None
2123 self.returncode = task.returncode
2124 return self.returncode
2126 def _default_final_exit(self, task):
2128 This calls _final_exit() and then wait().
2130 Subclasses can use this as a generic final task exit callback.
2133 self._final_exit(task)
2136 def _start_task(self, task, exit_handler):
2138 Register exit handler for the given task, set it
2139 as self._current_task, and call task.start().
2141 Subclasses can use this as a generic way to start
2145 task.addExitListener(exit_handler)
2146 self._current_task = task
2149 class TaskSequence(CompositeTask):
2151 A collection of tasks that executes sequentially. Each task
2152 must have a addExitListener() method that can be used as
2153 a means to trigger movement from one task to the next.
# NOTE(review): def lines for _start and cancel are elided here.
2156 __slots__ = ("_task_queue",)
2158 def __init__(self, **kwargs):
2159 AsynchronousTask.__init__(self, **kwargs)
# FIFO of pending sub-tasks; deque gives O(1) popleft in
# _start_next_task.
2160 self._task_queue = deque()
2162 def add(self, task):
2163 self._task_queue.append(task)
# _start() (def line elided): kick off the first queued task.
2166 self._start_next_task()
# cancel() (def line elided): drop pending tasks, then cancel current.
2169 self._task_queue.clear()
2170 CompositeTask.cancel(self)
2172 def _start_next_task(self):
2173 self._start_task(self._task_queue.popleft(),
2174 self._task_exit_handler)
2176 def _task_exit_handler(self, task):
# Advance on success; on failure or when the queue is exhausted,
# finalize (the intermediate lines are elided in this extract).
2177 if self._default_exit(task) != os.EX_OK:
2179 elif self._task_queue:
2180 self._start_next_task()
2182 self._final_exit(task)
2185 class SubProcess(AbstractPollTask):
# Base class for tasks backed by a forked child process, monitored via a
# scheduler-registered file descriptor.  NOTE(review): def lines for
# _poll, _cancel, isAlive, _wait, _set_returncode and several try/except
# lines are elided in this extract.
2187 __slots__ = ("pid",) + \
2188 ("_files", "_reg_id")
2190 # A file descriptor is required for the scheduler to monitor changes from
2191 # inside a poll() loop. When logging is not enabled, create a pipe just to
2192 # serve this purpose alone.
# _poll() (def line elided): non-blocking waitpid; ECHILD is treated as
# the child having already been reaped (synthesized failure status).
2196 if self.returncode is not None:
2197 return self.returncode
2198 if self.pid is None:
2199 return self.returncode
2200 if self._registered:
2201 return self.returncode
2204 retval = os.waitpid(self.pid, os.WNOHANG)
2206 if e.errno != errno.ECHILD:
2209 retval = (self.pid, 1)
2211 if retval == (0, 0):
2213 self._set_returncode(retval)
2214 return self.returncode
# _cancel() (def line elided): SIGTERM the child; ESRCH means it is
# already gone and is ignored.
2219 os.kill(self.pid, signal.SIGTERM)
2221 if e.errno != errno.ESRCH:
2225 self.cancelled = True
2226 if self.pid is not None:
2228 return self.returncode
# isAlive() (def line elided): spawned and not yet reaped.
2231 return self.pid is not None and \
2232 self.returncode is None
# _wait() (def line elided): pump the scheduler while registered, then
# do a blocking waitpid; ECHILD again falls back to a failure status.
2236 if self.returncode is not None:
2237 return self.returncode
2239 if self._registered:
2240 self.scheduler.schedule(self._reg_id)
2242 if self.returncode is not None:
2243 return self.returncode
2246 wait_retval = os.waitpid(self.pid, 0)
2248 if e.errno != errno.ECHILD:
2251 self._set_returncode((self.pid, 1))
2253 self._set_returncode(wait_retval)
2255 return self.returncode
2257 def _unregister(self):
2259 Unregister from the scheduler and close open files.
2262 self._registered = False
2264 if self._reg_id is not None:
2265 self.scheduler.unregister(self._reg_id)
2268 if self._files is not None:
2269 for f in self._files.itervalues():
2273 def _set_returncode(self, wait_retval):
# Decode the waitpid status word: signal deaths are shifted into the
# high byte, normal exits shifted down to the plain exit code.
2275 retval = wait_retval[1]
2277 if retval != os.EX_OK:
2279 retval = (retval & 0xff) << 8
2281 retval = retval >> 8
2283 self.returncode = retval
2285 class SpawnProcess(SubProcess):
2288 Constructor keyword args are passed into portage.process.spawn().
2289 The required "args" keyword argument will be passed as the first
# NOTE(review): def lines for _start and several try/except/else lines
# are elided in this extract; comments cover only visible code.
2293 _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2294 "uid", "gid", "groups", "umask", "logfile",
2295 "path_lookup", "pre_exec")
2297 __slots__ = ("args",) + \
2300 _file_names = ("log", "process", "stdout")
2301 _files_dict = slot_dict_class(_file_names, prefix="")
# _start() (def line elided): build fd_pipes, wire up logging/output
# plumbing, register the master pipe with the scheduler, and spawn.
2308 if self.fd_pipes is None:
2310 fd_pipes = self.fd_pipes
2311 fd_pipes.setdefault(0, sys.stdin.fileno())
2312 fd_pipes.setdefault(1, sys.stdout.fileno())
2313 fd_pipes.setdefault(2, sys.stderr.fileno())
2315 # flush any pending output
2316 for fd in fd_pipes.itervalues():
2317 if fd == sys.stdout.fileno():
2319 if fd == sys.stderr.fileno():
2322 logfile = self.logfile
2323 self._files = self._files_dict()
# Master end stays non-blocking so _output_handler never stalls the
# scheduler's poll loop.
2326 master_fd, slave_fd = self._pipe(fd_pipes)
2327 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2328 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2331 fd_pipes_orig = fd_pipes.copy()
2333 # TODO: Use job control functions like tcsetpgrp() to control
2334 # access to stdin. Until then, use /dev/null so that any
2335 # attempts to read from stdin will immediately return EOF
2336 # instead of blocking indefinitely.
2337 null_input = open('/dev/null', 'rb')
2338 fd_pipes[0] = null_input.fileno()
2340 fd_pipes[0] = fd_pipes_orig[0]
2342 files.process = os.fdopen(master_fd, 'rb')
2343 if logfile is not None:
# Logging mode: child stdout/stderr go through the slave pipe; output
# is teed to the log file (and to our stdout when foregrounded).
2345 fd_pipes[1] = slave_fd
2346 fd_pipes[2] = slave_fd
2348 files.log = open(logfile, mode='ab')
2349 portage.util.apply_secpass_permissions(logfile,
2350 uid=portage.portage_uid, gid=portage.portage_gid,
2353 if not self.background:
2354 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2356 output_handler = self._output_handler
2360 # Create a dummy pipe so the scheduler can monitor
2361 # the process from inside a poll() loop.
2362 fd_pipes[self._dummy_pipe_fd] = slave_fd
2364 fd_pipes[1] = slave_fd
2365 fd_pipes[2] = slave_fd
2366 output_handler = self._dummy_handler
# Collect spawn() kwargs from same-named instance attributes.
2369 for k in self._spawn_kwarg_names:
2370 v = getattr(self, k)
2374 kwargs["fd_pipes"] = fd_pipes
2375 kwargs["returnpid"] = True
2376 kwargs.pop("logfile", None)
2378 self._reg_id = self.scheduler.register(files.process.fileno(),
2379 self._registered_events, output_handler)
2380 self._registered = True
2382 retval = self._spawn(self.args, **kwargs)
2385 if null_input is not None:
2388 if isinstance(retval, int):
# spawn() returned an error code instead of a pid list.
2391 self.returncode = retval
2395 self.pid = retval[0]
# This class reaps the child itself, so take it out of portage's
# global spawned_pids bookkeeping.
2396 portage.process.spawned_pids.remove(self.pid)
2398 def _pipe(self, fd_pipes):
2400 @type fd_pipes: dict
2401 @param fd_pipes: pipes from which to copy terminal size if desired.
2405 def _spawn(self, args, **kwargs):
2406 return portage.process.spawn(args, **kwargs)
2408 def _output_handler(self, fd, event):
# Poll callback for logging mode: read a chunk from the child, tee it
# to stdout (unless backgrounded) and to the log file.  EOF/exception
# handling lines are elided in this extract.
2410 if event & PollConstants.POLLIN:
2413 buf = array.array('B')
2415 buf.fromfile(files.process, self._bufsize)
2420 if not self.background:
2421 buf.tofile(files.stdout)
2422 files.stdout.flush()
2423 buf.tofile(files.log)
2429 self._unregister_if_appropriate(event)
2430 return self._registered
2432 def _dummy_handler(self, fd, event):
2434 This method is mainly interested in detecting EOF, since
2435 the only purpose of the pipe is to allow the scheduler to
2436 monitor the process from inside a poll() loop.
2439 if event & PollConstants.POLLIN:
2441 buf = array.array('B')
2443 buf.fromfile(self._files.process, self._bufsize)
2453 self._unregister_if_appropriate(event)
2454 return self._registered
2456 class MiscFunctionsProcess(SpawnProcess):
2458 Spawns misc-functions.sh with an existing ebuild environment.
# NOTE(review): the _start def line and a few surrounding lines are
# elided in this extract.
2461 __slots__ = ("commands", "phase", "pkg", "settings")
# _start() (def line elided): assemble the shell command line from the
# misc-functions.sh path plus self.commands, then delegate to
# SpawnProcess._start.
2464 settings = self.settings
2465 settings.pop("EBUILD_PHASE", None)
2466 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2467 misc_sh_binary = os.path.join(portage_bin_path,
2468 os.path.basename(portage.const.MISC_SH_BINARY))
2470 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2471 self.logfile = settings.get("PORTAGE_LOG_FILE")
# Remove any stale exit-status marker before spawning.
2473 portage._doebuild_exit_status_unlink(
2474 settings.get("EBUILD_EXIT_STATUS_FILE"))
2476 SpawnProcess._start(self)
2478 def _spawn(self, args, **kwargs):
# Run through portage.spawn() (shell string) rather than a plain exec,
# so the ebuild environment/settings are applied.
2479 settings = self.settings
2480 debug = settings.get("PORTAGE_DEBUG") == "1"
2481 return portage.spawn(" ".join(args), settings,
2482 debug=debug, **kwargs)
2484 def _set_returncode(self, wait_retval):
# Let the exit-status file override/validate the raw process status.
2485 SpawnProcess._set_returncode(self, wait_retval)
2486 self.returncode = portage._doebuild_exit_status_check_and_log(
2487 self.settings, self.phase, self.returncode)
2489 class EbuildFetcher(SpawnProcess):
# Spawns `ebuild <path> fetch` as a subprocess to download distfiles for a
# package, optionally in prefetch mode (no builddir, shared log).
# NOTE(review): def lines for _start and several conditional lines are
# elided in this extract.
2491 __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
# _start() (def line elided): prepare settings/builddir and exec the
# ebuild helper for the fetch phase.
2496 root_config = self.pkg.root_config
2497 portdb = root_config.trees["porttree"].dbapi
2498 ebuild_path = portdb.findname(self.pkg.cpv)
2499 settings = self.config_pool.allocate()
2500 settings.setcpv(self.pkg)
2502 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2503 # should not be touched since otherwise it could interfere with
2504 # another instance of the same cpv concurrently being built for a
2505 # different $ROOT (currently, builds only cooperate with prefetchers
2506 # that are spawned for the same $ROOT).
2507 if not self.prefetch:
2508 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2509 self._build_dir.lock()
2510 self._build_dir.clean()
2511 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2512 if self.logfile is None:
2513 self.logfile = settings.get("PORTAGE_LOG_FILE")
2519 # If any incremental variables have been overridden
2520 # via the environment, those values need to be passed
2521 # along here so that they are correctly considered by
2522 # the config instance in the subprocess.
2523 fetch_env = os.environ.copy()
2525 nocolor = settings.get("NOCOLOR")
2526 if nocolor is not None:
2527 fetch_env["NOCOLOR"] = nocolor
# Keep the fetcher at normal priority regardless of PORTAGE_NICENESS.
2529 fetch_env["PORTAGE_NICENESS"] = "0"
2531 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2533 ebuild_binary = os.path.join(
2534 settings["PORTAGE_BIN_PATH"], "ebuild")
2536 fetch_args = [ebuild_binary, ebuild_path, phase]
2537 debug = settings.get("PORTAGE_DEBUG") == "1"
2539 fetch_args.append("--debug")
2541 self.args = fetch_args
2542 self.env = fetch_env
2543 SpawnProcess._start(self)
2545 def _pipe(self, fd_pipes):
2546 """When appropriate, use a pty so that fetcher progress bars,
2547 like wget has, will work properly."""
2548 if self.background or not sys.stdout.isatty():
2549 # When the output only goes to a log file,
2550 # there's no point in creating a pty.
2552 stdout_pipe = fd_pipes.get(1)
2553 got_pty, master_fd, slave_fd = \
2554 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2555 return (master_fd, slave_fd)
2557 def _set_returncode(self, wait_retval):
2558 SpawnProcess._set_returncode(self, wait_retval)
2559 # Collect elog messages that might have been
2560 # created by the pkg_nofetch phase.
2561 if self._build_dir is not None:
2562 # Skip elog messages for prefetch, in order to avoid duplicates.
2563 if not self.prefetch and self.returncode != os.EX_OK:
# Report the failure through elog, pointing at the log file when
# one exists (elog_out open/close lines partly elided).
2565 if self.logfile is not None:
2567 elog_out = open(self.logfile, 'a')
2568 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2569 if self.logfile is not None:
2570 msg += ", Log file:"
2571 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2572 if self.logfile is not None:
2573 eerror(" '%s'" % (self.logfile,),
2574 phase="unpack", key=self.pkg.cpv, out=elog_out)
2575 if elog_out is not None:
2577 if not self.prefetch:
2578 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2579 features = self._build_dir.settings.features
2580 if self.returncode == os.EX_OK:
2581 self._build_dir.clean()
2582 self._build_dir.unlock()
# Return the settings instance to the shared pool and detach the
# builddir so it is not cleaned/unlocked twice.
2583 self.config_pool.deallocate(self._build_dir.settings)
2584 self._build_dir = None
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Manages locking, creation and cleanup of a package's build directory
# (PORTAGE_BUILDDIR), including a short-lived lock on the parent
# category directory while it is being created or removed.
2586 class EbuildBuildDir(SlotObject):
2588 __slots__ = ("dir_path", "pkg", "settings",
2589 "locked", "_catdir", "_lock_obj")
2591 def __init__(self, **kwargs):
2592 SlotObject.__init__(self, **kwargs)
2597 This raises an AlreadyLocked exception if lock() is called
2598 while a lock is already held. In order to avoid this, call
2599 unlock() or check whether the "locked" attribute is True
2600 or False before calling lock().
2602 if self._lock_obj is not None:
2603 raise self.AlreadyLocked((self._lock_obj,))
# When no dir_path was given, derive PORTAGE_BUILDDIR by running the
# doebuild environment setup for this package.
2605 dir_path = self.dir_path
2606 if dir_path is None:
2607 root_config = self.pkg.root_config
2608 portdb = root_config.trees["porttree"].dbapi
2609 ebuild_path = portdb.findname(self.pkg.cpv)
2610 settings = self.settings
2611 settings.setcpv(self.pkg)
2612 debug = settings.get("PORTAGE_DEBUG") == "1"
2613 use_cache = 1 # always true
2614 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2615 self.settings, debug, use_cache, portdb)
2616 dir_path = self.settings["PORTAGE_BUILDDIR"]
2618 catdir = os.path.dirname(dir_path)
2619 self._catdir = catdir
# Ensure the grandparent exists, then lock the category dir while the
# build dir's parent is created, to serialize with concurrent removals.
2621 portage.util.ensure_dirs(os.path.dirname(catdir),
2622 gid=portage.portage_gid,
2626 catdir_lock = portage.locks.lockdir(catdir)
2627 portage.util.ensure_dirs(catdir,
2628 gid=portage.portage_gid,
2630 self._lock_obj = portage.locks.lockdir(dir_path)
2632 self.locked = self._lock_obj is not None
2633 if catdir_lock is not None:
2634 portage.locks.unlockdir(catdir_lock)
2637 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2638 by keepwork or keeptemp in FEATURES."""
2639 settings = self.settings
2640 features = settings.features
2641 if not ("keepwork" in features or "keeptemp" in features):
# A missing build dir is fine; anything else propagates (the try/raise
# lines are elided here).
2643 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2644 except EnvironmentError, e:
2645 if e.errno != errno.ENOENT:
# unlock(): release the build-dir lock, then opportunistically remove
# the (possibly empty) category dir under its own lock.
2650 if self._lock_obj is None:
2653 portage.locks.unlockdir(self._lock_obj)
2654 self._lock_obj = None
2657 catdir = self._catdir
2660 catdir_lock = portage.locks.lockdir(catdir)
# Removal of a non-empty or already-absent catdir is expected; only
# other errors propagate.
2666 if e.errno not in (errno.ENOENT,
2667 errno.ENOTEMPTY, errno.EEXIST):
2670 portage.locks.unlockdir(catdir_lock)
2672 class AlreadyLocked(portage.exception.PortageException):
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Composite task driving a full from-source build: optional prefetch,
# fetch, build (via EbuildExecuter), optional binary packaging, and
# finally merge — releasing the build-dir lock on every exit path.
2675 class EbuildBuild(CompositeTask):
2677 __slots__ = ("args_set", "config_pool", "find_blockers",
2678 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2679 "prefetcher", "settings", "world_atom") + \
2680 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2684 logger = self.logger
2687 settings = self.settings
2688 world_atom = self.world_atom
2689 root_config = pkg.root_config
2692 portdb = root_config.trees[tree].dbapi
2693 settings.setcpv(pkg)
2694 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2695 ebuild_path = portdb.findname(self.pkg.cpv)
2696 self._ebuild_path = ebuild_path
# If a background prefetcher is still running, wait for it (it holds a
# lock on the file being fetched); otherwise proceed directly.
2698 prefetcher = self.prefetcher
2699 if prefetcher is None:
2701 elif not prefetcher.isAlive():
2703 elif prefetcher.poll() is None:
2705 waiting_msg = "Fetching files " + \
2706 "in the background. " + \
2707 "To view fetch progress, run `tail -f " + \
2708 "/var/log/emerge-fetch.log` in another " + \
2710 msg_prefix = colorize("GOOD", " * ")
2711 from textwrap import wrap
2712 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2713 for line in wrap(waiting_msg, 65))
2714 if not self.background:
2715 writemsg(waiting_msg, noiselevel=-1)
2717 self._current_task = prefetcher
2718 prefetcher.addExitListener(self._prefetch_exit)
2721 self._prefetch_exit(prefetcher)
# After prefetch: run a synchronous EbuildFetchonly for pretend/fetch
# modes, or start an asynchronous EbuildFetcher otherwise.
2723 def _prefetch_exit(self, prefetcher):
2727 settings = self.settings
2730 fetcher = EbuildFetchonly(
2731 fetch_all=opts.fetch_all_uri,
2732 pkg=pkg, pretend=opts.pretend,
2734 retval = fetcher.execute()
2735 self.returncode = retval
2739 fetcher = EbuildFetcher(config_pool=self.config_pool,
2740 fetchall=opts.fetch_all_uri,
2741 fetchonly=opts.fetchonly,
2742 background=self.background,
2743 pkg=pkg, scheduler=self.scheduler)
2745 self._start_task(fetcher, self._fetch_exit)
2747 def _fetch_exit(self, fetcher):
2751 fetch_failed = False
2753 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2755 fetch_failed = self._default_exit(fetcher) != os.EX_OK
# On failure keep the fetch log visible via PORTAGE_LOG_FILE; on
# success remove it so a stale log does not linger.
2757 if fetch_failed and fetcher.logfile is not None and \
2758 os.path.exists(fetcher.logfile):
2759 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2761 if not fetch_failed and fetcher.logfile is not None:
2762 # Fetch was successful, so remove the fetch log.
2764 os.unlink(fetcher.logfile)
2768 if fetch_failed or opts.fetchonly:
2772 logger = self.logger
2774 pkg_count = self.pkg_count
2775 scheduler = self.scheduler
2776 settings = self.settings
2777 features = settings.features
2778 ebuild_path = self._ebuild_path
2779 system_set = pkg.root_config.sets["system"]
2781 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2782 self._build_dir.lock()
2784 # Cleaning is triggered before the setup
2785 # phase, in portage.doebuild().
2786 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2787 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2788 short_msg = "emerge: (%s of %s) %s Clean" % \
2789 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2790 logger.log(msg, short_msg=short_msg)
2792 #buildsyspkg: Check if we need to _force_ binary package creation
2793 self._issyspkg = "buildsyspkg" in features and \
2794 system_set.findAtomForPackage(pkg) and \
# Either --buildpkg was requested or this is a forced system package;
# log the appropriate Compiling/Packaging vs Compiling/Merging message.
2797 if opts.buildpkg or self._issyspkg:
2799 self._buildpkg = True
2801 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2802 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2803 short_msg = "emerge: (%s of %s) %s Compile" % \
2804 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2805 logger.log(msg, short_msg=short_msg)
2808 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2809 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2810 short_msg = "emerge: (%s of %s) %s Compile" % \
2811 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2812 logger.log(msg, short_msg=short_msg)
2814 build = EbuildExecuter(background=self.background, pkg=pkg,
2815 scheduler=scheduler, settings=settings)
2816 self._start_task(build, self._build_exit)
# Flush elog messages and drop the build-dir lock; called on both the
# failure and completion paths below.
2818 def _unlock_builddir(self):
2819 portage.elog.elog_process(self.pkg.cpv, self.settings)
2820 self._build_dir.unlock()
2822 def _build_exit(self, build):
2823 if self._default_exit(build) != os.EX_OK:
2824 self._unlock_builddir()
2829 buildpkg = self._buildpkg
2832 self._final_exit(build)
2837 msg = ">>> This is a system package, " + \
2838 "let's pack a rescue tarball.\n"
2840 log_path = self.settings.get("PORTAGE_LOG_FILE")
2841 if log_path is not None:
2842 log_file = open(log_path, 'a')
2848 if not self.background:
2849 portage.writemsg_stdout(msg, noiselevel=-1)
2851 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2852 scheduler=self.scheduler, settings=self.settings)
2854 self._start_task(packager, self._buildpkg_exit)
2856 def _buildpkg_exit(self, packager):
2858 Released build dir lock when there is a failure or
2859 when in buildpkgonly mode. Otherwise, the lock will
2860 be released when merge() is called.
2863 if self._default_exit(packager) != os.EX_OK:
2864 self._unlock_builddir()
2868 if self.opts.buildpkgonly:
2869 # Need to call "clean" phase for buildpkgonly mode
2870 portage.elog.elog_process(self.pkg.cpv, self.settings)
2872 clean_phase = EbuildPhase(background=self.background,
2873 pkg=self.pkg, phase=phase,
2874 scheduler=self.scheduler, settings=self.settings,
2876 self._start_task(clean_phase, self._clean_exit)
2879 # Continue holding the builddir lock until
2880 # after the package has been installed.
2881 self._current_task = None
2882 self.returncode = packager.returncode
2885 def _clean_exit(self, clean_phase):
2886 if self._final_exit(clean_phase) != os.EX_OK or \
2887 self.opts.buildpkgonly:
2888 self._unlock_builddir()
2893 Install the package and then clean up and release locks.
2894 Only call this after the build has completed successfully
2895 and neither fetchonly nor buildpkgonly mode are enabled.
2898 find_blockers = self.find_blockers
2899 ldpath_mtimes = self.ldpath_mtimes
2900 logger = self.logger
2902 pkg_count = self.pkg_count
2903 settings = self.settings
2904 world_atom = self.world_atom
2905 ebuild_path = self._ebuild_path
2908 merge = EbuildMerge(find_blockers=self.find_blockers,
2909 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2910 pkg_count=pkg_count, pkg_path=ebuild_path,
2911 scheduler=self.scheduler,
2912 settings=settings, tree=tree, world_atom=world_atom)
2914 msg = " === (%s of %s) Merging (%s::%s)" % \
2915 (pkg_count.curval, pkg_count.maxval,
2916 pkg.cpv, ebuild_path)
2917 short_msg = "emerge: (%s of %s) %s Merge" % \
2918 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2919 logger.log(msg, short_msg=short_msg)
# The lock is released after merge.execute() regardless of outcome
# (the surrounding try/finally lines appear elided here — TODO confirm).
2922 rval = merge.execute()
2924 self._unlock_builddir()
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Runs the ebuild phase pipeline: clean -> setup -> unpack -> the
# remaining src_* phases as a TaskSequence. Live ebuilds get their
# unpack serialized through the scheduler to avoid $DISTDIR races.
2928 class EbuildExecuter(CompositeTask):
2930 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2932 _phases = ("prepare", "configure", "compile", "test", "install")
2934 _live_eclasses = frozenset([
2944 self._tree = "porttree"
2947 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2948 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2949 self._start_task(clean_phase, self._clean_phase_exit)
2951 def _clean_phase_exit(self, clean_phase):
2953 if self._default_exit(clean_phase) != os.EX_OK:
2958 scheduler = self.scheduler
2959 settings = self.settings
2962 # This initializes PORTAGE_LOG_FILE.
2963 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# setup is dispatched through scheduler.scheduleSetup rather than
# _start_task, so the scheduler can serialize setup phases globally.
2965 setup_phase = EbuildPhase(background=self.background,
2966 pkg=pkg, phase="setup", scheduler=scheduler,
2967 settings=settings, tree=self._tree)
2969 setup_phase.addExitListener(self._setup_exit)
2970 self._current_task = setup_phase
2971 self.scheduler.scheduleSetup(setup_phase)
2973 def _setup_exit(self, setup_phase):
2975 if self._default_exit(setup_phase) != os.EX_OK:
2979 unpack_phase = EbuildPhase(background=self.background,
2980 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2981 settings=self.settings, tree=self._tree)
2983 if self._live_eclasses.intersection(self.pkg.inherited):
2984 # Serialize $DISTDIR access for live ebuilds since
2985 # otherwise they can interfere with eachother.
2987 unpack_phase.addExitListener(self._unpack_exit)
2988 self._current_task = unpack_phase
2989 self.scheduler.scheduleUnpack(unpack_phase)
2992 self._start_task(unpack_phase, self._unpack_exit)
2994 def _unpack_exit(self, unpack_phase):
2996 if self._default_exit(unpack_phase) != os.EX_OK:
3000 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3003 phases = self._phases
3004 eapi = pkg.metadata["EAPI"]
# EAPI 0/1 predate src_prepare/src_configure, so those two phases are
# dropped from the sequence (the slicing line is elided here).
3005 if eapi in ("0", "1"):
3006 # skip src_prepare and src_configure
3009 for phase in phases:
3010 ebuild_phases.add(EbuildPhase(background=self.background,
3011 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3012 settings=self.settings, tree=self._tree))
3014 self._start_task(ebuild_phases, self._default_final_exit)
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
3016 class EbuildMetadataPhase(SubProcess):
3019 Asynchronous interface for the ebuild "depend" phase which is
3020 used to extract metadata from the ebuild.
3023 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3024 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3027 _file_names = ("ebuild",)
3028 _files_dict = slot_dict_class(_file_names, prefix="")
3032 settings = self.settings
3034 ebuild_path = self.ebuild_path
3035 debug = settings.get("PORTAGE_DEBUG") == "1"
# Build the fd map for the child: caller-supplied pipes win, with
# stdin/stdout/stderr defaulting to this process's descriptors.
3039 if self.fd_pipes is not None:
3040 fd_pipes = self.fd_pipes.copy()
3044 fd_pipes.setdefault(0, sys.stdin.fileno())
3045 fd_pipes.setdefault(1, sys.stdout.fileno())
3046 fd_pipes.setdefault(2, sys.stderr.fileno())
3048 # flush any pending output
3049 for fd in fd_pipes.itervalues():
3050 if fd == sys.stdout.fileno():
3052 if fd == sys.stderr.fileno():
3055 fd_pipes_orig = fd_pipes.copy()
3056 self._files = self._files_dict()
# Non-blocking read end of the metadata pipe, registered with the
# scheduler so _output_handler drains it as data arrives.
3059 master_fd, slave_fd = os.pipe()
3060 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3061 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3063 fd_pipes[self._metadata_fd] = slave_fd
3065 self._raw_metadata = []
3066 files.ebuild = os.fdopen(master_fd, 'r')
3067 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3068 self._registered_events, self._output_handler)
3069 self._registered = True
3071 retval = portage.doebuild(ebuild_path, "depend",
3072 settings["ROOT"], settings, debug,
3073 mydbapi=self.portdb, tree="porttree",
3074 fd_pipes=fd_pipes, returnpid=True)
3078 if isinstance(retval, int):
3079 # doebuild failed before spawning
3081 self.returncode = retval
# With returnpid=True, doebuild returns a pid list; take ownership of
# the child by removing it from portage's global spawned_pids tracking.
3085 self.pid = retval[0]
3086 portage.process.spawned_pids.remove(self.pid)
3088 def _output_handler(self, fd, event):
3090 if event & PollConstants.POLLIN:
3091 self._raw_metadata.append(self._files.ebuild.read())
# An empty read means EOF on the metadata pipe (the unregister branch
# appears elided here).
3092 if not self._raw_metadata[-1]:
3096 self._unregister_if_appropriate(event)
3097 return self._registered
3099 def _set_returncode(self, wait_retval):
3100 SubProcess._set_returncode(self, wait_retval)
3101 if self.returncode == os.EX_OK:
3102 metadata_lines = "".join(self._raw_metadata).splitlines()
3103 if len(portage.auxdbkeys) != len(metadata_lines):
3104 # Don't trust bash's returncode if the
3105 # number of lines is incorrect.
# One metadata line per auxdb key, zipped lazily and handed to the
# caller-supplied callback for cache storage.
3108 metadata = izip(portage.auxdbkeys, metadata_lines)
3109 self.metadata_callback(self.cpv, self.ebuild_path,
3110 self.repo_path, metadata, self.ebuild_mtime)
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# SpawnProcess subclass that runs one ebuild phase via portage.doebuild.
3112 class EbuildProcess(SpawnProcess):
3114 __slots__ = ("phase", "pkg", "settings", "tree")
3117 # Don't open the log file during the clean phase since the
3118 # open file can result in an nfs lock on $T/build.log which
3119 # prevents the clean phase from removing $T.
3120 if self.phase not in ("clean", "cleanrm"):
3121 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3122 SpawnProcess._start(self)
# Always use a pty (or pipe fallback) sized to the current stdout
# terminal, so build output renders correctly.
3124 def _pipe(self, fd_pipes):
3125 stdout_pipe = fd_pipes.get(1)
3126 got_pty, master_fd, slave_fd = \
3127 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3128 return (master_fd, slave_fd)
3130 def _spawn(self, args, **kwargs):
3132 root_config = self.pkg.root_config
3134 mydbapi = root_config.trees[tree].dbapi
3135 settings = self.settings
3136 ebuild_path = settings["EBUILD"]
3137 debug = settings.get("PORTAGE_DEBUG") == "1"
3139 rval = portage.doebuild(ebuild_path, self.phase,
3140 root_config.root, settings, debug,
3141 mydbapi=mydbapi, tree=tree, **kwargs)
3145 def _set_returncode(self, wait_retval):
3146 SpawnProcess._set_returncode(self, wait_retval)
# Cross-check the phase's exit-status file against the process exit
# code (skipped for clean phases, which remove $T).
3148 if self.phase not in ("clean", "cleanrm"):
3149 self.returncode = portage._doebuild_exit_status_check_and_log(
3150 self.settings, self.phase, self.returncode)
3152 if self.phase == "test" and self.returncode != os.EX_OK and \
3153 "test-fail-continue" in self.settings.features:
3154 self.returncode = os.EX_OK
3156 portage._post_phase_userpriv_perms(self.settings)
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Composite task wrapping one EbuildProcess plus the phase's
# post-phase commands (MiscFunctionsProcess) when any are configured.
3158 class EbuildPhase(CompositeTask):
3160 __slots__ = ("background", "pkg", "phase",
3161 "scheduler", "settings", "tree")
3163 _post_phase_cmds = portage._post_phase_cmds
3167 ebuild_process = EbuildProcess(background=self.background,
3168 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3169 settings=self.settings, tree=self.tree)
3171 self._start_task(ebuild_process, self._ebuild_exit)
3173 def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for QA problems; in background
# mode the report is appended to the log file itself.
3175 if self.phase == "install":
3177 log_path = self.settings.get("PORTAGE_LOG_FILE")
3179 if self.background and log_path is not None:
3180 log_file = open(log_path, 'a')
3183 portage._check_build_log(self.settings, out=out)
3185 if log_file is not None:
3188 if self._default_exit(ebuild_process) != os.EX_OK:
3192 settings = self.settings
3194 if self.phase == "install":
3195 portage._post_src_install_uid_fix(settings)
3197 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3198 if post_phase_cmds is not None:
3199 post_phase = MiscFunctionsProcess(background=self.background,
3200 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3201 scheduler=self.scheduler, settings=settings)
3202 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands: propagate the phase's returncode directly.
3205 self.returncode = ebuild_process.returncode
3206 self._current_task = None
3209 def _post_phase_exit(self, post_phase):
3210 if self._final_exit(post_phase) != os.EX_OK:
3211 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3213 self._current_task = None
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Runs the "package" phase to build a .tbz2 into a pid-unique temp
# file, injecting it into the binary tree only on success.
3217 class EbuildBinpkg(EbuildProcess):
3219 This assumes that src_install() has successfully completed.
3221 __slots__ = ("_binpkg_tmpfile",)
3224 self.phase = "package"
3225 self.tree = "porttree"
3227 root_config = pkg.root_config
3228 portdb = root_config.trees["porttree"].dbapi
3229 bintree = root_config.trees["bintree"]
3230 ebuild_path = portdb.findname(self.pkg.cpv)
3231 settings = self.settings
3232 debug = settings.get("PORTAGE_DEBUG") == "1"
3234 bintree.prevent_collision(pkg.cpv)
# Temp filename includes the pid so concurrent emerges cannot clobber
# each other's in-progress packages.
3235 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3236 pkg.cpv + ".tbz2." + str(os.getpid()))
3237 self._binpkg_tmpfile = binpkg_tmpfile
3238 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3239 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
# PORTAGE_BINPKG_TMPFILE is removed from settings again after the
# spawn (presumably via try/finally — the intervening lines are elided).
3242 EbuildProcess._start(self)
3244 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3246 def _set_returncode(self, wait_retval):
3247 EbuildProcess._set_returncode(self, wait_retval)
3250 bintree = pkg.root_config.trees["bintree"]
3251 binpkg_tmpfile = self._binpkg_tmpfile
3252 if self.returncode == os.EX_OK:
3253 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Synchronous merge of an already-built image into the live filesystem
# via portage.merge(); records the world atom and logs on success.
3255 class EbuildMerge(SlotObject):
3257 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3258 "pkg", "pkg_count", "pkg_path", "pretend",
3259 "scheduler", "settings", "tree", "world_atom")
3262 root_config = self.pkg.root_config
3263 settings = self.settings
3264 retval = portage.merge(settings["CATEGORY"],
3265 settings["PF"], settings["D"],
3266 os.path.join(settings["PORTAGE_BUILDDIR"],
3267 "build-info"), root_config.root, settings,
3268 myebuild=settings["EBUILD"],
3269 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3270 vartree=root_config.trees["vartree"],
3271 prev_mtimes=self.ldpath_mtimes,
3272 scheduler=self.scheduler,
3273 blockers=self.find_blockers)
# Only a successful merge updates the world file for this package.
3275 if retval == os.EX_OK:
3276 self.world_atom(self.pkg)
3281 def _log_success(self):
3283 pkg_count = self.pkg_count
3284 pkg_path = self.pkg_path
3285 logger = self.logger
3286 if "noclean" not in self.settings.features:
3287 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3288 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3289 logger.log((" === (%s of %s) " + \
3290 "Post-Build Cleaning (%s::%s)") % \
3291 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3292 short_msg=short_msg)
3293 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3294 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Wraps unmerge() as an AsynchronousTask, translating UninstallFailure
# into a returncode and routing messages to the log when backgrounded.
3296 class PackageUninstall(AsynchronousTask):
3298 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3302 unmerge(self.pkg.root_config, self.opts, "unmerge",
3303 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3304 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3305 writemsg_level=self._writemsg_level)
3306 except UninstallFailure, e:
3307 self.returncode = e.status
3309 self.returncode = os.EX_OK
3312 def _writemsg_level(self, msg, level=0, noiselevel=0):
3314 log_path = self.settings.get("PORTAGE_LOG_FILE")
3315 background = self.background
# No log file: write to the console unless we are backgrounded and the
# message is below WARNING severity.
3317 if log_path is None:
3318 if not (background and level < logging.WARNING):
3319 portage.util.writemsg_level(msg,
3320 level=level, noiselevel=noiselevel)
3323 portage.util.writemsg_level(msg,
3324 level=level, noiselevel=noiselevel)
3326 f = open(log_path, 'a')
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Composite task installing a binary package: optional prefetch, fetch,
# digest verification, clean, info/image extraction, setup, and merge —
# mirroring EbuildBuild's locking discipline on the build dir.
3332 class Binpkg(CompositeTask):
3334 __slots__ = ("find_blockers",
3335 "ldpath_mtimes", "logger", "opts",
3336 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3337 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3338 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3340 def _writemsg_level(self, msg, level=0, noiselevel=0):
3342 if not self.background:
3343 portage.util.writemsg_level(msg,
3344 level=level, noiselevel=noiselevel)
3346 log_path = self.settings.get("PORTAGE_LOG_FILE")
3347 if log_path is not None:
3348 f = open(log_path, 'a')
3357 settings = self.settings
3358 settings.setcpv(pkg)
3359 self._tree = "bintree"
3360 self._bintree = self.pkg.root_config.trees[self._tree]
3361 self._verify = not self.opts.pretend
# Build-dir layout: $PORTAGE_TMPDIR/portage/<cat>/<pf> with image/ and
# build-info/ subdirs and a synthesized .ebuild path for doebuild.
3363 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3364 "portage", pkg.category, pkg.pf)
3365 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3366 pkg=pkg, settings=settings)
3367 self._image_dir = os.path.join(dir_path, "image")
3368 self._infloc = os.path.join(dir_path, "build-info")
3369 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3370 settings["EBUILD"] = self._ebuild_path
3371 debug = settings.get("PORTAGE_DEBUG") == "1"
3372 portage.doebuild_environment(self._ebuild_path, "setup",
3373 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3374 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3376 # The prefetcher has already completed or it
3377 # could be running now. If it's running now,
3378 # wait for it to complete since it holds
3379 # a lock on the file being fetched. The
3380 # portage.locks functions are only designed
3381 # to work between separate processes. Since
3382 # the lock is held by the current process,
3383 # use the scheduler and fetcher methods to
3384 # synchronize with the fetcher.
3385 prefetcher = self.prefetcher
3386 if prefetcher is None:
3388 elif not prefetcher.isAlive():
3390 elif prefetcher.poll() is None:
3392 waiting_msg = ("Fetching '%s' " + \
3393 "in the background. " + \
3394 "To view fetch progress, run `tail -f " + \
3395 "/var/log/emerge-fetch.log` in another " + \
3396 "terminal.") % prefetcher.pkg_path
3397 msg_prefix = colorize("GOOD", " * ")
3398 from textwrap import wrap
3399 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3400 for line in wrap(waiting_msg, 65))
3401 if not self.background:
3402 writemsg(waiting_msg, noiselevel=-1)
3404 self._current_task = prefetcher
3405 prefetcher.addExitListener(self._prefetch_exit)
3408 self._prefetch_exit(prefetcher)
3410 def _prefetch_exit(self, prefetcher):
3413 pkg_count = self.pkg_count
# Unless pretend/fetchonly: take the build-dir lock and start from a
# fresh directory (ENOENT from rmtree is fine).
3414 if not (self.opts.pretend or self.opts.fetchonly):
3415 self._build_dir.lock()
3417 shutil.rmtree(self._build_dir.dir_path)
3418 except EnvironmentError, e:
3419 if e.errno != errno.ENOENT:
3422 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3423 fetcher = BinpkgFetcher(background=self.background,
3424 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3425 pretend=self.opts.pretend, scheduler=self.scheduler)
3426 pkg_path = fetcher.pkg_path
3427 self._pkg_path = pkg_path
# Only fetch when --getbinpkg is set and the package is remote;
# otherwise fall straight through to _fetcher_exit.
3429 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3431 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3432 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3433 short_msg = "emerge: (%s of %s) %s Fetch" % \
3434 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3435 self.logger.log(msg, short_msg=short_msg)
3436 self._start_task(fetcher, self._fetcher_exit)
3439 self._fetcher_exit(fetcher)
3441 def _fetcher_exit(self, fetcher):
3443 # The fetcher only has a returncode when
3444 # --getbinpkg is enabled.
3445 if fetcher.returncode is not None:
3446 self._fetched_pkg = True
3447 if self._default_exit(fetcher) != os.EX_OK:
3448 self._unlock_builddir()
3452 if self.opts.pretend:
3453 self._current_task = None
3454 self.returncode = os.EX_OK
3462 logfile = self.settings.get("PORTAGE_LOG_FILE")
3463 verifier = BinpkgVerifier(background=self.background,
3464 logfile=logfile, pkg=self.pkg)
3465 self._start_task(verifier, self._verifier_exit)
3468 self._verifier_exit(verifier)
3470 def _verifier_exit(self, verifier):
3471 if verifier is not None and \
3472 self._default_exit(verifier) != os.EX_OK:
3473 self._unlock_builddir()
3477 logger = self.logger
3479 pkg_count = self.pkg_count
3480 pkg_path = self._pkg_path
# A freshly fetched, verified package is injected into the local
# bintree index before merging.
3482 if self._fetched_pkg:
3483 self._bintree.inject(pkg.cpv, filename=pkg_path)
3485 if self.opts.fetchonly:
3486 self._current_task = None
3487 self.returncode = os.EX_OK
3491 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3492 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3493 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3494 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3495 logger.log(msg, short_msg=short_msg)
3498 settings = self.settings
3499 ebuild_phase = EbuildPhase(background=self.background,
3500 pkg=pkg, phase=phase, scheduler=self.scheduler,
3501 settings=settings, tree=self._tree)
3503 self._start_task(ebuild_phase, self._clean_exit)
3505 def _clean_exit(self, clean_phase):
3506 if self._default_exit(clean_phase) != os.EX_OK:
3507 self._unlock_builddir()
3511 dir_path = self._build_dir.dir_path
3514 shutil.rmtree(dir_path)
3515 except (IOError, OSError), e:
3516 if e.errno != errno.ENOENT:
3520 infloc = self._infloc
3522 pkg_path = self._pkg_path
3525 for mydir in (dir_path, self._image_dir, infloc):
3526 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3527 gid=portage.data.portage_gid, mode=dir_mode)
3529 # This initializes PORTAGE_LOG_FILE.
3530 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3531 self._writemsg_level(">>> Extracting info\n")
# Unpack the xpak metadata into build-info/, tracking which required
# keys (CATEGORY, PF) the package is missing.
3533 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3534 check_missing_metadata = ("CATEGORY", "PF")
3535 missing_metadata = set()
3536 for k in check_missing_metadata:
3537 v = pkg_xpak.getfile(k)
3539 missing_metadata.add(k)
3541 pkg_xpak.unpackinfo(infloc)
3542 for k in missing_metadata:
3550 f = open(os.path.join(infloc, k), 'wb')
3556 # Store the md5sum in the vdb.
3557 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3559 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3563 # This gives bashrc users an opportunity to do various things
3564 # such as remove binary packages after they're installed.
3565 settings = self.settings
3566 settings.setcpv(self.pkg)
3567 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3568 settings.backup_changes("PORTAGE_BINPKG_FILE")
3571 setup_phase = EbuildPhase(background=self.background,
3572 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3573 settings=settings, tree=self._tree)
3575 setup_phase.addExitListener(self._setup_exit)
3576 self._current_task = setup_phase
3577 self.scheduler.scheduleSetup(setup_phase)
3579 def _setup_exit(self, setup_phase):
3580 if self._default_exit(setup_phase) != os.EX_OK:
3581 self._unlock_builddir()
3585 extractor = BinpkgExtractorAsync(background=self.background,
3586 image_dir=self._image_dir,
3587 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3588 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3589 self._start_task(extractor, self._extractor_exit)
3591 def _extractor_exit(self, extractor):
3592 if self._final_exit(extractor) != os.EX_OK:
3593 self._unlock_builddir()
3594 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
# No build-dir lock is taken in pretend/fetchonly mode, so there is
# nothing to release in that case.
3598 def _unlock_builddir(self):
3599 if self.opts.pretend or self.opts.fetchonly:
3601 portage.elog.elog_process(self.pkg.cpv, self.settings)
3602 self._build_dir.unlock()
3606 # This gives bashrc users an opportunity to do various things
3607 # such as remove binary packages after they're installed.
3608 settings = self.settings
3609 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3610 settings.backup_changes("PORTAGE_BINPKG_FILE")
3612 merge = EbuildMerge(find_blockers=self.find_blockers,
3613 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3614 pkg=self.pkg, pkg_count=self.pkg_count,
3615 pkg_path=self._pkg_path, scheduler=self.scheduler,
3616 settings=settings, tree=self._tree, world_atom=self.world_atom)
# PORTAGE_BINPKG_FILE is popped and the lock released after execute()
# (presumably in a try/finally — the intervening lines are elided).
3619 retval = merge.execute()
3621 settings.pop("PORTAGE_BINPKG_FILE", None)
3622 self._unlock_builddir()
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Fetches one binary package via the configured FETCHCOMMAND /
# RESUMECOMMAND, with optional distlocks locking on the target path.
3625 class BinpkgFetcher(SpawnProcess):
3627 __slots__ = ("pkg", "pretend",
3628 "locked", "pkg_path", "_lock_obj")
3630 def __init__(self, **kwargs):
3631 SpawnProcess.__init__(self, **kwargs)
3633 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3641 pretend = self.pretend
3642 bintree = pkg.root_config.trees["bintree"]
3643 settings = bintree.settings
3644 use_locks = "distlocks" in settings.features
3645 pkg_path = self.pkg_path
3648 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# A partially downloaded file already flagged invalid by the bintree
# is resumed rather than refetched from scratch.
3651 exists = os.path.exists(pkg_path)
3652 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3653 if not (pretend or resume):
3654 # Remove existing file or broken symlink.
3660 # urljoin doesn't work correctly with
3661 # unrecognized protocols like sftp
3662 if bintree._remote_has_index:
3663 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3665 rel_uri = pkg.cpv + ".tbz2"
3666 uri = bintree._remote_base_uri.rstrip("/") + \
3667 "/" + rel_uri.lstrip("/")
3669 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3670 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the computed URI and succeeds.
3673 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3674 self.returncode = os.EX_OK
# Pick a protocol-specific command (e.g. FETCHCOMMAND_HTTP) when one is
# configured, otherwise fall back to the generic variable.
3678 protocol = urlparse.urlparse(uri)[0]
3679 fcmd_prefix = "FETCHCOMMAND"
3681 fcmd_prefix = "RESUMECOMMAND"
3682 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3684 fcmd = settings.get(fcmd_prefix)
3687 "DISTDIR" : os.path.dirname(pkg_path),
3689 "FILE" : os.path.basename(pkg_path)
3692 fetch_env = dict(settings.iteritems())
3693 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3694 for x in shlex.split(fcmd)]
3696 if self.fd_pipes is None:
3698 fd_pipes = self.fd_pipes
3700 # Redirect all output to stdout since some fetchers like
3701 # wget pollute stderr (if portage detects a problem then it
3702 # can send it's own message to stderr).
3703 fd_pipes.setdefault(0, sys.stdin.fileno())
3704 fd_pipes.setdefault(1, sys.stdout.fileno())
3705 fd_pipes.setdefault(2, sys.stdout.fileno())
3707 self.args = fetch_args
3708 self.env = fetch_env
3709 SpawnProcess._start(self)
3711 def _set_returncode(self, wait_retval):
3712 SpawnProcess._set_returncode(self, wait_retval)
3713 if self.returncode == os.EX_OK:
3714 # If possible, update the mtime to match the remote package if
3715 # the fetcher didn't already do it automatically.
3716 bintree = self.pkg.root_config.trees["bintree"]
3717 if bintree._remote_has_index:
3718 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3719 if remote_mtime is not None:
3721 remote_mtime = long(remote_mtime)
3726 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3730 if remote_mtime != local_mtime:
3732 os.utime(self.pkg_path,
3733 (remote_mtime, remote_mtime))
3742 This raises an AlreadyLocked exception if lock() is called
3743 while a lock is already held. In order to avoid this, call
3744 unlock() or check whether the "locked" attribute is True
3745 or False before calling lock().
3747 if self._lock_obj is not None:
3748 raise self.AlreadyLocked((self._lock_obj,))
3750 self._lock_obj = portage.locks.lockfile(
3751 self.pkg_path, wantnewlockfile=1)
3754 class AlreadyLocked(portage.exception.PortageException):
# unlock() is a no-op when no lock is held.
3758 if self._lock_obj is None:
3760 portage.locks.unlockfile(self._lock_obj)
3761 self._lock_obj = None
# NOTE(review): interior lines are elided in this listing; comments
# describe only the visible code.
# Synchronously digest-checks a fetched binary package, temporarily
# redirecting stdout/stderr to the log file in background mode, and
# renaming a corrupt file aside rather than deleting it.
3764 class BinpkgVerifier(AsynchronousTask):
3765 __slots__ = ("logfile", "pkg",)
3769 Note: Unlike a normal AsynchronousTask.start() method,
3770 this one does all work is synchronously. The returncode
3771 attribute will be set before it returns.
3775 root_config = pkg.root_config
3776 bintree = root_config.trees["bintree"]
3778 stdout_orig = sys.stdout
3779 stderr_orig = sys.stderr
3781 if self.background and self.logfile is not None:
3782 log_file = open(self.logfile, 'a')
3784 if log_file is not None:
3785 sys.stdout = log_file
3786 sys.stderr = log_file
3788 bintree.digestCheck(pkg)
3789 except portage.exception.FileNotFound:
3790 writemsg("!!! Fetching Binary failed " + \
3791 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3793 except portage.exception.DigestException, e:
3794 writemsg("\n!!! Digest verification failed:\n",
3796 writemsg("!!! %s\n" % e.value[0],
3798 writemsg("!!! Reason: %s\n" % e.value[1],
3800 writemsg("!!! Got: %s\n" % e.value[2],
3802 writemsg("!!! Expected: %s\n" % e.value[3],
# On any failure, move the bad file to a checksum-failure temp name so
# a later fetch can start clean while preserving the evidence.
3805 if rval != os.EX_OK:
3806 pkg_path = bintree.getname(pkg.cpv)
3807 head, tail = os.path.split(pkg_path)
3808 temp_filename = portage._checksum_failure_temp_file(head, tail)
3809 writemsg("File renamed to '%s'\n" % (temp_filename,),
# Restore the original streams before returning (presumably in a
# finally block — the intervening lines are elided).
3812 sys.stdout = stdout_orig
3813 sys.stderr = stderr_orig
3814 if log_file is not None:
3817 self.returncode = rval
3820 class BinpkgPrefetcher(CompositeTask):
3822 __slots__ = ("pkg",) + \
3823 ("pkg_path", "_bintree",)
3826 self._bintree = self.pkg.root_config.trees["bintree"]
3827 fetcher = BinpkgFetcher(background=self.background,
3828 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3829 scheduler=self.scheduler)
3830 self.pkg_path = fetcher.pkg_path
3831 self._start_task(fetcher, self._fetcher_exit)
3833 def _fetcher_exit(self, fetcher):
3835 if self._default_exit(fetcher) != os.EX_OK:
3839 verifier = BinpkgVerifier(background=self.background,
3840 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3841 self._start_task(verifier, self._verifier_exit)
3843 def _verifier_exit(self, verifier):
3844 if self._default_exit(verifier) != os.EX_OK:
3848 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3850 self._current_task = None
3851 self.returncode = os.EX_OK
3854 class BinpkgExtractorAsync(SpawnProcess):
3856 __slots__ = ("image_dir", "pkg", "pkg_path")
3858 _shell_binary = portage.const.BASH_BINARY
3861 self.args = [self._shell_binary, "-c",
3862 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3863 (portage._shell_quote(self.pkg_path),
3864 portage._shell_quote(self.image_dir))]
3866 self.env = self.pkg.root_config.settings.environ()
3867 SpawnProcess._start(self)
3869 class MergeListItem(CompositeTask):
3872 TODO: For parallel scheduling, everything here needs asynchronous
3873 execution support (start, poll, and wait methods).
3876 __slots__ = ("args_set",
3877 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3878 "find_blockers", "logger", "mtimedb", "pkg",
3879 "pkg_count", "pkg_to_replace", "prefetcher",
3880 "settings", "statusMessage", "world_atom") + \
3886 build_opts = self.build_opts
3889 # uninstall, executed by self.merge()
3890 self.returncode = os.EX_OK
3894 args_set = self.args_set
3895 find_blockers = self.find_blockers
3896 logger = self.logger
3897 mtimedb = self.mtimedb
3898 pkg_count = self.pkg_count
3899 scheduler = self.scheduler
3900 settings = self.settings
3901 world_atom = self.world_atom
3902 ldpath_mtimes = mtimedb["ldpath"]
3904 action_desc = "Emerging"
3906 if pkg.type_name == "binary":
3907 action_desc += " binary"
3909 if build_opts.fetchonly:
3910 action_desc = "Fetching"
3912 msg = "%s (%s of %s) %s" % \
3914 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3915 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3916 colorize("GOOD", pkg.cpv))
3918 portdb = pkg.root_config.trees["porttree"].dbapi
3919 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3920 if portdir_repo_name:
3921 pkg_repo_name = pkg.metadata.get("repository")
3922 if pkg_repo_name != portdir_repo_name:
3923 if not pkg_repo_name:
3924 pkg_repo_name = "unknown repo"
3925 msg += " from %s" % pkg_repo_name
3928 msg += " %s %s" % (preposition, pkg.root)
3930 if not build_opts.pretend:
3931 self.statusMessage(msg)
3932 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3933 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3935 if pkg.type_name == "ebuild":
3937 build = EbuildBuild(args_set=args_set,
3938 background=self.background,
3939 config_pool=self.config_pool,
3940 find_blockers=find_blockers,
3941 ldpath_mtimes=ldpath_mtimes, logger=logger,
3942 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3943 prefetcher=self.prefetcher, scheduler=scheduler,
3944 settings=settings, world_atom=world_atom)
3946 self._install_task = build
3947 self._start_task(build, self._default_final_exit)
3950 elif pkg.type_name == "binary":
3952 binpkg = Binpkg(background=self.background,
3953 find_blockers=find_blockers,
3954 ldpath_mtimes=ldpath_mtimes, logger=logger,
3955 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3956 prefetcher=self.prefetcher, settings=settings,
3957 scheduler=scheduler, world_atom=world_atom)
3959 self._install_task = binpkg
3960 self._start_task(binpkg, self._default_final_exit)
3964 self._install_task.poll()
3965 return self.returncode
3968 self._install_task.wait()
3969 return self.returncode
3974 build_opts = self.build_opts
3975 find_blockers = self.find_blockers
3976 logger = self.logger
3977 mtimedb = self.mtimedb
3978 pkg_count = self.pkg_count
3979 prefetcher = self.prefetcher
3980 scheduler = self.scheduler
3981 settings = self.settings
3982 world_atom = self.world_atom
3983 ldpath_mtimes = mtimedb["ldpath"]
3986 if not (build_opts.buildpkgonly or \
3987 build_opts.fetchonly or build_opts.pretend):
3989 uninstall = PackageUninstall(background=self.background,
3990 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3991 pkg=pkg, scheduler=scheduler, settings=settings)
3994 retval = uninstall.wait()
3995 if retval != os.EX_OK:
3999 if build_opts.fetchonly or \
4000 build_opts.buildpkgonly:
4001 return self.returncode
4003 retval = self._install_task.install()
4006 class PackageMerge(AsynchronousTask):
4008 TODO: Implement asynchronous merge so that the scheduler can
4009 run while a merge is executing.
4012 __slots__ = ("merge",)
4016 pkg = self.merge.pkg
4017 pkg_count = self.merge.pkg_count
4020 action_desc = "Uninstalling"
4021 preposition = "from"
4023 action_desc = "Installing"
4026 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4029 msg += " %s %s" % (preposition, pkg.root)
4031 if not self.merge.build_opts.fetchonly and \
4032 not self.merge.build_opts.pretend and \
4033 not self.merge.build_opts.buildpkgonly:
4034 self.merge.statusMessage(msg)
4036 self.returncode = self.merge.merge()
4039 class DependencyArg(object):
4040 def __init__(self, arg=None, root_config=None):
4042 self.root_config = root_config
4045 return str(self.arg)
4047 class AtomArg(DependencyArg):
4048 def __init__(self, atom=None, **kwargs):
4049 DependencyArg.__init__(self, **kwargs)
4051 if not isinstance(self.atom, portage.dep.Atom):
4052 self.atom = portage.dep.Atom(self.atom)
4053 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""Dependency argument that wraps a specific Package instance."""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Pin the exact version with an "=" atom built from the cpv.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom,)
4062 class SetArg(DependencyArg):
4063 def __init__(self, set=None, **kwargs):
4064 DependencyArg.__init__(self, **kwargs)
4066 self.name = self.arg[len(SETPREFIX):]
4068 class Dependency(SlotObject):
4069 __slots__ = ("atom", "blocker", "depth",
4070 "parent", "onlydeps", "priority", "root")
4071 def __init__(self, **kwargs):
4072 SlotObject.__init__(self, **kwargs)
4073 if self.priority is None:
4074 self.priority = DepPriority()
4075 if self.depth is None:
4078 class BlockerCache(portage.cache.mappings.MutableMapping):
4079 """This caches blockers of installed packages so that dep_check does not
4080 have to be done for every single installed package on every invocation of
4081 emerge. The cache is invalidated whenever it is detected that something
4082 has changed that might alter the results of dep_check() calls:
4083 1) the set of installed packages (including COUNTER) has changed
4084 2) the old-style virtuals have changed
4087 # Number of uncached packages to trigger cache update, since
4088 # it's wasteful to update it for every vdb change.
4089 _cache_threshold = 5
4091 class BlockerData(object):
4093 __slots__ = ("__weakref__", "atoms", "counter")
4095 def __init__(self, counter, atoms):
4096 self.counter = counter
4099 def __init__(self, myroot, vardb):
4101 self._virtuals = vardb.settings.getvirtuals()
4102 self._cache_filename = os.path.join(myroot,
4103 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4104 self._cache_version = "1"
4105 self._cache_data = None
4106 self._modified = set()
4111 f = open(self._cache_filename, mode='rb')
4112 mypickle = pickle.Unpickler(f)
4114 mypickle.find_global = None
4115 except AttributeError:
4116 # TODO: If py3k, override Unpickler.find_class().
4118 self._cache_data = mypickle.load()
4121 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4122 if isinstance(e, pickle.UnpicklingError):
4123 writemsg("!!! Error loading '%s': %s\n" % \
4124 (self._cache_filename, str(e)), noiselevel=-1)
4127 cache_valid = self._cache_data and \
4128 isinstance(self._cache_data, dict) and \
4129 self._cache_data.get("version") == self._cache_version and \
4130 isinstance(self._cache_data.get("blockers"), dict)
4132 # Validate all the atoms and counters so that
4133 # corruption is detected as soon as possible.
4134 invalid_items = set()
4135 for k, v in self._cache_data["blockers"].iteritems():
4136 if not isinstance(k, basestring):
4137 invalid_items.add(k)
4140 if portage.catpkgsplit(k) is None:
4141 invalid_items.add(k)
4143 except portage.exception.InvalidData:
4144 invalid_items.add(k)
4146 if not isinstance(v, tuple) or \
4148 invalid_items.add(k)
4151 if not isinstance(counter, (int, long)):
4152 invalid_items.add(k)
4154 if not isinstance(atoms, (list, tuple)):
4155 invalid_items.add(k)
4157 invalid_atom = False
4159 if not isinstance(atom, basestring):
4162 if atom[:1] != "!" or \
4163 not portage.isvalidatom(
4164 atom, allow_blockers=True):
4168 invalid_items.add(k)
4171 for k in invalid_items:
4172 del self._cache_data["blockers"][k]
4173 if not self._cache_data["blockers"]:
4177 self._cache_data = {"version":self._cache_version}
4178 self._cache_data["blockers"] = {}
4179 self._cache_data["virtuals"] = self._virtuals
4180 self._modified.clear()
4183 """If the current user has permission and the internal blocker cache
4184 been updated, save it to disk and mark it unmodified. This is called
4185 by emerge after it has proccessed blockers for all installed packages.
4186 Currently, the cache is only written if the user has superuser
4187 privileges (since that's required to obtain a lock), but all users
4188 have read access and benefit from faster blocker lookups (as long as
4189 the entire cache is still valid). The cache is stored as a pickled
4190 dict object with the following format:
4194 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4195 "virtuals" : vardb.settings.getvirtuals()
4198 if len(self._modified) >= self._cache_threshold and \
4201 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4202 pickle.dump(self._cache_data, f, protocol=2)
4204 portage.util.apply_secpass_permissions(
4205 self._cache_filename, gid=portage.portage_gid, mode=0644)
4206 except (IOError, OSError), e:
4208 self._modified.clear()
4210 def __setitem__(self, cpv, blocker_data):
4212 Update the cache and mark it as modified for a future call to
4215 @param cpv: Package for which to cache blockers.
4217 @param blocker_data: An object with counter and atoms attributes.
4218 @type blocker_data: BlockerData
4220 self._cache_data["blockers"][cpv] = \
4221 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4222 self._modified.add(cpv)
4225 if self._cache_data is None:
4226 # triggered by python-trace
4228 return iter(self._cache_data["blockers"])
4230 def __delitem__(self, cpv):
4231 del self._cache_data["blockers"][cpv]
4233 def __getitem__(self, cpv):
4236 @returns: An object with counter and atoms attributes.
4238 return self.BlockerData(*self._cache_data["blockers"][cpv])
4240 class BlockerDB(object):
4242 def __init__(self, root_config):
4243 self._root_config = root_config
4244 self._vartree = root_config.trees["vartree"]
4245 self._portdb = root_config.trees["porttree"].dbapi
4247 self._dep_check_trees = None
4248 self._fake_vartree = None
4250 def _get_fake_vartree(self, acquire_lock=0):
4251 fake_vartree = self._fake_vartree
4252 if fake_vartree is None:
4253 fake_vartree = FakeVartree(self._root_config,
4254 acquire_lock=acquire_lock)
4255 self._fake_vartree = fake_vartree
4256 self._dep_check_trees = { self._vartree.root : {
4257 "porttree" : fake_vartree,
4258 "vartree" : fake_vartree,
4261 fake_vartree.sync(acquire_lock=acquire_lock)
4264 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4265 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4266 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4267 settings = self._vartree.settings
4268 stale_cache = set(blocker_cache)
4269 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4270 dep_check_trees = self._dep_check_trees
4271 vardb = fake_vartree.dbapi
4272 installed_pkgs = list(vardb)
4274 for inst_pkg in installed_pkgs:
4275 stale_cache.discard(inst_pkg.cpv)
4276 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4277 if cached_blockers is not None and \
4278 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4279 cached_blockers = None
4280 if cached_blockers is not None:
4281 blocker_atoms = cached_blockers.atoms
4283 # Use aux_get() to trigger FakeVartree global
4284 # updates on *DEPEND when appropriate.
4285 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4287 portage.dep._dep_check_strict = False
4288 success, atoms = portage.dep_check(depstr,
4289 vardb, settings, myuse=inst_pkg.use.enabled,
4290 trees=dep_check_trees, myroot=inst_pkg.root)
4292 portage.dep._dep_check_strict = True
4294 pkg_location = os.path.join(inst_pkg.root,
4295 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4296 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4297 (pkg_location, atoms), noiselevel=-1)
4300 blocker_atoms = [atom for atom in atoms \
4301 if atom.startswith("!")]
4302 blocker_atoms.sort()
4303 counter = long(inst_pkg.metadata["COUNTER"])
4304 blocker_cache[inst_pkg.cpv] = \
4305 blocker_cache.BlockerData(counter, blocker_atoms)
4306 for cpv in stale_cache:
4307 del blocker_cache[cpv]
4308 blocker_cache.flush()
4310 blocker_parents = digraph()
4312 for pkg in installed_pkgs:
4313 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4314 blocker_atom = blocker_atom.lstrip("!")
4315 blocker_atoms.append(blocker_atom)
4316 blocker_parents.add(blocker_atom, pkg)
4318 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4319 blocking_pkgs = set()
4320 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4321 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4323 # Check for blockers in the other direction.
4324 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4326 portage.dep._dep_check_strict = False
4327 success, atoms = portage.dep_check(depstr,
4328 vardb, settings, myuse=new_pkg.use.enabled,
4329 trees=dep_check_trees, myroot=new_pkg.root)
4331 portage.dep._dep_check_strict = True
4333 # We should never get this far with invalid deps.
4334 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4337 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4340 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4341 for inst_pkg in installed_pkgs:
4343 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4344 except (portage.exception.InvalidDependString, StopIteration):
4346 blocking_pkgs.add(inst_pkg)
4348 return blocking_pkgs
4350 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4352 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4353 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4354 p_type, p_root, p_key, p_status = parent_node
4356 if p_status == "nomerge":
4357 category, pf = portage.catsplit(p_key)
4358 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4359 msg.append("Portage is unable to process the dependencies of the ")
4360 msg.append("'%s' package. " % p_key)
4361 msg.append("In order to correct this problem, the package ")
4362 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4363 msg.append("As a temporary workaround, the --nodeps option can ")
4364 msg.append("be used to ignore all dependencies. For reference, ")
4365 msg.append("the problematic dependencies can be found in the ")
4366 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4368 msg.append("This package can not be installed. ")
4369 msg.append("Please notify the '%s' package maintainer " % p_key)
4370 msg.append("about this problem.")
4372 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4373 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4375 class PackageVirtualDbapi(portage.dbapi):
4377 A dbapi-like interface class that represents the state of the installed
4378 package database as new packages are installed, replacing any packages
4379 that previously existed in the same slot. The main difference between
4380 this class and fakedbapi is that this one uses Package instances
4381 internally (passed in via cpv_inject() and cpv_remove() calls).
4383 def __init__(self, settings):
4384 portage.dbapi.__init__(self)
4385 self.settings = settings
4386 self._match_cache = {}
4392 Remove all packages.
4396 self._cp_map.clear()
4397 self._cpv_map.clear()
4400 obj = PackageVirtualDbapi(self.settings)
4401 obj._match_cache = self._match_cache.copy()
4402 obj._cp_map = self._cp_map.copy()
4403 for k, v in obj._cp_map.iteritems():
4404 obj._cp_map[k] = v[:]
4405 obj._cpv_map = self._cpv_map.copy()
4409 return self._cpv_map.itervalues()
4411 def __contains__(self, item):
4412 existing = self._cpv_map.get(item.cpv)
4413 if existing is not None and \
4418 def get(self, item, default=None):
4419 cpv = getattr(item, "cpv", None)
4423 type_name, root, cpv, operation = item
4425 existing = self._cpv_map.get(cpv)
4426 if existing is not None and \
4431 def match_pkgs(self, atom):
4432 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4434 def _clear_cache(self):
4435 if self._categories is not None:
4436 self._categories = None
4437 if self._match_cache:
4438 self._match_cache = {}
4440 def match(self, origdep, use_cache=1):
4441 result = self._match_cache.get(origdep)
4442 if result is not None:
4444 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4445 self._match_cache[origdep] = result
4448 def cpv_exists(self, cpv):
4449 return cpv in self._cpv_map
4451 def cp_list(self, mycp, use_cache=1):
4452 cachelist = self._match_cache.get(mycp)
4453 # cp_list() doesn't expand old-style virtuals
4454 if cachelist and cachelist[0].startswith(mycp):
4456 cpv_list = self._cp_map.get(mycp)
4457 if cpv_list is None:
4460 cpv_list = [pkg.cpv for pkg in cpv_list]
4461 self._cpv_sort_ascending(cpv_list)
4462 if not (not cpv_list and mycp.startswith("virtual/")):
4463 self._match_cache[mycp] = cpv_list
4467 return list(self._cp_map)
4470 return list(self._cpv_map)
4472 def cpv_inject(self, pkg):
4473 cp_list = self._cp_map.get(pkg.cp)
4476 self._cp_map[pkg.cp] = cp_list
4477 e_pkg = self._cpv_map.get(pkg.cpv)
4478 if e_pkg is not None:
4481 self.cpv_remove(e_pkg)
4482 for e_pkg in cp_list:
4483 if e_pkg.slot_atom == pkg.slot_atom:
4486 self.cpv_remove(e_pkg)
4489 self._cpv_map[pkg.cpv] = pkg
4492 def cpv_remove(self, pkg):
4493 old_pkg = self._cpv_map.get(pkg.cpv)
4496 self._cp_map[pkg.cp].remove(pkg)
4497 del self._cpv_map[pkg.cpv]
4500 def aux_get(self, cpv, wants):
4501 metadata = self._cpv_map[cpv].metadata
4502 return [metadata.get(x, "") for x in wants]
4504 def aux_update(self, cpv, values):
4505 self._cpv_map[cpv].metadata.update(values)
4508 class depgraph(object):
4510 pkg_tree_map = RootConfig.pkg_tree_map
4512 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4514 def __init__(self, settings, trees, myopts, myparams, spinner):
4515 self.settings = settings
4516 self.target_root = settings["ROOT"]
4517 self.myopts = myopts
4518 self.myparams = myparams
4520 if settings.get("PORTAGE_DEBUG", "") == "1":
4522 self.spinner = spinner
4523 self._running_root = trees["/"]["root_config"]
4524 self._opts_no_restart = Scheduler._opts_no_restart
4525 self.pkgsettings = {}
4526 # Maps slot atom to package for each Package added to the graph.
4527 self._slot_pkg_map = {}
4528 # Maps nodes to the reasons they were selected for reinstallation.
4529 self._reinstall_nodes = {}
4532 self._trees_orig = trees
4534 # Contains a filtered view of preferred packages that are selected
4535 # from available repositories.
4536 self._filtered_trees = {}
4537 # Contains installed packages and new packages that have been added
4539 self._graph_trees = {}
4540 # All Package instances
4541 self._pkg_cache = {}
4542 for myroot in trees:
4543 self.trees[myroot] = {}
4544 # Create a RootConfig instance that references
4545 # the FakeVartree instead of the real one.
4546 self.roots[myroot] = RootConfig(
4547 trees[myroot]["vartree"].settings,
4549 trees[myroot]["root_config"].setconfig)
4550 for tree in ("porttree", "bintree"):
4551 self.trees[myroot][tree] = trees[myroot][tree]
4552 self.trees[myroot]["vartree"] = \
4553 FakeVartree(trees[myroot]["root_config"],
4554 pkg_cache=self._pkg_cache)
4555 self.pkgsettings[myroot] = portage.config(
4556 clone=self.trees[myroot]["vartree"].settings)
4557 self._slot_pkg_map[myroot] = {}
4558 vardb = self.trees[myroot]["vartree"].dbapi
4559 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4560 "--buildpkgonly" not in self.myopts
4561 # This fakedbapi instance will model the state that the vdb will
4562 # have after new packages have been installed.
4563 fakedb = PackageVirtualDbapi(vardb.settings)
4564 if preload_installed_pkgs:
4566 self.spinner.update()
4567 # This triggers metadata updates via FakeVartree.
4568 vardb.aux_get(pkg.cpv, [])
4569 fakedb.cpv_inject(pkg)
4571 # Now that the vardb state is cached in our FakeVartree,
4572 # we won't be needing the real vartree cache for awhile.
4573 # To make some room on the heap, clear the vardbapi
4575 trees[myroot]["vartree"].dbapi._clear_cache()
4578 self.mydbapi[myroot] = fakedb
4581 graph_tree.dbapi = fakedb
4582 self._graph_trees[myroot] = {}
4583 self._filtered_trees[myroot] = {}
4584 # Substitute the graph tree for the vartree in dep_check() since we
4585 # want atom selections to be consistent with package selections
4586 # have already been made.
4587 self._graph_trees[myroot]["porttree"] = graph_tree
4588 self._graph_trees[myroot]["vartree"] = graph_tree
4589 def filtered_tree():
4591 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4592 self._filtered_trees[myroot]["porttree"] = filtered_tree
4594 # Passing in graph_tree as the vartree here could lead to better
4595 # atom selections in some cases by causing atoms for packages that
4596 # have been added to the graph to be preferred over other choices.
4597 # However, it can trigger atom selections that result in
4598 # unresolvable direct circular dependencies. For example, this
4599 # happens with gwydion-dylan which depends on either itself or
4600 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4601 # gwydion-dylan-bin needs to be selected in order to avoid a
4602 # an unresolvable direct circular dependency.
4604 # To solve the problem described above, pass in "graph_db" so that
4605 # packages that have been added to the graph are distinguishable
4606 # from other available packages and installed packages. Also, pass
4607 # the parent package into self._select_atoms() calls so that
4608 # unresolvable direct circular dependencies can be detected and
4609 # avoided when possible.
4610 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4611 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4614 portdb = self.trees[myroot]["porttree"].dbapi
4615 bindb = self.trees[myroot]["bintree"].dbapi
4616 vardb = self.trees[myroot]["vartree"].dbapi
4617 # (db, pkg_type, built, installed, db_keys)
4618 if "--usepkgonly" not in self.myopts:
4619 db_keys = list(portdb._aux_cache_keys)
4620 dbs.append((portdb, "ebuild", False, False, db_keys))
4621 if "--usepkg" in self.myopts:
4622 db_keys = list(bindb._aux_cache_keys)
4623 dbs.append((bindb, "binary", True, False, db_keys))
4624 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4625 dbs.append((vardb, "installed", True, True, db_keys))
4626 self._filtered_trees[myroot]["dbs"] = dbs
4627 if "--usepkg" in self.myopts:
4628 self.trees[myroot]["bintree"].populate(
4629 "--getbinpkg" in self.myopts,
4630 "--getbinpkgonly" in self.myopts)
4633 self.digraph=portage.digraph()
4634 # contains all sets added to the graph
4636 # contains atoms given as arguments
4637 self._sets["args"] = InternalPackageSet()
4638 # contains all atoms from all sets added to the graph, including
4639 # atoms given as arguments
4640 self._set_atoms = InternalPackageSet()
4641 self._atom_arg_map = {}
4642 # contains all nodes pulled in by self._set_atoms
4643 self._set_nodes = set()
4644 # Contains only Blocker -> Uninstall edges
4645 self._blocker_uninstalls = digraph()
4646 # Contains only Package -> Blocker edges
4647 self._blocker_parents = digraph()
4648 # Contains only irrelevant Package -> Blocker edges
4649 self._irrelevant_blockers = digraph()
4650 # Contains only unsolvable Package -> Blocker edges
4651 self._unsolvable_blockers = digraph()
4652 # Contains all Blocker -> Blocked Package edges
4653 self._blocked_pkgs = digraph()
4654 # Contains world packages that have been protected from
4655 # uninstallation but may not have been added to the graph
4656 # if the graph is not complete yet.
4657 self._blocked_world_pkgs = {}
4658 self._slot_collision_info = {}
4659 # Slot collision nodes are not allowed to block other packages since
4660 # blocker validation is only able to account for one package per slot.
4661 self._slot_collision_nodes = set()
4662 self._parent_atoms = {}
4663 self._slot_conflict_parent_atoms = set()
4664 self._serialized_tasks_cache = None
4665 self._scheduler_graph = None
4666 self._displayed_list = None
4667 self._pprovided_args = []
4668 self._missing_args = []
4669 self._masked_installed = set()
4670 self._unsatisfied_deps_for_display = []
4671 self._unsatisfied_blockers_for_display = None
4672 self._circular_deps_for_display = None
4673 self._dep_stack = []
4674 self._unsatisfied_deps = []
4675 self._initially_unsatisfied_deps = []
4676 self._ignored_deps = []
4677 self._required_set_names = set(["system", "world"])
4678 self._select_atoms = self._select_atoms_highest_available
4679 self._select_package = self._select_pkg_highest_available
4680 self._highest_pkg_cache = {}
4682 def _show_slot_collision_notice(self):
4683 """Show an informational message advising the user to mask one of the
4684 the packages. In some cases it may be possible to resolve this
4685 automatically, but support for backtracking (removal nodes that have
4686 already been selected) will be required in order to handle all possible
4690 if not self._slot_collision_info:
4693 self._show_merge_list()
4696 msg.append("\n!!! Multiple package instances within a single " + \
4697 "package slot have been pulled\n")
4698 msg.append("!!! into the dependency graph, resulting" + \
4699 " in a slot conflict:\n\n")
4701 # Max number of parents shown, to avoid flooding the display.
4703 explanation_columns = 70
4705 for (slot_atom, root), slot_nodes \
4706 in self._slot_collision_info.iteritems():
4707 msg.append(str(slot_atom))
4710 for node in slot_nodes:
4712 msg.append(str(node))
4713 parent_atoms = self._parent_atoms.get(node)
4716 # Prefer conflict atoms over others.
4717 for parent_atom in parent_atoms:
4718 if len(pruned_list) >= max_parents:
4720 if parent_atom in self._slot_conflict_parent_atoms:
4721 pruned_list.add(parent_atom)
4723 # If this package was pulled in by conflict atoms then
4724 # show those alone since those are the most interesting.
4726 # When generating the pruned list, prefer instances
4727 # of DependencyArg over instances of Package.
4728 for parent_atom in parent_atoms:
4729 if len(pruned_list) >= max_parents:
4731 parent, atom = parent_atom
4732 if isinstance(parent, DependencyArg):
4733 pruned_list.add(parent_atom)
4734 # Prefer Packages instances that themselves have been
4735 # pulled into collision slots.
4736 for parent_atom in parent_atoms:
4737 if len(pruned_list) >= max_parents:
4739 parent, atom = parent_atom
4740 if isinstance(parent, Package) and \
4741 (parent.slot_atom, parent.root) \
4742 in self._slot_collision_info:
4743 pruned_list.add(parent_atom)
4744 for parent_atom in parent_atoms:
4745 if len(pruned_list) >= max_parents:
4747 pruned_list.add(parent_atom)
4748 omitted_parents = len(parent_atoms) - len(pruned_list)
4749 parent_atoms = pruned_list
4750 msg.append(" pulled in by\n")
4751 for parent_atom in parent_atoms:
4752 parent, atom = parent_atom
4753 msg.append(2*indent)
4754 if isinstance(parent,
4755 (PackageArg, AtomArg)):
4756 # For PackageArg and AtomArg types, it's
4757 # redundant to display the atom attribute.
4758 msg.append(str(parent))
4760 # Display the specific atom from SetArg or
4762 msg.append("%s required by %s" % (atom, parent))
4765 msg.append(2*indent)
4766 msg.append("(and %d more)\n" % omitted_parents)
4768 msg.append(" (no parents)\n")
4770 explanation = self._slot_conflict_explanation(slot_nodes)
4773 msg.append(indent + "Explanation:\n\n")
4774 for line in textwrap.wrap(explanation, explanation_columns):
4775 msg.append(2*indent + line + "\n")
4778 sys.stderr.write("".join(msg))
4781 explanations_for_all = explanations == len(self._slot_collision_info)
4783 if explanations_for_all or "--quiet" in self.myopts:
4787 msg.append("It may be possible to solve this problem ")
4788 msg.append("by using package.mask to prevent one of ")
4789 msg.append("those packages from being selected. ")
4790 msg.append("However, it is also possible that conflicting ")
4791 msg.append("dependencies exist such that they are impossible to ")
4792 msg.append("satisfy simultaneously. If such a conflict exists in ")
4793 msg.append("the dependencies of two different packages, then those ")
4794 msg.append("packages can not be installed simultaneously.")
4796 from formatter import AbstractFormatter, DumbWriter
4797 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4799 f.add_flowing_data(x)
4803 msg.append("For more information, see MASKED PACKAGES ")
4804 msg.append("section in the emerge man page or refer ")
4805 msg.append("to the Gentoo Handbook.")
4807 f.add_flowing_data(x)
def _slot_conflict_explanation(self, slot_nodes):
    """
    Generate a human-readable suggestion for resolving a two-package
    slot conflict, or no suggestion when the situation is too complex.

    When a slot conflict occurs due to USE deps, there are a few
    different cases to consider:

    1) New USE are correctly set but --newuse wasn't requested so an
       installed package with incorrect USE happened to get pulled
       into graph before the new one.

    2) New USE are incorrectly set but an installed package has correct
       USE so it got pulled into the graph, and a new instance also got
       pulled in due to --newuse or an upgrade.

    3) Multiple USE deps exist that can't be satisfied simultaneously,
       and multiple package instances got pulled into the same slot to
       satisfy the conflicting deps.

    Currently, explanations and suggested courses of action are generated
    for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

    @param slot_nodes: the set of package nodes occupying one slot
    """
    # NOTE(review): this excerpt elides several source lines (early
    # returns, branch keywords and the matched_node assignment); only
    # the visible statements are documented below.

    if len(slot_nodes) != 2:
        # Suggestions are only implemented for
        # conflicts between two packages.
        # [early return elided in excerpt]

    all_conflict_atoms = self._slot_conflict_parent_atoms
    matched_atoms = None
    unmatched_node = None
    for node in slot_nodes:
        parent_atoms = self._parent_atoms.get(node)
        if not parent_atoms:
            # Normally, there are always parent atoms. If there are
            # none then something unexpected is happening and there's
            # currently no suggestion for this case.
            # [early return elided in excerpt]
        conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
        for parent_atom in conflict_atoms:
            parent, atom = parent_atom
            # Suggestions are currently only implemented for cases
            # in which all conflict atoms have USE deps.
        if matched_node is not None:
            # If conflict atoms match multiple nodes
            # then there's no suggestion.
            # [early return elided in excerpt]
        matched_atoms = conflict_atoms
        if unmatched_node is not None:
            # Neither node is matched by conflict atoms, and
            # there is no suggestion for this case.
            # [early return elided in excerpt]
        unmatched_node = node

    if matched_node is None or unmatched_node is None:
        # This shouldn't happen.
        # [early return elided in excerpt]

    if unmatched_node.installed and not matched_node.installed and \
        unmatched_node.cpv == matched_node.cpv:
        # If the conflicting packages are the same version then
        # --newuse should be all that's needed. If they are different
        # versions then there's some other problem.
        return "New USE are correctly set, but --newuse wasn't" + \
            " requested, so an installed package with incorrect USE " + \
            "happened to get pulled into the dependency graph. " + \
            "In order to solve " + \
            "this, either specify the --newuse option or explicitly " + \
            " reinstall '%s'." % matched_node.slot_atom

    if matched_node.installed and not unmatched_node.installed:
        # Case 2: the new instance's USE deps are not satisfied as
        # currently configured; list every conflicting atom.
        atoms = sorted(set(atom for parent, atom in matched_atoms))
        explanation = ("New USE for '%s' are incorrectly set. " + \
            "In order to solve this, adjust USE to satisfy '%s'") % \
            (matched_node.slot_atom, atoms[0])
        for atom in atoms[1:-1]:
            explanation += ", '%s'" % (atom,)
        explanation += " and '%s'" % (atoms[-1],)
def _process_slot_conflicts(self):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.

    Populates self._slot_conflict_parent_atoms and extends the
    per-package entries of self._parent_atoms.
    """
    # NOTE(review): a few control-flow lines (continue statements)
    # are elided in this excerpt.
    for (slot_atom, root), slot_nodes \
        in self._slot_collision_info.iteritems():

        # Gather every (parent, atom) pair that pulled any package
        # into this slot.
        all_parent_atoms = set()
        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if not parent_atoms:
                # [continue elided in excerpt]
            all_parent_atoms.update(parent_atoms)

        for pkg in slot_nodes:
            parent_atoms = self._parent_atoms.get(pkg)
            if parent_atoms is None:
                # Lazily create the per-package parent-atom set.
                parent_atoms = set()
                self._parent_atoms[pkg] = parent_atoms
            for parent_atom in all_parent_atoms:
                if parent_atom in parent_atoms:
                    # [continue elided in excerpt]
                # Use package set for matching since it will match via
                # PROVIDE when necessary, while match_from_list does not.
                parent, atom = parent_atom
                atom_set = InternalPackageSet(
                    initial_atoms=(atom,))
                if atom_set.findAtomForPackage(pkg):
                    parent_atoms.add(parent_atom)
                    # This atom matches only a subset of the slot's
                    # occupants, so it is a conflict-causing atom.
                    self._slot_conflict_parent_atoms.add(parent_atom)
def _reinstall_for_flags(self, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags."""
    # NOTE(review): the return statements are elided in this excerpt.
    if "--newuse" in self.myopts:
        # With --newuse, both IUSE changes (minus forced flags) and
        # enabled-flag changes trigger a reinstall.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
    elif "changed-use" == self.myopts.get("--reinstall"):
        # With --reinstall=changed-use, only changes in the enabled
        # flag set matter; IUSE-only changes are ignored.
        flags = orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use))
def _create_graph(self, allow_unsatisfied=False):
    """
    Drain the dependency stack, adding each queued item to the graph.
    Package entries have their dependencies expanded; Dependency
    entries are resolved to a package and added.
    """
    # NOTE(review): the surrounding while-loop header and the
    # return statements are elided in this excerpt.
    dep_stack = self._dep_stack
    self.spinner.update()
    dep = dep_stack.pop()
    if isinstance(dep, Package):
        if not self._add_pkg_deps(dep,
            allow_unsatisfied=allow_unsatisfied):
            # [failure return elided in excerpt]
    if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
        # [failure return elided in excerpt]
def _add_dep(self, dep, allow_unsatisfied=False):
    """
    Resolve a single Dependency to a package (or a Blocker) and add
    it to the graph.

    @param dep: the Dependency to resolve
    @param allow_unsatisfied: when True, queue unsatisfiable deps on
        self._unsatisfied_deps instead of treating them as failures
    """
    # NOTE(review): this excerpt elides several source lines (the
    # blocker-branch header, a "not nodeps" clause, try statements
    # and return statements); comments describe visible code only.
    debug = "--debug" in self.myopts
    buildpkgonly = "--buildpkgonly" in self.myopts
    nodeps = "--nodeps" in self.myopts
    empty = "empty" in self.myparams
    deep = "deep" in self.myparams
    # --update only forces upgrades for shallow (argument-level) deps.
    update = "--update" in self.myopts and dep.depth <= 1

    if not buildpkgonly and \
        dep.parent not in self._slot_collision_nodes:
        if dep.parent.onlydeps:
            # It's safe to ignore blockers if the
            # parent is an --onlydeps node.
            # [early return elided in excerpt]
        # The blocker applies to the root where
        # the parent is or will be installed.
        blocker = Blocker(atom=dep.atom,
            eapi=dep.parent.metadata["EAPI"],
            root=dep.parent.root)
        self._blocker_parents.add(blocker, dep.parent)

    dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
        onlydeps=dep.onlydeps)
    if dep.priority.optional:
        # This could be an unnecessary build-time dep
        # pulled in by --with-bdeps=y.
        # [early return elided in excerpt]
    if allow_unsatisfied:
        self._unsatisfied_deps.append(dep)
        # [early return elided in excerpt]
    self._unsatisfied_deps_for_display.append(
        ((dep.root, dep.atom), {"myparent":dep.parent}))

    # In some cases, dep_check will return deps that shouldn't
    # be processed any further, so they are identified and
    # discarded here. Try to discard as few as possible since
    # discarded dependencies reduce the amount of information
    # available for optimization of merge order.
    if dep.priority.satisfied and \
        not dep_pkg.installed and \
        not (existing_node or empty or deep or update):
        if dep.root == self.target_root:
            # Keep the dep if the package matches an argument atom.
            myarg = self._iter_atoms_for_pkg(dep_pkg).next()
        except StopIteration:
            # [handler body elided in excerpt]
        except portage.exception.InvalidDependString:
            if not dep_pkg.installed:
                # This shouldn't happen since the package
                # should have been masked.
        self._ignored_deps.append(dep)

    if not self._add_pkg(dep_pkg, dep):
        # [failure return elided in excerpt]
def _add_pkg(self, pkg, dep):
    """
    Add a resolved package node (and the edge from its parent) to the
    dependency digraph, detecting slot collisions along the way.
    """
    # NOTE(review): numerous source lines are elided in this excerpt
    # (else branches, try statements, returns and some argument
    # continuations); comments describe only the visible statements.
    myparent = dep.parent
    priority = dep.priority
    if priority is None:
        priority = DepPriority()
    """
    Fills the digraph with nodes comprised of packages to merge.
    mybigkey is the package spec of the package to merge.
    myparent is the package depending on mybigkey ( or None )
    addme = Should we add this package to the digraph or are we just looking at it's deps?
    Think --onlydeps, we need to ignore packages in that case.
    """
    #IUSE-aware emerge -> USE DEP aware depgraph
    #"no downgrade" emerge

    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self.digraph

    # select the correct /var database that we'll be checking against
    vardbapi = self.trees[pkg.root]["vartree"].dbapi
    pkgsettings = self.pkgsettings[pkg.root]

    arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString, e:
        if not pkg.installed:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))

    if not pkg.onlydeps:
        if not pkg.installed and \
            "empty" not in self.myparams and \
            vardbapi.match(pkg.slot_atom):
            # Increase the priority of dependencies on packages that
            # are being rebuilt. This optimizes merge order so that
            # dependencies are rebuilt/updated as soon as possible,
            # which is needed especially when emerge is called by
            # revdep-rebuild since dependencies may be affected by ABI
            # breakage that has rendered them useless. Don't adjust
            # priority here when in "empty" mode since all packages
            # are being merged in that case.
            priority.rebuild = True

        existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
        slot_collision = False
        existing_node_matches = pkg.cpv == existing_node.cpv
        if existing_node_matches and \
            pkg != existing_node and \
            dep.atom is not None:
            # Use package set for matching since it will match via
            # PROVIDE when necessary, while match_from_list does not.
            atom_set = InternalPackageSet(initial_atoms=[dep.atom])
            if not atom_set.findAtomForPackage(existing_node):
                existing_node_matches = False
        if existing_node_matches:
            # The existing node can be reused.
            for parent_atom in arg_atoms:
                parent, atom = parent_atom
                self.digraph.add(existing_node, parent,
                self._add_parent_atom(existing_node, parent_atom)
            # If a direct circular dependency is not an unsatisfied
            # buildtime dependency then drop it here since otherwise
            # it can skew the merge order calculation in an unwanted
            if existing_node != myparent or \
                (priority.buildtime and not priority.satisfied):
                self.digraph.addnode(existing_node, myparent,
                if dep.atom is not None and dep.parent is not None:
                    self._add_parent_atom(existing_node,
                        (dep.parent, dep.atom))

            # A slot collision has occurred.  Sometimes this coincides
            # with unresolvable blockers, so the slot collision will be
            # shown later if there are no unresolvable blockers.
            self._add_slot_conflict(pkg)
            slot_collision = True

        # Now add this node to the graph so that self.display()
        # can show use flags and --tree portage.output.  This node is
        # only being partially added to the graph.  It must not be
        # allowed to interfere with the other nodes that have been
        # added.  Do not overwrite data for existing nodes in
        # self.mydbapi since that data will be used for blocker
        # Even though the graph is now invalid, continue to process
        # dependencies so that things like --fetchonly can still
        # function despite collisions.

    elif not previously_added:
        # First time we see this slot: register the node and inject
        # its metadata into the scratch dbapi used for resolution.
        self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
        self.mydbapi[pkg.root].cpv_inject(pkg)
        self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

    if not pkg.installed:
        # Allow this package to satisfy old-style virtuals in case it
        # doesn't already. Any pre-existing providers will be preferred
        pkgsettings.setinst(pkg.cpv, pkg.metadata)
        # For consistency, also update the global virtuals.
        settings = self.roots[pkg.root].settings
        settings.setinst(pkg.cpv, pkg.metadata)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(
                pkg, pkg.metadata["PROVIDE"], str(e))

    self._set_nodes.add(pkg)

    # Do this even when addme is False (--onlydeps) so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    self.digraph.add(pkg, myparent, priority=priority)
    if dep.atom is not None and dep.parent is not None:
        self._add_parent_atom(pkg, (dep.parent, dep.atom))

    for parent_atom in arg_atoms:
        parent, atom = parent_atom
        self.digraph.add(pkg, parent, priority=priority)
        self._add_parent_atom(pkg, parent_atom)

    """ This section determines whether we go deeper into dependencies or not.
        We want to go deeper on a few occasions:
        Installing package A, we need to make sure package A's deps are met.
        emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
        If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    """
    dep_stack = self._dep_stack
    if "recurse" not in self.myparams:
        # [early return elided in excerpt]
    elif pkg.installed and \
        "deep" not in self.myparams:
        # Installed packages' deps are only followed with --deep;
        # otherwise they are parked for possible later processing.
        dep_stack = self._ignored_deps

    self.spinner.update()

    if not previously_added:
        dep_stack.append(pkg)
5197 def _add_parent_atom(self, pkg, parent_atom):
5198 parent_atoms = self._parent_atoms.get(pkg)
5199 if parent_atoms is None:
5200 parent_atoms = set()
5201 self._parent_atoms[pkg] = parent_atoms
5202 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
    """
    Record pkg as a slot-conflict participant, grouping it with the
    package that already occupies the same (slot atom, root) in
    self._slot_collision_info.
    """
    # NOTE(review): the creation of a fresh set for a first-time
    # slot_key is elided in this excerpt.
    self._slot_collision_nodes.add(pkg)
    slot_key = (pkg.slot_atom, pkg.root)
    slot_nodes = self._slot_collision_info.get(slot_key)
    if slot_nodes is None:
        # The previously-added occupant of this slot is part of the
        # conflict group too.
        slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
        self._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """
    Expand pkg's DEPEND/RDEPEND/PDEPEND strings into Dependency
    objects and feed each one to self._add_dep().
    """
    # NOTE(review): several source lines are elided in this excerpt
    # (the myroot/jbigkey assignments, edepend initialization, try
    # statements, debug guards, loop headers and returns); comments
    # describe only the visible statements.
    mytype = pkg.type_name
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams

    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    edepend[k] = metadata[k]

    if not pkg.built and \
        "--buildpkgonly" in self.myopts and \
        "deep" not in self.myparams and \
        "empty" not in self.myparams:
        # With --buildpkgonly and shallow resolution, runtime deps
        # of a to-be-built package can be ignored.
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_optional = False

    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but mark them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            bdeps_optional = True
        # built packages do not have build time dependencies.
        edepend["DEPEND"] = ""

    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    # (dep_root, dep_string, priority) triples: DEPEND always applies
    # to "/", while RDEPEND/PDEPEND apply to the target root.
    ("/", edepend["DEPEND"],
        self._priority(buildtime=(not bdeps_optional),
        optional=bdeps_optional)),
    (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
    (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))

    debug = "--debug" in self.myopts
    # Installed packages get lenient depstring parsing.
    strict = mytype != "installed"

    for dep_root, dep_string, dep_priority in deps:
        print "Parent: ", jbigkey
        print "Depstring:", dep_string
        print "Priority:", dep_priority
        vardb = self.roots[dep_root].trees["vartree"].dbapi
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=myuse, parent=pkg, strict=strict,
            priority=dep_priority)
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(jbigkey, dep_string, str(e))
        print "Candidates:", selected_atoms

        for atom in selected_atoms:
            atom = portage.dep.Atom(atom)
            mypriority = dep_priority.copy()
            if not atom.blocker and vardb.match(atom):
                # An installed package already satisfies the atom,
                # so mark the dep satisfied for merge ordering.
                mypriority.satisfied = True
            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, depth=depth, parent=pkg,
                priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
                # [failure return elided in excerpt]
        except portage.exception.InvalidAtom, e:
            show_invalid_depstring_notice(
                pkg, dep_string, str(e))
            if not pkg.installed:
                # [failure return elided in excerpt]
        print "Exiting...", jbigkey
    except portage.exception.AmbiguousPackageName, e:
        # A dep atom without a category matched several packages.
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        portage.writemsg(" %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            "!!! This binary package cannot be installed: '%s'\n" % \
                mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
def _priority(self, **kwargs):
    """
    Construct a dependency priority object appropriate for the
    current run mode.

    @param kwargs: keyword arguments forwarded to the priority
        constructor (e.g. buildtime=True, runtime=True)
    @returns: an UnmergeDepPriority in "remove" mode, otherwise a
        DepPriority
    """
    # NOTE(review): in the excerpt as received, the DepPriority
    # assignment unconditionally clobbered the UnmergeDepPriority one,
    # making the "remove" check dead code; restore the else branch so
    # removal actions actually use UnmergeDepPriority.
    if "remove" in self.myparams:
        priority_constructor = UnmergeDepPriority
    else:
        priority_constructor = DepPriority
    return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @returns: a list of atoms containing categories (possibly empty)
    """
    # NOTE(review): the initialization of the category set / deps list
    # and the final return are elided in this excerpt.
    # Splice a dummy "null" category in so the package-name part can
    # be extracted with the normal atom helpers.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    # Collect every category, in any configured db, that contains a
    # package with the given name.
    dbs = self._filtered_trees[root_config.root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        for cat in db.categories:
            if db.cp_list("%s/%s" % (cat, atom_pn)):
                # [categories.add(cat) elided in excerpt]
    for cat in categories:
        deps.append(insert_category_into_atom(
            atom_without_category, cat))
5363 def _have_new_virt(self, root, atom_cp):
5365 for db, pkg_type, built, installed, db_keys in \
5366 self._filtered_trees[root]["dbs"]:
5367 if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
    """
    Generator over the (arg, atom) pairs of command-line arguments
    whose atoms match pkg, skipping atoms better served by a newer
    virtual or by a higher version in a different slot.
    """
    # NOTE(review): the yield statement plus several continue/break
    # lines and the higher_slot initialization are elided in this
    # excerpt.
    # TODO: add multiple $ROOT support
    if pkg.root != self.target_root:
        # [early return elided in excerpt]
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
            # A new-style virtual supersedes this old-style match.
            # [continue elided in excerpt]
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
                # [continue elided in excerpt]
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
                # [break elided in excerpt]
            if pkg.slot_atom != visible_pkg.slot_atom:
                # A higher version lives in another slot.
                higher_slot = visible_pkg
        if higher_slot is not None:
            # [continue elided in excerpt]
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and \
                # [package comparison, continue and yield elided]
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list."""
    # NOTE(review): this excerpt elides many source lines (loop
    # headers such as "for x in myfiles:"/"for arg in args:", try
    # statements, else branches and container initializations such as
    # args/myfavorites/lookup_owners); the comments below describe
    # only the visible statements.
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    onlydeps = "--onlydeps" in self.myopts

    # --- Argument classification: binary package file, ebuild file,
    # filesystem path, package set, or plain atom. ---
    ext = os.path.splitext(x)[1]
    if not os.path.exists(x):
        # Fall back to looking for the tbz2 under PKGDIR.
        os.path.join(pkgsettings["PKGDIR"], "All", x)):
        x = os.path.join(pkgsettings["PKGDIR"], "All", x)
        elif os.path.exists(
            os.path.join(pkgsettings["PKGDIR"], x)):
            x = os.path.join(pkgsettings["PKGDIR"], x)
        print "\n\n!!! Binary package '"+str(x)+"' does not exist."
        print "!!! Please ensure the tbz2 exists as specified.\n"
        return 0, myfavorites
    mytbz2=portage.xpak.tbz2(x)
    mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
    if os.path.realpath(x) != \
        os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
        print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
        return 0, myfavorites
    db_keys = list(bindb._aux_cache_keys)
    metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
    pkg = Package(type_name="binary", root_config=root_config,
        cpv=mykey, built=True, metadata=metadata,
    self._pkg_cache[pkg] = pkg
    args.append(PackageArg(arg=x, package=pkg,
        root_config=root_config))
    elif ext==".ebuild":
        # Derive category/package from the ebuild's location in a
        # <repo>/<category>/<pn>/<pf>.ebuild hierarchy.
        ebuild_path = portage.util.normalize_path(os.path.abspath(x))
        pkgdir = os.path.dirname(ebuild_path)
        tree_root = os.path.dirname(os.path.dirname(pkgdir))
        cp = pkgdir[len(tree_root)+1:]
        # Pre-built error, raised when the layout checks below fail.
        e = portage.exception.PackageNotFound(
            ("%s is not in a valid portage tree " + \
            "hierarchy or does not exist") % x)
        if not portage.isvalidatom(cp):
        cat = portage.catsplit(cp)[0]
        mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
        if not portage.isvalidatom("="+mykey):
        ebuild_path = portdb.findname(mykey)
        if ebuild_path != os.path.join(os.path.realpath(tree_root),
            cp, os.path.basename(ebuild_path)):
            print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
            return 0, myfavorites
        if mykey not in portdb.xmatch(
            "match-visible", portage.dep_getkey(mykey)):
            print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
            print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
            print colorize("BAD", "*** page for details.")
            countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
        raise portage.exception.PackageNotFound(
            "%s is not in a valid portage tree hierarchy or does not exist" % x)
        db_keys = list(portdb._aux_cache_keys)
        metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
        pkg = Package(type_name="ebuild", root_config=root_config,
            cpv=mykey, metadata=metadata, onlydeps=onlydeps)
        pkgsettings.setcpv(pkg)
        pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
        pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
        self._pkg_cache[pkg] = pkg
        args.append(PackageArg(arg=x, package=pkg,
            root_config=root_config))
    elif x.startswith(os.path.sep):
        if not x.startswith(myroot):
            portage.writemsg(("\n\n!!! '%s' does not start with" + \
                " $ROOT.\n") % x, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    if x in ("system", "world"):
        # [implicit SETPREFIX rewrite elided in excerpt]
    if x.startswith(SETPREFIX):
        s = x[len(SETPREFIX):]
        raise portage.exception.PackageSetNotFound(s)
        # Recursively expand sets so that containment tests in
        # self._get_parent_sets() properly match atoms in nested
        # sets (like if world contains system).
        expanded_set = InternalPackageSet(
            initial_atoms=getSetAtoms(s))
        self._sets[s] = expanded_set
        args.append(SetArg(arg=x, set=expanded_set,
            root_config=root_config))
    if not is_valid_package_atom(x):
        portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
    # Don't expand categories or old-style virtuals here unless
    # necessary. Expansion of old-style virtuals here causes at
    # least the following problems:
    #   1) It's more difficult to determine which set(s) an atom
    #      came from, if any.
    #   2) It takes away freedom from the resolver to choose other
    #      possible expansions when necessary.
    args.append(AtomArg(arg=x, atom=x,
        root_config=root_config))
    expanded_atoms = self._dep_expand(root_config, x)
    installed_cp_set = set()
    for atom in expanded_atoms:
        atom_cp = portage.dep_getkey(atom)
        if vardb.cp_list(atom_cp):
            installed_cp_set.add(atom_cp)
    # When exactly one of several expansions is installed, prefer it.
    if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
        installed_cp = iter(installed_cp_set).next()
        expanded_atoms = [atom for atom in expanded_atoms \
            if portage.dep_getkey(atom) == installed_cp]

    if len(expanded_atoms) > 1:
        ambiguous_package_name(x, expanded_atoms, root_config,
            self.spinner, self.myopts)
        return False, myfavorites
    atom = expanded_atoms[0]
    null_atom = insert_category_into_atom(x, "null")
    null_cp = portage.dep_getkey(null_atom)
    cat, atom_pn = portage.catsplit(null_cp)
    virts_p = root_config.settings.get_virts_p().get(atom_pn)
    # Allow the depgraph to choose which virtual.
    atom = insert_category_into_atom(x, "virtual")
    atom = insert_category_into_atom(x, "null")
    args.append(AtomArg(arg=x, atom=atom,
        root_config=root_config))

    # --- Resolve raw filesystem paths to the owning packages. ---
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True

    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            # A directory may be owned by several packages.
            search_for_multiple = True
        relative_paths.append(x[len(myroot):])

    for pkg, relative_path in \
        real_vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:
            # [break elided in excerpt]

    portage.writemsg(("\n\n!!! '%s' is not claimed " + \
        "by any package.\n") % lookup_owners[0], noiselevel=-1)

    slot = vardb.aux_get(cpv, ["SLOT"])[0]
    # portage now masks packages with missing slot, but it's
    # possible that one was installed by an older version
    atom = portage.cpv_getkey(cpv)
    atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
    args.append(AtomArg(arg=atom, atom=atom,
        root_config=root_config))

    if "--update" in self.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms
        self._set_args(args)
        greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
            # [continue elided in excerpt]
        for atom in self._greedy_slots(arg.root_config, arg.atom):
            AtomArg(arg=arg.arg, atom=atom,
                root_config=arg.root_config))

        self._set_args(greedy_args)
        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        revised_greedy_args.append(arg)
        if not isinstance(arg, AtomArg):
            # [continue elided in excerpt]
        for atom in self._greedy_slots(arg.root_config, arg.atom,
            blocker_lookahead=True):
            revised_greedy_args.append(
                AtomArg(arg=arg.arg, atom=atom,
                    root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    # Record favorites: atoms/packages directly, sets by their name.
    myfavorites = set(myfavorites)
    if isinstance(arg, (AtomArg, PackageArg)):
        myfavorites.add(arg.atom)
    elif isinstance(arg, SetArg):
        myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    pprovideddict = pkgsettings.pprovideddict
    portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    for atom in arg.set:
        self.spinner.update()
        dep = Dependency(atom=atom, onlydeps=onlydeps,
            root=myroot, parent=arg)
        atom_cp = portage.dep_getkey(atom)
        pprovided = pprovideddict.get(portage.dep_getkey(atom))
        if pprovided and portage.match_from_list(atom, pprovided):
            # A provided package has been specified on the command line.
            self._pprovided_args.append((arg, atom))
        if isinstance(arg, PackageArg):
            if not self._add_pkg(arg.package, dep) or \
                not self._create_graph():
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % arg.arg)
                return 0, myfavorites
        portage.writemsg(" Arg: %s\n Atom: %s\n" % \
            (arg, atom), noiselevel=-1)
        pkg, existing_node = self._select_package(
            myroot, atom, onlydeps=onlydeps)
        if not (isinstance(arg, SetArg) and \
            arg.name in ("system", "world")):
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            return 0, myfavorites
        self._missing_args.append((arg, atom))
        if atom_cp != pkg.cp:
            # For old-style virtuals, we need to repeat the
            # package.provided check against the selected package.
            expanded_atom = atom.replace(atom_cp, pkg.cp)
            pprovided = pprovideddict.get(pkg.cp)
            portage.match_from_list(expanded_atom, pprovided):
                # A provided package has been
                # specified on the command line.
                self._pprovided_args.append((arg, atom))
        if pkg.installed and "selective" not in self.myparams:
            self._unsatisfied_deps_for_display.append(
                ((myroot, atom), {}))
            # Previous behavior was to bail out in this case, but
            # since the dep is satisfied by the installed package,
            # it's more friendly to continue building the graph
            # and just show a warning message. Therefore, only bail
            # out here if the atom is not from either the system or
            if not (isinstance(arg, SetArg) and \
                arg.name in ("system", "world")):
                return 0, myfavorites

        # Add the selected package to the graph as soon as possible
        # so that later dep_check() calls can use it as feedback
        # for making more consistent atom selections.
        if not self._add_pkg(pkg, dep):
            if isinstance(arg, SetArg):
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s from %s\n") % \
                sys.stderr.write(("\n\n!!! Problem resolving " + \
                    "dependencies for %s\n") % atom)
            return 0, myfavorites

    except portage.exception.MissingSignature, e:
        portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except portage.exception.InvalidSignature, e:
        portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
        portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
        portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
        portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
        portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
        return 0, myfavorites
    except SystemExit, e:
        raise # Needed else can't exit
    except Exception, e:
        print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
        print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites

    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
                # [continue elided in excerpt]
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                print "Missing binary for:",xs[2]

    except self._unknown_internal_error:
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
def _set_args(self, args):
    """
    Create the "args" package set from atoms and packages given as
    arguments. This method can be called multiple times if necessary.
    The package selection cache is automatically invalidated, since
    arguments influence package selections.
    """
    # NOTE(review): the loop headers over args, a clear() call, and
    # the refs initialization/append are elided in this excerpt.
    args_set = self._sets["args"]
    if not isinstance(arg, (AtomArg, PackageArg)):
        # [continue elided in excerpt]
    if atom in args_set:
        # [continue elided in excerpt]

    # Rebuild the flattened union of all argument sets.
    self._set_atoms.clear()
    self._set_atoms.update(chain(*self._sets.itervalues()))
    atom_arg_map = self._atom_arg_map
    atom_arg_map.clear()
    for atom in arg.set:
        # Map (atom, root) back to the argument(s) that supplied it.
        atom_key = (atom, arg.root_config.root)
        refs = atom_arg_map.get(atom_key)
        atom_arg_map[atom_key] = refs

    # Invalidate the package selection cache, since
    # arguments influence package selections.
    self._highest_pkg_cache.clear()
    for trees in self._filtered_trees.itervalues():
        trees["porttree"].dbapi._clear_cache()
# Compute additional slot atoms for "greedy" slot handling: installed slots
# of the same package that differ from the slot of the highest visible
# match.  Optionally discards candidates that would create blocker
# conflicts with each other or with the highest match.
5812 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5814 Return a list of slot atoms corresponding to installed slots that
5815 differ from the slot of the highest visible match. When
5816 blocker_lookahead is True, slot atoms that would trigger a blocker
5817 conflict are automatically discarded, potentially allowing automatic
5818 uninstallation of older slots when appropriate.
5820 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5821 if highest_pkg is None:
# Collect SLOT values of installed instances that share the highest
# match's category/package name (avoids mixing old/new-style virtuals).
5823 vardb = root_config.trees["vartree"].dbapi
5825 for cpv in vardb.match(atom):
5826 # don't mix new virtuals with old virtuals
5827 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5828 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5830 slots.add(highest_pkg.metadata["SLOT"])
5834 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, select its best package; keep only strictly
# lower versions of the same package.
5837 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5838 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5839 if pkg is not None and \
5840 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5841 greedy_pkgs.append(pkg)
5844 if not blocker_lookahead:
5845 return [pkg.slot_atom for pkg in greedy_pkgs]
# Blocker lookahead: gather each candidate's blocker atoms from its
# *DEPEND strings so mutually blocking candidates can be filtered.
5848 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5849 for pkg in greedy_pkgs + [highest_pkg]:
5850 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5852 atoms = self._select_atoms(
5853 pkg.root, dep_str, pkg.use.enabled,
5854 parent=pkg, strict=True)
# Packages with invalid dep strings simply get no blockers entry and
# are filtered out below.
5855 except portage.exception.InvalidDependString:
5857 blocker_atoms = (x for x in atoms if x.blocker)
5858 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5860 if highest_pkg not in blockers:
5863 # filter packages with invalid deps
5864 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5866 # filter packages that conflict with highest_pkg
5867 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5868 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5869 blockers[pkg].findAtomForPackage(highest_pkg))]
5874 # If two packages conflict, discard the lower version.
5875 discard_pkgs = set()
5876 greedy_pkgs.sort(reverse=True)
# Pairwise scan from highest to lowest version; when a pair blocks each
# other, the lower (later in the sorted list) one is dropped.
5877 for i in xrange(len(greedy_pkgs) - 1):
5878 pkg1 = greedy_pkgs[i]
5879 if pkg1 in discard_pkgs:
5881 for j in xrange(i + 1, len(greedy_pkgs)):
5882 pkg2 = greedy_pkgs[j]
5883 if pkg2 in discard_pkgs:
5885 if blockers[pkg1].findAtomForPackage(pkg2) or \
5886 blockers[pkg2].findAtomForPackage(pkg1):
5888 discard_pkgs.add(pkg2)
5890 return [pkg.slot_atom for pkg in greedy_pkgs \
5891 if pkg not in discard_pkgs]
# Thin wrapper around _select_atoms_highest_available that substitutes the
# graph-backed trees, so atom selection prefers packages already in the
# graph (or installed and not scheduled for replacement).
5893 def _select_atoms_from_graph(self, *pargs, **kwargs):
5895 Prefer atoms matching packages that have already been
5896 added to the graph or those that are installed and have
5897 not been scheduled for replacement.
5899 kwargs["trees"] = self._graph_trees
5900 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check() on a dependency string and return the selected
# atoms.  Temporarily relaxes portage.dep._dep_check_strict around the
# call and restores it afterwards (module-global toggle — not thread-safe).
5902 def _select_atoms_highest_available(self, root, depstring,
5903 myuse=None, parent=None, strict=True, trees=None, priority=None):
5904 """This will raise InvalidDependString if necessary. If trees is
5905 None then self._filtered_trees is used."""
5906 pkgsettings = self.pkgsettings[root]
5908 trees = self._filtered_trees
5909 if not getattr(priority, "buildtime", False):
5910 # The parent should only be passed to dep_check() for buildtime
5911 # dependencies since that's the only case when it's appropriate
5912 # to trigger the circular dependency avoidance code which uses it.
5913 # It's important not to trigger the same circular dependency
5914 # avoidance code for runtime dependencies since it's not needed
5915 # and it can promote an incorrect package choice.
# The parent is communicated to dep_check() via the trees mapping and
# removed again after the call.
5919 if parent is not None:
5920 trees[root]["parent"] = parent
5922 portage.dep._dep_check_strict = False
5923 mycheck = portage.dep_check(depstring, None,
5924 pkgsettings, myuse=myuse,
5925 myroot=root, trees=trees)
5927 if parent is not None:
5928 trees[root].pop("parent")
5929 portage.dep._dep_check_strict = True
# On failure, mycheck[1] carries the error message; on success it
# carries the selected atoms.
5931 raise portage.exception.InvalidDependString(mycheck[1])
5932 selected_atoms = mycheck[1]
5933 return selected_atoms
# Print a diagnostic for an unsatisfiable dependency atom: distinguishes
# "would match if USE flags changed", "all candidates are masked", and
# "no ebuilds at all", then walks the digraph parents to show what pulled
# the dependency in.
5935 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5936 atom = portage.dep.Atom(atom)
5937 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Build a USE-free variant of the atom (keeping any slot) so candidates
# can be found regardless of their USE configuration.
5938 atom_without_use = atom
5940 atom_without_use = portage.dep.remove_slot(atom)
5942 atom_without_use += ":" + atom.slot
5943 atom_without_use = portage.dep.Atom(atom_without_use)
5944 xinfo = '"%s"' % atom
5947 # Discard null/ from failed cpv_expand category expansion.
5948 xinfo = xinfo.replace("null/", "")
5949 masked_packages = []
5951 masked_pkg_instances = set()
5952 missing_licenses = []
5953 have_eapi_mask = False
5954 pkgsettings = self.pkgsettings[root]
5955 implicit_iuse = pkgsettings._get_implicit_iuse()
5956 root_config = self.roots[root]
5957 portdb = self.roots[root].trees["porttree"].dbapi
5958 dbs = self._filtered_trees[root]["dbs"]
# Scan every db (ebuild/binary/installed) for candidates matching the
# USE-free atom and classify each as masked or USE-mismatched.
5959 for db, pkg_type, built, installed, db_keys in dbs:
5963 if hasattr(db, "xmatch"):
5964 cpv_list = db.xmatch("match-all", atom_without_use)
5966 cpv_list = db.match(atom_without_use)
5969 for cpv in cpv_list:
5970 metadata, mreasons = get_mask_info(root_config, cpv,
5971 pkgsettings, db, pkg_type, built, installed, db_keys)
5972 if metadata is not None:
5973 pkg = Package(built=built, cpv=cpv,
5974 installed=installed, metadata=metadata,
5975 root_config=root_config)
5976 if pkg.cp != atom.cp:
5977 # A cpv can be returned from dbapi.match() as an
5978 # old-style virtual match even in cases when the
5979 # package does not actually PROVIDE the virtual.
5980 # Filter out any such false matches here.
5981 if not atom_set.findAtomForPackage(pkg):
5984 masked_pkg_instances.add(pkg)
5986 missing_use.append(pkg)
5989 masked_packages.append(
5990 (root_config, pkgsettings, cpv, metadata, mreasons))
# For USE-mismatched candidates, work out whether the required flags
# are missing from IUSE entirely, or merely set the wrong way.
5992 missing_use_reasons = []
5993 missing_iuse_reasons = []
5994 for pkg in missing_use:
5995 use = pkg.use.enabled
5996 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5997 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5999 for x in atom.use.required:
6000 if iuse_re.match(x) is None:
6001 missing_iuse.append(x)
6004 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6005 missing_iuse_reasons.append((pkg, mreasons))
6007 need_enable = sorted(atom.use.enabled.difference(use))
6008 need_disable = sorted(atom.use.disabled.intersection(use))
6009 if need_enable or need_disable:
6011 changes.extend(colorize("red", "+" + x) \
6012 for x in need_enable)
6013 changes.extend(colorize("blue", "-" + x) \
6014 for x in need_disable)
6015 mreasons.append("Change USE: %s" % " ".join(changes))
6016 missing_use_reasons.append((pkg, mreasons))
# Only unmasked candidates are worth suggesting USE changes for;
# masked ones fall through to the normal masking message.
6018 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6019 in missing_use_reasons if pkg not in masked_pkg_instances]
6021 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6022 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6024 show_missing_use = False
6025 if unmasked_use_reasons:
6026 # Only show the latest version.
6027 show_missing_use = unmasked_use_reasons[:1]
6028 elif unmasked_iuse_reasons:
6029 if missing_use_reasons:
6030 # All packages with required IUSE are masked,
6031 # so display a normal masking message.
6034 show_missing_use = unmasked_iuse_reasons
6036 if show_missing_use:
6037 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6038 print "!!! One of the following packages is required to complete your request:"
6039 for pkg, mreasons in show_missing_use:
6040 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6042 elif masked_packages:
6044 colorize("BAD", "All ebuilds that could satisfy ") + \
6045 colorize("INFORM", xinfo) + \
6046 colorize("BAD", " have been masked.")
6047 print "!!! One of the following masked packages is required to complete your request:"
6048 have_eapi_mask = show_masked_packages(masked_packages)
# If any candidate was masked only because its EAPI is newer than this
# portage supports, suggest a portage upgrade.
6051 msg = ("The current version of portage supports " + \
6052 "EAPI '%s'. You must upgrade to a newer version" + \
6053 " of portage before EAPI masked packages can" + \
6054 " be installed.") % portage.const.EAPI
6055 from textwrap import wrap
6056 for line in wrap(msg, 75):
6061 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6063 # Show parent nodes and the argument that pulled them in.
6064 traversed_nodes = set()
6067 while node is not None:
6068 traversed_nodes.add(node)
6069 msg.append('(dependency required by "%s" [%s])' % \
6070 (colorize('INFORM', str(node.cpv)), node.type_name))
6071 # When traversing to parents, prefer arguments over packages
6072 # since arguments are root nodes. Never traverse the same
6073 # package twice, in order to prevent an infinite loop.
6074 selected_parent = None
6075 for parent in self.digraph.parent_nodes(node):
6076 if isinstance(parent, DependencyArg):
6077 msg.append('(dependency required by "%s" [argument])' % \
6078 (colorize('INFORM', str(parent))))
6079 selected_parent = None
6081 if parent not in traversed_nodes:
6082 selected_parent = parent
6083 node = selected_parent
# Memoizing front-end for package selection: consult the per-(root, atom,
# onlydeps) cache, refresh cached entries whose package has since been
# added to the graph, and delegate misses to the uncached implementation.
6089 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6090 cache_key = (root, atom, onlydeps)
6091 ret = self._highest_pkg_cache.get(cache_key)
6094 if pkg and not existing:
6095 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6096 if existing and existing == pkg:
6097 # Update the cache to reflect that the
6098 # package has been added to the graph.
6100 self._highest_pkg_cache[cache_key] = ret
6102 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6103 self._highest_pkg_cache[cache_key] = ret
# Record visible, non-keyword-masked selections in the root's visible
# package cache as a side effect.
6106 settings = pkg.root_config.settings
6107 if visible(settings, pkg) and not (pkg.installed and \
6108 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6109 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection algorithm: scan all configured dbs (ordered by
# type preference) for packages matching the atom, apply visibility,
# USE-dep, --noreplace/--newuse/--reinstall and existing-graph-node
# constraints, and return (best_match, existing_graph_node).  Returns the
# last element of matched_packages because dbs are ordered with "ebuild"
# as the last resort.
6112 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6113 root_config = self.roots[root]
6114 pkgsettings = self.pkgsettings[root]
6115 dbs = self._filtered_trees[root]["dbs"]
6116 vardb = self.roots[root].trees["vartree"].dbapi
6117 portdb = self.roots[root].trees["porttree"].dbapi
6118 # List of acceptable packages, ordered by type preference.
6119 matched_packages = []
6120 highest_version = None
6121 if not isinstance(atom, portage.dep.Atom):
6122 atom = portage.dep.Atom(atom)
6124 atom_set = InternalPackageSet(initial_atoms=(atom,))
6125 existing_node = None
6127 usepkgonly = "--usepkgonly" in self.myopts
6128 empty = "empty" in self.myparams
6129 selective = "selective" in self.myparams
6131 noreplace = "--noreplace" in self.myopts
6132 # Behavior of the "selective" parameter depends on
6133 # whether or not a package matches an argument atom.
6134 # If an installed package provides an old-style
6135 # virtual that is no longer provided by an available
6136 # package, the installed package may match an argument
6137 # atom even though none of the available packages do.
6138 # Therefore, "selective" logic does not consider
6139 # whether or not an installed package matches an
6140 # argument atom. It only considers whether or not
6141 # available packages match argument atoms, which is
6142 # represented by the found_available_arg flag.
6143 found_available_arg = False
# Two passes: first look for nodes already in the graph, then fall
# back to a normal selection pass.
6144 for find_existing_node in True, False:
6147 for db, pkg_type, built, installed, db_keys in dbs:
6150 if installed and not find_existing_node:
6151 want_reinstall = reinstall or empty or \
6152 (found_available_arg and not selective)
6153 if want_reinstall and matched_packages:
6155 if hasattr(db, "xmatch"):
6156 cpv_list = db.xmatch("match-all", atom)
6158 cpv_list = db.match(atom)
6160 # USE=multislot can make an installed package appear as if
6161 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6162 # won't do any good as long as USE=multislot is enabled since
6163 # the newly built package still won't have the expected slot.
6164 # Therefore, assume that such SLOT dependencies are already
6165 # satisfied rather than forcing a rebuild.
6166 if installed and not cpv_list and atom.slot:
6167 for cpv in db.match(atom.cp):
6168 slot_available = False
6169 for other_db, other_type, other_built, \
6170 other_installed, other_keys in dbs:
6173 other_db.aux_get(cpv, ["SLOT"])[0]:
6174 slot_available = True
6178 if not slot_available:
6180 inst_pkg = self._pkg(cpv, "installed",
6181 root_config, installed=installed)
6182 # Remove the slot from the atom and verify that
6183 # the package matches the resulting atom.
6184 atom_without_slot = portage.dep.remove_slot(atom)
6186 atom_without_slot += str(atom.use)
6187 atom_without_slot = portage.dep.Atom(atom_without_slot)
6188 if portage.match_from_list(
6189 atom_without_slot, [inst_pkg]):
6190 cpv_list = [inst_pkg.cpv]
6195 pkg_status = "merge"
6196 if installed or onlydeps:
6197 pkg_status = "nomerge"
6200 for cpv in cpv_list:
6201 # Make --noreplace take precedence over --newuse.
6202 if not installed and noreplace and \
6203 cpv in vardb.match(atom):
6204 # If the installed version is masked, it may
6205 # be necessary to look at lower versions,
6206 # in case there is a visible downgrade.
6208 reinstall_for_flags = None
6209 cache_key = (pkg_type, root, cpv, pkg_status)
6210 calculated_use = True
6211 pkg = self._pkg_cache.get(cache_key)
6213 calculated_use = False
6215 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6218 pkg = Package(built=built, cpv=cpv,
6219 installed=installed, metadata=metadata,
6220 onlydeps=onlydeps, root_config=root_config,
6222 metadata = pkg.metadata
6224 metadata['CHOST'] = pkgsettings.get('CHOST', '')
# USE calculation is deferred unless LICENSE/PROVIDE contain
# USE-conditionals that can affect visibility.
6225 if not built and ("?" in metadata["LICENSE"] or \
6226 "?" in metadata["PROVIDE"]):
6227 # This is avoided whenever possible because
6228 # it's expensive. It only needs to be done here
6229 # if it has an effect on visibility.
6230 pkgsettings.setcpv(pkg)
6231 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6232 calculated_use = True
6233 self._pkg_cache[pkg] = pkg
6235 if not installed or (built and matched_packages):
6236 # Only enforce visibility on installed packages
6237 # if there is at least one other visible package
6238 # available. By filtering installed masked packages
6239 # here, packages that have been masked since they
6240 # were installed can be automatically downgraded
6241 # to an unmasked version.
6243 if not visible(pkgsettings, pkg):
6245 except portage.exception.InvalidDependString:
6249 # Enable upgrade or downgrade to a version
6250 # with visible KEYWORDS when the installed
6251 # version is masked by KEYWORDS, but never
6252 # reinstall the same exact version only due
6253 # to a KEYWORDS mask.
6254 if built and matched_packages:
6256 different_version = None
6257 for avail_pkg in matched_packages:
6258 if not portage.dep.cpvequal(
6259 pkg.cpv, avail_pkg.cpv):
6260 different_version = avail_pkg
6262 if different_version is not None:
6265 pkgsettings._getMissingKeywords(
6266 pkg.cpv, pkg.metadata):
6269 # If the ebuild no longer exists or it's
6270 # keywords have been dropped, reject built
6271 # instances (installed or binary).
6272 # If --usepkgonly is enabled, assume that
6273 # the ebuild status should be ignored.
6277 pkg.cpv, "ebuild", root_config)
6278 except portage.exception.PackageNotFound:
6281 if not visible(pkgsettings, pkg_eb):
6284 if not pkg.built and not calculated_use:
6285 # This is avoided whenever possible because
6287 pkgsettings.setcpv(pkg)
6288 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6290 if pkg.cp != atom.cp:
6291 # A cpv can be returned from dbapi.match() as an
6292 # old-style virtual match even in cases when the
6293 # package does not actually PROVIDE the virtual.
6294 # Filter out any such false matches here.
6295 if not atom_set.findAtomForPackage(pkg):
6299 if root == self.target_root:
6301 # Ebuild USE must have been calculated prior
6302 # to this point, in case atoms have USE deps.
6303 myarg = self._iter_atoms_for_pkg(pkg).next()
6304 except StopIteration:
6306 except portage.exception.InvalidDependString:
6308 # masked by corruption
6310 if not installed and myarg:
6311 found_available_arg = True
# Enforce USE dependencies of the atom against non-built
# candidates (built packages have fixed USE).
6313 if atom.use and not pkg.built:
6314 use = pkg.use.enabled
6315 if atom.use.enabled.difference(use):
6317 if atom.use.disabled.intersection(use):
6319 if pkg.cp == atom_cp:
6320 if highest_version is None:
6321 highest_version = pkg
6322 elif pkg > highest_version:
6323 highest_version = pkg
6324 # At this point, we've found the highest visible
6325 # match from the current repo. Any lower versions
6326 # from this repo are ignored, so this so the loop
6327 # will always end with a break statement below
6329 if find_existing_node:
6330 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6333 if portage.dep.match_from_list(atom, [e_pkg]):
6334 if highest_version and \
6335 e_pkg.cp == atom_cp and \
6336 e_pkg < highest_version and \
6337 e_pkg.slot_atom != highest_version.slot_atom:
6338 # There is a higher version available in a
6339 # different slot, so this existing node is
6343 matched_packages.append(e_pkg)
6344 existing_node = e_pkg
6346 # Compare built package to current config and
6347 # reject the built package if necessary.
6348 if built and not installed and \
6349 ("--newuse" in self.myopts or \
6350 "--reinstall" in self.myopts):
6351 iuses = pkg.iuse.all
6352 old_use = pkg.use.enabled
6354 pkgsettings.setcpv(myeb)
6356 pkgsettings.setcpv(pkg)
6357 now_use = pkgsettings["PORTAGE_USE"].split()
6358 forced_flags = set()
6359 forced_flags.update(pkgsettings.useforce)
6360 forced_flags.update(pkgsettings.usemask)
6362 if myeb and not usepkgonly:
6363 cur_iuse = myeb.iuse.all
6364 if self._reinstall_for_flags(forced_flags,
6368 # Compare current config to installed package
6369 # and do not reinstall if possible.
6370 if not installed and \
6371 ("--newuse" in self.myopts or \
6372 "--reinstall" in self.myopts) and \
6373 cpv in vardb.match(atom):
6374 pkgsettings.setcpv(pkg)
6375 forced_flags = set()
6376 forced_flags.update(pkgsettings.useforce)
6377 forced_flags.update(pkgsettings.usemask)
6378 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6379 old_iuse = set(filter_iuse_defaults(
6380 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6381 cur_use = pkgsettings["PORTAGE_USE"].split()
6382 cur_iuse = pkg.iuse.all
6383 reinstall_for_flags = \
6384 self._reinstall_for_flags(
6385 forced_flags, old_use, old_iuse,
6387 if reinstall_for_flags:
6391 matched_packages.append(pkg)
6392 if reinstall_for_flags:
6393 self._reinstall_nodes[pkg] = \
6397 if not matched_packages:
6400 if "--debug" in self.myopts:
6401 for pkg in matched_packages:
6402 portage.writemsg("%s %s\n" % \
6403 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6405 # Filter out any old-style virtual matches if they are
6406 # mixed with new-style virtual matches.
6407 cp = portage.dep_getkey(atom)
6408 if len(matched_packages) > 1 and \
6409 "virtual" == portage.catsplit(cp)[0]:
6410 for pkg in matched_packages:
6413 # Got a new-style virtual, so filter
6414 # out any old-style virtuals.
6415 matched_packages = [pkg for pkg in matched_packages \
6419 if len(matched_packages) > 1:
6420 bestmatch = portage.best(
6421 [pkg.cpv for pkg in matched_packages])
6422 matched_packages = [pkg for pkg in matched_packages \
6423 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6425 # ordered by type preference ("ebuild" type is the last resort)
6426 return matched_packages[-1], existing_node
# Graph-restricted package selection: match only against packages already
# in the graph (via the graph-backed dbapi) and return the highest match
# along with any node already occupying its slot.
6428 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6430 Select packages that have already been added to the graph or
6431 those that are installed and have not been scheduled for
6434 graph_db = self._graph_trees[root]["porttree"].dbapi
6435 matches = graph_db.match_pkgs(atom)
6438 pkg = matches[-1] # highest match
6439 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6440 return pkg, in_graph
# Pull deep dependencies of the required sets (args/system/world) into the
# graph so that initially-satisfied dependencies are not broken by the
# planned merges.  Only runs with --complete-graph; temporarily switches
# the selection methods to their graph-restricted variants and forces the
# "deep" parameter.
6442 def _complete_graph(self):
6444 Add any deep dependencies of required sets (args, system, world) that
6445 have not been pulled into the graph yet. This ensures that the graph
6446 is consistent such that initially satisfied deep dependencies are not
6447 broken in the new graph. Initially unsatisfied dependencies are
6448 irrelevant since we only want to avoid breaking dependencies that are
6451 Since this method can consume enough time to disturb users, it is
6452 currently only enabled by the --complete-graph option.
6454 if "--buildpkgonly" in self.myopts or \
6455 "recurse" not in self.myparams:
6458 if "complete" not in self.myparams:
6459 # Skip this to avoid consuming enough time to disturb users.
6462 # Put the depgraph into a mode that causes it to only
6463 # select packages that have already been added to the
6464 # graph or those that are installed and have not been
6465 # scheduled for replacement. Also, toggle the "deep"
6466 # parameter so that all dependencies are traversed and
6468 self._select_atoms = self._select_atoms_from_graph
6469 self._select_package = self._select_pkg_from_graph
6470 already_deep = "deep" in self.myparams
6471 if not already_deep:
6472 self.myparams.add("deep")
6474 for root in self.roots:
6475 required_set_names = self._required_set_names.copy()
6476 if root == self.target_root and \
6477 (already_deep or "empty" in self.myparams):
6478 required_set_names.difference_update(self._sets)
6479 if not required_set_names and not self._ignored_deps:
6481 root_config = self.roots[root]
6482 setconfig = root_config.setconfig
6484 # Reuse existing SetArg instances when available.
6485 for arg in self.digraph.root_nodes():
6486 if not isinstance(arg, SetArg):
6488 if arg.root_config != root_config:
6490 if arg.name in required_set_names:
6492 required_set_names.remove(arg.name)
6493 # Create new SetArg instances only when necessary.
6494 for s in required_set_names:
6495 expanded_set = InternalPackageSet(
6496 initial_atoms=setconfig.getSetAtoms(s))
6497 atom = SETPREFIX + s
6498 args.append(SetArg(arg=atom, set=expanded_set,
6499 root_config=root_config))
6500 vardb = root_config.trees["vartree"].dbapi
# Queue every atom of every required set for dependency traversal,
# plus any deps that were previously ignored.
6502 for atom in arg.set:
6503 self._dep_stack.append(
6504 Dependency(atom=atom, root=root, parent=arg))
6505 if self._ignored_deps:
6506 self._dep_stack.extend(self._ignored_deps)
6507 self._ignored_deps = []
6508 if not self._create_graph(allow_unsatisfied=True):
6510 # Check the unsatisfied deps to see if any initially satisfied deps
6511 # will become unsatisfied due to an upgrade. Initially unsatisfied
6512 # deps are irrelevant since we only want to avoid breaking deps
6513 # that are initially satisfied.
6514 while self._unsatisfied_deps:
6515 dep = self._unsatisfied_deps.pop()
6516 matches = vardb.match_pkgs(dep.atom)
6518 self._initially_unsatisfied_deps.append(dep)
6520 # An scheduled installation broke a deep dependency.
6521 # Add the installed package to the graph so that it
6522 # will be appropriately reported as a slot collision
6523 # (possibly solvable via backtracking).
6524 pkg = matches[-1] # highest match
6525 if not self._add_pkg(pkg, dep):
6527 if not self._create_graph(allow_unsatisfied=True):
# Fetch (or lazily construct and cache) a Package instance for the given
# cpv/type/root.  Raises PackageNotFound when aux_get fails; for ebuilds,
# also computes USE and CHOST from the root's settings.
6531 def _pkg(self, cpv, type_name, root_config, installed=False):
6533 Get a package instance from the cache, or create a new
6534 one if necessary. Raises KeyError from aux_get if it
6535 failures for some reason (package does not exist or is
6540 operation = "nomerge"
6541 pkg = self._pkg_cache.get(
6542 (type_name, root_config.root, cpv, operation))
6544 tree_type = self.pkg_tree_map[type_name]
6545 db = root_config.trees[tree_type].dbapi
# db_keys comes from the original (unwrapped) trees' aux cache keys.
6546 db_keys = list(self._trees_orig[root_config.root][
6547 tree_type].dbapi._aux_cache_keys)
6549 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6551 raise portage.exception.PackageNotFound(cpv)
6552 pkg = Package(cpv=cpv, metadata=metadata,
6553 root_config=root_config, installed=installed)
6554 if type_name == "ebuild":
6555 settings = self.pkgsettings[root_config.root]
6556 settings.setcpv(pkg)
6557 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6558 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6559 self._pkg_cache[pkg] = pkg
# Phase 1: compute blocker atoms for every package in the final db (using
# a persistent BlockerCache keyed by COUNTER to avoid re-running
# dep_check); phase 2: for each blocker leaf, resolve it against the
# initial and final dbs, scheduling "uninstall" tasks with hard ordering
# deps where a blocked installed package can simply be unmerged, and
# recording unsolvable blockers otherwise.
6562 def validate_blockers(self):
6563 """Remove any blockers from the digraph that do not match any of the
6564 packages within the graph. If necessary, create hard deps to ensure
6565 correct merge order such that mutually blocking packages are never
6566 installed simultaneously."""
6568 if "--buildpkgonly" in self.myopts or \
6569 "--nodeps" in self.myopts:
6572 #if "deep" in self.myparams:
6574 # Pull in blockers from all installed packages that haven't already
6575 # been pulled into the depgraph. This is not enabled by default
6576 # due to the performance penalty that is incurred by all the
6577 # additional dep_check calls that are required.
6579 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6580 for myroot in self.trees:
6581 vardb = self.trees[myroot]["vartree"].dbapi
6582 portdb = self.trees[myroot]["porttree"].dbapi
6583 pkgsettings = self.pkgsettings[myroot]
6584 final_db = self.mydbapi[myroot]
6586 blocker_cache = BlockerCache(myroot, vardb)
# stale_cache tracks cache entries not touched this run so they can
# be purged at the end.
6587 stale_cache = set(blocker_cache)
6590 stale_cache.discard(cpv)
6591 pkg_in_graph = self.digraph.contains(pkg)
6593 # Check for masked installed packages. Only warn about
6594 # packages that are in the graph in order to avoid warning
6595 # about those that will be automatically uninstalled during
6596 # the merge process or by --depclean.
6598 if pkg_in_graph and not visible(pkgsettings, pkg):
6599 self._masked_installed.add(pkg)
6601 blocker_atoms = None
6607 self._blocker_parents.child_nodes(pkg))
6612 self._irrelevant_blockers.child_nodes(pkg))
6615 if blockers is not None:
6616 blockers = set(str(blocker.atom) \
6617 for blocker in blockers)
6619 # If this node has any blockers, create a "nomerge"
6620 # node for it so that they can be enforced.
6621 self.spinner.update()
# A cached entry is only trusted if its COUNTER matches the
# installed package's current COUNTER.
6622 blocker_data = blocker_cache.get(cpv)
6623 if blocker_data is not None and \
6624 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6627 # If blocker data from the graph is available, use
6628 # it to validate the cache and update the cache if
6630 if blocker_data is not None and \
6631 blockers is not None:
6632 if not blockers.symmetric_difference(
6633 blocker_data.atoms):
6637 if blocker_data is None and \
6638 blockers is not None:
6639 # Re-use the blockers from the graph.
6640 blocker_atoms = sorted(blockers)
6641 counter = long(pkg.metadata["COUNTER"])
6643 blocker_cache.BlockerData(counter, blocker_atoms)
6644 blocker_cache[pkg.cpv] = blocker_data
6648 blocker_atoms = blocker_data.atoms
6650 # Use aux_get() to trigger FakeVartree global
6651 # updates on *DEPEND when appropriate.
6652 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6653 # It is crucial to pass in final_db here in order to
6654 # optimize dep_check calls by eliminating atoms via
6655 # dep_wordreduce and dep_eval calls.
6657 portage.dep._dep_check_strict = False
6659 success, atoms = portage.dep_check(depstr,
6660 final_db, pkgsettings, myuse=pkg.use.enabled,
6661 trees=self._graph_trees, myroot=myroot)
6662 except Exception, e:
6663 if isinstance(e, SystemExit):
6665 # This is helpful, for example, if a ValueError
6666 # is thrown from cpv_expand due to multiple
6667 # matches (this can happen if an atom lacks a
6669 show_invalid_depstring_notice(
6670 pkg, depstr, str(e))
6674 portage.dep._dep_check_strict = True
6676 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6677 if replacement_pkg and \
6678 replacement_pkg[0].operation == "merge":
6679 # This package is being replaced anyway, so
6680 # ignore invalid dependencies so as not to
6681 # annoy the user too much (otherwise they'd be
6682 # forced to manually unmerge it first).
6684 show_invalid_depstring_notice(pkg, depstr, atoms)
6686 blocker_atoms = [myatom for myatom in atoms \
6687 if myatom.startswith("!")]
6688 blocker_atoms.sort()
6689 counter = long(pkg.metadata["COUNTER"])
6690 blocker_cache[cpv] = \
6691 blocker_cache.BlockerData(counter, blocker_atoms)
6694 for atom in blocker_atoms:
6695 blocker = Blocker(atom=portage.dep.Atom(atom),
6696 eapi=pkg.metadata["EAPI"], root=myroot)
6697 self._blocker_parents.add(blocker, pkg)
6698 except portage.exception.InvalidAtom, e:
6699 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6700 show_invalid_depstring_notice(
6701 pkg, depstr, "Invalid Atom: %s" % (e,))
# Purge cache entries for packages no longer installed.
6703 for cpv in stale_cache:
6704 del blocker_cache[cpv]
6705 blocker_cache.flush()
6708 # Discard any "uninstall" tasks scheduled by previous calls
6709 # to this method, since those tasks may not make sense given
6710 # the current graph state.
6711 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6712 if previous_uninstall_tasks:
6713 self._blocker_uninstalls = digraph()
6714 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: resolve each blocker against the initial (installed) and
# final (planned) package databases.
6716 for blocker in self._blocker_parents.leaf_nodes():
6717 self.spinner.update()
6718 root_config = self.roots[blocker.root]
6719 virtuals = root_config.settings.getvirtuals()
6720 myroot = blocker.root
6721 initial_db = self.trees[myroot]["vartree"].dbapi
6722 final_db = self.mydbapi[myroot]
6724 provider_virtual = False
6725 if blocker.cp in virtuals and \
6726 not self._have_new_virt(blocker.root, blocker.cp):
6727 provider_virtual = True
# Old-style virtual blockers expand to one atom per provider.
6729 if provider_virtual:
6731 for provider_entry in virtuals[blocker.cp]:
6733 portage.dep_getkey(provider_entry)
6734 atoms.append(blocker.atom.replace(
6735 blocker.cp, provider_cp))
6737 atoms = [blocker.atom]
6739 blocked_initial = []
6741 blocked_initial.extend(initial_db.match_pkgs(atom))
6745 blocked_final.extend(final_db.match_pkgs(atom))
6747 if not blocked_initial and not blocked_final:
# Blocker matches nothing: mark it irrelevant and prune it
# (and any parents left without blockers) from the graph.
6748 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6749 self._blocker_parents.remove(blocker)
6750 # Discard any parents that don't have any more blockers.
6751 for pkg in parent_pkgs:
6752 self._irrelevant_blockers.add(blocker, pkg)
6753 if not self._blocker_parents.child_nodes(pkg):
6754 self._blocker_parents.remove(pkg)
6756 for parent in self._blocker_parents.parent_nodes(blocker):
6757 unresolved_blocks = False
6758 depends_on_order = set()
6759 for pkg in blocked_initial:
6760 if pkg.slot_atom == parent.slot_atom:
6761 # TODO: Support blocks within slots in cases where it
6762 # might make sense. For example, a new version might
6763 # require that the old version be uninstalled at build
6766 if parent.installed:
6767 # Two currently installed packages conflict with
6768 # eachother. Ignore this case since the damage
6769 # is already done and this would be likely to
6770 # confuse users if displayed like a normal blocker.
6773 self._blocked_pkgs.add(pkg, blocker)
6775 if parent.operation == "merge":
6776 # Maybe the blocked package can be replaced or simply
6777 # unmerged to resolve this block.
6778 depends_on_order.add((pkg, parent))
6780 # None of the above blocker resolutions techniques apply,
6781 # so apparently this one is unresolvable.
6782 unresolved_blocks = True
6783 for pkg in blocked_final:
6784 if pkg.slot_atom == parent.slot_atom:
6785 # TODO: Support blocks within slots.
6787 if parent.operation == "nomerge" and \
6788 pkg.operation == "nomerge":
6789 # This blocker will be handled the next time that a
6790 # merge of either package is triggered.
6793 self._blocked_pkgs.add(pkg, blocker)
6795 # Maybe the blocking package can be
6796 # unmerged to resolve this block.
6797 if parent.operation == "merge" and pkg.installed:
6798 depends_on_order.add((pkg, parent))
6800 elif parent.operation == "nomerge":
6801 depends_on_order.add((parent, pkg))
6803 # None of the above blocker resolutions techniques apply,
6804 # so apparently this one is unresolvable.
6805 unresolved_blocks = True
6807 # Make sure we don't unmerge any package that have been pulled
6809 if not unresolved_blocks and depends_on_order:
6810 for inst_pkg, inst_task in depends_on_order:
6811 if self.digraph.contains(inst_pkg) and \
6812 self.digraph.parent_nodes(inst_pkg):
6813 unresolved_blocks = True
6816 if not unresolved_blocks and depends_on_order:
# Schedule an uninstall task for each blocked installed
# package and hard-order it relative to the merge task.
6817 for inst_pkg, inst_task in depends_on_order:
6818 uninst_task = Package(built=inst_pkg.built,
6819 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6820 metadata=inst_pkg.metadata,
6821 operation="uninstall",
6822 root_config=inst_pkg.root_config,
6823 type_name=inst_pkg.type_name)
6824 self._pkg_cache[uninst_task] = uninst_task
6825 # Enforce correct merge order with a hard dep.
6826 self.digraph.addnode(uninst_task, inst_task,
6827 priority=BlockerDepPriority.instance)
6828 # Count references to this blocker so that it can be
6829 # invalidated after nodes referencing it have been
6831 self._blocker_uninstalls.addnode(uninst_task, blocker)
6832 if not unresolved_blocks and not depends_on_order:
6833 self._irrelevant_blockers.add(blocker, parent)
6834 self._blocker_parents.remove_edge(blocker, parent)
6835 if not self._blocker_parents.parent_nodes(blocker):
6836 self._blocker_parents.remove(blocker)
6837 if not self._blocker_parents.child_nodes(parent):
6838 self._blocker_parents.remove(parent)
6839 if unresolved_blocks:
6840 self._unsolvable_blockers.add(blocker, parent)
# Return whether unresolved blocker conflicts may be tolerated for this run.
# Each of the listed options implies that no real merge to the live system
# will occur, so a blocker conflict is not fatal.
# NOTE(review): the embedded original line numbers jump (6844 -> 6846, ends
# at 6848), so the result accumulator and the return statement of this
# method are not visible in this chunk — presumably it returns True when
# any of these options is present; confirm against the full source.
6844 def _accept_blocker_conflicts(self):
6846 for x in ("--buildpkgonly", "--fetchonly",
6847 "--fetch-all-uri", "--nodeps"):
6848 if x in self.myopts:
# Bias the merge order of `mygraph` in place (no return value): nodes are
# sorted by a cmp-style comparator wrapped with cmp_sort_key().
# NOTE(review): the comparator's return statements (original lines
# 6868-6875, 6879-6882) are missing from this chunk; only the conditions
# are visible.  The visible fallback at 6883 orders by descending
# parent-reference count (node with more parents sorts first).
6853 def _merge_order_bias(self, mygraph):
6855 For optimal leaf node selection, promote deep system runtime deps and
6856 order nodes from highest to lowest overall reference count.
# Count how many parents reference each node; used as the final tiebreaker.
6860 for node in mygraph.order:
6861 node_info[node] = len(mygraph.parent_nodes(node))
6862 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# cmp-style comparator: uninstall operations and deep system runtime deps
# are special-cased before the reference-count fallback below.
6864 def cmp_merge_preference(node1, node2):
6866 if node1.operation == 'uninstall':
6867 if node2.operation == 'uninstall':
6871 if node2.operation == 'uninstall':
6872 if node1.operation == 'uninstall':
6876 node1_sys = node1 in deep_system_deps
6877 node2_sys = node2 in deep_system_deps
6878 if node1_sys != node2_sys:
# Fallback: higher overall reference count sorts earlier.
6883 return node_info[node2] - node_info[node1]
6885 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return (a copy of) the serialized merge task list, computing and caching
# it on first use.  The `reversed` parameter shadows the builtin of the
# same name — renaming it would break keyword callers, so it is left as-is.
# NOTE(review): the `try:` line, the loop `break`, and the handling of the
# `reversed` flag / final return (after original line 6897) are missing
# from this chunk.
6887 def altlist(self, reversed=False):
6889 while self._serialized_tasks_cache is None:
6890 self._resolve_conflicts()
# _serialize_tasks() may raise _serialize_tasks_retry, in which case the
# loop runs again with updated parameters (e.g. "complete" mode).
6892 self._serialized_tasks_cache, self._scheduler_graph = \
6893 self._serialize_tasks()
6894 except self._serialize_tasks_retry:
# Return a shallow copy so callers cannot mutate the cache.
6897 retlist = self._serialized_tasks_cache[:]
6902 def schedulerGraph(self):
6904 The scheduler graph is identical to the normal one except that
6905 uninstall edges are reversed in specific cases that require
6906 conflicting packages to be temporarily installed simultaneously.
6907 This is intended for use by the Scheduler in it's parallelization
6908 logic. It ensures that temporary simultaneous installation of
6909 conflicting packages is avoided when appropriate (especially for
6910 !!atom blockers), but allowed in specific cases that require it.
6912 Note that this method calls break_refs() which alters the state of
6913 internal Package instances such that this depgraph instance should
6914 not be used to perform any more calculations.
6916 if self._scheduler_graph is None:
6918 self.break_refs(self._scheduler_graph.order)
6919 return self._scheduler_graph
# Detach the given nodes from this depgraph so that holding them does not
# keep the whole depgraph (and its FakeVartree) alive on the heap.
# NOTE(review): the loop header over `nodes` (original line ~6928) is
# missing from this chunk; the hasattr-guarded body below presumably runs
# once per node.
6921 def break_refs(self, nodes):
6923 Take a mergelist like that returned from self.altlist() and
6924 break any references that lead back to the depgraph. This is
6925 useful if you want to hold references to packages without
6926 also holding the depgraph on the heap.
6929 if hasattr(node, "root_config"):
6930 # The FakeVartree references the _package_cache which
6931 # references the depgraph. So that Package instances don't
6932 # hold the depgraph and FakeVartree on the heap, replace
6933 # the RootConfig that references the FakeVartree with the
6934 # original RootConfig instance which references the actual
# Swap in the original RootConfig for this node's root.
6936 node.root_config = \
6937 self._trees_orig[node.root_config.root]["root_config"]
# Complete the graph, validate blockers, and process any slot conflicts,
# raising _unknown_internal_error on failure.  Called in a loop by
# altlist() until task serialization succeeds.
6939 def _resolve_conflicts(self):
6940 if not self._complete_graph():
6941 raise self._unknown_internal_error()
6943 if not self.validate_blockers():
6944 raise self._unknown_internal_error()
# Two packages scheduled for the same slot: let the slot-conflict
# handler sort it out (or record it for display).
6946 if self._slot_collision_info:
6947 self._process_slot_conflicts()
# Compute a linear merge order (retlist) from self.digraph, together with
# a scheduler graph for parallelization.  Returns (retlist, scheduler_graph).
# Strategy: repeatedly pick "leaf" nodes (nodes whose children are all
# already ordered), with successively looser edge-ignoring criteria, and
# schedule Uninstall tasks as late as safely possible.
# NOTE(review): this chunk is missing many original lines (the embedded
# numbering has gaps throughout) — `try:` lines, `break`/`continue`
# statements, `else:` branches, several assignments (e.g. retlist and
# asap_nodes initialization, uninst_task selection) are not visible.
# Comments below only describe what the visible lines establish.
6949 def _serialize_tasks(self):
6951 if "--debug" in self.myopts:
6952 writemsg("\ndigraph:\n\n", noiselevel=-1)
6953 self.digraph.debug_print()
6954 writemsg("\n", noiselevel=-1)
# Work on copies: mygraph is consumed during ordering, scheduler_graph
# keeps edges (with uninstall edges reversed later where needed).
6956 scheduler_graph = self.digraph.copy()
6957 mygraph=self.digraph.copy()
6958 # Prune "nomerge" root nodes if nothing depends on them, since
6959 # otherwise they slow down merge order calculation. Don't remove
6960 # non-root nodes since they help optimize merge order in some cases
6961 # such as revdep-rebuild.
6962 removed_nodes = set()
6964 for node in mygraph.root_nodes():
6965 if not isinstance(node, Package) or \
6966 node.installed or node.onlydeps:
6967 removed_nodes.add(node)
6969 self.spinner.update()
6970 mygraph.difference_update(removed_nodes)
6971 if not removed_nodes:
6973 removed_nodes.clear()
6974 self._merge_order_bias(mygraph)
# cmp-style comparator used to order members of a circular-dependency
# group.  NOTE(review): its return statements are missing from this chunk.
6975 def cmp_circular_bias(n1, n2):
6977 RDEPEND is stronger than PDEPEND and this function
6978 measures such a strength bias within a circular
6979 dependency relationship.
6981 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6982 ignore_priority=priority_range.ignore_medium_soft)
6983 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6984 ignore_priority=priority_range.ignore_medium_soft)
6985 if n1_n2_medium == n2_n1_medium:
6990 myblocker_uninstalls = self._blocker_uninstalls.copy()
6992 # Contains uninstall tasks that have been scheduled to
6993 # occur after overlapping blockers have been installed.
6994 scheduled_uninstalls = set()
6995 # Contains any Uninstall tasks that have been ignored
6996 # in order to avoid the circular deps code path. These
6997 # correspond to blocker conflicts that could not be
6999 ignored_uninstall_tasks = set()
7000 have_uninstall_task = False
7001 complete = "complete" in self.myparams
7004 def get_nodes(**kwargs):
7006 Returns leaf nodes excluding Uninstall instances
7007 since those should be executed as late as possible.
7009 return [node for node in mygraph.leaf_nodes(**kwargs) \
7010 if isinstance(node, Package) and \
7011 (node.operation != "uninstall" or \
7012 node in scheduled_uninstalls)]
7014 # sys-apps/portage needs special treatment if ROOT="/"
7015 running_root = self._running_root.root
7016 from portage.const import PORTAGE_PACKAGE_ATOM
7017 runtime_deps = InternalPackageSet(
7018 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7019 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7020 PORTAGE_PACKAGE_ATOM)
7021 replacement_portage = self.mydbapi[running_root].match_pkgs(
7022 PORTAGE_PACKAGE_ATOM)
7025 running_portage = running_portage[0]
7027 running_portage = None
7029 if replacement_portage:
7030 replacement_portage = replacement_portage[0]
7032 replacement_portage = None
# No actual portage upgrade pending: no special ASAP treatment needed.
7034 if replacement_portage == running_portage:
7035 replacement_portage = None
7037 if replacement_portage is not None:
7038 # update from running_portage to replacement_portage asap
7039 asap_nodes.append(replacement_portage)
7041 if running_portage is not None:
7043 portage_rdepend = self._select_atoms_highest_available(
7044 running_root, running_portage.metadata["RDEPEND"],
7045 myuse=running_portage.use.enabled,
7046 parent=running_portage, strict=False)
7047 except portage.exception.InvalidDependString, e:
7048 portage.writemsg("!!! Invalid RDEPEND in " + \
7049 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7050 (running_root, running_portage.cpv, e), noiselevel=-1)
7052 portage_rdepend = []
# Protect portage's own runtime deps (blockers excluded) from uninstall.
7053 runtime_deps.update(atom for atom in portage_rdepend \
7054 if not atom.startswith("!"))
7056 def gather_deps(ignore_priority, mergeable_nodes,
7057 selected_nodes, node):
7059 Recursively gather a group of nodes that RDEPEND on
7060 eachother. This ensures that they are merged as a group
7061 and get their RDEPENDs satisfied as soon as possible.
7063 if node in selected_nodes:
7065 if node not in mergeable_nodes:
7067 if node == replacement_portage and \
7068 mygraph.child_nodes(node,
7069 ignore_priority=priority_range.ignore_medium_soft):
7070 # Make sure that portage always has all of it's
7071 # RDEPENDs installed first.
7073 selected_nodes.add(node)
7074 for child in mygraph.child_nodes(node,
7075 ignore_priority=ignore_priority):
7076 if not gather_deps(ignore_priority,
7077 mergeable_nodes, selected_nodes, child):
# Edge filters that additionally ignore blocker-uninstall edges.
7081 def ignore_uninst_or_med(priority):
7082 if priority is BlockerDepPriority.instance:
7084 return priority_range.ignore_medium(priority)
7086 def ignore_uninst_or_med_soft(priority):
7087 if priority is BlockerDepPriority.instance:
7089 return priority_range.ignore_medium_soft(priority)
7091 tree_mode = "--tree" in self.myopts
7092 # Tracks whether or not the current iteration should prefer asap_nodes
7093 # if available. This is set to False when the previous iteration
7094 # failed to select any nodes. It is reset whenever nodes are
7095 # successfully selected.
7098 # Controls whether or not the current iteration should drop edges that
7099 # are "satisfied" by installed packages, in order to solve circular
7100 # dependencies. The deep runtime dependencies of installed packages are
7101 # not checked in this case (bug #199856), so it must be avoided
7102 # whenever possible.
7103 drop_satisfied = False
7105 # State of variables for successive iterations that loosen the
7106 # criteria for node selection.
7108 # iteration prefer_asap drop_satisfied
7113 # If no nodes are selected on the last iteration, it is due to
7114 # unresolved blockers or circular dependencies.
# Main selection loop: each pass selects one or more nodes, appends them
# to the ordered task list, and removes them from mygraph.
7116 while not mygraph.empty():
7117 self.spinner.update()
7118 selected_nodes = None
7119 ignore_priority = None
7120 if drop_satisfied or (prefer_asap and asap_nodes):
7121 priority_range = DepPrioritySatisfiedRange
7123 priority_range = DepPriorityNormalRange
7124 if prefer_asap and asap_nodes:
7125 # ASAP nodes are merged before their soft deps. Go ahead and
7126 # select root nodes here if necessary, since it's typical for
7127 # the parent to have been removed from the graph already.
7128 asap_nodes = [node for node in asap_nodes \
7129 if mygraph.contains(node)]
7130 for node in asap_nodes:
7131 if not mygraph.child_nodes(node,
7132 ignore_priority=priority_range.ignore_soft):
7133 selected_nodes = [node]
7134 asap_nodes.remove(node)
# Normal leaf-node selection: loosen the ignored-edge priority step by
# step until some leaf nodes appear.
7136 if not selected_nodes and \
7137 not (prefer_asap and asap_nodes):
7138 for i in xrange(priority_range.NONE,
7139 priority_range.MEDIUM_SOFT + 1):
7140 ignore_priority = priority_range.ignore_priority[i]
7141 nodes = get_nodes(ignore_priority=ignore_priority)
7143 # If there is a mix of uninstall nodes with other
7144 # types, save the uninstall nodes for later since
7145 # sometimes a merge node will render an uninstall
7146 # node unnecessary (due to occupying the same slot),
7147 # and we want to avoid executing a separate uninstall
7148 # task in that case.
7150 good_uninstalls = []
7151 with_some_uninstalls_excluded = []
7153 if node.operation == "uninstall":
7154 slot_node = self.mydbapi[node.root
7155 ].match_pkgs(node.slot_atom)
7157 slot_node[0].operation == "merge":
7159 good_uninstalls.append(node)
7160 with_some_uninstalls_excluded.append(node)
7162 nodes = good_uninstalls
7163 elif with_some_uninstalls_excluded:
7164 nodes = with_some_uninstalls_excluded
7168 if ignore_priority is None and not tree_mode:
7169 # Greedily pop all of these nodes since no
7170 # relationship has been ignored. This optimization
7171 # destroys --tree output, so it's disabled in tree
7173 selected_nodes = nodes
7175 # For optimal merge order:
7176 # * Only pop one node.
7177 # * Removing a root node (node without a parent)
7178 # will not produce a leaf node, so avoid it.
7179 # * It's normal for a selected uninstall to be a
7180 # root node, so don't check them for parents.
7182 if node.operation == "uninstall" or \
7183 mygraph.parent_nodes(node):
7184 selected_nodes = [node]
# No plain leaf found: look for a group of nodes that runtime-depend on
# each other (a cycle) and can be merged together.
7190 if not selected_nodes:
7191 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7193 mergeable_nodes = set(nodes)
7194 if prefer_asap and asap_nodes:
7196 for i in xrange(priority_range.SOFT,
7197 priority_range.MEDIUM_SOFT + 1):
7198 ignore_priority = priority_range.ignore_priority[i]
7200 if not mygraph.parent_nodes(node):
7202 selected_nodes = set()
7203 if gather_deps(ignore_priority,
7204 mergeable_nodes, selected_nodes, node):
7207 selected_nodes = None
7211 if prefer_asap and asap_nodes and not selected_nodes:
7212 # We failed to find any asap nodes to merge, so ignore
7213 # them for the next iteration.
7217 if selected_nodes and ignore_priority is not None:
7218 # Try to merge ignored medium_soft deps as soon as possible
7219 # if they're not satisfied by installed packages.
7220 for node in selected_nodes:
7221 children = set(mygraph.child_nodes(node))
7222 soft = children.difference(
7223 mygraph.child_nodes(node,
7224 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7225 medium_soft = children.difference(
7226 mygraph.child_nodes(node,
7228 DepPrioritySatisfiedRange.ignore_medium_soft))
7229 medium_soft.difference_update(soft)
7230 for child in medium_soft:
7231 if child in selected_nodes:
7233 if child in asap_nodes:
7235 asap_nodes.append(child)
# Order members of a multi-node (circular) selection by dep strength.
7237 if selected_nodes and len(selected_nodes) > 1:
7238 if not isinstance(selected_nodes, list):
7239 selected_nodes = list(selected_nodes)
7240 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7242 if not selected_nodes and not myblocker_uninstalls.is_empty():
7243 # An Uninstall task needs to be executed in order to
7244 # avoid conflict if possible.
7247 priority_range = DepPrioritySatisfiedRange
7249 priority_range = DepPriorityNormalRange
7251 mergeable_nodes = get_nodes(
7252 ignore_priority=ignore_uninst_or_med)
7254 min_parent_deps = None
# Examine each candidate uninstall and apply safety checks; the task with
# the fewest unsatisfied parent deps wins (uninst_task assignment is in
# lines missing from this chunk).
7256 for task in myblocker_uninstalls.leaf_nodes():
7257 # Do some sanity checks so that system or world packages
7258 # don't get uninstalled inappropriately here (only really
7259 # necessary when --complete-graph has not been enabled).
7261 if task in ignored_uninstall_tasks:
7264 if task in scheduled_uninstalls:
7265 # It's been scheduled but it hasn't
7266 # been executed yet due to dependence
7267 # on installation of blocking packages.
7270 root_config = self.roots[task.root]
7271 inst_pkg = self._pkg_cache[
7272 ("installed", task.root, task.cpv, "nomerge")]
7274 if self.digraph.contains(inst_pkg):
# Classify the blockers that force this uninstall: EAPI 0/1 blockers get
# heuristic treatment, !!atom blockers forbid temporary overlap.
7277 forbid_overlap = False
7278 heuristic_overlap = False
7279 for blocker in myblocker_uninstalls.parent_nodes(task):
7280 if blocker.eapi in ("0", "1"):
7281 heuristic_overlap = True
7282 elif blocker.atom.blocker.overlap.forbid:
7283 forbid_overlap = True
7285 if forbid_overlap and running_root == task.root:
7288 if heuristic_overlap and running_root == task.root:
7289 # Never uninstall sys-apps/portage or it's essential
7290 # dependencies, except through replacement.
7292 runtime_dep_atoms = \
7293 list(runtime_deps.iterAtomsForPackage(task))
7294 except portage.exception.InvalidDependString, e:
7295 portage.writemsg("!!! Invalid PROVIDE in " + \
7296 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7297 (task.root, task.cpv, e), noiselevel=-1)
7301 # Don't uninstall a runtime dep if it appears
7302 # to be the only suitable one installed.
7304 vardb = root_config.trees["vartree"].dbapi
7305 for atom in runtime_dep_atoms:
7306 other_version = None
7307 for pkg in vardb.match_pkgs(atom):
7308 if pkg.cpv == task.cpv and \
7309 pkg.metadata["COUNTER"] == \
7310 task.metadata["COUNTER"]:
7314 if other_version is None:
7320 # For packages in the system set, don't take
7321 # any chances. If the conflict can't be resolved
7322 # by a normal replacement operation then abort.
7325 for atom in root_config.sets[
7326 "system"].iterAtomsForPackage(task):
7329 except portage.exception.InvalidDependString, e:
7330 portage.writemsg("!!! Invalid PROVIDE in " + \
7331 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7332 (task.root, task.cpv, e), noiselevel=-1)
7338 # Note that the world check isn't always
7339 # necessary since self._complete_graph() will
7340 # add all packages from the system and world sets to the
7341 # graph. This just allows unresolved conflicts to be
7342 # detected as early as possible, which makes it possible
7343 # to avoid calling self._complete_graph() when it is
7344 # unnecessary due to blockers triggering an abortion.
7346 # For packages in the world set, go ahead an uninstall
7347 # when necessary, as long as the atom will be satisfied
7348 # in the final state.
7349 graph_db = self.mydbapi[task.root]
7352 for atom in root_config.sets[
7353 "world"].iterAtomsForPackage(task):
7355 for pkg in graph_db.match_pkgs(atom):
7362 self._blocked_world_pkgs[inst_pkg] = atom
7364 except portage.exception.InvalidDependString, e:
7365 portage.writemsg("!!! Invalid PROVIDE in " + \
7366 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7367 (task.root, task.cpv, e), noiselevel=-1)
7373 # Check the deps of parent nodes to ensure that
7374 # the chosen task produces a leaf node. Maybe
7375 # this can be optimized some more to make the
7376 # best possible choice, but the current algorithm
7377 # is simple and should be near optimal for most
7379 mergeable_parent = False
7381 for parent in mygraph.parent_nodes(task):
7382 parent_deps.update(mygraph.child_nodes(parent,
7383 ignore_priority=priority_range.ignore_medium_soft))
7384 if parent in mergeable_nodes and \
7385 gather_deps(ignore_uninst_or_med_soft,
7386 mergeable_nodes, set(), parent):
7387 mergeable_parent = True
7389 if not mergeable_parent:
7392 parent_deps.remove(task)
7393 if min_parent_deps is None or \
7394 len(parent_deps) < min_parent_deps:
7395 min_parent_deps = len(parent_deps)
7398 if uninst_task is not None:
7399 # The uninstall is performed only after blocking
7400 # packages have been merged on top of it. File
7401 # collisions between blocking packages are detected
7402 # and removed from the list of files to be uninstalled.
7403 scheduled_uninstalls.add(uninst_task)
7404 parent_nodes = mygraph.parent_nodes(uninst_task)
7406 # Reverse the parent -> uninstall edges since we want
7407 # to do the uninstall after blocking packages have
7408 # been merged on top of it.
7409 mygraph.remove(uninst_task)
7410 for blocked_pkg in parent_nodes:
7411 mygraph.add(blocked_pkg, uninst_task,
7412 priority=BlockerDepPriority.instance)
7413 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7414 scheduler_graph.add(blocked_pkg, uninst_task,
7415 priority=BlockerDepPriority.instance)
7417 # Reset the state variables for leaf node selection and
7418 # continue trying to select leaf nodes.
7420 drop_satisfied = False
7423 if not selected_nodes:
7424 # Only select root nodes as a last resort. This case should
7425 # only trigger when the graph is nearly empty and the only
7426 # remaining nodes are isolated (no parents or children). Since
7427 # the nodes must be isolated, ignore_priority is not needed.
7428 selected_nodes = get_nodes()
7430 if not selected_nodes and not drop_satisfied:
7431 drop_satisfied = True
7434 if not selected_nodes and not myblocker_uninstalls.is_empty():
7435 # If possible, drop an uninstall task here in order to avoid
7436 # the circular deps code path. The corresponding blocker will
7437 # still be counted as an unresolved conflict.
7439 for node in myblocker_uninstalls.leaf_nodes():
7441 mygraph.remove(node)
7446 ignored_uninstall_tasks.add(node)
7449 if uninst_task is not None:
7450 # Reset the state variables for leaf node selection and
7451 # continue trying to select leaf nodes.
7453 drop_satisfied = False
# All fallbacks exhausted: report the remaining graph as circular deps.
7456 if not selected_nodes:
7457 self._circular_deps_for_display = mygraph
7458 raise self._unknown_internal_error()
7460 # At this point, we've succeeded in selecting one or more nodes, so
7461 # reset state variables for leaf node selection.
7463 drop_satisfied = False
7465 mygraph.difference_update(selected_nodes)
# Append the chosen nodes to the ordered task list, tracking which
# blockers each executed Uninstall solves.
7467 for node in selected_nodes:
7468 if isinstance(node, Package) and \
7469 node.operation == "nomerge":
7472 # Handle interactions between blockers
7473 # and uninstallation tasks.
7474 solved_blockers = set()
7476 if isinstance(node, Package) and \
7477 "uninstall" == node.operation:
7478 have_uninstall_task = True
7481 vardb = self.trees[node.root]["vartree"].dbapi
7482 previous_cpv = vardb.match(node.slot_atom)
7484 # The package will be replaced by this one, so remove
7485 # the corresponding Uninstall task if necessary.
7486 previous_cpv = previous_cpv[0]
7488 ("installed", node.root, previous_cpv, "uninstall")
7490 mygraph.remove(uninst_task)
7494 if uninst_task is not None and \
7495 uninst_task not in ignored_uninstall_tasks and \
7496 myblocker_uninstalls.contains(uninst_task):
7497 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7498 myblocker_uninstalls.remove(uninst_task)
7499 # Discard any blockers that this Uninstall solves.
7500 for blocker in blocker_nodes:
7501 if not myblocker_uninstalls.child_nodes(blocker):
7502 myblocker_uninstalls.remove(blocker)
7503 solved_blockers.add(blocker)
7505 retlist.append(node)
7507 if (isinstance(node, Package) and \
7508 "uninstall" == node.operation) or \
7509 (uninst_task is not None and \
7510 uninst_task in scheduled_uninstalls):
7511 # Include satisfied blockers in the merge list
7512 # since the user might be interested and also
7513 # it serves as an indicator that blocking packages
7514 # will be temporarily installed simultaneously.
7515 for blocker in solved_blockers:
7516 retlist.append(Blocker(atom=blocker.atom,
7517 root=blocker.root, eapi=blocker.eapi,
# Anything left in the blocker graphs is an unresolved conflict.
7520 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7521 for node in myblocker_uninstalls.root_nodes():
7522 unsolvable_blockers.add(node)
7524 for blocker in unsolvable_blockers:
7525 retlist.append(blocker)
7527 # If any Uninstall tasks need to be executed in order
7528 # to avoid a conflict, complete the graph with any
7529 # dependencies that may have been initially
7530 # neglected (to ensure that unsafe Uninstall tasks
7531 # are properly identified and blocked from execution).
7532 if have_uninstall_task and \
7534 not unsolvable_blockers:
# Restart serialization in "complete" mode via the retry exception,
# caught by the altlist() loop.
7535 self.myparams.add("complete")
7536 raise self._serialize_tasks_retry("")
7538 if unsolvable_blockers and \
7539 not self._accept_blocker_conflicts():
7540 self._unsatisfied_blockers_for_display = unsolvable_blockers
7541 self._serialized_tasks_cache = retlist[:]
7542 self._scheduler_graph = scheduler_graph
7543 raise self._unknown_internal_error()
7545 if self._slot_collision_info and \
7546 not self._accept_blocker_conflicts():
7547 self._serialized_tasks_cache = retlist[:]
7548 self._scheduler_graph = scheduler_graph
7549 raise self._unknown_internal_error()
7551 return retlist, scheduler_graph
# Display a circular-dependency error: prune nodes that cannot be part of
# a cycle, show the remaining packages as a tree (with USE flags), then
# print the error and a hint about disabling optional USE flags.
# NOTE(review): several lines are missing from this chunk (e.g. the
# `while` around root-node pruning, display_order initialization, and the
# branch between leaf_nodes() and order[0]).
7553 def _show_circular_deps(self, mygraph):
7554 # No leaf nodes are available, so we have a circular
7555 # dependency panic situation. Reduce the noise level to a
7556 # minimum via repeated elimination of root nodes since they
7557 # have no parents and thus can not be part of a cycle.
7559 root_nodes = mygraph.root_nodes(
7560 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7563 mygraph.difference_update(root_nodes)
7564 # Display the USE flags that are enabled on nodes that are part
7565 # of dependency cycles in case that helps the user decide to
7566 # disable some of them.
7568 tempgraph = mygraph.copy()
7569 while not tempgraph.empty():
7570 nodes = tempgraph.leaf_nodes()
7572 node = tempgraph.order[0]
7575 display_order.append(node)
7576 tempgraph.remove(node)
7577 display_order.reverse()
# Force verbose tree output for the error display.
7578 self.myopts.pop("--quiet", None)
7579 self.myopts.pop("--verbose", None)
7580 self.myopts["--tree"] = True
7581 portage.writemsg("\n\n", noiselevel=-1)
7582 self.display(display_order)
7583 prefix = colorize("BAD", " * ")
7584 portage.writemsg("\n", noiselevel=-1)
7585 portage.writemsg(prefix + "Error: circular dependencies:\n",
7587 portage.writemsg("\n", noiselevel=-1)
7588 mygraph.debug_print()
7589 portage.writemsg("\n", noiselevel=-1)
7590 portage.writemsg(prefix + "Note that circular dependencies " + \
7591 "can often be avoided by temporarily\n", noiselevel=-1)
7592 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7593 "optional dependencies.\n", noiselevel=-1)
# Re-display the cached merge list, unless it (or its reverse) was
# already shown — used so error reports always appear with context.
7595 def _show_merge_list(self):
7596 if self._serialized_tasks_cache is not None and \
7597 not (self._displayed_list and \
7598 (self._displayed_list == self._serialized_tasks_cache or \
7599 self._displayed_list == \
7600 list(reversed(self._serialized_tasks_cache)))):
7601 display_list = self._serialized_tasks_cache[:]
# --tree output is rendered bottom-up, so reverse before display.
7602 if "--tree" in self.myopts:
7603 display_list.reverse()
7604 self.display(display_list)
# Report unsatisfiable blockers: show the merge list, explain the
# conflict, and for each conflicting package list (a bounded number of)
# the parents/atoms that pulled it in.  Output goes to stderr.
# NOTE(review): initializations of conflict_pkgs, pruned_pkgs, msg,
# indent, max_parents and pruned_list, plus several break/continue lines,
# are missing from this chunk (gaps in the embedded numbering).
7606 def _show_unsatisfied_blockers(self, blockers):
7607 self._show_merge_list()
7608 msg = "Error: The above package list contains " + \
7609 "packages which cannot be installed " + \
7610 "at the same time on the same system."
7611 prefix = colorize("BAD", " * ")
7612 from textwrap import wrap
7613 portage.writemsg("\n", noiselevel=-1)
7614 for line in wrap(msg, 70):
7615 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7617 # Display the conflicting packages along with the packages
7618 # that pulled them in. This is helpful for troubleshooting
7619 # cases in which blockers don't solve automatically and
7620 # the reasons are not apparent from the normal merge list
# Collect each package involved in a blocker together with the set of
# (parent, atom) pairs that pulled it into the graph.
7624 for blocker in blockers:
7625 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7626 self._blocker_parents.parent_nodes(blocker)):
7627 parent_atoms = self._parent_atoms.get(pkg)
7628 if not parent_atoms:
7629 atom = self._blocked_world_pkgs.get(pkg)
7630 if atom is not None:
7631 parent_atoms = set([("@world", atom)])
7633 conflict_pkgs[pkg] = parent_atoms
7636 # Reduce noise by pruning packages that are only
7637 # pulled in by other conflict packages.
7639 for pkg, parent_atoms in conflict_pkgs.iteritems():
7640 relevant_parent = False
7641 for parent, atom in parent_atoms:
7642 if parent not in conflict_pkgs:
7643 relevant_parent = True
7645 if not relevant_parent:
7646 pruned_pkgs.add(pkg)
7647 for pkg in pruned_pkgs:
7648 del conflict_pkgs[pkg]
7654 # Max number of parents shown, to avoid flooding the display.
7656 for pkg, parent_atoms in conflict_pkgs.iteritems():
7660 # Prefer packages that are not directly involved in a conflict.
7661 for parent_atom in parent_atoms:
7662 if len(pruned_list) >= max_parents:
7664 parent, atom = parent_atom
7665 if parent not in conflict_pkgs:
7666 pruned_list.add(parent_atom)
# Second pass fills any remaining display slots with conflict parents.
7668 for parent_atom in parent_atoms:
7669 if len(pruned_list) >= max_parents:
7671 pruned_list.add(parent_atom)
7673 omitted_parents = len(parent_atoms) - len(pruned_list)
7674 msg.append(indent + "%s pulled in by\n" % pkg)
7676 for parent_atom in pruned_list:
7677 parent, atom = parent_atom
7678 msg.append(2*indent)
7679 if isinstance(parent,
7680 (PackageArg, AtomArg)):
7681 # For PackageArg and AtomArg types, it's
7682 # redundant to display the atom attribute.
7683 msg.append(str(parent))
7685 # Display the specific atom from SetArg or
7687 msg.append("%s required by %s" % (atom, parent))
7691 msg.append(2*indent)
7692 msg.append("(and %d more)\n" % omitted_parents)
7696 sys.stderr.write("".join(msg))
7699 if "--quiet" not in self.myopts:
7700 show_blocker_docs_link()
7702 def display(self, mylist, favorites=[], verbosity=None):
7704 # This is used to prevent display_problems() from
7705 # redundantly displaying this exact same merge list
7706 # again via _show_merge_list().
7707 self._displayed_list = mylist
7709 if verbosity is None:
7710 verbosity = ("--quiet" in self.myopts and 1 or \
7711 "--verbose" in self.myopts and 3 or 2)
7712 favorites_set = InternalPackageSet(favorites)
7713 oneshot = "--oneshot" in self.myopts or \
7714 "--onlydeps" in self.myopts
7715 columns = "--columns" in self.myopts
7720 counters = PackageCounters()
7722 if verbosity == 1 and "--verbose" not in self.myopts:
7723 def create_use_string(*args):
7726 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7728 is_new, reinst_flags,
7729 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7730 alphabetical=("--alphabetical" in self.myopts)):
7738 cur_iuse = set(cur_iuse)
7739 enabled_flags = cur_iuse.intersection(cur_use)
7740 removed_iuse = set(old_iuse).difference(cur_iuse)
7741 any_iuse = cur_iuse.union(old_iuse)
7742 any_iuse = list(any_iuse)
7744 for flag in any_iuse:
7747 reinst_flag = reinst_flags and flag in reinst_flags
7748 if flag in enabled_flags:
7750 if is_new or flag in old_use and \
7751 (all_flags or reinst_flag):
7752 flag_str = red(flag)
7753 elif flag not in old_iuse:
7754 flag_str = yellow(flag) + "%*"
7755 elif flag not in old_use:
7756 flag_str = green(flag) + "*"
7757 elif flag in removed_iuse:
7758 if all_flags or reinst_flag:
7759 flag_str = yellow("-" + flag) + "%"
7762 flag_str = "(" + flag_str + ")"
7763 removed.append(flag_str)
7766 if is_new or flag in old_iuse and \
7767 flag not in old_use and \
7768 (all_flags or reinst_flag):
7769 flag_str = blue("-" + flag)
7770 elif flag not in old_iuse:
7771 flag_str = yellow("-" + flag)
7772 if flag not in iuse_forced:
7774 elif flag in old_use:
7775 flag_str = green("-" + flag) + "*"
7777 if flag in iuse_forced:
7778 flag_str = "(" + flag_str + ")"
7780 enabled.append(flag_str)
7782 disabled.append(flag_str)
7785 ret = " ".join(enabled)
7787 ret = " ".join(enabled + disabled + removed)
7789 ret = '%s="%s" ' % (name, ret)
7792 repo_display = RepoDisplay(self.roots)
7796 mygraph = self.digraph.copy()
7798 # If there are any Uninstall instances, add the corresponding
7799 # blockers to the digraph (useful for --tree display).
7801 executed_uninstalls = set(node for node in mylist \
7802 if isinstance(node, Package) and node.operation == "unmerge")
7804 for uninstall in self._blocker_uninstalls.leaf_nodes():
7805 uninstall_parents = \
7806 self._blocker_uninstalls.parent_nodes(uninstall)
7807 if not uninstall_parents:
7810 # Remove the corresponding "nomerge" node and substitute
7811 # the Uninstall node.
7812 inst_pkg = self._pkg_cache[
7813 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7815 mygraph.remove(inst_pkg)
7820 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7822 inst_pkg_blockers = []
7824 # Break the Package -> Uninstall edges.
7825 mygraph.remove(uninstall)
7827 # Resolution of a package's blockers
7828 # depend on it's own uninstallation.
7829 for blocker in inst_pkg_blockers:
7830 mygraph.add(uninstall, blocker)
7832 # Expand Package -> Uninstall edges into
7833 # Package -> Blocker -> Uninstall edges.
7834 for blocker in uninstall_parents:
7835 mygraph.add(uninstall, blocker)
7836 for parent in self._blocker_parents.parent_nodes(blocker):
7837 if parent != inst_pkg:
7838 mygraph.add(blocker, parent)
7840 # If the uninstall task did not need to be executed because
7841 # of an upgrade, display Blocker -> Upgrade edges since the
7842 # corresponding Blocker -> Uninstall edges will not be shown.
7844 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7845 if upgrade_node is not None and \
7846 uninstall not in executed_uninstalls:
7847 for blocker in uninstall_parents:
7848 mygraph.add(upgrade_node, blocker)
7850 unsatisfied_blockers = []
7855 if isinstance(x, Blocker) and not x.satisfied:
7856 unsatisfied_blockers.append(x)
7859 if "--tree" in self.myopts:
7860 depth = len(tree_nodes)
7861 while depth and graph_key not in \
7862 mygraph.child_nodes(tree_nodes[depth-1]):
7865 tree_nodes = tree_nodes[:depth]
7866 tree_nodes.append(graph_key)
7867 display_list.append((x, depth, True))
7868 shown_edges.add((graph_key, tree_nodes[depth-1]))
7870 traversed_nodes = set() # prevent endless circles
7871 traversed_nodes.add(graph_key)
7872 def add_parents(current_node, ordered):
7874 # Do not traverse to parents if this node is an
7875 # an argument or a direct member of a set that has
7876 # been specified as an argument (system or world).
7877 if current_node not in self._set_nodes:
7878 parent_nodes = mygraph.parent_nodes(current_node)
7880 child_nodes = set(mygraph.child_nodes(current_node))
7881 selected_parent = None
7882 # First, try to avoid a direct cycle.
7883 for node in parent_nodes:
7884 if not isinstance(node, (Blocker, Package)):
7886 if node not in traversed_nodes and \
7887 node not in child_nodes:
7888 edge = (current_node, node)
7889 if edge in shown_edges:
7891 selected_parent = node
7893 if not selected_parent:
7894 # A direct cycle is unavoidable.
7895 for node in parent_nodes:
7896 if not isinstance(node, (Blocker, Package)):
7898 if node not in traversed_nodes:
7899 edge = (current_node, node)
7900 if edge in shown_edges:
7902 selected_parent = node
7905 shown_edges.add((current_node, selected_parent))
7906 traversed_nodes.add(selected_parent)
7907 add_parents(selected_parent, False)
7908 display_list.append((current_node,
7909 len(tree_nodes), ordered))
7910 tree_nodes.append(current_node)
7912 add_parents(graph_key, True)
7914 display_list.append((x, depth, True))
7915 mylist = display_list
7916 for x in unsatisfied_blockers:
7917 mylist.append((x, 0, True))
7919 last_merge_depth = 0
7920 for i in xrange(len(mylist)-1,-1,-1):
7921 graph_key, depth, ordered = mylist[i]
7922 if not ordered and depth == 0 and i > 0 \
7923 and graph_key == mylist[i-1][0] and \
7924 mylist[i-1][1] == 0:
7925 # An ordered node got a consecutive duplicate when the tree was
7929 if ordered and graph_key[-1] != "nomerge":
7930 last_merge_depth = depth
7932 if depth >= last_merge_depth or \
7933 i < len(mylist) - 1 and \
7934 depth >= mylist[i+1][1]:
7937 from portage import flatten
7938 from portage.dep import use_reduce, paren_reduce
7939 # files to fetch list - avoids counting a same file twice
7940 # in size display (verbose mode)
7943 # Use this set to detect when all the "repoadd" strings are "[0]"
7944 # and disable the entire repo display in this case.
7947 for mylist_index in xrange(len(mylist)):
7948 x, depth, ordered = mylist[mylist_index]
7952 portdb = self.trees[myroot]["porttree"].dbapi
7953 bindb = self.trees[myroot]["bintree"].dbapi
7954 vardb = self.trees[myroot]["vartree"].dbapi
7955 vartree = self.trees[myroot]["vartree"]
7956 pkgsettings = self.pkgsettings[myroot]
7959 indent = " " * depth
7961 if isinstance(x, Blocker):
7963 blocker_style = "PKG_BLOCKER_SATISFIED"
7964 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7966 blocker_style = "PKG_BLOCKER"
7967 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7969 counters.blocks += 1
7971 counters.blocks_satisfied += 1
7972 resolved = portage.key_expand(
7973 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7974 if "--columns" in self.myopts and "--quiet" in self.myopts:
7975 addl += " " + colorize(blocker_style, resolved)
7977 addl = "[%s %s] %s%s" % \
7978 (colorize(blocker_style, "blocks"),
7979 addl, indent, colorize(blocker_style, resolved))
7980 block_parents = self._blocker_parents.parent_nodes(x)
7981 block_parents = set([pnode[2] for pnode in block_parents])
7982 block_parents = ", ".join(block_parents)
7984 addl += colorize(blocker_style,
7985 " (\"%s\" is blocking %s)") % \
7986 (str(x.atom).lstrip("!"), block_parents)
7988 addl += colorize(blocker_style,
7989 " (is blocking %s)") % block_parents
7990 if isinstance(x, Blocker) and x.satisfied:
7995 blockers.append(addl)
7998 pkg_merge = ordered and pkg_status == "merge"
7999 if not pkg_merge and pkg_status == "merge":
8000 pkg_status = "nomerge"
8001 built = pkg_type != "ebuild"
8002 installed = pkg_type == "installed"
8004 metadata = pkg.metadata
8006 repo_name = metadata["repository"]
8007 if pkg_type == "ebuild":
8008 ebuild_path = portdb.findname(pkg_key)
8009 if not ebuild_path: # shouldn't happen
8010 raise portage.exception.PackageNotFound(pkg_key)
8011 repo_path_real = os.path.dirname(os.path.dirname(
8012 os.path.dirname(ebuild_path)))
8014 repo_path_real = portdb.getRepositoryPath(repo_name)
8015 pkg_use = list(pkg.use.enabled)
8017 restrict = flatten(use_reduce(paren_reduce(
8018 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8019 except portage.exception.InvalidDependString, e:
8020 if not pkg.installed:
8021 show_invalid_depstring_notice(x,
8022 pkg.metadata["RESTRICT"], str(e))
8026 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8027 "fetch" in restrict:
8030 counters.restrict_fetch += 1
8031 if portdb.fetch_check(pkg_key, pkg_use):
8034 counters.restrict_fetch_satisfied += 1
8036 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8037 #param is used for -u, where you still *do* want to see when something is being upgraded.
8040 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8041 if vardb.cpv_exists(pkg_key):
8042 addl=" "+yellow("R")+fetch+" "
8045 counters.reinst += 1
8046 elif pkg_status == "uninstall":
8047 counters.uninst += 1
8048 # filter out old-style virtual matches
8049 elif installed_versions and \
8050 portage.cpv_getkey(installed_versions[0]) == \
8051 portage.cpv_getkey(pkg_key):
8052 myinslotlist = vardb.match(pkg.slot_atom)
8053 # If this is the first install of a new-style virtual, we
8054 # need to filter out old-style virtual matches.
8055 if myinslotlist and \
8056 portage.cpv_getkey(myinslotlist[0]) != \
8057 portage.cpv_getkey(pkg_key):
8060 myoldbest = myinslotlist[:]
8062 if not portage.dep.cpvequal(pkg_key,
8063 portage.best([pkg_key] + myoldbest)):
8065 addl += turquoise("U")+blue("D")
8067 counters.downgrades += 1
8070 addl += turquoise("U") + " "
8072 counters.upgrades += 1
8074 # New slot, mark it new.
8075 addl = " " + green("NS") + fetch + " "
8076 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8078 counters.newslot += 1
8080 if "--changelog" in self.myopts:
8081 inst_matches = vardb.match(pkg.slot_atom)
8083 changelogs.extend(self.calc_changelog(
8084 portdb.findname(pkg_key),
8085 inst_matches[0], pkg_key))
8087 addl = " " + green("N") + " " + fetch + " "
8096 forced_flags = set()
8097 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8098 forced_flags.update(pkgsettings.useforce)
8099 forced_flags.update(pkgsettings.usemask)
8101 cur_use = [flag for flag in pkg.use.enabled \
8102 if flag in pkg.iuse.all]
8103 cur_iuse = sorted(pkg.iuse.all)
8105 if myoldbest and myinslotlist:
8106 previous_cpv = myoldbest[0]
8108 previous_cpv = pkg.cpv
8109 if vardb.cpv_exists(previous_cpv):
8110 old_iuse, old_use = vardb.aux_get(
8111 previous_cpv, ["IUSE", "USE"])
8112 old_iuse = list(set(
8113 filter_iuse_defaults(old_iuse.split())))
8115 old_use = old_use.split()
8122 old_use = [flag for flag in old_use if flag in old_iuse]
8124 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8126 use_expand.reverse()
8127 use_expand_hidden = \
8128 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8130 def map_to_use_expand(myvals, forcedFlags=False,
8134 for exp in use_expand:
8137 for val in myvals[:]:
8138 if val.startswith(exp.lower()+"_"):
8139 if val in forced_flags:
8140 forced[exp].add(val[len(exp)+1:])
8141 ret[exp].append(val[len(exp)+1:])
8144 forced["USE"] = [val for val in myvals \
8145 if val in forced_flags]
8147 for exp in use_expand_hidden:
8153 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8154 # are the only thing that triggered reinstallation.
8155 reinst_flags_map = {}
8156 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8157 reinst_expand_map = None
8158 if reinstall_for_flags:
8159 reinst_flags_map = map_to_use_expand(
8160 list(reinstall_for_flags), removeHidden=False)
8161 for k in list(reinst_flags_map):
8162 if not reinst_flags_map[k]:
8163 del reinst_flags_map[k]
8164 if not reinst_flags_map.get("USE"):
8165 reinst_expand_map = reinst_flags_map.copy()
8166 reinst_expand_map.pop("USE", None)
8167 if reinst_expand_map and \
8168 not set(reinst_expand_map).difference(
8170 use_expand_hidden = \
8171 set(use_expand_hidden).difference(
8174 cur_iuse_map, iuse_forced = \
8175 map_to_use_expand(cur_iuse, forcedFlags=True)
8176 cur_use_map = map_to_use_expand(cur_use)
8177 old_iuse_map = map_to_use_expand(old_iuse)
8178 old_use_map = map_to_use_expand(old_use)
8181 use_expand.insert(0, "USE")
8183 for key in use_expand:
8184 if key in use_expand_hidden:
8186 verboseadd += create_use_string(key.upper(),
8187 cur_iuse_map[key], iuse_forced[key],
8188 cur_use_map[key], old_iuse_map[key],
8189 old_use_map[key], is_new,
8190 reinst_flags_map.get(key))
8195 if pkg_type == "ebuild" and pkg_merge:
8197 myfilesdict = portdb.getfetchsizes(pkg_key,
8198 useflags=pkg_use, debug=self.edebug)
8199 except portage.exception.InvalidDependString, e:
8200 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8201 show_invalid_depstring_notice(x, src_uri, str(e))
8204 if myfilesdict is None:
8205 myfilesdict="[empty/missing/bad digest]"
8207 for myfetchfile in myfilesdict:
8208 if myfetchfile not in myfetchlist:
8209 mysize+=myfilesdict[myfetchfile]
8210 myfetchlist.append(myfetchfile)
8212 counters.totalsize += mysize
8213 verboseadd += format_size(mysize)
8216 # assign index for a previous version in the same slot
8217 has_previous = False
8218 repo_name_prev = None
8219 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8221 slot_matches = vardb.match(slot_atom)
8224 repo_name_prev = vardb.aux_get(slot_matches[0],
8227 # now use the data to generate output
8228 if pkg.installed or not has_previous:
8229 repoadd = repo_display.repoStr(repo_path_real)
8231 repo_path_prev = None
8233 repo_path_prev = portdb.getRepositoryPath(
8235 if repo_path_prev == repo_path_real:
8236 repoadd = repo_display.repoStr(repo_path_real)
8238 repoadd = "%s=>%s" % (
8239 repo_display.repoStr(repo_path_prev),
8240 repo_display.repoStr(repo_path_real))
8242 repoadd_set.add(repoadd)
8244 xs = [portage.cpv_getkey(pkg_key)] + \
8245 list(portage.catpkgsplit(pkg_key)[2:])
8252 if "COLUMNWIDTH" in self.settings:
8254 mywidth = int(self.settings["COLUMNWIDTH"])
8255 except ValueError, e:
8256 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8258 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8259 self.settings["COLUMNWIDTH"], noiselevel=-1)
8261 oldlp = mywidth - 30
8264 # Convert myoldbest from a list to a string.
8268 for pos, key in enumerate(myoldbest):
8269 key = portage.catpkgsplit(key)[2] + \
8270 "-" + portage.catpkgsplit(key)[3]
8271 if key[-3:] == "-r0":
8273 myoldbest[pos] = key
8274 myoldbest = blue("["+", ".join(myoldbest)+"]")
8277 root_config = self.roots[myroot]
8278 system_set = root_config.sets["system"]
8279 world_set = root_config.sets["world"]
8284 pkg_system = system_set.findAtomForPackage(pkg)
8285 pkg_world = world_set.findAtomForPackage(pkg)
8286 if not (oneshot or pkg_world) and \
8287 myroot == self.target_root and \
8288 favorites_set.findAtomForPackage(pkg):
8289 # Maybe it will be added to world now.
8290 if create_world_atom(pkg, favorites_set, root_config):
8292 except portage.exception.InvalidDependString:
8293 # This is reported elsewhere if relevant.
8296 def pkgprint(pkg_str):
8299 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8301 return colorize("PKG_MERGE_WORLD", pkg_str)
8303 return colorize("PKG_MERGE", pkg_str)
8304 elif pkg_status == "uninstall":
8305 return colorize("PKG_UNINSTALL", pkg_str)
8308 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8310 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8312 return colorize("PKG_NOMERGE", pkg_str)
8315 properties = flatten(use_reduce(paren_reduce(
8316 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8317 except portage.exception.InvalidDependString, e:
8318 if not pkg.installed:
8319 show_invalid_depstring_notice(pkg,
8320 pkg.metadata["PROPERTIES"], str(e))
8324 interactive = "interactive" in properties
8325 if interactive and pkg.operation == "merge":
8326 addl = colorize("WARN", "I") + addl[1:]
8328 counters.interactive += 1
8333 if "--columns" in self.myopts:
8334 if "--quiet" in self.myopts:
8335 myprint=addl+" "+indent+pkgprint(pkg_cp)
8336 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8337 myprint=myprint+myoldbest
8338 myprint=myprint+darkgreen("to "+x[1])
8342 myprint = "[%s] %s%s" % \
8343 (pkgprint(pkg_status.ljust(13)),
8344 indent, pkgprint(pkg.cp))
8346 myprint = "[%s %s] %s%s" % \
8347 (pkgprint(pkg.type_name), addl,
8348 indent, pkgprint(pkg.cp))
8349 if (newlp-nc_len(myprint)) > 0:
8350 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8351 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8352 if (oldlp-nc_len(myprint)) > 0:
8353 myprint=myprint+" "*(oldlp-nc_len(myprint))
8354 myprint=myprint+myoldbest
8355 myprint += darkgreen("to " + pkg.root)
8358 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8360 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8361 myprint += indent + pkgprint(pkg_key) + " " + \
8362 myoldbest + darkgreen("to " + myroot)
8364 if "--columns" in self.myopts:
8365 if "--quiet" in self.myopts:
8366 myprint=addl+" "+indent+pkgprint(pkg_cp)
8367 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8368 myprint=myprint+myoldbest
8372 myprint = "[%s] %s%s" % \
8373 (pkgprint(pkg_status.ljust(13)),
8374 indent, pkgprint(pkg.cp))
8376 myprint = "[%s %s] %s%s" % \
8377 (pkgprint(pkg.type_name), addl,
8378 indent, pkgprint(pkg.cp))
8379 if (newlp-nc_len(myprint)) > 0:
8380 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8381 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8382 if (oldlp-nc_len(myprint)) > 0:
8383 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8384 myprint += myoldbest
8387 myprint = "[%s] %s%s %s" % \
8388 (pkgprint(pkg_status.ljust(13)),
8389 indent, pkgprint(pkg.cpv),
8392 myprint = "[%s %s] %s%s %s" % \
8393 (pkgprint(pkg_type), addl, indent,
8394 pkgprint(pkg.cpv), myoldbest)
8396 if columns and pkg.operation == "uninstall":
8398 p.append((myprint, verboseadd, repoadd))
8400 if "--tree" not in self.myopts and \
8401 "--quiet" not in self.myopts and \
8402 not self._opts_no_restart.intersection(self.myopts) and \
8403 pkg.root == self._running_root.root and \
8404 portage.match_from_list(
8405 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8406 not vardb.cpv_exists(pkg.cpv) and \
8407 "--quiet" not in self.myopts:
8408 if mylist_index < len(mylist) - 1:
8409 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8410 p.append(colorize("WARN", " then resume the merge."))
8413 show_repos = repoadd_set and repoadd_set != set(["0"])
8416 if isinstance(x, basestring):
8417 out.write("%s\n" % (x,))
8420 myprint, verboseadd, repoadd = x
8423 myprint += " " + verboseadd
8425 if show_repos and repoadd:
8426 myprint += " " + teal("[%s]" % repoadd)
8428 out.write("%s\n" % (myprint,))
8437 sys.stdout.write(str(repo_display))
8439 if "--changelog" in self.myopts:
8441 for revision,text in changelogs:
8442 print bold('*'+revision)
8443 sys.stdout.write(text)
def display_problems(self):
    """
    Display problems with the dependency graph such as slot collisions.
    This is called internally by display() to show the problems _after_
    the merge list where it is most likely to be seen, but if display()
    is not going to be called then this method should be called explicitly
    to ensure that the user is notified of problems with the graph.

    All output goes to stderr, except for unsatisfied dependencies which
    go to stdout for parsing by programs such as autounmask.
    """

    # Note that show_masked_packages() sends it's output to
    # stdout, and some programs such as autounmask parse the
    # output in cases when emerge bails out. However, when
    # show_masked_packages() is called for installed packages
    # here, the message is a warning that is more appropriate
    # to send to stderr, so temporarily redirect stdout to
    # stderr. TODO: Fix output code so there's a cleaner way
    # to redirect everything to stderr.
    sys.stdout.flush()
    sys.stderr.flush()
    stdout = sys.stdout
    try:
        sys.stdout = sys.stderr
        self._display_problems()
    finally:
        # Always restore the real stdout, even if the problem
        # display itself raises.
        sys.stdout = stdout
        sys.stdout.flush()
        sys.stderr.flush()

    # This goes to stdout for parsing by programs like autounmask.
    for pargs, kwargs in self._unsatisfied_deps_for_display:
        self._show_unsatisfied_dep(*pargs, **kwargs)
def _display_problems(self):
    """
    Emit warnings about graph problems (circular deps, unsatisfied
    blockers, slot collisions, world-file problems, package.provided
    conflicts, and masked installed packages). Callers are expected
    to have redirected stdout to stderr (see display_problems).
    """
    if self._circular_deps_for_display is not None:
        self._show_circular_deps(
            self._circular_deps_for_display)

    # The user is only notified of a slot conflict if
    # there are no unresolvable blocker conflicts.
    if self._unsatisfied_blockers_for_display is not None:
        self._show_unsatisfied_blockers(
            self._unsatisfied_blockers_for_display)
    else:
        self._show_slot_collision_notice()

    # TODO: Add generic support for "set problem" handlers so that
    # the below warnings aren't special cases for world only.

    if self._missing_args:
        world_problems = False
        if "world" in self._sets:
            # Filter out indirect members of world (from nested sets)
            # since only direct members of world are desired here.
            world_set = self.roots[self.target_root].sets["world"]
            for arg, atom in self._missing_args:
                if arg.name == "world" and atom in world_set:
                    world_problems = True
                    break

        if world_problems:
            sys.stderr.write("\n!!! Problems have been " + \
                "detected with your world file\n")
            sys.stderr.write("!!! Please run " + \
                green("emaint --check world")+"\n\n")

    if self._missing_args:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " Ebuilds for the following packages are either all\n")
        sys.stderr.write(colorize("BAD", "!!!") + \
            " masked or don't exist:\n")
        sys.stderr.write(" ".join(str(atom) for arg, atom in \
            self._missing_args) + "\n")

    if self._pprovided_args:
        # Group the offending atoms by (arg, atom) and remember which
        # sets (or plain command-line args) pulled each one in.
        arg_refs = {}
        for arg, atom in self._pprovided_args:
            if isinstance(arg, SetArg):
                parent = arg.name
                arg_atom = (atom, atom)
            else:
                parent = "args"
                arg_atom = (arg.arg, atom)
            refs = arg_refs.setdefault(arg_atom, [])
            if parent not in refs:
                refs.append(parent)
        msg = []
        msg.append(bad("\nWARNING: "))
        if len(self._pprovided_args) > 1:
            msg.append("Requested packages will not be " + \
                "merged because they are listed in\n")
        else:
            msg.append("A requested package will not be " + \
                "merged because it is listed in\n")
        msg.append("package.provided:\n\n")
        problems_sets = set()
        for (arg, atom), refs in arg_refs.iteritems():
            ref_string = ""
            if refs:
                problems_sets.update(refs)
                refs.sort()
                ref_string = ", ".join(["'%s'" % name for name in refs])
                ref_string = " pulled in by " + ref_string
            msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
        msg.append("\n")
        if "world" in problems_sets:
            msg.append("This problem can be solved in one of the following ways:\n\n")
            msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
            msg.append("  B) Uninstall offending packages (cleans them from world).\n")
            msg.append("  C) Remove offending entries from package.provided.\n\n")
            msg.append("The best course of action depends on the reason that an offending\n")
            msg.append("package.provided entry exists.\n\n")
        sys.stderr.write("".join(msg))

    masked_packages = []
    for pkg in self._masked_installed:
        root_config = pkg.root_config
        pkgsettings = self.pkgsettings[pkg.root]
        mreasons = get_masking_status(pkg, pkgsettings, root_config)
        masked_packages.append((root_config, pkgsettings,
            pkg.cpv, pkg.metadata, mreasons))
    if masked_packages:
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " The following installed packages are masked:\n")
        show_masked_packages(masked_packages)
        print
def calc_changelog(self,ebuildpath,current,next):
    """
    Return the list of (release, text) ChangeLog entries between the
    installed version `current` and the to-be-merged version `next`,
    or [] when the ChangeLog is missing/unreadable or the versions
    cannot be located in it.
    """
    if ebuildpath == None or not os.path.exists(ebuildpath):
        return []
    # Normalize both versions to PV[-PR] form, without a trailing -r0.
    current = '-'.join(portage.catpkgsplit(current)[1:])
    if current.endswith('-r0'):
        current = current[:-3]
    next = '-'.join(portage.catpkgsplit(next)[1:])
    if next.endswith('-r0'):
        next = next[:-3]
    changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
    try:
        changelog = open(changelogpath).read()
    except SystemExit:
        raise # Needed else can't exit
    except:
        # Best-effort: a missing or unreadable ChangeLog is not fatal.
        return []
    divisions = self.find_changelog_tags(changelog)
    #print 'XX from',current,'to',next
    #for div,text in divisions: print 'XX',div
    # skip entries for all revisions above the one we are about to emerge
    for i in range(len(divisions)):
        if divisions[i][0]==next:
            divisions = divisions[i:]
            break
    # find out how many entries we are going to display
    for i in range(len(divisions)):
        if divisions[i][0]==current:
            divisions = divisions[:i]
            break
    else:
        # couldnt find the current revision in the list. display nothing
        return []
    return divisions
def find_changelog_tags(self,changelog):
    """
    Split a ChangeLog text into a list of (release, text) tuples, one
    per "*<version>" header line, in file order. Trailing '.ebuild'
    and '-r0' suffixes are stripped from the release names.
    """
    divs = []
    release = None
    while 1:
        match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
        if match is None:
            # No further headers: the remainder belongs to the last release.
            if release is not None:
                divs.append((release,changelog))
            return divs
        if release is not None:
            divs.append((release,changelog[:match.start()]))
        changelog = changelog[match.end():]
        release = match.group(1)
        if release.endswith('.ebuild'):
            release = release[:-7]
        if release.endswith('-r0'):
            release = release[:-3]
8630 def saveNomergeFavorites(self):
8631 """Find atoms in favorites that are not in the mergelist and add them
8632 to the world file if necessary."""
8633 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8634 "--oneshot", "--onlydeps", "--pretend"):
8635 if x in self.myopts:
8637 root_config = self.roots[self.target_root]
8638 world_set = root_config.sets["world"]
8640 world_locked = False
8641 if hasattr(world_set, "lock"):
8645 if hasattr(world_set, "load"):
8646 world_set.load() # maybe it's changed on disk
8648 args_set = self._sets["args"]
8649 portdb = self.trees[self.target_root]["porttree"].dbapi
8650 added_favorites = set()
8651 for x in self._set_nodes:
8652 pkg_type, root, pkg_key, pkg_status = x
8653 if pkg_status != "nomerge":
8657 myfavkey = create_world_atom(x, args_set, root_config)
8659 if myfavkey in added_favorites:
8661 added_favorites.add(myfavkey)
8662 except portage.exception.InvalidDependString, e:
8663 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8664 (pkg_key, str(e)), noiselevel=-1)
8665 writemsg("!!! see '%s'\n\n" % os.path.join(
8666 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8669 for k in self._sets:
8670 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8675 all_added.append(SETPREFIX + k)
8676 all_added.extend(added_favorites)
8679 print ">>> Recording %s in \"world\" favorites file..." % \
8680 colorize("INFORM", str(a))
8682 world_set.update(all_added)
def loadResumeCommand(self, resume_data, skip_masked=False):
    """
    Add a resume command to the graph and validate it in the process. This
    will raise a PackageNotFound exception if a package is not available.

    Returns True on success, False when the resume list is unusable
    (invalid data, unsatisfied deps queued for display, or an internal
    graph error). Raises self.UnsatisfiedResumeDep when required
    packages have been dropped (e.g. via --skipfirst).
    """

    if not isinstance(resume_data, dict):
        return False

    mergelist = resume_data.get("mergelist")
    if not isinstance(mergelist, list):
        mergelist = []

    fakedb = self.mydbapi
    trees = self.trees
    serialized_tasks = []
    masked_tasks = []
    for x in mergelist:
        if not (isinstance(x, list) and len(x) == 4):
            continue
        pkg_type, myroot, pkg_key, action = x
        if pkg_type not in self.pkg_tree_map:
            continue
        if action != "merge":
            continue
        tree_type = self.pkg_tree_map[pkg_type]
        mydb = trees[myroot][tree_type].dbapi
        db_keys = list(self._trees_orig[myroot][
            tree_type].dbapi._aux_cache_keys)
        try:
            metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
        except KeyError:
            # It does no exist or it is corrupt.
            if action == "uninstall":
                continue
            raise portage.exception.PackageNotFound(pkg_key)
        installed = action == "uninstall"
        built = pkg_type != "ebuild"
        root_config = self.roots[myroot]
        pkg = Package(built=built, cpv=pkg_key,
            installed=installed, metadata=metadata,
            operation=action, root_config=root_config,
            type_name=pkg_type)
        if pkg_type == "ebuild":
            pkgsettings = self.pkgsettings[myroot]
            pkgsettings.setcpv(pkg)
            pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
            pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
        self._pkg_cache[pkg] = pkg

        root_config = self.roots[pkg.root]
        if "merge" == pkg.operation and \
            not visible(root_config.settings, pkg):
            if skip_masked:
                masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
            else:
                self._unsatisfied_deps_for_display.append(
                    ((pkg.root, "="+pkg.cpv), {"myparent":None}))

        fakedb[myroot].cpv_inject(pkg)
        serialized_tasks.append(pkg)
        self.spinner.update()

    if self._unsatisfied_deps_for_display:
        return False

    if not serialized_tasks or "--nodeps" in self.myopts:
        self._serialized_tasks_cache = serialized_tasks
        self._scheduler_graph = self.digraph
    else:
        self._select_package = self._select_pkg_from_graph
        self.myparams.add("selective")
        # Always traverse deep dependencies in order to account for
        # potentially unsatisfied dependencies of installed packages.
        # This is necessary for correct --keep-going or --resume operation
        # in case a package from a group of circularly dependent packages
        # fails. In this case, a package which has recently been installed
        # may have an unsatisfied circular dependency (pulled in by
        # PDEPEND, for example). So, even though a package is already
        # installed, it may not have all of it's dependencies satisfied, so
        # it may not be usable. If such a package is in the subgraph of
        # deep depenedencies of a scheduled build, that build needs to
        # be cancelled. In order for this type of situation to be
        # recognized, deep traversal of dependencies is required.
        self.myparams.add("deep")

        favorites = resume_data.get("favorites")
        args_set = self._sets["args"]
        if isinstance(favorites, list):
            args = self._load_favorites(favorites)
        else:
            args = []

        for task in serialized_tasks:
            if isinstance(task, Package) and \
                task.operation == "merge":
                if not self._add_pkg(task, None):
                    return False

        # Packages for argument atoms need to be explicitly
        # added via _add_pkg() so that they are included in the
        # digraph (needed at least for --tree display).
        for arg in args:
            for atom in arg.set:
                pkg, existing_node = self._select_package(
                    arg.root_config.root, atom)
                if existing_node is None and \
                    pkg is not None:
                    if not self._add_pkg(pkg, Dependency(atom=atom,
                        root=pkg.root, parent=arg)):
                        return False

        # Allow unsatisfied deps here to avoid showing a masking
        # message for an unsatisfied dep that isn't necessarily
        # masked.
        if not self._create_graph(allow_unsatisfied=True):
            return False

        unsatisfied_deps = []
        for dep in self._unsatisfied_deps:
            if not isinstance(dep.parent, Package):
                continue
            if dep.parent.operation == "merge":
                unsatisfied_deps.append(dep)
                continue

            # For unsatisfied deps of installed packages, only account for
            # them if they are in the subgraph of dependencies of a package
            # which is scheduled to be installed.
            unsatisfied_install = False
            traversed = set()
            dep_stack = self.digraph.parent_nodes(dep.parent)
            while dep_stack:
                node = dep_stack.pop()
                if not isinstance(node, Package):
                    continue
                if node.operation == "merge":
                    unsatisfied_install = True
                    break
                if node in traversed:
                    continue
                traversed.add(node)
                dep_stack.extend(self.digraph.parent_nodes(node))

            if unsatisfied_install:
                unsatisfied_deps.append(dep)

        if masked_tasks or unsatisfied_deps:
            # This probably means that a required package
            # was dropped via --skipfirst. It makes the
            # resume list invalid, so convert it to a
            # UnsatisfiedResumeDep exception.
            raise self.UnsatisfiedResumeDep(self,
                masked_tasks + unsatisfied_deps)
        self._serialized_tasks_cache = None
        try:
            self.altlist()
        except self._unknown_internal_error:
            return False

    return True
def _load_favorites(self, favorites):
    """
    Use a list of favorites to resume state from a
    previous select_files() call. This creates similar
    DependencyArg instances to those that would have
    been created by the original select_files() call.
    This allows Package instances to be matched with
    DependencyArg instances during graph creation.
    """
    root_config = self.roots[self.target_root]
    getSetAtoms = root_config.setconfig.getSetAtoms
    sets = root_config.sets
    args = []
    for x in favorites:
        if not isinstance(x, basestring):
            continue
        if x in ("system", "world"):
            x = SETPREFIX + x
        if x.startswith(SETPREFIX):
            s = x[len(SETPREFIX):]
            if s not in sets:
                continue
            if s in self._sets:
                continue
            # Recursively expand sets so that containment tests in
            # self._get_parent_sets() properly match atoms in nested
            # sets (like if world contains system).
            expanded_set = InternalPackageSet(
                initial_atoms=getSetAtoms(s))
            self._sets[s] = expanded_set
            args.append(SetArg(arg=x, set=expanded_set,
                root_config=root_config))
        else:
            if not portage.isvalidatom(x):
                continue
            args.append(AtomArg(arg=x, atom=x,
                root_config=root_config))

    self._set_args(args)
    return args
class UnsatisfiedResumeDep(portage.exception.PortageException):
    """
    A dependency of a resume list is not installed. This
    can occur when a required package is dropped from the
    merge list via --skipfirst.
    """
    def __init__(self, depgraph, value):
        portage.exception.PortageException.__init__(self, value)
        # Keep a reference to the depgraph so the handler can inspect it.
        self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
    """Base class for exceptions used internally by the depgraph."""
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
    """
    Used by the depgraph internally to terminate graph creation.
    The specific reason for the failure should have been dumped
    to stderr, unfortunately, the exact reason for the failure
    may not be known.
    """
class _serialize_tasks_retry(_internal_exception):
    """
    This is raised by the _serialize_tasks() method when it needs to
    be called again for some reason. The only case that it's currently
    used for is when neglected dependencies need to be added to the
    graph in order to avoid making a potentially unsafe decision.
    """
8920 class _dep_check_composite_db(portage.dbapi):
8922 A dbapi-like interface that is optimized for use in dep_check() calls.
8923 This is built on top of the existing depgraph package selection logic.
8924 Some packages that have been added to the graph may be masked from this
8925 view in order to influence the atom preference selection that occurs
8928 def __init__(self, depgraph, root):
8929 portage.dbapi.__init__(self)
8930 self._depgraph = depgraph
8932 self._match_cache = {}
8933 self._cpv_pkg_map = {}
8935 def _clear_cache(self):
8936 self._match_cache.clear()
8937 self._cpv_pkg_map.clear()
8939 def match(self, atom):
8940 ret = self._match_cache.get(atom)
8945 atom = self._dep_expand(atom)
8946 pkg, existing = self._depgraph._select_package(self._root, atom)
8950 # Return the highest available from select_package() as well as
8951 # any matching slots in the graph db.
8953 slots.add(pkg.metadata["SLOT"])
8954 atom_cp = portage.dep_getkey(atom)
8955 if pkg.cp.startswith("virtual/"):
8956 # For new-style virtual lookahead that occurs inside
8957 # dep_check(), examine all slots. This is needed
8958 # so that newer slots will not unnecessarily be pulled in
8959 # when a satisfying lower slot is already installed. For
8960 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8961 # there's no need to pull in a newer slot to satisfy a
8962 # virtual/jdk dependency.
8963 for db, pkg_type, built, installed, db_keys in \
8964 self._depgraph._filtered_trees[self._root]["dbs"]:
8965 for cpv in db.match(atom):
8966 if portage.cpv_getkey(cpv) != pkg.cp:
8968 slots.add(db.aux_get(cpv, ["SLOT"])[0])
8970 if self._visible(pkg):
8971 self._cpv_pkg_map[pkg.cpv] = pkg
8973 slots.remove(pkg.metadata["SLOT"])
8975 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8976 pkg, existing = self._depgraph._select_package(
8977 self._root, slot_atom)
8980 if not self._visible(pkg):
8982 self._cpv_pkg_map[pkg.cpv] = pkg
8985 self._cpv_sort_ascending(ret)
8986 self._match_cache[orig_atom] = ret
8989 def _visible(self, pkg):
8990 if pkg.installed and "selective" not in self._depgraph.myparams:
8992 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8993 except (StopIteration, portage.exception.InvalidDependString):
9000 self._depgraph.pkgsettings[pkg.root], pkg):
9002 except portage.exception.InvalidDependString:
9004 in_graph = self._depgraph._slot_pkg_map[
9005 self._root].get(pkg.slot_atom)
9006 if in_graph is None:
9007 # Mask choices for packages which are not the highest visible
9008 # version within their slot (since they usually trigger slot
9010 highest_visible, in_graph = self._depgraph._select_package(
9011 self._root, pkg.slot_atom)
9012 if pkg != highest_visible:
9014 elif in_graph != pkg:
9015 # Mask choices for packages that would trigger a slot
9016 # conflict with a previously selected package.
9020 def _dep_expand(self, atom):
9022 This is only needed for old installed packages that may
9023 contain atoms that are not fully qualified with a specific
9024 category. Emulate the cpv_expand() function that's used by
9025 dbapi.match() in cases like this. If there are multiple
9026 matches, it's often due to a new-style virtual that has
9027 been added, so try to filter those out to avoid raising
9030 root_config = self._depgraph.roots[self._root]
9032 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9033 if len(expanded_atoms) > 1:
9034 non_virtual_atoms = []
9035 for x in expanded_atoms:
9036 if not portage.dep_getkey(x).startswith("virtual/"):
9037 non_virtual_atoms.append(x)
9038 if len(non_virtual_atoms) == 1:
9039 expanded_atoms = non_virtual_atoms
9040 if len(expanded_atoms) > 1:
9041 # compatible with portage.cpv_expand()
9042 raise portage.exception.AmbiguousPackageName(
9043 [portage.dep_getkey(x) for x in expanded_atoms])
9045 atom = expanded_atoms[0]
9047 null_atom = insert_category_into_atom(atom, "null")
9048 null_cp = portage.dep_getkey(null_atom)
9049 cat, atom_pn = portage.catsplit(null_cp)
9050 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9052 # Allow the resolver to choose which virtual.
9053 atom = insert_category_into_atom(atom, "virtual")
9055 atom = insert_category_into_atom(atom, "null")
def aux_get(self, cpv, wants):
	"""
	Emulate dbapi.aux_get() using the metadata of the packages held in
	_cpv_pkg_map, substituting an empty string for any missing key.
	"""
	metadata = self._cpv_pkg_map[cpv].metadata
	values = []
	for key in wants:
		values.append(metadata.get(key, ""))
	return values
9062 class RepoDisplay(object):
9063 def __init__(self, roots):
9064 self._shown_repos = {}
9065 self._unknown_repo = False
9067 for root_config in roots.itervalues():
9068 portdir = root_config.settings.get("PORTDIR")
9070 repo_paths.add(portdir)
9071 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9073 repo_paths.update(overlays.split())
9074 repo_paths = list(repo_paths)
9075 self._repo_paths = repo_paths
9076 self._repo_paths_real = [ os.path.realpath(repo_path) \
9077 for repo_path in repo_paths ]
9079 # pre-allocate index for PORTDIR so that it always has index 0.
9080 for root_config in roots.itervalues():
9081 portdb = root_config.trees["porttree"].dbapi
9082 portdir = portdb.porttree_root
9084 self.repoStr(portdir)
9086 def repoStr(self, repo_path_real):
9089 real_index = self._repo_paths_real.index(repo_path_real)
9090 if real_index == -1:
9092 self._unknown_repo = True
9094 shown_repos = self._shown_repos
9095 repo_paths = self._repo_paths
9096 repo_path = repo_paths[real_index]
9097 index = shown_repos.get(repo_path)
9099 index = len(shown_repos)
9100 shown_repos[repo_path] = index
9106 shown_repos = self._shown_repos
9107 unknown_repo = self._unknown_repo
9108 if shown_repos or self._unknown_repo:
9109 output.append("Portage tree and overlays:\n")
9110 show_repo_paths = list(shown_repos)
9111 for repo_path, repo_index in shown_repos.iteritems():
9112 show_repo_paths[repo_index] = repo_path
9114 for index, repo_path in enumerate(show_repo_paths):
9115 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9117 output.append(" "+teal("[?]") + \
9118 " indicates that the source repository could not be determined\n")
9119 return "".join(output)
9121 class PackageCounters(object):
9131 self.blocks_satisfied = 0
9133 self.restrict_fetch = 0
9134 self.restrict_fetch_satisfied = 0
9135 self.interactive = 0
9138 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9141 myoutput.append("Total: %s package" % total_installs)
9142 if total_installs != 1:
9143 myoutput.append("s")
9144 if total_installs != 0:
9145 myoutput.append(" (")
9146 if self.upgrades > 0:
9147 details.append("%s upgrade" % self.upgrades)
9148 if self.upgrades > 1:
9150 if self.downgrades > 0:
9151 details.append("%s downgrade" % self.downgrades)
9152 if self.downgrades > 1:
9155 details.append("%s new" % self.new)
9156 if self.newslot > 0:
9157 details.append("%s in new slot" % self.newslot)
9158 if self.newslot > 1:
9161 details.append("%s reinstall" % self.reinst)
9165 details.append("%s uninstall" % self.uninst)
9168 if self.interactive > 0:
9169 details.append("%s %s" % (self.interactive,
9170 colorize("WARN", "interactive")))
9171 myoutput.append(", ".join(details))
9172 if total_installs != 0:
9173 myoutput.append(")")
9174 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9175 if self.restrict_fetch:
9176 myoutput.append("\nFetch Restriction: %s package" % \
9177 self.restrict_fetch)
9178 if self.restrict_fetch > 1:
9179 myoutput.append("s")
9180 if self.restrict_fetch_satisfied < self.restrict_fetch:
9181 myoutput.append(bad(" (%s unsatisfied)") % \
9182 (self.restrict_fetch - self.restrict_fetch_satisfied))
9184 myoutput.append("\nConflict: %s block" % \
9187 myoutput.append("s")
9188 if self.blocks_satisfied < self.blocks:
9189 myoutput.append(bad(" (%s unsatisfied)") % \
9190 (self.blocks - self.blocks_satisfied))
9191 return "".join(myoutput)
9193 class PollSelectAdapter(PollConstants):
9196 Use select to emulate a poll object, for
9197 systems that don't support poll().
9201 self._registered = {}
9202 self._select_args = [[], [], []]
9204 def register(self, fd, *args):
9206 Only POLLIN is currently supported!
9210 "register expected at most 2 arguments, got " + \
9211 repr(1 + len(args)))
9213 eventmask = PollConstants.POLLIN | \
9214 PollConstants.POLLPRI | PollConstants.POLLOUT
9218 self._registered[fd] = eventmask
9219 self._select_args = None
def unregister(self, fd):
	"""
	Drop fd from the registered set and invalidate the cached select()
	argument lists so they are rebuilt on the next poll() call.
	Raises KeyError if fd was never registered, like poll objects do.
	"""
	self._select_args = None
	self._registered.pop(fd)
9225 def poll(self, *args):
9228 "poll expected at most 2 arguments, got " + \
9229 repr(1 + len(args)))
9235 select_args = self._select_args
9236 if select_args is None:
9237 select_args = [self._registered.keys(), [], []]
9239 if timeout is not None:
9240 select_args = select_args[:]
9241 # Translate poll() timeout args to select() timeout args:
9243 # | units | value(s) for indefinite block
9244 # ---------|--------------|------------------------------
9245 # poll | milliseconds | omitted, negative, or None
9246 # ---------|--------------|------------------------------
9247 # select | seconds | omitted
9248 # ---------|--------------|------------------------------
9250 if timeout is not None and timeout < 0:
9252 if timeout is not None:
9253 select_args.append(timeout / 1000)
9255 select_events = select.select(*select_args)
9257 for fd in select_events[0]:
9258 poll_events.append((fd, PollConstants.POLLIN))
9261 class SequentialTaskQueue(SlotObject):
9263 __slots__ = ("max_jobs", "running_tasks") + \
9264 ("_dirty", "_scheduling", "_task_queue")
9266 def __init__(self, **kwargs):
9267 SlotObject.__init__(self, **kwargs)
9268 self._task_queue = deque()
9269 self.running_tasks = set()
9270 if self.max_jobs is None:
9274 def add(self, task):
9275 self._task_queue.append(task)
9278 def addFront(self, task):
9279 self._task_queue.appendleft(task)
9290 if self._scheduling:
9291 # Ignore any recursive schedule() calls triggered via
9292 # self._task_exit().
9295 self._scheduling = True
9297 task_queue = self._task_queue
9298 running_tasks = self.running_tasks
9299 max_jobs = self.max_jobs
9300 state_changed = False
9302 while task_queue and \
9303 (max_jobs is True or len(running_tasks) < max_jobs):
9304 task = task_queue.popleft()
9305 cancelled = getattr(task, "cancelled", None)
9307 running_tasks.add(task)
9308 task.addExitListener(self._task_exit)
9310 state_changed = True
9313 self._scheduling = False
9315 return state_changed
9317 def _task_exit(self, task):
9319 Since we can always rely on exit listeners being called, the set of
9320 running tasks is always pruned automatically and there is never any need
9321 to actively prune it.
9323 self.running_tasks.remove(task)
9324 if self._task_queue:
9328 self._task_queue.clear()
9329 running_tasks = self.running_tasks
9330 while running_tasks:
9331 task = running_tasks.pop()
9332 task.removeExitListener(self._task_exit)
def __nonzero__(self):
	"""True as long as any task is still queued or running."""
	if self._task_queue:
		return True
	return bool(self.running_tasks)
9340 return len(self._task_queue) + len(self.running_tasks)
9342 _can_poll_device = None
9344 def can_poll_device():
9346 Test if it's possible to use poll() on a device such as a pty. This
9347 is known to fail on Darwin.
9349 @returns: True if poll() on a device succeeds, False otherwise.
9352 global _can_poll_device
9353 if _can_poll_device is not None:
9354 return _can_poll_device
9356 if not hasattr(select, "poll"):
9357 _can_poll_device = False
9358 return _can_poll_device
9361 dev_null = open('/dev/null', 'rb')
9363 _can_poll_device = False
9364 return _can_poll_device
9367 p.register(dev_null.fileno(), PollConstants.POLLIN)
9369 invalid_request = False
9370 for f, event in p.poll():
9371 if event & PollConstants.POLLNVAL:
9372 invalid_request = True
9376 _can_poll_device = not invalid_request
9377 return _can_poll_device
def create_poll_instance():
	"""
	Create an instance of select.poll, or an instance of
	PollSelectAdapter if there is no poll() implementation or
	it is broken somehow.
	"""
	if not can_poll_device():
		# Fall back to the select() based emulation.
		return PollSelectAdapter()
	return select.poll()
9389 getloadavg = getattr(os, "getloadavg", None)
9390 if getloadavg is None:
9393 Uses /proc/loadavg to emulate os.getloadavg().
9394 Raises OSError if the load average was unobtainable.
9397 loadavg_str = open('/proc/loadavg').readline()
9399 # getloadavg() is only supposed to raise OSError, so convert
9400 raise OSError('unknown')
9401 loadavg_split = loadavg_str.split()
9402 if len(loadavg_split) < 3:
9403 raise OSError('unknown')
9407 loadavg_floats.append(float(loadavg_split[i]))
9409 raise OSError('unknown')
9410 return tuple(loadavg_floats)
9412 class PollScheduler(object):
9414 class _sched_iface_class(SlotObject):
9415 __slots__ = ("register", "schedule", "unregister")
9419 self._max_load = None
9421 self._poll_event_queue = []
9422 self._poll_event_handlers = {}
9423 self._poll_event_handler_ids = {}
9424 # Increment id for each new handler.
9425 self._event_handler_id = 0
9426 self._poll_obj = create_poll_instance()
9427 self._scheduling = False
9429 def _schedule(self):
9431 Calls _schedule_tasks() and automatically returns early from
9432 any recursive calls to this method that the _schedule_tasks()
9433 call might trigger. This makes _schedule() safe to call from
9434 inside exit listeners.
9436 if self._scheduling:
9438 self._scheduling = True
9440 return self._schedule_tasks()
9442 self._scheduling = False
9444 def _running_job_count(self):
9447 def _can_add_job(self):
9448 max_jobs = self._max_jobs
9449 max_load = self._max_load
9451 if self._max_jobs is not True and \
9452 self._running_job_count() >= self._max_jobs:
9455 if max_load is not None and \
9456 (max_jobs is True or max_jobs > 1) and \
9457 self._running_job_count() >= 1:
9459 avg1, avg5, avg15 = getloadavg()
9463 if avg1 >= max_load:
9468 def _poll(self, timeout=None):
9470 All poll() calls pass through here. The poll events
9471 are added directly to self._poll_event_queue.
9472 In order to avoid endless blocking, this raises
9473 StopIteration if timeout is None and there are
9474 no file descriptors to poll.
9476 if not self._poll_event_handlers:
9478 if timeout is None and \
9479 not self._poll_event_handlers:
9480 raise StopIteration(
9481 "timeout is None and there are no poll() event handlers")
9483 # The following error is known to occur with Linux kernel versions
9486 # select.error: (4, 'Interrupted system call')
9488 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9489 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9490 # without any events.
9493 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9495 except select.error, e:
9496 writemsg_level("\n!!! select error: %s\n" % (e,),
9497 level=logging.ERROR, noiselevel=-1)
9499 if timeout is not None:
9502 def _next_poll_event(self, timeout=None):
9504 Since the _schedule_wait() loop is called by event
9505 handlers from _poll_loop(), maintain a central event
9506 queue for both of them to share events from a single
9507 poll() call. In order to avoid endless blocking, this
9508 raises StopIteration if timeout is None and there are
9509 no file descriptors to poll.
9511 if not self._poll_event_queue:
9513 return self._poll_event_queue.pop()
9515 def _poll_loop(self):
9517 event_handlers = self._poll_event_handlers
9518 event_handled = False
9521 while event_handlers:
9522 f, event = self._next_poll_event()
9523 handler, reg_id = event_handlers[f]
9525 event_handled = True
9526 except StopIteration:
9527 event_handled = True
9529 if not event_handled:
9530 raise AssertionError("tight loop")
9532 def _schedule_yield(self):
9534 Schedule for a short period of time chosen by the scheduler based
9535 on internal state. Synchronous tasks should call this periodically
9536 in order to allow the scheduler to service pending poll events. The
9537 scheduler will call poll() exactly once, without blocking, and any
9538 resulting poll events will be serviced.
9540 event_handlers = self._poll_event_handlers
9543 if not event_handlers:
9544 return bool(events_handled)
9546 if not self._poll_event_queue:
9550 while event_handlers and self._poll_event_queue:
9551 f, event = self._next_poll_event()
9552 handler, reg_id = event_handlers[f]
9555 except StopIteration:
9558 return bool(events_handled)
9560 def _register(self, f, eventmask, handler):
9563 @return: A unique registration id, for use in schedule() or
9566 if f in self._poll_event_handlers:
9567 raise AssertionError("fd %d is already registered" % f)
9568 self._event_handler_id += 1
9569 reg_id = self._event_handler_id
9570 self._poll_event_handler_ids[reg_id] = f
9571 self._poll_event_handlers[f] = (handler, reg_id)
9572 self._poll_obj.register(f, eventmask)
def _unregister(self, reg_id):
	"""
	Tear down the poll registration identified by reg_id: unregister
	the underlying file descriptor from the poll object and discard
	its handler mappings.
	"""
	fd = self._poll_event_handler_ids[reg_id]
	self._poll_obj.unregister(fd)
	self._poll_event_handlers.pop(fd)
	self._poll_event_handler_ids.pop(reg_id)
9581 def _schedule_wait(self, wait_ids):
9583 Schedule until wait_id is not longer registered
9586 @param wait_id: a task id to wait for
9588 event_handlers = self._poll_event_handlers
9589 handler_ids = self._poll_event_handler_ids
9590 event_handled = False
9592 if isinstance(wait_ids, int):
9593 wait_ids = frozenset([wait_ids])
9596 while wait_ids.intersection(handler_ids):
9597 f, event = self._next_poll_event()
9598 handler, reg_id = event_handlers[f]
9600 event_handled = True
9601 except StopIteration:
9602 event_handled = True
9604 return event_handled
9606 class QueueScheduler(PollScheduler):
9609 Add instances of SequentialTaskQueue and then call run(). The
9610 run() method returns when no tasks remain.
9613 def __init__(self, max_jobs=None, max_load=None):
9614 PollScheduler.__init__(self)
9616 if max_jobs is None:
9619 self._max_jobs = max_jobs
9620 self._max_load = max_load
9621 self.sched_iface = self._sched_iface_class(
9622 register=self._register,
9623 schedule=self._schedule_wait,
9624 unregister=self._unregister)
9627 self._schedule_listeners = []
9630 self._queues.append(q)
def remove(self, q):
	"""Detach a previously added task queue from this scheduler.
	Raises ValueError if q was never added."""
	self._queues.remove(q)
9637 while self._schedule():
9640 while self._running_job_count():
9643 def _schedule_tasks(self):
9646 @returns: True if there may be remaining tasks to schedule,
9649 while self._can_add_job():
9650 n = self._max_jobs - self._running_job_count()
9654 if not self._start_next_job(n):
9657 for q in self._queues:
9662 def _running_job_count(self):
9664 for q in self._queues:
9665 job_count += len(q.running_tasks)
9666 self._jobs = job_count
9669 def _start_next_job(self, n=1):
9671 for q in self._queues:
9672 initial_job_count = len(q.running_tasks)
9674 final_job_count = len(q.running_tasks)
9675 if final_job_count > initial_job_count:
9676 started_count += (final_job_count - initial_job_count)
9677 if started_count >= n:
9679 return started_count
class TaskScheduler(object):

	"""
	A simple way to handle scheduling of AsynchronousTask instances.
	Simply add tasks and call run(). The run() method returns when no
	tasks remain.
	"""

	def __init__(self, max_jobs=None, max_load=None):
		# A single sequential queue, driven by a QueueScheduler.
		queue = SequentialTaskQueue(max_jobs=max_jobs)
		scheduler = QueueScheduler(
			max_jobs=max_jobs, max_load=max_load)
		scheduler.add(queue)
		self._queue = queue
		self._scheduler = scheduler
		# Expose the scheduler's interface and run() directly.
		self.sched_iface = scheduler.sched_iface
		self.run = scheduler.run

	def add(self, task):
		"""Queue a task for execution on the next run()."""
		self._queue.add(task)
9699 class JobStatusDisplay(object):
9701 _bound_properties = ("curval", "failed", "running")
9702 _jobs_column_width = 48
9704 # Don't update the display unless at least this much
9705 # time has passed, in units of seconds.
9706 _min_display_latency = 2
9708 _default_term_codes = {
9714 _termcap_name_map = {
9715 'carriage_return' : 'cr',
9720 def __init__(self, out=sys.stdout, quiet=False):
9721 object.__setattr__(self, "out", out)
9722 object.__setattr__(self, "quiet", quiet)
9723 object.__setattr__(self, "maxval", 0)
9724 object.__setattr__(self, "merges", 0)
9725 object.__setattr__(self, "_changed", False)
9726 object.__setattr__(self, "_displayed", False)
9727 object.__setattr__(self, "_last_display_time", 0)
9728 object.__setattr__(self, "width", 80)
9731 isatty = hasattr(out, "isatty") and out.isatty()
9732 object.__setattr__(self, "_isatty", isatty)
9733 if not isatty or not self._init_term():
9735 for k, capname in self._termcap_name_map.iteritems():
9736 term_codes[k] = self._default_term_codes[capname]
9737 object.__setattr__(self, "_term_codes", term_codes)
9738 encoding = sys.getdefaultencoding()
9739 for k, v in self._term_codes.items():
9740 if not isinstance(v, basestring):
9741 self._term_codes[k] = v.decode(encoding, 'replace')
9743 def _init_term(self):
9745 Initialize term control codes.
9747 @returns: True if term codes were successfully initialized,
9751 term_type = os.environ.get("TERM", "vt100")
9757 curses.setupterm(term_type, self.out.fileno())
9758 tigetstr = curses.tigetstr
9759 except curses.error:
9764 if tigetstr is None:
9768 for k, capname in self._termcap_name_map.iteritems():
9769 code = tigetstr(capname)
9771 code = self._default_term_codes[capname]
9772 term_codes[k] = code
9773 object.__setattr__(self, "_term_codes", term_codes)
9776 def _format_msg(self, msg):
9777 return ">>> %s" % msg
9781 self._term_codes['carriage_return'] + \
9782 self._term_codes['clr_eol'])
9784 self._displayed = False
9786 def _display(self, line):
9787 self.out.write(line)
9789 self._displayed = True
9791 def _update(self, msg):
9794 if not self._isatty:
9795 out.write(self._format_msg(msg) + self._term_codes['newline'])
9797 self._displayed = True
9803 self._display(self._format_msg(msg))
9805 def displayMessage(self, msg):
9807 was_displayed = self._displayed
9809 if self._isatty and self._displayed:
9812 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9814 self._displayed = False
9817 self._changed = True
9823 for name in self._bound_properties:
9824 object.__setattr__(self, name, 0)
9827 self.out.write(self._term_codes['newline'])
9829 self._displayed = False
9831 def __setattr__(self, name, value):
9832 old_value = getattr(self, name)
9833 if value == old_value:
9835 object.__setattr__(self, name, value)
9836 if name in self._bound_properties:
9837 self._property_change(name, old_value, value)
9839 def _property_change(self, name, old_value, new_value):
9840 self._changed = True
9843 def _load_avg_str(self):
9858 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9862 Display status on stdout, but only if something has
9863 changed since the last call.
9869 current_time = time.time()
9870 time_delta = current_time - self._last_display_time
9871 if self._displayed and \
9873 if not self._isatty:
9875 if time_delta < self._min_display_latency:
9878 self._last_display_time = current_time
9879 self._changed = False
9880 self._display_status()
9882 def _display_status(self):
9883 # Don't use len(self._completed_tasks) here since that also
9884 # can include uninstall tasks.
9885 curval_str = str(self.curval)
9886 maxval_str = str(self.maxval)
9887 running_str = str(self.running)
9888 failed_str = str(self.failed)
9889 load_avg_str = self._load_avg_str()
9891 color_output = StringIO()
9892 plain_output = StringIO()
9893 style_file = portage.output.ConsoleStyleFile(color_output)
9894 style_file.write_listener = plain_output
9895 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9896 style_writer.style_listener = style_file.new_styles
9897 f = formatter.AbstractFormatter(style_writer)
9899 number_style = "INFORM"
9900 f.add_literal_data("Jobs: ")
9901 f.push_style(number_style)
9902 f.add_literal_data(curval_str)
9904 f.add_literal_data(" of ")
9905 f.push_style(number_style)
9906 f.add_literal_data(maxval_str)
9908 f.add_literal_data(" complete")
9911 f.add_literal_data(", ")
9912 f.push_style(number_style)
9913 f.add_literal_data(running_str)
9915 f.add_literal_data(" running")
9918 f.add_literal_data(", ")
9919 f.push_style(number_style)
9920 f.add_literal_data(failed_str)
9922 f.add_literal_data(" failed")
9924 padding = self._jobs_column_width - len(plain_output.getvalue())
9926 f.add_literal_data(padding * " ")
9928 f.add_literal_data("Load avg: ")
9929 f.add_literal_data(load_avg_str)
9931 # Truncate to fit width, to avoid making the terminal scroll if the
9932 # line overflows (happens when the load average is large).
9933 plain_output = plain_output.getvalue()
9934 if self._isatty and len(plain_output) > self.width:
9935 # Use plain_output here since it's easier to truncate
9936 # properly than the color output which contains console
9938 self._update(plain_output[:self.width])
9940 self._update(color_output.getvalue())
9942 xtermTitle(" ".join(plain_output.split()))
9944 class Scheduler(PollScheduler):
9946 _opts_ignore_blockers = \
9947 frozenset(["--buildpkgonly",
9948 "--fetchonly", "--fetch-all-uri",
9949 "--nodeps", "--pretend"])
9951 _opts_no_background = \
9952 frozenset(["--pretend",
9953 "--fetchonly", "--fetch-all-uri"])
9955 _opts_no_restart = frozenset(["--buildpkgonly",
9956 "--fetchonly", "--fetch-all-uri", "--pretend"])
9958 _bad_resume_opts = set(["--ask", "--changelog",
9959 "--resume", "--skipfirst"])
9961 _fetch_log = "/var/log/emerge-fetch.log"
9963 class _iface_class(SlotObject):
9964 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9965 "dblinkElog", "fetch", "register", "schedule",
9966 "scheduleSetup", "scheduleUnpack", "scheduleYield",
9969 class _fetch_iface_class(SlotObject):
9970 __slots__ = ("log_file", "schedule")
9972 _task_queues_class = slot_dict_class(
9973 ("merge", "jobs", "fetch", "unpack"), prefix="")
9975 class _build_opts_class(SlotObject):
9976 __slots__ = ("buildpkg", "buildpkgonly",
9977 "fetch_all_uri", "fetchonly", "pretend")
9979 class _binpkg_opts_class(SlotObject):
9980 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9982 class _pkg_count_class(SlotObject):
9983 __slots__ = ("curval", "maxval")
class _emerge_log_class(SlotObject):
	__slots__ = ("xterm_titles",)

	def log(self, *pargs, **kwargs):
		"""
		Thin wrapper around emergelog(). When xterm titles are disabled,
		the short_msg keyword is discarded so the message does not
		interfere with the scheduler's status display.
		"""
		if not self.xterm_titles:
			kwargs.pop("short_msg", None)
		emergelog(self.xterm_titles, *pargs, **kwargs)
9994 class _failed_pkg(SlotObject):
9995 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9997 class _ConfigPool(object):
9998 """Interface for a task to temporarily allocate a config
9999 instance from a pool. This allows a task to be constructed
10000 long before the config instance actually becomes needed, like
10001 when prefetchers are constructed for the whole merge list."""
10002 __slots__ = ("_root", "_allocate", "_deallocate")
10003 def __init__(self, root, allocate, deallocate):
10005 self._allocate = allocate
10006 self._deallocate = deallocate
def allocate(self):
	"""Check a config instance out of the pool for this pool's root."""
	settings = self._allocate(self._root)
	return settings
def deallocate(self, settings):
	"""Return a previously allocated config instance to the pool."""
	self._deallocate(settings)
10012 class _unknown_internal_error(portage.exception.PortageException):
10014 Used internally to terminate scheduling. The specific reason for
10015 the failure should have been dumped to stderr.
10017 def __init__(self, value=""):
10018 portage.exception.PortageException.__init__(self, value)
10020 def __init__(self, settings, trees, mtimedb, myopts,
10021 spinner, mergelist, favorites, digraph):
10022 PollScheduler.__init__(self)
10023 self.settings = settings
10024 self.target_root = settings["ROOT"]
10026 self.myopts = myopts
10027 self._spinner = spinner
10028 self._mtimedb = mtimedb
10029 self._mergelist = mergelist
10030 self._favorites = favorites
10031 self._args_set = InternalPackageSet(favorites)
10032 self._build_opts = self._build_opts_class()
10033 for k in self._build_opts.__slots__:
10034 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10035 self._binpkg_opts = self._binpkg_opts_class()
10036 for k in self._binpkg_opts.__slots__:
10037 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10040 self._logger = self._emerge_log_class()
10041 self._task_queues = self._task_queues_class()
10042 for k in self._task_queues.allowed_keys:
10043 setattr(self._task_queues, k,
10044 SequentialTaskQueue())
10046 # Holds merges that will wait to be executed when no builds are
10047 # executing. This is useful for system packages since dependencies
10048 # on system packages are frequently unspecified.
10049 self._merge_wait_queue = []
10050 # Holds merges that have been transfered from the merge_wait_queue to
10051 # the actual merge queue. They are removed from this list upon
10052 # completion. Other packages can start building only when this list is
10054 self._merge_wait_scheduled = []
10056 # Holds system packages and their deep runtime dependencies. Before
10057 # being merged, these packages go to merge_wait_queue, to be merged
10058 # when no other packages are building.
10059 self._deep_system_deps = set()
10061 # Holds packages to merge which will satisfy currently unsatisfied
10062 # deep runtime dependencies of system packages. If this is not empty
10063 # then no parallel builds will be spawned until it is empty. This
10064 # minimizes the possibility that a build will fail due to the system
10065 # being in a fragile state. For example, see bug #259954.
10066 self._unsatisfied_system_deps = set()
10068 self._status_display = JobStatusDisplay()
10069 self._max_load = myopts.get("--load-average")
10070 max_jobs = myopts.get("--jobs")
10071 if max_jobs is None:
10073 self._set_max_jobs(max_jobs)
10075 # The root where the currently running
10076 # portage instance is installed.
10077 self._running_root = trees["/"]["root_config"]
10079 if settings.get("PORTAGE_DEBUG", "") == "1":
10081 self.pkgsettings = {}
10082 self._config_pool = {}
10083 self._blocker_db = {}
10085 self._config_pool[root] = []
10086 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10088 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10089 schedule=self._schedule_fetch)
10090 self._sched_iface = self._iface_class(
10091 dblinkEbuildPhase=self._dblink_ebuild_phase,
10092 dblinkDisplayMerge=self._dblink_display_merge,
10093 dblinkElog=self._dblink_elog,
10094 fetch=fetch_iface, register=self._register,
10095 schedule=self._schedule_wait,
10096 scheduleSetup=self._schedule_setup,
10097 scheduleUnpack=self._schedule_unpack,
10098 scheduleYield=self._schedule_yield,
10099 unregister=self._unregister)
10101 self._prefetchers = weakref.WeakValueDictionary()
10102 self._pkg_queue = []
10103 self._completed_tasks = set()
10105 self._failed_pkgs = []
10106 self._failed_pkgs_all = []
10107 self._failed_pkgs_die_msgs = []
10108 self._post_mod_echo_msgs = []
10109 self._parallel_fetch = False
10110 merge_count = len([x for x in mergelist \
10111 if isinstance(x, Package) and x.operation == "merge"])
10112 self._pkg_count = self._pkg_count_class(
10113 curval=0, maxval=merge_count)
10114 self._status_display.maxval = self._pkg_count.maxval
10116 # The load average takes some time to respond when new
10117 # jobs are added, so we need to limit the rate of adding
10119 self._job_delay_max = 10
10120 self._job_delay_factor = 1.0
10121 self._job_delay_exp = 1.5
10122 self._previous_job_start_time = None
10124 self._set_digraph(digraph)
10126 # This is used to memoize the _choose_pkg() result when
10127 # no packages can be chosen until one of the existing
10129 self._choose_pkg_return_early = False
10131 features = self.settings.features
10132 if "parallel-fetch" in features and \
10133 not ("--pretend" in self.myopts or \
10134 "--fetch-all-uri" in self.myopts or \
10135 "--fetchonly" in self.myopts):
10136 if "distlocks" not in features:
10137 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10138 portage.writemsg(red("!!!")+" parallel-fetching " + \
10139 "requires the distlocks feature enabled"+"\n",
10141 portage.writemsg(red("!!!")+" you have it disabled, " + \
10142 "thus parallel-fetching is being disabled"+"\n",
10144 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10145 elif len(mergelist) > 1:
10146 self._parallel_fetch = True
10148 if self._parallel_fetch:
10149 # clear out existing fetch log if it exists
10151 open(self._fetch_log, 'w')
10152 except EnvironmentError:
10155 self._running_portage = None
10156 portage_match = self._running_root.trees["vartree"].dbapi.match(
10157 portage.const.PORTAGE_PACKAGE_ATOM)
10159 cpv = portage_match.pop()
10160 self._running_portage = self._pkg(cpv, "installed",
10161 self._running_root, installed=True)
10163 def _poll(self, timeout=None):
10165 PollScheduler._poll(self, timeout=timeout)
def _set_max_jobs(self, max_jobs):
	"""Propagate the job limit to both this scheduler and the build
	("jobs") task queue so they stay in agreement."""
	self._task_queues.jobs.max_jobs = max_jobs
	self._max_jobs = max_jobs
10171 def _background_mode(self):
10173 Check if background mode is enabled and adjust states as necessary.
10176 @returns: True if background mode is enabled, False otherwise.
10178 background = (self._max_jobs is True or \
10179 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10180 not bool(self._opts_no_background.intersection(self.myopts))
10183 interactive_tasks = self._get_interactive_tasks()
10184 if interactive_tasks:
10186 writemsg_level(">>> Sending package output to stdio due " + \
10187 "to interactive package(s):\n",
10188 level=logging.INFO, noiselevel=-1)
10190 for pkg in interactive_tasks:
10191 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10192 if pkg.root != "/":
10193 pkg_str += " for " + pkg.root
10194 msg.append(pkg_str)
10196 writemsg_level("".join("%s\n" % (l,) for l in msg),
10197 level=logging.INFO, noiselevel=-1)
10198 if self._max_jobs is True or self._max_jobs > 1:
10199 self._set_max_jobs(1)
10200 writemsg_level(">>> Setting --jobs=1 due " + \
10201 "to the above interactive package(s)\n",
10202 level=logging.INFO, noiselevel=-1)
10204 self._status_display.quiet = \
10205 not background or \
10206 ("--quiet" in self.myopts and \
10207 "--verbose" not in self.myopts)
10209 self._logger.xterm_titles = \
10210 "notitles" not in self.settings.features and \
10211 self._status_display.quiet
10215 def _get_interactive_tasks(self):
10216 from portage import flatten
10217 from portage.dep import use_reduce, paren_reduce
10218 interactive_tasks = []
10219 for task in self._mergelist:
10220 if not (isinstance(task, Package) and \
10221 task.operation == "merge"):
10224 properties = flatten(use_reduce(paren_reduce(
10225 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10226 except portage.exception.InvalidDependString, e:
10227 show_invalid_depstring_notice(task,
10228 task.metadata["PROPERTIES"], str(e))
10229 raise self._unknown_internal_error()
10230 if "interactive" in properties:
10231 interactive_tasks.append(task)
10232 return interactive_tasks
10234 def _set_digraph(self, digraph):
10235 if "--nodeps" in self.myopts or \
10236 (self._max_jobs is not True and self._max_jobs < 2):
10238 self._digraph = None
10241 self._digraph = digraph
10242 self._find_system_deps()
10243 self._prune_digraph()
10244 self._prevent_builddir_collisions()
def _find_system_deps(self):
	"""
	Find system packages and their deep runtime dependencies. Before being
	merged, these packages go to merge_wait_queue, to be merged when no
	other packages are building.
	"""
	deps = self._deep_system_deps
	deps.clear()
	# Only packages that will actually be merged are of interest here.
	for pkg in _find_deep_system_runtime_deps(self._digraph):
		if pkg.operation == "merge":
			deps.add(pkg)
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.

    Repeatedly removes root nodes that are not packages, are installed
    "nomerge" nodes, or have already completed, until no more roots can
    be removed (removing one layer of roots may expose another).
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    while True:
        for node in graph.root_nodes():
            if not isinstance(node, Package) or \
                (node.installed and node.operation == "nomerge") or \
                node in completed_tasks:
                removed_nodes.add(node)
        if removed_nodes:
            graph.difference_update(removed_nodes)
        if not removed_nodes:
            break
        removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    the main process.
    """
    cpv_map = {}
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
            continue
        if pkg.installed:
            continue
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
            continue
        # Same cpv seen again: serialize it after every earlier
        # occurrence so their builddirs never overlap in time.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    # Exit status of the failed uninstall, if provided.
    status = None

    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        if pargs:
            # First positional argument is the failure status code.
            self.status = pargs[0]
def _schedule_fetch(self, fetcher):
    """
    Schedule a fetcher on the fetch queue, in order to
    serialize access to the fetch log.
    """
    self._task_queues.fetch.addFront(fetcher)
def _schedule_setup(self, setup_phase):
    """
    Schedule a setup phase on the merge queue, in order to
    serialize unsandboxed access to the live filesystem.
    """
    self._task_queues.merge.addFront(setup_phase)
def _schedule_unpack(self, unpack_phase):
    """
    Schedule an unpack phase on the unpack queue, in order
    to serialize $DISTDIR access for live ebuilds.
    """
    self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    """
    def get_blockers():
        return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
    return get_blockers
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """
    Return a list of dblink instances for installed packages that
    block new_pkg, or None when blockers are being ignored.
    Packages in the same slot (or identical cpv) are excluded since
    they are replaced rather than blocked.
    """
    if self._opts_ignore_blockers.intersection(self.myopts):
        return None

    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    # with python-2.5).
    import gc
    gc.collect()

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
            continue
        if new_pkg.cpv == blocking_pkg.cpv:
            continue
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    gc.collect()

    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """
    Construct a Package instance corresponding to the given dblink,
    mapping its tree type to the matching package type name.
    """
    cpv = pkg_dblink.mycpv
    type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    root_config = self.trees[pkg_dblink.myroot]["root_config"]
    installed = type_name == "installed"
    return self._pkg(cpv, type_name, root_config, installed=installed)
10380 def _append_to_log_path(self, log_path, msg):
10381 f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    """
    Route elog messages for the given dblink through func, sending
    output to the build log instead of stdout when running in
    background mode. The log file is always closed on exit.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    log_file = None
    out = sys.stdout
    background = self._background

    if background and log_path is not None:
        log_file = open(log_path, 'a')
        out = log_file

    try:
        for msg in msgs:
            func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    finally:
        if log_file is not None:
            log_file.close()
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """
    Display a merge message, writing it to the build log when one is
    configured and suppressing terminal output in background mode
    (unless the message is at least WARN level with no log available).
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background

    if log_path is None:
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    else:
        if not background:
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
        self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    @return: the phase's exit status (returncode)
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")

    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    ebuild_phase.wait()

    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must done before parallel
    tasks are started.
    @return: os.EX_OK on success, 1 on failure
    """
    if '--fetchonly' in self.myopts:
        return os.EX_OK

    digest = '--digest' in self.myopts
    if not digest:
        for pkgsettings in self.pkgsettings.itervalues():
            if 'digest' in pkgsettings.features:
                digest = True
                break

    if not digest:
        return os.EX_OK

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
            continue
        pkgsettings = self.pkgsettings[x.root]
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
            continue
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv)
        if not ebuild_path:
            writemsg_level(
                "!!! Could not locate ebuild for '%s'.\n" \
                % x.cpv, level=logging.ERROR, noiselevel=-1)
            return 1
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not portage.digestgen([], pkgsettings, myportdb=portdb):
            writemsg_level(
                "!!! Unable to generate manifest for '%s'.\n" \
                % x.cpv, level=logging.ERROR, noiselevel=-1)
            return 1

    return os.EX_OK
def _check_manifests(self):
    """
    Verify all the manifests now so that the user is notified of failure
    as soon as possible.
    @return: os.EX_OK on success, 1 on a digest check failure
    """
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
        return os.EX_OK

    shown_verifying_msg = False
    quiet_settings = {}
    # Clone a quiet config per root so digestcheck output is suppressed.
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
        del quiet_config

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
            continue

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
            return 1

    return os.EX_OK
def _add_prefetchers(self):
    """
    Queue background fetchers for every package after the first, so
    downloads overlap with builds.
    """
    if not self._parallel_fetch:
        return

    self._status_msg("Starting parallel fetch")

    prefetchers = self._prefetchers

    # In order to avoid "waiting for lock" messages
    # at the beginning, which annoy users, never
    # spawn a prefetcher for the first package.
    for pkg in self._mergelist[1:]:
        prefetcher = self._create_prefetcher(pkg)
        if prefetcher is not None:
            self._task_queues.fetch.add(prefetcher)
            prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    prefetcher = None

    if not isinstance(pkg, Package):
        pass

    elif pkg.type_name == "ebuild":
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)

    return prefetcher
10567 def _is_restart_scheduled(self):
10569 Check if the merge list contains a replacement
10570 for the current running instance, that will result
10571 in restart after merge.
10573 @returns: True if a restart is scheduled, False otherwise.
10575 if self._opts_no_restart.intersection(self.myopts):
10578 mergelist = self._mergelist
10580 for i, pkg in enumerate(mergelist):
10581 if self._is_restart_necessary(pkg) and \
10582 i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
        requires restart, False otherwise.
    """
    # A restart is needed when portage itself is upgraded for the
    # running root and the merged version differs from the running one.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            return pkg.cpv != self._running_portage.cpv
        return True
    return False
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """
    if self._opts_no_restart.intersection(self.myopts):
        return

    if not self._is_restart_necessary(pkg):
        return

    if pkg == self._mergelist[-1]:
        # Nothing left to merge after this one; no restart needed.
        return

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    mtimedb["resume"]["mergelist"].remove(list(pkg))
    mtimedb.commit()
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            if myarg is True:
                mynewargv.append(myopt)
            else:
                mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    os.execv(mynewargv[0], mynewargv)
def merge(self):
    """
    Schedule and run the merge list: save the resume list, validate
    PORTAGE_TMPDIR per root, generate/verify digests, then repeatedly
    call self._merge() (retrying with a recalculated list under
    --keep-going), and finally report any failed packages.
    @return: os.EX_OK on success, nonzero otherwise
    """
    if "--resume" in self.myopts:
        # We're resuming.
        portage.writemsg_stdout(
            colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
        self._logger.log(" *** Resuming merge...")

    self._save_resume_list()

    try:
        self._background = self._background_mode()
    except self._unknown_internal_error:
        return 1

    for root in self.trees:
        root_config = self.trees[root]["root_config"]

        # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
        # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
        # for ensuring sane $PWD (bug #239560) and storing elog messages.
        tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
        if not tmpdir or not os.path.isdir(tmpdir):
            msg = "The directory specified in your " + \
                "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
                "does not exist. Please create this " + \
                "directory or correct your PORTAGE_TMPDIR setting."
            msg = textwrap.wrap(msg, 70)
            out = portage.output.EOutput()
            for l in msg:
                out.eerror(l)
            return 1

        if self._background:
            root_config.settings.unlock()
            root_config.settings["PORTAGE_BACKGROUND"] = "1"
            root_config.settings.backup_changes("PORTAGE_BACKGROUND")
            root_config.settings.lock()

        self.pkgsettings[root] = portage.config(
            clone=root_config.settings)

    rval = self._generate_digests()
    if rval != os.EX_OK:
        return rval

    rval = self._check_manifests()
    if rval != os.EX_OK:
        return rval

    keep_going = "--keep-going" in self.myopts
    fetchonly = self._build_opts.fetchonly
    mtimedb = self._mtimedb
    failed_pkgs = self._failed_pkgs

    while True:
        rval = self._merge()
        if rval == os.EX_OK or fetchonly or not keep_going:
            break
        if "resume" not in mtimedb:
            break
        mergelist = self._mtimedb["resume"].get("mergelist")
        if not mergelist:
            break

        if not failed_pkgs:
            break

        # Drop the failed packages and retry with the remainder.
        for failed_pkg in failed_pkgs:
            mergelist.remove(list(failed_pkg.pkg))

        self._failed_pkgs_all.extend(failed_pkgs)
        del failed_pkgs[:]

        if not mergelist:
            break

        if not self._calc_resume_list():
            break

        clear_caches(self.trees)
        if not self._mergelist:
            break

        self._save_resume_list()
        self._pkg_count.curval = 0
        self._pkg_count.maxval = len([x for x in self._mergelist \
            if isinstance(x, Package) and x.operation == "merge"])
        self._status_display.maxval = self._pkg_count.maxval

    self._logger.log(" *** Finished. Cleaning up...")

    if failed_pkgs:
        self._failed_pkgs_all.extend(failed_pkgs)
        del failed_pkgs[:]

    background = self._background
    failure_log_shown = False
    if background and len(self._failed_pkgs_all) == 1:
        # If only one package failed then just show it's
        # whole log for easy viewing.
        failed_pkg = self._failed_pkgs_all[-1]
        log_file = None

        log_path = self._locate_failure_log(failed_pkg)
        if log_path is not None:
            try:
                log_file = open(log_path)
            except IOError:
                pass

        if log_file is not None:
            try:
                for line in log_file:
                    writemsg_level(line, noiselevel=-1)
            finally:
                log_file.close()
            failure_log_shown = True

    # Dump mod_echo output now since it tends to flood the terminal.
    # This allows us to avoid having more important output, generated
    # later, from being swept away by the mod_echo output.
    mod_echo_output = _flush_elog_mod_echo()

    if background and not failure_log_shown and \
        self._failed_pkgs_all and \
        self._failed_pkgs_die_msgs and \
        not mod_echo_output:

        printer = portage.output.EOutput()
        for mysettings, key, logentries in self._failed_pkgs_die_msgs:
            root_msg = ""
            if mysettings["ROOT"] != "/":
                root_msg = " merged to %s" % mysettings["ROOT"]
            printer.einfo("Error messages for package %s%s:" % \
                (colorize("INFORM", key), root_msg))
            for phase in portage.const.EBUILD_PHASES:
                if phase not in logentries:
                    continue
                for msgtype, msgcontent in logentries[phase]:
                    if isinstance(msgcontent, basestring):
                        msgcontent = [msgcontent]
                    for line in msgcontent:
                        printer.eerror(line.strip("\n"))

    if self._post_mod_echo_msgs:
        for msg in self._post_mod_echo_msgs:
            msg()

    if len(self._failed_pkgs_all) > 1 or \
        (self._failed_pkgs_all and "--keep-going" in self.myopts):
        if len(self._failed_pkgs_all) > 1:
            msg = "The following %d packages have " % \
                len(self._failed_pkgs_all) + \
                "failed to build or install:"
        else:
            msg = "The following package has " + \
                "failed to build or install:"
        prefix = bad(" * ")
        writemsg(prefix + "\n", noiselevel=-1)
        from textwrap import wrap
        for line in wrap(msg, 72):
            writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
        writemsg(prefix + "\n", noiselevel=-1)
        for failed_pkg in self._failed_pkgs_all:
            writemsg("%s\t%s\n" % (prefix,
                colorize("INFORM", str(failed_pkg.pkg))),
                noiselevel=-1)
        writemsg(prefix + "\n", noiselevel=-1)
        return 1

    return os.EX_OK
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """
    Collect ERROR-level elog entries so they can be replayed in the
    failure summary at the end of the merge.
    """
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    if errors:
        self._failed_pkgs_die_msgs.append(
            (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    """
    Return the path of a non-empty build log for the given failed
    package, or None when no usable log can be found.
    """
    log_paths = [failed_pkg.build_log]

    for log_path in log_paths:
        if not log_path:
            continue

        try:
            log_size = os.stat(log_path).st_size
        except OSError:
            continue

        if log_size == 0:
            continue

        return log_path

    return None
def _add_packages(self):
    """
    Fill the package queue from the merge list; Blocker entries are
    already satisfied and are intentionally skipped.
    """
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
            pass
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    """
    graph = self._digraph
    if graph is None:
        return
    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    if pkg.root != '/':
        return

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
            return False
        return True

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
            continue
        if child is pkg:
            continue
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
    """
    Exit listener for merges that were held in the wait queue:
    unregister the task, then run the normal merge-exit handling.
    """
    self._merge_wait_scheduled.remove(task)
    self._merge_exit(task)
def _merge_exit(self, merge):
    """
    Exit listener for a PackageMerge task: record the result, return
    the task's config to the pool, update the status display, and
    keep the scheduler moving.
    """
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    self._schedule()
def _do_merge_exit(self, merge):
    """
    Process the result of a completed PackageMerge: record failures,
    mark successful tasks (and any replaced package's uninstall task)
    complete, trigger a restart when portage upgraded itself, and keep
    the resume list in sync on disk.
    """
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            pkg=pkg,
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

        self._status_display.failed = len(self._failed_pkgs)
        return

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)

    if pkg.installed:
        return

    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    mtimedb.commit()
def _build_exit(self, build):
    """
    Exit listener for a build job: on success, queue the follow-up
    merge (deferring deep system deps to the merge-wait queue); on
    failure, record the failed package. Always releases the job slot
    and reschedules.
    """
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        else:
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)
            self._status_display.merges = len(self._task_queues.merge)
    else:
        settings = build.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            pkg=build.pkg,
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

        self._status_display.failed = len(self._failed_pkgs)
        self._deallocate_config(build.settings)
    self._jobs -= 1
    self._status_display.running = self._jobs
    self._schedule()
def _extract_exit(self, build):
    """
    Exit listener for binary-package extraction; handled identically
    to a build exit.
    """
    self._build_exit(build)
def _task_complete(self, pkg):
    """
    Mark a task finished and allow package selection to proceed again
    (a completed task may have unblocked previously-dependent packages).
    """
    self._completed_tasks.add(pkg)
    self._unsatisfied_system_deps.discard(pkg)
    self._choose_pkg_return_early = False
def _merge(self):
    """
    Run one pass of the main scheduling loop over the package queue,
    with global lock/elog state set up for the duration and cleaned up
    afterwards.
    @return: os.EX_OK, or the returncode of the last failed package
    """
    self._add_prefetchers()
    self._add_packages()
    failed_pkgs = self._failed_pkgs
    # Silence lock messages and capture elog errors while scheduling.
    portage.locks._quiet = self._background
    portage.elog._emerge_elog_listener = self._elog_listener
    rval = os.EX_OK

    try:
        self._main_loop()
    finally:
        self._main_loop_cleanup()
        portage.locks._quiet = False
        portage.elog._emerge_elog_listener = None
        if failed_pkgs:
            rval = failed_pkgs[-1].returncode

    return rval
def _main_loop_cleanup(self):
    """
    Reset all per-run scheduling state so a subsequent pass (e.g. a
    --keep-going retry) starts clean.
    """
    del self._pkg_queue[:]
    self._completed_tasks.clear()
    self._deep_system_deps.clear()
    self._unsatisfied_system_deps.clear()
    self._choose_pkg_return_early = False
    self._status_display.reset()
    self._digraph = None
    self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    @return: the chosen Package, or None when selection should wait
        until a running job completes
    """
    if self._choose_pkg_return_early:
        return None

    if self._digraph is None:
        # Without a graph we can't reason about dependencies; only
        # pick eagerly when nothing is running or --nodeps allows it.
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            return None
        return self._pkg_queue.pop(0)

    if not (self._jobs or self._task_queues.merge):
        return self._pkg_queue.pop(0)

    self._prune_digraph()

    chosen_pkg = None
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        later.remove(pkg)
        if not self._dependent_on_scheduled_merges(pkg, later):
            chosen_pkg = pkg
            break

    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)

    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True

    return chosen_pkg
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.
    @param pkg: a package to check dependencies for
    @param later: packages for which dependence should be ignored
        since they will be merged later than pkg anyway and therefore
        delaying the merge of pkg will not result in a more optimal
        merge order
    @rtype: bool
    @returns: True if the package is dependent, False otherwise.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks

    dependent = False
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    while node_stack:
        node = node_stack.pop()
        if node in traversed_nodes:
            continue
        traversed_nodes.add(node)
        if not ((node.installed and node.operation == "nomerge") or \
            (node.operation == "uninstall" and \
            node not in direct_deps) or \
            node in completed_tasks or \
            node in later):
            dependent = True
            break
        node_stack.extend(graph.child_nodes(node))

    return dependent
def _allocate_config(self, root):
    """
    Allocate a unique config instance for a task in order
    to prevent interference between parallel tasks.
    """
    if self._config_pool[root]:
        temp_settings = self._config_pool[root].pop()
    else:
        temp_settings = portage.config(clone=self.pkgsettings[root])
    # Since config.setcpv() isn't guaranteed to call config.reset() due to
    # performance reasons, call it here to make sure all settings from the
    # previous package get flushed out (such as PORTAGE_LOG_FILE).
    temp_settings.reload()
    temp_settings.reset()
    return temp_settings
11123 def _deallocate_config(self, settings):
11124 self._config_pool[settings["ROOT"]].append(settings)
def _main_loop(self):
    """
    Drive scheduling until the package queue is exhausted (or a
    failure stops it), then drain remaining jobs and merges.
    """
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    merge_queue = self._task_queues.merge

    while self._schedule():
        if self._poll_event_handlers:
            self._poll_loop()

    # Queue drained: wait for outstanding jobs/merges to finish.
    while True:
        self._schedule()
        if not (self._jobs or merge_queue):
            break
        if self._poll_event_handlers:
            self._poll_loop()
11147 def _keep_scheduling(self):
11148 return bool(self._pkg_queue and \
11149 not (self._failed_pkgs and not self._build_opts.fetchonly))
def _schedule_tasks(self):
    """
    One scheduling step: flush waiting merges when no jobs run,
    start new tasks, pump the task queues, and cancel prefetchers
    that are the only thing keeping the poll loop alive.
    @return: True if scheduling should continue
    """
    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]

    self._schedule_tasks_imp()
    self._status_display.display()

    state_change = 0
    for q in self._task_queues.values():
        if q.schedule():
            state_change += 1

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        state_change += 1

    if state_change:
        self._schedule_tasks_imp()
        self._status_display.display()

    return self._keep_scheduling()
11184 def _job_delay(self):
11187 @returns: True if job scheduling should be delayed, False otherwise.
11190 if self._jobs and self._max_load is not None:
11192 current_time = time.time()
11194 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11195 if delay > self._job_delay_max:
11196 delay = self._job_delay_max
11197 if (current_time - self._previous_job_start_time) < delay:
def _schedule_tasks_imp(self):
    """
    Start as many new tasks as limits allow.
    @rtype: bool
    @returns: True if state changed, False otherwise.
    """
    state_change = 0

    while True:

        if not self._keep_scheduling():
            return bool(state_change)

        if self._choose_pkg_return_early or \
            self._merge_wait_scheduled or \
            (self._jobs and self._unsatisfied_system_deps) or \
            not self._can_add_job() or \
            self._job_delay():
            return bool(state_change)

        pkg = self._choose_pkg()
        if pkg is None:
            return bool(state_change)

        state_change += 1

        if not pkg.installed:
            self._pkg_count.curval += 1

        task = self._task(pkg)

        if pkg.installed:
            # Already-installed packages only need their merge phase.
            merge = PackageMerge(merge=task)
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)

        elif pkg.built:
            # Binary packages are extracted as jobs.
            self._jobs += 1
            self._previous_job_start_time = time.time()
            self._status_display.running = self._jobs
            task.addExitListener(self._extract_exit)
            self._task_queues.jobs.add(task)

        else:
            # Source packages are built as jobs.
            self._jobs += 1
            self._previous_job_start_time = time.time()
            self._status_display.running = self._jobs
            task.addExitListener(self._build_exit)
            self._task_queues.jobs.add(task)

    return bool(state_change)
def _task(self, pkg):
    """
    Build a MergeListItem task for pkg, resolving the currently
    installed package in the same slot (if any) as the one to replace.
    """
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        if previous_cpv:
            previous_cpv = previous_cpv.pop()
            pkg_to_replace = self._pkg(previous_cpv,
                "installed", pkg.root_config, installed=True)

    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
        self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)

    return task
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """
    Show a one-line failure status message for failed_pkg, followed by
    its log path when a usable build log exists.
    """
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg += " %s %s" % (preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        msg += ", Log file:"
    self._status_msg(msg)

    if log_path is not None:
        self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
def _status_msg(self, msg):
    """
    Display a brief status message (no newlines) in the status display.
    This is called by tasks to provide feedback to the user. This
    delegates the resposibility of generating \r and \n control characters,
    to guarantee that lines are created or erased when necessary and
    appropriate.

    @type msg: str
    @param msg: a brief status message (no newlines allowed)
    """
    if not self._background:
        writemsg_level("\n")
    self._status_display.displayMessage(msg)
def _save_resume_list(self):
    """
    Do this before verifying the ebuild Manifests since it might
    be possible for the user to use --resume --skipfirst get past
    a non-essential package with a broken digest.
    """
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"] = [list(x) \
        for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"]

    mtimedb.commit()
11325 def _calc_resume_list(self):
11327 Use the current resume list to calculate a new one,
11328 dropping any packages with unsatisfied deps.
11330 @returns: True if successful, False otherwise.
11332 print colorize("GOOD", "*** Resuming merge...")
11334 if self._show_list():
11335 if "--tree" in self.myopts:
11336 portage.writemsg_stdout("\n" + \
11337 darkgreen("These are the packages that " + \
11338 "would be merged, in reverse order:\n\n"))
11341 portage.writemsg_stdout("\n" + \
11342 darkgreen("These are the packages that " + \
11343 "would be merged, in order:\n\n"))
11345 show_spinner = "--quiet" not in self.myopts and \
11346 "--nodeps" not in self.myopts
11349 print "Calculating dependencies ",
11351 myparams = create_depgraph_params(self.myopts, None)
11355 success, mydepgraph, dropped_tasks = resume_depgraph(
11356 self.settings, self.trees, self._mtimedb, self.myopts,
11357 myparams, self._spinner)
11358 except depgraph.UnsatisfiedResumeDep, exc:
11359 # rename variable to avoid python-3.0 error:
11360 # SyntaxError: can not delete variable 'e' referenced in nested
11363 mydepgraph = e.depgraph
11364 dropped_tasks = set()
11367 print "\b\b... done!"
11370 def unsatisfied_resume_dep_msg():
11371 mydepgraph.display_problems()
11372 out = portage.output.EOutput()
11373 out.eerror("One or more packages are either masked or " + \
11374 "have missing dependencies:")
11377 show_parents = set()
11378 for dep in e.value:
11379 if dep.parent in show_parents:
11381 show_parents.add(dep.parent)
11382 if dep.atom is None:
11383 out.eerror(indent + "Masked package:")
11384 out.eerror(2 * indent + str(dep.parent))
11387 out.eerror(indent + str(dep.atom) + " pulled in by:")
11388 out.eerror(2 * indent + str(dep.parent))
11390 msg = "The resume list contains packages " + \
11391 "that are either masked or have " + \
11392 "unsatisfied dependencies. " + \
11393 "Please restart/continue " + \
11394 "the operation manually, or use --skipfirst " + \
11395 "to skip the first package in the list and " + \
11396 "any other packages that may be " + \
11397 "masked or have missing dependencies."
11398 for line in textwrap.wrap(msg, 72):
11400 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11403 if success and self._show_list():
11404 mylist = mydepgraph.altlist()
11406 if "--tree" in self.myopts:
11408 mydepgraph.display(mylist, favorites=self._favorites)
11411 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11413 mydepgraph.display_problems()
11415 mylist = mydepgraph.altlist()
11416 mydepgraph.break_refs(mylist)
11417 mydepgraph.break_refs(dropped_tasks)
11418 self._mergelist = mylist
11419 self._set_digraph(mydepgraph.schedulerGraph())
11422 for task in dropped_tasks:
11423 if not (isinstance(task, Package) and task.operation == "merge"):
11426 msg = "emerge --keep-going:" + \
11428 if pkg.root != "/":
11429 msg += " for %s" % (pkg.root,)
11430 msg += " dropped due to unsatisfied dependency."
11431 for line in textwrap.wrap(msg, msg_width):
11432 eerror(line, phase="other", key=pkg.cpv)
11433 settings = self.pkgsettings[pkg.root]
11434 # Ensure that log collection from $T is disabled inside
11435 # elog_process(), since any logs that might exist are
11437 settings.pop("T", None)
11438 portage.elog.elog_process(pkg.cpv, settings)
11439 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11443 def _show_list(self):
11444 myopts = self.myopts
11445 if "--quiet" not in myopts and \
11446 ("--ask" in myopts or "--tree" in myopts or \
11447 "--verbose" in myopts):
11451 def _world_atom(self, pkg):
11453 Add the package to the world file, but only if
11454 it's supposed to be added. Otherwise, do nothing.
11457 if set(("--buildpkgonly", "--fetchonly",
11459 "--oneshot", "--onlydeps",
11460 "--pretend")).intersection(self.myopts):
11463 if pkg.root != self.target_root:
11466 args_set = self._args_set
11467 if not args_set.findAtomForPackage(pkg):
11470 logger = self._logger
11471 pkg_count = self._pkg_count
11472 root_config = pkg.root_config
11473 world_set = root_config.sets["world"]
11474 world_locked = False
11475 if hasattr(world_set, "lock"):
11477 world_locked = True
11480 if hasattr(world_set, "load"):
11481 world_set.load() # maybe it's changed on disk
11483 atom = create_world_atom(pkg, args_set, root_config)
11485 if hasattr(world_set, "add"):
11486 self._status_msg(('Recording %s in "world" ' + \
11487 'favorites file...') % atom)
11488 logger.log(" === (%s of %s) Updating world file (%s)" % \
11489 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11490 world_set.add(atom)
11492 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11493 (atom,), level=logging.WARN, noiselevel=-1)
11498 def _pkg(self, cpv, type_name, root_config, installed=False):
11500 Get a package instance from the cache, or create a new
11501 one if necessary. Raises KeyError from aux_get if it
11502 failures for some reason (package does not exist or is
11505 operation = "merge"
11507 operation = "nomerge"
11509 if self._digraph is not None:
11510 # Reuse existing instance when available.
11511 pkg = self._digraph.get(
11512 (type_name, root_config.root, cpv, operation))
11513 if pkg is not None:
11516 tree_type = depgraph.pkg_tree_map[type_name]
11517 db = root_config.trees[tree_type].dbapi
11518 db_keys = list(self.trees[root_config.root][
11519 tree_type].dbapi._aux_cache_keys)
11520 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11521 pkg = Package(cpv=cpv, metadata=metadata,
11522 root_config=root_config, installed=installed)
11523 if type_name == "ebuild":
11524 settings = self.pkgsettings[root_config.root]
11525 settings.setcpv(pkg)
11526 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11527 pkg.metadata['CHOST'] = settings.get('CHOST', '')
11531 class MetadataRegen(PollScheduler):
11533 def __init__(self, portdb, max_jobs=None, max_load=None):
11534 PollScheduler.__init__(self)
11535 self._portdb = portdb
11537 if max_jobs is None:
11540 self._max_jobs = max_jobs
11541 self._max_load = max_load
11542 self._sched_iface = self._sched_iface_class(
11543 register=self._register,
11544 schedule=self._schedule_wait,
11545 unregister=self._unregister)
11547 self._valid_pkgs = set()
11548 self._process_iter = self._iter_metadata_processes()
11549 self.returncode = os.EX_OK
11550 self._error_count = 0
11552 def _iter_metadata_processes(self):
11553 portdb = self._portdb
11554 valid_pkgs = self._valid_pkgs
11555 every_cp = portdb.cp_all()
11556 every_cp.sort(reverse=True)
11559 cp = every_cp.pop()
11560 portage.writemsg_stdout("Processing %s\n" % cp)
11561 cpv_list = portdb.cp_list(cp)
11562 for cpv in cpv_list:
11563 valid_pkgs.add(cpv)
11564 ebuild_path, repo_path = portdb.findname2(cpv)
11565 metadata_process = portdb._metadata_process(
11566 cpv, ebuild_path, repo_path)
11567 if metadata_process is None:
11569 yield metadata_process
11573 portdb = self._portdb
11574 from portage.cache.cache_errors import CacheError
11577 for mytree in portdb.porttrees:
11579 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11580 except CacheError, e:
11581 portage.writemsg("Error listing cache entries for " + \
11582 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11587 while self._schedule():
11594 for y in self._valid_pkgs:
11595 for mytree in portdb.porttrees:
11596 if portdb.findname2(y, mytree=mytree)[0]:
11597 dead_nodes[mytree].discard(y)
11599 for mytree, nodes in dead_nodes.iteritems():
11600 auxdb = portdb.auxdb[mytree]
11604 except (KeyError, CacheError):
11607 def _schedule_tasks(self):
11610 @returns: True if there may be remaining tasks to schedule,
11613 while self._can_add_job():
11615 metadata_process = self._process_iter.next()
11616 except StopIteration:
11620 metadata_process.scheduler = self._sched_iface
11621 metadata_process.addExitListener(self._metadata_exit)
11622 metadata_process.start()
11625 def _metadata_exit(self, metadata_process):
11627 if metadata_process.returncode != os.EX_OK:
11628 self.returncode = 1
11629 self._error_count += 1
11630 self._valid_pkgs.discard(metadata_process.cpv)
11631 portage.writemsg("Error processing %s, continuing...\n" % \
11632 (metadata_process.cpv,))
11635 class UninstallFailure(portage.exception.PortageException):
11637 An instance of this class is raised by unmerge() when
11638 an uninstallation fails.
11641 def __init__(self, *pargs):
11642 portage.exception.PortageException.__init__(self, pargs)
11644 self.status = pargs[0]
11646 def unmerge(root_config, myopts, unmerge_action,
11647 unmerge_files, ldpath_mtimes, autoclean=0,
11648 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11649 scheduler=None, writemsg_level=portage.util.writemsg_level):
11651 quiet = "--quiet" in myopts
11652 settings = root_config.settings
11653 sets = root_config.sets
11654 vartree = root_config.trees["vartree"]
11655 candidate_catpkgs=[]
11657 xterm_titles = "notitles" not in settings.features
11658 out = portage.output.EOutput()
11660 db_keys = list(vartree.dbapi._aux_cache_keys)
11663 pkg = pkg_cache.get(cpv)
11665 pkg = Package(cpv=cpv, installed=True,
11666 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11667 root_config=root_config,
11668 type_name="installed")
11669 pkg_cache[cpv] = pkg
11672 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11674 # At least the parent needs to exist for the lock file.
11675 portage.util.ensure_dirs(vdb_path)
11676 except portage.exception.PortageException:
11680 if os.access(vdb_path, os.W_OK):
11681 vdb_lock = portage.locks.lockdir(vdb_path)
11682 realsyslist = sets["system"].getAtoms()
11684 for x in realsyslist:
11685 mycp = portage.dep_getkey(x)
11686 if mycp in settings.getvirtuals():
11688 for provider in settings.getvirtuals()[mycp]:
11689 if vartree.dbapi.match(provider):
11690 providers.append(provider)
11691 if len(providers) == 1:
11692 syslist.extend(providers)
11694 syslist.append(mycp)
11696 mysettings = portage.config(clone=settings)
11698 if not unmerge_files:
11699 if unmerge_action == "unmerge":
11701 print bold("emerge unmerge") + " can only be used with specific package names"
11707 localtree = vartree
11708 # process all arguments and add all
11709 # valid db entries to candidate_catpkgs
11711 if not unmerge_files:
11712 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11714 #we've got command-line arguments
11715 if not unmerge_files:
11716 print "\nNo packages to unmerge have been provided.\n"
11718 for x in unmerge_files:
11719 arg_parts = x.split('/')
11720 if x[0] not in [".","/"] and \
11721 arg_parts[-1][-7:] != ".ebuild":
11722 #possible cat/pkg or dep; treat as such
11723 candidate_catpkgs.append(x)
11724 elif unmerge_action in ["prune","clean"]:
11725 print "\n!!! Prune and clean do not accept individual" + \
11726 " ebuilds as arguments;\n skipping.\n"
11729 # it appears that the user is specifying an installed
11730 # ebuild and we're in "unmerge" mode, so it's ok.
11731 if not os.path.exists(x):
11732 print "\n!!! The path '"+x+"' doesn't exist.\n"
11735 absx = os.path.abspath(x)
11736 sp_absx = absx.split("/")
11737 if sp_absx[-1][-7:] == ".ebuild":
11739 absx = "/".join(sp_absx)
11741 sp_absx_len = len(sp_absx)
11743 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11744 vdb_len = len(vdb_path)
11746 sp_vdb = vdb_path.split("/")
11747 sp_vdb_len = len(sp_vdb)
11749 if not os.path.exists(absx+"/CONTENTS"):
11750 print "!!! Not a valid db dir: "+str(absx)
11753 if sp_absx_len <= sp_vdb_len:
11754 # The Path is shorter... so it can't be inside the vdb.
11757 print "\n!!!",x,"cannot be inside "+ \
11758 vdb_path+"; aborting.\n"
11761 for idx in range(0,sp_vdb_len):
11762 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11765 print "\n!!!", x, "is not inside "+\
11766 vdb_path+"; aborting.\n"
11769 print "="+"/".join(sp_absx[sp_vdb_len:])
11770 candidate_catpkgs.append(
11771 "="+"/".join(sp_absx[sp_vdb_len:]))
11774 if (not "--quiet" in myopts):
11776 if settings["ROOT"] != "/":
11777 writemsg_level(darkgreen(newline+ \
11778 ">>> Using system located in ROOT tree %s\n" % \
11781 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11782 not ("--quiet" in myopts):
11783 writemsg_level(darkgreen(newline+\
11784 ">>> These are the packages that would be unmerged:\n"))
11786 # Preservation of order is required for --depclean and --prune so
11787 # that dependencies are respected. Use all_selected to eliminate
11788 # duplicate packages since the same package may be selected by
11791 all_selected = set()
11792 for x in candidate_catpkgs:
11793 # cycle through all our candidate deps and determine
11794 # what will and will not get unmerged
11796 mymatch = vartree.dbapi.match(x)
11797 except portage.exception.AmbiguousPackageName, errpkgs:
11798 print "\n\n!!! The short ebuild name \"" + \
11799 x + "\" is ambiguous. Please specify"
11800 print "!!! one of the following fully-qualified " + \
11801 "ebuild names instead:\n"
11802 for i in errpkgs[0]:
11803 print " " + green(i)
11807 if not mymatch and x[0] not in "<>=~":
11808 mymatch = localtree.dep_match(x)
11810 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11811 (x, unmerge_action), noiselevel=-1)
11815 {"protected": set(), "selected": set(), "omitted": set()})
11816 mykey = len(pkgmap) - 1
11817 if unmerge_action=="unmerge":
11819 if y not in all_selected:
11820 pkgmap[mykey]["selected"].add(y)
11821 all_selected.add(y)
11822 elif unmerge_action == "prune":
11823 if len(mymatch) == 1:
11825 best_version = mymatch[0]
11826 best_slot = vartree.getslot(best_version)
11827 best_counter = vartree.dbapi.cpv_counter(best_version)
11828 for mypkg in mymatch[1:]:
11829 myslot = vartree.getslot(mypkg)
11830 mycounter = vartree.dbapi.cpv_counter(mypkg)
11831 if (myslot == best_slot and mycounter > best_counter) or \
11832 mypkg == portage.best([mypkg, best_version]):
11833 if myslot == best_slot:
11834 if mycounter < best_counter:
11835 # On slot collision, keep the one with the
11836 # highest counter since it is the most
11837 # recently installed.
11839 best_version = mypkg
11841 best_counter = mycounter
11842 pkgmap[mykey]["protected"].add(best_version)
11843 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11844 if mypkg != best_version and mypkg not in all_selected)
11845 all_selected.update(pkgmap[mykey]["selected"])
11847 # unmerge_action == "clean"
11849 for mypkg in mymatch:
11850 if unmerge_action == "clean":
11851 myslot = localtree.getslot(mypkg)
11853 # since we're pruning, we don't care about slots
11854 # and put all the pkgs in together
11856 if myslot not in slotmap:
11857 slotmap[myslot] = {}
11858 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11860 for mypkg in vartree.dbapi.cp_list(
11861 portage.dep_getkey(mymatch[0])):
11862 myslot = vartree.getslot(mypkg)
11863 if myslot not in slotmap:
11864 slotmap[myslot] = {}
11865 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11867 for myslot in slotmap:
11868 counterkeys = slotmap[myslot].keys()
11869 if not counterkeys:
11872 pkgmap[mykey]["protected"].add(
11873 slotmap[myslot][counterkeys[-1]])
11874 del counterkeys[-1]
11876 for counter in counterkeys[:]:
11877 mypkg = slotmap[myslot][counter]
11878 if mypkg not in mymatch:
11879 counterkeys.remove(counter)
11880 pkgmap[mykey]["protected"].add(
11881 slotmap[myslot][counter])
11883 #be pretty and get them in order of merge:
11884 for ckey in counterkeys:
11885 mypkg = slotmap[myslot][ckey]
11886 if mypkg not in all_selected:
11887 pkgmap[mykey]["selected"].add(mypkg)
11888 all_selected.add(mypkg)
11889 # ok, now the last-merged package
11890 # is protected, and the rest are selected
11891 numselected = len(all_selected)
11892 if global_unmerge and not numselected:
11893 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11896 if not numselected:
11897 portage.writemsg_stdout(
11898 "\n>>> No packages selected for removal by " + \
11899 unmerge_action + "\n")
11903 vartree.dbapi.flush_cache()
11904 portage.locks.unlockdir(vdb_lock)
11906 from portage.sets.base import EditablePackageSet
11908 # generate a list of package sets that are directly or indirectly listed in "world",
11909 # as there is no persistent list of "installed" sets
11910 installed_sets = ["world"]
11915 pos = len(installed_sets)
11916 for s in installed_sets[pos - 1:]:
11919 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11922 installed_sets += candidates
11923 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11926 # we don't want to unmerge packages that are still listed in user-editable package sets
11927 # listed in "world" as they would be remerged on the next update of "world" or the
11928 # relevant package sets.
11929 unknown_sets = set()
11930 for cp in xrange(len(pkgmap)):
11931 for cpv in pkgmap[cp]["selected"].copy():
11935 # It could have been uninstalled
11936 # by a concurrent process.
11939 if unmerge_action != "clean" and \
11940 root_config.root == "/" and \
11941 portage.match_from_list(
11942 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11943 msg = ("Not unmerging package %s since there is no valid " + \
11944 "reason for portage to unmerge itself.") % (pkg.cpv,)
11945 for line in textwrap.wrap(msg, 75):
11947 # adjust pkgmap so the display output is correct
11948 pkgmap[cp]["selected"].remove(cpv)
11949 all_selected.remove(cpv)
11950 pkgmap[cp]["protected"].add(cpv)
11954 for s in installed_sets:
11955 # skip sets that the user requested to unmerge, and skip world
11956 # unless we're unmerging a package set (as the package would be
11957 # removed from "world" later on)
11958 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11962 if s in unknown_sets:
11964 unknown_sets.add(s)
11965 out = portage.output.EOutput()
11966 out.eerror(("Unknown set '@%s' in " + \
11967 "%svar/lib/portage/world_sets") % \
11968 (s, root_config.root))
11971 # only check instances of EditablePackageSet as other classes are generally used for
11972 # special purposes and can be ignored here (and are usually generated dynamically, so the
11973 # user can't do much about them anyway)
11974 if isinstance(sets[s], EditablePackageSet):
11976 # This is derived from a snippet of code in the
11977 # depgraph._iter_atoms_for_pkg() method.
11978 for atom in sets[s].iterAtomsForPackage(pkg):
11979 inst_matches = vartree.dbapi.match(atom)
11980 inst_matches.reverse() # descending order
11982 for inst_cpv in inst_matches:
11984 inst_pkg = _pkg(inst_cpv)
11986 # It could have been uninstalled
11987 # by a concurrent process.
11990 if inst_pkg.cp != atom.cp:
11992 if pkg >= inst_pkg:
11993 # This is descending order, and we're not
11994 # interested in any versions <= pkg given.
11996 if pkg.slot_atom != inst_pkg.slot_atom:
11997 higher_slot = inst_pkg
11999 if higher_slot is None:
12003 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12004 #print colorize("WARN", "but still listed in the following package sets:")
12005 #print " %s\n" % ", ".join(parents)
12006 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12007 print colorize("WARN", "still referenced by the following package sets:")
12008 print " %s\n" % ", ".join(parents)
12009 # adjust pkgmap so the display output is correct
12010 pkgmap[cp]["selected"].remove(cpv)
12011 all_selected.remove(cpv)
12012 pkgmap[cp]["protected"].add(cpv)
12016 numselected = len(all_selected)
12017 if not numselected:
12019 "\n>>> No packages selected for removal by " + \
12020 unmerge_action + "\n")
12023 # Unmerge order only matters in some cases
12027 selected = d["selected"]
12030 cp = portage.cpv_getkey(iter(selected).next())
12031 cp_dict = unordered.get(cp)
12032 if cp_dict is None:
12034 unordered[cp] = cp_dict
12037 for k, v in d.iteritems():
12038 cp_dict[k].update(v)
12039 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12041 for x in xrange(len(pkgmap)):
12042 selected = pkgmap[x]["selected"]
12045 for mytype, mylist in pkgmap[x].iteritems():
12046 if mytype == "selected":
12048 mylist.difference_update(all_selected)
12049 cp = portage.cpv_getkey(iter(selected).next())
12050 for y in localtree.dep_match(cp):
12051 if y not in pkgmap[x]["omitted"] and \
12052 y not in pkgmap[x]["selected"] and \
12053 y not in pkgmap[x]["protected"] and \
12054 y not in all_selected:
12055 pkgmap[x]["omitted"].add(y)
12056 if global_unmerge and not pkgmap[x]["selected"]:
12057 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12059 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12060 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12061 "'%s' is part of your system profile.\n" % cp),
12062 level=logging.WARNING, noiselevel=-1)
12063 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12064 "be damaging to your system.\n\n"),
12065 level=logging.WARNING, noiselevel=-1)
12066 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12067 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12068 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12070 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12072 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12073 for mytype in ["selected","protected","omitted"]:
12075 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12076 if pkgmap[x][mytype]:
12077 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12078 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12079 for pn, ver, rev in sorted_pkgs:
12083 myversion = ver + "-" + rev
12084 if mytype == "selected":
12086 colorize("UNMERGE_WARN", myversion + " "),
12090 colorize("GOOD", myversion + " "), noiselevel=-1)
12092 writemsg_level("none ", noiselevel=-1)
12094 writemsg_level("\n", noiselevel=-1)
12096 writemsg_level("\n", noiselevel=-1)
12098 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12099 " packages are slated for removal.\n")
12100 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12101 " and " + colorize("GOOD", "'omitted'") + \
12102 " packages will not be removed.\n\n")
12104 if "--pretend" in myopts:
12105 #we're done... return
12107 if "--ask" in myopts:
12108 if userquery("Would you like to unmerge these packages?")=="No":
12109 # enter pretend mode for correct formatting of results
12110 myopts["--pretend"] = True
12115 #the real unmerging begins, after a short delay....
12116 if clean_delay and not autoclean:
12117 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12119 for x in xrange(len(pkgmap)):
12120 for y in pkgmap[x]["selected"]:
12121 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12122 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12123 mysplit = y.split("/")
12125 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12126 mysettings, unmerge_action not in ["clean","prune"],
12127 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12128 scheduler=scheduler)
12130 if retval != os.EX_OK:
12131 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12133 raise UninstallFailure(retval)
12136 if clean_world and hasattr(sets["world"], "cleanPackage"):
12137 sets["world"].cleanPackage(vartree.dbapi, y)
12138 emergelog(xterm_titles, " >>> unmerge success: "+y)
12139 if clean_world and hasattr(sets["world"], "remove"):
12140 for s in root_config.setconfig.active:
12141 sets["world"].remove(SETPREFIX+s)
12144 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12146 if os.path.exists("/usr/bin/install-info"):
12147 out = portage.output.EOutput()
12152 inforoot=normpath(root+z)
12153 if os.path.isdir(inforoot):
12154 infomtime = long(os.stat(inforoot).st_mtime)
12155 if inforoot not in prev_mtimes or \
12156 prev_mtimes[inforoot] != infomtime:
12157 regen_infodirs.append(inforoot)
12159 if not regen_infodirs:
12160 portage.writemsg_stdout("\n")
12161 out.einfo("GNU info directory index is up-to-date.")
12163 portage.writemsg_stdout("\n")
12164 out.einfo("Regenerating GNU info directory index...")
12166 dir_extensions = ("", ".gz", ".bz2")
12170 for inforoot in regen_infodirs:
12174 if not os.path.isdir(inforoot) or \
12175 not os.access(inforoot, os.W_OK):
12178 file_list = os.listdir(inforoot)
12180 dir_file = os.path.join(inforoot, "dir")
12181 moved_old_dir = False
12182 processed_count = 0
12183 for x in file_list:
12184 if x.startswith(".") or \
12185 os.path.isdir(os.path.join(inforoot, x)):
12187 if x.startswith("dir"):
12189 for ext in dir_extensions:
12190 if x == "dir" + ext or \
12191 x == "dir" + ext + ".old":
12196 if processed_count == 0:
12197 for ext in dir_extensions:
12199 os.rename(dir_file + ext, dir_file + ext + ".old")
12200 moved_old_dir = True
12201 except EnvironmentError, e:
12202 if e.errno != errno.ENOENT:
12205 processed_count += 1
12206 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12207 existsstr="already exists, for file `"
12209 if re.search(existsstr,myso):
12210 # Already exists... Don't increment the count for this.
12212 elif myso[:44]=="install-info: warning: no info dir entry in ":
12213 # This info file doesn't contain a DIR-header: install-info produces this
12214 # (harmless) warning (the --quiet switch doesn't seem to work).
12215 # Don't increment the count for this.
12218 badcount=badcount+1
12219 errmsg += myso + "\n"
12222 if moved_old_dir and not os.path.exists(dir_file):
12223 # We didn't generate a new dir file, so put the old file
12224 # back where it was originally found.
12225 for ext in dir_extensions:
12227 os.rename(dir_file + ext + ".old", dir_file + ext)
12228 except EnvironmentError, e:
12229 if e.errno != errno.ENOENT:
12233 # Clean dir.old cruft so that they don't prevent
12234 # unmerge of otherwise empty directories.
12235 for ext in dir_extensions:
12237 os.unlink(dir_file + ext + ".old")
12238 except EnvironmentError, e:
12239 if e.errno != errno.ENOENT:
12243 #update mtime so we can potentially avoid regenerating.
12244 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12247 out.eerror("Processed %d info files; %d errors." % \
12248 (icount, badcount))
12249 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12252 out.einfo("Processed %d info files." % (icount,))
12255 def display_news_notification(root_config, myopts):
12256 target_root = root_config.root
12257 trees = root_config.trees
12258 settings = trees["vartree"].settings
12259 portdb = trees["porttree"].dbapi
12260 vardb = trees["vartree"].dbapi
12261 NEWS_PATH = os.path.join("metadata", "news")
12262 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12263 newsReaderDisplay = False
12264 update = "--pretend" not in myopts
12266 for repo in portdb.getRepositories():
12267 unreadItems = checkUpdatedNewsItems(
12268 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12270 if not newsReaderDisplay:
12271 newsReaderDisplay = True
12273 print colorize("WARN", " * IMPORTANT:"),
12274 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12277 if newsReaderDisplay:
12278 print colorize("WARN", " *"),
12279 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12282 def display_preserved_libs(vardbapi):
12285 # Ensure the registry is consistent with existing files.
12286 vardbapi.plib_registry.pruneNonExisting()
12288 if vardbapi.plib_registry.hasEntries():
12290 print colorize("WARN", "!!!") + " existing preserved libs:"
12291 plibdata = vardbapi.plib_registry.getPreservedLibs()
12292 linkmap = vardbapi.linkmap
12295 linkmap_broken = False
12299 except portage.exception.CommandNotFound, e:
12300 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12301 level=logging.ERROR, noiselevel=-1)
12303 linkmap_broken = True
12305 search_for_owners = set()
12306 for cpv in plibdata:
12307 internal_plib_keys = set(linkmap._obj_key(f) \
12308 for f in plibdata[cpv])
12309 for f in plibdata[cpv]:
12310 if f in consumer_map:
12313 for c in linkmap.findConsumers(f):
12314 # Filter out any consumers that are also preserved libs
12315 # belonging to the same package as the provider.
12316 if linkmap._obj_key(c) not in internal_plib_keys:
12317 consumers.append(c)
12319 consumer_map[f] = consumers
12320 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12322 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12324 for cpv in plibdata:
12325 print colorize("WARN", ">>>") + " package: %s" % cpv
12327 for f in plibdata[cpv]:
12328 obj_key = linkmap._obj_key(f)
12329 alt_paths = samefile_map.get(obj_key)
12330 if alt_paths is None:
12332 samefile_map[obj_key] = alt_paths
12335 for alt_paths in samefile_map.itervalues():
12336 alt_paths = sorted(alt_paths)
12337 for p in alt_paths:
12338 print colorize("WARN", " * ") + " - %s" % (p,)
12340 consumers = consumer_map.get(f, [])
12341 for c in consumers[:MAX_DISPLAY]:
12342 print colorize("WARN", " * ") + " used by %s (%s)" % \
12343 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12344 if len(consumers) == MAX_DISPLAY + 1:
12345 print colorize("WARN", " * ") + " used by %s (%s)" % \
12346 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12347 for x in owners.get(consumers[MAX_DISPLAY], [])))
12348 elif len(consumers) > MAX_DISPLAY:
12349 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12350 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12353 def _flush_elog_mod_echo():
12355 Dump the mod_echo output now so that our other
12356 notifications are shown last.
12358 @returns: True if messages were shown, False otherwise.
12360 messages_shown = False
12362 from portage.elog import mod_echo
12363 except ImportError:
12364 pass # happens during downgrade to a version without the module
12366 messages_shown = bool(mod_echo._items)
12367 mod_echo.finalize()
12368 return messages_shown
12370 def post_emerge(root_config, myopts, mtimedb, retval):
12372 Misc. things to run at the end of a merge session.
12375 Update Config Files
12378 Display preserved libs warnings
12381 @param trees: A dictionary mapping each ROOT to it's package databases
12383 @param mtimedb: The mtimeDB to store data needed across merge invocations
12384 @type mtimedb: MtimeDB class instance
12385 @param retval: Emerge's return value
12389 1. Calls sys.exit(retval)
12392 target_root = root_config.root
12393 trees = { target_root : root_config.trees }
12394 vardbapi = trees[target_root]["vartree"].dbapi
12395 settings = vardbapi.settings
12396 info_mtimes = mtimedb["info"]
12398 # Load the most current variables from ${ROOT}/etc/profile.env
12401 settings.regenerate()
12404 config_protect = settings.get("CONFIG_PROTECT","").split()
12405 infodirs = settings.get("INFOPATH","").split(":") + \
12406 settings.get("INFODIR","").split(":")
12410 if retval == os.EX_OK:
12411 exit_msg = " *** exiting successfully."
12413 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12414 emergelog("notitles" not in settings.features, exit_msg)
12416 _flush_elog_mod_echo()
12418 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12419 if "--pretend" in myopts or (counter_hash is not None and \
12420 counter_hash == vardbapi._counter_hash()):
12421 display_news_notification(root_config, myopts)
12422 # If vdb state has not changed then there's nothing else to do.
12425 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12426 portage.util.ensure_dirs(vdb_path)
12428 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12429 vdb_lock = portage.locks.lockdir(vdb_path)
12433 if "noinfo" not in settings.features:
12434 chk_updated_info_files(target_root,
12435 infodirs, info_mtimes, retval)
12439 portage.locks.unlockdir(vdb_lock)
12441 chk_updated_cfg_files(target_root, config_protect)
12443 display_news_notification(root_config, myopts)
12444 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12445 display_preserved_libs(vardbapi)
12450 def chk_updated_cfg_files(target_root, config_protect):
12452 #number of directories with some protect files in them
12454 for x in config_protect:
12455 x = os.path.join(target_root, x.lstrip(os.path.sep))
12456 if not os.access(x, os.W_OK):
12457 # Avoid Permission denied errors generated
12461 mymode = os.lstat(x).st_mode
12464 if stat.S_ISLNK(mymode):
12465 # We want to treat it like a directory if it
12466 # is a symlink to an existing directory.
12468 real_mode = os.stat(x).st_mode
12469 if stat.S_ISDIR(real_mode):
12473 if stat.S_ISDIR(mymode):
12474 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12476 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12477 os.path.split(x.rstrip(os.path.sep))
12478 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12479 a = commands.getstatusoutput(mycommand)
12481 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12483 # Show the error message alone, sending stdout to /dev/null.
12484 os.system(mycommand + " 1>/dev/null")
12486 files = a[1].split('\0')
12487 # split always produces an empty string as the last element
12488 if files and not files[-1]:
12492 print "\n"+colorize("WARN", " * IMPORTANT:"),
12493 if stat.S_ISDIR(mymode):
12494 print "%d config files in '%s' need updating." % \
12497 print "config file '%s' needs updating." % x
12500 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12501 " section of the " + bold("emerge")
12502 print " "+yellow("*")+" man page to learn how to update config files."
12504 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12507 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12508 Returns the number of unread (yet relevent) items.
12510 @param portdb: a portage tree database
12511 @type portdb: pordbapi
12512 @param vardb: an installed package database
12513 @type vardb: vardbapi
12516 @param UNREAD_PATH:
12522 1. The number of unread but relevant news items.
12525 from portage.news import NewsManager
12526 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12527 return manager.getUnreadItems( repo_id, update=update )
12529 def insert_category_into_atom(atom, category):
12530 alphanum = re.search(r'\w', atom)
12532 ret = atom[:alphanum.start()] + "%s/" % category + \
12533 atom[alphanum.start():]
12538 def is_valid_package_atom(x):
12540 alphanum = re.search(r'\w', x)
12542 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12543 return portage.isvalidatom(x)
def show_blocker_docs_link():
	# Point the user at the handbook section documenting blocked
	# packages; the x86 handbook is referenced because that section is
	# identical across architectures.
	# NOTE(review): blank "print" spacer lines appear elided from this
	# excerpt of the original function.
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12553 def show_mask_docs():
12554 print "For more information, see the MASKED PACKAGES section in the emerge"
12555 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
	"""Sync the local Portage tree and run post-sync housekeeping.

	Transport is selected from the working copy / SYNC URI: a .git
	checkout is updated via "git pull", rsync:// URIs use rsync with a
	server-timestamp probe and bounded retries, and cvs:// URIs use cvs
	checkout/update.  Afterwards the configuration is reloaded, the
	metadata cache is transferred, the user's post_sync hook is run, and
	the user is told about pending portage upgrades and news items.

	NOTE(review): this excerpt is elided -- many guard/else/try/return
	lines are missing from the visible text, so the indentation of the
	orphaned lines below is a best-effort reconstruction and should be
	checked against the complete original.
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === sync")
	myportdir = settings.get("PORTDIR", None)
	out = portage.output.EOutput()
	# (guard elided: only reached when PORTDIR is unset)
		sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
	if myportdir[-1]=="/":
		myportdir=myportdir[:-1]
	# (try/guard elided: create PORTDIR when it does not exist yet)
		st = os.stat(myportdir)
		print ">>>",myportdir,"not found, creating it."
		os.makedirs(myportdir,0755)
		st = os.stat(myportdir)
	spawn_kwargs["env"] = settings.environ()
	# Drop privileges for the sync child when 'usersync' is active and
	# the tree is owned by a different uid/gid with the owner bits set.
	if 'usersync' in settings.features and \
		portage.data.secpass >= 2 and \
		(st.st_uid != os.getuid() and st.st_mode & 0700 or \
		st.st_gid != os.getgid() and st.st_mode & 0070):
		homedir = pwd.getpwuid(st.st_uid).pw_dir
		# Drop privileges when syncing, in order to match
		# existing uid/gid settings.
		spawn_kwargs["uid"] = st.st_uid
		spawn_kwargs["gid"] = st.st_gid
		spawn_kwargs["groups"] = [st.st_gid]
		spawn_kwargs["env"]["HOME"] = homedir
		if not st.st_mode & 0020:
			umask = umask | 0020
		spawn_kwargs["umask"] = umask
	syncuri = settings.get("SYNC", "").strip()
	# (guard elided: only reached when SYNC is unset)
		writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
			noiselevel=-1, level=logging.ERROR)
	vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
	vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
	dosyncuri = syncuri
	updatecache_flg = False
	if myaction == "metadata":
		print "skipping sync"
		updatecache_flg = True
	elif ".git" in vcs_dirs:
		# Update existing git repository, and ignore the syncuri. We are
		# going to trust the user and assume that the user is in the branch
		# that he/she wants updated. We'll let the user manage branches with
		if portage.process.find_binary("git") is None:
			msg = ["Command not found: git",
			"Type \"emerge dev-util/git\" to enable git support."]
			# (loop header elided: iterates over the msg lines)
				writemsg_level("!!! %s\n" % l,
					level=logging.ERROR, noiselevel=-1)
		msg = ">>> Starting git pull in %s..." % myportdir
		emergelog(xterm_titles, msg )
		writemsg_level(msg + "\n")
		exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
			(portage._shell_quote(myportdir),), **spawn_kwargs)
		if exitcode != os.EX_OK:
			msg = "!!! git pull error in %s." % myportdir
			emergelog(xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
		msg = ">>> Git pull in %s successful" % myportdir
		emergelog(xterm_titles, msg)
		writemsg_level(msg + "\n")
		# git discards mtimes, so resynchronize them from the cache.
		exitcode = git_sync_timestamps(settings, myportdir)
		if exitcode == os.EX_OK:
			updatecache_flg = True
	elif syncuri[:8]=="rsync://":
		# Refuse to rsync over a checkout that is managed by a VCS.
		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
		if not os.path.exists("/usr/bin/rsync"):
			print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
			print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
		if settings["PORTAGE_RSYNC_OPTS"] == "":
			portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
			rsync_opts.extend([
				"--recursive", # Recurse directories
				"--links", # Consider symlinks
				"--safe-links", # Ignore links outside of tree
				"--perms", # Preserve permissions
				"--times", # Preserive mod times
				"--compress", # Compress the data transmitted
				"--force", # Force deletion on non-empty dirs
				"--whole-file", # Don't do block transfers, only entire files
				"--delete", # Delete files that aren't in the master tree
				"--stats", # Show final statistics about what was transfered
				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
				"--exclude=/distfiles", # Exclude distfiles from consideration
				"--exclude=/local", # Exclude local from consideration
				"--exclude=/packages", # Exclude packages from consideration
			# The below validation is not needed when using the above hardcoded
			portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
				shlex.split(settings.get("PORTAGE_RSYNC_OPTS",""))
			# Force options that the tree transfer depends on.
			for opt in ("--recursive", "--times"):
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + " adding required option " + \
						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
					rsync_opts.append(opt)
			for exclude in ("distfiles", "local", "packages"):
				opt = "--exclude=/%s" % exclude
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + \
						" adding required option %s not included in " % opt + \
						"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
					rsync_opts.append(opt)
			# Extra required options when syncing from official mirrors.
			if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
				def rsync_opt_startswith(opt_prefix):
					for x in rsync_opts:
						if x.startswith(opt_prefix):
				if not rsync_opt_startswith("--timeout="):
					rsync_opts.append("--timeout=%d" % mytimeout)
				for opt in ("--compress", "--whole-file"):
					if opt not in rsync_opts:
						portage.writemsg(yellow("WARNING:") + " adding required option " + \
							"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
						rsync_opts.append(opt)
		if "--quiet" in myopts:
			rsync_opts.append("--quiet") # Shut up a lot
			rsync_opts.append("--verbose") # Print filelist
		if "--verbose" in myopts:
			rsync_opts.append("--progress") # Progress meter for each file
		if "--debug" in myopts:
			rsync_opts.append("--checksum") # Force checksum on all files
		# Real local timestamp file.
		servertimestampfile = os.path.join(
			myportdir, "metadata", "timestamp.chk")
		content = portage.util.grabfile(servertimestampfile)
		# (try header elided: parse the local timestamp, tolerating junk)
				mytimestamp = time.mktime(time.strptime(content[0],
					"%a, %d %b %Y %H:%M:%S +0000"))
			except (OverflowError, ValueError):
		# The initial-connection timeout is user configurable.
			rsync_initial_timeout = \
				int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
			rsync_initial_timeout = 15
			maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit, e:
			raise # Needed else can't exit
			maxretries=3 #default number of retries
		# Split the URI into optional credentials, host and optional port.
		user_name, hostname, port = re.split(
			"rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
		if user_name is None:
		updatecache_flg=True
		all_rsync_opts = set(rsync_opts)
		extra_rsync_opts = shlex.split(
			settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
		all_rsync_opts.update(extra_rsync_opts)
		# Choose the address family; -4/--ipv4 forces IPv4 and -6/--ipv6
		# selects IPv6 when the platform supports it.
		family = socket.AF_INET
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6
		# Sentinel exit codes produced by the retry loop below.
		SERVER_OUT_OF_DATE = -1
		EXCEEDED_MAX_RETRIES = -2
		# (retry-loop header elided; the following resolves the hostname
		# to a shuffled list of candidate IPs, best-effort)
					for addrinfo in socket.getaddrinfo(
						hostname, None, family, socket.SOCK_STREAM):
						if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
							# IPv6 addresses need to be enclosed in square brackets
							ips.append("[%s]" % addrinfo[4][0])
							ips.append(addrinfo[4][0])
					from random import shuffle
				except SystemExit, e:
					raise # Needed else can't exit
				except Exception, e:
					print "Notice:",str(e)
					dosyncuri = syncuri.replace(
						"//" + user_name + hostname + port + "/",
						"//" + user_name + ips[0] + port + "/", 1)
				except SystemExit, e:
					raise # Needed else can't exit
				except Exception, e:
					print "Notice:",str(e)
			if "--ask" in myopts:
				if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
			emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
			if "--quiet" not in myopts:
				print ">>> Starting rsync with "+dosyncuri+"..."
			emergelog(xterm_titles,
				">>> Starting retry %d of %d with %s" % \
				(retries,maxretries,dosyncuri))
			print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
			if mytimestamp != 0 and "--quiet" not in myopts:
				print ">>> Checking server timestamp ..."
			rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
			if "--debug" in myopts:
			exitcode = os.EX_OK
			servertimestamp = 0
			# Even if there's no timestamp available locally, fetch the
			# timestamp anyway as an initial probe to verify that the server is
			# responsive. This protects us from hanging indefinitely on a
			# connection attempt to an unresponsive server which rsync's
			# --timeout option does not prevent.
			# Temporary file for remote server timestamp comparison.
			from tempfile import mkstemp
			fd, tmpservertimestampfile = mkstemp()
			mycommand = rsynccommand[:]
			mycommand.append(dosyncuri.rstrip("/") + \
				"/metadata/timestamp.chk")
			mycommand.append(tmpservertimestampfile)
			# Guard the probe with SIGALRM so an unresponsive server
			# cannot hang the sync forever.
				def timeout_handler(signum, frame):
					raise portage.exception.PortageException("timed out")
				signal.signal(signal.SIGALRM, timeout_handler)
				# Timeout here in case the server is unresponsive. The
				# --timeout rsync option doesn't apply to the initial
				# connection attempt.
				if rsync_initial_timeout:
					signal.alarm(rsync_initial_timeout)
					mypids.extend(portage.process.spawn(
						mycommand, env=settings.environ(), returnpid=True))
					exitcode = os.waitpid(mypids[0], 0)[1]
					content = portage.grabfile(tmpservertimestampfile)
					if rsync_initial_timeout:
						os.unlink(tmpservertimestampfile)
			except portage.exception.PortageException, e:
				# The probe timed out: reap the child if it is still alive.
				if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
					os.kill(mypids[0], signal.SIGTERM)
					os.waitpid(mypids[0], 0)
				# This is the same code rsync uses for timeout.
				# Normalize the waitpid status into an exit code.
				if exitcode != os.EX_OK:
					if exitcode & 0xff:
						exitcode = (exitcode & 0xff) << 8
						exitcode = exitcode >> 8
				portage.process.spawned_pids.remove(mypids[0])
				servertimestamp = time.mktime(time.strptime(
					content[0], "%a, %d %b %Y %H:%M:%S +0000"))
			except (OverflowError, ValueError):
			del mycommand, mypids, content
			if exitcode == os.EX_OK:
				if (servertimestamp != 0) and (servertimestamp == mytimestamp):
					emergelog(xterm_titles,
						">>> Cancelling sync -- Already current.")
					print ">>> Timestamps on the server and in the local repository are the same."
					print ">>> Cancelling all further sync action. You are already up to date."
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
				elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
					emergelog(xterm_titles,
						">>> Server out of date: %s" % dosyncuri)
					print ">>> SERVER OUT OF DATE: %s" % dosyncuri
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
					exitcode = SERVER_OUT_OF_DATE
				elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
					# Server is newer (or timestamps unknown): real sync.
					mycommand = rsynccommand + [dosyncuri+"/", myportdir]
					exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
					if exitcode in [0,1,3,4,11,14,20,21]:
			elif exitcode in [1,3,4,11,14,20,21]:
				# Code 2 indicates protocol incompatibility, which is expected
				# for servers with protocol < 29 that don't support
				# --prune-empty-directories. Retry for a server that supports
				# at least rsync protocol version 29 (>=rsync-2.6.4).
			if retries<=maxretries:
				print ">>> Retrying..."
				updatecache_flg=False
				exitcode = EXCEEDED_MAX_RETRIES
			emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
		elif exitcode == SERVER_OUT_OF_DATE:
		elif exitcode == EXCEEDED_MAX_RETRIES:
				">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
			# Map common rsync exit codes onto user-readable advice.
			msg.append("Rsync has reported that there is a syntax error. Please ensure")
			msg.append("that your SYNC statement is proper.")
			msg.append("SYNC=" + settings["SYNC"])
			msg.append("Rsync has reported that there is a File IO error. Normally")
			msg.append("this means your disk is full, but can be caused by corruption")
			msg.append("on the filesystem that contains PORTDIR. Please investigate")
			msg.append("and try again after the problem has been fixed.")
			msg.append("PORTDIR=" + settings["PORTDIR"])
			msg.append("Rsync was killed before it finished.")
			msg.append("Rsync has not successfully finished. It is recommended that you keep")
			msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
			msg.append("to use rsync due to firewall or other restrictions. This should be a")
			msg.append("temporary problem unless complications exist with your network")
			msg.append("(and possibly your system's filesystem) configuration.")
	elif syncuri[:6]=="cvs://":
		if not os.path.exists("/usr/bin/cvs"):
			print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
			print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
		cvsroot=syncuri[6:]
		cvsdir=os.path.dirname(myportdir)
		if not os.path.exists(myportdir+"/CVS"):
			# Initial checkout into cvsdir/gentoo-x86, then rename.
			print ">>> Starting initial cvs checkout with "+syncuri+"..."
			if os.path.exists(cvsdir+"/gentoo-x86"):
				print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
				os.rmdir(myportdir)
				if e.errno != errno.ENOENT:
						"!!! existing '%s' directory; exiting.\n" % myportdir)
			if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
				print "!!! cvs checkout error; exiting."
			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
			# Existing checkout: plain cvs update.
			print ">>> Starting cvs update with "+syncuri+"..."
			retval = portage.process.spawn_bash(
				"cd %s; cvs -z0 -q update -dP" % \
				(portage._shell_quote(myportdir),), **spawn_kwargs)
			if retval != os.EX_OK:
		dosyncuri = syncuri
		# Unknown protocol in SYNC.
		writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
			noiselevel=-1, level=logging.ERROR)
	# When the metadata-transfer feature is disabled, skip cache update.
	if updatecache_flg and \
		myaction != "metadata" and \
		"metadata-transfer" not in settings.features:
		updatecache_flg = False
	# Reload the whole config from scratch.
	settings, trees, mtimedb = load_emerge_config(trees=trees)
	root_config = trees[settings["ROOT"]]["root_config"]
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
	if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
		action_metadata(settings, portdb, myopts)
	if portage._global_updates(trees, mtimedb["updates"]):
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi
		root_config = trees[settings["ROOT"]]["root_config"]
	# Compare the best visible portage version to the installed one.
	mybestpv = portdb.xmatch("bestmatch-visible",
		portage.const.PORTAGE_PACKAGE_ATOM)
	mypvs = portage.best(
		trees[settings["ROOT"]]["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM))
	chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
	if myaction != "metadata":
		# Run the user's post-sync hook when present and executable.
		if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
			retval = portage.process.spawn(
				[os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
				dosyncuri], env=settings.environ())
			if retval != os.EX_OK:
				print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
	# Nag when the installed portage is older than the best visible one.
	if(mybestpv != mypvs) and not "--quiet" in myopts:
		print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
		print red(" * ")+"that you update portage now, before any other packages are updated."
		print red(" * ")+"To update portage, run 'emerge portage' now."
	display_news_notification(root_config, myopts)
def git_sync_timestamps(settings, portdir):
	"""
	Since git doesn't preserve timestamps, synchronize timestamps between
	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
	for a given file as long as the file in the working tree is not modified
	(relative to HEAD).

	NOTE(review): this excerpt is elided -- several try/except/else and
	return/continue lines are missing, so the indentation below is a
	reconstruction to be checked against the complete original.
	"""
	cache_dir = os.path.join(portdir, "metadata", "cache")
	if not os.path.isdir(cache_dir):
	writemsg_level(">>> Synchronizing timestamps...\n")
	from portage.cache.cache_errors import CacheError
		cache_db = settings.load_best_module("portdbapi.metadbmodule")(
			portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	except CacheError, e:
		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)
	ec_dir = os.path.join(portdir, "eclass")
		ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
			if f.endswith(".eclass"))
		writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)
	# Ask git which files are locally modified relative to HEAD; their
	# cached timestamps cannot be trusted.
	args = [portage.const.BASH_BINARY, "-c",
		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
		portage._shell_quote(portdir)]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
	modified_files = set(l.rstrip("\n") for l in proc.stdout)
	if rval != os.EX_OK:
	modified_eclasses = set(ec for ec in ec_names \
		if os.path.join("eclass", ec + ".eclass") in modified_files)
	# Eclass mtimes already propagated in this run, to detect conflicts.
	updated_ec_mtimes = {}
	for cpv in cache_db:
		cpv_split = portage.catpkgsplit(cpv)
		if cpv_split is None:
			writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		cat, pn, ver, rev = cpv_split
		cat, pf = portage.catsplit(cpv)
		relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
		if relative_eb_path in modified_files:
			cache_entry = cache_db[cpv]
			eb_mtime = cache_entry.get("_mtime_")
			ec_mtimes = cache_entry.get("_eclasses_")
			writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		except CacheError, e:
			writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
				(cpv, e), level=logging.ERROR, noiselevel=-1)
		if eb_mtime is None:
			writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
			eb_mtime = long(eb_mtime)
			writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
				(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
		if ec_mtimes is None:
			writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		if modified_eclasses.intersection(ec_mtimes):
		missing_eclasses = set(ec_mtimes).difference(ec_names)
		if missing_eclasses:
			writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
				(cpv, sorted(missing_eclasses)), level=logging.ERROR,
				noiselevel=-1)
		eb_path = os.path.join(portdir, relative_eb_path)
		# NOTE(review): os.stat() returns a stat_result object, yet
		# current_eb_mtime is compared to the integer eb_mtime further
		# down -- that comparison is always unequal as written, so
		# os.utime() always fires; long(os.stat(eb_path).st_mtime) was
		# probably intended (compare the eclass handling below).
		current_eb_mtime = os.stat(eb_path)
			writemsg_level("!!! Missing ebuild: %s\n" % \
				(cpv,), level=logging.ERROR, noiselevel=-1)
		# Skip entries whose eclass mtimes disagree with values already
		# propagated earlier in this run.
		inconsistent = False
		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			updated_mtime = updated_ec_mtimes.get(ec)
			if updated_mtime is not None and updated_mtime != ec_mtime:
				writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
					(cpv, ec), level=logging.ERROR, noiselevel=-1)
				inconsistent = True
		if current_eb_mtime != eb_mtime:
			os.utime(eb_path, (eb_mtime, eb_mtime))
		# Push the cached eclass mtimes onto the working-tree files.
		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			if ec in updated_ec_mtimes:
			ec_path = os.path.join(ec_dir, ec + ".eclass")
			current_mtime = long(os.stat(ec_path).st_mtime)
			if current_mtime != ec_mtime:
				os.utime(ec_path, (ec_mtime, ec_mtime))
			updated_ec_mtimes[ec] = ec_mtime
def action_metadata(settings, portdb, myopts):
	"""Transfer metadata from ${PORTDIR}/metadata/cache into the local
	depcache, printing a percentage progress meter unless --quiet.

	NOTE(review): this excerpt is elided -- a few lines (sys.exit calls,
	else branches, counters) are missing; indentation is reconstructed.
	"""
	portage.writemsg_stdout("\n>>> Updating Portage cache: ")
	old_umask = os.umask(0002)
	cachedir = os.path.normpath(settings.depcachedir)
	# Refuse to operate on a depcachedir pointing at a critical system
	# directory, since a bad setting could otherwise clobber it.
	if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
		"/lib", "/opt", "/proc", "/root", "/sbin",
		"/sys", "/tmp", "/usr", "/var"]:
		print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
			"ROOT DIRECTORY ON YOUR SYSTEM."
		print >> sys.stderr, \
			"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
	if not os.path.exists(cachedir):
	ec = portage.eclass_cache.cache(portdb.porttree_root)
	myportdir = os.path.realpath(settings["PORTDIR"])
	cm = settings.load_best_module("portdbapi.metadbmodule")(
		myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	from portage.cache import util

	class percentage_noise_maker(util.quiet_mirroring):
		# Progress reporter: wraps the cpv stream and prints an updating
		# percentage while the cache is being mirrored.
		def __init__(self, dbapi):
			self.cp_all = dbapi.cp_all()
			l = len(self.cp_all)
			self.call_update_min = 100000000
			self.min_cp_all = l/100.0

		def __iter__(self):
			for x in self.cp_all:
				if self.count > self.min_cp_all:
					self.call_update_min = 0
				for y in self.dbapi.cp_list(x):
			# NOTE(review): "call_update_mine" looks like a typo for
			# "call_update_min" -- this attribute is written once here
			# and never read in the visible code.
			self.call_update_mine = 0

		def update(self, *arg):
			try: self.pstr = int(self.pstr) + 1
			except ValueError: self.pstr = 1
			# Erase the previous percentage with backspaces, then redraw.
			sys.stdout.write("%s%i%%" % \
				("\b" * (len(str(self.pstr))+1), self.pstr))
			self.call_update_min = 10000000

		def finish(self, *arg):
			sys.stdout.write("\b\b\b\b100%\n")

	if "--quiet" in myopts:
		# Quiet mode: stream cpvs without any progress output.
		def quicky_cpv_generator(cp_all_list):
			for x in cp_all_list:
				for y in portdb.cp_list(x):
		source = quicky_cpv_generator(portdb.cp_all())
		noise_maker = portage.cache.util.quiet_mirroring()
		noise_maker = source = percentage_noise_maker(portdb)
	portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
		eclass_cache=ec, verbose_instance=noise_maker)
	os.umask(old_umask)
def action_regen(settings, portdb, max_jobs, max_load):
	"""Regenerate the metadata cache for the tree (emerge --regen) and
	return the MetadataRegen exit status.

	NOTE(review): a few lines (try header, bare except, regen.run()) are
	elided from this excerpt; indentation is reconstructed.
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === regen")
	#regenerate cache entries
	portage.writemsg_stdout("Regenerating cache entries...\n")
	# Detach from stdin; regeneration is non-interactive and spawned
	# ebuild processes should not inherit a usable stdin.
		os.close(sys.stdin.fileno())
	except SystemExit, e:
		raise # Needed else can't exit
	regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
	portage.writemsg_stdout("done!\n")
	return regen.returncode
def action_config(settings, trees, myopts, myfiles):
	"""Run the pkg_config phase for a single installed package atom
	(emerge --config), prompting for a selection when the atom matches
	more than one installed package.

	NOTE(review): this excerpt is elided -- sys.exit calls and several
	else branches are missing; indentation is reconstructed.
	"""
	if len(myfiles) != 1:
		print red("!!! config can only take a single package atom at this time\n")
	if not is_valid_package_atom(myfiles[0]):
		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
	try:
		pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
	except portage.exception.AmbiguousPackageName, e:
		# Multiple matches thrown from cpv_expand
		print "No packages found.\n"
	elif len(pkgs) > 1:
		# Ambiguous atom: interactively pick with --ask, otherwise list
		# the candidates and bail out.
		if "--ask" in myopts:
			print "Please select a package to configure:"
				options.append(str(idx))
				print options[-1]+") "+pkg
			options.append("X")
			idx = userquery("Selection?", options)
			pkg = pkgs[int(idx)-1]
			print "The following packages available:"
			print "\nPlease use a specific atom or the --ask option."
	if "--ask" in myopts:
		if userquery("Ready to configure "+pkg+"?") == "No":
		print "Configuring pkg..."
	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
	mysettings = portage.config(clone=settings)
	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	# NOTE(review): the debug keyword below compares a string setting to
	# the integer 1, so it is always False; the "debug" local computed
	# just above (== "1") looks like what was intended, and is what the
	# "clean" phase below actually uses.
	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
		debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
	if retval == os.EX_OK:
		portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
			mysettings, debug=debug, mydbapi=vardb, tree="vartree")
def action_info(settings, trees, myopts, myfiles):
	"""Print `emerge --info`: portage/system versions, distcc/ccache
	status, selected configuration variables, and per-package build
	settings for any atoms given on the command line.

	NOTE(review): this excerpt is elided -- various else branches, guard
	lines and blank prints are missing; indentation is reconstructed.
	"""
	print getportageversion(settings["PORTDIR"], settings["ROOT"],
		settings.profile_path, settings["CHOST"],
		trees[settings["ROOT"]]["vartree"].dbapi)
	header_title = "System Settings"
		print header_width * "="
		print header_title.rjust(int(header_width/2 + len(header_title)/2))
		print header_width * "="
	print "System uname: "+platform.platform(aliased=1)
	lastSync = portage.grabfile(os.path.join(
		settings["PORTDIR"], "metadata", "timestamp.chk"))
	print "Timestamp of tree:",
	# Report whether the distcc/ccache binaries are present and whether
	# the corresponding FEATURES are enabled.
	output=commands.getstatusoutput("distcc --version")
		print str(output[1].split("\n",1)[0]),
		if "distcc" in settings.features:
	output=commands.getstatusoutput("ccache -V")
		print str(output[1].split("\n",1)[0]),
		if "ccache" in settings.features:
	# Versions of toolchain-critical packages, plus anything listed in
	# profiles/info_pkgs.
	myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
		"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
	myvars = portage.util.unique_array(myvars)
		if portage.isvalidatom(x):
			pkg_matches = trees["/"]["vartree"].dbapi.match(x)
			pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
			pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
			for pn, ver, rev in pkg_matches:
					pkgs.append(ver + "-" + rev)
				pkgs = ", ".join(pkgs)
				print "%-20s %s" % (x+":", pkgs)
			print "%-20s %s" % (x+":", "[NOT VALID]")
	libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
	# With --verbose dump every config variable; otherwise a curated
	# list plus profiles/info_vars.
	if "--verbose" in myopts:
		myvars=settings.keys()
		myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
			'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
			'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
			'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
		myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
	myvars = portage.util.unique_array(myvars)
				print '%s="%s"' % (x, settings[x])
				# USE is special-cased: USE_EXPAND flags are stripped
				# out of USE and printed under their own variable names.
				use = set(settings["USE"].split())
				use_expand = settings["USE_EXPAND"].split()
				for varname in use_expand:
					flag_prefix = varname.lower() + "_"
					for f in list(use):
						if f.startswith(flag_prefix):
				print 'USE="%s"' % " ".join(use),
				for varname in use_expand:
					myval = settings.get(varname)
						print '%s="%s"' % (varname, myval),
			unset_vars.append(x)
		print "Unset: "+", ".join(unset_vars)
	if "--debug" in myopts:
		for x in dir(portage):
			module = getattr(portage, x)
			if "cvs_id_string" in dir(module):
				print "%s: %s" % (str(x), str(module.cvs_id_string))
	# See if we can find any packages installed matching the strings
	# passed on the command line
	vardb = trees[settings["ROOT"]]["vartree"].dbapi
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
		mypkgs.extend(vardb.match(x))
	# If some packages were found...
		# Get our global settings (we only print stuff if it varies from
		# the current config)
		mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
		auxkeys = mydesiredvars + [ "USE", "IUSE"]
		pkgsettings = portage.config(clone=settings)
		for myvar in mydesiredvars:
			global_vals[myvar] = set(settings.get(myvar, "").split())
		# Loop through each package
		# Only print settings if they differ from global settings
		header_title = "Package Settings"
		print header_width * "="
		print header_title.rjust(int(header_width/2 + len(header_title)/2))
		print header_width * "="
		from portage.output import EOutput
			# Get all package specific variables
			auxvalues = vardb.aux_get(pkg, auxkeys)
			for i in xrange(len(auxkeys)):
				valuesmap[auxkeys[i]] = set(auxvalues[i].split())
			for myvar in mydesiredvars:
				# If the package variable doesn't match the
				# current global variable, something has changed
				# so set diff_found so we know to print
				if valuesmap[myvar] != global_vals[myvar]:
					diff_values[myvar] = valuesmap[myvar]
			valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
			valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
			pkgsettings.reset()
			# If a matching ebuild is no longer available in the tree, maybe it
			# would make sense to compare against the flags for the best
			# available version with the same slot?
			if portdb.cpv_exists(pkg):
			pkgsettings.setcpv(pkg, mydb=mydb)
			if valuesmap["IUSE"].intersection(
				pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
				diff_values["USE"] = valuesmap["USE"]
			# If a difference was found, print the info for
				# Print package info
				print "%s was built with the following:" % pkg
				for myvar in mydesiredvars + ["USE"]:
					if myvar in diff_values:
						mylist = list(diff_values[myvar])
						print "%s=\"%s\"" % (myvar, " ".join(mylist))
			print ">>> Attempting to run pkg_info() for '%s'" % pkg
			ebuildpath = vardb.findname(pkg)
			if not ebuildpath or not os.path.exists(ebuildpath):
				out.ewarn("No ebuild found for '%s'" % pkg)
			# NOTE(review): the debug keyword below compares a string
			# setting to the integer 1 and is therefore always False --
			# probably meant == "1" (compare action_config's local).
			portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
				pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
				mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
def action_search(root_config, myopts, myfiles, spinner):
	"""Run `emerge --search` for each term in myfiles via the search
	class, honouring --searchdesc/--quiet/--usepkg/--usepkgonly.

	NOTE(review): guard/else lines are elided from this excerpt;
	indentation is reconstructed.
	"""
		print "emerge: no search terms provided."
	searchinstance = search(root_config,
		spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	for mysearch in myfiles:
		# Search terms are regular expressions; report a malformed
		# pattern instead of crashing.
			searchinstance.execute(mysearch)
		except re.error, comment:
			print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
		searchinstance.output()
13536 def action_depclean(settings, trees, ldpath_mtimes,
13537 myopts, action, myfiles, spinner):
# Remove installed packages that are neither world/system members nor
# required by any other installed package (action == "depclean"), or
# remove redundant slotted/older versions (action == "prune").
# NOTE(review): this listing is elided (embedded line numbers jump);
# missing lines include try:/else: scaffolding and several loop headers.
# Verify any behavioral claim against the complete file.
13538 # Kill packages that aren't explicitly merged or are required as a
13539 # dependency of another package. World file is explicit.
13541 # Global depclean or prune operations are not very safe when there are
13542 # missing dependencies since it's unknown how badly incomplete
13543 # the dependency graph is, and we might accidentally remove packages
13544 # that should have been pulled into the graph. On the other hand, it's
13545 # relatively safe to ignore missing deps when only asked to remove
13546 # specific packages.
13547 allow_missing_deps = len(myfiles) > 0
# Advisory text shown before a full (argument-less) depclean run.
13550 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13551 msg.append("mistakes. Packages that are part of the world set will always\n")
13552 msg.append("be kept. They can be manually added to this set with\n")
13553 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13554 msg.append("package.provided (see portage(5)) will be removed by\n")
13555 msg.append("depclean, even if they are part of the world set.\n")
13557 msg.append("As a safety measure, depclean will not remove any packages\n")
13558 msg.append("unless *all* required dependencies have been resolved. As a\n")
13559 msg.append("consequence, it is often necessary to run %s\n" % \
13560 good("`emerge --update"))
13561 msg.append(good("--newuse --deep @system @world`") + \
13562 " prior to depclean.\n")
13564 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13565 portage.writemsg_stdout("\n")
13567 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13569 xterm_titles = "notitles" not in settings.features
13570 myroot = settings["ROOT"]
13571 root_config = trees[myroot]["root_config"]
13572 getSetAtoms = root_config.setconfig.getSetAtoms
13573 vardb = trees[myroot]["vartree"].dbapi
# The system and world sets anchor the dependency graph: anything they
# (transitively) require must be kept.
13575 required_set_names = ("system", "world")
13579 for s in required_set_names:
13580 required_sets[s] = InternalPackageSet(
13581 initial_atoms=getSetAtoms(s))
13584 # When removing packages, use a temporary version of world
13585 # which excludes packages that are intended to be eligible for
13587 world_temp_set = required_sets["world"]
13588 system_set = required_sets["system"]
# Warn loudly (and count down unless --pretend) when either required
# set is empty, since proceeding could remove essential packages.
13590 if not system_set or not world_temp_set:
13593 writemsg_level("!!! You have no system list.\n",
13594 level=logging.ERROR, noiselevel=-1)
13596 if not world_temp_set:
13597 writemsg_level("!!! You have no world file.\n",
13598 level=logging.WARNING, noiselevel=-1)
13600 writemsg_level("!!! Proceeding is likely to " + \
13601 "break your installation.\n",
13602 level=logging.WARNING, noiselevel=-1)
13603 if "--pretend" not in myopts:
13604 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13606 if action == "depclean":
13607 emergelog(xterm_titles, " >>> depclean")
# Validate and expand the user-supplied atoms (if any) into args_set.
13610 args_set = InternalPackageSet()
13613 if not is_valid_package_atom(x):
13614 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13615 level=logging.ERROR, noiselevel=-1)
13616 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13619 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13620 except portage.exception.AmbiguousPackageName, e:
13621 msg = "The short ebuild name \"" + x + \
13622 "\" is ambiguous. Please specify " + \
13623 "one of the following " + \
13624 "fully-qualified ebuild names instead:"
13625 for line in textwrap.wrap(msg, 70):
13626 writemsg_level("!!! %s\n" % (line,),
13627 level=logging.ERROR, noiselevel=-1)
13629 writemsg_level(" %s\n" % colorize("INFORM", i),
13630 level=logging.ERROR, noiselevel=-1)
13631 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13634 matched_packages = False
13637 matched_packages = True
13639 if not matched_packages:
13640 writemsg_level(">>> No packages selected for removal by %s\n" % \
# Build a "remove"-mode depgraph rooted at the required sets.
13644 writemsg_level("\nCalculating dependencies ")
13645 resolver_params = create_depgraph_params(myopts, "remove")
13646 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13647 vardb = resolver.trees[myroot]["vartree"].dbapi
13649 if action == "depclean":
13652 # Pull in everything that's installed but not matched
13653 # by an argument atom since we don't want to clean any
13654 # package if something depends on it.
13656 world_temp_set.clear()
13661 if args_set.findAtomForPackage(pkg) is None:
13662 world_temp_set.add("=" + pkg.cpv)
13664 except portage.exception.InvalidDependString, e:
13665 show_invalid_depstring_notice(pkg,
13666 pkg.metadata["PROVIDE"], str(e))
# On an invalid PROVIDE string the package is still protected
# (kept in the temporary world set) rather than cleaned.
13668 world_temp_set.add("=" + pkg.cpv)
13671 elif action == "prune":
13673 # Pull in everything that's installed since we don't
13674 # to prune a package if something depends on it.
13675 world_temp_set.clear()
13676 world_temp_set.update(vardb.cp_all())
13680 # Try to prune everything that's slotted.
13681 for cp in vardb.cp_all():
13682 if len(vardb.cp_list(cp)) > 1:
13685 # Remove atoms from world that match installed packages
13686 # that are also matched by argument atoms, but do not remove
13687 # them if they match the highest installed version.
13690 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13691 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13692 raise AssertionError("package expected in matches: " + \
13693 "cp = %s, cpv = %s matches = %s" % \
13694 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
# match_pkgs results are ordered; the last entry is the highest
# installed version, which prune always keeps.
13696 highest_version = pkgs_for_cp[-1]
13697 if pkg == highest_version:
13698 # pkg is the highest version
13699 world_temp_set.add("=" + pkg.cpv)
13702 if len(pkgs_for_cp) <= 1:
13703 raise AssertionError("more packages expected: " + \
13704 "cp = %s, cpv = %s matches = %s" % \
13705 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13708 if args_set.findAtomForPackage(pkg) is None:
13709 world_temp_set.add("=" + pkg.cpv)
13711 except portage.exception.InvalidDependString, e:
13712 show_invalid_depstring_notice(pkg,
13713 pkg.metadata["PROVIDE"], str(e))
13715 world_temp_set.add("=" + pkg.cpv)
# Seed the resolver with the (possibly rewritten) required sets and
# complete the graph over installed packages.
13719 for s, package_set in required_sets.iteritems():
13720 set_atom = SETPREFIX + s
13721 set_arg = SetArg(arg=set_atom, set=package_set,
13722 root_config=resolver.roots[myroot])
13723 set_args[s] = set_arg
13724 for atom in set_arg.set:
13725 resolver._dep_stack.append(
13726 Dependency(atom=atom, root=myroot, parent=set_arg))
13727 resolver.digraph.add(set_arg, None)
13729 success = resolver._complete_graph()
13730 writemsg_level("\b\b... done!\n")
13732 resolver.display_problems()
# Helper: report hard (non-soft) unsatisfied deps; cleaning with such
# deps present is unsafe unless specific atoms were requested
# (allow_missing_deps).
13737 def unresolved_deps():
13739 unresolvable = set()
13740 for dep in resolver._initially_unsatisfied_deps:
13741 if isinstance(dep.parent, Package) and \
13742 (dep.priority > UnmergeDepPriority.SOFT):
13743 unresolvable.add((dep.atom, dep.parent.cpv))
13745 if not unresolvable:
13748 if unresolvable and not allow_missing_deps:
13749 prefix = bad(" * ")
13751 msg.append("Dependencies could not be completely resolved due to")
13752 msg.append("the following required packages not being installed:")
13754 for atom, parent in unresolvable:
13755 msg.append(" %s pulled in by:" % (atom,))
13756 msg.append(" %s" % (parent,))
13758 msg.append("Have you forgotten to run " + \
13759 good("`emerge --update --newuse --deep @system @world`") + " prior")
13760 msg.append(("to %s? It may be necessary to manually " + \
13761 "uninstall packages that no longer") % action)
13762 msg.append("exist in the portage tree since " + \
13763 "it may not be possible to satisfy their")
13764 msg.append("dependencies. Also, be aware of " + \
13765 "the --with-bdeps option that is documented")
13766 msg.append("in " + good("`man emerge`") + ".")
13767 if action == "prune":
13769 msg.append("If you would like to ignore " + \
13770 "dependencies then use %s." % good("--nodeps"))
13771 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13772 level=logging.ERROR, noiselevel=-1)
13776 if unresolved_deps():
13779 graph = resolver.digraph.copy()
13780 required_pkgs_total = 0
13782 if isinstance(node, Package):
13783 required_pkgs_total += 1
# Helper: print which graph parents keep child_node installed
# (used for --verbose reverse-dependency output).
13785 def show_parents(child_node):
13786 parent_nodes = graph.parent_nodes(child_node)
13787 if not parent_nodes:
13788 # With --prune, the highest version can be pulled in without any
13789 # real parent since all installed packages are pulled in. In that
13790 # case there's nothing to show here.
13793 for node in parent_nodes:
13794 parent_strs.append(str(getattr(node, "cpv", node)))
13797 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
13798 for parent_str in parent_strs:
13799 msg.append(" %s\n" % (parent_str,))
13801 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13803 def cmp_pkg_cpv(pkg1, pkg2):
13804 """Sort Package instances by cpv."""
13805 if pkg1.cpv > pkg2.cpv:
13807 elif pkg1.cpv == pkg2.cpv:
# Helper: collect installed packages not reachable in the graph; these
# are the removal candidates for the current action.
13812 def create_cleanlist():
13813 pkgs_to_remove = []
13815 if action == "depclean":
13818 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13821 arg_atom = args_set.findAtomForPackage(pkg)
13822 except portage.exception.InvalidDependString:
13823 # this error has already been displayed by now
13827 if pkg not in graph:
13828 pkgs_to_remove.append(pkg)
13829 elif "--verbose" in myopts:
13833 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13834 if pkg not in graph:
13835 pkgs_to_remove.append(pkg)
13836 elif "--verbose" in myopts:
13839 elif action == "prune":
13840 # Prune really uses all installed instead of world. It's not
13841 # a real reverse dependency so don't display it as such.
13842 graph.remove(set_args["world"])
13844 for atom in args_set:
13845 for pkg in vardb.match_pkgs(atom):
13846 if pkg not in graph:
13847 pkgs_to_remove.append(pkg)
13848 elif "--verbose" in myopts:
13851 if not pkgs_to_remove:
13853 ">>> No packages selected for removal by %s\n" % action)
13854 if "--verbose" not in myopts:
13856 ">>> To see reverse dependencies, use %s\n" % \
13858 if action == "prune":
13860 ">>> To ignore dependencies, use %s\n" % \
13863 return pkgs_to_remove
13865 cleanlist = create_cleanlist()
13868 clean_set = set(cleanlist)
13870 # Check if any of these package are the sole providers of libraries
13871 # with consumers that have not been selected for removal. If so, these
13872 # packages and any dependencies need to be added to the graph.
13873 real_vardb = trees[myroot]["vartree"].dbapi
13874 linkmap = real_vardb.linkmap
13875 liblist = linkmap.listLibraryObjects()
13876 consumer_cache = {}
13877 provider_cache = {}
13881 writemsg_level(">>> Checking for lib consumers...\n")
13883 for pkg in cleanlist:
13884 pkg_dblink = real_vardb._dblink(pkg.cpv)
13885 provided_libs = set()
13887 for lib in liblist:
13888 if pkg_dblink.isowner(lib, myroot):
13889 provided_libs.add(lib)
13891 if not provided_libs:
# lib_consumers lookups are memoized per library path since
# findConsumers() is expensive and libraries repeat across packages.
13895 for lib in provided_libs:
13896 lib_consumers = consumer_cache.get(lib)
13897 if lib_consumers is None:
13898 lib_consumers = linkmap.findConsumers(lib)
13899 consumer_cache[lib] = lib_consumers
13901 consumers[lib] = lib_consumers
# Drop consumers owned by the package itself; those disappear with it.
13906 for lib, lib_consumers in consumers.items():
13907 for consumer_file in list(lib_consumers):
13908 if pkg_dblink.isowner(consumer_file, myroot):
13909 lib_consumers.remove(consumer_file)
13910 if not lib_consumers:
13916 for lib, lib_consumers in consumers.iteritems():
13918 soname = soname_cache.get(lib)
13920 soname = linkmap.getSoname(lib)
13921 soname_cache[lib] = soname
13923 consumer_providers = []
13924 for lib_consumer in lib_consumers:
13925 providers = provider_cache.get(lib)
13926 if providers is None:
13927 providers = linkmap.findProviders(lib_consumer)
13928 provider_cache[lib_consumer] = providers
# NOTE(review): the cache is read with key `lib` but written with
# key `lib_consumer` (lines 13925 vs 13928) — looks inconsistent;
# confirm against the full upstream file before relying on it.
13929 if soname not in providers:
13930 # Why does this happen?
13932 consumer_providers.append(
13933 (lib_consumer, providers[soname]))
13935 consumers[lib] = consumer_providers
13937 consumer_map[pkg] = consumers
# Resolve all consumer/provider file paths to owning packages in one
# batched query (getFileOwnerMap) instead of per-file lookups.
13941 search_files = set()
13942 for consumers in consumer_map.itervalues():
13943 for lib, consumer_providers in consumers.iteritems():
13944 for lib_consumer, providers in consumer_providers:
13945 search_files.add(lib_consumer)
13946 search_files.update(providers)
13948 writemsg_level(">>> Assigning files to packages...\n")
13949 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13951 for pkg, consumers in consumer_map.items():
13952 for lib, consumer_providers in consumers.items():
13953 lib_consumers = set()
13955 for lib_consumer, providers in consumer_providers:
13956 owner_set = file_owners.get(lib_consumer)
13957 provider_dblinks = set()
13958 provider_pkgs = set()
13960 if len(providers) > 1:
13961 for provider in providers:
13962 provider_set = file_owners.get(provider)
13963 if provider_set is not None:
13964 provider_dblinks.update(provider_set)
# If an alternative provider package survives the clean, this
# consumer is not endangered by removing pkg.
13966 if len(provider_dblinks) > 1:
13967 for provider_dblink in provider_dblinks:
13968 pkg_key = ("installed", myroot,
13969 provider_dblink.mycpv, "nomerge")
13970 if pkg_key not in clean_set:
13971 provider_pkgs.add(vardb.get(pkg_key))
13976 if owner_set is not None:
13977 lib_consumers.update(owner_set)
13979 for consumer_dblink in list(lib_consumers):
13980 if ("installed", myroot, consumer_dblink.mycpv,
13981 "nomerge") in clean_set:
13982 lib_consumers.remove(consumer_dblink)
13986 consumers[lib] = lib_consumers
13990 del consumer_map[pkg]
13993 # TODO: Implement a package set for rebuilding consumer packages.
# Warn about packages retained to protect link-level dependencies.
13995 msg = "In order to avoid breakage of link level " + \
13996 "dependencies, one or more packages will not be removed. " + \
13997 "This can be solved by rebuilding " + \
13998 "the packages that pulled them in."
14000 prefix = bad(" * ")
14001 from textwrap import wrap
14002 writemsg_level("".join(prefix + "%s\n" % line for \
14003 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14006 for pkg, consumers in consumer_map.iteritems():
14007 unique_consumers = set(chain(*consumers.values()))
14008 unique_consumers = sorted(consumer.mycpv \
14009 for consumer in unique_consumers)
14011 msg.append(" %s pulled in by:" % (pkg.cpv,))
14012 for consumer in unique_consumers:
14013 msg.append(" %s" % (consumer,))
14015 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14016 level=logging.WARNING, noiselevel=-1)
14018 # Add lib providers to the graph as children of lib consumers,
14019 # and also add any dependencies pulled in by the provider.
14020 writemsg_level(">>> Adding lib providers to graph...\n")
14022 for pkg, consumers in consumer_map.iteritems():
14023 for consumer_dblink in set(chain(*consumers.values())):
14024 consumer_pkg = vardb.get(("installed", myroot,
14025 consumer_dblink.mycpv, "nomerge"))
14026 if not resolver._add_pkg(pkg,
14027 Dependency(parent=consumer_pkg,
14028 priority=UnmergeDepPriority(runtime=True),
14030 resolver.display_problems()
# Re-complete the graph after adding providers, then rebuild the
# cleanlist against the enlarged graph.
14033 writemsg_level("\nCalculating dependencies ")
14034 success = resolver._complete_graph()
14035 writemsg_level("\b\b... done!\n")
14036 resolver.display_problems()
14039 if unresolved_deps():
14042 graph = resolver.digraph.copy()
14043 required_pkgs_total = 0
14045 if isinstance(node, Package):
14046 required_pkgs_total += 1
14047 cleanlist = create_cleanlist()
14050 clean_set = set(cleanlist)
14052 # Use a topological sort to create an unmerge order such that
14053 # each package is unmerged before it's dependencies. This is
14054 # necessary to avoid breaking things that may need to run
14055 # during pkg_prerm or pkg_postrm phases.
14057 # Create a new graph to account for dependencies between the
14058 # packages being unmerged.
14062 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14063 runtime = UnmergeDepPriority(runtime=True)
14064 runtime_post = UnmergeDepPriority(runtime_post=True)
14065 buildtime = UnmergeDepPriority(buildtime=True)
14067 "RDEPEND": runtime,
14068 "PDEPEND": runtime_post,
14069 "DEPEND": buildtime,
14072 for node in clean_set:
14073 graph.add(node, None)
14075 node_use = node.metadata["USE"].split()
14076 for dep_type in dep_keys:
14077 depstr = node.metadata[dep_type]
# _dep_check_strict is a module-global toggled around dep_check()
# and restored immediately after (line 14086).
14081 portage.dep._dep_check_strict = False
14082 success, atoms = portage.dep_check(depstr, None, settings,
14083 myuse=node_use, trees=resolver._graph_trees,
14086 portage.dep._dep_check_strict = True
14088 # Ignore invalid deps of packages that will
14089 # be uninstalled anyway.
14092 priority = priority_map[dep_type]
14094 if not isinstance(atom, portage.dep.Atom):
14095 # Ignore invalid atoms returned from dep_check().
14099 matches = vardb.match_pkgs(atom)
14102 for child_node in matches:
14103 if child_node in clean_set:
14104 graph.add(child_node, node, priority=priority)
14107 if len(graph.order) == len(graph.root_nodes()):
14108 # If there are no dependencies between packages
14109 # let unmerge() group them by cat/pn.
14111 cleanlist = [pkg.cpv for pkg in graph.order]
14113 # Order nodes from lowest to highest overall reference count for
14114 # optimal root node selection.
14115 node_refcounts = {}
14116 for node in graph.order:
14117 node_refcounts[node] = len(graph.parent_nodes(node))
14118 def cmp_reference_count(node1, node2):
14119 return node_refcounts[node1] - node_refcounts[node2]
14120 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
# Peel off root nodes repeatedly, relaxing ignore_priority only as far
# as needed to make progress when circular deps leave no true roots.
14122 ignore_priority_range = [None]
14123 ignore_priority_range.extend(
14124 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14125 while not graph.empty():
14126 for ignore_priority in ignore_priority_range:
14127 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14131 raise AssertionError("no root nodes")
14132 if ignore_priority is not None:
14133 # Some deps have been dropped due to circular dependencies,
14134 # so only pop one node in order do minimize the number that
14139 cleanlist.append(node.cpv)
14141 unmerge(root_config, myopts, "unmerge", cleanlist,
14142 ldpath_mtimes, ordered=ordered)
14144 if action == "prune":
14147 if not cleanlist and "--quiet" in myopts:
# Final summary counters (Python 2 print statements).
14150 print "Packages installed: "+str(len(vardb.cpv_all()))
14151 print "Packages in world: " + \
14152 str(len(root_config.sets["world"].getAtoms()))
14153 print "Packages in system: " + \
14154 str(len(root_config.sets["system"].getAtoms()))
14155 print "Required packages: "+str(required_pkgs_total)
14156 if "--pretend" in myopts:
14157 print "Number to remove: "+str(len(cleanlist))
14159 print "Number removed: "+str(len(cleanlist))
14161 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# NOTE(review): this listing is elided (embedded line numbers jump); the
# surrounding try:/while scaffolding is partially missing — verify
# control flow against the full file before editing.
14163 Construct a depgraph for the given resume list. This will raise
14164 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14166 @returns: (success, depgraph, dropped_tasks)
14169 skip_unsatisfied = True
14170 mergelist = mtimedb["resume"]["mergelist"]
14171 dropped_tasks = set()
14173 mydepgraph = depgraph(settings, trees,
14174 myopts, myparams, spinner)
14176 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14177 skip_masked=skip_masked)
14178 except depgraph.UnsatisfiedResumeDep, e:
14179 if not skip_unsatisfied:
# Walk up from each unsatisfied parent, collecting every ancestor whose
# own deps would become unsatisfied once that package is dropped.
14182 graph = mydepgraph.digraph
14183 unsatisfied_parents = dict((dep.parent, dep.parent) \
14184 for dep in e.value)
14185 traversed_nodes = set()
14186 unsatisfied_stack = list(unsatisfied_parents)
14187 while unsatisfied_stack:
14188 pkg = unsatisfied_stack.pop()
14189 if pkg in traversed_nodes:
14191 traversed_nodes.add(pkg)
14193 # If this package was pulled in by a parent
14194 # package scheduled for merge, removing this
14195 # package may cause the the parent package's
14196 # dependency to become unsatisfied.
14197 for parent_node in graph.parent_nodes(pkg):
14198 if not isinstance(parent_node, Package) \
14199 or parent_node.operation not in ("merge", "nomerge"):
14202 graph.child_nodes(parent_node,
14203 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14204 if pkg in unsatisfied:
14205 unsatisfied_parents[parent_node] = parent_node
14206 unsatisfied_stack.append(parent_node)
# Rebuild the mergelist without the dropped entries (mergelist entries
# are 4-element lists; tuple(x) is the hashable lookup key).
14208 pruned_mergelist = []
14209 for x in mergelist:
14210 if isinstance(x, list) and \
14211 tuple(x) not in unsatisfied_parents:
14212 pruned_mergelist.append(x)
14214 # If the mergelist doesn't shrink then this loop is infinite.
14215 if len(pruned_mergelist) == len(mergelist):
14216 # This happens if a package can't be dropped because
14217 # it's already installed, but it has unsatisfied PDEPEND.
14219 mergelist[:] = pruned_mergelist
14221 # Exclude installed packages that have been removed from the graph due
14222 # to failure to build/install runtime dependencies after the dependent
14223 # package has already been installed.
14224 dropped_tasks.update(pkg for pkg in \
14225 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs releases references so the pruned graph can be rebuilt
# without leaking the old Package instances.
14226 mydepgraph.break_refs(unsatisfied_parents)
14228 del e, graph, traversed_nodes, \
14229 unsatisfied_parents, unsatisfied_stack
14233 return (success, mydepgraph, dropped_tasks)
14235 def action_build(settings, trees, mtimedb, myopts, myaction, myfiles, spinner):
# Top-level driver for merge/resume operations: validates resume state,
# builds (or resumes) a depgraph, shows --pretend/--ask output, then
# hands the merge list to the Scheduler.
# NOTE(review): this listing is elided (embedded line numbers jump);
# many del/continue/return lines between visible lines are missing —
# verify against the full file.
14238 # validate the state of the resume data
14239 # so that we can make assumptions later.
14240 for k in ("resume", "resume_backup"):
14241 if k not in mtimedb:
14243 resume_data = mtimedb[k]
14244 if not isinstance(resume_data, dict):
14247 mergelist = resume_data.get("mergelist")
14248 if not isinstance(mergelist, list):
# Each mergelist entry must be [pkg_type, pkg_root, pkg_key, pkg_action].
14251 for x in mergelist:
14252 if not (isinstance(x, list) and len(x) == 4):
14254 pkg_type, pkg_root, pkg_key, pkg_action = x
14255 if pkg_root not in trees:
14256 # Current $ROOT setting differs,
14257 # so the list must be stale.
14263 resume_opts = resume_data.get("myopts")
14264 if not isinstance(resume_opts, (dict, list)):
14267 favorites = resume_data.get("favorites")
14268 if not isinstance(favorites, list):
# --resume: restore options from the saved (or backup) resume entry.
14273 if "--resume" in myopts and \
14274 ("resume" in mtimedb or
14275 "resume_backup" in mtimedb):
14277 if "resume" not in mtimedb:
14278 mtimedb["resume"] = mtimedb["resume_backup"]
14279 del mtimedb["resume_backup"]
14281 # "myopts" is a list for backward compatibility.
14282 resume_opts = mtimedb["resume"].get("myopts", [])
14283 if isinstance(resume_opts, list):
14284 resume_opts = dict((k,True) for k in resume_opts)
# Interactive/display-only options must not be replayed on resume.
14285 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14286 resume_opts.pop(opt, None)
14287 myopts.update(resume_opts)
14289 if "--debug" in myopts:
14290 writemsg_level("myopts %s\n" % (myopts,))
14292 # Adjust config according to options of the command being resumed.
14293 for myroot in trees:
14294 mysettings = trees[myroot]["vartree"].settings
14295 mysettings.unlock()
14296 adjust_config(myopts, mysettings)
14298 del myroot, mysettings
14300 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags as booleans.
14303 buildpkgonly = "--buildpkgonly" in myopts
14304 pretend = "--pretend" in myopts
14305 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14306 ask = "--ask" in myopts
14307 nodeps = "--nodeps" in myopts
14308 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14309 tree = "--tree" in myopts
14310 if nodeps and tree:
14312 del myopts["--tree"]
14313 portage.writemsg(colorize("WARN", " * ") + \
14314 "--tree is broken with --nodeps. Disabling...\n")
14315 debug = "--debug" in myopts
14316 verbose = "--verbose" in myopts
14317 quiet = "--quiet" in myopts
14318 if pretend or fetchonly:
14319 # make the mtimedb readonly
14320 mtimedb.filename = None
14321 if '--digest' in myopts or 'digest' in settings.features:
14322 if '--digest' in myopts:
14323 msg = "The --digest option"
14325 msg = "The FEATURES=digest setting"
14327 msg += " can prevent corruption from being" + \
14328 " noticed. The `repoman manifest` command is the preferred" + \
14329 " way to generate manifests and it is capable of doing an" + \
14330 " entire repository or category at once."
14331 prefix = bad(" * ")
14332 writemsg(prefix + "\n")
14333 from textwrap import wrap
14334 for line in wrap(msg, 72):
14335 writemsg("%s%s\n" % (prefix, line))
14336 writemsg(prefix + "\n")
# Describe the pending operation before the package list is shown.
14338 if "--quiet" not in myopts and \
14339 ("--pretend" in myopts or "--ask" in myopts or \
14340 "--tree" in myopts or "--verbose" in myopts):
14342 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14344 elif "--buildpkgonly" in myopts:
14348 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14350 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14354 print darkgreen("These are the packages that would be %s, in order:") % action
14357 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14358 if not show_spinner:
14359 spinner.update = spinner.update_quiet
14362 favorites = mtimedb["resume"].get("favorites")
14363 if not isinstance(favorites, list):
14367 print "Calculating dependencies ",
14368 myparams = create_depgraph_params(myopts, myaction)
# --skipfirst: drop the first pending "merge" task from the resume list.
14370 resume_data = mtimedb["resume"]
14371 mergelist = resume_data["mergelist"]
14372 if mergelist and "--skipfirst" in myopts:
14373 for i, task in enumerate(mergelist):
14374 if isinstance(task, list) and \
14375 task and task[-1] == "merge":
14382 success, mydepgraph, dropped_tasks = resume_depgraph(
14383 settings, trees, mtimedb, myopts, myparams, spinner)
14384 except (portage.exception.PackageNotFound,
14385 depgraph.UnsatisfiedResumeDep), e:
14386 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14387 mydepgraph = e.depgraph
14390 from textwrap import wrap
14391 from portage.output import EOutput
# Diagnostic output for a resume list that could not be loaded.
14394 resume_data = mtimedb["resume"]
14395 mergelist = resume_data.get("mergelist")
14396 if not isinstance(mergelist, list):
14398 if mergelist and debug or (verbose and not quiet):
14399 out.eerror("Invalid resume list:")
14402 for task in mergelist:
14403 if isinstance(task, list):
14404 out.eerror(indent + str(tuple(task)))
14407 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14408 out.eerror("One or more packages are either masked or " + \
14409 "have missing dependencies:")
14412 for dep in e.value:
14413 if dep.atom is None:
14414 out.eerror(indent + "Masked package:")
14415 out.eerror(2 * indent + str(dep.parent))
14418 out.eerror(indent + str(dep.atom) + " pulled in by:")
14419 out.eerror(2 * indent + str(dep.parent))
14421 msg = "The resume list contains packages " + \
14422 "that are either masked or have " + \
14423 "unsatisfied dependencies. " + \
14424 "Please restart/continue " + \
14425 "the operation manually, or use --skipfirst " + \
14426 "to skip the first package in the list and " + \
14427 "any other packages that may be " + \
14428 "masked or have missing dependencies."
14429 for line in wrap(msg, 72):
14431 elif isinstance(e, portage.exception.PackageNotFound):
14432 out.eerror("An expected package is " + \
14433 "not available: %s" % str(e))
14435 msg = "The resume list contains one or more " + \
14436 "packages that are no longer " + \
14437 "available. Please restart/continue " + \
14438 "the operation manually."
14439 for line in wrap(msg, 72):
14443 print "\b\b... done!"
# Report tasks silently dropped during resume (masking/missing deps).
14447 portage.writemsg("!!! One or more packages have been " + \
14448 "dropped due to\n" + \
14449 "!!! masking or unsatisfied dependencies:\n\n",
14451 for task in dropped_tasks:
14452 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
14453 portage.writemsg("\n", noiselevel=-1)
14456 if mydepgraph is not None:
14457 mydepgraph.display_problems()
14458 if not (ask or pretend):
14459 # delete the current list and also the backup
14460 # since it's probably stale too.
14461 for k in ("resume", "resume_backup"):
14462 mtimedb.pop(k, None)
14467 if ("--resume" in myopts):
14468 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the command-line files.
14471 myparams = create_depgraph_params(myopts, myaction)
14472 if "--quiet" not in myopts and "--nodeps" not in myopts:
14473 print "Calculating dependencies ",
14475 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14477 retval, favorites = mydepgraph.select_files(myfiles)
14478 except portage.exception.PackageNotFound, e:
14479 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14481 except portage.exception.PackageSetNotFound, e:
14482 root_config = trees[settings["ROOT"]]["root_config"]
14483 display_missing_pkg_set(root_config, e.value)
14486 print "\b\b... done!"
14488 mydepgraph.display_problems()
# Interactive (--ask/--tree/--verbose) preview and confirmation prompt.
14491 if "--pretend" not in myopts and \
14492 ("--ask" in myopts or "--tree" in myopts or \
14493 "--verbose" in myopts) and \
14494 not ("--quiet" in myopts and "--ask" not in myopts):
14495 if "--resume" in myopts:
14496 mymergelist = mydepgraph.altlist()
14497 if len(mymergelist) == 0:
14498 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14500 favorites = mtimedb["resume"]["favorites"]
14501 retval = mydepgraph.display(
14502 mydepgraph.altlist(reversed=tree),
14503 favorites=favorites)
14504 mydepgraph.display_problems()
14505 if retval != os.EX_OK:
14507 prompt="Would you like to resume merging these packages?"
14509 retval = mydepgraph.display(
14510 mydepgraph.altlist(reversed=("--tree" in myopts)),
14511 favorites=favorites)
14512 mydepgraph.display_problems()
14513 if retval != os.EX_OK:
14516 for x in mydepgraph.altlist():
14517 if isinstance(x, Package) and x.operation == "merge":
14521 sets = trees[settings["ROOT"]]["root_config"].sets
14522 world_candidates = None
14523 if "--noreplace" in myopts and \
14524 not oneshot and favorites:
14525 # Sets that are not world candidates are filtered
14526 # out here since the favorites list needs to be
14527 # complete for depgraph.loadResumeCommand() to
14528 # operate correctly.
14529 world_candidates = [x for x in favorites \
14530 if not (x.startswith(SETPREFIX) and \
14531 not sets[x[1:]].world_candidate)]
14532 if "--noreplace" in myopts and \
14533 not oneshot and world_candidates:
14535 for x in world_candidates:
14536 print " %s %s" % (good("*"), x)
14537 prompt="Would you like to add these packages to your world favorites?"
14538 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14539 prompt="Nothing to merge; would you like to auto-clean packages?"
14542 print "Nothing to merge; quitting."
14545 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14546 prompt="Would you like to fetch the source files for these packages?"
14548 prompt="Would you like to merge these packages?"
14550 if "--ask" in myopts and userquery(prompt) == "No":
14555 # Don't ask again (e.g. when auto-cleaning packages after merge)
14556 myopts.pop("--ask", None)
# --pretend (without fetch): display only, then bail out with retval.
14558 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14559 if ("--resume" in myopts):
14560 mymergelist = mydepgraph.altlist()
14561 if len(mymergelist) == 0:
14562 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14564 favorites = mtimedb["resume"]["favorites"]
14565 retval = mydepgraph.display(
14566 mydepgraph.altlist(reversed=tree),
14567 favorites=favorites)
14568 mydepgraph.display_problems()
14569 if retval != os.EX_OK:
14572 retval = mydepgraph.display(
14573 mydepgraph.altlist(reversed=("--tree" in myopts)),
14574 favorites=favorites)
14575 mydepgraph.display_problems()
14576 if retval != os.EX_OK:
# --buildpkgonly requires a graph with no unmerged dependencies; this
# check appears twice (pretend path here, real-merge path below).
14578 if "--buildpkgonly" in myopts:
14579 graph_copy = mydepgraph.digraph.clone()
14580 removed_nodes = set()
14581 for node in graph_copy:
14582 if not isinstance(node, Package) or \
14583 node.operation == "nomerge":
14584 removed_nodes.add(node)
14585 graph_copy.difference_update(removed_nodes)
14586 if not graph_copy.hasallzeros(ignore_priority = \
14587 DepPrioritySatisfiedRange.ignore_medium):
14588 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14589 print "!!! You have to merge the dependencies before you can build this package.\n"
14592 if "--buildpkgonly" in myopts:
14593 graph_copy = mydepgraph.digraph.clone()
14594 removed_nodes = set()
14595 for node in graph_copy:
14596 if not isinstance(node, Package) or \
14597 node.operation == "nomerge":
14598 removed_nodes.add(node)
14599 graph_copy.difference_update(removed_nodes)
14600 if not graph_copy.hasallzeros(ignore_priority = \
14601 DepPrioritySatisfiedRange.ignore_medium):
14602 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14603 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Actual merge: hand the list to the Scheduler, dropping depgraph
# references first (break_refs / del / clear_caches) to free memory.
14606 if ("--resume" in myopts):
14607 favorites=mtimedb["resume"]["favorites"]
14608 mymergelist = mydepgraph.altlist()
14609 mydepgraph.break_refs(mymergelist)
14610 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14611 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14612 del mydepgraph, mymergelist
14613 clear_caches(trees)
14615 retval = mergetask.merge()
14616 merge_count = mergetask.curval
14618 if "resume" in mtimedb and \
14619 "mergelist" in mtimedb["resume"] and \
14620 len(mtimedb["resume"]["mergelist"]) > 1:
14621 mtimedb["resume_backup"] = mtimedb["resume"]
14622 del mtimedb["resume"]
14624 mtimedb["resume"]={}
14625 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14626 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14627 # a list type for options.
14628 mtimedb["resume"]["myopts"] = myopts.copy()
14630 # Convert Atom instances to plain str.
14631 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14633 pkglist = mydepgraph.altlist()
14634 mydepgraph.saveNomergeFavorites()
14635 mydepgraph.break_refs(pkglist)
14636 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14637 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14638 del mydepgraph, pkglist
14639 clear_caches(trees)
14641 retval = mergetask.merge()
14642 merge_count = mergetask.curval
# Post-merge housekeeping: auto-clean (if enabled) and prune the
# preserved-libs registry.
14644 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14645 if "yes" == settings.get("AUTOCLEAN"):
14646 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14647 unmerge(trees[settings["ROOT"]]["root_config"],
14648 myopts, "clean", [],
14649 ldpath_mtimes, autoclean=1)
14651 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14652 + " AUTOCLEAN is disabled. This can cause serious"
14653 + " problems due to overlapping packages.\n")
14654 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
def multiple_actions(action1, action2):
    """Report on stderr that two mutually exclusive emerge actions
    were requested on the same command line."""
    lines = (
        "",
        "!!! Multiple actions requested... Please choose one only.",
        "!!! '%s' or '%s'" % (action1, action2),
        "",
    )
    sys.stderr.write("\n".join(lines) + "\n")
def insert_optional_args(args):
    """
    Parse optional arguments and insert a value if one has
    not been provided. This is done before feeding the args
    to the optparse parser since that parser does not support
    this feature natively.

    @param args: raw command-line argument list (not mutated; a copy
        is processed)
    @return: new argument list with explicit values inserted
    NOTE(review): several lines of this function (loop header,
    try/except wrappers, initialisations and the final return) were
    lost in extraction; the surviving lines are preserved verbatim.
    """
    jobs_opts = ("-j", "--jobs")
    # Walk the arguments left-to-right by popping from the end of a
    # reversed copy of the input list.
    arg_stack = args[:]
    arg_stack.reverse()
        arg = arg_stack.pop()
        # True for a short-option cluster containing "j" (e.g. "-aj3"),
        # but not for "--jobs"-style long options.
        short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
        if not (short_job_opt or arg in jobs_opts):
            # Not a jobs option: pass it through untouched.
            new_args.append(arg)
            # Insert an empty placeholder in order to
            # satisfy the requirements of optparse.
            new_args.append("--jobs")
        if short_job_opt and len(arg) > 2:
            if arg[:2] == "-j":
                # "-jN" form: the count is embedded in the option itself.
                job_count = int(arg[2:])
                saved_opts = arg[2:]
                # Clustered short options: strip the "j", re-emit the rest.
                saved_opts = arg[1:].replace("j", "")
        if job_count is None and arg_stack:
                # The following argument may be a standalone job count.
                job_count = int(arg_stack[-1])
            # Discard the job count from the stack
            # since we're consuming it here.
        if job_count is None:
            # unlimited number of jobs
            new_args.append("True")
            new_args.append(str(job_count))
        if saved_opts is not None:
            # Re-emit short options that were clustered with "j".
            new_args.append("-" + saved_opts)
def parse_opts(tmpcmdline, silent=False):
    """
    Parse the emerge command line with optparse.

    @param tmpcmdline: argument list (sys.argv[1:] style)
    @param silent: when True, suppress warnings about invalid values
        (used for the early pre-parse pass)
    @return: (myaction, myopts, myfiles) tuple
    NOTE(review): many lines (option-table keys, try/except wrappers,
    else branches) were lost in extraction; surviving lines are kept
    verbatim.
    """
    global actions, options, shortmapping
    # Long options that are pure aliases for other long options.
    longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
    # Options that take an argument; keys/structure partially elided.
    argument_options = {
        "help":"specify the location for portage configuration files",
        "help":"enable or disable color output",
        "choices":("y", "n")
        "help" : "Specifies the number of packages to build " + \
        "--load-average": {
        "help" :"Specifies that no new builds should be started " + \
            "if there are other builds running and the load average " + \
            "is at least LOAD (a floating-point number).",
        "help":"include unnecessary build time dependencies",
        "choices":("y", "n")
        "help":"specify conditions to trigger package reinstallation",
        "choices":["changed-use"]
    from optparse import OptionParser
    parser = OptionParser()
    # emerge implements its own --help action, so drop optparse's.
    if parser.has_option("--help"):
        parser.remove_option("--help")
    # Register every action / flag / short option / alias as a
    # store_true option with a snake_case destination name.
    for action_opt in actions:
        parser.add_option("--" + action_opt, action="store_true",
            dest=action_opt.replace("-", "_"), default=False)
    for myopt in options:
        parser.add_option(myopt, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for shortopt, longopt in shortmapping.iteritems():
        parser.add_option("-" + shortopt, action="store_true",
            dest=longopt.lstrip("--").replace("-", "_"), default=False)
    for myalias, myopt in longopt_aliases.iteritems():
        parser.add_option(myalias, action="store_true",
            dest=myopt.lstrip("--").replace("-", "_"), default=False)
    for myopt, kwargs in argument_options.iteritems():
        parser.add_option(myopt,
            dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
    # Give optional args (e.g. bare --jobs) explicit values first,
    # since optparse cannot handle optional option-arguments itself.
    tmpcmdline = insert_optional_args(tmpcmdline)
    myoptions, myargs = parser.parse_args(args=tmpcmdline)
    # Validate --jobs: "True" means unlimited, otherwise an integer.
    if myoptions.jobs == "True":
        jobs = int(myoptions.jobs)
    if jobs is not True and \
        writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
            (myoptions.jobs,), noiselevel=-1)
    myoptions.jobs = jobs
    # Validate --load-average: must parse as a positive float.
    if myoptions.load_average:
        load_average = float(myoptions.load_average)
        if load_average <= 0.0:
            load_average = None
            writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
                (myoptions.load_average,), noiselevel=-1)
        myoptions.load_average = load_average
    # Transfer parsed values back into the "--opt" keyed dict form
    # that the rest of emerge consumes.
    for myopt in options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
            myopts[myopt] = True
    for myopt in argument_options:
        v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
    # --searchdesc implies the search action.
    if myoptions.searchdesc:
        myoptions.search = True
    # Exactly one action may be requested; complain otherwise.
    for action_opt in actions:
        v = getattr(myoptions, action_opt.replace("-", "_"))
            multiple_actions(myaction, action_opt)
            myaction = action_opt
    return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
    """Run config validation for the vartree settings of every root."""
    for root in trees:
        root_settings = trees[root]["vartree"].settings
        root_settings.validate()
def clear_caches(trees):
    """Release memoized state held by the per-root package databases and
    by portage's global directory cache (used to reduce memory footprint
    once dependency calculation is finished)."""
    for root_trees in trees.itervalues():
        portdbapi = root_trees["porttree"].dbapi
        portdbapi.melt()
        portdbapi._aux_cache.clear()
        bindbapi = root_trees["bintree"].dbapi
        bindbapi._aux_cache.clear()
        bindbapi._clear_cache()
        root_trees["vartree"].dbapi.linkmap._clear_cache()
    portage.dircache.clear()
def load_emerge_config(trees=None):
    """
    Create/refresh the portage configuration used by emerge.

    @param trees: existing trees dict to reuse, or None to build fresh
    @return: (settings, trees, mtimedb) tuple
    NOTE(review): the kwargs initialisation/assignment lines and the
    root-selection guard were lost in extraction; surviving lines are
    kept verbatim.
    """
    # Honor PORTAGE_CONFIGROOT / ROOT overrides from the environment.
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        v = os.environ.get(envvar, None)
        if v and v.strip():
    trees = portage.create_trees(trees=trees, **kwargs)
    # Attach a RootConfig (including set configuration) to each root.
    for root, root_trees in trees.iteritems():
        settings = root_trees["vartree"].settings
        setconfig = load_default_config(settings, root_trees)
        root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
    settings = trees["/"]["vartree"].settings
    # Presumably prefers a non-"/" root's settings when ROOT is set --
    # the guard line was elided; verify against upstream.
    for myroot in trees:
            settings = trees[myroot]["vartree"].settings
    # Global mtime database; path is derived from portage.CACHE_PATH.
    mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
    mtimedb = portage.MtimeDB(mtimedbfile)
    return settings, trees, mtimedb
def adjust_config(myopts, settings):
    """Make emerge specific adjustments to the config.

    Mutates *settings* in place (values are also backed up so later
    config regeneration keeps them).  NOTE(review): several `try:` and
    `else:` lines were lost in extraction; surviving lines are kept
    verbatim.
    """
    # To enhance usability, make some vars case insensitive by forcing them to
    for myvar in ("AUTOCLEAN", "NOCOLOR"):
        if myvar in settings:
            settings[myvar] = settings[myvar].lower()
            settings.backup_changes(myvar)
    # Kill noauto as it will break merges otherwise.
    if "noauto" in settings.features:
        while "noauto" in settings.features:
            settings.features.remove("noauto")
        settings["FEATURES"] = " ".join(settings.features)
        settings.backup_changes("FEATURES")
    # Parse CLEAN_DELAY, falling back to the module default on error.
        CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
            settings["CLEAN_DELAY"], noiselevel=-1)
    settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
    settings.backup_changes("CLEAN_DELAY")
    # Parse EMERGE_WARNING_DELAY the same way (default 10 seconds).
    EMERGE_WARNING_DELAY = 10
        EMERGE_WARNING_DELAY = int(settings.get(
            "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
            settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
    settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
    settings.backup_changes("EMERGE_WARNING_DELAY")
    # Propagate --quiet / --verbose into the build environment.
    if "--quiet" in myopts:
        settings["PORTAGE_QUIET"]="1"
        settings.backup_changes("PORTAGE_QUIET")
    if "--verbose" in myopts:
        settings["PORTAGE_VERBOSE"] = "1"
        settings.backup_changes("PORTAGE_VERBOSE")
    # Set so that configs will be merged regardless of remembered status
    if ("--noconfmem" in myopts):
        settings["NOCONFMEM"]="1"
        settings.backup_changes("NOCONFMEM")
    # Set various debug markers... They should be merged somehow.
        PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
        if PORTAGE_DEBUG not in (0, 1):
            portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
                PORTAGE_DEBUG, noiselevel=-1)
            portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
    except ValueError, e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
            settings["PORTAGE_DEBUG"], noiselevel=-1)
    # --debug forces PORTAGE_DEBUG on.
    if "--debug" in myopts:
        settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
        settings.backup_changes("PORTAGE_DEBUG")
    # Enable color unless NOCOLOR explicitly disables it.
    if settings.get("NOCOLOR") not in ("yes","true"):
        portage.output.havecolor = 1
    """The explicit --color < y | n > option overrides the NOCOLOR environment
    variable and stdout auto-detection."""
    if "--color" in myopts:
        if "y" == myopts["--color"]:
            portage.output.havecolor = 1
            settings["NOCOLOR"] = "false"
            portage.output.havecolor = 0
            settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
    elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
        # stdout is not a terminal: disable color automatically.
        portage.output.havecolor = 0
        settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
def apply_priorities(settings):
    # Apply process scheduling priorities from the config.
    # NOTE(review): the body was lost in extraction -- presumably calls
    # to the nice() and ionice() helpers defined below; verify upstream.
def nice(settings):
    """Renice the current process per PORTAGE_NICENESS (default "0").
    Failures (bad value, insufficient permission) are reported via
    eerror rather than raised.
    NOTE(review): the enclosing `try:` line was lost in extraction.
    """
        os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
    except (OSError, ValueError), e:
        out = portage.output.EOutput()
        out.eerror("Failed to change nice value to '%s'" % \
            settings["PORTAGE_NICENESS"])
        out.eerror("%s\n" % str(e))
def ionice(settings):
    """Run the user-configured PORTAGE_IONICE_COMMAND (if any) to adjust
    the I/O priority of this emerge process.
    NOTE(review): guard lines, the `try:` and a `return` were lost in
    extraction; surviving lines are kept verbatim.
    """
    ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
        ionice_cmd = shlex.split(ionice_cmd)
    # Substitute \${PID} in the configured command with our own pid.
    from portage.util import varexpand
    variables = {"PID" : str(os.getpid())}
    cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
        rval = portage.process.spawn(cmd, env=os.environ)
    except portage.exception.CommandNotFound:
        # The OS kernel probably doesn't support ionice,
        # so return silently.
    # Any nonzero exit status is reported but not fatal.
    if rval != os.EX_OK:
        out = portage.output.EOutput()
        out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
        out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
def display_missing_pkg_set(root_config, set_name):
    """Print an error listing the sets that do exist when *set_name*
    could not be satisfied.
    NOTE(review): the `msg` list initialisation was lost in extraction.
    """
    msg.append(("emerge: There are no sets to satisfy '%s'. " + \
        "The following sets exist:") % \
        colorize("INFORM", set_name))
    # Enumerate all known sets for this root, sorted for readability.
    for s in sorted(root_config.sets):
        msg.append("    %s" % s)
    writemsg_level("".join("%s\n" % l for l in msg),
        level=logging.ERROR, noiselevel=-1)
def expand_set_arguments(myfiles, myaction, root_config):
    """
    Expand @set arguments on the emerge command line into atoms (or
    keep them as set references for actions that expand sets later).
    Supports per-set options via name(opt=val,...) syntax and simple
    left-to-right set algebra with the &@ / -@ / +@ operators.

    @return: (newargs, retval) tuple
    NOTE(review): numerous control-flow lines (loop headers, try/else
    branches, initialisations, returns) were lost in extraction;
    surviving lines are kept verbatim.
    """
    setconfig = root_config.setconfig
    sets = setconfig.getSets()
    # In order to know exactly which atoms/sets should be added to the
    # world file, the depgraph performs set expansion later. It will get
    # confused about where the atoms came from if it's not allowed to
    # expand them itself.
    do_not_expand = (None, )
        # Bare "system"/"world" arguments are normalised to @-prefixed form.
        if a in ("system", "world"):
            newargs.append(SETPREFIX+a)
    # separators for set arguments
    # WARNING: all operators must be of equal length
    DIFF_OPERATOR = "-@"
    UNION_OPERATOR = "+@"
    # First pass: parse and strip "name(arg,...)" option blocks from set
    # arguments, feeding them to setconfig.update().
    for i in range(0, len(myfiles)):
        if myfiles[i].startswith(SETPREFIX):
            x = myfiles[i][len(SETPREFIX):]
                start = x.find(ARG_START)
                end = x.find(ARG_END)
                if start > 0 and start < end:
                    namepart = x[:start]
                    argpart = x[start+1:end]
                    # TODO: implement proper quoting
                    args = argpart.split(",")
                            # "k=v" entries become explicit options ...
                            k, v = a.split("=", 1)
                            # ... bare entries become boolean "True" flags.
                            options[a] = "True"
                    setconfig.update(namepart, options)
                    newset += (x[:start-len(namepart)]+namepart)
                    x = x[end+len(ARG_END):]
            myfiles[i] = SETPREFIX+newset
    # Reload sets now that per-set options may have changed.
    sets = setconfig.getSets()
    # display errors that occured while loading the SetConfig instance
    for e in setconfig.errors:
        print colorize("BAD", "Error during set creation: %s" % e)
    # emerge relies on the existance of sets with names "world" and "system"
    required_sets = ("world", "system")
    for s in required_sets:
            missing_sets.append(s)
    # Build a human-readable list of the missing required sets.
    if len(missing_sets) > 2:
        missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
        missing_sets_str += ', and "%s"' % missing_sets[-1]
    elif len(missing_sets) == 2:
        missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
        missing_sets_str = '"%s"' % missing_sets[-1]
    msg = ["emerge: incomplete set configuration, " + \
        "missing set(s): %s" % missing_sets_str]
        msg.append("        sets defined: %s" % ", ".join(sets))
    msg.append("        This usually means that '%s'" % \
        (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
    msg.append("        is missing or corrupt.")
        writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
    unmerge_actions = ("unmerge", "prune", "clean", "depclean")
        if a.startswith(SETPREFIX):
            # support simple set operations (intersection, difference and union)
            # on the commandline. Expressions are evaluated strictly left-to-right
            if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
                expression = a[len(SETPREFIX):]
                # Peel operators off from the right; operands/operators are
                # collected so evaluation below runs left-to-right.
                while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
                    is_pos = expression.rfind(IS_OPERATOR)
                    diff_pos = expression.rfind(DIFF_OPERATOR)
                    union_pos = expression.rfind(UNION_OPERATOR)
                    op_pos = max(is_pos, diff_pos, union_pos)
                    s1 = expression[:op_pos]
                    s2 = expression[op_pos+len(IS_OPERATOR):]
                    op = expression[op_pos:op_pos+len(IS_OPERATOR)]
                        display_missing_pkg_set(root_config, s2)
                    expr_sets.insert(0, s2)
                    expr_ops.insert(0, op)
                if not expression in sets:
                    display_missing_pkg_set(root_config, expression)
                expr_sets.insert(0, expression)
                # Evaluate the collected operands left-to-right.
                result = set(setconfig.getSetAtoms(expression))
                for i in range(0, len(expr_ops)):
                    s2 = setconfig.getSetAtoms(expr_sets[i+1])
                    if expr_ops[i] == IS_OPERATOR:
                        result.intersection_update(s2)
                    elif expr_ops[i] == DIFF_OPERATOR:
                        result.difference_update(s2)
                    elif expr_ops[i] == UNION_OPERATOR:
                        raise NotImplementedError("unknown set operator %s" % expr_ops[i])
                newargs.extend(result)
                # Plain @set argument (no operators).
                s = a[len(SETPREFIX):]
                    display_missing_pkg_set(root_config, s)
                setconfig.active.append(s)
                    set_atoms = setconfig.getSetAtoms(s)
                except portage.exception.PackageSetNotFound, e:
                    writemsg_level(("emerge: the given set '%s' " + \
                        "contains a non-existent set named '%s'.\n") % \
                        (s, e), level=logging.ERROR, noiselevel=-1)
                # Unmerge-style actions require the set to support unmerge.
                if myaction in unmerge_actions and \
                    not sets[s].supportsOperation("unmerge"):
                    sys.stderr.write("emerge: the given set '%s' does " % s + \
                        "not support unmerge operations\n")
                elif not set_atoms:
                    print "emerge: '%s' is an empty set" % s
                elif myaction not in do_not_expand:
                    # Expand the set into its atoms now.
                    newargs.extend(set_atoms)
                    # Leave expansion to the depgraph (see note above).
                    newargs.append(SETPREFIX+s)
                for e in sets[s].errors:
    return (newargs, retval)
def repo_name_check(trees):
    """Warn about repositories that lack a profiles/repo_name entry.

    @return: True when at least one repo_name entry is missing
    NOTE(review): the inner `for r in repos:`-style loop header and the
    `msg` list initialisation were lost in extraction.
    """
    missing_repo_names = set()
    for root, root_trees in trees.iteritems():
        if "porttree" in root_trees:
            portdb = root_trees["porttree"].dbapi
            # Start by assuming every configured tree is missing a name,
            # then discard those that registered a repository.
            missing_repo_names.update(portdb.porttrees)
            repos = portdb.getRepositories()
                missing_repo_names.discard(portdb.getRepositoryPath(r))
            if portdb.porttree_root in missing_repo_names and \
                not os.path.exists(os.path.join(
                portdb.porttree_root, "profiles")):
                # This is normal if $PORTDIR happens to be empty,
                # so don't warn about it.
                missing_repo_names.remove(portdb.porttree_root)
    if missing_repo_names:
        msg.append("WARNING: One or more repositories " + \
            "have missing repo_name entries:")
        for p in missing_repo_names:
            msg.append("\t%s/profiles/repo_name" % (p,))
        msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
            "should be a plain text file containing a unique " + \
            "name for the repository on the first line.", 70))
        writemsg_level("".join("%s\n" % l for l in msg),
            level=logging.WARNING, noiselevel=-1)
    return bool(missing_repo_names)
def config_protect_check(trees):
    """Warn (per root) when CONFIG_PROTECT is empty, since that disables
    configuration-file protection during merges.
    NOTE(review): the guard line before the root-name suffix was lost in
    extraction (presumably `if root != "/":` -- verify upstream).
    """
    for root, root_trees in trees.iteritems():
        if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
            msg = "!!! CONFIG_PROTECT is empty"
                msg += " for '%s'" % root
            writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
    """Report that a short package name matches several category/package
    pairs.  In --quiet mode just list the candidates; otherwise run a
    search so full descriptions are shown.
    NOTE(review): a `return`, the closing paren of the
    insert_category_into_atom() call and per-candidate search output
    lines were lost in extraction.
    """
    if "--quiet" in myopts:
        print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
        print "!!! one of the following fully-qualified ebuild names instead:\n"
        for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
            print "    " + colorize("INFORM", cp)
    # Non-quiet mode: reuse the search machinery for rich output.
    s = search(root_config, spinner, "--searchdesc" in myopts,
        "--quiet" not in myopts, "--usepkg" in myopts,
        "--usepkgonly" in myopts)
    null_cp = portage.dep_getkey(insert_category_into_atom(
    cat, atom_pn = portage.catsplit(null_cp)
    s.searchkey = atom_pn
    for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
    print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
    print "!!! one of the above fully-qualified ebuild names instead.\n"
def profile_check(trees, myaction, myopts):
    """Verify that every root has a valid profile; actions that work
    without a profile (info/sync/--version/--help) are exempt.

    @return: an exit-code-style int (os.EX_OK on success)
    NOTE(review): the early `return os.EX_OK` lines and the per-root
    `continue`/`return` lines were lost in extraction.
    """
    if myaction in ("info", "sync"):
    elif "--version" in myopts or "--help" in myopts:
    for root, root_trees in trees.iteritems():
        if root_trees["root_config"].settings.profiles:
        # generate some profile related warning messages
        validate_ebuild_environment(trees)
        msg = "If you have just changed your profile configuration, you " + \
            "should revert back to the previous configuration. Due to " + \
            "your current profile being invalid, allowed actions are " + \
            "limited to --help, --info, --sync, and --version."
        writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
            level=logging.ERROR, noiselevel=-1)
15277 global portage # NFC why this is necessary now - genone
15278 portage._disable_legacy_globals()
15279 # Disable color until we're sure that it should be enabled (after
15280 # EMERGE_DEFAULT_OPTS has been parsed).
15281 portage.output.havecolor = 0
15282 # This first pass is just for options that need to be known as early as
15283 # possible, such as --config-root. They will be parsed again later,
15284 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15285 # the value of --config-root).
15286 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15287 if "--debug" in myopts:
15288 os.environ["PORTAGE_DEBUG"] = "1"
15289 if "--config-root" in myopts:
15290 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15292 # Portage needs to ensure a sane umask for the files it creates.
15294 settings, trees, mtimedb = load_emerge_config()
15295 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15296 rval = profile_check(trees, myaction, myopts)
15297 if rval != os.EX_OK:
15300 if portage._global_updates(trees, mtimedb["updates"]):
15302 # Reload the whole config from scratch.
15303 settings, trees, mtimedb = load_emerge_config(trees=trees)
15304 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15306 xterm_titles = "notitles" not in settings.features
15309 if "--ignore-default-opts" not in myopts:
15310 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15311 tmpcmdline.extend(sys.argv[1:])
15312 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15314 if "--digest" in myopts:
15315 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15316 # Reload the whole config from scratch so that the portdbapi internal
15317 # config is updated with new FEATURES.
15318 settings, trees, mtimedb = load_emerge_config(trees=trees)
15319 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15321 for myroot in trees:
15322 mysettings = trees[myroot]["vartree"].settings
15323 mysettings.unlock()
15324 adjust_config(myopts, mysettings)
15325 if '--pretend' not in myopts and myaction in \
15326 (None, 'clean', 'depclean', 'prune', 'unmerge'):
15327 mysettings["PORTAGE_COUNTER_HASH"] = \
15328 trees[myroot]["vartree"].dbapi._counter_hash()
15329 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15331 del myroot, mysettings
15333 apply_priorities(settings)
15335 spinner = stdout_spinner()
15336 if "candy" in settings.features:
15337 spinner.update = spinner.update_scroll
15339 if "--quiet" not in myopts:
15340 portage.deprecated_profile_check(settings=settings)
15341 repo_name_check(trees)
15342 config_protect_check(trees)
15344 eclasses_overridden = {}
15345 for mytrees in trees.itervalues():
15346 mydb = mytrees["porttree"].dbapi
15347 # Freeze the portdbapi for performance (memoize all xmatch results).
15349 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15352 if eclasses_overridden and \
15353 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15354 prefix = bad(" * ")
15355 if len(eclasses_overridden) == 1:
15356 writemsg(prefix + "Overlay eclass overrides " + \
15357 "eclass from PORTDIR:\n", noiselevel=-1)
15359 writemsg(prefix + "Overlay eclasses override " + \
15360 "eclasses from PORTDIR:\n", noiselevel=-1)
15361 writemsg(prefix + "\n", noiselevel=-1)
15362 for eclass_name in sorted(eclasses_overridden):
15363 writemsg(prefix + " '%s/%s.eclass'\n" % \
15364 (eclasses_overridden[eclass_name], eclass_name),
15366 writemsg(prefix + "\n", noiselevel=-1)
15367 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15368 "because it will trigger invalidation of cached ebuild metadata " + \
15369 "that is distributed with the portage tree. If you must " + \
15370 "override eclasses from PORTDIR then you are advised to add " + \
15371 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15372 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15373 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15374 "you would like to disable this warning."
15375 from textwrap import wrap
15376 for line in wrap(msg, 72):
15377 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15379 if "moo" in myfiles:
15382 Larry loves Gentoo (""" + platform.system() + """)
15384 _______________________
15385 < Have you mooed today? >
15386 -----------------------
15396 ext = os.path.splitext(x)[1]
15397 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15398 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15401 root_config = trees[settings["ROOT"]]["root_config"]
15402 if myaction == "list-sets":
15403 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15407 # only expand sets for actions taking package arguments
15408 oldargs = myfiles[:]
15409 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15410 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15411 if retval != os.EX_OK:
15414 # Need to handle empty sets specially, otherwise emerge will react
15415 # with the help message for empty argument lists
15416 if oldargs and not myfiles:
15417 print "emerge: no targets left after set expansion"
15420 if ("--tree" in myopts) and ("--columns" in myopts):
15421 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15424 if ("--quiet" in myopts):
15425 spinner.update = spinner.update_quiet
15426 portage.util.noiselimit = -1
15428 # Always create packages if FEATURES=buildpkg
15429 # Imply --buildpkg if --buildpkgonly
15430 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15431 if "--buildpkg" not in myopts:
15432 myopts["--buildpkg"] = True
15434 # Always try and fetch binary packages if FEATURES=getbinpkg
15435 if ("getbinpkg" in settings.features):
15436 myopts["--getbinpkg"] = True
15438 if "--buildpkgonly" in myopts:
15439 # --buildpkgonly will not merge anything, so
15440 # it cancels all binary package options.
15441 for opt in ("--getbinpkg", "--getbinpkgonly",
15442 "--usepkg", "--usepkgonly"):
15443 myopts.pop(opt, None)
15445 if "--fetch-all-uri" in myopts:
15446 myopts["--fetchonly"] = True
15448 if "--skipfirst" in myopts and "--resume" not in myopts:
15449 myopts["--resume"] = True
15451 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15452 myopts["--usepkgonly"] = True
15454 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15455 myopts["--getbinpkg"] = True
15457 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15458 myopts["--usepkg"] = True
15460 # Also allow -K to apply --usepkg/-k
15461 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15462 myopts["--usepkg"] = True
15464 # Allow -p to remove --ask
15465 if ("--pretend" in myopts) and ("--ask" in myopts):
15466 print ">>> --pretend disables --ask... removing --ask from options."
15467 del myopts["--ask"]
15469 # forbid --ask when not in a terminal
15470 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15471 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15472 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15476 if settings.get("PORTAGE_DEBUG", "") == "1":
15477 spinner.update = spinner.update_quiet
15479 if "python-trace" in settings.features:
15480 import portage.debug
15481 portage.debug.set_trace(True)
15483 if not ("--quiet" in myopts):
15484 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15485 spinner.update = spinner.update_basic
15487 if myaction == 'version':
15488 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15489 settings.profile_path, settings["CHOST"],
15490 trees[settings["ROOT"]]["vartree"].dbapi)
15492 elif "--help" in myopts:
15493 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15496 if "--debug" in myopts:
15497 print "myaction", myaction
15498 print "myopts", myopts
15500 if not myaction and not myfiles and "--resume" not in myopts:
15501 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15504 pretend = "--pretend" in myopts
15505 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15506 buildpkgonly = "--buildpkgonly" in myopts
15508 # check if root user is the current user for the actions where emerge needs this
15509 if portage.secpass < 2:
15510 # We've already allowed "--version" and "--help" above.
15511 if "--pretend" not in myopts and myaction not in ("search","info"):
15512 need_superuser = not \
15514 (buildpkgonly and secpass >= 1) or \
15515 myaction in ("metadata", "regen") or \
15516 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15517 if portage.secpass < 1 or \
15520 access_desc = "superuser"
15522 access_desc = "portage group"
15523 # Always show portage_group_warning() when only portage group
15524 # access is required but the user is not in the portage group.
15525 from portage.data import portage_group_warning
15526 if "--ask" in myopts:
15527 myopts["--pretend"] = True
15528 del myopts["--ask"]
15529 print ("%s access is required... " + \
15530 "adding --pretend to options.\n") % access_desc
15531 if portage.secpass < 1 and not need_superuser:
15532 portage_group_warning()
15534 sys.stderr.write(("emerge: %s access is " + \
15535 "required.\n\n") % access_desc)
15536 if portage.secpass < 1 and not need_superuser:
15537 portage_group_warning()
15540 disable_emergelog = False
15541 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15543 disable_emergelog = True
15545 if myaction in ("search", "info"):
15546 disable_emergelog = True
15547 if disable_emergelog:
15548 """ Disable emergelog for everything except build or unmerge
15549 operations. This helps minimize parallel emerge.log entries that can
15550 confuse log parsers. We especially want it disabled during
15551 parallel-fetch, which uses --resume --fetchonly."""
15553 def emergelog(*pargs, **kargs):
15556 if not "--pretend" in myopts:
15557 emergelog(xterm_titles, "Started emerge on: "+\
15558 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15561 myelogstr=" ".join(myopts)
15563 myelogstr+=" "+myaction
15565 myelogstr += " " + " ".join(oldargs)
15566 emergelog(xterm_titles, " *** emerge " + myelogstr)
15569 def emergeexitsig(signum, frame):
15570 signal.signal(signal.SIGINT, signal.SIG_IGN)
15571 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15572 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15573 sys.exit(100+signum)
15574 signal.signal(signal.SIGINT, emergeexitsig)
15575 signal.signal(signal.SIGTERM, emergeexitsig)
15578 """This gets out final log message in before we quit."""
15579 if "--pretend" not in myopts:
15580 emergelog(xterm_titles, " *** terminating.")
15581 if "notitles" not in settings.features:
15583 portage.atexit_register(emergeexit)
15585 if myaction in ("config", "metadata", "regen", "sync"):
15586 if "--pretend" in myopts:
15587 sys.stderr.write(("emerge: The '%s' action does " + \
15588 "not support '--pretend'.\n") % myaction)
15591 if "sync" == myaction:
15592 return action_sync(settings, trees, mtimedb, myopts, myaction)
15593 elif "metadata" == myaction:
15594 action_metadata(settings, portdb, myopts)
15595 elif myaction=="regen":
15596 validate_ebuild_environment(trees)
15597 return action_regen(settings, portdb, myopts.get("--jobs"),
15598 myopts.get("--load-average"))
15600 elif "config"==myaction:
15601 validate_ebuild_environment(trees)
15602 action_config(settings, trees, myopts, myfiles)
15605 elif "search"==myaction:
15606 validate_ebuild_environment(trees)
15607 action_search(trees[settings["ROOT"]]["root_config"],
15608 myopts, myfiles, spinner)
15609 elif myaction in ("clean", "unmerge") or \
15610 (myaction == "prune" and "--nodeps" in myopts):
15611 validate_ebuild_environment(trees)
15613 # Ensure atoms are valid before calling unmerge().
15614 # For backward compat, leading '=' is not required.
15616 if is_valid_package_atom(x) or \
15617 is_valid_package_atom("=" + x):
15620 msg.append("'%s' is not a valid package atom." % (x,))
15621 msg.append("Please check ebuild(5) for full details.")
15622 writemsg_level("".join("!!! %s\n" % line for line in msg),
15623 level=logging.ERROR, noiselevel=-1)
15626 # When given a list of atoms, unmerge
15627 # them in the order given.
15628 ordered = myaction == "unmerge"
15629 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15630 mtimedb["ldpath"], ordered=ordered):
15631 if not (buildpkgonly or fetchonly or pretend):
15632 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15634 elif myaction in ("depclean", "info", "prune"):
15636 # Ensure atoms are valid before calling unmerge().
15637 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15640 if is_valid_package_atom(x):
15642 valid_atoms.append(
15643 portage.dep_expand(x, mydb=vardb, settings=settings))
15644 except portage.exception.AmbiguousPackageName, e:
15645 msg = "The short ebuild name \"" + x + \
15646 "\" is ambiguous. Please specify " + \
15647 "one of the following " + \
15648 "fully-qualified ebuild names instead:"
15649 for line in textwrap.wrap(msg, 70):
15650 writemsg_level("!!! %s\n" % (line,),
15651 level=logging.ERROR, noiselevel=-1)
15653 writemsg_level(" %s\n" % colorize("INFORM", i),
15654 level=logging.ERROR, noiselevel=-1)
15655 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15659 msg.append("'%s' is not a valid package atom." % (x,))
15660 msg.append("Please check ebuild(5) for full details.")
15661 writemsg_level("".join("!!! %s\n" % line for line in msg),
15662 level=logging.ERROR, noiselevel=-1)
15665 if myaction == "info":
15666 return action_info(settings, trees, myopts, valid_atoms)
15668 validate_ebuild_environment(trees)
15669 action_depclean(settings, trees, mtimedb["ldpath"],
15670 myopts, myaction, valid_atoms, spinner)
15671 if not (buildpkgonly or fetchonly or pretend):
15672 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15673 # "update", "system", or just process files:
15675 validate_ebuild_environment(trees)
15676 if "--pretend" not in myopts:
15677 display_news_notification(root_config, myopts)
15678 retval = action_build(settings, trees, mtimedb,
15679 myopts, myaction, myfiles, spinner)
15680 root_config = trees[settings["ROOT"]]["root_config"]
15681 post_emerge(root_config, myopts, mtimedb, retval)