2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
7 from collections import deque
27 from os import path as osp
28 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
35 import portage.xpak, commands, errno, re, socket, time
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37 nc_len, red, teal, turquoise, xtermTitle, \
38 xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
47 portage.dep._dep_check_strict = True
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import cmp_sort_key, writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
61 import cPickle as pickle
66 from cStringIO import StringIO
68 from StringIO import StringIO
# Progress spinner written to stdout while long operations run (dependency
# calculation, metadata cache regeneration).  One of the update_* methods is
# bound to self.update; callers just invoke spinner.update() repeatedly.
# NOTE(review): this listing is gapped -- the scroll_msgs list header, the
# __init__ signature and several return statements are not visible here.
70 class stdout_spinner(object):
# Humorous messages used by update_scroll; one is chosen per run.
72 "Gentoo Rocks ("+platform.system()+")",
73 "Thank you for using Gentoo. :)",
74 "Are you actually trying to read this?",
75 "How many times have you stared at this?",
76 "We are generating the cache right now",
77 "You are paying too much attention.",
78 "A theory is better than its explanation.",
79 "Phasers locked on target, Captain.",
80 "Thrashing is just virtual crashing.",
81 "To be is to program.",
82 "Real Users hate Real Programmers.",
83 "When all else fails, read the instructions.",
84 "Functionality breeds Contempt.",
85 "The future lies ahead.",
86 "3.1415926535897932384626433832795028841971694",
87 "Sometimes insanity is the only alternative.",
88 "Inaccuracy saves a world of explanation.",
# Characters cycled by update_twirl.
91 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default update style is the twirl; pick a scroll message pseudo-randomly
# from the current time.
95 self.update = self.update_twirl
96 self.scroll_sequence = self.scroll_msgs[
97 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between terminal writes (throttle; see _return_early).
99 self.min_display_latency = 0.05
101 def _return_early(self):
# Rate limiter shared by the update_* methods.
103 Flushing output to the tty too frequently wastes cpu time. Therefore,
104 each update* method should return without doing any output when this
107 cur_time = time.time()
108 if cur_time - self.last_update < self.min_display_latency:
110 self.last_update = cur_time
113 def update_basic(self):
# Prints a dot every 100th call; spinpos wraps at 500.
114 self.spinpos = (self.spinpos + 1) % 500
115 if self._return_early():
117 if (self.spinpos % 100) == 0:
118 if self.spinpos == 0:
119 sys.stdout.write(". ")
121 sys.stdout.write(".")
124 def update_scroll(self):
# Scrolls the chosen message left then right; spinpos counts through
# twice the message length to cover both directions.
125 if self._return_early():
127 if(self.spinpos >= len(self.scroll_sequence)):
128 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
129 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135 def update_twirl(self):
# Classic /-\| twirl, overwriting the previous character with \b\b.
136 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
137 if self._return_early():
139 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op used with --quiet / --nospinner.
142 def update_quiet(self):
# Interactive yes/no style prompt used by --ask.  NOTE(review): listing is
# gapped -- the default-colours branch header, the return statements and the
# KeyboardInterrupt->SystemExit conversion are not fully visible here.
145 def userquery(prompt, responses=None, colours=None):
146 """Displays a prompt and a set of responses, then waits for a response
147 which is checked against the responses and the first to match is
148 returned. An empty response will match the first value in responses. The
149 input buffer is *not* cleared prior to the prompt!
152 responses: a List of Strings.
153 colours: a List of Functions taking and returning a String, used to
154 process the responses for display. Typically these will be functions
155 like red() but could be e.g. lambda x: "DisplayString".
156 If responses is omitted, defaults to ["Yes", "No"], [green, red].
157 If only colours is omitted, defaults to [bold, ...].
159 Returns a member of the List responses. (If called without optional
160 arguments, returns "Yes" or "No".)
161 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163 if responses is None:
164 responses = ["Yes", "No"]
166 create_color_func("PROMPT_CHOICE_DEFAULT"),
167 create_color_func("PROMPT_CHOICE_OTHER")
169 elif colours is None:
# Cycle the supplied colour functions so there is one per response.
171 colours=(colours*len(responses))[:len(responses)]
175 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
176 for key in responses:
177 # An empty response will match the first value in responses.
# Prefix match, case-insensitive.
178 if response.upper()==key[:len(response)].upper():
180 print "Sorry, response '%s' not understood." % response,
182 except (EOFError, KeyboardInterrupt):
# Command-line vocabulary tables for emerge.
# actions: recognized action words; options: recognized long flags;
# final dict maps single-letter short options to their long forms.
# NOTE(review): listing is gapped -- the closing brackets and the headers of
# the options list and the short-option dict are not visible here.
185 actions = frozenset([
186 "clean", "config", "depclean",
187 "info", "list-sets", "metadata",
188 "prune", "regen", "search",
189 "sync", "unmerge", "version",
192 "--ask", "--alphabetical",
193 "--buildpkg", "--buildpkgonly",
194 "--changelog", "--columns",
199 "--fetchonly", "--fetch-all-uri",
200 "--getbinpkg", "--getbinpkgonly",
201 "--help", "--ignore-default-opts",
204 "--newuse", "--nocolor",
205 "--nodeps", "--noreplace",
206 "--nospinner", "--oneshot",
207 "--onlydeps", "--pretend",
208 "--quiet", "--resume",
209 "--searchdesc", "--selective",
213 "--usepkg", "--usepkgonly",
# Short option -> long option mapping (e.g. -p == --pretend).
220 "b":"--buildpkg", "B":"--buildpkgonly",
221 "c":"--clean", "C":"--unmerge",
222 "d":"--debug", "D":"--deep",
224 "f":"--fetchonly", "F":"--fetch-all-uri",
225 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "k":"--usepkg", "K":"--usepkgonly",
229 "n":"--noreplace", "N":"--newuse",
230 "o":"--onlydeps", "O":"--nodeps",
231 "p":"--pretend", "P":"--prune",
233 "s":"--search", "S":"--searchdesc",
236 "v":"--verbose", "V":"--version"
# Append a timestamped entry to /var/log/emerge.log and optionally update the
# xterm title with short_msg (prefixed with $HOSTNAME when set).  Errors are
# reported to stderr rather than raised.  NOTE(review): listing is gapped --
# the try/finally structure and the seek/close calls are not fully visible.
239 def emergelog(xterm_titles, mystr, short_msg=None):
240 if xterm_titles and short_msg:
241 if "HOSTNAME" in os.environ:
242 short_msg = os.environ["HOSTNAME"]+": "+short_msg
243 xtermTitle(short_msg)
245 file_path = "/var/log/emerge.log"
246 mylogfile = open(file_path, "a")
# Keep the log owned by the portage user/group.
247 portage.util.apply_secpass_permissions(file_path,
248 uid=portage.portage_uid, gid=portage.portage_gid,
252 mylock = portage.locks.lockfile(mylogfile)
253 # seek because we may have gotten held up by the lock.
254 # if so, we may not be positioned at the end of the file.
# Timestamp is epoch seconds truncated to 10 characters.
256 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
260 portage.locks.unlockfile(mylock)
# Best effort: logging failures must never abort the merge.
262 except (IOError,OSError,portage.exception.PortageException), e:
264 print >> sys.stderr, "emergelog():",e
# Print an abortable countdown before a risky action (e.g. unmerge).
# NOTE(review): listing is gapped -- the loop header and sleep are missing.
266 def countdown(secs=5, doing="Starting"):
268 print ">>> Waiting",secs,"seconds before starting..."
269 print ">>> (Control-C to abort)...\n"+doing+" in: ",
273 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
278 # formats a size given in bytes nicely
# Returns a human-readable kB string with thousands separators.
# NOTE(review): listing is gapped -- the basestring branch body, mycount
# computation and return statement are not visible here.
279 def format_size(mysize):
280 if isinstance(mysize, basestring):
282 if 0 != mysize % 1024:
283 # Always round up to the next kB so that it doesn't show 0 kB when
284 # some small file still needs to be fetched.
285 mysize += 1024 - mysize % 1024
286 mystr=str(mysize/1024)
# Insert a comma as thousands separator.
290 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version, trying in order: `gcc-config -c`,
# `${CHOST}-gcc -dumpversion`, then plain `gcc -dumpversion`.  Returns a
# string like "gcc-4.1.2", or "[unavailable]" after printing a warning.
294 def getgccversion(chost):
297 return: the current in-use gcc version
300 gcc_ver_command = 'gcc -dumpversion'
301 gcc_ver_prefix = 'gcc-'
303 gcc_not_found_error = red(
304 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
305 "!!! to update the environment of this terminal and possibly\n" +
306 "!!! other terminals also.\n"
# Preferred: ask gcc-config for the current profile (chost-version form).
309 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
310 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
311 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Fallback: query the CHOST-prefixed compiler directly.
313 mystatus, myoutput = commands.getstatusoutput(
314 chost + "-" + gcc_ver_command)
315 if mystatus == os.EX_OK:
316 return gcc_ver_prefix + myoutput
# Last resort: unprefixed gcc on PATH.
318 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
319 if mystatus == os.EX_OK:
320 return gcc_ver_prefix + myoutput
322 portage.writemsg(gcc_not_found_error, noiselevel=-1)
323 return "[unavailable]"
# Build the version banner shown by `emerge --version` / --info:
# "Portage <ver> (<profile>, <gcc>, <libc>, <kernel> <arch>)".
# NOTE(review): listing is gapped -- try/except scaffolding and the loop
# over libclist are not fully visible here.
325 def getportageversion(portdir, target_root, profile, chost, vardb):
326 profilever = "unavailable"
328 realpath = os.path.realpath(profile)
329 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
# Express the profile relative to $PORTDIR/profiles when possible.
330 if realpath.startswith(basepath):
331 profilever = realpath[1 + len(basepath):]
# Otherwise show the raw symlink target, prefixed with "!".
334 profilever = "!" + os.readlink(profile)
337 del realpath, basepath
# Collect installed libc providers (old virtual/glibc kept for compat).
340 libclist = vardb.match("virtual/libc")
341 libclist += vardb.match("virtual/glibc")
342 libclist = portage.util.unique_array(libclist)
344 xs=portage.catpkgsplit(x)
# Subsequent matches are comma-appended to the version string.
346 libcver+=","+"-".join(xs[1:])
348 libcver="-".join(xs[1:])
350 libcver="unavailable"
352 gccver = getgccversion(chost)
353 unameout=platform.release()+" "+platform.machine()
355 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge command-line options and the action into the parameter
# set consumed by depgraph.  NOTE(review): listing is gapped -- the early
# return for the remove action and the "--deep" add are not visible here.
357 def create_depgraph_params(myopts, myaction):
358 #configure emerge engine parameters
360 # self: include _this_ package regardless of if it is merged.
361 # selective: exclude the package if it is merged
362 # recurse: go into the dependencies
363 # deep: go into the dependencies of already merged packages
364 # empty: pretend nothing is merged
365 # complete: completely account for all known dependencies
366 # remove: build graph for use in removing packages
367 myparams = set(["recurse"])
369 if myaction == "remove":
370 myparams.add("remove")
371 myparams.add("complete")
# Any of these options implies skipping packages that are already merged.
374 if "--update" in myopts or \
375 "--newuse" in myopts or \
376 "--reinstall" in myopts or \
377 "--noreplace" in myopts:
378 myparams.add("selective")
# --emptytree overrides selective: treat nothing as merged.
379 if "--emptytree" in myopts:
380 myparams.add("empty")
381 myparams.discard("selective")
382 if "--nodeps" in myopts:
383 myparams.discard("recurse")
384 if "--deep" in myopts:
386 if "--complete-graph" in myopts:
387 myparams.add("complete")
390 # search functionality
# Implements `emerge --search` / --searchdesc.  Builds a fake portdb facade
# over the available databases (porttree, bintree, vartree) so installed and
# binary packages are searchable alongside ebuilds.  NOTE(review): this
# listing is heavily gapped -- class constants (e.g. VERSION_RELEASE), loop
# headers, returns and try/except scaffolding are not fully visible here.
391 class search(object):
402 def __init__(self, root_config, spinner, searchdesc,
403 verbose, usepkg, usepkgonly):
404 """Searches the available and installed packages for the supplied search key.
405 The list of available and installed packages is created at object instantiation.
406 This makes successive searches faster."""
407 self.settings = root_config.settings
408 self.vartree = root_config.trees["vartree"]
409 self.spinner = spinner
410 self.verbose = verbose
411 self.searchdesc = searchdesc
412 self.root_config = root_config
413 self.setconfig = root_config.setconfig
414 self.matches = {"pkg" : []}
# The fake portdb delegates these attributes to the _-prefixed methods
# below, which aggregate over every database in self._dbs.
419 self.portdb = fake_portdb
420 for attrib in ("aux_get", "cp_all",
421 "xmatch", "findname", "getFetchMap"):
422 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
426 portdb = root_config.trees["porttree"].dbapi
427 bindb = root_config.trees["bintree"].dbapi
428 vardb = root_config.trees["vartree"].dbapi
# Choose which databases participate, based on --usepkg/--usepkgonly.
430 if not usepkgonly and portdb._have_root_eclass_dir:
431 self._dbs.append(portdb)
433 if (usepkg or usepkgonly) and bindb.cp_all():
434 self._dbs.append(bindb)
436 self._dbs.append(vardb)
437 self._portdb = portdb
# Union of category/package names across all participating databases.
442 cp_all.update(db.cp_all())
443 return list(sorted(cp_all))
445 def _aux_get(self, *args, **kwargs):
448 return db.aux_get(*args, **kwargs)
453 def _findname(self, *args, **kwargs):
455 if db is not self._portdb:
456 # We don't want findname to return anything
457 # unless it's an ebuild in a portage tree.
458 # Otherwise, it's already built and we don't
461 func = getattr(db, "findname", None)
463 value = func(*args, **kwargs)
468 def _getFetchMap(self, *args, **kwargs):
470 func = getattr(db, "getFetchMap", None)
472 value = func(*args, **kwargs)
477 def _visible(self, db, cpv, metadata):
# Visibility check via a transient Package instance; installed packages
# come from the vartree, anything not from the real portdb is built.
478 installed = db is self.vartree.dbapi
479 built = installed or db is not self._portdb
482 pkg_type = "installed"
485 return visible(self.settings,
486 Package(type_name=pkg_type, root_config=self.root_config,
487 cpv=cpv, built=built, installed=installed, metadata=metadata))
489 def _xmatch(self, level, atom):
491 This method does not expand old-style virtuals because it
492 is restricted to returning matches for a single ${CATEGORY}/${PN}
493 and old-style virual matches unreliable for that when querying
494 multiple package databases. If necessary, old-style virtuals
495 can be performed on atoms prior to calling this method.
497 cp = portage.dep_getkey(atom)
# Aggregate matches across databases; dbs without xmatch fall back
# to plain match().
498 if level == "match-all":
501 if hasattr(db, "xmatch"):
502 matches.update(db.xmatch(level, atom))
504 matches.update(db.match(atom))
505 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
506 db._cpv_sort_ascending(result)
507 elif level == "match-visible":
510 if hasattr(db, "xmatch"):
511 matches.update(db.xmatch(level, atom))
# Manual visibility filtering for dbs lacking xmatch support.
513 db_keys = list(db._aux_cache_keys)
514 for cpv in db.match(atom):
515 metadata = izip(db_keys,
516 db.aux_get(cpv, db_keys))
517 if not self._visible(db, cpv, metadata):
520 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521 db._cpv_sort_ascending(result)
522 elif level == "bestmatch-visible":
525 if hasattr(db, "xmatch"):
526 cpv = db.xmatch("bestmatch-visible", atom)
527 if not cpv or portage.cpv_getkey(cpv) != cp:
# Keep the overall best candidate across databases.
529 if not result or cpv == portage.best([cpv, result]):
532 db_keys = Package.metadata_keys
533 # break out of this loop with highest visible
534 # match, checked in descending order
535 for cpv in reversed(db.match(atom)):
536 if portage.cpv_getkey(cpv) != cp:
538 metadata = izip(db_keys,
539 db.aux_get(cpv, db_keys))
540 if not self._visible(db, cpv, metadata):
542 if not result or cpv == portage.best([cpv, result]):
546 raise NotImplementedError(level)
549 def execute(self,searchkey):
550 """Performs the search for the supplied search key"""
552 self.searchkey=searchkey
553 self.packagematches = []
556 self.matches = {"pkg":[], "desc":[], "set":[]}
559 self.matches = {"pkg":[], "set":[]}
560 print "Searching... ",
# A leading '%' makes the key a regex; a leading '@' restricts matching
# to the full category/package name.
563 if self.searchkey.startswith('%'):
565 self.searchkey = self.searchkey[1:]
566 if self.searchkey.startswith('@'):
568 self.searchkey = self.searchkey[1:]
570 self.searchre=re.compile(self.searchkey,re.I)
572 self.searchre=re.compile(re.escape(self.searchkey), re.I)
573 for package in self.portdb.cp_all():
574 self.spinner.update()
577 match_string = package[:]
579 match_string = package.split("/")[-1]
582 if self.searchre.search(match_string):
# No visible match means the package is masked.
583 if not self.portdb.xmatch("match-visible", package):
585 self.matches["pkg"].append([package,masked])
586 elif self.searchdesc: # DESCRIPTION searching
587 full_package = self.portdb.xmatch("bestmatch-visible", package)
589 #no match found; we don't want to query description
590 full_package = portage.best(
591 self.portdb.xmatch("match-all", package))
597 full_desc = self.portdb.aux_get(
598 full_package, ["DESCRIPTION"])[0]
600 print "emerge: search: aux_get() failed, skipping"
602 if self.searchre.search(full_desc):
603 self.matches["desc"].append([full_package,masked])
# Also search package sets by name and DESCRIPTION metadata.
605 self.sdict = self.setconfig.getSets()
606 for setname in self.sdict:
607 self.spinner.update()
609 match_string = setname
611 match_string = setname.split("/")[-1]
613 if self.searchre.search(match_string):
614 self.matches["set"].append([setname, False])
615 elif self.searchdesc:
616 if self.searchre.search(
617 self.sdict[setname].getMetadata("DESCRIPTION")):
618 self.matches["set"].append([setname, False])
621 for mtype in self.matches:
622 self.matches[mtype].sort()
623 self.mlen += len(self.matches[mtype])
626 if not self.portdb.xmatch("match-all", cp):
629 if not self.portdb.xmatch("bestmatch-visible", cp):
631 self.matches["pkg"].append([cp, masked])
# Renders the accumulated matches, including version, size, homepage,
# description and license details in verbose mode.
635 """Outputs the results of the search."""
636 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
637 print "[ Applications found : "+white(str(self.mlen))+" ]"
639 vardb = self.vartree.dbapi
640 for mtype in self.matches:
641 for match,masked in self.matches[mtype]:
645 full_package = self.portdb.xmatch(
646 "bestmatch-visible", match)
648 #no match found; we don't want to query description
650 full_package = portage.best(
651 self.portdb.xmatch("match-all",match))
652 elif mtype == "desc":
654 match = portage.cpv_getkey(match)
656 print green("*")+" "+white(match)
657 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
661 desc, homepage, license = self.portdb.aux_get(
662 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664 print "emerge: search: aux_get() failed, skipping"
667 print green("*")+" "+white(match)+" "+red("[ Masked ]")
669 print green("*")+" "+white(match)
670 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
674 mycat = match.split("/")[0]
675 mypkg = match.split("/")[1]
676 mycpv = match + "-" + myversion
# Compute download size from the Manifest for ebuilds...
677 myebuild = self.portdb.findname(mycpv)
679 pkgdir = os.path.dirname(myebuild)
680 from portage import manifest
681 mf = manifest.Manifest(
682 pkgdir, self.settings["DISTDIR"])
684 uri_map = self.portdb.getFetchMap(mycpv)
685 except portage.exception.InvalidDependString, e:
686 file_size_str = "Unknown (%s)" % (e,)
690 mysum[0] = mf.getDistfilesSize(uri_map)
692 file_size_str = "Unknown (missing " + \
693 "digest for %s)" % (e,)
# ...or from the binary package file size otherwise.
698 if db is not vardb and \
699 db.cpv_exists(mycpv):
701 if not myebuild and hasattr(db, "bintree"):
702 myebuild = db.bintree.getname(mycpv)
704 mysum[0] = os.stat(myebuild).st_size
709 if myebuild and file_size_str is None:
# Format as kB with a thousands separator (mirrors format_size).
710 mystr = str(mysum[0] / 1024)
714 mystr = mystr[:mycount] + "," + mystr[mycount:]
715 file_size_str = mystr + " kB"
719 print " ", darkgreen("Latest version available:"),myversion
720 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
723 (darkgreen("Size of files:"), file_size_str)
724 print " ", darkgreen("Homepage:")+" ",homepage
725 print " ", darkgreen("Description:")+" ",desc
726 print " ", darkgreen("License:")+" ",license
731 def getInstallationStatus(self,package):
# One-line "Latest version installed" status from the vartree.
732 installed_package = self.vartree.dep_bestmatch(package)
734 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736 result = darkgreen("Latest version installed:")+" "+version
738 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741 def getVersion(self,full_package,detail):
# Extract the version (with -rN suffix unless it is r0) from a cpv.
742 if len(full_package) > 1:
743 package_parts = portage.catpkgsplit(full_package)
744 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
745 result = package_parts[2]+ "-" + package_parts[3]
747 result = package_parts[2]
# Per-ROOT state bundle used throughout the depgraph: settings, trees,
# package sets and a virtual dbapi of visible packages.  NOTE(review):
# listing is gapped -- the pkg_tree_map literal header and the reverse
# mapping built by the iteritems loop are not fully visible here.
752 class RootConfig(object):
753 """This is used internally by depgraph to track information about a
# Maps package type names to the tree that stores them.
757 "ebuild" : "porttree",
758 "binary" : "bintree",
759 "installed" : "vartree"
763 for k, v in pkg_tree_map.iteritems():
766 def __init__(self, settings, trees, setconfig):
768 self.settings = settings
769 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
770 self.root = self.settings["ROOT"]
771 self.setconfig = setconfig
772 self.sets = self.setconfig.getSets()
773 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decide which atom (if any) should be recorded in the world file for pkg.
# Prefers a slot atom when the package is slotted and the argument atom is
# precise enough; returns None for system packages that update/depclean can
# already handle.  NOTE(review): listing is gapped -- several returns,
# else branches and the mydb selection are not visible here.
775 def create_world_atom(pkg, args_set, root_config):
776 """Create a new atom for the world file if one does not exist. If the
777 argument atom is precise enough to identify a specific slot then a slot
778 atom will be returned. Atoms that are in the system set may also be stored
779 in world since system atoms can only match one slot while world atoms can
780 be greedy with respect to slots. Unslotted system packages will not be
783 arg_atom = args_set.findAtomForPackage(pkg)
786 cp = portage.dep_getkey(arg_atom)
788 sets = root_config.sets
789 portdb = root_config.trees["porttree"].dbapi
790 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists, or the single
# SLOT is not the default "0".
791 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
792 for cpv in portdb.match(cp))
793 slotted = len(available_slots) > 1 or \
794 (len(available_slots) == 1 and "0" not in available_slots)
796 # check the vdb in case this is multislot
797 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
798 for cpv in vardb.match(cp))
799 slotted = len(available_slots) > 1 or \
800 (len(available_slots) == 1 and "0" not in available_slots)
801 if slotted and arg_atom != cp:
802 # If the user gave a specific atom, store it as a
803 # slot atom in the world file.
804 slot_atom = pkg.slot_atom
806 # For USE=multislot, there are a couple of cases to
809 # 1) SLOT="0", but the real SLOT spontaneously changed to some
810 # unknown value, so just record an unslotted atom.
812 # 2) SLOT comes from an installed package and there is no
813 # matching SLOT in the portage tree.
815 # Make sure that the slot atom is available in either the
816 # portdb or the vardb, since otherwise the user certainly
817 # doesn't want the SLOT atom recorded in the world file
818 # (case 1 above). If it's only available in the vardb,
819 # the user may be trying to prevent a USE=multislot
820 # package from being removed by --depclean (case 2 above).
823 if not portdb.match(slot_atom):
824 # SLOT seems to come from an installed multislot package
826 # If there is no installed package matching the SLOT atom,
827 # it probably changed SLOT spontaneously due to USE=multislot,
828 # so just record an unslotted atom.
829 if vardb.match(slot_atom):
830 # Now verify that the argument is precise
831 # enough to identify a specific slot.
832 matches = mydb.match(arg_atom)
833 matched_slots = set()
835 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
836 if len(matched_slots) == 1:
837 new_world_atom = slot_atom
839 if new_world_atom == sets["world"].findAtomForPackage(pkg):
840 # Both atoms would be identical, so there's nothing to add.
843 # Unlike world atoms, system atoms are not greedy for slots, so they
844 # can't be safely excluded from world if they are slotted.
845 system_atom = sets["system"].findAtomForPackage(pkg)
847 if not portage.dep_getkey(system_atom).startswith("virtual/"):
849 # System virtuals aren't safe to exclude from world since they can
850 # match multiple old-style virtuals but only one of them will be
851 # pulled in by update or depclean.
852 providers = portdb.mysettings.getvirtuals().get(
853 portage.dep_getkey(system_atom))
854 if providers and len(providers) == 1 and providers[0] == cp:
856 return new_world_atom
# Generator that strips the EAPI-1 "+"/"-" default prefixes from IUSE flags.
# NOTE(review): listing is gapped -- the loop header and yields are missing.
858 def filter_iuse_defaults(iuse):
860 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: __init__ accepts keyword args for every name
# declared in __slots__ anywhere in the class hierarchy, and copy() clones
# those attributes onto a fresh instance.  NOTE(review): listing is gapped --
# the while-loop headers over `classes` are not visible here.
866 class SlotObject(object):
867 __slots__ = ("__weakref__",)
869 def __init__(self, **kwargs):
# Walk the MRO manually, collecting __slots__ from every base class.
870 classes = [self.__class__]
875 classes.extend(c.__bases__)
876 slots = getattr(c, "__slots__", None)
880 myvalue = kwargs.get(myattr, None)
881 setattr(self, myattr, myvalue)
885 Create a new instance and copy all attributes
886 defined from __slots__ (including those from
889 obj = self.__class__()
891 classes = [self.__class__]
896 classes.extend(c.__bases__)
897 slots = getattr(c, "__slots__", None)
901 setattr(obj, myattr, getattr(self, myattr))
# Base class for dependency priorities.  Subclasses define __int__; all
# rich comparisons delegate to that integer value so priorities compare
# directly against ints and each other.
904 class AbstractDepPriority(SlotObject):
905 __slots__ = ("buildtime", "runtime", "runtime_post")
907 def __lt__(self, other):
908 return self.__int__() < other
910 def __le__(self, other):
911 return self.__int__() <= other
913 def __eq__(self, other):
914 return self.__int__() == other
916 def __ne__(self, other):
917 return self.__int__() != other
919 def __gt__(self, other):
920 return self.__int__() > other
922 def __ge__(self, other):
923 return self.__int__() >= other
# Shallow copy is sufficient: all slots hold immutable values.
927 return copy.copy(self)
# Standard dependency priority.  NOTE(review): listing is gapped -- the
# __int__ mapping and most of the str conversion are not visible here.
929 class DepPriority(AbstractDepPriority):
931 __slots__ = ("satisfied", "optional", "rebuild")
943 if self.runtime_post:
944 return "runtime_post"
# Priority used for blocker edges; a shared singleton instance is exposed
# as BlockerDepPriority.instance.
947 class BlockerDepPriority(DepPriority):
955 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when ordering unmerges.  NOTE(review): listing is gapped --
# the MAX/SOFT/MIN constants and __int__ are not visible here.
957 class UnmergeDepPriority(AbstractDepPriority):
958 __slots__ = ("optional", "satisfied",)
960 Combination of properties Priority Category
965 (none of the above) -2 SOFT
975 if self.runtime_post:
# String form distinguishes hard from soft unmerge dependencies.
982 myvalue = self.__int__()
983 if myvalue > self.SOFT:
# Namespace of classmethod predicates used as ignore_priority filters for
# the dependency graph, ordered from weakest to strongest in the
# ignore_priority tuple below.  Each predicate answers "may this edge be
# ignored?" for a given priority.  NOTE(review): listing is gapped -- the
# classmethod decorators, early returns and NONE/MEDIUM constants are not
# visible here.
987 class DepPriorityNormalRange(object):
989 DepPriority properties Index Category
993 runtime_post 2 MEDIUM_SOFT
995 (none of the above) 0 NONE
1003 def _ignore_optional(cls, priority):
1004 if priority.__class__ is not DepPriority:
1006 return bool(priority.optional)
1009 def _ignore_runtime_post(cls, priority):
1010 if priority.__class__ is not DepPriority:
1012 return bool(priority.optional or priority.runtime_post)
1015 def _ignore_runtime(cls, priority):
1016 if priority.__class__ is not DepPriority:
# Everything except buildtime deps may be ignored at this level.
1018 return not priority.buildtime
1020 ignore_medium = _ignore_runtime
1021 ignore_medium_soft = _ignore_runtime_post
1022 ignore_soft = _ignore_optional
# Filters ordered weakest-first, for progressive relaxation.
1024 DepPriorityNormalRange.ignore_priority = (
1026 DepPriorityNormalRange._ignore_optional,
1027 DepPriorityNormalRange._ignore_runtime_post,
1028 DepPriorityNormalRange._ignore_runtime
# Like DepPriorityNormalRange, but with a finer ladder that also takes the
# "satisfied" property into account (satisfied deps are softer than
# unsatisfied ones).  NOTE(review): listing is gapped -- the classmethod
# decorators and several early returns are not visible here.
1031 class DepPrioritySatisfiedRange(object):
1033 DepPriority Index Category
1035 not satisfied and buildtime HARD
1036 not satisfied and runtime 7 MEDIUM
1037 not satisfied and runtime_post 6 MEDIUM_SOFT
1038 satisfied and buildtime and rebuild 5 SOFT
1039 satisfied and buildtime 4 SOFT
1040 satisfied and runtime 3 SOFT
1041 satisfied and runtime_post 2 SOFT
1043 (none of the above) 0 NONE
1051 def _ignore_optional(cls, priority):
1052 if priority.__class__ is not DepPriority:
1054 return bool(priority.optional)
1057 def _ignore_satisfied_runtime_post(cls, priority):
1058 if priority.__class__ is not DepPriority:
1060 if priority.optional:
1062 if not priority.satisfied:
1064 return bool(priority.runtime_post)
1067 def _ignore_satisfied_runtime(cls, priority):
1068 if priority.__class__ is not DepPriority:
1070 if priority.optional:
1072 if not priority.satisfied:
1074 return not priority.buildtime
1077 def _ignore_satisfied_buildtime(cls, priority):
1078 if priority.__class__ is not DepPriority:
1080 if priority.optional:
1082 if not priority.satisfied:
# Satisfied buildtime deps are ignorable unless flagged for rebuild.
1084 if priority.buildtime:
1085 return not priority.rebuild
1089 def _ignore_satisfied_buildtime_rebuild(cls, priority):
1090 if priority.__class__ is not DepPriority:
1092 if priority.optional:
1094 return bool(priority.satisfied)
1097 def _ignore_runtime_post(cls, priority):
1098 if priority.__class__ is not DepPriority:
1100 return bool(priority.optional or \
1101 priority.satisfied or \
1102 priority.runtime_post)
1105 def _ignore_runtime(cls, priority):
1106 if priority.__class__ is not DepPriority:
1108 return bool(priority.satisfied or \
1109 not priority.buildtime)
1111 ignore_medium = _ignore_runtime
1112 ignore_medium_soft = _ignore_runtime_post
1113 ignore_soft = _ignore_satisfied_buildtime_rebuild
# Filters ordered weakest-first, for progressive relaxation.
1115 DepPrioritySatisfiedRange.ignore_priority = (
1117 DepPrioritySatisfiedRange._ignore_optional,
1118 DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1119 DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1120 DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1121 DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1122 DepPrioritySatisfiedRange._ignore_runtime_post,
1123 DepPrioritySatisfiedRange._ignore_runtime
# Iterative DFS over the dependency graph, collecting the transitive
# runtime closure of system-set packages (uninstalls and non-Package nodes
# are skipped).  NOTE(review): listing is gapped -- node_stack creation,
# continues and the while-loop header are not visible here.
1126 def _find_deep_system_runtime_deps(graph):
1127 deep_system_deps = set()
# Seed the stack with system-set packages present in the graph.
1130 if not isinstance(node, Package) or \
1131 node.operation == 'uninstall':
1133 if node.root_config.sets['system'].findAtomForPackage(node):
1134 node_stack.append(node)
1136 def ignore_priority(priority):
1138 Ignore non-runtime priorities.
1140 if isinstance(priority, DepPriority) and \
1141 (priority.runtime or priority.runtime_post):
1146 node = node_stack.pop()
1147 if node in deep_system_deps:
1149 deep_system_deps.add(node)
# Only follow runtime edges (see ignore_priority above).
1150 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1151 if not isinstance(child, Package) or \
1152 child.operation == 'uninstall':
1154 node_stack.append(child)
1156 return deep_system_deps
1158 class FakeVartree(portage.vartree):
1159 """This is implements an in-memory copy of a vartree instance that provides
1160 all the interfaces required for use by the depgraph. The vardb is locked
1161 during the constructor call just long enough to read a copy of the
1162 installed package information. This allows the depgraph to do it's
1163 dependency calculations without holding a lock on the vardb. It also
1164 allows things like vardb global updates to be done in memory so that the
1165 user doesn't necessarily need write access to the vardb in cases where
1166 global updates are necessary (updates are performed when necessary if there
1167 is not a matching ebuild in the tree)."""
1168 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1169 self._root_config = root_config
1170 if pkg_cache is None:
1172 real_vartree = root_config.trees["vartree"]
1173 portdb = root_config.trees["porttree"].dbapi
1174 self.root = real_vartree.root
1175 self.settings = real_vartree.settings
1176 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1177 if "_mtime_" not in mykeys:
1178 mykeys.append("_mtime_")
1179 self._db_keys = mykeys
1180 self._pkg_cache = pkg_cache
1181 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1182 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184 # At least the parent needs to exist for the lock file.
1185 portage.util.ensure_dirs(vdb_path)
1186 except portage.exception.PortageException:
1190 if acquire_lock and os.access(vdb_path, os.W_OK):
1191 vdb_lock = portage.locks.lockdir(vdb_path)
1192 real_dbapi = real_vartree.dbapi
1194 for cpv in real_dbapi.cpv_all():
1195 cache_key = ("installed", self.root, cpv, "nomerge")
1196 pkg = self._pkg_cache.get(cache_key)
1198 metadata = pkg.metadata
1200 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1201 myslot = metadata["SLOT"]
1202 mycp = portage.dep_getkey(cpv)
1203 myslot_atom = "%s:%s" % (mycp, myslot)
1205 mycounter = long(metadata["COUNTER"])
1208 metadata["COUNTER"] = str(mycounter)
1209 other_counter = slot_counters.get(myslot_atom, None)
1210 if other_counter is not None:
1211 if other_counter > mycounter:
1213 slot_counters[myslot_atom] = mycounter
1215 pkg = Package(built=True, cpv=cpv,
1216 installed=True, metadata=metadata,
1217 root_config=root_config, type_name="installed")
1218 self._pkg_cache[pkg] = pkg
1219 self.dbapi.cpv_inject(pkg)
1220 real_dbapi.flush_cache()
1223 portage.locks.unlockdir(vdb_lock)
1224 # Populate the old-style virtuals using the cached values.
1225 if not self.settings.treeVirtuals:
1226 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1227 portage.getCPFromCPV, self.get_all_provides())
1229 # Intialize variables needed for lazy cache pulls of the live ebuild
1230 # metadata. This ensures that the vardb lock is released ASAP, without
1231 # being delayed in case cache generation is triggered.
1232 self._aux_get = self.dbapi.aux_get
1233 self.dbapi.aux_get = self._aux_get_wrapper
1234 self._match = self.dbapi.match
1235 self.dbapi.match = self._match_wrapper
1236 self._aux_get_history = set()
1237 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1238 self._portdb = portdb
1239 self._global_updates = None
1241 def _match_wrapper(self, cpv, use_cache=1):
1243 Make sure the metadata in Package instances gets updated for any
1244 cpv that is returned from a match() call, since the metadata can
1245 be accessed directly from the Package instance instead of via
1248 matches = self._match(cpv, use_cache=use_cache)
1250 if cpv in self._aux_get_history:
1252 self._aux_get_wrapper(cpv, [])
1255 def _aux_get_wrapper(self, pkg, wants):
1256 if pkg in self._aux_get_history:
1257 return self._aux_get(pkg, wants)
1258 self._aux_get_history.add(pkg)
1260 # Use the live ebuild metadata if possible.
1261 live_metadata = dict(izip(self._portdb_keys,
1262 self._portdb.aux_get(pkg, self._portdb_keys)))
1263 if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265 self.dbapi.aux_update(pkg, live_metadata)
1266 except (KeyError, portage.exception.PortageException):
1267 if self._global_updates is None:
1268 self._global_updates = \
1269 grab_global_updates(self._portdb.porttree_root)
1270 perform_global_updates(
1271 pkg, self.dbapi, self._global_updates)
1272 return self._aux_get(pkg, wants)
def sync(self, acquire_lock=1):
    """
    Call this method to synchronize state with the real vardb
    after one or more packages may have been installed or
    uninstalled.
    """
    # NOTE(review): several try/except/finally lines are missing from this
    # chunk (original numbering jumps) — locking and error handling below
    # are incomplete; confirm against the full file.
    vdb_path = os.path.join(self.root, portage.VDB_PATH)
    # At least the parent needs to exist for the lock file.
    portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
    # Only lock when the vdb is writable for this process.
    if acquire_lock and os.access(vdb_path, os.W_OK):
        vdb_lock = portage.locks.lockdir(vdb_path)
    portage.locks.unlockdir(vdb_lock)
    real_vardb = self._root_config.trees["vartree"].dbapi
    current_cpv_set = frozenset(real_vardb.cpv_all())
    pkg_vardb = self.dbapi
    aux_get_history = self._aux_get_history

    # Remove any packages that have been uninstalled.
    for pkg in list(pkg_vardb):
        if pkg.cpv not in current_cpv_set:
            pkg_vardb.cpv_remove(pkg)
            aux_get_history.discard(pkg.cpv)

    # Validate counters and timestamps.
    validation_keys = ["COUNTER", "_mtime_"]
    for cpv in current_cpv_set:
        pkg_hash_key = ("installed", root, cpv, "nomerge")
        pkg = pkg_vardb.get(pkg_hash_key)
        counter, mtime = real_vardb.aux_get(cpv, validation_keys)
        counter = long(counter)
        # Evict cached entries whose counter (and, presumably, mtime —
        # the continuation line is missing) no longer match the real vardb.
        if counter != pkg.counter or \
            pkg_vardb.cpv_remove(pkg)
            aux_get_history.discard(pkg.cpv)
        pkg = self._pkg(cpv)
        # Track the highest counter seen per slot atom.
        other_counter = slot_counters.get(pkg.slot_atom)
        if other_counter is not None:
            if other_counter > pkg.counter:
        slot_counters[pkg.slot_atom] = pkg.counter
        pkg_vardb.cpv_inject(pkg)
    real_vardb.flush_cache()
def _pkg(self, cpv):
    """Build an 'installed'-type Package instance for cpv using metadata
    from the real vartree dbapi."""
    root_config = self._root_config
    real_vardb = root_config.trees["vartree"].dbapi
    pkg = Package(cpv=cpv, installed=True,
        metadata=izip(self._db_keys,
            real_vardb.aux_get(cpv, self._db_keys)),
        root_config=root_config,
        type_name="installed")
    # Normalize COUNTER to a canonical decimal-string form.
    # NOTE(review): the surrounding try/except and the final `return pkg`
    # are missing from this chunk — confirm against the full file.
    mycounter = long(pkg.metadata["COUNTER"])
    pkg.metadata["COUNTER"] = str(mycounter)
def grab_global_updates(portdir):
    """Read and parse the update commands from portdir/profiles/updates.

    NOTE(review): lines are missing from this chunk — the try/except
    wiring, the initialization of upd_commands, and the final return are
    not visible; confirm against the full file.
    """
    from portage.update import grab_updates, parse_updates
    updpath = os.path.join(portdir, "profiles", "updates")
    rawupdates = grab_updates(updpath)
    except portage.exception.DirectoryNotFound:
    # Accumulate the parsed update commands from every updates file.
    for mykey, mystat, mycontent in rawupdates:
        commands, errors = parse_updates(mycontent)
        upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
    """Apply global update commands to the *DEPEND metadata of mycpv
    stored in mydb."""
    from portage.update import update_dbentries
    aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
    aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
    updates = update_dbentries(mycommands, aux_dict)
    # NOTE(review): one line is missing before this write (original
    # numbering jumps) — presumably a guard such as `if updates:`; verify.
    mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
    """
    Check if a package is visible. This can raise an InvalidDependString
    exception if LICENSE is invalid.
    TODO: optionally generate a list of masking reasons
    @returns: True if the package is visible, False otherwise.
    """
    # NOTE(review): the `return False`/`return True` lines between these
    # checks are missing from this chunk — in the full file each failing
    # check presumably short-circuits to False; confirm before relying on
    # this flow.
    if not pkg.metadata["SLOT"]:
    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
    eapi = pkg.metadata["EAPI"]
    if not portage.eapi_is_supported(eapi):
    if not pkg.installed:
        if portage._eapi_is_deprecated(eapi):
    if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
    if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
    if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
    # LICENSE check can raise InvalidDependString; the pairing try: line
    # is not visible in this chunk.
    if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
    except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
    """Collect mask reasons for pkg: portage.getmaskingstatus() results
    plus extra checks for CHOST acceptance and an undefined SLOT."""
    mreasons = portage.getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi)
    # CHOST acceptance only matters for packages not yet installed.
    if not pkg.installed:
        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
            mreasons.append("CHOST: %s" % \
                pkg.metadata["CHOST"])
    if not pkg.metadata["SLOT"]:
        mreasons.append("invalid: SLOT is undefined")
    # NOTE(review): the final `return mreasons` is missing from this chunk.
def get_mask_info(root_config, cpv, pkgsettings,
    db, pkg_type, built, installed, db_keys):
    """Fetch metadata for cpv from db and compute its mask reasons.

    @returns: (metadata, mreasons) tuple.
    """
    # NOTE(review): the try/except around this aux_get (which would leave
    # metadata = None on failure, per the `if metadata is None` branch
    # below) and the else-branch wiring are missing from this chunk.
    metadata = dict(izip(db_keys,
        db.aux_get(cpv, db_keys)))
    if metadata and not built:
        # For unbuilt ebuilds, derive USE/CHOST from the current settings.
        pkgsettings.setcpv(cpv, mydb=metadata)
        metadata["USE"] = pkgsettings["PORTAGE_USE"]
        metadata['CHOST'] = pkgsettings.get('CHOST', '')
    if metadata is None:
        mreasons = ["corruption"]
    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)
    mreasons = get_masking_status(pkg, pkgsettings, root_config)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """Print each masked package with its mask reasons, package.mask
    comments and license file locations, de-duplicating repeated output.

    @returns: True if any package was masked due to an unsupported EAPI.

    NOTE(review): this chunk omits lines (shown_cpvs initialization,
    several continue/print lines, try: headers) — structure below is
    incomplete; confirm against the full file.
    """
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv,
        metadata, mreasons) in masked_packages:
        if cpv in shown_cpvs:
        comment, filename = None, None
        if "package.mask" in mreasons:
            # Recover the comment block and file that masked this cpv.
            comment, filename = \
                portage.getmaskingreason(
                    cpv, metadata=metadata,
                    settings=pkgsettings,
                    portdb=root_config.trees["porttree"].dbapi,
                    return_location=True)
        missing_licenses = []
        if not portage.eapi_is_supported(metadata["EAPI"]):
            have_eapi_mask = True
        missing_licenses = \
            pkgsettings._getMissingLicenses(
        except portage.exception.InvalidDependString:
            # This will have already been reported
            # above via mreasons.
        print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
        if comment and comment not in shown_comments:
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            l_path = portdb.findLicensePath(l)
            if l in shown_licenses:
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.") % (l, l_path)
            shown_licenses.add(l)
    return have_eapi_mask
class Task(SlotObject):
    # A hashable task object identified by a tuple hash key supplied by
    # subclasses via _hash_key. Equality, hashing, length, indexing,
    # iteration and membership all delegate to that key.
    # NOTE(review): the `def` lines for __hash__, __len__, __getitem__'s
    # siblings (__iter__, __str__) and several return statements are
    # missing from this chunk.
    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        # Subclasses are required to set _hash_key; absence means the
        # subclass forgot to implement it.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            raise NotImplementedError(self)

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

        # Cache the hash of the key on first use.
        hash_value = getattr(self, "_hash_value", None)
        if hash_value is None:
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

        return str(self._get_hash_key())
class Blocker(Task):
    # Represents a blocker atom ("!...") for a given root; hash key is the
    # ("blocks", root, atom, eapi) tuple.

    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Category/package portion of the blocker atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        # NOTE(review): the assignment line binding this tuple to
        # self._hash_key is missing from this chunk.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
class Package(Task):
    # A package instance (ebuild, binary or installed) with its metadata
    # and derived attributes (cp, slot_atom, category, cpv_split, ...).
    # NOTE(review): this chunk omits lines throughout the class (the
    # `metadata_keys = [` opener, the `class _use(object):` header, loop
    # headers inside _iuse.__init__, and the return statements of the
    # comparison operators) — confirm structure against the full file.

    __hash__ = Task.__hash__
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root_config", "type_name",
        "category", "counter", "cp", "cpv_split",
        "inherited", "iuse", "mtime",
        "pf", "pv_split", "root", "slot", "slot_atom", "use")

        # Metadata keys this class consumes (list opener missing here).
        "CHOST", "COUNTER", "DEPEND", "EAPI",
        "INHERITED", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.root = self.root_config.root
        # Wrap metadata so updates sync back into this instance's
        # attributes (see _PackageMetadataWrapper).
        self.metadata = _PackageMetadataWrapper(self, self.metadata)
        self.cp = portage.cpv_getkey(self.cpv)
        self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
        self.category, self.pf = portage.catsplit(self.cpv)
        self.cpv_split = portage.catpkgsplit(self.cpv)
        self.pv_split = self.cpv_split[1:]

        # Nested helper holding the enabled USE flags (its `class _use`
        # header line is missing from this chunk).
        __slots__ = ("__weakref__", "enabled")

        def __init__(self, use):
            self.enabled = frozenset(use)

    class _iuse(object):
        # Parsed IUSE: tokens split into enabled (+flag), disabled (-flag)
        # and plain entries, plus a lazily-built validation regex.

        __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")

        def __init__(self, tokens, iuse_implicit):
            self.tokens = tuple(tokens)
            self.iuse_implicit = iuse_implicit
            # NOTE(review): the loop/branch headers classifying tokens
            # (and the `other` list) are missing from this chunk.
            enabled.append(x[1:])
            disabled.append(x[1:])
            self.enabled = frozenset(enabled)
            self.disabled = frozenset(disabled)
            self.all = frozenset(chain(enabled, disabled, other))

        def __getattribute__(self, name):
            # Lazily compile the IUSE-matching regex on first access.
            return object.__getattribute__(self, "regex")
            except AttributeError:
                all = object.__getattribute__(self, "all")
                iuse_implicit = object.__getattribute__(self, "iuse_implicit")
                # Escape anything except ".*" which is supposed
                # to pass through from _get_implicit_iuse()
                regex = (re.escape(x) for x in chain(all, iuse_implicit))
                regex = "^(%s)$" % "|".join(regex)
                regex = regex.replace("\\.\\*", ".*")
                self.regex = re.compile(regex)
            return object.__getattribute__(self, name)

    def _get_hash_key(self):
        # Hash key is (type_name, root, cpv, operation); operation
        # defaults to "merge" unless onlydeps/installed force "nomerge".
        # NOTE(review): the assignment binding the tuple to
        # self._hash_key is missing from this chunk.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    # Version comparisons delegate to portage.pkgcmp; return lines are
    # missing from this chunk.
    def __lt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:

    def __le__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:

    def __gt__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:

    def __ge__(self, other):
        if other.cp != self.cp:
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Full metadata key set for the package-metadata wrapper: every portage
# auxdb key except UNUSED_* placeholders and the obsolete CDEPEND, plus
# whatever Package itself requires.
_all_metadata_keys = set(x for x in portage.auxdbkeys \
    if not x.startswith("UNUSED_"))
_all_metadata_keys.discard("CDEPEND")
_all_metadata_keys.update(Package.metadata_keys)

from portage.cache.mappings import slot_dict_class
# Slot-based dict class used as the base of _PackageMetadataWrapper.
_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
    """
    Detect metadata updates and synchronize Package attributes.
    """
    # When a key in _wrapped_keys is assigned, the matching _set_<key>
    # hook mirrors the value onto the owning Package instance.

    __slots__ = ("_pkg",)
    _wrapped_keys = frozenset(
        ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])

    def __init__(self, pkg, metadata):
        _PackageMetadataWrapperBase.__init__(self)
        # NOTE(review): the line storing pkg (presumably self._pkg = pkg)
        # is missing from this chunk.
        self.update(metadata)

    def __setitem__(self, k, v):
        _PackageMetadataWrapperBase.__setitem__(self, k, v)
        # Dispatch to the per-key synchronization hook.
        if k in self._wrapped_keys:
            getattr(self, "_set_" + k.lower())(k, v)

    def _set_inherited(self, k, v):
        if isinstance(v, basestring):
            v = frozenset(v.split())
        self._pkg.inherited = v

    def _set_iuse(self, k, v):
        self._pkg.iuse = self._pkg._iuse(
            v.split(), self._pkg.root_config.iuse_implicit)

    def _set_slot(self, k, v):
        # NOTE(review): body missing from this chunk.

    def _set_use(self, k, v):
        self._pkg.use = self._pkg._use(v.split())

    def _set_counter(self, k, v):
        # NOTE(review): the conversion of the string value (and its
        # try/except) is missing from this chunk.
        if isinstance(v, basestring):
        self._pkg.counter = v

    def _set__mtime_(self, k, v):
        # NOTE(review): body truncated in this chunk.
        if isinstance(v, basestring):
class EbuildFetchonly(SlotObject):
    # Runs the "fetch" phase of an ebuild in fetch-only mode, optionally
    # inside a private temporary build directory.
    # NOTE(review): the `def` lines opening the first and last methods
    # shown below, plus several try/finally/return lines, are missing from
    # this chunk (original numbering jumps).

    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

        # (method header missing) Prepare the fetch environment and run
        # doebuild "fetch"; RESTRICT=fetch forces the builddir variant.
        settings = self.settings
        portdb = pkg.root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        settings.setcpv(pkg)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        use_cache = 1 # always true
        portage.doebuild_environment(ebuild_path, "fetch",
            settings["ROOT"], settings, debug, use_cache, portdb)
        restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
        rval = self._execute_with_builddir()
        rval = portage.doebuild(ebuild_path, "fetch",
            settings["ROOT"], settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")
        if rval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

    def _execute_with_builddir(self):
        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        # NOTE(review): the try/except header pairing with this errno
        # check is missing from this chunk.
        if e.errno != portage.exception.PermissionDenied.errno:
        raise portage.exception.PermissionDenied(global_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        retval = self._execute()
        # Restore the original tmpdir and clean up the private one.
        settings["PORTAGE_TMPDIR"] = global_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        shutil.rmtree(private_tmpdir)

        # (method header missing — presumably _execute) Run doebuild
        # "fetch" inside the prepared build directories.
        settings = self.settings
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
        retval = portage.doebuild(ebuild_path, "fetch",
            self.settings["ROOT"], self.settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")
        if retval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)
        portage.elog.elog_process(self.pkg.cpv, self.settings)
class PollConstants(object):
    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """
    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    # NOTE(review): the loop header binding k and the fallback value v is
    # missing from this chunk; each constant falls back to v when the
    # select module does not define it.
    locals()[k] = getattr(select, k, v)
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Sublasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """
    # NOTE(review): the `def` lines for start/isAlive/poll/wait/cancel
    # are missing from this chunk (original numbering jumps); the orphan
    # statements below are their partial bodies.

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

        """
        Start an asynchronous task and then return as soon as possible.
        """
        raise NotImplementedError(self)

        return self.returncode is None

        return self.returncode

        if self.returncode is None:
        return self.returncode

        return self.returncode

        self.cancelled = True

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        if self._start_listeners is None:
        self._start_listeners.remove(f)

    def _start_hook(self):
        # Fire and clear start listeners exactly once.
        if self._start_listeners is not None:
            start_listeners = self._start_listeners
            self._start_listeners = None
            for f in start_listeners:

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        # May also need to remove from the in-flight stack used by
        # _wait_hook() (see comment there).
        if self._exit_listeners is None:
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None
            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
class AbstractPollTask(AsynchronousTask):
    # Base for tasks driven by the scheduler's poll() loop; defines which
    # poll events are registered and which are treated as exceptional.
    # NOTE(review): the __slots__ continuation, part of the
    # _registered_events expression, and the branch bodies in
    # _unregister_if_appropriate are missing from this chunk.

    __slots__ = ("scheduler",) + \

    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \

    def _unregister(self):
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        # Unregister on error/invalid events or on hangup.
        if self._registered:
            if event & self._exceptional_events:
            elif event & PollConstants.POLLHUP:
class PipeReader(AbstractPollTask):
    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    main process.
    """
    # NOTE(review): the `def` lines for _start/isAlive/cancel/_wait/
    # getvalue/close are missing from this chunk; the orphan statements
    # below are their partial bodies.

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

        # (_start) Make every input non-blocking and register it with the
        # scheduler for the standard events.
        self._reg_ids = set()
        self._read_data = []
        for k, f in self.input_files.iteritems():
            fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
            self._reg_ids.add(self.scheduler.register(f.fileno(),
                self._registered_events, self._output_handler))
        self._registered = True

        return self._registered

        if self.returncode is None:
        self.cancelled = True

        if self.returncode is not None:
            return self.returncode
        if self._registered:
            self.scheduler.schedule(self._reg_ids)
        self.returncode = os.EX_OK
        return self.returncode

        """Retrieve the entire contents"""
        if sys.hexversion >= 0x3000000:
            return bytes().join(self._read_data)
        return "".join(self._read_data)

        """Free the memory buffer."""
        self._read_data = None

    def _output_handler(self, fd, event):
        # Drain readable data into _read_data; unregister on HUP/error.
        # NOTE(review): the try/except around buf.fromfile and some loop
        # bodies are missing from this chunk.
        if event & PollConstants.POLLIN:
            for f in self.input_files.itervalues():
                if fd == f.fileno():
            buf = array.array('B')
            buf.fromfile(f, self._bufsize)
            self._read_data.append(buf.tostring())
        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False

        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None

        if self.input_files is not None:
            for f in self.input_files.itervalues():
            self.input_files = None
class CompositeTask(AsynchronousTask):
    # A task implemented as a chain of sub-tasks; _current_task tracks the
    # one in flight and exit handlers advance the chain.
    # NOTE(review): the `def` lines for isAlive/cancel/_poll/_wait and a
    # number of loop/return lines are missing from this chunk.

    __slots__ = ("scheduler",) + ("_current_task",)

        return self._current_task is not None

        # (cancel) Propagate cancellation to the in-flight sub-task.
        self.cancelled = True
        if self._current_task is not None:
            self._current_task.cancel()

        """
        This does a loop calling self._current_task.poll()
        repeatedly as long as the value of self._current_task
        keeps changing. It calls poll() a maximum of one time
        for a given self._current_task instance. This is useful
        since calling poll() on a task can trigger advance to
        the next task could eventually lead to the returncode
        being set in cases when polling only a single task would
        not have the same effect.
        """
        task = self._current_task
        if task is None or task is prev:
            # don't poll the same task more than once
        return self.returncode

        # (_wait)
        task = self._current_task
        # don't wait for the same task more than once
        # Before the task.wait() method returned, an exit
        # listener should have set self._current_task to either
        # a different task or None. Something is wrong.
        raise AssertionError("self._current_task has not " + \
            "changed since calling wait", self, task)
        return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.
        """
        self._final_exit(task)

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a task.
        """
        task.addExitListener(exit_handler)
        self._current_task = task
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """
    # NOTE(review): the `def` lines for _start and cancel are missing
    # from this chunk.

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

        # (_start)
        self._start_next_task()

        # (cancel) Drop queued tasks, then cancel via CompositeTask.
        self._task_queue.clear()
        CompositeTask.cancel(self)

    def _start_next_task(self):
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        # Advance on success; stop the chain on failure or when the
        # queue is exhausted.
        if self._default_exit(task) != os.EX_OK:
        elif self._task_queue:
            self._start_next_task()
        self._final_exit(task)
class SubProcess(AbstractPollTask):
    # Base class for tasks backed by a forked child process, monitored
    # via a registered file descriptor and reaped with os.waitpid().
    # NOTE(review): the `def` lines for _poll/cancel/isAlive/_wait and
    # several try/except headers are missing from this chunk.

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.

        # (_poll) Non-blocking check: reap with WNOHANG; (0, 0) means the
        # child is still running.
        if self.returncode is not None:
            return self.returncode
        if self.pid is None:
            return self.returncode
        if self._registered:
            return self.returncode
        retval = os.waitpid(self.pid, os.WNOHANG)
        if e.errno != errno.ECHILD:
        # ECHILD: the child was already reaped elsewhere; fake a failure
        # status so _set_returncode() records a non-zero result.
        retval = (self.pid, 1)
        if retval == (0, 0):
        self._set_returncode(retval)
        return self.returncode

        # (cancel) Best-effort SIGTERM; ESRCH means already gone.
        os.kill(self.pid, signal.SIGTERM)
        if e.errno != errno.ESRCH:
        self.cancelled = True
        if self.pid is not None:
        return self.returncode

        return self.pid is not None and \
            self.returncode is None

        # (_wait) Block until the scheduler or waitpid() delivers the
        # child's exit status.
        if self.returncode is not None:
            return self.returncode
        if self._registered:
            self.scheduler.schedule(self._reg_id)
        if self.returncode is not None:
            return self.returncode
        wait_retval = os.waitpid(self.pid, 0)
        if e.errno != errno.ECHILD:
        self._set_returncode((self.pid, 1))
        self._set_returncode(wait_retval)
        return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)

        if self._files is not None:
            for f in self._files.itervalues():

    def _set_returncode(self, wait_retval):
        # Translate a waitpid() status into a shell-style returncode:
        # signal deaths become 128+ style values via the shifts below.
        retval = wait_retval[1]
        if retval != os.EX_OK:
            retval = (retval & 0xff) << 8
            retval = retval >> 8
        self.returncode = retval
class SpawnProcess(SubProcess):
    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    argument to spawn().
    """
    # NOTE(review): the `def _start(self):` header and a number of
    # try/else/if lines inside it are missing from this chunk.

    # Keyword arguments forwarded verbatim to portage.process.spawn().
    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
        "uid", "gid", "groups", "umask", "logfile",
        "path_lookup", "pre_exec")

    __slots__ = ("args",) + \

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

        # (_start) Default fd_pipes to this process's stdio.
        if self.fd_pipes is None:
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.itervalues():
            if fd == sys.stdout.fileno():
            if fd == sys.stderr.fileno():

        logfile = self.logfile
        self._files = self._files_dict()
        # Non-blocking read end of the pipe/pty used to monitor the child.
        master_fd, slave_fd = self._pipe(fd_pipes)
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes_orig = fd_pipes.copy()

        # TODO: Use job control functions like tcsetpgrp() to control
        # access to stdin. Until then, use /dev/null so that any
        # attempts to read from stdin will immediately return EOF
        # instead of blocking indefinitely.
        null_input = open('/dev/null', 'rb')
        fd_pipes[0] = null_input.fileno()
        fd_pipes[0] = fd_pipes_orig[0]

        files.process = os.fdopen(master_fd, 'rb')
        if logfile is not None:
            # Tee child output into the log file (and stdout when not
            # running in the background).
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd
            files.log = open(logfile, mode='ab')
            portage.util.apply_secpass_permissions(logfile,
                uid=portage.portage_uid, gid=portage.portage_gid,
            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
            output_handler = self._output_handler

            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        # Collect spawn() kwargs from the matching instance attributes.
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
            self._registered_events, output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)
        if null_input is not None:
        if isinstance(retval, int):
            # spawn failed
            self.returncode = retval
        self.pid = retval[0]
        # This class reaps the child itself, so drop it from portage's
        # global spawned-pids bookkeeping.
        portage.process.spawned_pids.remove(self.pid)

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        # NOTE(review): the return statement is missing from this chunk.

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):
        # Copy available child output to stdout (unless backgrounded)
        # and to the log file; unregister on HUP/error.
        # NOTE(review): the try/except around buf.fromfile and the EOF
        # branch are missing from this chunk.
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            buf.fromfile(files.process, self._bufsize)
            if not self.background:
                buf.tofile(files.stdout)
                files.stdout.flush()
            buf.tofile(files.log)
        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """
        # NOTE(review): the try/except around buf.fromfile and the EOF
        # branch are missing from this chunk.
        if event & PollConstants.POLLIN:
            buf = array.array('B')
            buf.fromfile(self._files.process, self._bufsize)
        self._unregister_if_appropriate(event)
        return self._registered
class MiscFunctionsProcess(SpawnProcess):
    """
    Spawns misc-functions.sh with an existing ebuild environment.
    """
    # NOTE(review): the `def _start(self):` header is missing from this
    # chunk (original numbering jumps).

    __slots__ = ("commands", "phase", "pkg", "settings")

        # (_start) Build the misc-functions.sh command line and delegate
        # to SpawnProcess._start().
        settings = self.settings
        settings.pop("EBUILD_PHASE", None)
        portage_bin_path = settings["PORTAGE_BIN_PATH"]
        misc_sh_binary = os.path.join(portage_bin_path,
            os.path.basename(portage.const.MISC_SH_BINARY))
        self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
        self.logfile = settings.get("PORTAGE_LOG_FILE")
        # Remove a stale exit-status file before spawning.
        portage._doebuild_exit_status_unlink(
            settings.get("EBUILD_EXIT_STATUS_FILE"))
        SpawnProcess._start(self)

    def _spawn(self, args, **kwargs):
        # Run through portage.spawn() so the ebuild environment applies.
        settings = self.settings
        debug = settings.get("PORTAGE_DEBUG") == "1"
        return portage.spawn(" ".join(args), settings,
            debug=debug, **kwargs)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Fold the phase's exit-status file check into the returncode.
        self.returncode = portage._doebuild_exit_status_check_and_log(
            self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
    # Fetches an ebuild's distfiles by spawning the `ebuild` helper in a
    # fetch phase; in non-prefetch mode it also manages a build directory
    # for pkg_nofetch/elog handling.
    # NOTE(review): the __slots__ continuation, the `def _start(self):`
    # header, and several if/try lines are missing from this chunk.

    __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \

        # (_start)
        root_config = self.pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(self.pkg.cpv)
        settings = self.config_pool.allocate()
        settings.setcpv(self.pkg)

        # In prefetch mode, logging goes to emerge-fetch.log and the builddir
        # should not be touched since otherwise it could interfere with
        # another instance of the same cpv concurrently being built for a
        # different $ROOT (currently, builds only cooperate with prefetchers
        # that are spawned for the same $ROOT).
        if not self.prefetch:
            self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
            self._build_dir.lock()
            self._build_dir.clean()
            portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
            if self.logfile is None:
                self.logfile = settings.get("PORTAGE_LOG_FILE")

        # If any incremental variables have been overridden
        # via the environment, those values need to be passed
        # along here so that they are correctly considered by
        # the config instance in the subproccess.
        fetch_env = os.environ.copy()

        nocolor = settings.get("NOCOLOR")
        if nocolor is not None:
            fetch_env["NOCOLOR"] = nocolor
        fetch_env["PORTAGE_NICENESS"] = "0"
        fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

        ebuild_binary = os.path.join(
            settings["PORTAGE_BIN_PATH"], "ebuild")

        # NOTE(review): `phase` is assigned in lines missing from this
        # chunk — confirm against the full file.
        fetch_args = [ebuild_binary, ebuild_path, phase]
        debug = settings.get("PORTAGE_DEBUG") == "1"
        fetch_args.append("--debug")

        self.args = fetch_args
        self.env = fetch_env
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.stdout.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
        stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        # Collect elog messages that might have been
        # created by the pkg_nofetch phase.
        if self._build_dir is not None:
            # Skip elog messages for prefetch, in order to avoid duplicates.
            if not self.prefetch and self.returncode != os.EX_OK:
                if self.logfile is not None:
                    elog_out = open(self.logfile, 'a')
                msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
                if self.logfile is not None:
                    msg += ", Log file:"
                eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
                if self.logfile is not None:
                    eerror(" '%s'" % (self.logfile,),
                        phase="unpack", key=self.pkg.cpv, out=elog_out)
                if elog_out is not None:
            if not self.prefetch:
                portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
            features = self._build_dir.settings.features
            # On success, clean up; always unlock and return the settings
            # instance to the config pool.
            if self.returncode == os.EX_OK:
                self._build_dir.clean()
            self._build_dir.unlock()
            self.config_pool.deallocate(self._build_dir.settings)
            self._build_dir = None
# Lock manager for a package's PORTAGE_BUILDDIR. Acquires a lock on the
# category dir while creating/locking the build dir, and supports a
# shutil.rmtree()-based clean().
# NOTE(review): this extraction is missing several original lines,
# including the `def lock(self):`, `def clean(self):` and
# `def unlock(self):` headers themselves — confirm against upstream.
2581 class EbuildBuildDir(SlotObject):
2583 __slots__ = ("dir_path", "pkg", "settings",
2584 "locked", "_catdir", "_lock_obj")
2586 def __init__(self, **kwargs):
2587 SlotObject.__init__(self, **kwargs)
2592 This raises an AlreadyLocked exception if lock() is called
2593 while a lock is already held. In order to avoid this, call
2594 unlock() or check whether the "locked" attribute is True
2595 or False before calling lock().
2597 if self._lock_obj is not None:
2598 raise self.AlreadyLocked((self._lock_obj,))
# If no dir_path was given, derive PORTAGE_BUILDDIR by running
# doebuild_environment() for the "setup" phase.
2600 dir_path = self.dir_path
2601 if dir_path is None:
2602 root_config = self.pkg.root_config
2603 portdb = root_config.trees["porttree"].dbapi
2604 ebuild_path = portdb.findname(self.pkg.cpv)
2605 settings = self.settings
2606 settings.setcpv(self.pkg)
2607 debug = settings.get("PORTAGE_DEBUG") == "1"
2608 use_cache = 1 # always true
2609 portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2610 self.settings, debug, use_cache, portdb)
2611 dir_path = self.settings["PORTAGE_BUILDDIR"]
2613 catdir = os.path.dirname(dir_path)
2614 self._catdir = catdir
2616 portage.util.ensure_dirs(os.path.dirname(catdir),
2617 gid=portage.portage_gid,
# Hold the category-dir lock only while creating and locking the
# package build dir, then release it below.
2621 catdir_lock = portage.locks.lockdir(catdir)
2622 portage.util.ensure_dirs(catdir,
2623 gid=portage.portage_gid,
2625 self._lock_obj = portage.locks.lockdir(dir_path)
2627 self.locked = self._lock_obj is not None
2628 if catdir_lock is not None:
2629 portage.locks.unlockdir(catdir_lock)
2632 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2633 by keepwork or keeptemp in FEATURES."""
2634 settings = self.settings
2635 features = settings.features
2636 if not ("keepwork" in features or "keeptemp" in features):
2638 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2639 except EnvironmentError, e:
# A missing build dir is fine; re-raise anything else.
2640 if e.errno != errno.ENOENT:
2645 if self._lock_obj is None:
2648 portage.locks.unlockdir(self._lock_obj)
2649 self._lock_obj = None
# Best-effort removal of the (possibly now empty) category dir; these
# errno values are expected when it is missing, non-empty, or raced.
2652 catdir = self._catdir
2655 catdir_lock = portage.locks.lockdir(catdir)
2661 if e.errno not in (errno.ENOENT,
2662 errno.ENOTEMPTY, errno.EEXIST):
2665 portage.locks.unlockdir(catdir_lock)
2667 class AlreadyLocked(portage.exception.PortageException):
# Composite task driving a full from-source build: optional prefetch
# wait, fetch, build (EbuildExecuter), optional binary packaging
# (EbuildBinpkg), and finally merge (EbuildMerge).
# NOTE(review): numerous original lines are missing from this
# extraction (method headers, try/except bodies, return statements);
# confirm all control flow against the upstream file before editing.
2670 class EbuildBuild(CompositeTask):
2672 __slots__ = ("args_set", "config_pool", "find_blockers",
2673 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2674 "prefetcher", "settings", "world_atom") + \
2675 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2679 logger = self.logger
2682 settings = self.settings
2683 world_atom = self.world_atom
2684 root_config = pkg.root_config
2687 portdb = root_config.trees[tree].dbapi
2688 settings.setcpv(pkg)
2689 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2690 ebuild_path = portdb.findname(self.pkg.cpv)
2691 self._ebuild_path = ebuild_path
# If a background prefetcher is still running for this package, wait
# for it instead of fetching concurrently.
2693 prefetcher = self.prefetcher
2694 if prefetcher is None:
2696 elif not prefetcher.isAlive():
2698 elif prefetcher.poll() is None:
2700 waiting_msg = "Fetching files " + \
2701 "in the background. " + \
2702 "To view fetch progress, run `tail -f " + \
2703 "/var/log/emerge-fetch.log` in another " + \
2705 msg_prefix = colorize("GOOD", " * ")
2706 from textwrap import wrap
2707 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2708 for line in wrap(waiting_msg, 65))
2709 if not self.background:
2710 writemsg(waiting_msg, noiselevel=-1)
2712 self._current_task = prefetcher
2713 prefetcher.addExitListener(self._prefetch_exit)
2716 self._prefetch_exit(prefetcher)
2718 def _prefetch_exit(self, prefetcher):
2722 settings = self.settings
# Pretend/fetchonly path runs a synchronous EbuildFetchonly; the
# normal path starts an asynchronous EbuildFetcher.
2725 fetcher = EbuildFetchonly(
2726 fetch_all=opts.fetch_all_uri,
2727 pkg=pkg, pretend=opts.pretend,
2729 retval = fetcher.execute()
2730 self.returncode = retval
2734 fetcher = EbuildFetcher(config_pool=self.config_pool,
2735 fetchall=opts.fetch_all_uri,
2736 fetchonly=opts.fetchonly,
2737 background=self.background,
2738 pkg=pkg, scheduler=self.scheduler)
2740 self._start_task(fetcher, self._fetch_exit)
2742 def _fetch_exit(self, fetcher):
2746 fetch_failed = False
2748 fetch_failed = self._final_exit(fetcher) != os.EX_OK
2750 fetch_failed = self._default_exit(fetcher) != os.EX_OK
# Keep the fetch log only on failure, for diagnosis.
2752 if fetch_failed and fetcher.logfile is not None and \
2753 os.path.exists(fetcher.logfile):
2754 self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2756 if not fetch_failed and fetcher.logfile is not None:
2757 # Fetch was successful, so remove the fetch log.
2759 os.unlink(fetcher.logfile)
2763 if fetch_failed or opts.fetchonly:
2767 logger = self.logger
2769 pkg_count = self.pkg_count
2770 scheduler = self.scheduler
2771 settings = self.settings
2772 features = settings.features
2773 ebuild_path = self._ebuild_path
2774 system_set = pkg.root_config.sets["system"]
2776 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2777 self._build_dir.lock()
2779 # Cleaning is triggered before the setup
2780 # phase, in portage.doebuild().
2781 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2782 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2783 short_msg = "emerge: (%s of %s) %s Clean" % \
2784 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2785 logger.log(msg, short_msg=short_msg)
2787 #buildsyspkg: Check if we need to _force_ binary package creation
2788 self._issyspkg = "buildsyspkg" in features and \
2789 system_set.findAtomForPackage(pkg) and \
2792 if opts.buildpkg or self._issyspkg:
2794 self._buildpkg = True
2796 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2797 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2798 short_msg = "emerge: (%s of %s) %s Compile" % \
2799 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2800 logger.log(msg, short_msg=short_msg)
2803 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2804 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2805 short_msg = "emerge: (%s of %s) %s Compile" % \
2806 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2807 logger.log(msg, short_msg=short_msg)
2809 build = EbuildExecuter(background=self.background, pkg=pkg,
2810 scheduler=scheduler, settings=settings)
2811 self._start_task(build, self._build_exit)
# Flush elog messages and release the build dir lock.
2813 def _unlock_builddir(self):
2814 portage.elog.elog_process(self.pkg.cpv, self.settings)
2815 self._build_dir.unlock()
2817 def _build_exit(self, build):
2818 if self._default_exit(build) != os.EX_OK:
2819 self._unlock_builddir()
2824 buildpkg = self._buildpkg
2827 self._final_exit(build)
2832 msg = ">>> This is a system package, " + \
2833 "let's pack a rescue tarball.\n"
2835 log_path = self.settings.get("PORTAGE_LOG_FILE")
2836 if log_path is not None:
2837 log_file = open(log_path, 'a')
2843 if not self.background:
2844 portage.writemsg_stdout(msg, noiselevel=-1)
2846 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2847 scheduler=self.scheduler, settings=self.settings)
2849 self._start_task(packager, self._buildpkg_exit)
2851 def _buildpkg_exit(self, packager):
2853 Released build dir lock when there is a failure or
2854 when in buildpkgonly mode. Otherwise, the lock will
2855 be released when merge() is called.
2858 if self._default_exit(packager) != os.EX_OK:
2859 self._unlock_builddir()
2863 if self.opts.buildpkgonly:
2864 # Need to call "clean" phase for buildpkgonly mode
2865 portage.elog.elog_process(self.pkg.cpv, self.settings)
2867 clean_phase = EbuildPhase(background=self.background,
2868 pkg=self.pkg, phase=phase,
2869 scheduler=self.scheduler, settings=self.settings,
2871 self._start_task(clean_phase, self._clean_exit)
2874 # Continue holding the builddir lock until
2875 # after the package has been installed.
2876 self._current_task = None
2877 self.returncode = packager.returncode
2880 def _clean_exit(self, clean_phase):
2881 if self._final_exit(clean_phase) != os.EX_OK or \
2882 self.opts.buildpkgonly:
2883 self._unlock_builddir()
2888 Install the package and then clean up and release locks.
2889 Only call this after the build has completed successfully
2890 and neither fetchonly nor buildpkgonly mode are enabled.
2893 find_blockers = self.find_blockers
2894 ldpath_mtimes = self.ldpath_mtimes
2895 logger = self.logger
2897 pkg_count = self.pkg_count
2898 settings = self.settings
2899 world_atom = self.world_atom
2900 ebuild_path = self._ebuild_path
2903 merge = EbuildMerge(find_blockers=self.find_blockers,
2904 ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2905 pkg_count=pkg_count, pkg_path=ebuild_path,
2906 scheduler=self.scheduler,
2907 settings=settings, tree=tree, world_atom=world_atom)
2909 msg = " === (%s of %s) Merging (%s::%s)" % \
2910 (pkg_count.curval, pkg_count.maxval,
2911 pkg.cpv, ebuild_path)
2912 short_msg = "emerge: (%s of %s) %s Merge" % \
2913 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2914 logger.log(msg, short_msg=short_msg)
# The build dir lock is held across merge.execute() and released in
# the surrounding cleanup (original try/finally not fully visible).
2917 rval = merge.execute()
2919 self._unlock_builddir()
# Composite task that runs the ebuild phase sequence: clean, setup,
# unpack, then the src_* phases (prepare/configure/compile/test/
# install), honoring EAPI 0/1's lack of prepare/configure.
2923 class EbuildExecuter(CompositeTask):
2925 __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2927 _phases = ("prepare", "configure", "compile", "test", "install")
# Eclasses whose packages need serialized $DISTDIR access during
# unpack (set contents not visible in this extraction).
2929 _live_eclasses = frozenset([
2939 self._tree = "porttree"
2942 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2943 scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2944 self._start_task(clean_phase, self._clean_phase_exit)
2946 def _clean_phase_exit(self, clean_phase):
2948 if self._default_exit(clean_phase) != os.EX_OK:
2953 scheduler = self.scheduler
2954 settings = self.settings
2957 # This initializes PORTAGE_LOG_FILE.
2958 portage.prepare_build_dirs(pkg.root, settings, cleanup)
# Setup runs through the scheduler's dedicated setup queue rather
# than _start_task.
2960 setup_phase = EbuildPhase(background=self.background,
2961 pkg=pkg, phase="setup", scheduler=scheduler,
2962 settings=settings, tree=self._tree)
2964 setup_phase.addExitListener(self._setup_exit)
2965 self._current_task = setup_phase
2966 self.scheduler.scheduleSetup(setup_phase)
2968 def _setup_exit(self, setup_phase):
2970 if self._default_exit(setup_phase) != os.EX_OK:
2974 unpack_phase = EbuildPhase(background=self.background,
2975 pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2976 settings=self.settings, tree=self._tree)
2978 if self._live_eclasses.intersection(self.pkg.inherited):
2979 # Serialize $DISTDIR access for live ebuilds since
2980 # otherwise they can interfere with eachother.
2982 unpack_phase.addExitListener(self._unpack_exit)
2983 self._current_task = unpack_phase
2984 self.scheduler.scheduleUnpack(unpack_phase)
2987 self._start_task(unpack_phase, self._unpack_exit)
2989 def _unpack_exit(self, unpack_phase):
2991 if self._default_exit(unpack_phase) != os.EX_OK:
2995 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2998 phases = self._phases
2999 eapi = pkg.metadata["EAPI"]
3000 if eapi in ("0", "1"):
3001 # skip src_prepare and src_configure
3004 for phase in phases:
3005 ebuild_phases.add(EbuildPhase(background=self.background,
3006 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3007 settings=self.settings, tree=self._tree))
3009 self._start_task(ebuild_phases, self._default_final_exit)
3011 class EbuildMetadataPhase(SubProcess):
3014 Asynchronous interface for the ebuild "depend" phase which is
3015 used to extract metadata from the ebuild.
3018 __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3019 "ebuild_mtime", "portdb", "repo_path", "settings") + \
3022 _file_names = ("ebuild",)
3023 _files_dict = slot_dict_class(_file_names, prefix="")
3027 settings = self.settings
3029 ebuild_path = self.ebuild_path
3030 debug = settings.get("PORTAGE_DEBUG") == "1"
3034 if self.fd_pipes is not None:
3035 fd_pipes = self.fd_pipes.copy()
3039 fd_pipes.setdefault(0, sys.stdin.fileno())
3040 fd_pipes.setdefault(1, sys.stdout.fileno())
3041 fd_pipes.setdefault(2, sys.stderr.fileno())
3043 # flush any pending output
# Flush stdout/stderr before forking so the child does not inherit
# (and duplicate) buffered output.
3044 for fd in fd_pipes.itervalues():
3045 if fd == sys.stdout.fileno():
3047 if fd == sys.stderr.fileno():
3050 fd_pipes_orig = fd_pipes.copy()
3051 self._files = self._files_dict()
# Non-blocking read end of the metadata pipe, registered with the
# scheduler's poll loop via _output_handler.
3054 master_fd, slave_fd = os.pipe()
3055 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3056 fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3058 fd_pipes[self._metadata_fd] = slave_fd
3060 self._raw_metadata = []
3061 files.ebuild = os.fdopen(master_fd, 'r')
3062 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3063 self._registered_events, self._output_handler)
3064 self._registered = True
3066 retval = portage.doebuild(ebuild_path, "depend",
3067 settings["ROOT"], settings, debug,
3068 mydbapi=self.portdb, tree="porttree",
3069 fd_pipes=fd_pipes, returnpid=True)
3073 if isinstance(retval, int):
3074 # doebuild failed before spawning
3076 self.returncode = retval
3080 self.pid = retval[0]
# Take over ownership of the spawned pid from portage's global list.
3081 portage.process.spawned_pids.remove(self.pid)
# Poll callback: accumulate raw metadata until EOF, then unregister.
3083 def _output_handler(self, fd, event):
3085 if event & PollConstants.POLLIN:
3086 self._raw_metadata.append(self._files.ebuild.read())
3087 if not self._raw_metadata[-1]:
3091 self._unregister_if_appropriate(event)
3092 return self._registered
3094 def _set_returncode(self, wait_retval):
3095 SubProcess._set_returncode(self, wait_retval)
3096 if self.returncode == os.EX_OK:
3097 metadata_lines = "".join(self._raw_metadata).splitlines()
3098 if len(portage.auxdbkeys) != len(metadata_lines):
3099 # Don't trust bash's returncode if the
3100 # number of lines is incorrect.
3103 metadata = izip(portage.auxdbkeys, metadata_lines)
3104 self.metadata_callback(self.cpv, self.ebuild_path,
3105 self.repo_path, metadata, self.ebuild_mtime)
# Spawns a single ebuild phase via portage.doebuild() in a child
# process, using a pty when possible for progress output.
3107 class EbuildProcess(SpawnProcess):
3109 __slots__ = ("phase", "pkg", "settings", "tree")
3112 # Don't open the log file during the clean phase since the
3113 # open file can result in an nfs lock on $T/build.log which
3114 # prevents the clean phase from removing $T.
3115 if self.phase not in ("clean", "cleanrm"):
3116 self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3117 SpawnProcess._start(self)
3119 def _pipe(self, fd_pipes):
3120 stdout_pipe = fd_pipes.get(1)
3121 got_pty, master_fd, slave_fd = \
3122 portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3123 return (master_fd, slave_fd)
# Delegate the actual spawn to portage.doebuild() for this phase.
3125 def _spawn(self, args, **kwargs):
3127 root_config = self.pkg.root_config
3129 mydbapi = root_config.trees[tree].dbapi
3130 settings = self.settings
3131 ebuild_path = settings["EBUILD"]
3132 debug = settings.get("PORTAGE_DEBUG") == "1"
3134 rval = portage.doebuild(ebuild_path, self.phase,
3135 root_config.root, settings, debug,
3136 mydbapi=mydbapi, tree=tree, **kwargs)
3140 def _set_returncode(self, wait_retval):
3141 SpawnProcess._set_returncode(self, wait_retval)
# Cross-check the exit status file written by the ebuild environment;
# skipped for clean phases which have no build dir to consult.
3143 if self.phase not in ("clean", "cleanrm"):
3144 self.returncode = portage._doebuild_exit_status_check_and_log(
3145 self.settings, self.phase, self.returncode)
3147 if self.phase == "test" and self.returncode != os.EX_OK and \
3148 "test-fail-continue" in self.settings.features:
3149 self.returncode = os.EX_OK
3151 portage._post_phase_userpriv_perms(self.settings)
# Composite task wrapping one EbuildProcess phase plus its associated
# post-phase misc commands (e.g. install-time checks/fixups).
3153 class EbuildPhase(CompositeTask):
3155 __slots__ = ("background", "pkg", "phase",
3156 "scheduler", "settings", "tree")
3158 _post_phase_cmds = portage._post_phase_cmds
3162 ebuild_process = EbuildProcess(background=self.background,
3163 pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3164 settings=self.settings, tree=self.tree)
3166 self._start_task(ebuild_process, self._ebuild_exit)
3168 def _ebuild_exit(self, ebuild_process):
# After install, scan the build log for QA problems, writing to the
# log file itself when running in the background.
3170 if self.phase == "install":
3172 log_path = self.settings.get("PORTAGE_LOG_FILE")
3174 if self.background and log_path is not None:
3175 log_file = open(log_path, 'a')
3178 portage._check_build_log(self.settings, out=out)
3180 if log_file is not None:
3183 if self._default_exit(ebuild_process) != os.EX_OK:
3187 settings = self.settings
3189 if self.phase == "install":
3190 portage._post_src_install_uid_fix(settings)
3192 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3193 if post_phase_cmds is not None:
3194 post_phase = MiscFunctionsProcess(background=self.background,
3195 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3196 scheduler=self.scheduler, settings=settings)
3197 self._start_task(post_phase, self._post_phase_exit)
# No post-phase commands for this phase: finish with the ebuild
# process's own returncode.
3200 self.returncode = ebuild_process.returncode
3201 self._current_task = None
3204 def _post_phase_exit(self, post_phase):
3205 if self._final_exit(post_phase) != os.EX_OK:
3206 writemsg("!!! post %s failed; exiting.\n" % self.phase,
3208 self._current_task = None
# Runs the "package" phase to build a .tbz2 into a temp file, then
# injects it into the bintree on success.
3212 class EbuildBinpkg(EbuildProcess):
3214 This assumes that src_install() has successfully completed.
3216 __slots__ = ("_binpkg_tmpfile",)
3219 self.phase = "package"
3220 self.tree = "porttree"
3222 root_config = pkg.root_config
3223 portdb = root_config.trees["porttree"].dbapi
3224 bintree = root_config.trees["bintree"]
3225 ebuild_path = portdb.findname(self.pkg.cpv)
3226 settings = self.settings
3227 debug = settings.get("PORTAGE_DEBUG") == "1"
# Write to a pid-suffixed temp file so concurrent emerges cannot
# clobber each other's output.
3229 bintree.prevent_collision(pkg.cpv)
3230 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3231 pkg.cpv + ".tbz2." + str(os.getpid()))
3232 self._binpkg_tmpfile = binpkg_tmpfile
3233 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3234 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3237 EbuildProcess._start(self)
# Remove the temp-file setting again once the child has been spawned.
3239 settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3241 def _set_returncode(self, wait_retval):
3242 EbuildProcess._set_returncode(self, wait_retval)
3245 bintree = pkg.root_config.trees["bintree"]
3246 binpkg_tmpfile = self._binpkg_tmpfile
3247 if self.returncode == os.EX_OK:
3248 bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
# Synchronous wrapper around portage.merge(): installs the image from
# $D into the live filesystem, updates the world set on success, and
# logs completion.
3250 class EbuildMerge(SlotObject):
3252 __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3253 "pkg", "pkg_count", "pkg_path", "pretend",
3254 "scheduler", "settings", "tree", "world_atom")
3257 root_config = self.pkg.root_config
3258 settings = self.settings
3259 retval = portage.merge(settings["CATEGORY"],
3260 settings["PF"], settings["D"],
3261 os.path.join(settings["PORTAGE_BUILDDIR"],
3262 "build-info"), root_config.root, settings,
3263 myebuild=settings["EBUILD"],
3264 mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3265 vartree=root_config.trees["vartree"],
3266 prev_mtimes=self.ldpath_mtimes,
3267 scheduler=self.scheduler,
3268 blockers=self.find_blockers)
3270 if retval == os.EX_OK:
3271 self.world_atom(self.pkg)
3276 def _log_success(self):
3278 pkg_count = self.pkg_count
3279 pkg_path = self.pkg_path
3280 logger = self.logger
# The post-build clean message only applies when FEATURES=noclean is
# not set (clean runs afterwards in that case).
3281 if "noclean" not in self.settings.features:
3282 short_msg = "emerge: (%s of %s) %s Clean Post" % \
3283 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3284 logger.log((" === (%s of %s) " + \
3285 "Post-Build Cleaning (%s::%s)") % \
3286 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3287 short_msg=short_msg)
3288 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3289 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Asynchronous task wrapping unmerge() for a single installed package,
# with output routed to PORTAGE_LOG_FILE when running in background.
3291 class PackageUninstall(AsynchronousTask):
3293 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3297 unmerge(self.pkg.root_config, self.opts, "unmerge",
3298 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3299 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3300 writemsg_level=self._writemsg_level)
3301 except UninstallFailure, e:
3302 self.returncode = e.status
3304 self.returncode = os.EX_OK
# Route unmerge output either to the console or, when backgrounded,
# append it to the log file; sub-WARNING messages are suppressed in
# background mode when no log file exists.
3307 def _writemsg_level(self, msg, level=0, noiselevel=0):
3309 log_path = self.settings.get("PORTAGE_LOG_FILE")
3310 background = self.background
3312 if log_path is None:
3313 if not (background and level < logging.WARNING):
3314 portage.util.writemsg_level(msg,
3315 level=level, noiselevel=noiselevel)
3318 portage.util.writemsg_level(msg,
3319 level=level, noiselevel=noiselevel)
3321 f = open(log_path, 'a')
# Composite task driving a binary-package install: optional prefetch
# wait, fetch (BinpkgFetcher), digest verify (BinpkgVerifier), clean,
# metadata/xpak extraction, image extraction (BinpkgExtractorAsync),
# setup phase, and finally merge (EbuildMerge).
# NOTE(review): many original lines are missing from this extraction
# (method headers, try/finally bodies, returns); confirm control flow
# against the upstream file before editing.
3327 class Binpkg(CompositeTask):
3329 __slots__ = ("find_blockers",
3330 "ldpath_mtimes", "logger", "opts",
3331 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3332 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3333 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3335 def _writemsg_level(self, msg, level=0, noiselevel=0):
3337 if not self.background:
3338 portage.util.writemsg_level(msg,
3339 level=level, noiselevel=noiselevel)
3341 log_path = self.settings.get("PORTAGE_LOG_FILE")
3342 if log_path is not None:
3343 f = open(log_path, 'a')
3352 settings = self.settings
3353 settings.setcpv(pkg)
3354 self._tree = "bintree"
3355 self._bintree = self.pkg.root_config.trees[self._tree]
3356 self._verify = not self.opts.pretend
# Build-dir layout under PORTAGE_TMPDIR: image/ for the extracted
# files and build-info/ for metadata.
3358 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3359 "portage", pkg.category, pkg.pf)
3360 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3361 pkg=pkg, settings=settings)
3362 self._image_dir = os.path.join(dir_path, "image")
3363 self._infloc = os.path.join(dir_path, "build-info")
3364 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3365 settings["EBUILD"] = self._ebuild_path
3366 debug = settings.get("PORTAGE_DEBUG") == "1"
3367 portage.doebuild_environment(self._ebuild_path, "setup",
3368 settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3369 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3371 # The prefetcher has already completed or it
3372 # could be running now. If it's running now,
3373 # wait for it to complete since it holds
3374 # a lock on the file being fetched. The
3375 # portage.locks functions are only designed
3376 # to work between separate processes. Since
3377 # the lock is held by the current process,
3378 # use the scheduler and fetcher methods to
3379 # synchronize with the fetcher.
3380 prefetcher = self.prefetcher
3381 if prefetcher is None:
3383 elif not prefetcher.isAlive():
3385 elif prefetcher.poll() is None:
3387 waiting_msg = ("Fetching '%s' " + \
3388 "in the background. " + \
3389 "To view fetch progress, run `tail -f " + \
3390 "/var/log/emerge-fetch.log` in another " + \
3391 "terminal.") % prefetcher.pkg_path
3392 msg_prefix = colorize("GOOD", " * ")
3393 from textwrap import wrap
3394 waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3395 for line in wrap(waiting_msg, 65))
3396 if not self.background:
3397 writemsg(waiting_msg, noiselevel=-1)
3399 self._current_task = prefetcher
3400 prefetcher.addExitListener(self._prefetch_exit)
3403 self._prefetch_exit(prefetcher)
3405 def _prefetch_exit(self, prefetcher):
3408 pkg_count = self.pkg_count
3409 if not (self.opts.pretend or self.opts.fetchonly):
3410 self._build_dir.lock()
# Start from a clean build dir; a missing dir is not an error.
3412 shutil.rmtree(self._build_dir.dir_path)
3413 except EnvironmentError, e:
3414 if e.errno != errno.ENOENT:
3417 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3418 fetcher = BinpkgFetcher(background=self.background,
3419 logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3420 pretend=self.opts.pretend, scheduler=self.scheduler)
3421 pkg_path = fetcher.pkg_path
3422 self._pkg_path = pkg_path
# Only fetch when --getbinpkg is set and the package is remote.
3424 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3426 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3427 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3428 short_msg = "emerge: (%s of %s) %s Fetch" % \
3429 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3430 self.logger.log(msg, short_msg=short_msg)
3431 self._start_task(fetcher, self._fetcher_exit)
3434 self._fetcher_exit(fetcher)
3436 def _fetcher_exit(self, fetcher):
3438 # The fetcher only has a returncode when
3439 # --getbinpkg is enabled.
3440 if fetcher.returncode is not None:
3441 self._fetched_pkg = True
3442 if self._default_exit(fetcher) != os.EX_OK:
3443 self._unlock_builddir()
3447 if self.opts.pretend:
3448 self._current_task = None
3449 self.returncode = os.EX_OK
3457 logfile = self.settings.get("PORTAGE_LOG_FILE")
3458 verifier = BinpkgVerifier(background=self.background,
3459 logfile=logfile, pkg=self.pkg)
3460 self._start_task(verifier, self._verifier_exit)
3463 self._verifier_exit(verifier)
3465 def _verifier_exit(self, verifier):
3466 if verifier is not None and \
3467 self._default_exit(verifier) != os.EX_OK:
3468 self._unlock_builddir()
3472 logger = self.logger
3474 pkg_count = self.pkg_count
3475 pkg_path = self._pkg_path
# A freshly fetched package is injected into the local bintree so its
# metadata/index entry is updated.
3477 if self._fetched_pkg:
3478 self._bintree.inject(pkg.cpv, filename=pkg_path)
3480 if self.opts.fetchonly:
3481 self._current_task = None
3482 self.returncode = os.EX_OK
3486 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3487 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3488 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3489 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3490 logger.log(msg, short_msg=short_msg)
3493 settings = self.settings
3494 ebuild_phase = EbuildPhase(background=self.background,
3495 pkg=pkg, phase=phase, scheduler=self.scheduler,
3496 settings=settings, tree=self._tree)
3498 self._start_task(ebuild_phase, self._clean_exit)
3500 def _clean_exit(self, clean_phase):
3501 if self._default_exit(clean_phase) != os.EX_OK:
3502 self._unlock_builddir()
3506 dir_path = self._build_dir.dir_path
3509 shutil.rmtree(dir_path)
3510 except (IOError, OSError), e:
3511 if e.errno != errno.ENOENT:
3515 infloc = self._infloc
3517 pkg_path = self._pkg_path
3520 for mydir in (dir_path, self._image_dir, infloc):
3521 portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3522 gid=portage.data.portage_gid, mode=dir_mode)
3524 # This initializes PORTAGE_LOG_FILE.
3525 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3526 self._writemsg_level(">>> Extracting info\n")
# Unpack the xpak metadata segment into build-info/ and synthesize
# any required keys that are missing from the archive.
3528 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3529 check_missing_metadata = ("CATEGORY", "PF")
3530 missing_metadata = set()
3531 for k in check_missing_metadata:
3532 v = pkg_xpak.getfile(k)
3534 missing_metadata.add(k)
3536 pkg_xpak.unpackinfo(infloc)
3537 for k in missing_metadata:
3545 f = open(os.path.join(infloc, k), 'wb')
3551 # Store the md5sum in the vdb.
3552 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3554 f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3558 # This gives bashrc users an opportunity to do various things
3559 # such as remove binary packages after they're installed.
3560 settings = self.settings
3561 settings.setcpv(self.pkg)
3562 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3563 settings.backup_changes("PORTAGE_BINPKG_FILE")
3566 setup_phase = EbuildPhase(background=self.background,
3567 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3568 settings=settings, tree=self._tree)
3570 setup_phase.addExitListener(self._setup_exit)
3571 self._current_task = setup_phase
3572 self.scheduler.scheduleSetup(setup_phase)
3574 def _setup_exit(self, setup_phase):
3575 if self._default_exit(setup_phase) != os.EX_OK:
3576 self._unlock_builddir()
3580 extractor = BinpkgExtractorAsync(background=self.background,
3581 image_dir=self._image_dir,
3582 pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3583 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3584 self._start_task(extractor, self._extractor_exit)
3586 def _extractor_exit(self, extractor):
3587 if self._final_exit(extractor) != os.EX_OK:
3588 self._unlock_builddir()
3589 writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3593 def _unlock_builddir(self):
# No lock was taken in pretend/fetchonly mode, so nothing to release.
3594 if self.opts.pretend or self.opts.fetchonly:
3596 portage.elog.elog_process(self.pkg.cpv, self.settings)
3597 self._build_dir.unlock()
3601 # This gives bashrc users an opportunity to do various things
3602 # such as remove binary packages after they're installed.
3603 settings = self.settings
3604 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3605 settings.backup_changes("PORTAGE_BINPKG_FILE")
3607 merge = EbuildMerge(find_blockers=self.find_blockers,
3608 ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3609 pkg=self.pkg, pkg_count=self.pkg_count,
3610 pkg_path=self._pkg_path, scheduler=self.scheduler,
3611 settings=settings, tree=self._tree, world_atom=self.world_atom)
3614 retval = merge.execute()
3616 settings.pop("PORTAGE_BINPKG_FILE", None)
3617 self._unlock_builddir()
# Fetches a single binary package from PORTAGE_BINHOST using the
# configured FETCHCOMMAND/RESUMECOMMAND, with optional distlocks-style
# locking on the destination file and mtime sync with the remote index.
3620 class BinpkgFetcher(SpawnProcess):
3622 __slots__ = ("pkg", "pretend",
3623 "locked", "pkg_path", "_lock_obj")
3625 def __init__(self, **kwargs):
3626 SpawnProcess.__init__(self, **kwargs)
3628 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3636 pretend = self.pretend
3637 bintree = pkg.root_config.trees["bintree"]
3638 settings = bintree.settings
3639 use_locks = "distlocks" in settings.features
3640 pkg_path = self.pkg_path
3643 portage.util.ensure_dirs(os.path.dirname(pkg_path))
# Resume only when the existing file is a known-incomplete download
# (listed in bintree.invalids).
3646 exists = os.path.exists(pkg_path)
3647 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3648 if not (pretend or resume):
3649 # Remove existing file or broken symlink.
3655 # urljoin doesn't work correctly with
3656 # unrecognized protocols like sftp
3657 if bintree._remote_has_index:
3658 rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3660 rel_uri = pkg.cpv + ".tbz2"
3661 uri = bintree._remote_base_uri.rstrip("/") + \
3662 "/" + rel_uri.lstrip("/")
3664 uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3665 "/" + pkg.pf + ".tbz2"
# Pretend mode just prints the URI and exits successfully.
3668 portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3669 self.returncode = os.EX_OK
# Choose the protocol-specific fetch/resume command, falling back to
# the generic FETCHCOMMAND/RESUMECOMMAND.
3673 protocol = urlparse.urlparse(uri)[0]
3674 fcmd_prefix = "FETCHCOMMAND"
3676 fcmd_prefix = "RESUMECOMMAND"
3677 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3679 fcmd = settings.get(fcmd_prefix)
3682 "DISTDIR" : os.path.dirname(pkg_path),
3684 "FILE" : os.path.basename(pkg_path)
3687 fetch_env = dict(settings.iteritems())
3688 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3689 for x in shlex.split(fcmd)]
3691 if self.fd_pipes is None:
3693 fd_pipes = self.fd_pipes
3695 # Redirect all output to stdout since some fetchers like
3696 # wget pollute stderr (if portage detects a problem then it
3697 # can send it's own message to stderr).
3698 fd_pipes.setdefault(0, sys.stdin.fileno())
3699 fd_pipes.setdefault(1, sys.stdout.fileno())
3700 fd_pipes.setdefault(2, sys.stdout.fileno())
3702 self.args = fetch_args
3703 self.env = fetch_env
3704 SpawnProcess._start(self)
3706 def _set_returncode(self, wait_retval):
3707 SpawnProcess._set_returncode(self, wait_retval)
3708 if self.returncode == os.EX_OK:
3709 # If possible, update the mtime to match the remote package if
3710 # the fetcher didn't already do it automatically.
3711 bintree = self.pkg.root_config.trees["bintree"]
3712 if bintree._remote_has_index:
3713 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3714 if remote_mtime is not None:
3716 remote_mtime = long(remote_mtime)
3721 local_mtime = long(os.stat(self.pkg_path).st_mtime)
3725 if remote_mtime != local_mtime:
3727 os.utime(self.pkg_path,
3728 (remote_mtime, remote_mtime))
3737 This raises an AlreadyLocked exception if lock() is called
3738 while a lock is already held. In order to avoid this, call
3739 unlock() or check whether the "locked" attribute is True
3740 or False before calling lock().
3742 if self._lock_obj is not None:
3743 raise self.AlreadyLocked((self._lock_obj,))
3745 self._lock_obj = portage.locks.lockfile(
3746 self.pkg_path, wantnewlockfile=1)
3749 class AlreadyLocked(portage.exception.PortageException):
3753 if self._lock_obj is None:
3755 portage.locks.unlockfile(self._lock_obj)
3756 self._lock_obj = None
# Synchronously verifies a binary package's digests via
# bintree.digestCheck(); on failure, renames the file aside using
# portage's checksum-failure temp-file helper.
3759 class BinpkgVerifier(AsynchronousTask):
3760 __slots__ = ("logfile", "pkg",)
3764 Note: Unlike a normal AsynchronousTask.start() method,
3765 this one does all work is synchronously. The returncode
3766 attribute will be set before it returns.
3770 root_config = pkg.root_config
3771 bintree = root_config.trees["bintree"]
# Temporarily redirect stdout/stderr to the log file while running in
# the background; restored below.
3773 stdout_orig = sys.stdout
3774 stderr_orig = sys.stderr
3776 if self.background and self.logfile is not None:
3777 log_file = open(self.logfile, 'a')
3779 if log_file is not None:
3780 sys.stdout = log_file
3781 sys.stderr = log_file
3783 bintree.digestCheck(pkg)
3784 except portage.exception.FileNotFound:
3785 writemsg("!!! Fetching Binary failed " + \
3786 "for '%s'\n" % pkg.cpv, noiselevel=-1)
3788 except portage.exception.DigestException, e:
3789 writemsg("\n!!! Digest verification failed:\n",
3791 writemsg("!!! %s\n" % e.value[0],
3793 writemsg("!!! Reason: %s\n" % e.value[1],
3795 writemsg("!!! Got: %s\n" % e.value[2],
3797 writemsg("!!! Expected: %s\n" % e.value[3],
3800 if rval != os.EX_OK:
3801 pkg_path = bintree.getname(pkg.cpv)
3802 head, tail = os.path.split(pkg_path)
3803 temp_filename = portage._checksum_failure_temp_file(head, tail)
3804 writemsg("File renamed to '%s'\n" % (temp_filename,),
3807 sys.stdout = stdout_orig
3808 sys.stderr = stderr_orig
3809 if log_file is not None:
3812 self.returncode = rval
# Composite task: fetch a binary package, verify its digests, then inject
# it into the local bintree. Interior lines (def headers for _start and the
# early-return wait() calls) are missing from this numbered listing.
3815 class BinpkgPrefetcher(CompositeTask):
3817 __slots__ = ("pkg",) + \
3818 ("pkg_path", "_bintree",)
3821 self._bintree = self.pkg.root_config.trees["bintree"]
3822 fetcher = BinpkgFetcher(background=self.background,
3823 logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3824 scheduler=self.scheduler)
3825 self.pkg_path = fetcher.pkg_path
3826 self._start_task(fetcher, self._fetcher_exit)
3828 def _fetcher_exit(self, fetcher):
3830 if self._default_exit(fetcher) != os.EX_OK:
# Fetch succeeded: chain into digest verification.
3834 verifier = BinpkgVerifier(background=self.background,
3835 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3836 self._start_task(verifier, self._verifier_exit)
3838 def _verifier_exit(self, verifier):
3839 if self._default_exit(verifier) != os.EX_OK:
# Verified: register the package file with the binary tree.
3843 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3845 self._current_task = None
3846 self.returncode = os.EX_OK
# Spawns a shell pipeline (bzip2 | tar) to unpack a binary package into
# image_dir. The _start() def header is missing from this numbered listing.
# Both paths are shell-quoted via portage._shell_quote before interpolation.
3849 class BinpkgExtractorAsync(SpawnProcess):
3851 __slots__ = ("image_dir", "pkg", "pkg_path")
3853 _shell_binary = portage.const.BASH_BINARY
3856 self.args = [self._shell_binary, "-c",
3857 "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3858 (portage._shell_quote(self.pkg_path),
3859 portage._shell_quote(self.image_dir))]
3861 self.env = self.pkg.root_config.settings.environ()
3862 SpawnProcess._start(self)
# One entry of the merge list: dispatches to EbuildBuild for "ebuild"
# packages, Binpkg for "binary" packages, and PackageUninstall for
# uninstall operations (in merge()). Numerous interior lines (def headers,
# blank separators, early returns) are missing from this numbered listing.
3864 class MergeListItem(CompositeTask):
3867 TODO: For parallel scheduling, everything here needs asynchronous
3868 execution support (start, poll, and wait methods).
3871 __slots__ = ("args_set",
3872 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3873 "find_blockers", "logger", "mtimedb", "pkg",
3874 "pkg_count", "pkg_to_replace", "prefetcher",
3875 "settings", "statusMessage", "world_atom") + \
# _start(): uninstall operations short-circuit here; the real work for
# them happens later in merge().
3881 build_opts = self.build_opts
3884 # uninstall, executed by self.merge()
3885 self.returncode = os.EX_OK
3889 args_set = self.args_set
3890 find_blockers = self.find_blockers
3891 logger = self.logger
3892 mtimedb = self.mtimedb
3893 pkg_count = self.pkg_count
3894 scheduler = self.scheduler
3895 settings = self.settings
3896 world_atom = self.world_atom
3897 ldpath_mtimes = mtimedb["ldpath"]
# Build the "(N of M) category/pkg" progress message.
3899 action_desc = "Emerging"
3901 if pkg.type_name == "binary":
3902 action_desc += " binary"
3904 if build_opts.fetchonly:
3905 action_desc = "Fetching"
3907 msg = "%s (%s of %s) %s" % \
3909 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3910 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3911 colorize("GOOD", pkg.cpv))
# Annotate packages that come from an overlay rather than PORTDIR.
3913 portdb = pkg.root_config.trees["porttree"].dbapi
3914 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3915 if portdir_repo_name:
3916 pkg_repo_name = pkg.metadata.get("repository")
3917 if pkg_repo_name != portdir_repo_name:
3918 if not pkg_repo_name:
3919 pkg_repo_name = "unknown repo"
3920 msg += " from %s" % pkg_repo_name
3923 msg += " %s %s" % (preposition, pkg.root)
3925 if not build_opts.pretend:
3926 self.statusMessage(msg)
3927 logger.log(" >>> emerge (%s of %s) %s to %s" % \
3928 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Dispatch on package type: source build vs. prebuilt binary.
3930 if pkg.type_name == "ebuild":
3932 build = EbuildBuild(args_set=args_set,
3933 background=self.background,
3934 config_pool=self.config_pool,
3935 find_blockers=find_blockers,
3936 ldpath_mtimes=ldpath_mtimes, logger=logger,
3937 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3938 prefetcher=self.prefetcher, scheduler=scheduler,
3939 settings=settings, world_atom=world_atom)
3941 self._install_task = build
3942 self._start_task(build, self._default_final_exit)
3945 elif pkg.type_name == "binary":
3947 binpkg = Binpkg(background=self.background,
3948 find_blockers=find_blockers,
3949 ldpath_mtimes=ldpath_mtimes, logger=logger,
3950 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3951 prefetcher=self.prefetcher, settings=settings,
3952 scheduler=scheduler, world_atom=world_atom)
3954 self._install_task = binpkg
3955 self._start_task(binpkg, self._default_final_exit)
# poll()/wait() fragments: delegate to the active install task.
3959 self._install_task.poll()
3960 return self.returncode
3963 self._install_task.wait()
3964 return self.returncode
# merge() fragment: uninstall operations run PackageUninstall here;
# otherwise the install is delegated to the task created in _start().
3969 build_opts = self.build_opts
3970 find_blockers = self.find_blockers
3971 logger = self.logger
3972 mtimedb = self.mtimedb
3973 pkg_count = self.pkg_count
3974 prefetcher = self.prefetcher
3975 scheduler = self.scheduler
3976 settings = self.settings
3977 world_atom = self.world_atom
3978 ldpath_mtimes = mtimedb["ldpath"]
3981 if not (build_opts.buildpkgonly or \
3982 build_opts.fetchonly or build_opts.pretend):
3984 uninstall = PackageUninstall(background=self.background,
3985 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3986 pkg=pkg, scheduler=scheduler, settings=settings)
3989 retval = uninstall.wait()
3990 if retval != os.EX_OK:
3994 if build_opts.fetchonly or \
3995 build_opts.buildpkgonly:
3996 return self.returncode
3998 retval = self._install_task.install()
# Thin synchronous wrapper around MergeListItem.merge(): prints the
# "Installing/Uninstalling ..." status line and records the merge's
# return code. Interior lines (def header, operation check) are missing
# from this numbered listing.
4001 class PackageMerge(AsynchronousTask):
4003 TODO: Implement asynchronous merge so that the scheduler can
4004 run while a merge is executing.
4007 __slots__ = ("merge",)
4011 pkg = self.merge.pkg
4012 pkg_count = self.merge.pkg_count
# presumably branches on pkg.operation == "uninstall" — the condition
# line falls in a listing gap; TODO confirm against upstream.
4015 action_desc = "Uninstalling"
4016 preposition = "from"
4018 action_desc = "Installing"
4021 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
4024 msg += " %s %s" % (preposition, pkg.root)
4026 if not self.merge.build_opts.fetchonly and \
4027 not self.merge.build_opts.pretend and \
4028 not self.merge.build_opts.buildpkgonly:
4029 self.merge.statusMessage(msg)
4031 self.returncode = self.merge.merge()
# Base class for command-line dependency arguments (atoms, packages, sets).
# Lines 4036 (presumably `self.arg = arg` — __str__ at 4040 reads it) and
# 4038-4039 (presumably the __str__ def header) are missing from this
# numbered listing.
4034 class DependencyArg(object):
4035 def __init__(self, arg=None, root_config=None):
4037 self.root_config = root_config
4040 return str(self.arg)
# A dependency argument given as a single atom; normalizes it to a
# portage.dep.Atom and exposes a one-atom `set`. Line 4045 (presumably
# `self.atom = atom`) is missing from this numbered listing.
4042 class AtomArg(DependencyArg):
4043 def __init__(self, atom=None, **kwargs):
4044 DependencyArg.__init__(self, **kwargs)
4046 if not isinstance(self.atom, portage.dep.Atom):
4047 self.atom = portage.dep.Atom(self.atom)
4048 self.set = (self.atom, )
class PackageArg(DependencyArg):
    """A dependency argument that wraps an already-resolved Package.

    The package's exact cpv becomes an "=" atom, and the argument
    behaves like a one-atom set.
    """

    def __init__(self, package=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.package = package
        exact_atom = portage.dep.Atom("=%s" % package.cpv)
        self.atom = exact_atom
        self.set = (exact_atom,)
# A dependency argument naming a package set ("@world" style); strips the
# SETPREFIX to get the bare set name. Line 4060 (presumably `self.set = set`)
# is missing from this numbered listing.
4057 class SetArg(DependencyArg):
4058 def __init__(self, set=None, **kwargs):
4059 DependencyArg.__init__(self, **kwargs)
4061 self.name = self.arg[len(SETPREFIX):]
# Value object describing one dependency edge; defaults priority to a fresh
# DepPriority. The body of the depth-default branch (line 4071, presumably
# `self.depth = 0`) is missing from this numbered listing.
4063 class Dependency(SlotObject):
4064 __slots__ = ("atom", "blocker", "depth",
4065 "parent", "onlydeps", "priority", "root")
4066 def __init__(self, **kwargs):
4067 SlotObject.__init__(self, **kwargs)
4068 if self.priority is None:
4069 self.priority = DepPriority()
4070 if self.depth is None:
4073 class BlockerCache(portage.cache.mappings.MutableMapping):
4074 """This caches blockers of installed packages so that dep_check does not
4075 have to be done for every single installed package on every invocation of
4076 emerge. The cache is invalidated whenever it is detected that something
4077 has changed that might alter the results of dep_check() calls:
4078 1) the set of installed packages (including COUNTER) has changed
4079 2) the old-style virtuals have changed
4082 # Number of uncached packages to trigger cache update, since
4083 # it's wasteful to update it for every vdb change.
4084 _cache_threshold = 5
# Per-cpv cache record: the vdb COUNTER at caching time plus the blocker
# atoms. Line 4092 (presumably `self.atoms = atoms`) is missing from this
# numbered listing.
4086 class BlockerData(object):
4088 __slots__ = ("__weakref__", "atoms", "counter")
4090 def __init__(self, counter, atoms):
4091 self.counter = counter
# Load and validate the pickled blocker cache ("vdb_blockers.pickle" under
# CACHE_PATH). Any structural corruption discards individual entries or the
# whole cache. Interior lines (try headers, tuple unpacking, resets) are
# missing from this numbered listing.
4094 def __init__(self, myroot, vardb):
4096 self._virtuals = vardb.settings.getvirtuals()
4097 self._cache_filename = os.path.join(myroot,
4098 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4099 self._cache_version = "1"
4100 self._cache_data = None
4101 self._modified = set()
4106 f = open(self._cache_filename, mode='rb')
4107 mypickle = pickle.Unpickler(f)
# Disable unpickling of arbitrary globals as a hardening measure
# (find_global = None restricts what the pickle may reference).
4109 mypickle.find_global = None
4110 except AttributeError:
4111 # TODO: If py3k, override Unpickler.find_class().
4113 self._cache_data = mypickle.load()
4116 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4117 if isinstance(e, pickle.UnpicklingError):
4118 writemsg("!!! Error loading '%s': %s\n" % \
4119 (self._cache_filename, str(e)), noiselevel=-1)
4122 cache_valid = self._cache_data and \
4123 isinstance(self._cache_data, dict) and \
4124 self._cache_data.get("version") == self._cache_version and \
4125 isinstance(self._cache_data.get("blockers"), dict)
4127 # Validate all the atoms and counters so that
4128 # corruption is detected as soon as possible.
4129 invalid_items = set()
4130 for k, v in self._cache_data["blockers"].iteritems():
4131 if not isinstance(k, basestring):
4132 invalid_items.add(k)
4135 if portage.catpkgsplit(k) is None:
4136 invalid_items.add(k)
4138 except portage.exception.InvalidData:
4139 invalid_items.add(k)
4141 if not isinstance(v, tuple) or \
4143 invalid_items.add(k)
4146 if not isinstance(counter, (int, long)):
4147 invalid_items.add(k)
4149 if not isinstance(atoms, (list, tuple)):
4150 invalid_items.add(k)
4152 invalid_atom = False
4154 if not isinstance(atom, basestring):
4157 if atom[:1] != "!" or \
4158 not portage.isvalidatom(
4159 atom, allow_blockers=True):
4163 invalid_items.add(k)
4166 for k in invalid_items:
4167 del self._cache_data["blockers"][k]
4168 if not self._cache_data["blockers"]:
# Cache unusable: start over with an empty structure of the current
# version, keyed by the current virtuals.
4172 self._cache_data = {"version":self._cache_version}
4173 self._cache_data["blockers"] = {}
4174 self._cache_data["virtuals"] = self._virtuals
4175 self._modified.clear()
# flush(): the def header falls in a listing gap; writes are attempted only
# once _cache_threshold entries have been modified.
4178 """If the current user has permission and the internal blocker cache
4179 has been updated, save it to disk and mark it unmodified. This is called
4180 by emerge after it has processed blockers for all installed packages.
4181 Currently, the cache is only written if the user has superuser
4182 privileges (since that's required to obtain a lock), but all users
4183 have read access and benefit from faster blocker lookups (as long as
4184 the entire cache is still valid). The cache is stored as a pickled
4185 dict object with the following format:
4189 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4190 "virtuals" : vardb.settings.getvirtuals()
4193 if len(self._modified) >= self._cache_threshold and \
4196 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4197 pickle.dump(self._cache_data, f, -1)
4199 portage.util.apply_secpass_permissions(
4200 self._cache_filename, gid=portage.portage_gid, mode=0644)
# Write failures are tolerated: the cache is a best-effort optimization.
4201 except (IOError, OSError), e:
4203 self._modified.clear()
# Atoms are stringified so only plain strings (not Atom instances) are
# pickled into the on-disk cache.
4205 def __setitem__(self, cpv, blocker_data):
4207 Update the cache and mark it as modified for a future call to
4210 @param cpv: Package for which to cache blockers.
4212 @param blocker_data: An object with counter and atoms attributes.
4213 @type blocker_data: BlockerData
4215 self._cache_data["blockers"][cpv] = \
4216 (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4217 self._modified.add(cpv)
# __iter__() fragment (def header is missing from this numbered listing):
# iterate the cached cpv keys, tolerating a None _cache_data.
4220 if self._cache_data is None:
4221 # triggered by python-trace
4223 return iter(self._cache_data["blockers"])
def __delitem__(self, cpv):
    """Drop the cached blocker entry for *cpv* (KeyError if absent)."""
    self._cache_data["blockers"].pop(cpv)
# Rehydrate the stored (counter, atoms) tuple into a BlockerData instance.
4228 def __getitem__(self, cpv):
4231 @returns: An object with counter and atoms attributes.
4233 return self.BlockerData(*self._cache_data["blockers"][cpv])
# Computes which installed packages block (or are blocked by) a new
# package, using a lazily-built FakeVartree. Interior lines (closing
# braces of the trees dict around 4254-4255) are missing from this
# numbered listing.
4235 class BlockerDB(object):
4237 def __init__(self, root_config):
4238 self._root_config = root_config
4239 self._vartree = root_config.trees["vartree"]
4240 self._portdb = root_config.trees["porttree"].dbapi
4242 self._dep_check_trees = None
4243 self._fake_vartree = None
# Build the FakeVartree on first use; afterwards just resync it so its
# view of the vdb stays current.
4245 def _get_fake_vartree(self, acquire_lock=0):
4246 fake_vartree = self._fake_vartree
4247 if fake_vartree is None:
4248 fake_vartree = FakeVartree(self._root_config,
4249 acquire_lock=acquire_lock)
4250 self._fake_vartree = fake_vartree
4251 self._dep_check_trees = { self._vartree.root : {
4252 "porttree" : fake_vartree,
4253 "vartree" : fake_vartree,
4256 fake_vartree.sync(acquire_lock=acquire_lock)
# Return the set of installed packages involved in blocker conflicts with
# new_pkg, checking both directions (installed blocks new, new blocks
# installed). Uses BlockerCache keyed by vdb COUNTER to avoid repeated
# dep_check() calls. Interior lines (try/finally headers, "if not success"
# branches, dbapi_args_mapped, blocker_atoms init) are missing from this
# numbered listing.
4259 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4260 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4261 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4262 settings = self._vartree.settings
4263 stale_cache = set(blocker_cache)
4264 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4265 dep_check_trees = self._dep_check_trees
4266 vardb = fake_vartree.dbapi
4267 installed_pkgs = list(vardb)
# Refresh the per-package blocker cache; a COUNTER mismatch means the
# cached entry is stale and must be recomputed.
4269 for inst_pkg in installed_pkgs:
4270 stale_cache.discard(inst_pkg.cpv)
4271 cached_blockers = blocker_cache.get(inst_pkg.cpv)
4272 if cached_blockers is not None and \
4273 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4274 cached_blockers = None
4275 if cached_blockers is not None:
4276 blocker_atoms = cached_blockers.atoms
4278 # Use aux_get() to trigger FakeVartree global
4279 # updates on *DEPEND when appropriate.
4280 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# dep_check strictness is relaxed around the call and restored after
# (the try/finally lines fall in listing gaps).
4282 portage.dep._dep_check_strict = False
4283 success, atoms = portage.dep_check(depstr,
4284 vardb, settings, myuse=inst_pkg.use.enabled,
4285 trees=dep_check_trees, myroot=inst_pkg.root)
4287 portage.dep._dep_check_strict = True
4289 pkg_location = os.path.join(inst_pkg.root,
4290 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4291 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4292 (pkg_location, atoms), noiselevel=-1)
# Only "!" (blocker) atoms are cached, sorted for stable pickles.
4295 blocker_atoms = [atom for atom in atoms \
4296 if atom.startswith("!")]
4297 blocker_atoms.sort()
4298 counter = long(inst_pkg.metadata["COUNTER"])
4299 blocker_cache[inst_pkg.cpv] = \
4300 blocker_cache.BlockerData(counter, blocker_atoms)
4301 for cpv in stale_cache:
4302 del blocker_cache[cpv]
4303 blocker_cache.flush()
# Direction 1: does any installed package block new_pkg?
4305 blocker_parents = digraph()
4307 for pkg in installed_pkgs:
4308 for blocker_atom in blocker_cache[pkg.cpv].atoms:
4309 blocker_atom = blocker_atom.lstrip("!")
4310 blocker_atoms.append(blocker_atom)
4311 blocker_parents.add(blocker_atom, pkg)
4313 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4314 blocking_pkgs = set()
4315 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4316 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4318 # Check for blockers in the other direction.
4319 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4321 portage.dep._dep_check_strict = False
4322 success, atoms = portage.dep_check(depstr,
4323 vardb, settings, myuse=new_pkg.use.enabled,
4324 trees=dep_check_trees, myroot=new_pkg.root)
4326 portage.dep._dep_check_strict = True
4328 # We should never get this far with invalid deps.
4329 show_invalid_depstring_notice(new_pkg, depstr, atoms)
4332 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4335 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
# An installed package matched by new_pkg's blocker atoms is blocked.
4336 for inst_pkg in installed_pkgs:
4338 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4339 except (portage.exception.InvalidDependString, StopIteration):
4341 blocking_pkgs.add(inst_pkg)
4343 return blocking_pkgs
# Print a wrapped error explaining a corrupt dependency specification;
# advice differs for installed ("nomerge") vs. new packages. The `msg = []`
# initialization and the else header fall in listing gaps.
4345 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4347 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4348 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4349 p_type, p_root, p_key, p_status = parent_node
4351 if p_status == "nomerge":
4352 category, pf = portage.catsplit(p_key)
4353 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4354 msg.append("Portage is unable to process the dependencies of the ")
4355 msg.append("'%s' package. " % p_key)
4356 msg.append("In order to correct this problem, the package ")
4357 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4358 msg.append("As a temporary workaround, the --nodeps option can ")
4359 msg.append("be used to ignore all dependencies. For reference, ")
4360 msg.append("the problematic dependencies can be found in the ")
4361 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4363 msg.append("This package can not be installed. ")
4364 msg.append("Please notify the '%s' package maintainer " % p_key)
4365 msg.append("about this problem.")
4367 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4368 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
# Head of PackageVirtualDbapi: __init__ and fragments of clear()/copy()/
# __iter__()/__contains__()/get(). Interior lines (def headers for clear
# and copy, _cp_map/_cpv_map initialization, return statements) are
# missing from this numbered listing.
4370 class PackageVirtualDbapi(portage.dbapi):
4372 A dbapi-like interface class that represents the state of the installed
4373 package database as new packages are installed, replacing any packages
4374 that previously existed in the same slot. The main difference between
4375 this class and fakedbapi is that this one uses Package instances
4376 internally (passed in via cpv_inject() and cpv_remove() calls).
4378 def __init__(self, settings):
4379 portage.dbapi.__init__(self)
4380 self.settings = settings
4381 self._match_cache = {}
# clear() fragment: empty both maps in place.
4387 Remove all packages.
4391 self._cp_map.clear()
4392 self._cpv_map.clear()
# copy() fragment: shallow-copy the maps, with per-key list copies of
# _cp_map values so the copy's lists can be mutated independently.
4395 obj = PackageVirtualDbapi(self.settings)
4396 obj._match_cache = self._match_cache.copy()
4397 obj._cp_map = self._cp_map.copy()
4398 for k, v in obj._cp_map.iteritems():
4399 obj._cp_map[k] = v[:]
4400 obj._cpv_map = self._cpv_map.copy()
4404 return self._cpv_map.itervalues()
# Identity-sensitive membership: matches on cpv plus further comparison
# (the continuation lines fall in listing gaps).
4406 def __contains__(self, item):
4407 existing = self._cpv_map.get(item.cpv)
4408 if existing is not None and \
# get() accepts either an object with a cpv attribute or a
# (type_name, root, cpv, operation) tuple key.
4413 def get(self, item, default=None):
4414 cpv = getattr(item, "cpv", None)
4418 type_name, root, cpv, operation = item
4420 existing = self._cpv_map.get(cpv)
4421 if existing is not None and \
def match_pkgs(self, atom):
    """Like match(), but return Package instances instead of cpv strings."""
    pkgs = []
    for cpv in self.match(atom):
        pkgs.append(self._cpv_map[cpv])
    return pkgs
def _clear_cache(self):
    """Invalidate memoized match results and the categories cache."""
    # The two resets are independent; order is irrelevant.
    if self._match_cache:
        self._match_cache = {}
    if self._categories is not None:
        self._categories = None
# Memoizing wrapper around portage.dbapi.match(); the cache-hit return
# line (4438, presumably returning a copy of the cached list) is missing
# from this numbered listing.
4435 def match(self, origdep, use_cache=1):
4436 result = self._match_cache.get(origdep)
4437 if result is not None:
4439 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4440 self._match_cache[origdep] = result
def cpv_exists(self, cpv):
    """Return True when *cpv* has an entry in this virtual vdb."""
    known = cpv in self._cpv_map
    return known
# cp_list() with memoization, plus cp_all()/cpv_all() fragments (their def
# headers and several returns fall in listing gaps).
4446 def cp_list(self, mycp, use_cache=1):
4447 cachelist = self._match_cache.get(mycp)
4448 # cp_list() doesn't expand old-style virtuals
4449 if cachelist and cachelist[0].startswith(mycp):
4451 cpv_list = self._cp_map.get(mycp)
4452 if cpv_list is None:
4455 cpv_list = [pkg.cpv for pkg in cpv_list]
4456 self._cpv_sort_ascending(cpv_list)
# Avoid caching empty results for virtual/* since those may later be
# satisfied by old-style virtual expansion elsewhere.
4457 if not (not cpv_list and mycp.startswith("virtual/")):
4458 self._match_cache[mycp] = cpv_list
4462 return list(self._cp_map)
4465 return list(self._cpv_map)
# cpv_inject(): add a Package, displacing any existing package with the
# same cpv or the same slot_atom. cpv_remove(): the inverse. Interior
# lines (cp_list creation, identity checks, appends, KeyError raise) are
# missing from this numbered listing.
4467 def cpv_inject(self, pkg):
4468 cp_list = self._cp_map.get(pkg.cp)
4471 self._cp_map[pkg.cp] = cp_list
4472 e_pkg = self._cpv_map.get(pkg.cpv)
4473 if e_pkg is not None:
4476 self.cpv_remove(e_pkg)
# Enforce one package per slot: evict any same-slot occupant.
4477 for e_pkg in cp_list:
4478 if e_pkg.slot_atom == pkg.slot_atom:
4481 self.cpv_remove(e_pkg)
4484 self._cpv_map[pkg.cpv] = pkg
4487 def cpv_remove(self, pkg):
4488 old_pkg = self._cpv_map.get(pkg.cpv)
4491 self._cp_map[pkg.cp].remove(pkg)
4492 del self._cpv_map[pkg.cpv]
def aux_get(self, cpv, wants):
    """Return metadata values for *cpv* in the order of *wants*.

    Missing keys yield the empty string; an unknown cpv raises KeyError.
    """
    pkg_metadata = self._cpv_map[cpv].metadata
    values = []
    for key in wants:
        values.append(pkg_metadata.get(key, ""))
    return values
def aux_update(self, cpv, values):
    """Merge *values* into the stored metadata of *cpv*."""
    pkg = self._cpv_map[cpv]
    pkg.metadata.update(values)
# depgraph: the dependency resolver. Shown here are the class attributes
# and (most of) __init__, which builds per-root FakeVartrees, a
# PackageVirtualDbapi modeling post-install vdb state, the graph/filtered
# tree structures used by dep_check(), and the many bookkeeping containers.
# Numerous interior lines (self.trees/self.roots init, loop headers,
# graph_tree creation, dbs list init, _sets init) are missing from this
# numbered listing.
4503 class depgraph(object):
4505 pkg_tree_map = RootConfig.pkg_tree_map
4507 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4509 def __init__(self, settings, trees, myopts, myparams, spinner):
4510 self.settings = settings
4511 self.target_root = settings["ROOT"]
4512 self.myopts = myopts
4513 self.myparams = myparams
4515 if settings.get("PORTAGE_DEBUG", "") == "1":
4517 self.spinner = spinner
4518 self._running_root = trees["/"]["root_config"]
4519 self._opts_no_restart = Scheduler._opts_no_restart
4520 self.pkgsettings = {}
4521 # Maps slot atom to package for each Package added to the graph.
4522 self._slot_pkg_map = {}
4523 # Maps nodes to the reasons they were selected for reinstallation.
4524 self._reinstall_nodes = {}
4527 self._trees_orig = trees
4529 # Contains a filtered view of preferred packages that are selected
4530 # from available repositories.
4531 self._filtered_trees = {}
4532 # Contains installed packages and new packages that have been added
4534 self._graph_trees = {}
4535 # All Package instances
4536 self._pkg_cache = {}
4537 for myroot in trees:
4538 self.trees[myroot] = {}
4539 # Create a RootConfig instance that references
4540 # the FakeVartree instead of the real one.
4541 self.roots[myroot] = RootConfig(
4542 trees[myroot]["vartree"].settings,
4544 trees[myroot]["root_config"].setconfig)
4545 for tree in ("porttree", "bintree"):
4546 self.trees[myroot][tree] = trees[myroot][tree]
4547 self.trees[myroot]["vartree"] = \
4548 FakeVartree(trees[myroot]["root_config"],
4549 pkg_cache=self._pkg_cache)
4550 self.pkgsettings[myroot] = portage.config(
4551 clone=self.trees[myroot]["vartree"].settings)
4552 self._slot_pkg_map[myroot] = {}
4553 vardb = self.trees[myroot]["vartree"].dbapi
4554 preload_installed_pkgs = "--nodeps" not in self.myopts and \
4555 "--buildpkgonly" not in self.myopts
4556 # This fakedbapi instance will model the state that the vdb will
4557 # have after new packages have been installed.
4558 fakedb = PackageVirtualDbapi(vardb.settings)
4559 if preload_installed_pkgs:
4561 self.spinner.update()
4562 # This triggers metadata updates via FakeVartree.
4563 vardb.aux_get(pkg.cpv, [])
4564 fakedb.cpv_inject(pkg)
4566 # Now that the vardb state is cached in our FakeVartree,
4567 # we won't be needing the real vartree cache for awhile.
4568 # To make some room on the heap, clear the vardbapi
4570 trees[myroot]["vartree"].dbapi._clear_cache()
4573 self.mydbapi[myroot] = fakedb
4576 graph_tree.dbapi = fakedb
4577 self._graph_trees[myroot] = {}
4578 self._filtered_trees[myroot] = {}
4579 # Substitute the graph tree for the vartree in dep_check() since we
4580 # want atom selections to be consistent with package selections
4581 # have already been made.
4582 self._graph_trees[myroot]["porttree"] = graph_tree
4583 self._graph_trees[myroot]["vartree"] = graph_tree
4584 def filtered_tree():
4586 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4587 self._filtered_trees[myroot]["porttree"] = filtered_tree
4589 # Passing in graph_tree as the vartree here could lead to better
4590 # atom selections in some cases by causing atoms for packages that
4591 # have been added to the graph to be preferred over other choices.
4592 # However, it can trigger atom selections that result in
4593 # unresolvable direct circular dependencies. For example, this
4594 # happens with gwydion-dylan which depends on either itself or
4595 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4596 # gwydion-dylan-bin needs to be selected in order to avoid a
4597 # an unresolvable direct circular dependency.
4599 # To solve the problem described above, pass in "graph_db" so that
4600 # packages that have been added to the graph are distinguishable
4601 # from other available packages and installed packages. Also, pass
4602 # the parent package into self._select_atoms() calls so that
4603 # unresolvable direct circular dependencies can be detected and
4604 # avoided when possible.
4605 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4606 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
# Build the ordered list of candidate databases per root:
# (db, pkg_type, built, installed, db_keys) tuples.
4609 portdb = self.trees[myroot]["porttree"].dbapi
4610 bindb = self.trees[myroot]["bintree"].dbapi
4611 vardb = self.trees[myroot]["vartree"].dbapi
4612 # (db, pkg_type, built, installed, db_keys)
4613 if "--usepkgonly" not in self.myopts:
4614 db_keys = list(portdb._aux_cache_keys)
4615 dbs.append((portdb, "ebuild", False, False, db_keys))
4616 if "--usepkg" in self.myopts:
4617 db_keys = list(bindb._aux_cache_keys)
4618 dbs.append((bindb, "binary", True, False, db_keys))
4619 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4620 dbs.append((vardb, "installed", True, True, db_keys))
4621 self._filtered_trees[myroot]["dbs"] = dbs
4622 if "--usepkg" in self.myopts:
4623 self.trees[myroot]["bintree"].populate(
4624 "--getbinpkg" in self.myopts,
4625 "--getbinpkgonly" in self.myopts)
4628 self.digraph=portage.digraph()
4629 # contains all sets added to the graph
4631 # contains atoms given as arguments
4632 self._sets["args"] = InternalPackageSet()
4633 # contains all atoms from all sets added to the graph, including
4634 # atoms given as arguments
4635 self._set_atoms = InternalPackageSet()
4636 self._atom_arg_map = {}
4637 # contains all nodes pulled in by self._set_atoms
4638 self._set_nodes = set()
4639 # Contains only Blocker -> Uninstall edges
4640 self._blocker_uninstalls = digraph()
4641 # Contains only Package -> Blocker edges
4642 self._blocker_parents = digraph()
4643 # Contains only irrelevant Package -> Blocker edges
4644 self._irrelevant_blockers = digraph()
4645 # Contains only unsolvable Package -> Blocker edges
4646 self._unsolvable_blockers = digraph()
4647 # Contains all Blocker -> Blocked Package edges
4648 self._blocked_pkgs = digraph()
4649 # Contains world packages that have been protected from
4650 # uninstallation but may not have been added to the graph
4651 # if the graph is not complete yet.
4652 self._blocked_world_pkgs = {}
4653 self._slot_collision_info = {}
4654 # Slot collision nodes are not allowed to block other packages since
4655 # blocker validation is only able to account for one package per slot.
4656 self._slot_collision_nodes = set()
4657 self._parent_atoms = {}
4658 self._slot_conflict_parent_atoms = set()
4659 self._serialized_tasks_cache = None
4660 self._scheduler_graph = None
4661 self._displayed_list = None
4662 self._pprovided_args = []
4663 self._missing_args = []
4664 self._masked_installed = set()
4665 self._unsatisfied_deps_for_display = []
4666 self._unsatisfied_blockers_for_display = None
4667 self._circular_deps_for_display = None
4668 self._dep_stack = []
4669 self._unsatisfied_deps = []
4670 self._initially_unsatisfied_deps = []
4671 self._ignored_deps = []
4672 self._required_set_names = set(["system", "world"])
4673 self._select_atoms = self._select_atoms_highest_available
4674 self._select_package = self._select_pkg_highest_available
4675 self._highest_pkg_cache = {}
# Render the slot-conflict report: for each colliding slot, list the
# packages and a pruned set of the parents/atoms that pulled them in,
# then (if available) a generated explanation. Interior lines (early
# return, msg/indent init, break statements, explanations counter) are
# missing from this numbered listing.
4677 def _show_slot_collision_notice(self):
4678 """Show an informational message advising the user to mask one of
4679 the packages. In some cases it may be possible to resolve this
4680 automatically, but support for backtracking (removal nodes that have
4681 already been selected) will be required in order to handle all possible
4685 if not self._slot_collision_info:
4688 self._show_merge_list()
4691 msg.append("\n!!! Multiple package instances within a single " + \
4692 "package slot have been pulled\n")
4693 msg.append("!!! into the dependency graph, resulting" + \
4694 " in a slot conflict:\n\n")
4696 # Max number of parents shown, to avoid flooding the display.
4698 explanation_columns = 70
4700 for (slot_atom, root), slot_nodes \
4701 in self._slot_collision_info.iteritems():
4702 msg.append(str(slot_atom))
4705 for node in slot_nodes:
4707 msg.append(str(node))
4708 parent_atoms = self._parent_atoms.get(node)
4711 # Prefer conflict atoms over others.
4712 for parent_atom in parent_atoms:
4713 if len(pruned_list) >= max_parents:
4715 if parent_atom in self._slot_conflict_parent_atoms:
4716 pruned_list.add(parent_atom)
4718 # If this package was pulled in by conflict atoms then
4719 # show those alone since those are the most interesting.
4721 # When generating the pruned list, prefer instances
4722 # of DependencyArg over instances of Package.
4723 for parent_atom in parent_atoms:
4724 if len(pruned_list) >= max_parents:
4726 parent, atom = parent_atom
4727 if isinstance(parent, DependencyArg):
4728 pruned_list.add(parent_atom)
4729 # Prefer Packages instances that themselves have been
4730 # pulled into collision slots.
4731 for parent_atom in parent_atoms:
4732 if len(pruned_list) >= max_parents:
4734 parent, atom = parent_atom
4735 if isinstance(parent, Package) and \
4736 (parent.slot_atom, parent.root) \
4737 in self._slot_collision_info:
4738 pruned_list.add(parent_atom)
4739 for parent_atom in parent_atoms:
4740 if len(pruned_list) >= max_parents:
4742 pruned_list.add(parent_atom)
4743 omitted_parents = len(parent_atoms) - len(pruned_list)
4744 parent_atoms = pruned_list
4745 msg.append(" pulled in by\n")
4746 for parent_atom in parent_atoms:
4747 parent, atom = parent_atom
4748 msg.append(2*indent)
4749 if isinstance(parent,
4750 (PackageArg, AtomArg)):
4751 # For PackageArg and AtomArg types, it's
4752 # redundant to display the atom attribute.
4753 msg.append(str(parent))
4755 # Display the specific atom from SetArg or
4757 msg.append("%s required by %s" % (atom, parent))
4760 msg.append(2*indent)
4761 msg.append("(and %d more)\n" % omitted_parents)
4763 msg.append(" (no parents)\n")
4765 explanation = self._slot_conflict_explanation(slot_nodes)
4768 msg.append(indent + "Explanation:\n\n")
4769 for line in textwrap.wrap(explanation, explanation_columns):
4770 msg.append(2*indent + line + "\n")
4773 sys.stderr.write("".join(msg))
# General masking advice is only printed when some conflicts lacked a
# specific explanation (unless --quiet).
4776 explanations_for_all = explanations == len(self._slot_collision_info)
4778 if explanations_for_all or "--quiet" in self.myopts:
4782 msg.append("It may be possible to solve this problem ")
4783 msg.append("by using package.mask to prevent one of ")
4784 msg.append("those packages from being selected. ")
4785 msg.append("However, it is also possible that conflicting ")
4786 msg.append("dependencies exist such that they are impossible to ")
4787 msg.append("satisfy simultaneously. If such a conflict exists in ")
4788 msg.append("the dependencies of two different packages, then those ")
4789 msg.append("packages can not be installed simultaneously.")
4791 from formatter import AbstractFormatter, DumbWriter
4792 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4794 f.add_flowing_data(x)
4798 msg.append("For more information, see MASKED PACKAGES ")
4799 msg.append("section in the emerge man page or refer ")
4800 msg.append("to the Gentoo Handbook.")
4802 f.add_flowing_data(x)
# Generate a human-readable suggestion for a two-package USE-dep slot
# conflict (cases 1 and 2 of the docstring). Interior lines (returns,
# matched_node init, atom.use checks, continue/break statements) are
# missing from this numbered listing.
4806 def _slot_conflict_explanation(self, slot_nodes):
4808 When a slot conflict occurs due to USE deps, there are a few
4809 different cases to consider:
4811 1) New USE are correctly set but --newuse wasn't requested so an
4812 installed package with incorrect USE happened to get pulled
4813 into graph before the new one.
4815 2) New USE are incorrectly set but an installed package has correct
4816 USE so it got pulled into the graph, and a new instance also got
4817 pulled in due to --newuse or an upgrade.
4819 3) Multiple USE deps exist that can't be satisfied simultaneously,
4820 and multiple package instances got pulled into the same slot to
4821 satisfy the conflicting deps.
4823 Currently, explanations and suggested courses of action are generated
4824 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4827 if len(slot_nodes) != 2:
4828 # Suggestions are only implemented for
4829 # conflicts between two packages.
4832 all_conflict_atoms = self._slot_conflict_parent_atoms
4834 matched_atoms = None
4835 unmatched_node = None
# Classify the two nodes: exactly one must be matched by all conflict
# atoms for a suggestion to be produced.
4836 for node in slot_nodes:
4837 parent_atoms = self._parent_atoms.get(node)
4838 if not parent_atoms:
4839 # Normally, there are always parent atoms. If there are
4840 # none then something unexpected is happening and there's
4841 # currently no suggestion for this case.
4843 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4844 for parent_atom in conflict_atoms:
4845 parent, atom = parent_atom
4847 # Suggestions are currently only implemented for cases
4848 # in which all conflict atoms have USE deps.
4851 if matched_node is not None:
4852 # If conflict atoms match multiple nodes
4853 # then there's no suggestion.
4856 matched_atoms = conflict_atoms
4858 if unmatched_node is not None:
4859 # Neither node is matched by conflict atoms, and
4860 # there is no suggestion for this case.
4862 unmatched_node = node
4864 if matched_node is None or unmatched_node is None:
4865 # This shouldn't happen.
# Case 1: the conflict atoms match the new package, so the installed
# one has stale USE.
4868 if unmatched_node.installed and not matched_node.installed:
4869 return "New USE are correctly set, but --newuse wasn't" + \
4870 " requested, so an installed package with incorrect USE " + \
4871 "happened to get pulled into the dependency graph. " + \
4872 "In order to solve " + \
4873 "this, either specify the --newuse option or explicitly " + \
4874 " reinstall '%s'." % matched_node.slot_atom
# Case 2: the conflict atoms match the installed package, so the new
# package's USE must be adjusted to satisfy the listed atoms.
4876 if matched_node.installed and not unmatched_node.installed:
4877 atoms = sorted(set(atom for parent, atom in matched_atoms))
4878 explanation = ("New USE for '%s' are incorrectly set. " + \
4879 "In order to solve this, adjust USE to satisfy '%s'") % \
4880 (matched_node.slot_atom, atoms[0])
4882 for atom in atoms[1:-1]:
4883 explanation += ", '%s'" % (atom,)
4886 explanation += " and '%s'" % (atoms[-1],)
# For every recorded slot collision, find the specific (parent, atom)
# pairs that only match a subset of the packages pulled into the slot,
# and record them in self._slot_conflict_parent_atoms for later display.
# NOTE(review): sampled listing -- original lines 4893, 4897, 4900, 4905,
# 4907, 4915 and 4923 are absent (likely docstring delimiters, blanks,
# and continue statements).
4892 def _process_slot_conflicts(self):
4894 Process slot conflict data to identify specific atoms which
4895 lead to conflict. These atoms only match a subset of the
4896 packages that have been pulled into a given slot.
4898 for (slot_atom, root), slot_nodes \
4899 in self._slot_collision_info.iteritems():
# First pass: gather the union of all parent atoms over every package
# occupying this slot.
4901 all_parent_atoms = set()
4902 for pkg in slot_nodes:
4903 parent_atoms = self._parent_atoms.get(pkg)
4904 if not parent_atoms:
4906 all_parent_atoms.update(parent_atoms)
# Second pass: any parent atom that fails to match some package in the
# slot is a conflict atom; if it *does* match this pkg, also attribute
# it to pkg's own parent-atom set.
4908 for pkg in slot_nodes:
4909 parent_atoms = self._parent_atoms.get(pkg)
4910 if parent_atoms is None:
4911 parent_atoms = set()
4912 self._parent_atoms[pkg] = parent_atoms
4913 for parent_atom in all_parent_atoms:
4914 if parent_atom in parent_atoms:
4916 # Use package set for matching since it will match via
4917 # PROVIDE when necessary, while match_from_list does not.
4918 parent, atom = parent_atom
4919 atom_set = InternalPackageSet(
4920 initial_atoms=(atom,))
4921 if atom_set.findAtomForPackage(pkg):
4922 parent_atoms.add(parent_atom)
4924 self._slot_conflict_parent_atoms.add(parent_atom)
# Decide whether USE-flag changes warrant a reinstall.
# Params: forced_flags -- flags forced by the profile (excluded from the
#   --newuse comparison); orig_use/orig_iuse -- USE/IUSE of the installed
#   instance; cur_use/cur_iuse -- USE/IUSE of the candidate instance.
# NOTE(review): sampled listing -- the actual `return flags` / `return
# None` statements fall in the missing lines 4935-4936 and 4940-4943.
4926 def _reinstall_for_flags(self, forced_flags,
4927 orig_use, orig_iuse, cur_use, cur_iuse):
4928 """Return a set of flags that trigger reinstallation, or None if there
4929 are no such flags."""
4930 if "--newuse" in self.myopts:
# --newuse: any change in IUSE membership (minus forced flags) or in
# the effective enabled-flag set triggers a rebuild.
4931 flags = set(orig_iuse.symmetric_difference(
4932 cur_iuse).difference(forced_flags))
4933 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4934 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only changes to flags actually enabled
# (IUSE intersected with USE) count.
4937 elif "changed-use" == self.myopts.get("--reinstall"):
4938 flags = orig_iuse.intersection(orig_use).symmetric_difference(
4939 cur_iuse.intersection(cur_use))
# Drain self._dep_stack, dispatching Package entries to _add_pkg_deps
# and other entries (Dependency objects) to _add_dep; propagate failure.
# NOTE(review): sampled listing -- the `while dep_stack:` loop header
# (line 4946) and the return statements (4952-4953, 4955-4957) are not
# visible here.
4944 def _create_graph(self, allow_unsatisfied=False):
4945 dep_stack = self._dep_stack
4947 self.spinner.update()
4948 dep = dep_stack.pop()
4949 if isinstance(dep, Package):
4950 if not self._add_pkg_deps(dep,
4951 allow_unsatisfied=allow_unsatisfied):
4954 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Resolve a single Dependency: register blockers, select a matching
# package, record unsatisfied deps (for later display or retry), discard
# ignorable satisfied deps, and otherwise hand off to _add_pkg.
# NOTE(review): sampled listing -- returns/continues and several
# condition lines (e.g. 4965, 4967, 4972, 4979, 4982, 4986, 4989, 4992,
# 5001, 5003, 5006, 5011-5015, 5017-5019) are missing from view.
4958 def _add_dep(self, dep, allow_unsatisfied=False):
4959 debug = "--debug" in self.myopts
4960 buildpkgonly = "--buildpkgonly" in self.myopts
4961 nodeps = "--nodeps" in self.myopts
4962 empty = "empty" in self.myparams
4963 deep = "deep" in self.myparams
# --update only influences depth <= 1 (direct args and their deps).
4964 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling: the `dep.blocker` test itself sits in the missing
# line 4965/4967 -- TODO confirm.
4966 if not buildpkgonly and \
4968 dep.parent not in self._slot_collision_nodes:
4969 if dep.parent.onlydeps:
4970 # It's safe to ignore blockers if the
4971 # parent is an --onlydeps node.
4973 # The blocker applies to the root where
4974 # the parent is or will be installed.
4975 blocker = Blocker(atom=dep.atom,
4976 eapi=dep.parent.metadata["EAPI"],
4977 root=dep.parent.root)
4978 self._blocker_parents.add(blocker, dep.parent)
# Pick the best visible package for this atom (existing_node is a
# previously-graphed package occupying the same slot, if any).
4980 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4981 onlydeps=dep.onlydeps)
4983 if dep.priority.optional:
4984 # This could be an unecessary build-time dep
4985 # pulled in by --with-bdeps=y.
4987 if allow_unsatisfied:
4988 self._unsatisfied_deps.append(dep)
4990 self._unsatisfied_deps_for_display.append(
4991 ((dep.root, dep.atom), {"myparent":dep.parent}))
4993 # In some cases, dep_check will return deps that shouldn't
4994 # be proccessed any further, so they are identified and
4995 # discarded here. Try to discard as few as possible since
4996 # discarded dependencies reduce the amount of information
4997 # available for optimization of merge order.
4998 if dep.priority.satisfied and \
4999 not dep_pkg.installed and \
5000 not (existing_node or empty or deep or update):
5002 if dep.root == self.target_root:
# A satisfied dep may still need processing if the package is
# matched by a command-line argument atom.
5004 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5005 except StopIteration:
5007 except portage.exception.InvalidDependString:
5008 if not dep_pkg.installed:
5009 # This shouldn't happen since the package
5010 # should have been masked.
# Remember ignored deps so they can be processed later if needed
# (e.g. when "deep" is enabled afterwards).
5013 self._ignored_deps.append(dep)
5016 if not self._add_pkg(dep_pkg, dep):
# Add one package node (and its parent edge) to the dependency graph,
# handling slot collisions, old-style virtual provides, and scheduling
# of the package for recursive dependency processing.
# NOTE(review): sampled listing -- many original lines are missing
# (e.g. 5021-5026, 5029, 5032, 5038-5039, 5046, 5050-5053, 5059-5061,
# 5075, 5078, 5090, 5094, 5099, 5103, 5107-5109, 5115-5116, 5123, 5127,
# 5132, 5136-5137, 5141, 5143, 5147-5150, 5152, 5159-5160, 5165, 5171,
# 5174, 5178, 5180-5183, 5186-5187), including returns and some
# conditions; comments here describe only the visible lines.
5020 def _add_pkg(self, pkg, dep):
5027 myparent = dep.parent
5028 priority = dep.priority
5030 if priority is None:
5031 priority = DepPriority()
# Lines 5033-5037 are docstring body text (delimiters in the gaps).
5033 Fills the digraph with nodes comprised of packages to merge.
5034 mybigkey is the package spec of the package to merge.
5035 myparent is the package depending on mybigkey ( or None )
5036 addme = Should we add this package to the digraph or are we just looking at it's deps?
5037 Think --onlydeps, we need to ignore packages in that case.
5040 #IUSE-aware emerge -> USE DEP aware depgraph
5041 #"no downgrade" emerge
5043 # Ensure that the dependencies of the same package
5044 # are never processed more than once.
5045 previously_added = pkg in self.digraph
5047 # select the correct /var database that we'll be checking against
5048 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5049 pkgsettings = self.pkgsettings[pkg.root]
# Collect command-line argument atoms matching this package (the `try:`
# sits in a gap).
5054 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5055 except portage.exception.InvalidDependString, e:
5056 if not pkg.installed:
5057 show_invalid_depstring_notice(
5058 pkg, pkg.metadata["PROVIDE"], str(e))
5062 if not pkg.onlydeps:
5063 if not pkg.installed and \
5064 "empty" not in self.myparams and \
5065 vardbapi.match(pkg.slot_atom):
5066 # Increase the priority of dependencies on packages that
5067 # are being rebuilt. This optimizes merge order so that
5068 # dependencies are rebuilt/updated as soon as possible,
5069 # which is needed especially when emerge is called by
5070 # revdep-rebuild since dependencies may be affected by ABI
5071 # breakage that has rendered them useless. Don't adjust
5072 # priority here when in "empty" mode since all packages
5073 # are being merged in that case.
5074 priority.rebuild = True
# Slot handling: check whether another package already occupies this
# package's slot in the graph.
5076 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5077 slot_collision = False
5079 existing_node_matches = pkg.cpv == existing_node.cpv
5080 if existing_node_matches and \
5081 pkg != existing_node and \
5082 dep.atom is not None:
5083 # Use package set for matching since it will match via
5084 # PROVIDE when necessary, while match_from_list does not.
5085 atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5086 if not atom_set.findAtomForPackage(existing_node):
5087 existing_node_matches = False
5088 if existing_node_matches:
5089 # The existing node can be reused.
5091 for parent_atom in arg_atoms:
5092 parent, atom = parent_atom
5093 self.digraph.add(existing_node, parent,
5095 self._add_parent_atom(existing_node, parent_atom)
5096 # If a direct circular dependency is not an unsatisfied
5097 # buildtime dependency then drop it here since otherwise
5098 # it can skew the merge order calculation in an unwanted
5100 if existing_node != myparent or \
5101 (priority.buildtime and not priority.satisfied):
5102 self.digraph.addnode(existing_node, myparent,
5104 if dep.atom is not None and dep.parent is not None:
5105 self._add_parent_atom(existing_node,
5106 (dep.parent, dep.atom))
5110 # A slot collision has occurred. Sometimes this coincides
5111 # with unresolvable blockers, so the slot collision will be
5112 # shown later if there are no unresolvable blockers.
5113 self._add_slot_conflict(pkg)
5114 slot_collision = True
# Even on collision, the node is partially added so that display and
# --fetchonly keep working.
5117 # Now add this node to the graph so that self.display()
5118 # can show use flags and --tree portage.output. This node is
5119 # only being partially added to the graph. It must not be
5120 # allowed to interfere with the other nodes that have been
5121 # added. Do not overwrite data for existing nodes in
5122 # self.mydbapi since that data will be used for blocker
5124 # Even though the graph is now invalid, continue to process
5125 # dependencies so that things like --fetchonly can still
5126 # function despite collisions.
5128 elif not previously_added:
5129 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5130 self.mydbapi[pkg.root].cpv_inject(pkg)
5131 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5133 if not pkg.installed:
5134 # Allow this package to satisfy old-style virtuals in case it
5135 # doesn't already. Any pre-existing providers will be preferred
5138 pkgsettings.setinst(pkg.cpv, pkg.metadata)
5139 # For consistency, also update the global virtuals.
5140 settings = self.roots[pkg.root].settings
5142 settings.setinst(pkg.cpv, pkg.metadata)
5144 except portage.exception.InvalidDependString, e:
5145 show_invalid_depstring_notice(
5146 pkg, pkg.metadata["PROVIDE"], str(e))
5151 self._set_nodes.add(pkg)
5153 # Do this even when addme is False (--onlydeps) so that the
5154 # parent/child relationship is always known in case
5155 # self._show_slot_collision_notice() needs to be called later.
5156 self.digraph.add(pkg, myparent, priority=priority)
5157 if dep.atom is not None and dep.parent is not None:
5158 self._add_parent_atom(pkg, (dep.parent, dep.atom))
5161 for parent_atom in arg_atoms:
5162 parent, atom = parent_atom
5163 self.digraph.add(pkg, parent, priority=priority)
5164 self._add_parent_atom(pkg, parent_atom)
5166 """ This section determines whether we go deeper into dependencies or not.
5167 We want to go deeper on a few occasions:
5168 Installing package A, we need to make sure package A's deps are met.
5169 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5170 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
# Installed packages are only recursed into under "deep"; otherwise
# their deps are queued on self._ignored_deps for possible later use.
5172 dep_stack = self._dep_stack
5173 if "recurse" not in self.myparams:
5175 elif pkg.installed and \
5176 "deep" not in self.myparams:
5177 dep_stack = self._ignored_deps
5179 self.spinner.update()
5184 if not previously_added:
5185 dep_stack.append(pkg)
5188 def _add_parent_atom(self, pkg, parent_atom):
5189 parent_atoms = self._parent_atoms.get(pkg)
5190 if parent_atoms is None:
5191 parent_atoms = set()
5192 self._parent_atoms[pkg] = parent_atoms
5193 parent_atoms.add(parent_atom)
# Record pkg as a participant in a slot collision: mark the node and
# group it (together with the package currently occupying the slot)
# under the (slot_atom, root) key of self._slot_collision_info.
# NOTE(review): original line 5200 is missing -- presumably it creates a
# fresh set when slot_nodes is None (TODO confirm).
5195 def _add_slot_conflict(self, pkg):
5196 self._slot_collision_nodes.add(pkg)
5197 slot_key = (pkg.slot_atom, pkg.root)
5198 slot_nodes = self._slot_collision_info.get(slot_key)
5199 if slot_nodes is None:
# Include the package already occupying the slot, not just pkg itself.
5201 slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5202 self._slot_collision_info[slot_key] = slot_nodes
# Parse pkg's DEPEND/RDEPEND/PDEPEND strings, select concrete atoms for
# each, and feed a Dependency per atom back through _add_dep.
# NOTE(review): sampled listing -- missing lines include 5206, 5208-5209,
# 5212, 5215-5216, 5218, 5220, 5228, 5237, 5239, 5242, 5245-5246,
# 5252-5253, 5256, 5258-5261, 5266, 5272-5273, 5275, 5277-5278, 5280,
# 5284, 5289-5290, 5294, 5296-5298, 5301, 5304, 5308, 5318-5320
# (returns, loop headers, try/except framing, and jbigkey/mykey setup).
5205 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5207 mytype = pkg.type_name
5210 metadata = pkg.metadata
5211 myuse = pkg.use.enabled
5213 depth = pkg.depth + 1
5214 removal_action = "remove" in self.myparams
# Copy the three dep strings out of metadata (loop header in a gap).
5217 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5219 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: runtime deps are irrelevant since
# nothing will actually be merged.
5221 if not pkg.built and \
5222 "--buildpkgonly" in self.myopts and \
5223 "deep" not in self.myparams and \
5224 "empty" not in self.myparams:
5225 edepend["RDEPEND"] = ""
5226 edepend["PDEPEND"] = ""
5227 bdeps_optional = False
5229 if pkg.built and not removal_action:
5230 if self.myopts.get("--with-bdeps", "n") == "y":
5231 # Pull in build time deps as requested, but marked them as
5232 # "optional" since they are not strictly required. This allows
5233 # more freedom in the merge order calculation for solving
5234 # circular dependencies. Don't convert to PDEPEND since that
5235 # could make --with-bdeps=y less effective if it is used to
5236 # adjust merge order to prevent built_with_use() calls from
5238 bdeps_optional = True
5240 # built packages do not have build time dependencies.
5241 edepend["DEPEND"] = ""
5243 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5244 edepend["DEPEND"] = ""
# Build-time deps resolve against "/", runtime deps against myroot.
5247 ("/", edepend["DEPEND"],
5248 self._priority(buildtime=(not bdeps_optional),
5249 optional=bdeps_optional)),
5250 (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5251 (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5254 debug = "--debug" in self.myopts
# Installed packages get lenient depstring parsing.
5255 strict = mytype != "installed"
5257 for dep_root, dep_string, dep_priority in deps:
5262 print "Parent: ", jbigkey
5263 print "Depstring:", dep_string
5264 print "Priority:", dep_priority
5265 vardb = self.roots[dep_root].trees["vartree"].dbapi
5267 selected_atoms = self._select_atoms(dep_root,
5268 dep_string, myuse=myuse, parent=pkg, strict=strict,
5269 priority=dep_priority)
5270 except portage.exception.InvalidDependString, e:
5271 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5274 print "Candidates:", selected_atoms
5276 for atom in selected_atoms:
5279 atom = portage.dep.Atom(atom)
5281 mypriority = dep_priority.copy()
# A non-blocker atom already matched by the vartree counts as
# satisfied for merge-order purposes.
5282 if not atom.blocker and vardb.match(atom):
5283 mypriority.satisfied = True
5285 if not self._add_dep(Dependency(atom=atom,
5286 blocker=atom.blocker, depth=depth, parent=pkg,
5287 priority=mypriority, root=dep_root),
5288 allow_unsatisfied=allow_unsatisfied):
5291 except portage.exception.InvalidAtom, e:
5292 show_invalid_depstring_notice(
5293 pkg, dep_string, str(e))
5295 if not pkg.installed:
5299 print "Exiting...", jbigkey
5300 except portage.exception.AmbiguousPackageName, e:
# Atom without a category matched several packages: report every
# candidate and how to fix it, per package type.
5302 portage.writemsg("\n\n!!! An atom in the dependencies " + \
5303 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5305 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
5306 portage.writemsg("\n", noiselevel=-1)
5307 if mytype == "binary":
5309 "!!! This binary package cannot be installed: '%s'\n" % \
5310 mykey, noiselevel=-1)
5311 elif mytype == "ebuild":
5312 portdb = self.roots[myroot].trees["porttree"].dbapi
5313 myebuild, mylocation = portdb.findname2(mykey)
5314 portage.writemsg("!!! This ebuild cannot be installed: " + \
5315 "'%s'\n" % myebuild, noiselevel=-1)
5316 portage.writemsg("!!! Please notify the package maintainer " + \
5317 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Construct a dep-priority object, choosing UnmergeDepPriority during a
# removal ("remove" in myparams) and DepPriority otherwise; kwargs are
# forwarded to the chosen constructor.
# NOTE(review): original line 5324 is missing -- presumably the `else:`
# introducing the DepPriority branch.
5321 def _priority(self, **kwargs):
5322 if "remove" in self.myparams:
5323 priority_constructor = UnmergeDepPriority
5325 priority_constructor = DepPriority
5326 return priority_constructor(**kwargs)
# Expand a category-less atom into full category/pn atoms by scanning
# every category of every configured db for a matching package name.
# NOTE(review): sampled listing -- lines 5329, 5334, 5336, 5340, 5342,
# 5346-5348 and 5352-5353 are missing (docstring delimiters, the
# categories-set setup, dedup/break logic, and the return statement).
5328 def _dep_expand(self, root_config, atom_without_category):
5330 @param root_config: a root config instance
5331 @type root_config: RootConfig
5332 @param atom_without_category: an atom without a category component
5333 @type atom_without_category: String
5335 @returns: a list of atoms containing categories (possibly empty)
# Parse the package-name part by temporarily inserting a "null" category.
5337 null_cp = portage.dep_getkey(insert_category_into_atom(
5338 atom_without_category, "null"))
5339 cat, atom_pn = portage.catsplit(null_cp)
5341 dbs = self._filtered_trees[root_config.root]["dbs"]
5343 for db, pkg_type, built, installed, db_keys in dbs:
5344 for cat in db.categories:
5345 if db.cp_list("%s/%s" % (cat, atom_pn)):
# Rebuild one full atom per discovered category.
5349 for cat in categories:
5350 deps.append(insert_category_into_atom(
5351 atom_without_category, cat))
# Report whether any configured db in the given root carries a package
# for atom_cp (used to detect new-style virtual providers).
# NOTE(review): the boolean return statements (original lines 5355,
# 5359-5362) are missing from this sampled listing.
5354 def _have_new_virt(self, root, atom_cp):
5356 for db, pkg_type, built, installed, db_keys in \
5357 self._filtered_trees[root]["dbs"]:
5358 if db.cp_list(atom_cp):
# Yield the (arg, atom) argument pairs whose atoms match pkg, skipping
# atoms superseded by new-style virtuals or by a higher slot.
# NOTE(review): sampled listing -- missing lines include 5366, 5373,
# 5376, 5379, 5383, 5386, 5388, 5391-5394 (returns/continues/breaks and
# the actual yield); comments below cover only the visible logic.
5363 def _iter_atoms_for_pkg(self, pkg):
5364 # TODO: add multiple $ROOT support
5365 if pkg.root != self.target_root:
5367 atom_arg_map = self._atom_arg_map
5368 root_config = self.roots[pkg.root]
5369 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5370 atom_cp = portage.dep_getkey(atom)
# Skip old-style virtual matches when a new-style virtual package
# exists for the same cp.
5371 if atom_cp != pkg.cp and \
5372 self._have_new_virt(pkg.root, atom_cp):
5374 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5375 visible_pkgs.reverse() # descending order
# Look for a strictly higher visible package in a *different* slot;
# if found (higher_slot), this atom presumably doesn't apply to pkg.
5377 for visible_pkg in visible_pkgs:
5378 if visible_pkg.cp != atom_cp:
5380 if pkg >= visible_pkg:
5381 # This is descending order, and we're not
5382 # interested in any versions <= pkg given.
5384 if pkg.slot_atom != visible_pkg.slot_atom:
5385 higher_slot = visible_pkg
5387 if higher_slot is not None:
5389 for arg in atom_arg_map[(atom, pkg.root)]:
5390 if isinstance(arg, PackageArg) and \
# Entry point that turns command-line arguments (.tbz2 files, .ebuild
# files, absolute paths, set names, and package atoms) into graph args,
# seeds the dependency graph, and returns (success, favorites).
# NOTE(review): sampled listing -- very many original lines are missing
# (loop headers such as `for x in myfiles:` at ~5413, try/except frames,
# else branches, returns); comments below annotate only visible lines.
5395 def select_files(self, myfiles):
5396 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5397 appropriate depgraph and return a favorite list."""
5398 debug = "--debug" in self.myopts
5399 root_config = self.roots[self.target_root]
5400 sets = root_config.sets
5401 getSetAtoms = root_config.setconfig.getSetAtoms
5403 myroot = self.target_root
5404 dbs = self._filtered_trees[myroot]["dbs"]
5405 vardb = self.trees[myroot]["vartree"].dbapi
5406 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5407 portdb = self.trees[myroot]["porttree"].dbapi
5408 bindb = self.trees[myroot]["bintree"].dbapi
5409 pkgsettings = self.pkgsettings[myroot]
5411 onlydeps = "--onlydeps" in self.myopts
# --- Per-argument classification (the `for x in myfiles:` header and
# the ".tbz2" branch test fall in gaps around 5413/5415) ---
5414 ext = os.path.splitext(x)[1]
# .tbz2 arguments: locate the file under PKGDIR if needed, verify it
# is the bintree's canonical copy, and wrap it as a PackageArg.
5416 if not os.path.exists(x):
5418 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5419 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5420 elif os.path.exists(
5421 os.path.join(pkgsettings["PKGDIR"], x)):
5422 x = os.path.join(pkgsettings["PKGDIR"], x)
5424 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5425 print "!!! Please ensure the tbz2 exists as specified.\n"
5426 return 0, myfavorites
5427 mytbz2=portage.xpak.tbz2(x)
5428 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5429 if os.path.realpath(x) != \
5430 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5431 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5432 return 0, myfavorites
5433 db_keys = list(bindb._aux_cache_keys)
5434 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5435 pkg = Package(type_name="binary", root_config=root_config,
5436 cpv=mykey, built=True, metadata=metadata,
5438 self._pkg_cache[pkg] = pkg
5439 args.append(PackageArg(arg=x, package=pkg,
5440 root_config=root_config))
# .ebuild arguments: derive cp/mykey from the path, validate the tree
# layout and visibility, then wrap as a PackageArg.
5441 elif ext==".ebuild":
5442 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5443 pkgdir = os.path.dirname(ebuild_path)
5444 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5445 cp = pkgdir[len(tree_root)+1:]
5446 e = portage.exception.PackageNotFound(
5447 ("%s is not in a valid portage tree " + \
5448 "hierarchy or does not exist") % x)
5449 if not portage.isvalidatom(cp):
5451 cat = portage.catsplit(cp)[0]
5452 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5453 if not portage.isvalidatom("="+mykey):
5455 ebuild_path = portdb.findname(mykey)
5457 if ebuild_path != os.path.join(os.path.realpath(tree_root),
5458 cp, os.path.basename(ebuild_path)):
5459 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5460 return 0, myfavorites
5461 if mykey not in portdb.xmatch(
5462 "match-visible", portage.dep_getkey(mykey)):
5463 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5464 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5465 print colorize("BAD", "*** page for details.")
5466 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5469 raise portage.exception.PackageNotFound(
5470 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5471 db_keys = list(portdb._aux_cache_keys)
5472 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5473 pkg = Package(type_name="ebuild", root_config=root_config,
5474 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5475 pkgsettings.setcpv(pkg)
5476 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5477 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5478 self._pkg_cache[pkg] = pkg
5479 args.append(PackageArg(arg=x, package=pkg,
5480 root_config=root_config))
# Absolute-path arguments: resolve the owning package(s) later in one
# batched iter_owners() pass.
5481 elif x.startswith(os.path.sep):
5482 if not x.startswith(myroot):
5483 portage.writemsg(("\n\n!!! '%s' does not start with" + \
5484 " $ROOT.\n") % x, noiselevel=-1)
5486 # Queue these up since it's most efficient to handle
5487 # multiple files in a single iter_owners() call.
5488 lookup_owners.append(x)
# Set arguments: "system"/"world" and SETPREFIX-prefixed names expand
# recursively into an InternalPackageSet wrapped as a SetArg.
5490 if x in ("system", "world"):
5492 if x.startswith(SETPREFIX):
5493 s = x[len(SETPREFIX):]
5495 raise portage.exception.PackageSetNotFound(s)
5498 # Recursively expand sets so that containment tests in
5499 # self._get_parent_sets() properly match atoms in nested
5500 # sets (like if world contains system).
5501 expanded_set = InternalPackageSet(
5502 initial_atoms=getSetAtoms(s))
5503 self._sets[s] = expanded_set
5504 args.append(SetArg(arg=x, set=expanded_set,
5505 root_config=root_config))
# Plain atom arguments: validate, then expand missing categories only
# when unavoidable.
5507 if not is_valid_package_atom(x):
5508 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5510 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5511 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5513 # Don't expand categories or old-style virtuals here unless
5514 # necessary. Expansion of old-style virtuals here causes at
5515 # least the following problems:
5516 # 1) It's more difficult to determine which set(s) an atom
5517 # came from, if any.
5518 # 2) It takes away freedom from the resolver to choose other
5519 # possible expansions when necessary.
5521 args.append(AtomArg(arg=x, atom=x,
5522 root_config=root_config))
5524 expanded_atoms = self._dep_expand(root_config, x)
5525 installed_cp_set = set()
5526 for atom in expanded_atoms:
5527 atom_cp = portage.dep_getkey(atom)
5528 if vardb.cp_list(atom_cp):
5529 installed_cp_set.add(atom_cp)
# Disambiguate in favor of the single already-installed cp, if any.
5530 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5531 installed_cp = iter(installed_cp_set).next()
5532 expanded_atoms = [atom for atom in expanded_atoms \
5533 if portage.dep_getkey(atom) == installed_cp]
5535 if len(expanded_atoms) > 1:
5538 ambiguous_package_name(x, expanded_atoms, root_config,
5539 self.spinner, self.myopts)
5540 return False, myfavorites
5542 atom = expanded_atoms[0]
5544 null_atom = insert_category_into_atom(x, "null")
5545 null_cp = portage.dep_getkey(null_atom)
5546 cat, atom_pn = portage.catsplit(null_cp)
5547 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5549 # Allow the depgraph to choose which virtual.
5550 atom = insert_category_into_atom(x, "virtual")
5552 atom = insert_category_into_atom(x, "null")
5554 args.append(AtomArg(arg=x, atom=atom,
5555 root_config=root_config))
# --- Resolve queued path arguments to owning packages ---
5559 search_for_multiple = False
5560 if len(lookup_owners) > 1:
5561 search_for_multiple = True
5563 for x in lookup_owners:
5564 if not search_for_multiple and os.path.isdir(x):
5565 search_for_multiple = True
5566 relative_paths.append(x[len(myroot):])
5569 for pkg, relative_path in \
5570 real_vardb._owners.iter_owners(relative_paths):
5571 owners.add(pkg.mycpv)
5572 if not search_for_multiple:
5576 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5577 "by any package.\n") % lookup_owners[0], noiselevel=-1)
5581 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5583 # portage now masks packages with missing slot, but it's
5584 # possible that one was installed by an older version
5585 atom = portage.cpv_getkey(cpv)
5587 atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5588 args.append(AtomArg(arg=atom, atom=atom,
5589 root_config=root_config))
# --- Greedy slot expansion for --update (two-pass, see comment) ---
5591 if "--update" in self.myopts:
5592 # In some cases, the greedy slots behavior can pull in a slot that
5593 # the user would want to uninstall due to it being blocked by a
5594 # newer version in a different slot. Therefore, it's necessary to
5595 # detect and discard any that should be uninstalled. Each time
5596 # that arguments are updated, package selections are repeated in
5597 # order to ensure consistency with the current arguments:
5599 # 1) Initialize args
5600 # 2) Select packages and generate initial greedy atoms
5601 # 3) Update args with greedy atoms
5602 # 4) Select packages and generate greedy atoms again, while
5603 # accounting for any blockers between selected packages
5604 # 5) Update args with revised greedy atoms
5606 self._set_args(args)
5609 greedy_args.append(arg)
5610 if not isinstance(arg, AtomArg):
5612 for atom in self._greedy_slots(arg.root_config, arg.atom):
5614 AtomArg(arg=arg.arg, atom=atom,
5615 root_config=arg.root_config))
5617 self._set_args(greedy_args)
5620 # Revise greedy atoms, accounting for any blockers
5621 # between selected packages.
5622 revised_greedy_args = []
5624 revised_greedy_args.append(arg)
5625 if not isinstance(arg, AtomArg):
5627 for atom in self._greedy_slots(arg.root_config, arg.atom,
5628 blocker_lookahead=True):
5629 revised_greedy_args.append(
5630 AtomArg(arg=arg.arg, atom=atom,
5631 root_config=arg.root_config))
5632 args = revised_greedy_args
5633 del revised_greedy_args
5635 self._set_args(args)
# --- Collect favorites (atoms for Atom/Package args, names for sets) ---
5637 myfavorites = set(myfavorites)
5639 if isinstance(arg, (AtomArg, PackageArg)):
5640 myfavorites.add(arg.atom)
5641 elif isinstance(arg, SetArg):
5642 myfavorites.add(arg.arg)
5643 myfavorites = list(myfavorites)
5645 pprovideddict = pkgsettings.pprovideddict
5647 portage.writemsg("\n", noiselevel=-1)
5648 # Order needs to be preserved since a feature of --nodeps
5649 # is to allow the user to force a specific merge order.
# --- Seed the graph: one Dependency per (arg, atom) pair ---
5653 for atom in arg.set:
5654 self.spinner.update()
5655 dep = Dependency(atom=atom, onlydeps=onlydeps,
5656 root=myroot, parent=arg)
5657 atom_cp = portage.dep_getkey(atom)
5659 pprovided = pprovideddict.get(portage.dep_getkey(atom))
5660 if pprovided and portage.match_from_list(atom, pprovided):
5661 # A provided package has been specified on the command line.
5662 self._pprovided_args.append((arg, atom))
5664 if isinstance(arg, PackageArg):
5665 if not self._add_pkg(arg.package, dep) or \
5666 not self._create_graph():
5667 sys.stderr.write(("\n\n!!! Problem resolving " + \
5668 "dependencies for %s\n") % arg.arg)
5669 return 0, myfavorites
5672 portage.writemsg(" Arg: %s\n Atom: %s\n" % \
5673 (arg, atom), noiselevel=-1)
5674 pkg, existing_node = self._select_package(
5675 myroot, atom, onlydeps=onlydeps)
# No match: fatal unless the atom came from the system/world set.
5677 if not (isinstance(arg, SetArg) and \
5678 arg.name in ("system", "world")):
5679 self._unsatisfied_deps_for_display.append(
5680 ((myroot, atom), {}))
5681 return 0, myfavorites
5682 self._missing_args.append((arg, atom))
5684 if atom_cp != pkg.cp:
5685 # For old-style virtuals, we need to repeat the
5686 # package.provided check against the selected package.
5687 expanded_atom = atom.replace(atom_cp, pkg.cp)
5688 pprovided = pprovideddict.get(pkg.cp)
5690 portage.match_from_list(expanded_atom, pprovided):
5691 # A provided package has been
5692 # specified on the command line.
5693 self._pprovided_args.append((arg, atom))
5695 if pkg.installed and "selective" not in self.myparams:
5696 self._unsatisfied_deps_for_display.append(
5697 ((myroot, atom), {}))
5698 # Previous behavior was to bail out in this case, but
5699 # since the dep is satisfied by the installed package,
5700 # it's more friendly to continue building the graph
5701 # and just show a warning message. Therefore, only bail
5702 # out here if the atom is not from either the system or
5704 if not (isinstance(arg, SetArg) and \
5705 arg.name in ("system", "world")):
5706 return 0, myfavorites
5708 # Add the selected package to the graph as soon as possible
5709 # so that later dep_check() calls can use it as feedback
5710 # for making more consistent atom selections.
5711 if not self._add_pkg(pkg, dep):
5712 if isinstance(arg, SetArg):
5713 sys.stderr.write(("\n\n!!! Problem resolving " + \
5714 "dependencies for %s from %s\n") % \
5717 sys.stderr.write(("\n\n!!! Problem resolving " + \
5718 "dependencies for %s\n") % atom)
5719 return 0, myfavorites
# --- Error handling for the seeding loop ---
5721 except portage.exception.MissingSignature, e:
5722 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5723 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5724 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5725 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5726 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5727 return 0, myfavorites
5728 except portage.exception.InvalidSignature, e:
5729 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5730 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5731 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5732 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5733 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5734 return 0, myfavorites
5735 except SystemExit, e:
5736 raise # Needed else can't exit
5737 except Exception, e:
5738 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5739 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5742 # Now that the root packages have been added to the graph,
5743 # process the dependencies.
5744 if not self._create_graph():
5745 return 0, myfavorites
# --- --usepkgonly: verify every merge node has a binary package ---
5748 if "--usepkgonly" in self.myopts:
5749 for xs in self.digraph.all_nodes():
5750 if not isinstance(xs, Package):
5752 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5756 print "Missing binary for:",xs[2]
5760 except self._unknown_internal_error:
5761 return False, myfavorites
5763 # We're true here unless we are missing binaries.
5764 return (not missing,myfavorites)
# Rebuild the "args" package set and the atom->arg map from the given
# argument objects, then invalidate the package-selection caches.
# NOTE(review): sampled listing -- missing lines include 5774-5782
# (the loop over args populating args_set), 5787 (a loop header over
# self._sets or args), and 5791-5796 (refs initialization/appends).
5766 def _set_args(self, args):
# Docstring body (delimiters fall in missing lines 5767/5772).
5768 Create the "args" package set from atoms and packages given as
5769 arguments. This method can be called multiple times if necessary.
5770 The package selection cache is automatically invalidated, since
5771 arguments influence package selections.
5773 args_set = self._sets["args"]
5776 if not isinstance(arg, (AtomArg, PackageArg)):
5779 if atom in args_set:
# Rebuild the flat union of all set atoms.
5783 self._set_atoms.clear()
5784 self._set_atoms.update(chain(*self._sets.itervalues()))
5785 atom_arg_map = self._atom_arg_map
5786 atom_arg_map.clear()
5788 for atom in arg.set:
5789 atom_key = (atom, arg.root_config.root)
5790 refs = atom_arg_map.get(atom_key)
5793 atom_arg_map[atom_key] = refs
5797 # Invalidate the package selection cache, since
5798 # arguments influence package selections.
5799 self._highest_pkg_cache.clear()
5800 for trees in self._filtered_trees.itervalues():
5801 trees["porttree"].dbapi._clear_cache()
# Compute slot atoms for installed slots of `atom` other than the slot
# of the highest visible match, optionally discarding slots that would
# trigger blocker conflicts.
# NOTE(review): sampled listing -- missing lines include 5810, 5813,
# 5815, 5820, 5822-5824, 5826-5827, 5833-5834, 5837-5838, 5842, 5847,
# 5850, 5852-5853, 5856, 5861-5864, 5871, 5875, 5878, 5880 (returns,
# loop headers, try framing, and dict/set initializations).
5803 def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
# Docstring body (delimiters fall in missing lines 5804/5810).
5805 Return a list of slot atoms corresponding to installed slots that
5806 differ from the slot of the highest visible match. When
5807 blocker_lookahead is True, slot atoms that would trigger a blocker
5808 conflict are automatically discarded, potentially allowing automatic
5809 uninstallation of older slots when appropriate.
5811 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5812 if highest_pkg is None:
5814 vardb = root_config.trees["vartree"].dbapi
# Collect SLOTs of installed instances sharing the highest match's cp.
5816 for cpv in vardb.match(atom):
5817 # don't mix new virtuals with old virtuals
5818 if portage.cpv_getkey(cpv) == highest_pkg.cp:
5819 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5821 slots.add(highest_pkg.metadata["SLOT"])
# The highest match's own slot is excluded from the greedy set.
5825 slots.remove(highest_pkg.metadata["SLOT"])
# For each remaining slot, keep only packages that are genuinely lower
# versions of the same cp.
5828 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5829 pkg, in_graph = self._select_package(root_config.root, slot_atom)
5830 if pkg is not None and \
5831 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5832 greedy_pkgs.append(pkg)
5835 if not blocker_lookahead:
5836 return [pkg.slot_atom for pkg in greedy_pkgs]
# blocker_lookahead: parse each candidate's dep strings and build its
# set of blocker atoms; candidates with invalid deps are dropped.
5839 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5840 for pkg in greedy_pkgs + [highest_pkg]:
5841 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5843 atoms = self._select_atoms(
5844 pkg.root, dep_str, pkg.use.enabled,
5845 parent=pkg, strict=True)
5846 except portage.exception.InvalidDependString:
5848 blocker_atoms = (x for x in atoms if x.blocker)
5849 blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5851 if highest_pkg not in blockers:
5854 # filter packages with invalid deps
5855 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5857 # filter packages that conflict with highest_pkg
5858 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5859 (blockers[highest_pkg].findAtomForPackage(pkg) or \
5860 blockers[pkg].findAtomForPackage(highest_pkg))]
5865 # If two packages conflict, discard the lower version.
5866 discard_pkgs = set()
5867 greedy_pkgs.sort(reverse=True)
5868 for i in xrange(len(greedy_pkgs) - 1):
5869 pkg1 = greedy_pkgs[i]
5870 if pkg1 in discard_pkgs:
5872 for j in xrange(i + 1, len(greedy_pkgs)):
5873 pkg2 = greedy_pkgs[j]
5874 if pkg2 in discard_pkgs:
5876 if blockers[pkg1].findAtomForPackage(pkg2) or \
5877 blockers[pkg2].findAtomForPackage(pkg1):
# pkg2 is the lower version (list is sorted descending).
5879 discard_pkgs.add(pkg2)
5881 return [pkg.slot_atom for pkg in greedy_pkgs \
5882 if pkg not in discard_pkgs]
# Thin wrapper: force atom selection to use the graph-backed trees so
# dep_check prefers packages already in the graph (or installed and not
# being replaced), then delegate to the normal selection path.
5884 def _select_atoms_from_graph(self, *pargs, **kwargs):
5886 Prefer atoms matching packages that have already been
5887 added to the graph or those that are installed and have
5888 not been scheduled for replacement.
5890 kwargs["trees"] = self._graph_trees
5891 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage's dep_check() on a dependency string and return the atoms
# it selected. NOTE(review): elided listing — the try/finally framing
# around the global _dep_check_strict toggle is only partially visible.
5893 def _select_atoms_highest_available(self, root, depstring,
5894 myuse=None, parent=None, strict=True, trees=None, priority=None):
5895 """This will raise InvalidDependString if necessary. If trees is
5896 None then self._filtered_trees is used."""
5897 pkgsettings = self.pkgsettings[root]
5899 trees = self._filtered_trees
5900 if not getattr(priority, "buildtime", False):
5901 # The parent should only be passed to dep_check() for buildtime
5902 # dependencies since that's the only case when it's appropriate
5903 # to trigger the circular dependency avoidance code which uses it.
5904 # It's important not to trigger the same circular dependency
5905 # avoidance code for runtime dependencies since it's not needed
5906 # and it can promote an incorrect package choice.
# Parent is threaded to dep_check via a temporary trees[root] entry,
# removed again after the call (see pop below).
5910 if parent is not None:
5911 trees[root]["parent"] = parent
# Module-global strictness flag is flipped around the dep_check call and
# restored afterwards.
5913 portage.dep._dep_check_strict = False
5914 mycheck = portage.dep_check(depstring, None,
5915 pkgsettings, myuse=myuse,
5916 myroot=root, trees=trees)
5918 if parent is not None:
5919 trees[root].pop("parent")
5920 portage.dep._dep_check_strict = True
# On failure mycheck[1] carries the error message; on success it carries
# the selected atoms.
5922 raise portage.exception.InvalidDependString(mycheck[1])
5923 selected_atoms = mycheck[1]
5924 return selected_atoms
# Print a diagnostic explaining why an atom could not be satisfied:
# missing USE flags, missing IUSE, masked packages, or simply no ebuilds.
# Ends by walking the digraph parents to show what pulled the dep in.
# NOTE(review): elided listing — several branches and loop headers are
# not visible here.
5926 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5927 atom = portage.dep.Atom(atom)
5928 atom_set = InternalPackageSet(initial_atoms=(atom,))
# Matching below is done without USE deps (and with the slot re-added)
# so that USE-related failures can be reported separately.
5929 atom_without_use = atom
5931 atom_without_use = portage.dep.remove_slot(atom)
5933 atom_without_use += ":" + atom.slot
5934 atom_without_use = portage.dep.Atom(atom_without_use)
5935 xinfo = '"%s"' % atom
5938 # Discard null/ from failed cpv_expand category expansion.
5939 xinfo = xinfo.replace("null/", "")
5940 masked_packages = []
5942 masked_pkg_instances = set()
5943 missing_licenses = []
5944 have_eapi_mask = False
5945 pkgsettings = self.pkgsettings[root]
5946 implicit_iuse = pkgsettings._get_implicit_iuse()
5947 root_config = self.roots[root]
5948 portdb = self.roots[root].trees["porttree"].dbapi
5949 dbs = self._filtered_trees[root]["dbs"]
5950 for db, pkg_type, built, installed, db_keys in dbs:
# xmatch("match-all", ...) includes masked packages; plain match() is
# the fallback for dbs without xmatch.
5954 if hasattr(db, "xmatch"):
5955 cpv_list = db.xmatch("match-all", atom_without_use)
5957 cpv_list = db.match(atom_without_use)
5960 for cpv in cpv_list:
5961 metadata, mreasons = get_mask_info(root_config, cpv,
5962 pkgsettings, db, pkg_type, built, installed, db_keys)
5963 if metadata is not None:
5964 pkg = Package(built=built, cpv=cpv,
5965 installed=installed, metadata=metadata,
5966 root_config=root_config)
5967 if pkg.cp != atom.cp:
5968 # A cpv can be returned from dbapi.match() as an
5969 # old-style virtual match even in cases when the
5970 # package does not actually PROVIDE the virtual.
5971 # Filter out any such false matches here.
5972 if not atom_set.findAtomForPackage(pkg):
5975 masked_pkg_instances.add(pkg)
5977 missing_use.append(pkg)
5980 masked_packages.append(
5981 (root_config, pkgsettings, cpv, metadata, mreasons))
# Classify USE-related failures: flags absent from IUSE vs. flags that
# merely need to be toggled.
5983 missing_use_reasons = []
5984 missing_iuse_reasons = []
5985 for pkg in missing_use:
5986 use = pkg.use.enabled
5987 iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5988 iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5990 for x in atom.use.required:
5991 if iuse_re.match(x) is None:
5992 missing_iuse.append(x)
5995 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5996 missing_iuse_reasons.append((pkg, mreasons))
5998 need_enable = sorted(atom.use.enabled.difference(use))
5999 need_disable = sorted(atom.use.disabled.intersection(use))
6000 if need_enable or need_disable:
6002 changes.extend(colorize("red", "+" + x) \
6003 for x in need_enable)
6004 changes.extend(colorize("blue", "-" + x) \
6005 for x in need_disable)
6006 mreasons.append("Change USE: %s" % " ".join(changes))
6007 missing_use_reasons.append((pkg, mreasons))
# Only unmasked instances are worth suggesting USE changes for.
6009 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6010 in missing_use_reasons if pkg not in masked_pkg_instances]
6012 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6013 in missing_iuse_reasons if pkg not in masked_pkg_instances]
6015 show_missing_use = False
6016 if unmasked_use_reasons:
6017 # Only show the latest version.
6018 show_missing_use = unmasked_use_reasons[:1]
6019 elif unmasked_iuse_reasons:
6020 if missing_use_reasons:
6021 # All packages with required IUSE are masked,
6022 # so display a normal masking message.
6025 show_missing_use = unmasked_iuse_reasons
6027 if show_missing_use:
6028 print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6029 print "!!! One of the following packages is required to complete your request:"
6030 for pkg, mreasons in show_missing_use:
6031 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6033 elif masked_packages:
6035 colorize("BAD", "All ebuilds that could satisfy ") + \
6036 colorize("INFORM", xinfo) + \
6037 colorize("BAD", " have been masked.")
6038 print "!!! One of the following masked packages is required to complete your request:"
6039 have_eapi_mask = show_masked_packages(masked_packages)
6042 msg = ("The current version of portage supports " + \
6043 "EAPI '%s'. You must upgrade to a newer version" + \
6044 " of portage before EAPI masked packages can" + \
6045 " be installed.") % portage.const.EAPI
6046 from textwrap import wrap
6047 for line in wrap(msg, 75):
6052 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6054 # Show parent nodes and the argument that pulled them in.
6055 traversed_nodes = set()
6058 while node is not None:
6059 traversed_nodes.add(node)
6060 msg.append('(dependency required by "%s" [%s])' % \
6061 (colorize('INFORM', str(node.cpv)), node.type_name))
6062 # When traversing to parents, prefer arguments over packages
6063 # since arguments are root nodes. Never traverse the same
6064 # package twice, in order to prevent an infinite loop.
6065 selected_parent = None
6066 for parent in self.digraph.parent_nodes(node):
6067 if isinstance(parent, DependencyArg):
6068 msg.append('(dependency required by "%s" [argument])' % \
6069 (colorize('INFORM', str(parent))))
# An argument parent terminates the walk (selected_parent stays None).
6070 selected_parent = None
6072 if parent not in traversed_nodes:
6073 selected_parent = parent
6074 node = selected_parent
# Memoizing front-end for package selection: consult/refresh
# _highest_pkg_cache, delegate to _select_pkg_highest_available_imp on a
# miss, and record visible packages. NOTE(review): elided listing — the
# cache-hit unpacking and return statements are not all visible.
6080 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6081 cache_key = (root, atom, onlydeps)
6082 ret = self._highest_pkg_cache.get(cache_key)
# Cache-hit path: if the cached pkg has since been added to the graph,
# refresh the cached tuple so "existing" reflects the graph node.
6085 if pkg and not existing:
6086 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6087 if existing and existing == pkg:
6088 # Update the cache to reflect that the
6089 # package has been added to the graph.
6091 self._highest_pkg_cache[cache_key] = ret
# Cache-miss path: compute and store.
6093 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6094 self._highest_pkg_cache[cache_key] = ret
# Track visible packages, excluding installed ones masked by KEYWORDS.
6097 settings = pkg.root_config.settings
6098 if visible(settings, pkg) and not (pkg.installed and \
6099 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6100 pkg.root_config.visible_pkgs.cpv_inject(pkg)
# Core package-selection routine: scan all candidate dbs (ebuild, binary,
# installed) for matches of `atom`, apply visibility / USE / reinstall
# policy, and return (best_match, existing_graph_node). NOTE(review):
# this is an elided listing — many continue/break statements and some
# branch headers are not visible, so comments describe only what is shown.
6103 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6104 root_config = self.roots[root]
6105 pkgsettings = self.pkgsettings[root]
6106 dbs = self._filtered_trees[root]["dbs"]
6107 vardb = self.roots[root].trees["vartree"].dbapi
6108 portdb = self.roots[root].trees["porttree"].dbapi
6109 # List of acceptable packages, ordered by type preference.
6110 matched_packages = []
6111 highest_version = None
6112 if not isinstance(atom, portage.dep.Atom):
6113 atom = portage.dep.Atom(atom)
6115 atom_set = InternalPackageSet(initial_atoms=(atom,))
6116 existing_node = None
6118 usepkgonly = "--usepkgonly" in self.myopts
6119 empty = "empty" in self.myparams
6120 selective = "selective" in self.myparams
6122 noreplace = "--noreplace" in self.myopts
6123 # Behavior of the "selective" parameter depends on
6124 # whether or not a package matches an argument atom.
6125 # If an installed package provides an old-style
6126 # virtual that is no longer provided by an available
6127 # package, the installed package may match an argument
6128 # atom even though none of the available packages do.
6129 # Therefore, "selective" logic does not consider
6130 # whether or not an installed package matches an
6131 # argument atom. It only considers whether or not
6132 # available packages match argument atoms, which is
6133 # represented by the found_available_arg flag.
6134 found_available_arg = False
# Two passes: first look for an existing graph node, then a fresh match.
6135 for find_existing_node in True, False:
6138 for db, pkg_type, built, installed, db_keys in dbs:
6141 if installed and not find_existing_node:
6142 want_reinstall = reinstall or empty or \
6143 (found_available_arg and not selective)
6144 if want_reinstall and matched_packages:
6146 if hasattr(db, "xmatch"):
6147 cpv_list = db.xmatch("match-all", atom)
6149 cpv_list = db.match(atom)
6151 # USE=multislot can make an installed package appear as if
6152 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6153 # won't do any good as long as USE=multislot is enabled since
6154 # the newly built package still won't have the expected slot.
6155 # Therefore, assume that such SLOT dependencies are already
6156 # satisfied rather than forcing a rebuild.
6157 if installed and not cpv_list and atom.slot:
6158 for cpv in db.match(atom.cp):
6159 slot_available = False
6160 for other_db, other_type, other_built, \
6161 other_installed, other_keys in dbs:
6164 other_db.aux_get(cpv, ["SLOT"])[0]:
6165 slot_available = True
6169 if not slot_available:
6171 inst_pkg = self._pkg(cpv, "installed",
6172 root_config, installed=installed)
6173 # Remove the slot from the atom and verify that
6174 # the package matches the resulting atom.
6175 atom_without_slot = portage.dep.remove_slot(atom)
6177 atom_without_slot += str(atom.use)
6178 atom_without_slot = portage.dep.Atom(atom_without_slot)
6179 if portage.match_from_list(
6180 atom_without_slot, [inst_pkg]):
6181 cpv_list = [inst_pkg.cpv]
6186 pkg_status = "merge"
6187 if installed or onlydeps:
6188 pkg_status = "nomerge"
6191 for cpv in cpv_list:
6192 # Make --noreplace take precedence over --newuse.
6193 if not installed and noreplace and \
6194 cpv in vardb.match(atom):
6195 # If the installed version is masked, it may
6196 # be necessary to look at lower versions,
6197 # in case there is a visible downgrade.
6199 reinstall_for_flags = None
6200 cache_key = (pkg_type, root, cpv, pkg_status)
6201 calculated_use = True
6202 pkg = self._pkg_cache.get(cache_key)
6204 calculated_use = False
6206 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6209 pkg = Package(built=built, cpv=cpv,
6210 installed=installed, metadata=metadata,
6211 onlydeps=onlydeps, root_config=root_config,
6213 metadata = pkg.metadata
6215 metadata['CHOST'] = pkgsettings.get('CHOST', '')
6216 if not built and ("?" in metadata["LICENSE"] or \
6217 "?" in metadata["PROVIDE"]):
6218 # This is avoided whenever possible because
6219 # it's expensive. It only needs to be done here
6220 # if it has an effect on visibility.
6221 pkgsettings.setcpv(pkg)
6222 metadata["USE"] = pkgsettings["PORTAGE_USE"]
6223 calculated_use = True
6224 self._pkg_cache[pkg] = pkg
6226 if not installed or (built and matched_packages):
6227 # Only enforce visibility on installed packages
6228 # if there is at least one other visible package
6229 # available. By filtering installed masked packages
6230 # here, packages that have been masked since they
6231 # were installed can be automatically downgraded
6232 # to an unmasked version.
6234 if not visible(pkgsettings, pkg):
6236 except portage.exception.InvalidDependString:
6240 # Enable upgrade or downgrade to a version
6241 # with visible KEYWORDS when the installed
6242 # version is masked by KEYWORDS, but never
6243 # reinstall the same exact version only due
6244 # to a KEYWORDS mask.
6245 if built and matched_packages:
6247 different_version = None
6248 for avail_pkg in matched_packages:
6249 if not portage.dep.cpvequal(
6250 pkg.cpv, avail_pkg.cpv):
6251 different_version = avail_pkg
6253 if different_version is not None:
6256 pkgsettings._getMissingKeywords(
6257 pkg.cpv, pkg.metadata):
6260 # If the ebuild no longer exists or it's
6261 # keywords have been dropped, reject built
6262 # instances (installed or binary).
6263 # If --usepkgonly is enabled, assume that
6264 # the ebuild status should be ignored.
6268 pkg.cpv, "ebuild", root_config)
6269 except portage.exception.PackageNotFound:
6272 if not visible(pkgsettings, pkg_eb):
6275 if not pkg.built and not calculated_use:
6276 # This is avoided whenever possible because
6278 pkgsettings.setcpv(pkg)
6279 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6281 if pkg.cp != atom.cp:
6282 # A cpv can be returned from dbapi.match() as an
6283 # old-style virtual match even in cases when the
6284 # package does not actually PROVIDE the virtual.
6285 # Filter out any such false matches here.
6286 if not atom_set.findAtomForPackage(pkg):
6290 if root == self.target_root:
6292 # Ebuild USE must have been calculated prior
6293 # to this point, in case atoms have USE deps.
6294 myarg = self._iter_atoms_for_pkg(pkg).next()
6295 except StopIteration:
6297 except portage.exception.InvalidDependString:
6299 # masked by corruption
6301 if not installed and myarg:
6302 found_available_arg = True
# Reject candidates whose USE state contradicts the atom's USE deps
# (only meaningful for unbuilt packages whose USE is adjustable).
6304 if atom.use and not pkg.built:
6305 use = pkg.use.enabled
6306 if atom.use.enabled.difference(use):
6308 if atom.use.disabled.intersection(use):
6310 if pkg.cp == atom_cp:
6311 if highest_version is None:
6312 highest_version = pkg
6313 elif pkg > highest_version:
6314 highest_version = pkg
6315 # At this point, we've found the highest visible
6316 # match from the current repo. Any lower versions
6317 # from this repo are ignored, so this so the loop
6318 # will always end with a break statement below
6320 if find_existing_node:
6321 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6324 if portage.dep.match_from_list(atom, [e_pkg]):
6325 if highest_version and \
6326 e_pkg.cp == atom_cp and \
6327 e_pkg < highest_version and \
6328 e_pkg.slot_atom != highest_version.slot_atom:
6329 # There is a higher version available in a
6330 # different slot, so this existing node is
6334 matched_packages.append(e_pkg)
6335 existing_node = e_pkg
6337 # Compare built package to current config and
6338 # reject the built package if necessary.
6339 if built and not installed and \
6340 ("--newuse" in self.myopts or \
6341 "--reinstall" in self.myopts):
6342 iuses = pkg.iuse.all
6343 old_use = pkg.use.enabled
6345 pkgsettings.setcpv(myeb)
6347 pkgsettings.setcpv(pkg)
6348 now_use = pkgsettings["PORTAGE_USE"].split()
6349 forced_flags = set()
6350 forced_flags.update(pkgsettings.useforce)
6351 forced_flags.update(pkgsettings.usemask)
6353 if myeb and not usepkgonly:
6354 cur_iuse = myeb.iuse.all
6355 if self._reinstall_for_flags(forced_flags,
6359 # Compare current config to installed package
6360 # and do not reinstall if possible.
6361 if not installed and \
6362 ("--newuse" in self.myopts or \
6363 "--reinstall" in self.myopts) and \
6364 cpv in vardb.match(atom):
6365 pkgsettings.setcpv(pkg)
6366 forced_flags = set()
6367 forced_flags.update(pkgsettings.useforce)
6368 forced_flags.update(pkgsettings.usemask)
6369 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6370 old_iuse = set(filter_iuse_defaults(
6371 vardb.aux_get(cpv, ["IUSE"])[0].split()))
6372 cur_use = pkgsettings["PORTAGE_USE"].split()
6373 cur_iuse = pkg.iuse.all
6374 reinstall_for_flags = \
6375 self._reinstall_for_flags(
6376 forced_flags, old_use, old_iuse,
6378 if reinstall_for_flags:
6382 matched_packages.append(pkg)
6383 if reinstall_for_flags:
6384 self._reinstall_nodes[pkg] = \
6388 if not matched_packages:
6391 if "--debug" in self.myopts:
6392 for pkg in matched_packages:
6393 portage.writemsg("%s %s\n" % \
6394 ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6396 # Filter out any old-style virtual matches if they are
6397 # mixed with new-style virtual matches.
6398 cp = portage.dep_getkey(atom)
6399 if len(matched_packages) > 1 and \
6400 "virtual" == portage.catsplit(cp)[0]:
6401 for pkg in matched_packages:
6404 # Got a new-style virtual, so filter
6405 # out any old-style virtuals.
6406 matched_packages = [pkg for pkg in matched_packages \
6410 if len(matched_packages) > 1:
# Multiple versions survived: keep only the best version.
6411 bestmatch = portage.best(
6412 [pkg.cpv for pkg in matched_packages])
6413 matched_packages = [pkg for pkg in matched_packages \
6414 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6416 # ordered by type preference ("ebuild" type is the last resort)
6417 return matched_packages[-1], existing_node
# Graph-restricted selection: match against the graph-backed db only and
# return the highest match plus its existing graph node (if any).
6419 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6421 Select packages that have already been added to the graph or
6422 those that are installed and have not been scheduled for
6425 graph_db = self._graph_trees[root]["porttree"].dbapi
6426 matches = graph_db.match_pkgs(atom)
6429 pkg = matches[-1] # highest match
6430 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6431 return pkg, in_graph
# Pull deep dependencies of the required sets (args/system/world) into
# the graph so upgrades don't silently break initially-satisfied deps.
# NOTE(review): elided listing — early returns and some state
# save/restore lines are not visible here.
6433 def _complete_graph(self):
6435 Add any deep dependencies of required sets (args, system, world) that
6436 have not been pulled into the graph yet. This ensures that the graph
6437 is consistent such that initially satisfied deep dependencies are not
6438 broken in the new graph. Initially unsatisfied dependencies are
6439 irrelevant since we only want to avoid breaking dependencies that are
6442 Since this method can consume enough time to disturb users, it is
6443 currently only enabled by the --complete-graph option.
6445 if "--buildpkgonly" in self.myopts or \
6446 "recurse" not in self.myparams:
6449 if "complete" not in self.myparams:
6450 # Skip this to avoid consuming enough time to disturb users.
6453 # Put the depgraph into a mode that causes it to only
6454 # select packages that have already been added to the
6455 # graph or those that are installed and have not been
6456 # scheduled for replacement. Also, toggle the "deep"
6457 # parameter so that all dependencies are traversed and
6459 self._select_atoms = self._select_atoms_from_graph
6460 self._select_package = self._select_pkg_from_graph
6461 already_deep = "deep" in self.myparams
6462 if not already_deep:
6463 self.myparams.add("deep")
6465 for root in self.roots:
6466 required_set_names = self._required_set_names.copy()
6467 if root == self.target_root and \
6468 (already_deep or "empty" in self.myparams):
6469 required_set_names.difference_update(self._sets)
6470 if not required_set_names and not self._ignored_deps:
6472 root_config = self.roots[root]
6473 setconfig = root_config.setconfig
6475 # Reuse existing SetArg instances when available.
6476 for arg in self.digraph.root_nodes():
6477 if not isinstance(arg, SetArg):
6479 if arg.root_config != root_config:
6481 if arg.name in required_set_names:
6483 required_set_names.remove(arg.name)
6484 # Create new SetArg instances only when necessary.
6485 for s in required_set_names:
6486 expanded_set = InternalPackageSet(
6487 initial_atoms=setconfig.getSetAtoms(s))
6488 atom = SETPREFIX + s
6489 args.append(SetArg(arg=atom, set=expanded_set,
6490 root_config=root_config))
6491 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom as a dependency rooted at its SetArg.
6493 for atom in arg.set:
6494 self._dep_stack.append(
6495 Dependency(atom=atom, root=root, parent=arg))
6496 if self._ignored_deps:
6497 self._dep_stack.extend(self._ignored_deps)
6498 self._ignored_deps = []
6499 if not self._create_graph(allow_unsatisfied=True):
6501 # Check the unsatisfied deps to see if any initially satisfied deps
6502 # will become unsatisfied due to an upgrade. Initially unsatisfied
6503 # deps are irrelevant since we only want to avoid breaking deps
6504 # that are initially satisfied.
6505 while self._unsatisfied_deps:
6506 dep = self._unsatisfied_deps.pop()
6507 matches = vardb.match_pkgs(dep.atom)
6509 self._initially_unsatisfied_deps.append(dep)
6511 # An scheduled installation broke a deep dependency.
6512 # Add the installed package to the graph so that it
6513 # will be appropriately reported as a slot collision
6514 # (possibly solvable via backtracking).
6515 pkg = matches[-1] # highest match
6516 if not self._add_pkg(pkg, dep):
6518 if not self._create_graph(allow_unsatisfied=True):
# Cached Package factory keyed by (type_name, root, cpv, operation).
# NOTE(review): elided listing — the try framing around aux_get and the
# cache-hit return are not visible here.
6522 def _pkg(self, cpv, type_name, root_config, installed=False):
6524 Get a package instance from the cache, or create a new
6525 one if necessary. Raises KeyError from aux_get if it
6526 failures for some reason (package does not exist or is
6531 operation = "nomerge"
6532 pkg = self._pkg_cache.get(
6533 (type_name, root_config.root, cpv, operation))
6535 tree_type = self.pkg_tree_map[type_name]
6536 db = root_config.trees[tree_type].dbapi
# Use the original (un-filtered) trees' aux-cache keys for metadata.
6537 db_keys = list(self._trees_orig[root_config.root][
6538 tree_type].dbapi._aux_cache_keys)
6540 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
# aux_get failure is surfaced to callers as PackageNotFound.
6542 raise portage.exception.PackageNotFound(cpv)
6543 pkg = Package(cpv=cpv, metadata=metadata,
6544 root_config=root_config, installed=installed)
6545 if type_name == "ebuild":
# Ebuilds need USE/CHOST computed from current settings.
6546 settings = self.pkgsettings[root_config.root]
6547 settings.setcpv(pkg)
6548 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6549 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6550 self._pkg_cache[pkg] = pkg
# Two phases: (1) compute blocker atoms for every installed package
# (with a persistent BlockerCache), (2) resolve each blocker against the
# initial and final dbs, scheduling uninstalls or recording unsolvable
# conflicts. NOTE(review): elided listing — many branch headers,
# continue statements and return values are not visible here.
6553 def validate_blockers(self):
6554 """Remove any blockers from the digraph that do not match any of the
6555 packages within the graph. If necessary, create hard deps to ensure
6556 correct merge order such that mutually blocking packages are never
6557 installed simultaneously."""
6559 if "--buildpkgonly" in self.myopts or \
6560 "--nodeps" in self.myopts:
6563 #if "deep" in self.myparams:
6565 # Pull in blockers from all installed packages that haven't already
6566 # been pulled into the depgraph. This is not enabled by default
6567 # due to the performance penalty that is incurred by all the
6568 # additional dep_check calls that are required.
6570 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6571 for myroot in self.trees:
6572 vardb = self.trees[myroot]["vartree"].dbapi
6573 portdb = self.trees[myroot]["porttree"].dbapi
6574 pkgsettings = self.pkgsettings[myroot]
6575 final_db = self.mydbapi[myroot]
6577 blocker_cache = BlockerCache(myroot, vardb)
# stale_cache tracks cache entries not seen this run; survivors are
# deleted at the end.
6578 stale_cache = set(blocker_cache)
6581 stale_cache.discard(cpv)
6582 pkg_in_graph = self.digraph.contains(pkg)
6584 # Check for masked installed packages. Only warn about
6585 # packages that are in the graph in order to avoid warning
6586 # about those that will be automatically uninstalled during
6587 # the merge process or by --depclean.
6589 if pkg_in_graph and not visible(pkgsettings, pkg):
6590 self._masked_installed.add(pkg)
6592 blocker_atoms = None
6598 self._blocker_parents.child_nodes(pkg))
6603 self._irrelevant_blockers.child_nodes(pkg))
6606 if blockers is not None:
6607 blockers = set(str(blocker.atom) \
6608 for blocker in blockers)
6610 # If this node has any blockers, create a "nomerge"
6611 # node for it so that they can be enforced.
6612 self.spinner.update()
6613 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cache entry is for a different merge
# of this cpv, so it cannot be trusted.
6614 if blocker_data is not None and \
6615 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6618 # If blocker data from the graph is available, use
6619 # it to validate the cache and update the cache if
6621 if blocker_data is not None and \
6622 blockers is not None:
6623 if not blockers.symmetric_difference(
6624 blocker_data.atoms):
6628 if blocker_data is None and \
6629 blockers is not None:
6630 # Re-use the blockers from the graph.
6631 blocker_atoms = sorted(blockers)
6632 counter = long(pkg.metadata["COUNTER"])
6634 blocker_cache.BlockerData(counter, blocker_atoms)
6635 blocker_cache[pkg.cpv] = blocker_data
6639 blocker_atoms = blocker_data.atoms
6641 # Use aux_get() to trigger FakeVartree global
6642 # updates on *DEPEND when appropriate.
6643 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6644 # It is crucial to pass in final_db here in order to
6645 # optimize dep_check calls by eliminating atoms via
6646 # dep_wordreduce and dep_eval calls.
6648 portage.dep._dep_check_strict = False
6650 success, atoms = portage.dep_check(depstr,
6651 final_db, pkgsettings, myuse=pkg.use.enabled,
6652 trees=self._graph_trees, myroot=myroot)
6653 except Exception, e:
# Never swallow SystemExit.
6654 if isinstance(e, SystemExit):
6656 # This is helpful, for example, if a ValueError
6657 # is thrown from cpv_expand due to multiple
6658 # matches (this can happen if an atom lacks a
6660 show_invalid_depstring_notice(
6661 pkg, depstr, str(e))
6665 portage.dep._dep_check_strict = True
6667 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6668 if replacement_pkg and \
6669 replacement_pkg[0].operation == "merge":
6670 # This package is being replaced anyway, so
6671 # ignore invalid dependencies so as not to
6672 # annoy the user too much (otherwise they'd be
6673 # forced to manually unmerge it first).
6675 show_invalid_depstring_notice(pkg, depstr, atoms)
6677 blocker_atoms = [myatom for myatom in atoms \
6678 if myatom.startswith("!")]
6679 blocker_atoms.sort()
6680 counter = long(pkg.metadata["COUNTER"])
6681 blocker_cache[cpv] = \
6682 blocker_cache.BlockerData(counter, blocker_atoms)
6685 for atom in blocker_atoms:
6686 blocker = Blocker(atom=portage.dep.Atom(atom),
6687 eapi=pkg.metadata["EAPI"], root=myroot)
6688 self._blocker_parents.add(blocker, pkg)
6689 except portage.exception.InvalidAtom, e:
6690 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6691 show_invalid_depstring_notice(
6692 pkg, depstr, "Invalid Atom: %s" % (e,))
# Drop cache entries for packages no longer seen, then persist.
6694 for cpv in stale_cache:
6695 del blocker_cache[cpv]
6696 blocker_cache.flush()
6699 # Discard any "uninstall" tasks scheduled by previous calls
6700 # to this method, since those tasks may not make sense given
6701 # the current graph state.
6702 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6703 if previous_uninstall_tasks:
6704 self._blocker_uninstalls = digraph()
6705 self.digraph.difference_update(previous_uninstall_tasks)
6707 for blocker in self._blocker_parents.leaf_nodes():
6708 self.spinner.update()
6709 root_config = self.roots[blocker.root]
6710 virtuals = root_config.settings.getvirtuals()
6711 myroot = blocker.root
6712 initial_db = self.trees[myroot]["vartree"].dbapi
6713 final_db = self.mydbapi[myroot]
6715 provider_virtual = False
# Old-style virtual blockers expand to one atom per provider.
6716 if blocker.cp in virtuals and \
6717 not self._have_new_virt(blocker.root, blocker.cp):
6718 provider_virtual = True
6720 if provider_virtual:
6722 for provider_entry in virtuals[blocker.cp]:
6724 portage.dep_getkey(provider_entry)
6725 atoms.append(blocker.atom.replace(
6726 blocker.cp, provider_cp))
6728 atoms = [blocker.atom]
6730 blocked_initial = []
6732 blocked_initial.extend(initial_db.match_pkgs(atom))
6736 blocked_final.extend(final_db.match_pkgs(atom))
6738 if not blocked_initial and not blocked_final:
# Blocker matches nothing in either db: it is irrelevant.
6739 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6740 self._blocker_parents.remove(blocker)
6741 # Discard any parents that don't have any more blockers.
6742 for pkg in parent_pkgs:
6743 self._irrelevant_blockers.add(blocker, pkg)
6744 if not self._blocker_parents.child_nodes(pkg):
6745 self._blocker_parents.remove(pkg)
6747 for parent in self._blocker_parents.parent_nodes(blocker):
6748 unresolved_blocks = False
6749 depends_on_order = set()
6750 for pkg in blocked_initial:
6751 if pkg.slot_atom == parent.slot_atom:
6752 # TODO: Support blocks within slots in cases where it
6753 # might make sense. For example, a new version might
6754 # require that the old version be uninstalled at build
6757 if parent.installed:
6758 # Two currently installed packages conflict with
6759 # eachother. Ignore this case since the damage
6760 # is already done and this would be likely to
6761 # confuse users if displayed like a normal blocker.
6764 self._blocked_pkgs.add(pkg, blocker)
6766 if parent.operation == "merge":
6767 # Maybe the blocked package can be replaced or simply
6768 # unmerged to resolve this block.
6769 depends_on_order.add((pkg, parent))
6771 # None of the above blocker resolutions techniques apply,
6772 # so apparently this one is unresolvable.
6773 unresolved_blocks = True
6774 for pkg in blocked_final:
6775 if pkg.slot_atom == parent.slot_atom:
6776 # TODO: Support blocks within slots.
6778 if parent.operation == "nomerge" and \
6779 pkg.operation == "nomerge":
6780 # This blocker will be handled the next time that a
6781 # merge of either package is triggered.
6784 self._blocked_pkgs.add(pkg, blocker)
6786 # Maybe the blocking package can be
6787 # unmerged to resolve this block.
6788 if parent.operation == "merge" and pkg.installed:
6789 depends_on_order.add((pkg, parent))
6791 elif parent.operation == "nomerge":
6792 depends_on_order.add((parent, pkg))
6794 # None of the above blocker resolutions techniques apply,
6795 # so apparently this one is unresolvable.
6796 unresolved_blocks = True
6798 # Make sure we don't unmerge any package that have been pulled
6800 if not unresolved_blocks and depends_on_order:
6801 for inst_pkg, inst_task in depends_on_order:
6802 if self.digraph.contains(inst_pkg) and \
6803 self.digraph.parent_nodes(inst_pkg):
6804 unresolved_blocks = True
6807 if not unresolved_blocks and depends_on_order:
6808 for inst_pkg, inst_task in depends_on_order:
# Clone the installed pkg as an explicit "uninstall" task.
6809 uninst_task = Package(built=inst_pkg.built,
6810 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6811 metadata=inst_pkg.metadata,
6812 operation="uninstall",
6813 root_config=inst_pkg.root_config,
6814 type_name=inst_pkg.type_name)
6815 self._pkg_cache[uninst_task] = uninst_task
6816 # Enforce correct merge order with a hard dep.
6817 self.digraph.addnode(uninst_task, inst_task,
6818 priority=BlockerDepPriority.instance)
6819 # Count references to this blocker so that it can be
6820 # invalidated after nodes referencing it have been
6822 self._blocker_uninstalls.addnode(uninst_task, blocker)
6823 if not unresolved_blocks and not depends_on_order:
6824 self._irrelevant_blockers.add(blocker, parent)
6825 self._blocker_parents.remove_edge(blocker, parent)
6826 if not self._blocker_parents.parent_nodes(blocker):
6827 self._blocker_parents.remove(blocker)
6828 if not self._blocker_parents.child_nodes(parent):
6829 self._blocker_parents.remove(parent)
6830 if unresolved_blocks:
6831 self._unsolvable_blockers.add(blocker, parent)
# Decide whether blocker conflicts may be tolerated for this run.
# NOTE(review): this listing is elided (gaps in the embedded line numbers);
# the method's return statements are not visible here. From the visible
# code it scans self.myopts for options under which a blocker conflict
# does not prevent the operation — presumably returning a boolean; confirm
# against the unelided source.
6835 def _accept_blocker_conflicts(self):
6837 for x in ("--buildpkgonly", "--fetchonly",
6838 "--fetch-all-uri", "--nodeps"):
6839 if x in self.myopts:
# Sort mygraph.order in place to bias merge-order leaf selection.
# NOTE(review): elided listing — the comparator's return statements for
# the uninstall/system branches are missing; only the final
# reference-count comparison is visible. Confirm branch results against
# the unelided source.
6844 def _merge_order_bias(self, mygraph):
6846 For optimal leaf node selection, promote deep system runtime deps and
6847 order nodes from highest to lowest overall reference count.
# node_info maps each node to its number of parents (reference count),
# used by the comparator below.
6851 for node in mygraph.order:
6852 node_info[node] = len(mygraph.parent_nodes(node))
6853 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
# cmp-style comparator (negative/zero/positive), adapted to a key
# function via cmp_sort_key below.
6855 def cmp_merge_preference(node1, node2):
6857 if node1.operation == 'uninstall':
6858 if node2.operation == 'uninstall':
6862 if node2.operation == 'uninstall':
6863 if node1.operation == 'uninstall':
6867 node1_sys = node1 in deep_system_deps
6868 node2_sys = node2 in deep_system_deps
6869 if node1_sys != node2_sys:
# Higher reference count sorts earlier (descending order).
6874 return node_info[node2] - node_info[node1]
6876 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
# Return a copy of the serialized merge task list, computing and caching
# it on first use. NOTE(review): elided listing — the `try:` line paired
# with the `except self._serialize_tasks_retry:` handler, the retry
# handling body, and the final return are not visible. Also note the
# `reversed` parameter shadows the builtin; it is part of the public
# interface, so it is left unchanged.
6878 def altlist(self, reversed=False):
# Loop until _serialize_tasks() succeeds without requesting a retry.
6880 while self._serialized_tasks_cache is None:
6881 self._resolve_conflicts()
6883 self._serialized_tasks_cache, self._scheduler_graph = \
6884 self._serialize_tasks()
6885 except self._serialize_tasks_retry:
# Copy so callers cannot mutate the cache.
6888 retlist = self._serialized_tasks_cache[:]
# NOTE(review): elided listing — the docstring delimiters and the line
# that populates self._scheduler_graph (presumably via self.altlist())
# are not visible; confirm against the unelided source.
6893 def schedulerGraph(self):
6895 The scheduler graph is identical to the normal one except that
6896 uninstall edges are reversed in specific cases that require
6897 conflicting packages to be temporarily installed simultaneously.
6898 This is intended for use by the Scheduler in its parallelization
6899 logic. It ensures that temporary simultaneous installation of
6900 conflicting packages is avoided when appropriate (especially for
6901 !!atom blockers), but allowed in specific cases that require it.
6903 Note that this method calls break_refs() which alters the state of
6904 internal Package instances such that this depgraph instance should
6905 not be used to perform any more calculations.
6907 if self._scheduler_graph is None:
# break_refs() detaches Package instances from this depgraph (see its
# docstring) — this depgraph must not be reused afterwards.
6909 self.break_refs(self._scheduler_graph.order)
6910 return self._scheduler_graph
# NOTE(review): elided listing — the docstring delimiters and the
# `for node in nodes:` loop header are not visible here.
6912 def break_refs(self, nodes):
6914 Take a mergelist like that returned from self.altlist() and
6915 break any references that lead back to the depgraph. This is
6916 useful if you want to hold references to packages without
6917 also holding the depgraph on the heap.
# Only Package-like nodes carry a root_config; Blocker entries in a
# mergelist may not, hence the hasattr() guard.
6920 if hasattr(node, "root_config"):
6921 # The FakeVartree references the _package_cache which
6922 # references the depgraph. So that Package instances don't
6923 # hold the depgraph and FakeVartree on the heap, replace
6924 # the RootConfig that references the FakeVartree with the
6925 # original RootConfig instance which references the actual
6927 node.root_config = \
6928 self._trees_orig[node.root_config.root]["root_config"]
# Run the pre-serialization conflict-resolution passes. Each step that
# fails raises _unknown_internal_error, which aborts serialization.
6930 def _resolve_conflicts(self):
# Pull any remaining system/world deps into the graph first.
6931 if not self._complete_graph():
6932 raise self._unknown_internal_error()
# Then re-validate blockers against the completed graph.
6934 if not self.validate_blockers():
6935 raise self._unknown_internal_error()
# Finally resolve any recorded slot collisions.
6937 if self._slot_collision_info:
6938 self._process_slot_conflicts()
# Flatten the dependency digraph into an ordered merge list (retlist)
# plus a scheduler graph, repeatedly selecting "leaf" nodes whose deps
# are satisfied, with progressively looser criteria when stuck.
# NOTE(review): this listing is heavily elided (many embedded line
# numbers are missing), so several loop headers, try/else lines, and
# break/continue/return statements are not visible; the code is kept
# byte-identical and only comments are added. Confirm any behavioral
# question against the unelided source.
6940 def _serialize_tasks(self):
6942 if "--debug" in self.myopts:
6943 writemsg("\ndigraph:\n\n", noiselevel=-1)
6944 self.digraph.debug_print()
6945 writemsg("\n", noiselevel=-1)
# Two working copies: scheduler_graph keeps uninstall edges reversed
# for the Scheduler; mygraph is consumed by the selection loop below.
6947 scheduler_graph = self.digraph.copy()
6948 mygraph=self.digraph.copy()
6949 # Prune "nomerge" root nodes if nothing depends on them, since
6950 # otherwise they slow down merge order calculation. Don't remove
6951 # non-root nodes since they help optimize merge order in some cases
6952 # such as revdep-rebuild.
6953 removed_nodes = set()
6955 for node in mygraph.root_nodes():
6956 if not isinstance(node, Package) or \
6957 node.installed or node.onlydeps:
6958 removed_nodes.add(node)
6960 self.spinner.update()
6961 mygraph.difference_update(removed_nodes)
6962 if not removed_nodes:
6964 removed_nodes.clear()
6965 self._merge_order_bias(mygraph)
# cmp-style tiebreaker for groups of circularly-dependent nodes.
6966 def cmp_circular_bias(n1, n2):
6968 RDEPEND is stronger than PDEPEND and this function
6969 measures such a strength bias within a circular
6970 dependency relationship.
6972 n1_n2_medium = n2 in mygraph.child_nodes(n1,
6973 ignore_priority=priority_range.ignore_medium_soft)
6974 n2_n1_medium = n1 in mygraph.child_nodes(n2,
6975 ignore_priority=priority_range.ignore_medium_soft)
6976 if n1_n2_medium == n2_n1_medium:
6981 myblocker_uninstalls = self._blocker_uninstalls.copy()
6983 # Contains uninstall tasks that have been scheduled to
6984 # occur after overlapping blockers have been installed.
6985 scheduled_uninstalls = set()
6986 # Contains any Uninstall tasks that have been ignored
6987 # in order to avoid the circular deps code path. These
6988 # correspond to blocker conflicts that could not be
6990 ignored_uninstall_tasks = set()
6991 have_uninstall_task = False
6992 complete = "complete" in self.myparams
6995 def get_nodes(**kwargs):
6997 Returns leaf nodes excluding Uninstall instances
6998 since those should be executed as late as possible.
7000 return [node for node in mygraph.leaf_nodes(**kwargs) \
7001 if isinstance(node, Package) and \
7002 (node.operation != "uninstall" or \
7003 node in scheduled_uninstalls)]
7005 # sys-apps/portage needs special treatment if ROOT="/"
7006 running_root = self._running_root.root
7007 from portage.const import PORTAGE_PACKAGE_ATOM
7008 runtime_deps = InternalPackageSet(
7009 initial_atoms=[PORTAGE_PACKAGE_ATOM])
7010 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7011 PORTAGE_PACKAGE_ATOM)
7012 replacement_portage = self.mydbapi[running_root].match_pkgs(
7013 PORTAGE_PACKAGE_ATOM)
7016 running_portage = running_portage[0]
7018 running_portage = None
7020 if replacement_portage:
7021 replacement_portage = replacement_portage[0]
7023 replacement_portage = None
7025 if replacement_portage == running_portage:
7026 replacement_portage = None
7028 if replacement_portage is not None:
7029 # update from running_portage to replacement_portage asap
7030 asap_nodes.append(replacement_portage)
7032 if running_portage is not None:
# Collect the running portage instance's RDEPEND atoms so its
# essential deps are protected from uninstallation below.
7034 portage_rdepend = self._select_atoms_highest_available(
7035 running_root, running_portage.metadata["RDEPEND"],
7036 myuse=running_portage.use.enabled,
7037 parent=running_portage, strict=False)
7038 except portage.exception.InvalidDependString, e:
7039 portage.writemsg("!!! Invalid RDEPEND in " + \
7040 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7041 (running_root, running_portage.cpv, e), noiselevel=-1)
7043 portage_rdepend = []
7044 runtime_deps.update(atom for atom in portage_rdepend \
7045 if not atom.startswith("!"))
7047 def gather_deps(ignore_priority, mergeable_nodes,
7048 selected_nodes, node):
7050 Recursively gather a group of nodes that RDEPEND on
7051 each other. This ensures that they are merged as a group
7052 and get their RDEPENDs satisfied as soon as possible.
7054 if node in selected_nodes:
7056 if node not in mergeable_nodes:
7058 if node == replacement_portage and \
7059 mygraph.child_nodes(node,
7060 ignore_priority=priority_range.ignore_medium_soft):
7061 # Make sure that portage always has all of its
7062 # RDEPENDs installed first.
7064 selected_nodes.add(node)
7065 for child in mygraph.child_nodes(node,
7066 ignore_priority=ignore_priority):
7067 if not gather_deps(ignore_priority,
7068 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally treat blocker-dep edges as
# non-ignorable, used for uninstall-task feasibility checks.
7072 def ignore_uninst_or_med(priority):
7073 if priority is BlockerDepPriority.instance:
7075 return priority_range.ignore_medium(priority)
7077 def ignore_uninst_or_med_soft(priority):
7078 if priority is BlockerDepPriority.instance:
7080 return priority_range.ignore_medium_soft(priority)
7082 tree_mode = "--tree" in self.myopts
7083 # Tracks whether or not the current iteration should prefer asap_nodes
7084 # if available. This is set to False when the previous iteration
7085 # failed to select any nodes. It is reset whenever nodes are
7086 # successfully selected.
7089 # Controls whether or not the current iteration should drop edges that
7090 # are "satisfied" by installed packages, in order to solve circular
7091 # dependencies. The deep runtime dependencies of installed packages are
7092 # not checked in this case (bug #199856), so it must be avoided
7093 # whenever possible.
7094 drop_satisfied = False
7096 # State of variables for successive iterations that loosen the
7097 # criteria for node selection.
7099 # iteration prefer_asap drop_satisfied
7104 # If no nodes are selected on the last iteration, it is due to
7105 # unresolved blockers or circular dependencies.
# Main selection loop: each pass removes selected_nodes from mygraph
# and appends them to retlist until the graph is empty.
7107 while not mygraph.empty():
7108 self.spinner.update()
7109 selected_nodes = None
7110 ignore_priority = None
7111 if drop_satisfied or (prefer_asap and asap_nodes):
7112 priority_range = DepPrioritySatisfiedRange
7114 priority_range = DepPriorityNormalRange
7115 if prefer_asap and asap_nodes:
7116 # ASAP nodes are merged before their soft deps. Go ahead and
7117 # select root nodes here if necessary, since it's typical for
7118 # the parent to have been removed from the graph already.
7119 asap_nodes = [node for node in asap_nodes \
7120 if mygraph.contains(node)]
7121 for node in asap_nodes:
7122 if not mygraph.child_nodes(node,
7123 ignore_priority=priority_range.ignore_soft):
7124 selected_nodes = [node]
7125 asap_nodes.remove(node)
7127 if not selected_nodes and \
7128 not (prefer_asap and asap_nodes):
# Loosen the ignored-priority threshold step by step until some
# leaf nodes turn up.
7129 for i in xrange(priority_range.NONE,
7130 priority_range.MEDIUM_SOFT + 1):
7131 ignore_priority = priority_range.ignore_priority[i]
7132 nodes = get_nodes(ignore_priority=ignore_priority)
7134 # If there is a mix of uninstall nodes with other
7135 # types, save the uninstall nodes for later since
7136 # sometimes a merge node will render an uninstall
7137 # node unnecessary (due to occupying the same slot),
7138 # and we want to avoid executing a separate uninstall
7139 # task in that case.
7141 good_uninstalls = []
7142 with_some_uninstalls_excluded = []
7144 if node.operation == "uninstall":
7145 slot_node = self.mydbapi[node.root
7146 ].match_pkgs(node.slot_atom)
7148 slot_node[0].operation == "merge":
7150 good_uninstalls.append(node)
7151 with_some_uninstalls_excluded.append(node)
7153 nodes = good_uninstalls
7154 elif with_some_uninstalls_excluded:
7155 nodes = with_some_uninstalls_excluded
7159 if ignore_priority is None and not tree_mode:
7160 # Greedily pop all of these nodes since no
7161 # relationship has been ignored. This optimization
7162 # destroys --tree output, so it's disabled in tree
7164 selected_nodes = nodes
7166 # For optimal merge order:
7167 # * Only pop one node.
7168 # * Removing a root node (node without a parent)
7169 # will not produce a leaf node, so avoid it.
7170 # * It's normal for a selected uninstall to be a
7171 # root node, so don't check them for parents.
7173 if node.operation == "uninstall" or \
7174 mygraph.parent_nodes(node):
7175 selected_nodes = [node]
7181 if not selected_nodes:
# Fallback: try to select a whole group of mutually-RDEPENDing
# nodes via gather_deps().
7182 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7184 mergeable_nodes = set(nodes)
7185 if prefer_asap and asap_nodes:
7187 for i in xrange(priority_range.SOFT,
7188 priority_range.MEDIUM_SOFT + 1):
7189 ignore_priority = priority_range.ignore_priority[i]
7191 if not mygraph.parent_nodes(node):
7193 selected_nodes = set()
7194 if gather_deps(ignore_priority,
7195 mergeable_nodes, selected_nodes, node):
7198 selected_nodes = None
7202 if prefer_asap and asap_nodes and not selected_nodes:
7203 # We failed to find any asap nodes to merge, so ignore
7204 # them for the next iteration.
7208 if selected_nodes and ignore_priority is not None:
7209 # Try to merge ignored medium_soft deps as soon as possible
7210 # if they're not satisfied by installed packages.
7211 for node in selected_nodes:
7212 children = set(mygraph.child_nodes(node))
7213 soft = children.difference(
7214 mygraph.child_nodes(node,
7215 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7216 medium_soft = children.difference(
7217 mygraph.child_nodes(node,
7219 DepPrioritySatisfiedRange.ignore_medium_soft))
7220 medium_soft.difference_update(soft)
7221 for child in medium_soft:
7222 if child in selected_nodes:
7224 if child in asap_nodes:
7226 asap_nodes.append(child)
7228 if selected_nodes and len(selected_nodes) > 1:
7229 if not isinstance(selected_nodes, list):
7230 selected_nodes = list(selected_nodes)
7231 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7233 if not selected_nodes and not myblocker_uninstalls.is_empty():
7234 # An Uninstall task needs to be executed in order to
7235 # avoid conflict if possible.
7238 priority_range = DepPrioritySatisfiedRange
7240 priority_range = DepPriorityNormalRange
7242 mergeable_nodes = get_nodes(
7243 ignore_priority=ignore_uninst_or_med)
7245 min_parent_deps = None
7247 for task in myblocker_uninstalls.leaf_nodes():
7248 # Do some sanity checks so that system or world packages
7249 # don't get uninstalled inappropriately here (only really
7250 # necessary when --complete-graph has not been enabled).
7252 if task in ignored_uninstall_tasks:
7255 if task in scheduled_uninstalls:
7256 # It's been scheduled but it hasn't
7257 # been executed yet due to dependence
7258 # on installation of blocking packages.
7261 root_config = self.roots[task.root]
7262 inst_pkg = self._pkg_cache[
7263 ("installed", task.root, task.cpv, "nomerge")]
7265 if self.digraph.contains(inst_pkg):
# Classify the blockers pulling in this uninstall: EAPI 0/1
# blockers get heuristic overlap treatment; newer EAPIs can
# explicitly forbid temporary overlap (!!atom).
7268 forbid_overlap = False
7269 heuristic_overlap = False
7270 for blocker in myblocker_uninstalls.parent_nodes(task):
7271 if blocker.eapi in ("0", "1"):
7272 heuristic_overlap = True
7273 elif blocker.atom.blocker.overlap.forbid:
7274 forbid_overlap = True
7276 if forbid_overlap and running_root == task.root:
7279 if heuristic_overlap and running_root == task.root:
7280 # Never uninstall sys-apps/portage or its essential
7281 # dependencies, except through replacement.
7283 runtime_dep_atoms = \
7284 list(runtime_deps.iterAtomsForPackage(task))
7285 except portage.exception.InvalidDependString, e:
7286 portage.writemsg("!!! Invalid PROVIDE in " + \
7287 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7288 (task.root, task.cpv, e), noiselevel=-1)
7292 # Don't uninstall a runtime dep if it appears
7293 # to be the only suitable one installed.
7295 vardb = root_config.trees["vartree"].dbapi
7296 for atom in runtime_dep_atoms:
7297 other_version = None
7298 for pkg in vardb.match_pkgs(atom):
7299 if pkg.cpv == task.cpv and \
7300 pkg.metadata["COUNTER"] == \
7301 task.metadata["COUNTER"]:
7305 if other_version is None:
7311 # For packages in the system set, don't take
7312 # any chances. If the conflict can't be resolved
7313 # by a normal replacement operation then abort.
7316 for atom in root_config.sets[
7317 "system"].iterAtomsForPackage(task):
7320 except portage.exception.InvalidDependString, e:
7321 portage.writemsg("!!! Invalid PROVIDE in " + \
7322 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7323 (task.root, task.cpv, e), noiselevel=-1)
7329 # Note that the world check isn't always
7330 # necessary since self._complete_graph() will
7331 # add all packages from the system and world sets to the
7332 # graph. This just allows unresolved conflicts to be
7333 # detected as early as possible, which makes it possible
7334 # to avoid calling self._complete_graph() when it is
7335 # unnecessary due to blockers triggering an abortion.
7337 # For packages in the world set, go ahead and uninstall
7338 # when necessary, as long as the atom will be satisfied
7339 # in the final state.
7340 graph_db = self.mydbapi[task.root]
7343 for atom in root_config.sets[
7344 "world"].iterAtomsForPackage(task):
7346 for pkg in graph_db.match_pkgs(atom):
7353 self._blocked_world_pkgs[inst_pkg] = atom
7355 except portage.exception.InvalidDependString, e:
7356 portage.writemsg("!!! Invalid PROVIDE in " + \
7357 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7358 (task.root, task.cpv, e), noiselevel=-1)
7364 # Check the deps of parent nodes to ensure that
7365 # the chosen task produces a leaf node. Maybe
7366 # this can be optimized some more to make the
7367 # best possible choice, but the current algorithm
7368 # is simple and should be near optimal for most
7370 mergeable_parent = False
7372 for parent in mygraph.parent_nodes(task):
7373 parent_deps.update(mygraph.child_nodes(parent,
7374 ignore_priority=priority_range.ignore_medium_soft))
7375 if parent in mergeable_nodes and \
7376 gather_deps(ignore_uninst_or_med_soft,
7377 mergeable_nodes, set(), parent):
7378 mergeable_parent = True
7380 if not mergeable_parent:
# Pick the candidate with the fewest outstanding parent deps.
7383 parent_deps.remove(task)
7384 if min_parent_deps is None or \
7385 len(parent_deps) < min_parent_deps:
7386 min_parent_deps = len(parent_deps)
7389 if uninst_task is not None:
7390 # The uninstall is performed only after blocking
7391 # packages have been merged on top of it. File
7392 # collisions between blocking packages are detected
7393 # and removed from the list of files to be uninstalled.
7394 scheduled_uninstalls.add(uninst_task)
7395 parent_nodes = mygraph.parent_nodes(uninst_task)
7397 # Reverse the parent -> uninstall edges since we want
7398 # to do the uninstall after blocking packages have
7399 # been merged on top of it.
7400 mygraph.remove(uninst_task)
7401 for blocked_pkg in parent_nodes:
7402 mygraph.add(blocked_pkg, uninst_task,
7403 priority=BlockerDepPriority.instance)
7404 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7405 scheduler_graph.add(blocked_pkg, uninst_task,
7406 priority=BlockerDepPriority.instance)
7408 # Reset the state variables for leaf node selection and
7409 # continue trying to select leaf nodes.
7411 drop_satisfied = False
7414 if not selected_nodes:
7415 # Only select root nodes as a last resort. This case should
7416 # only trigger when the graph is nearly empty and the only
7417 # remaining nodes are isolated (no parents or children). Since
7418 # the nodes must be isolated, ignore_priority is not needed.
7419 selected_nodes = get_nodes()
7421 if not selected_nodes and not drop_satisfied:
7422 drop_satisfied = True
7425 if not selected_nodes and not myblocker_uninstalls.is_empty():
7426 # If possible, drop an uninstall task here in order to avoid
7427 # the circular deps code path. The corresponding blocker will
7428 # still be counted as an unresolved conflict.
7430 for node in myblocker_uninstalls.leaf_nodes():
7432 mygraph.remove(node)
7437 ignored_uninstall_tasks.add(node)
7440 if uninst_task is not None:
7441 # Reset the state variables for leaf node selection and
7442 # continue trying to select leaf nodes.
7444 drop_satisfied = False
7447 if not selected_nodes:
# Nothing selectable even with loosened criteria: report a
# circular-dependency failure.
7448 self._circular_deps_for_display = mygraph
7449 raise self._unknown_internal_error()
7451 # At this point, we've succeeded in selecting one or more nodes, so
7452 # reset state variables for leaf node selection.
7454 drop_satisfied = False
7456 mygraph.difference_update(selected_nodes)
7458 for node in selected_nodes:
7459 if isinstance(node, Package) and \
7460 node.operation == "nomerge":
7463 # Handle interactions between blockers
7464 # and uninstallation tasks.
7465 solved_blockers = set()
7467 if isinstance(node, Package) and \
7468 "uninstall" == node.operation:
7469 have_uninstall_task = True
7472 vardb = self.trees[node.root]["vartree"].dbapi
7473 previous_cpv = vardb.match(node.slot_atom)
7475 # The package will be replaced by this one, so remove
7476 # the corresponding Uninstall task if necessary.
7477 previous_cpv = previous_cpv[0]
7479 ("installed", node.root, previous_cpv, "uninstall")
7481 mygraph.remove(uninst_task)
7485 if uninst_task is not None and \
7486 uninst_task not in ignored_uninstall_tasks and \
7487 myblocker_uninstalls.contains(uninst_task):
7488 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7489 myblocker_uninstalls.remove(uninst_task)
7490 # Discard any blockers that this Uninstall solves.
7491 for blocker in blocker_nodes:
7492 if not myblocker_uninstalls.child_nodes(blocker):
7493 myblocker_uninstalls.remove(blocker)
7494 solved_blockers.add(blocker)
7496 retlist.append(node)
7498 if (isinstance(node, Package) and \
7499 "uninstall" == node.operation) or \
7500 (uninst_task is not None and \
7501 uninst_task in scheduled_uninstalls):
7502 # Include satisfied blockers in the merge list
7503 # since the user might be interested and also
7504 # it serves as an indicator that blocking packages
7505 # will be temporarily installed simultaneously.
7506 for blocker in solved_blockers:
7507 retlist.append(Blocker(atom=blocker.atom,
7508 root=blocker.root, eapi=blocker.eapi,
7511 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7512 for node in myblocker_uninstalls.root_nodes():
7513 unsolvable_blockers.add(node)
7515 for blocker in unsolvable_blockers:
7516 retlist.append(blocker)
7518 # If any Uninstall tasks need to be executed in order
7519 # to avoid a conflict, complete the graph with any
7520 # dependencies that may have been initially
7521 # neglected (to ensure that unsafe Uninstall tasks
7522 # are properly identified and blocked from execution).
7523 if have_uninstall_task and \
7525 not unsolvable_blockers:
# Request a full restart of serialization with "complete" mode on.
7526 self.myparams.add("complete")
7527 raise self._serialize_tasks_retry("")
7529 if unsolvable_blockers and \
7530 not self._accept_blocker_conflicts():
7531 self._unsatisfied_blockers_for_display = unsolvable_blockers
7532 self._serialized_tasks_cache = retlist[:]
7533 self._scheduler_graph = scheduler_graph
7534 raise self._unknown_internal_error()
7536 if self._slot_collision_info and \
7537 not self._accept_blocker_conflicts():
7538 self._serialized_tasks_cache = retlist[:]
7539 self._scheduler_graph = scheduler_graph
7540 raise self._unknown_internal_error()
7542 return retlist, scheduler_graph
# Display a circular-dependency failure: prune acyclic noise from the
# graph, show the remaining cycle members in --tree format, then print
# the raw graph and advice. NOTE(review): elided listing — the loop that
# repeats the root-node pruning and the leaf-node selection details are
# partially missing here.
7544 def _show_circular_deps(self, mygraph):
7545 # No leaf nodes are available, so we have a circular
7546 # dependency panic situation. Reduce the noise level to a
7547 # minimum via repeated elimination of root nodes since they
7548 # have no parents and thus can not be part of a cycle.
7550 root_nodes = mygraph.root_nodes(
7551 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7554 mygraph.difference_update(root_nodes)
7555 # Display the USE flags that are enabled on nodes that are part
7556 # of dependency cycles in case that helps the user decide to
7557 # disable some of them.
7559 tempgraph = mygraph.copy()
7560 while not tempgraph.empty():
7561 nodes = tempgraph.leaf_nodes()
7563 node = tempgraph.order[0]
7566 display_order.append(node)
7567 tempgraph.remove(node)
7568 display_order.reverse()
# Force verbose tree output for the cycle display regardless of the
# user's quiet/verbose options.
7569 self.myopts.pop("--quiet", None)
7570 self.myopts.pop("--verbose", None)
7571 self.myopts["--tree"] = True
7572 portage.writemsg("\n\n", noiselevel=-1)
7573 self.display(display_order)
7574 prefix = colorize("BAD", " * ")
7575 portage.writemsg("\n", noiselevel=-1)
7576 portage.writemsg(prefix + "Error: circular dependencies:\n",
7578 portage.writemsg("\n", noiselevel=-1)
7579 mygraph.debug_print()
7580 portage.writemsg("\n", noiselevel=-1)
7581 portage.writemsg(prefix + "Note that circular dependencies " + \
7582 "can often be avoided by temporarily\n", noiselevel=-1)
7583 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7584 "optional dependencies.\n", noiselevel=-1)
# Re-display the cached merge list, unless the exact same list (in
# either orientation) has already been shown via self._displayed_list.
7586 def _show_merge_list(self):
7587 if self._serialized_tasks_cache is not None and \
7588 not (self._displayed_list and \
7589 (self._displayed_list == self._serialized_tasks_cache or \
7590 self._displayed_list == \
7591 list(reversed(self._serialized_tasks_cache)))):
# Copy before reversing so the cache itself is never mutated.
7592 display_list = self._serialized_tasks_cache[:]
7593 if "--tree" in self.myopts:
7594 display_list.reverse()
7595 self.display(display_list)
# Report unsatisfiable blocker conflicts: show the merge list, then for
# each blocker list the conflicting packages together with the parents
# (and atoms) that pulled them in. NOTE(review): elided listing — several
# initializations (conflict_pkgs, pruned_pkgs, msg, indent, max_parents,
# pruned_list) and some loop/branch lines are not visible here.
7597 def _show_unsatisfied_blockers(self, blockers):
7598 self._show_merge_list()
7599 msg = "Error: The above package list contains " + \
7600 "packages which cannot be installed " + \
7601 "at the same time on the same system."
7602 prefix = colorize("BAD", " * ")
7603 from textwrap import wrap
7604 portage.writemsg("\n", noiselevel=-1)
7605 for line in wrap(msg, 70):
7606 portage.writemsg(prefix + line + "\n", noiselevel=-1)
7608 # Display the conflicting packages along with the packages
7609 # that pulled them in. This is helpful for troubleshooting
7610 # cases in which blockers don't solve automatically and
7611 # the reasons are not apparent from the normal merge list
7615 for blocker in blockers:
7616 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7617 self._blocker_parents.parent_nodes(blocker)):
7618 parent_atoms = self._parent_atoms.get(pkg)
7619 if not parent_atoms:
# Fall back to the world-set atom recorded during
# _serialize_tasks for packages blocked via @world.
7620 atom = self._blocked_world_pkgs.get(pkg)
7621 if atom is not None:
7622 parent_atoms = set([("@world", atom)])
7624 conflict_pkgs[pkg] = parent_atoms
7627 # Reduce noise by pruning packages that are only
7628 # pulled in by other conflict packages.
7630 for pkg, parent_atoms in conflict_pkgs.iteritems():
7631 relevant_parent = False
7632 for parent, atom in parent_atoms:
7633 if parent not in conflict_pkgs:
7634 relevant_parent = True
7636 if not relevant_parent:
7637 pruned_pkgs.add(pkg)
7638 for pkg in pruned_pkgs:
7639 del conflict_pkgs[pkg]
7645 # Max number of parents shown, to avoid flooding the display.
7647 for pkg, parent_atoms in conflict_pkgs.iteritems():
7651 # Prefer packages that are not directly involved in a conflict.
7652 for parent_atom in parent_atoms:
7653 if len(pruned_list) >= max_parents:
7655 parent, atom = parent_atom
7656 if parent not in conflict_pkgs:
7657 pruned_list.add(parent_atom)
7659 for parent_atom in parent_atoms:
7660 if len(pruned_list) >= max_parents:
7662 pruned_list.add(parent_atom)
7664 omitted_parents = len(parent_atoms) - len(pruned_list)
7665 msg.append(indent + "%s pulled in by\n" % pkg)
7667 for parent_atom in pruned_list:
7668 parent, atom = parent_atom
7669 msg.append(2*indent)
7670 if isinstance(parent,
7671 (PackageArg, AtomArg)):
7672 # For PackageArg and AtomArg types, it's
7673 # redundant to display the atom attribute.
7674 msg.append(str(parent))
7676 # Display the specific atom from SetArg or
7678 msg.append("%s required by %s" % (atom, parent))
7682 msg.append(2*indent)
7683 msg.append("(and %d more)\n" % omitted_parents)
7687 sys.stderr.write("".join(msg))
7690 if "--quiet" not in self.myopts:
7691 show_blocker_docs_link()
7693 def display(self, mylist, favorites=[], verbosity=None):
7695 # This is used to prevent display_problems() from
7696 # redundantly displaying this exact same merge list
7697 # again via _show_merge_list().
7698 self._displayed_list = mylist
7700 if verbosity is None:
7701 verbosity = ("--quiet" in self.myopts and 1 or \
7702 "--verbose" in self.myopts and 3 or 2)
7703 favorites_set = InternalPackageSet(favorites)
7704 oneshot = "--oneshot" in self.myopts or \
7705 "--onlydeps" in self.myopts
7706 columns = "--columns" in self.myopts
7711 counters = PackageCounters()
7713 if verbosity == 1 and "--verbose" not in self.myopts:
7714 def create_use_string(*args):
7717 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7719 is_new, reinst_flags,
7720 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7721 alphabetical=("--alphabetical" in self.myopts)):
7729 cur_iuse = set(cur_iuse)
7730 enabled_flags = cur_iuse.intersection(cur_use)
7731 removed_iuse = set(old_iuse).difference(cur_iuse)
7732 any_iuse = cur_iuse.union(old_iuse)
7733 any_iuse = list(any_iuse)
7735 for flag in any_iuse:
7738 reinst_flag = reinst_flags and flag in reinst_flags
7739 if flag in enabled_flags:
7741 if is_new or flag in old_use and \
7742 (all_flags or reinst_flag):
7743 flag_str = red(flag)
7744 elif flag not in old_iuse:
7745 flag_str = yellow(flag) + "%*"
7746 elif flag not in old_use:
7747 flag_str = green(flag) + "*"
7748 elif flag in removed_iuse:
7749 if all_flags or reinst_flag:
7750 flag_str = yellow("-" + flag) + "%"
7753 flag_str = "(" + flag_str + ")"
7754 removed.append(flag_str)
7757 if is_new or flag in old_iuse and \
7758 flag not in old_use and \
7759 (all_flags or reinst_flag):
7760 flag_str = blue("-" + flag)
7761 elif flag not in old_iuse:
7762 flag_str = yellow("-" + flag)
7763 if flag not in iuse_forced:
7765 elif flag in old_use:
7766 flag_str = green("-" + flag) + "*"
7768 if flag in iuse_forced:
7769 flag_str = "(" + flag_str + ")"
7771 enabled.append(flag_str)
7773 disabled.append(flag_str)
7776 ret = " ".join(enabled)
7778 ret = " ".join(enabled + disabled + removed)
7780 ret = '%s="%s" ' % (name, ret)
7783 repo_display = RepoDisplay(self.roots)
7787 mygraph = self.digraph.copy()
7789 # If there are any Uninstall instances, add the corresponding
7790 # blockers to the digraph (useful for --tree display).
7792 executed_uninstalls = set(node for node in mylist \
7793 if isinstance(node, Package) and node.operation == "unmerge")
7795 for uninstall in self._blocker_uninstalls.leaf_nodes():
7796 uninstall_parents = \
7797 self._blocker_uninstalls.parent_nodes(uninstall)
7798 if not uninstall_parents:
7801 # Remove the corresponding "nomerge" node and substitute
7802 # the Uninstall node.
7803 inst_pkg = self._pkg_cache[
7804 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7806 mygraph.remove(inst_pkg)
7811 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7813 inst_pkg_blockers = []
7815 # Break the Package -> Uninstall edges.
7816 mygraph.remove(uninstall)
7818 # Resolution of a package's blockers
7819 # depend on it's own uninstallation.
7820 for blocker in inst_pkg_blockers:
7821 mygraph.add(uninstall, blocker)
7823 # Expand Package -> Uninstall edges into
7824 # Package -> Blocker -> Uninstall edges.
7825 for blocker in uninstall_parents:
7826 mygraph.add(uninstall, blocker)
7827 for parent in self._blocker_parents.parent_nodes(blocker):
7828 if parent != inst_pkg:
7829 mygraph.add(blocker, parent)
7831 # If the uninstall task did not need to be executed because
7832 # of an upgrade, display Blocker -> Upgrade edges since the
7833 # corresponding Blocker -> Uninstall edges will not be shown.
7835 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7836 if upgrade_node is not None and \
7837 uninstall not in executed_uninstalls:
7838 for blocker in uninstall_parents:
7839 mygraph.add(upgrade_node, blocker)
7841 unsatisfied_blockers = []
7846 if isinstance(x, Blocker) and not x.satisfied:
7847 unsatisfied_blockers.append(x)
7850 if "--tree" in self.myopts:
7851 depth = len(tree_nodes)
7852 while depth and graph_key not in \
7853 mygraph.child_nodes(tree_nodes[depth-1]):
7856 tree_nodes = tree_nodes[:depth]
7857 tree_nodes.append(graph_key)
7858 display_list.append((x, depth, True))
7859 shown_edges.add((graph_key, tree_nodes[depth-1]))
7861 traversed_nodes = set() # prevent endless circles
7862 traversed_nodes.add(graph_key)
7863 def add_parents(current_node, ordered):
7865 # Do not traverse to parents if this node is an
7866 # an argument or a direct member of a set that has
7867 # been specified as an argument (system or world).
7868 if current_node not in self._set_nodes:
7869 parent_nodes = mygraph.parent_nodes(current_node)
7871 child_nodes = set(mygraph.child_nodes(current_node))
7872 selected_parent = None
7873 # First, try to avoid a direct cycle.
7874 for node in parent_nodes:
7875 if not isinstance(node, (Blocker, Package)):
7877 if node not in traversed_nodes and \
7878 node not in child_nodes:
7879 edge = (current_node, node)
7880 if edge in shown_edges:
7882 selected_parent = node
7884 if not selected_parent:
7885 # A direct cycle is unavoidable.
7886 for node in parent_nodes:
7887 if not isinstance(node, (Blocker, Package)):
7889 if node not in traversed_nodes:
7890 edge = (current_node, node)
7891 if edge in shown_edges:
7893 selected_parent = node
7896 shown_edges.add((current_node, selected_parent))
7897 traversed_nodes.add(selected_parent)
7898 add_parents(selected_parent, False)
7899 display_list.append((current_node,
7900 len(tree_nodes), ordered))
7901 tree_nodes.append(current_node)
7903 add_parents(graph_key, True)
7905 display_list.append((x, depth, True))
7906 mylist = display_list
7907 for x in unsatisfied_blockers:
7908 mylist.append((x, 0, True))
7910 last_merge_depth = 0
7911 for i in xrange(len(mylist)-1,-1,-1):
7912 graph_key, depth, ordered = mylist[i]
7913 if not ordered and depth == 0 and i > 0 \
7914 and graph_key == mylist[i-1][0] and \
7915 mylist[i-1][1] == 0:
7916 # An ordered node got a consecutive duplicate when the tree was
7920 if ordered and graph_key[-1] != "nomerge":
7921 last_merge_depth = depth
7923 if depth >= last_merge_depth or \
7924 i < len(mylist) - 1 and \
7925 depth >= mylist[i+1][1]:
7928 from portage import flatten
7929 from portage.dep import use_reduce, paren_reduce
7930 # files to fetch list - avoids counting a same file twice
7931 # in size display (verbose mode)
7934 # Use this set to detect when all the "repoadd" strings are "[0]"
7935 # and disable the entire repo display in this case.
7938 for mylist_index in xrange(len(mylist)):
7939 x, depth, ordered = mylist[mylist_index]
7943 portdb = self.trees[myroot]["porttree"].dbapi
7944 bindb = self.trees[myroot]["bintree"].dbapi
7945 vardb = self.trees[myroot]["vartree"].dbapi
7946 vartree = self.trees[myroot]["vartree"]
7947 pkgsettings = self.pkgsettings[myroot]
7950 indent = " " * depth
7952 if isinstance(x, Blocker):
7954 blocker_style = "PKG_BLOCKER_SATISFIED"
7955 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
7957 blocker_style = "PKG_BLOCKER"
7958 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
7960 counters.blocks += 1
7962 counters.blocks_satisfied += 1
7963 resolved = portage.key_expand(
7964 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7965 if "--columns" in self.myopts and "--quiet" in self.myopts:
7966 addl += " " + colorize(blocker_style, resolved)
7968 addl = "[%s %s] %s%s" % \
7969 (colorize(blocker_style, "blocks"),
7970 addl, indent, colorize(blocker_style, resolved))
7971 block_parents = self._blocker_parents.parent_nodes(x)
7972 block_parents = set([pnode[2] for pnode in block_parents])
7973 block_parents = ", ".join(block_parents)
7975 addl += colorize(blocker_style,
7976 " (\"%s\" is blocking %s)") % \
7977 (str(x.atom).lstrip("!"), block_parents)
7979 addl += colorize(blocker_style,
7980 " (is blocking %s)") % block_parents
7981 if isinstance(x, Blocker) and x.satisfied:
7986 blockers.append(addl)
7989 pkg_merge = ordered and pkg_status == "merge"
7990 if not pkg_merge and pkg_status == "merge":
7991 pkg_status = "nomerge"
7992 built = pkg_type != "ebuild"
7993 installed = pkg_type == "installed"
7995 metadata = pkg.metadata
7997 repo_name = metadata["repository"]
7998 if pkg_type == "ebuild":
7999 ebuild_path = portdb.findname(pkg_key)
8000 if not ebuild_path: # shouldn't happen
8001 raise portage.exception.PackageNotFound(pkg_key)
8002 repo_path_real = os.path.dirname(os.path.dirname(
8003 os.path.dirname(ebuild_path)))
8005 repo_path_real = portdb.getRepositoryPath(repo_name)
8006 pkg_use = list(pkg.use.enabled)
8008 restrict = flatten(use_reduce(paren_reduce(
8009 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8010 except portage.exception.InvalidDependString, e:
8011 if not pkg.installed:
8012 show_invalid_depstring_notice(x,
8013 pkg.metadata["RESTRICT"], str(e))
8017 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8018 "fetch" in restrict:
8021 counters.restrict_fetch += 1
8022 if portdb.fetch_check(pkg_key, pkg_use):
8025 counters.restrict_fetch_satisfied += 1
8027 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8028 #param is used for -u, where you still *do* want to see when something is being upgraded.
8031 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8032 if vardb.cpv_exists(pkg_key):
8033 addl=" "+yellow("R")+fetch+" "
8036 counters.reinst += 1
8037 elif pkg_status == "uninstall":
8038 counters.uninst += 1
8039 # filter out old-style virtual matches
8040 elif installed_versions and \
8041 portage.cpv_getkey(installed_versions[0]) == \
8042 portage.cpv_getkey(pkg_key):
8043 myinslotlist = vardb.match(pkg.slot_atom)
8044 # If this is the first install of a new-style virtual, we
8045 # need to filter out old-style virtual matches.
8046 if myinslotlist and \
8047 portage.cpv_getkey(myinslotlist[0]) != \
8048 portage.cpv_getkey(pkg_key):
8051 myoldbest = myinslotlist[:]
8053 if not portage.dep.cpvequal(pkg_key,
8054 portage.best([pkg_key] + myoldbest)):
8056 addl += turquoise("U")+blue("D")
8058 counters.downgrades += 1
8061 addl += turquoise("U") + " "
8063 counters.upgrades += 1
8065 # New slot, mark it new.
8066 addl = " " + green("NS") + fetch + " "
8067 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8069 counters.newslot += 1
8071 if "--changelog" in self.myopts:
8072 inst_matches = vardb.match(pkg.slot_atom)
8074 changelogs.extend(self.calc_changelog(
8075 portdb.findname(pkg_key),
8076 inst_matches[0], pkg_key))
8078 addl = " " + green("N") + " " + fetch + " "
8087 forced_flags = set()
8088 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8089 forced_flags.update(pkgsettings.useforce)
8090 forced_flags.update(pkgsettings.usemask)
8092 cur_use = [flag for flag in pkg.use.enabled \
8093 if flag in pkg.iuse.all]
8094 cur_iuse = sorted(pkg.iuse.all)
8096 if myoldbest and myinslotlist:
8097 previous_cpv = myoldbest[0]
8099 previous_cpv = pkg.cpv
8100 if vardb.cpv_exists(previous_cpv):
8101 old_iuse, old_use = vardb.aux_get(
8102 previous_cpv, ["IUSE", "USE"])
8103 old_iuse = list(set(
8104 filter_iuse_defaults(old_iuse.split())))
8106 old_use = old_use.split()
8113 old_use = [flag for flag in old_use if flag in old_iuse]
8115 use_expand = pkgsettings["USE_EXPAND"].lower().split()
8117 use_expand.reverse()
8118 use_expand_hidden = \
8119 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8121 def map_to_use_expand(myvals, forcedFlags=False,
8125 for exp in use_expand:
8128 for val in myvals[:]:
8129 if val.startswith(exp.lower()+"_"):
8130 if val in forced_flags:
8131 forced[exp].add(val[len(exp)+1:])
8132 ret[exp].append(val[len(exp)+1:])
8135 forced["USE"] = [val for val in myvals \
8136 if val in forced_flags]
8138 for exp in use_expand_hidden:
8144 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8145 # are the only thing that triggered reinstallation.
8146 reinst_flags_map = {}
8147 reinstall_for_flags = self._reinstall_nodes.get(pkg)
8148 reinst_expand_map = None
8149 if reinstall_for_flags:
8150 reinst_flags_map = map_to_use_expand(
8151 list(reinstall_for_flags), removeHidden=False)
8152 for k in list(reinst_flags_map):
8153 if not reinst_flags_map[k]:
8154 del reinst_flags_map[k]
8155 if not reinst_flags_map.get("USE"):
8156 reinst_expand_map = reinst_flags_map.copy()
8157 reinst_expand_map.pop("USE", None)
8158 if reinst_expand_map and \
8159 not set(reinst_expand_map).difference(
8161 use_expand_hidden = \
8162 set(use_expand_hidden).difference(
8165 cur_iuse_map, iuse_forced = \
8166 map_to_use_expand(cur_iuse, forcedFlags=True)
8167 cur_use_map = map_to_use_expand(cur_use)
8168 old_iuse_map = map_to_use_expand(old_iuse)
8169 old_use_map = map_to_use_expand(old_use)
8172 use_expand.insert(0, "USE")
8174 for key in use_expand:
8175 if key in use_expand_hidden:
8177 verboseadd += create_use_string(key.upper(),
8178 cur_iuse_map[key], iuse_forced[key],
8179 cur_use_map[key], old_iuse_map[key],
8180 old_use_map[key], is_new,
8181 reinst_flags_map.get(key))
8186 if pkg_type == "ebuild" and pkg_merge:
8188 myfilesdict = portdb.getfetchsizes(pkg_key,
8189 useflags=pkg_use, debug=self.edebug)
8190 except portage.exception.InvalidDependString, e:
8191 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8192 show_invalid_depstring_notice(x, src_uri, str(e))
8195 if myfilesdict is None:
8196 myfilesdict="[empty/missing/bad digest]"
8198 for myfetchfile in myfilesdict:
8199 if myfetchfile not in myfetchlist:
8200 mysize+=myfilesdict[myfetchfile]
8201 myfetchlist.append(myfetchfile)
8203 counters.totalsize += mysize
8204 verboseadd += format_size(mysize)
8207 # assign index for a previous version in the same slot
8208 has_previous = False
8209 repo_name_prev = None
8210 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8212 slot_matches = vardb.match(slot_atom)
8215 repo_name_prev = vardb.aux_get(slot_matches[0],
8218 # now use the data to generate output
8219 if pkg.installed or not has_previous:
8220 repoadd = repo_display.repoStr(repo_path_real)
8222 repo_path_prev = None
8224 repo_path_prev = portdb.getRepositoryPath(
8226 if repo_path_prev == repo_path_real:
8227 repoadd = repo_display.repoStr(repo_path_real)
8229 repoadd = "%s=>%s" % (
8230 repo_display.repoStr(repo_path_prev),
8231 repo_display.repoStr(repo_path_real))
8233 repoadd_set.add(repoadd)
8235 xs = [portage.cpv_getkey(pkg_key)] + \
8236 list(portage.catpkgsplit(pkg_key)[2:])
8243 if "COLUMNWIDTH" in self.settings:
8245 mywidth = int(self.settings["COLUMNWIDTH"])
8246 except ValueError, e:
8247 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8249 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8250 self.settings["COLUMNWIDTH"], noiselevel=-1)
8252 oldlp = mywidth - 30
8255 # Convert myoldbest from a list to a string.
8259 for pos, key in enumerate(myoldbest):
8260 key = portage.catpkgsplit(key)[2] + \
8261 "-" + portage.catpkgsplit(key)[3]
8262 if key[-3:] == "-r0":
8264 myoldbest[pos] = key
8265 myoldbest = blue("["+", ".join(myoldbest)+"]")
8268 root_config = self.roots[myroot]
8269 system_set = root_config.sets["system"]
8270 world_set = root_config.sets["world"]
8275 pkg_system = system_set.findAtomForPackage(pkg)
8276 pkg_world = world_set.findAtomForPackage(pkg)
8277 if not (oneshot or pkg_world) and \
8278 myroot == self.target_root and \
8279 favorites_set.findAtomForPackage(pkg):
8280 # Maybe it will be added to world now.
8281 if create_world_atom(pkg, favorites_set, root_config):
8283 except portage.exception.InvalidDependString:
8284 # This is reported elsewhere if relevant.
8287 def pkgprint(pkg_str):
8290 return colorize("PKG_MERGE_SYSTEM", pkg_str)
8292 return colorize("PKG_MERGE_WORLD", pkg_str)
8294 return colorize("PKG_MERGE", pkg_str)
8295 elif pkg_status == "uninstall":
8296 return colorize("PKG_UNINSTALL", pkg_str)
8299 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8301 return colorize("PKG_NOMERGE_WORLD", pkg_str)
8303 return colorize("PKG_NOMERGE", pkg_str)
8306 properties = flatten(use_reduce(paren_reduce(
8307 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8308 except portage.exception.InvalidDependString, e:
8309 if not pkg.installed:
8310 show_invalid_depstring_notice(pkg,
8311 pkg.metadata["PROPERTIES"], str(e))
8315 interactive = "interactive" in properties
8316 if interactive and pkg.operation == "merge":
8317 addl = colorize("WARN", "I") + addl[1:]
8319 counters.interactive += 1
8324 if "--columns" in self.myopts:
8325 if "--quiet" in self.myopts:
8326 myprint=addl+" "+indent+pkgprint(pkg_cp)
8327 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8328 myprint=myprint+myoldbest
8329 myprint=myprint+darkgreen("to "+x[1])
8333 myprint = "[%s] %s%s" % \
8334 (pkgprint(pkg_status.ljust(13)),
8335 indent, pkgprint(pkg.cp))
8337 myprint = "[%s %s] %s%s" % \
8338 (pkgprint(pkg.type_name), addl,
8339 indent, pkgprint(pkg.cp))
8340 if (newlp-nc_len(myprint)) > 0:
8341 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8342 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8343 if (oldlp-nc_len(myprint)) > 0:
8344 myprint=myprint+" "*(oldlp-nc_len(myprint))
8345 myprint=myprint+myoldbest
8346 myprint += darkgreen("to " + pkg.root)
8349 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8351 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8352 myprint += indent + pkgprint(pkg_key) + " " + \
8353 myoldbest + darkgreen("to " + myroot)
8355 if "--columns" in self.myopts:
8356 if "--quiet" in self.myopts:
8357 myprint=addl+" "+indent+pkgprint(pkg_cp)
8358 myprint=myprint+" "+green(xs[1]+xs[2])+" "
8359 myprint=myprint+myoldbest
8363 myprint = "[%s] %s%s" % \
8364 (pkgprint(pkg_status.ljust(13)),
8365 indent, pkgprint(pkg.cp))
8367 myprint = "[%s %s] %s%s" % \
8368 (pkgprint(pkg.type_name), addl,
8369 indent, pkgprint(pkg.cp))
8370 if (newlp-nc_len(myprint)) > 0:
8371 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8372 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8373 if (oldlp-nc_len(myprint)) > 0:
8374 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8375 myprint += myoldbest
8378 myprint = "[%s] %s%s %s" % \
8379 (pkgprint(pkg_status.ljust(13)),
8380 indent, pkgprint(pkg.cpv),
8383 myprint = "[%s %s] %s%s %s" % \
8384 (pkgprint(pkg_type), addl, indent,
8385 pkgprint(pkg.cpv), myoldbest)
8387 if columns and pkg.operation == "uninstall":
8389 p.append((myprint, verboseadd, repoadd))
8391 if "--tree" not in self.myopts and \
8392 "--quiet" not in self.myopts and \
8393 not self._opts_no_restart.intersection(self.myopts) and \
8394 pkg.root == self._running_root.root and \
8395 portage.match_from_list(
8396 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8397 not vardb.cpv_exists(pkg.cpv) and \
8398 "--quiet" not in self.myopts:
8399 if mylist_index < len(mylist) - 1:
8400 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8401 p.append(colorize("WARN", " then resume the merge."))
8404 show_repos = repoadd_set and repoadd_set != set(["0"])
8407 if isinstance(x, basestring):
8408 out.write("%s\n" % (x,))
8411 myprint, verboseadd, repoadd = x
8414 myprint += " " + verboseadd
8416 if show_repos and repoadd:
8417 myprint += " " + teal("[%s]" % repoadd)
8419 out.write("%s\n" % (myprint,))
8428 sys.stdout.write(str(repo_display))
8430 if "--changelog" in self.myopts:
8432 for revision,text in changelogs:
8433 print bold('*'+revision)
8434 sys.stdout.write(text)
def display_problems(self):
	"""
	Display problems with the dependency graph such as slot collisions.
	This is called internally by display() to show the problems _after_
	the merge list where it is most likely to be seen, but if display()
	is not going to be called then this method should be called explicitly
	to ensure that the user is notified of problems with the graph.

	All output goes to stderr, except for unsatisfied dependencies which
	go to stdout for parsing by programs such as autounmask.
	"""

	# show_masked_packages() writes to stdout, and some programs
	# (e.g. autounmask) parse that output when emerge bails out.
	# The installed-package warnings produced by _display_problems()
	# are more appropriate on stderr, so temporarily redirect stdout
	# there while it runs.
	# TODO: Fix output code so there's a cleaner way
	# to redirect everything to stderr.
	saved_stdout = sys.stdout
	try:
		sys.stdout = sys.stderr
		self._display_problems()
	finally:
		sys.stdout = saved_stdout

	# Unsatisfied dependencies go to stdout for parsing by
	# programs like autounmask.
	for pargs, kwargs in self._unsatisfied_deps_for_display:
		self._show_unsatisfied_dep(*pargs, **kwargs)
# NOTE(review): garbled listing — original line numbers are fused into
# each line and several lines are elided (guard branches, the 'else'
# branches, and initializations such as 'arg_refs = {}' / 'msg = []'),
# so this block is not runnable as shown. Purpose: report dependency
# graph problems (circular deps, unsatisfied blockers or slot
# collisions, missing arguments, world-file problems, package.provided
# conflicts, masked installed packages) — all on stderr.
8474 def _display_problems(self):
8475 if self._circular_deps_for_display is not None:
8476 self._show_circular_deps(
8477 self._circular_deps_for_display)
8479 # The user is only notified of a slot conflict if
8480 # there are no unresolvable blocker conflicts.
8481 if self._unsatisfied_blockers_for_display is not None:
8482 self._show_unsatisfied_blockers(
8483 self._unsatisfied_blockers_for_display)
# NOTE(review): the branch guarding the following call is elided here
# (presumably an elif taken when slot-collision info was recorded) —
# confirm against upstream before relying on this flow.
8485 self._show_slot_collision_notice()
8487 # TODO: Add generic support for "set problem" handlers so that
8488 # the below warnings aren't special cases for world only.
8490 if self._missing_args:
8491 world_problems = False
8492 if "world" in self._sets:
8493 # Filter out indirect members of world (from nested sets)
8494 # since only direct members of world are desired here.
8495 world_set = self.roots[self.target_root].sets["world"]
8496 for arg, atom in self._missing_args:
8497 if arg.name == "world" and atom in world_set:
8498 world_problems = True
8502 sys.stderr.write("\n!!! Problems have been " + \
8503 "detected with your world file\n")
8504 sys.stderr.write("!!! Please run " + \
8505 green("emaint --check world")+"\n\n")
8507 if self._missing_args:
8508 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8509 " Ebuilds for the following packages are either all\n")
8510 sys.stderr.write(colorize("BAD", "!!!") + \
8511 " masked or don't exist:\n")
8512 sys.stderr.write(" ".join(str(atom) for arg, atom in \
8513 self._missing_args) + "\n")
# Group package.provided conflicts by (argument, atom), collecting the
# names of the parents (sets or "args") that pulled each one in.
8515 if self._pprovided_args:
8517 for arg, atom in self._pprovided_args:
8518 if isinstance(arg, SetArg):
8520 arg_atom = (atom, atom)
8523 arg_atom = (arg.arg, atom)
8524 refs = arg_refs.setdefault(arg_atom, [])
8525 if parent not in refs:
8528 msg.append(bad("\nWARNING: "))
8529 if len(self._pprovided_args) > 1:
8530 msg.append("Requested packages will not be " + \
8531 "merged because they are listed in\n")
8533 msg.append("A requested package will not be " + \
8534 "merged because it is listed in\n")
8535 msg.append("package.provided:\n\n")
8536 problems_sets = set()
8537 for (arg, atom), refs in arg_refs.iteritems():
8540 problems_sets.update(refs)
8542 ref_string = ", ".join(["'%s'" % name for name in refs])
8543 ref_string = " pulled in by " + ref_string
8544 msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8546 if "world" in problems_sets:
8547 msg.append("This problem can be solved in one of the following ways:\n\n")
8548 msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
8549 msg.append(" B) Uninstall offending packages (cleans them from world).\n")
8550 msg.append(" C) Remove offending entries from package.provided.\n\n")
8551 msg.append("The best course of action depends on the reason that an offending\n")
8552 msg.append("package.provided entry exists.\n\n")
8553 sys.stderr.write("".join(msg))
# Warn about installed packages that are now masked.
8555 masked_packages = []
8556 for pkg in self._masked_installed:
8557 root_config = pkg.root_config
8558 pkgsettings = self.pkgsettings[pkg.root]
8559 mreasons = get_masking_status(pkg, pkgsettings, root_config)
8560 masked_packages.append((root_config, pkgsettings,
8561 pkg.cpv, pkg.metadata, mreasons))
8563 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8564 " The following installed packages are masked:\n")
8565 show_masked_packages(masked_packages)
def calc_changelog(self,ebuildpath,current,next):
	"""
	Collect ChangeLog entries covering the versions between the
	installed package and the one about to be merged.

	@param ebuildpath: path to the ebuild whose ChangeLog is consulted
	@param current: currently installed cpv
	@param next: cpv that is about to be merged
	@rtype: list
	@return: (version, entry_text) tuples, newest first; an empty list
		if the ChangeLog is missing/unreadable or the installed
		version cannot be located in it
	"""
	if ebuildpath is None or not os.path.exists(ebuildpath):
		return []
	# Reduce the cpvs to bare version strings, dropping a redundant
	# -r0 revision suffix since ChangeLog headers omit it.
	current = '-'.join(portage.catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(portage.catpkgsplit(next)[1:])
	if next.endswith('-r0'):
		next = next[:-3]
	changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
	try:
		f = open(changelogpath)
		try:
			changelog = f.read()
		finally:
			# Always release the file handle (the previous
			# implementation leaked it).
			f.close()
	except SystemExit:
		raise # Needed else can't exit
	except:
		# Best-effort: any problem reading the ChangeLog simply
		# disables the changelog display.
		return []
	divisions = self.find_changelog_tags(changelog)
	# Skip entries for all revisions above the one we are about to emerge.
	for i in range(len(divisions)):
		if divisions[i][0]==next:
			divisions = divisions[i:]
			break
	# Find out how many entries we are going to display.
	for i in range(len(divisions)):
		if divisions[i][0]==current:
			divisions = divisions[:i]
			break
	else:
		# Couldn't find the current revision in the list; display nothing.
		return []
	return divisions
def find_changelog_tags(self,changelog):
	"""
	Split ChangeLog text into (version, entry_text) tuples.

	Each "* <name>-<version>" header line starts a new entry; a
	trailing ".ebuild" or "-r0" is stripped from the captured
	version. Text preceding the first header is discarded.
	"""
	entries = []
	release = None
	while True:
		match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
		if match is None:
			# The remainder of the text belongs to the last
			# (oldest) header seen, if any.
			if release is not None:
				entries.append((release,changelog))
			return entries
		if release is not None:
			entries.append((release,changelog[:match.start()]))
		changelog = changelog[match.end():]
		release = match.group(1)
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
# NOTE(review): garbled listing — original line numbers are fused into
# each line; the early 'return', the world_set.lock()/unlock() calls,
# try/finally scaffolding and 'continue' statements appear elided.
8621 def saveNomergeFavorites(self):
8622 """Find atoms in favorites that are not in the mergelist and add them
8623 to the world file if necessary."""
# Pretend-style modes must never modify the world file.
8624 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8625 "--oneshot", "--onlydeps", "--pretend"):
8626 if x in self.myopts:
8628 root_config = self.roots[self.target_root]
8629 world_set = root_config.sets["world"]
# Lock/load only applies to world-set implementations that support it.
8631 world_locked = False
8632 if hasattr(world_set, "lock"):
8636 if hasattr(world_set, "load"):
8637 world_set.load() # maybe it's changed on disk
8639 args_set = self._sets["args"]
8640 portdb = self.trees[self.target_root]["porttree"].dbapi
8641 added_favorites = set()
# "nomerge" set nodes are already-satisfied arguments; compute a world
# atom for each so they can still be recorded.
8642 for x in self._set_nodes:
8643 pkg_type, root, pkg_key, pkg_status = x
8644 if pkg_status != "nomerge":
8648 myfavkey = create_world_atom(x, args_set, root_config)
8650 if myfavkey in added_favorites:
8652 added_favorites.add(myfavkey)
8653 except portage.exception.InvalidDependString, e:
8654 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8655 (pkg_key, str(e)), noiselevel=-1)
8656 writemsg("!!! see '%s'\n\n" % os.path.join(
8657 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Requested named sets that qualify as world candidates are recorded
# with their SETPREFIX form; "args"/"world" themselves are excluded.
8660 for k in self._sets:
8661 if k in ("args", "world") or not root_config.sets[k].world_candidate:
8666 all_added.append(SETPREFIX + k)
8667 all_added.extend(added_favorites)
8670 print ">>> Recording %s in \"world\" favorites file..." % \
8671 colorize("INFORM", str(a))
8673 world_set.update(all_added)
# NOTE(review): garbled listing — original line numbers are fused into
# each line; docstring delimiters, loop headers (e.g. the iteration over
# the mergelist), try/except scaffolding, 'continue'/'return' statements
# and several 'else' branches are elided, so this block is not runnable
# as shown.
8678 def loadResumeCommand(self, resume_data, skip_masked=False):
8680 Add a resume command to the graph and validate it in the process. This
8681 will raise a PackageNotFound exception if a package is not available.
# Validate the shape of the persisted resume data before using it.
8684 if not isinstance(resume_data, dict):
8687 mergelist = resume_data.get("mergelist")
8688 if not isinstance(mergelist, list):
8691 fakedb = self.mydbapi
8693 serialized_tasks = []
# Each mergelist entry is a 4-element [pkg_type, root, cpv, action].
8696 if not (isinstance(x, list) and len(x) == 4):
8698 pkg_type, myroot, pkg_key, action = x
8699 if pkg_type not in self.pkg_tree_map:
8701 if action != "merge":
8703 tree_type = self.pkg_tree_map[pkg_type]
8704 mydb = trees[myroot][tree_type].dbapi
8705 db_keys = list(self._trees_orig[myroot][
8706 tree_type].dbapi._aux_cache_keys)
8708 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8710 # It does not exist or it is corrupt.
8711 if action == "uninstall":
8713 raise portage.exception.PackageNotFound(pkg_key)
8714 installed = action == "uninstall"
8715 built = pkg_type != "ebuild"
8716 root_config = self.roots[myroot]
8717 pkg = Package(built=built, cpv=pkg_key,
8718 installed=installed, metadata=metadata,
8719 operation=action, root_config=root_config,
# For ebuilds, recompute USE/CHOST from current settings rather than
# trusting the serialized metadata.
8721 if pkg_type == "ebuild":
8722 pkgsettings = self.pkgsettings[myroot]
8723 pkgsettings.setcpv(pkg)
8724 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8725 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8726 self._pkg_cache[pkg] = pkg
8728 root_config = self.roots[pkg.root]
8729 if "merge" == pkg.operation and \
8730 not visible(root_config.settings, pkg):
8732 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8734 self._unsatisfied_deps_for_display.append(
8735 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8737 fakedb[myroot].cpv_inject(pkg)
8738 serialized_tasks.append(pkg)
8739 self.spinner.update()
8741 if self._unsatisfied_deps_for_display:
# With --nodeps (or an empty list) the serialized order is trusted
# as-is; otherwise the graph is rebuilt below.
8744 if not serialized_tasks or "--nodeps" in self.myopts:
8745 self._serialized_tasks_cache = serialized_tasks
8746 self._scheduler_graph = self.digraph
8748 self._select_package = self._select_pkg_from_graph
8749 self.myparams.add("selective")
8750 # Always traverse deep dependencies in order to account for
8751 # potentially unsatisfied dependencies of installed packages.
8752 # This is necessary for correct --keep-going or --resume operation
8753 # in case a package from a group of circularly dependent packages
8754 # fails. In this case, a package which has recently been installed
8755 # may have an unsatisfied circular dependency (pulled in by
8756 # PDEPEND, for example). So, even though a package is already
8757 # installed, it may not have all of it's dependencies satisfied, so
8758 # it may not be usable. If such a package is in the subgraph of
8759 # deep depenedencies of a scheduled build, that build needs to
8760 # be cancelled. In order for this type of situation to be
8761 # recognized, deep traversal of dependencies is required.
8762 self.myparams.add("deep")
8764 favorites = resume_data.get("favorites")
8765 args_set = self._sets["args"]
8766 if isinstance(favorites, list):
8767 args = self._load_favorites(favorites)
8771 for task in serialized_tasks:
8772 if isinstance(task, Package) and \
8773 task.operation == "merge":
8774 if not self._add_pkg(task, None):
8777 # Packages for argument atoms need to be explicitly
8778 # added via _add_pkg() so that they are included in the
8779 # digraph (needed at least for --tree display).
8781 for atom in arg.set:
8782 pkg, existing_node = self._select_package(
8783 arg.root_config.root, atom)
8784 if existing_node is None and \
8786 if not self._add_pkg(pkg, Dependency(atom=atom,
8787 root=pkg.root, parent=arg)):
8790 # Allow unsatisfied deps here to avoid showing a masking
8791 # message for an unsatisfied dep that isn't necessarily
8793 if not self._create_graph(allow_unsatisfied=True):
# Deps of to-be-merged parents always count as unsatisfied; deps of
# installed parents only count when reachable from a scheduled merge.
8796 unsatisfied_deps = []
8797 for dep in self._unsatisfied_deps:
8798 if not isinstance(dep.parent, Package):
8800 if dep.parent.operation == "merge":
8801 unsatisfied_deps.append(dep)
8804 # For unsatisfied deps of installed packages, only account for
8805 # them if they are in the subgraph of dependencies of a package
8806 # which is scheduled to be installed.
8807 unsatisfied_install = False
8809 dep_stack = self.digraph.parent_nodes(dep.parent)
8811 node = dep_stack.pop()
8812 if not isinstance(node, Package):
8814 if node.operation == "merge":
8815 unsatisfied_install = True
8817 if node in traversed:
8820 dep_stack.extend(self.digraph.parent_nodes(node))
8822 if unsatisfied_install:
8823 unsatisfied_deps.append(dep)
8825 if masked_tasks or unsatisfied_deps:
8826 # This probably means that a required package
8827 # was dropped via --skipfirst. It makes the
8828 # resume list invalid, so convert it to a
8829 # UnsatisfiedResumeDep exception.
8830 raise self.UnsatisfiedResumeDep(self,
8831 masked_tasks + unsatisfied_deps)
8832 self._serialized_tasks_cache = None
8835 except self._unknown_internal_error:
# NOTE(review): garbled listing — original line numbers fused into each
# line; docstring delimiters, the 'args' list initialization, the loop
# header over 'favorites', 'continue' statements and the final 'return'
# are elided.
8840 def _load_favorites(self, favorites):
8842 Use a list of favorites to resume state from a
8843 previous select_files() call. This creates similar
8844 DependencyArg instances to those that would have
8845 been created by the original select_files() call.
8846 This allows Package instances to be matched with
8847 DependencyArg instances during graph creation.
8849 root_config = self.roots[self.target_root]
8850 getSetAtoms = root_config.setconfig.getSetAtoms
8851 sets = root_config.sets
# Favorites entries are either SETPREFIX-prefixed set names or atoms;
# anything else (non-strings, "system"/"world") is skipped here.
8854 if not isinstance(x, basestring):
8856 if x in ("system", "world"):
8858 if x.startswith(SETPREFIX):
8859 s = x[len(SETPREFIX):]
8864 # Recursively expand sets so that containment tests in
8865 # self._get_parent_sets() properly match atoms in nested
8866 # sets (like if world contains system).
8867 expanded_set = InternalPackageSet(
8868 initial_atoms=getSetAtoms(s))
8869 self._sets[s] = expanded_set
8870 args.append(SetArg(arg=x, set=expanded_set,
8871 root_config=root_config))
8873 if not portage.isvalidatom(x):
8875 args.append(AtomArg(arg=x, atom=x,
8876 root_config=root_config))
8878 self._set_args(args)
# NOTE(review): the triple-quote delimiters around the class docstring
# (next three lines) appear elided in this listing.
8881 class UnsatisfiedResumeDep(portage.exception.PortageException):
8883 A dependency of a resume list is not installed. This
8884 can occur when a required package is dropped from the
8885 merge list via --skipfirst.
# Keeps a reference to the depgraph so callers catching this exception
# can inspect the graph state that produced it.
8887 def __init__(self, depgraph, value):
8888 portage.exception.PortageException.__init__(self, value)
8889 self.depgraph = depgraph
# Base class for exceptions the depgraph uses internally for control
# flow; defaults to an empty message.
8891 class _internal_exception(portage.exception.PortageException):
8892 def __init__(self, value=""):
8893 portage.exception.PortageException.__init__(self, value)
# NOTE(review): the docstring delimiters and its final line(s) are
# elided in this listing — the text below is cut mid-sentence.
8895 class _unknown_internal_error(_internal_exception):
8897 Used by the depgraph internally to terminate graph creation.
8898 The specific reason for the failure should have been dumped
8899 to stderr, unfortunately, the exact reason for the failure
# NOTE(review): the triple-quote delimiters around the docstring are
# elided in this listing.
8903 class _serialize_tasks_retry(_internal_exception):
8905 This is raised by the _serialize_tasks() method when it needs to
8906 be called again for some reason. The only case that it's currently
8907 used for is when neglected dependencies need to be added to the
8908 graph in order to avoid making a potentially unsafe decision.
# NOTE(review): the docstring delimiters and its tail are elided in
# this listing.
8911 class _dep_check_composite_db(portage.dbapi):
8913 A dbapi-like interface that is optimized for use in dep_check() calls.
8914 This is built on top of the existing depgraph package selection logic.
8915 Some packages that have been added to the graph may be masked from this
8916 view in order to influence the atom preference selection that occurs
8919 def __init__(self, depgraph, root):
8920 portage.dbapi.__init__(self)
8921 self._depgraph = depgraph
# NOTE(review): a line assigning the 'root' parameter appears elided
# here — later methods read self._root; confirm 'self._root = root'
# against upstream.
8923 self._match_cache = {}
8924 self._cpv_pkg_map = {}
8926 def _clear_cache(self):
8927 self._match_cache.clear()
8928 self._cpv_pkg_map.clear()
# NOTE(review): garbled listing — original line numbers fused into each
# line; the cache-hit early return, 'orig_atom' capture, 'ret'/'slots'
# initializations, several 'if'/'continue' lines and the final 'return'
# are elided, so this block is not runnable as shown.
8930 def match(self, atom):
8931 ret = self._match_cache.get(atom)
8936 atom = self._dep_expand(atom)
8937 pkg, existing = self._depgraph._select_package(self._root, atom)
8941 # Return the highest available from select_package() as well as
8942 # any matching slots in the graph db.
8944 slots.add(pkg.metadata["SLOT"])
8945 atom_cp = portage.dep_getkey(atom)
8946 if pkg.cp.startswith("virtual/"):
8947 # For new-style virtual lookahead that occurs inside
8948 # dep_check(), examine all slots. This is needed
8949 # so that newer slots will not unnecessarily be pulled in
8950 # when a satisfying lower slot is already installed. For
8951 # example, if virtual/jdk-1.4 is satisfied via kaffe then
8952 # there's no need to pull in a newer slot to satisfy a
8953 # virtual/jdk dependency.
8954 for db, pkg_type, built, installed, db_keys in \
8955 self._depgraph._filtered_trees[self._root]["dbs"]:
8956 for cpv in db.match(atom):
8957 if portage.cpv_getkey(cpv) != pkg.cp:
8959 slots.add(db.aux_get(cpv, ["SLOT"])[0])
# Record the selected package per slot, skipping any that fail the
# _visible() policy check.
8961 if self._visible(pkg):
8962 self._cpv_pkg_map[pkg.cpv] = pkg
8964 slots.remove(pkg.metadata["SLOT"])
8966 slot_atom = "%s:%s" % (atom_cp, slots.pop())
8967 pkg, existing = self._depgraph._select_package(
8968 self._root, slot_atom)
8971 if not self._visible(pkg):
8973 self._cpv_pkg_map[pkg.cpv] = pkg
# Results are sorted ascending and memoized under the original atom.
8976 self._cpv_sort_ascending(ret)
8977 self._match_cache[orig_atom] = ret
# NOTE(review): garbled listing — 'try:' lines and the 'return' results
# of each branch are elided; this method implements the visibility
# policy for match() candidates, but the exact True/False returns must
# be confirmed against upstream.
8980 def _visible(self, pkg):
# Installed packages are only acceptable when --selective behavior is
# active and the package is still requested by some argument atom.
8981 if pkg.installed and "selective" not in self._depgraph.myparams:
8983 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8984 except (StopIteration, portage.exception.InvalidDependString):
8991 self._depgraph.pkgsettings[pkg.root], pkg):
8993 except portage.exception.InvalidDependString:
8995 in_graph = self._depgraph._slot_pkg_map[
8996 self._root].get(pkg.slot_atom)
8997 if in_graph is None:
8998 # Mask choices for packages which are not the highest visible
8999 # version within their slot (since they usually trigger slot
9001 highest_visible, in_graph = self._depgraph._select_package(
9002 self._root, pkg.slot_atom)
9003 if pkg != highest_visible:
9005 elif in_graph != pkg:
9006 # Mask choices for packages that would trigger a slot
9007 # conflict with a previously selected package.
# NOTE(review): garbled listing — docstring delimiters, the branch
# around the virts_p check and the final 'return atom' are elided.
9011 def _dep_expand(self, atom):
9013 This is only needed for old installed packages that may
9014 contain atoms that are not fully qualified with a specific
9015 category. Emulate the cpv_expand() function that's used by
9016 dbapi.match() in cases like this. If there are multiple
9017 matches, it's often due to a new-style virtual that has
9018 been added, so try to filter those out to avoid raising
9021 root_config = self._depgraph.roots[self._root]
9023 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
# Prefer the single non-virtual expansion when the ambiguity is caused
# by a new-style virtual.
9024 if len(expanded_atoms) > 1:
9025 non_virtual_atoms = []
9026 for x in expanded_atoms:
9027 if not portage.dep_getkey(x).startswith("virtual/"):
9028 non_virtual_atoms.append(x)
9029 if len(non_virtual_atoms) == 1:
9030 expanded_atoms = non_virtual_atoms
9031 if len(expanded_atoms) > 1:
9032 # compatible with portage.cpv_expand()
9033 raise portage.exception.AmbiguousPackageName(
9034 [portage.dep_getkey(x) for x in expanded_atoms])
9036 atom = expanded_atoms[0]
# No expansion matched: fall back to a "virtual/" category when the
# package name is a known PROVIDE, else a "null/" placeholder.
9038 null_atom = insert_category_into_atom(atom, "null")
9039 null_cp = portage.dep_getkey(null_atom)
9040 cat, atom_pn = portage.catsplit(null_cp)
9041 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9043 # Allow the resolver to choose which virtual.
9044 atom = insert_category_into_atom(atom, "virtual")
9046 atom = insert_category_into_atom(atom, "null")
def aux_get(self, cpv, wants):
	"""Return the metadata values named by wants for the package
	previously cached under cpv, substituting "" for any key that
	the package's metadata does not define."""
	pkg_metadata = self._cpv_pkg_map[cpv].metadata
	values = []
	for key in wants:
		values.append(pkg_metadata.get(key, ""))
	return values
# Maps repository paths (PORTDIR + overlays) to short display indices
# like "[0]" / "[?]" for use in the merge-list output.
9053 class RepoDisplay(object):
9054 def __init__(self, roots):
# repo_path -> display index, filled lazily by repoStr().
9055 self._shown_repos = {}
9056 self._unknown_repo = False
9058 for root_config in roots.itervalues():
9059 portdir = root_config.settings.get("PORTDIR")
9061 repo_paths.add(portdir)
9062 overlays = root_config.settings.get("PORTDIR_OVERLAY")
9064 repo_paths.update(overlays.split())
9065 repo_paths = list(repo_paths)
9066 self._repo_paths = repo_paths
# Keep realpath()-resolved copies for lookups by on-disk location.
9067 self._repo_paths_real = [ os.path.realpath(repo_path) \
9068 for repo_path in repo_paths ]
9070 # pre-allocate index for PORTDIR so that it always has index 0.
9071 for root_config in roots.itervalues():
9072 portdb = root_config.trees["porttree"].dbapi
9073 portdir = portdb.porttree_root
9075 self.repoStr(portdir)
# Return the short index string for a real repository path, assigning
# a new index on first sight.
9077 def repoStr(self, repo_path_real):
9080 real_index = self._repo_paths_real.index(repo_path_real)
# NOTE(review): list.index() raises ValueError rather than returning
# -1, so this branch looks unreachable unless a missing line above
# catches/normalizes that — verify against the full source.
9081 if real_index == -1:
9083 self._unknown_repo = True
9085 shown_repos = self._shown_repos
9086 repo_paths = self._repo_paths
9087 repo_path = repo_paths[real_index]
9088 index = shown_repos.get(repo_path)
9090 index = len(shown_repos)
9091 shown_repos[repo_path] = index
# (__str__-style renderer; its def line is not visible in this view.)
9097 shown_repos = self._shown_repos
9098 unknown_repo = self._unknown_repo
9099 if shown_repos or self._unknown_repo:
9100 output.append("Portage tree and overlays:\n")
# Invert the repo_path -> index mapping into an index-ordered list.
9101 show_repo_paths = list(shown_repos)
9102 for repo_path, repo_index in shown_repos.iteritems():
9103 show_repo_paths[repo_index] = repo_path
9105 for index, repo_path in enumerate(show_repo_paths):
9106 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9108 output.append(" "+teal("[?]") + \
9109 " indicates that the source repository could not be determined\n")
9110 return "".join(output)
# Tallies merge-list statistics (upgrades, downgrades, new slots, ...)
# and renders the "Total: N packages (...)" summary line.
9112 class PackageCounters(object):
9122 self.blocks_satisfied = 0
9124 self.restrict_fetch = 0
9125 self.restrict_fetch_satisfied = 0
9126 self.interactive = 0
# (Renderer; def line and the myoutput/details initializations are not
# visible in this view — presumably a __str__ building the summary.)
9129 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9132 myoutput.append("Total: %s package" % total_installs)
# Pluralize "package" and open the parenthesized breakdown.
9133 if total_installs != 1:
9134 myoutput.append("s")
9135 if total_installs != 0:
9136 myoutput.append(" (")
9137 if self.upgrades > 0:
9138 details.append("%s upgrade" % self.upgrades)
9139 if self.upgrades > 1:
9141 if self.downgrades > 0:
9142 details.append("%s downgrade" % self.downgrades)
9143 if self.downgrades > 1:
9146 details.append("%s new" % self.new)
9147 if self.newslot > 0:
9148 details.append("%s in new slot" % self.newslot)
9149 if self.newslot > 1:
9152 details.append("%s reinstall" % self.reinst)
9156 details.append("%s uninstall" % self.uninst)
9159 if self.interactive > 0:
9160 details.append("%s %s" % (self.interactive,
9161 colorize("WARN", "interactive")))
9162 myoutput.append(", ".join(details))
9163 if total_installs != 0:
9164 myoutput.append(")")
9165 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
# Append fetch-restriction and blocker summaries, flagging any
# unsatisfied counts in the "bad" color.
9166 if self.restrict_fetch:
9167 myoutput.append("\nFetch Restriction: %s package" % \
9168 self.restrict_fetch)
9169 if self.restrict_fetch > 1:
9170 myoutput.append("s")
9171 if self.restrict_fetch_satisfied < self.restrict_fetch:
9172 myoutput.append(bad(" (%s unsatisfied)") % \
9173 (self.restrict_fetch - self.restrict_fetch_satisfied))
9175 myoutput.append("\nConflict: %s block" % \
9178 myoutput.append("s")
9179 if self.blocks_satisfied < self.blocks:
9180 myoutput.append(bad(" (%s unsatisfied)") % \
9181 (self.blocks - self.blocks_satisfied))
9182 return "".join(myoutput)
9184 class PollSelectAdapter(PollConstants):
9187 Use select to emulate a poll object, for
9188 systems that don't support poll().
# fd -> eventmask of registered descriptors.
9192 self._registered = {}
# Cached [rlist, wlist, xlist] for select(); invalidated to None on
# any (un)registration.
9193 self._select_args = [[], [], []]
9195 def register(self, fd, *args):
9197 Only POLLIN is currently supported!
9201 "register expected at most 2 arguments, got " + \
9202 repr(1 + len(args)))
9204 eventmask = PollConstants.POLLIN | \
9205 PollConstants.POLLPRI | PollConstants.POLLOUT
9209 self._registered[fd] = eventmask
9210 self._select_args = None
9212 def unregister(self, fd):
9213 self._select_args = None
9214 del self._registered[fd]
9216 def poll(self, *args):
9219 "poll expected at most 2 arguments, got " + \
9220 repr(1 + len(args)))
9226 select_args = self._select_args
9227 if select_args is None:
# Rebuild the read-fd list lazily; only POLLIN is emulated.
9228 select_args = [self._registered.keys(), [], []]
9230 if timeout is not None:
# Copy before appending so the cached list stays timeout-free.
9231 select_args = select_args[:]
9232 # Translate poll() timeout args to select() timeout args:
9234 # | units | value(s) for indefinite block
9235 # ---------|--------------|------------------------------
9236 # poll | milliseconds | omitted, negative, or None
9237 # ---------|--------------|------------------------------
9238 # select | seconds | omitted
9239 # ---------|--------------|------------------------------
9241 if timeout is not None and timeout < 0:
9243 if timeout is not None:
# NOTE(review): in Python 2, `timeout / 1000` on an int truncates to
# whole seconds (e.g. 500ms -> 0) — confirm callers pass floats or
# accept the truncation.
9244 select_args.append(timeout / 1000)
9246 select_events = select.select(*select_args)
9248 for fd in select_events[0]:
9249 poll_events.append((fd, PollConstants.POLLIN))
# FIFO queue of asynchronous tasks that starts up to max_jobs of them
# concurrently; max_jobs=True means unlimited.
9252 class SequentialTaskQueue(SlotObject):
9254 __slots__ = ("max_jobs", "running_tasks") + \
9255 ("_dirty", "_scheduling", "_task_queue")
9257 def __init__(self, **kwargs):
9258 SlotObject.__init__(self, **kwargs)
9259 self._task_queue = deque()
9260 self.running_tasks = set()
9261 if self.max_jobs is None:
# Append a task to the back of the queue.
9265 def add(self, task):
9266 self._task_queue.append(task)
# Queue a task ahead of everything already waiting.
9269 def addFront(self, task):
9270 self._task_queue.appendleft(task)
9281 if self._scheduling:
9282 # Ignore any recursive schedule() calls triggered via
9283 # self._task_exit().
9286 self._scheduling = True
9288 task_queue = self._task_queue
9289 running_tasks = self.running_tasks
9290 max_jobs = self.max_jobs
9291 state_changed = False
# Launch queued tasks while below the concurrency limit.
9293 while task_queue and \
9294 (max_jobs is True or len(running_tasks) < max_jobs):
9295 task = task_queue.popleft()
9296 cancelled = getattr(task, "cancelled", None)
9298 running_tasks.add(task)
9299 task.addExitListener(self._task_exit)
9301 state_changed = True
9304 self._scheduling = False
9306 return state_changed
9308 def _task_exit(self, task):
9310 Since we can always rely on exit listeners being called, the set of
9311 running tasks is always pruned automatically and there is never any need
9312 to actively prune it.
9314 self.running_tasks.remove(task)
9315 if self._task_queue:
# (clear(): drop queued tasks and detach from any still running.)
9319 self._task_queue.clear()
9320 running_tasks = self.running_tasks
9321 while running_tasks:
9322 task = running_tasks.pop()
9323 task.removeExitListener(self._task_exit)
# Truthy while any task is queued or running.
9327 def __nonzero__(self):
9328 return bool(self._task_queue or self.running_tasks)
9331 return len(self._task_queue) + len(self.running_tasks)
# Cached result of can_poll_device(); None means "not probed yet".
9333 _can_poll_device = None
9335 def can_poll_device():
9337 Test if it's possible to use poll() on a device such as a pty. This
9338 is known to fail on Darwin.
9340 @returns: True if poll() on a device succeeds, False otherwise.
9343 global _can_poll_device
# Return the memoized answer when the probe already ran.
9344 if _can_poll_device is not None:
9345 return _can_poll_device
9347 if not hasattr(select, "poll"):
9348 _can_poll_device = False
9349 return _can_poll_device
# Probe by polling /dev/null; POLLNVAL in the reply means the
# platform's poll() rejects device files.
9352 dev_null = open('/dev/null', 'rb')
9354 _can_poll_device = False
9355 return _can_poll_device
9358 p.register(dev_null.fileno(), PollConstants.POLLIN)
9360 invalid_request = False
9361 for f, event in p.poll():
9362 if event & PollConstants.POLLNVAL:
9363 invalid_request = True
9367 _can_poll_device = not invalid_request
9368 return _can_poll_device
9370 def create_poll_instance():
9372 Create an instance of select.poll, or an instance of
9373 PollSelectAdapter if there is no poll() implementation or
9374 it is broken somehow.
9376 if can_poll_device():
9377 return select.poll()
9378 return PollSelectAdapter()
# Fall back to a /proc/loadavg reader on platforms where the os module
# lacks getloadavg().
9380 getloadavg = getattr(os, "getloadavg", None)
9381 if getloadavg is None:
9384 Uses /proc/loadavg to emulate os.getloadavg().
9385 Raises OSError if the load average was unobtainable.
9388 loadavg_str = open('/proc/loadavg').readline()
9390 # getloadavg() is only supposed to raise OSError, so convert
9391 raise OSError('unknown')
# The first three whitespace-separated fields are the 1/5/15 minute
# averages; anything shorter is malformed.
9392 loadavg_split = loadavg_str.split()
9393 if len(loadavg_split) < 3:
9394 raise OSError('unknown')
9398 loadavg_floats.append(float(loadavg_split[i]))
9400 raise OSError('unknown')
9401 return tuple(loadavg_floats)
# Base class for schedulers driven by a poll()-style event loop: it owns
# the poll object, a shared event queue, and the fd -> handler registry.
9403 class PollScheduler(object):
# Bundle of bound methods handed to tasks so they can interact with
# the scheduler without holding a reference to it.
9405 class _sched_iface_class(SlotObject):
9406 __slots__ = ("register", "schedule", "unregister")
9410 self._max_load = None
9412 self._poll_event_queue = []
9413 self._poll_event_handlers = {}
9414 self._poll_event_handler_ids = {}
9415 # Increment id for each new handler.
9416 self._event_handler_id = 0
9417 self._poll_obj = create_poll_instance()
9418 self._scheduling = False
9420 def _schedule(self):
9422 Calls _schedule_tasks() and automatically returns early from
9423 any recursive calls to this method that the _schedule_tasks()
9424 call might trigger. This makes _schedule() safe to call from
9425 inside exit listeners.
9427 if self._scheduling:
9429 self._scheduling = True
9431 return self._schedule_tasks()
9433 self._scheduling = False
9435 def _running_job_count(self):
# Decide whether another job may start, honoring both --jobs and
# --load-average style limits.
9438 def _can_add_job(self):
9439 max_jobs = self._max_jobs
9440 max_load = self._max_load
9442 if self._max_jobs is not True and \
9443 self._running_job_count() >= self._max_jobs:
# Only consult the load average when parallelism is actually allowed
# and at least one job is already running.
9446 if max_load is not None and \
9447 (max_jobs is True or max_jobs > 1) and \
9448 self._running_job_count() >= 1:
9450 avg1, avg5, avg15 = getloadavg()
9454 if avg1 >= max_load:
9459 def _poll(self, timeout=None):
9461 All poll() calls pass through here. The poll events
9462 are added directly to self._poll_event_queue.
9463 In order to avoid endless blocking, this raises
9464 StopIteration if timeout is None and there are
9465 no file descriptors to poll.
9467 if not self._poll_event_handlers:
9469 if timeout is None and \
9470 not self._poll_event_handlers:
9471 raise StopIteration(
9472 "timeout is None and there are no poll() event handlers")
9474 # The following error is known to occur with Linux kernel versions
9477 # select.error: (4, 'Interrupted system call')
9479 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9480 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9481 # without any events.
9484 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9486 except select.error, e:
9487 writemsg_level("\n!!! select error: %s\n" % (e,),
9488 level=logging.ERROR, noiselevel=-1)
9490 if timeout is not None:
9493 def _next_poll_event(self, timeout=None):
9495 Since the _schedule_wait() loop is called by event
9496 handlers from _poll_loop(), maintain a central event
9497 queue for both of them to share events from a single
9498 poll() call. In order to avoid endless blocking, this
9499 raises StopIteration if timeout is None and there are
9500 no file descriptors to poll.
9502 if not self._poll_event_queue:
9504 return self._poll_event_queue.pop()
# Dispatch events to handlers until no handlers remain registered.
9506 def _poll_loop(self):
9508 event_handlers = self._poll_event_handlers
9509 event_handled = False
9512 while event_handlers:
9513 f, event = self._next_poll_event()
9514 handler, reg_id = event_handlers[f]
9516 event_handled = True
9517 except StopIteration:
9518 event_handled = True
# An iteration that handled nothing would spin forever; fail loudly.
9520 if not event_handled:
9521 raise AssertionError("tight loop")
9523 def _schedule_yield(self):
9525 Schedule for a short period of time chosen by the scheduler based
9526 on internal state. Synchronous tasks should call this periodically
9527 in order to allow the scheduler to service pending poll events. The
9528 scheduler will call poll() exactly once, without blocking, and any
9529 resulting poll events will be serviced.
9531 event_handlers = self._poll_event_handlers
9534 if not event_handlers:
9535 return bool(events_handled)
9537 if not self._poll_event_queue:
9541 while event_handlers and self._poll_event_queue:
9542 f, event = self._next_poll_event()
9543 handler, reg_id = event_handlers[f]
9546 except StopIteration:
9549 return bool(events_handled)
9551 def _register(self, f, eventmask, handler):
9554 @return: A unique registration id, for use in schedule() or
9557 if f in self._poll_event_handlers:
9558 raise AssertionError("fd %d is already registered" % f)
# Monotonically increasing ids let callers wait on a specific
# registration rather than a (reusable) fd number.
9559 self._event_handler_id += 1
9560 reg_id = self._event_handler_id
9561 self._poll_event_handler_ids[reg_id] = f
9562 self._poll_event_handlers[f] = (handler, reg_id)
9563 self._poll_obj.register(f, eventmask)
9566 def _unregister(self, reg_id):
9567 f = self._poll_event_handler_ids[reg_id]
9568 self._poll_obj.unregister(f)
9569 del self._poll_event_handlers[f]
9570 del self._poll_event_handler_ids[reg_id]
9572 def _schedule_wait(self, wait_ids):
9574 Schedule until wait_ids are no longer registered
9577 @param wait_ids: a task id (or collection of ids) to wait for
9579 event_handlers = self._poll_event_handlers
9580 handler_ids = self._poll_event_handler_ids
9581 event_handled = False
# Accept a bare id for convenience; normalize to a set.
9583 if isinstance(wait_ids, int):
9584 wait_ids = frozenset([wait_ids])
9587 while wait_ids.intersection(handler_ids):
9588 f, event = self._next_poll_event()
9589 handler, reg_id = event_handlers[f]
9591 event_handled = True
9592 except StopIteration:
9593 event_handled = True
9595 return event_handled
9597 class QueueScheduler(PollScheduler):
9600 Add instances of SequentialTaskQueue and then call run(). The
9601 run() method returns when no tasks remain.
9604 def __init__(self, max_jobs=None, max_load=None):
9605 PollScheduler.__init__(self)
9607 if max_jobs is None:
9610 self._max_jobs = max_jobs
9611 self._max_load = max_load
# Expose only the registration/wait primitives to managed tasks.
9612 self.sched_iface = self._sched_iface_class(
9613 register=self._register,
9614 schedule=self._schedule_wait,
9615 unregister=self._unregister)
9618 self._schedule_listeners = []
9621 self._queues.append(q)
9623 def remove(self, q):
9624 self._queues.remove(q)
# (run(): drive scheduling until queues drain, then wait for the
# remaining running jobs.)
9628 while self._schedule():
9631 while self._running_job_count():
9634 def _schedule_tasks(self):
9637 @returns: True if there may be remaining tasks to schedule,
# Start jobs in batches sized by the remaining headroom until either
# the limits or the queues are exhausted.
9640 while self._can_add_job():
9641 n = self._max_jobs - self._running_job_count()
9645 if not self._start_next_job(n):
9648 for q in self._queues:
# Sum running tasks across all managed queues.
9653 def _running_job_count(self):
9655 for q in self._queues:
9656 job_count += len(q.running_tasks)
9657 self._jobs = job_count
# Ask each queue in turn to start work; count how many jobs actually
# started (detected via the queue's running-task delta).
9660 def _start_next_job(self, n=1):
9662 for q in self._queues:
9663 initial_job_count = len(q.running_tasks)
9665 final_job_count = len(q.running_tasks)
9666 if final_job_count > initial_job_count:
9667 started_count += (final_job_count - initial_job_count)
9668 if started_count >= n:
9670 return started_count
9672 class TaskScheduler(object):
9675 A simple way to handle scheduling of AsynchronousTask instances. Simply
9676 add tasks and call run(). The run() method returns when no tasks remain.
9679 def __init__(self, max_jobs=None, max_load=None):
# Compose a single queue with a QueueScheduler; run() is delegated
# directly to the scheduler.
9680 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9681 self._scheduler = QueueScheduler(
9682 max_jobs=max_jobs, max_load=max_load)
9683 self.sched_iface = self._scheduler.sched_iface
9684 self.run = self._scheduler.run
9685 self._scheduler.add(self._queue)
9687 def add(self, task):
9688 self._queue.add(task)
# Renders the one-line "Jobs: X of Y complete ..." status display,
# using termcap codes when stdout is a tty and plain lines otherwise.
9690 class JobStatusDisplay(object):
# Attributes whose assignment triggers a redraw (see __setattr__).
9692 _bound_properties = ("curval", "failed", "running")
9693 _jobs_column_width = 48
9695 # Don't update the display unless at least this much
9696 # time has passed, in units of seconds.
9697 _min_display_latency = 2
9699 _default_term_codes = {
9705 _termcap_name_map = {
9706 'carriage_return' : 'cr',
9711 def __init__(self, out=sys.stdout, quiet=False):
# object.__setattr__ is used throughout to bypass the redraw logic
# in this class's own __setattr__.
9712 object.__setattr__(self, "out", out)
9713 object.__setattr__(self, "quiet", quiet)
9714 object.__setattr__(self, "maxval", 0)
9715 object.__setattr__(self, "merges", 0)
9716 object.__setattr__(self, "_changed", False)
9717 object.__setattr__(self, "_displayed", False)
9718 object.__setattr__(self, "_last_display_time", 0)
9719 object.__setattr__(self, "width", 80)
9722 isatty = hasattr(out, "isatty") and out.isatty()
9723 object.__setattr__(self, "_isatty", isatty)
# Fall back to hard-coded codes when termcap setup fails.
9724 if not isatty or not self._init_term():
9726 for k, capname in self._termcap_name_map.iteritems():
9727 term_codes[k] = self._default_term_codes[capname]
9728 object.__setattr__(self, "_term_codes", term_codes)
9729 encoding = sys.getdefaultencoding()
9730 for k, v in self._term_codes.items():
9731 if not isinstance(v, str):
9732 self._term_codes[k] = v.decode(encoding, 'replace')
9734 def _init_term(self):
9736 Initialize term control codes.
9738 @returns: True if term codes were successfully initialized,
9742 term_type = os.environ.get("TERM", "vt100")
9748 curses.setupterm(term_type, self.out.fileno())
9749 tigetstr = curses.tigetstr
9750 except curses.error:
9755 if tigetstr is None:
# Look up each needed capability, defaulting when the terminal
# doesn't define it.
9759 for k, capname in self._termcap_name_map.iteritems():
9760 code = tigetstr(capname)
9762 code = self._default_term_codes[capname]
9763 term_codes[k] = code
9764 object.__setattr__(self, "_term_codes", term_codes)
9767 def _format_msg(self, msg):
9768 return ">>> %s" % msg
# (Erase the current status line: carriage return + clear-to-eol.)
9772 self._term_codes['carriage_return'] + \
9773 self._term_codes['clr_eol'])
9775 self._displayed = False
9777 def _display(self, line):
9778 self.out.write(line)
9780 self._displayed = True
9782 def _update(self, msg):
9785 if not self._isatty:
# Non-tty output gets plain newline-terminated lines.
9786 out.write(self._format_msg(msg) + self._term_codes['newline'])
9788 self._displayed = True
9794 self._display(self._format_msg(msg))
9796 def displayMessage(self, msg):
9798 was_displayed = self._displayed
# Clear the status line first so the message doesn't overwrite it.
9800 if self._isatty and self._displayed:
9803 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9805 self._displayed = False
9808 self._changed = True
# (reset(): zero the bound counters and drop to a fresh line.)
9814 for name in self._bound_properties:
9815 object.__setattr__(self, name, 0)
9818 self.out.write(self._term_codes['newline'])
9820 self._displayed = False
9822 def __setattr__(self, name, value):
9823 old_value = getattr(self, name)
9824 if value == old_value:
9826 object.__setattr__(self, name, value)
# Redraw when a display-bound counter changes.
9827 if name in self._bound_properties:
9828 self._property_change(name, old_value, value)
9830 def _property_change(self, name, old_value, new_value):
9831 self._changed = True
9834 def _load_avg_str(self):
9849 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9853 Display status on stdout, but only if something has
9854 changed since the last call.
# Rate-limit tty redraws to _min_display_latency seconds.
9860 current_time = time.time()
9861 time_delta = current_time - self._last_display_time
9862 if self._displayed and \
9864 if not self._isatty:
9866 if time_delta < self._min_display_latency:
9869 self._last_display_time = current_time
9870 self._changed = False
9871 self._display_status()
9873 def _display_status(self):
9874 # Don't use len(self._completed_tasks) here since that also
9875 # can include uninstall tasks.
9876 curval_str = str(self.curval)
9877 maxval_str = str(self.maxval)
9878 running_str = str(self.running)
9879 failed_str = str(self.failed)
9880 load_avg_str = self._load_avg_str()
# Write colored and plain variants in parallel; the plain copy is
# used for width measurement and truncation.
9882 color_output = StringIO()
9883 plain_output = StringIO()
9884 style_file = portage.output.ConsoleStyleFile(color_output)
9885 style_file.write_listener = plain_output
9886 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9887 style_writer.style_listener = style_file.new_styles
9888 f = formatter.AbstractFormatter(style_writer)
9890 number_style = "INFORM"
9891 f.add_literal_data("Jobs: ")
9892 f.push_style(number_style)
9893 f.add_literal_data(curval_str)
9895 f.add_literal_data(" of ")
9896 f.push_style(number_style)
9897 f.add_literal_data(maxval_str)
9899 f.add_literal_data(" complete")
9902 f.add_literal_data(", ")
9903 f.push_style(number_style)
9904 f.add_literal_data(running_str)
9906 f.add_literal_data(" running")
9909 f.add_literal_data(", ")
9910 f.push_style(number_style)
9911 f.add_literal_data(failed_str)
9913 f.add_literal_data(" failed")
# Pad the jobs column so the load average lines up.
9915 padding = self._jobs_column_width - len(plain_output.getvalue())
9917 f.add_literal_data(padding * " ")
9919 f.add_literal_data("Load avg: ")
9920 f.add_literal_data(load_avg_str)
9922 # Truncate to fit width, to avoid making the terminal scroll if the
9923 # line overflows (happens when the load average is large).
9924 plain_output = plain_output.getvalue()
9925 if self._isatty and len(plain_output) > self.width:
9926 # Use plain_output here since it's easier to truncate
9927 # properly than the color output which contains console
9929 self._update(plain_output[:self.width])
9931 self._update(color_output.getvalue())
9933 xtermTitle(" ".join(plain_output.split()))
9935 class Scheduler(PollScheduler):
# Option sets controlling Scheduler behavior: which emerge options
# disable blocker enforcement, background mode, restarting after a
# portage upgrade, and which options are invalid with --resume.
9937 _opts_ignore_blockers = \
9938 frozenset(["--buildpkgonly",
9939 "--fetchonly", "--fetch-all-uri",
9940 "--nodeps", "--pretend"])
9942 _opts_no_background = \
9943 frozenset(["--pretend",
9944 "--fetchonly", "--fetch-all-uri"])
9946 _opts_no_restart = frozenset(["--buildpkgonly",
9947 "--fetchonly", "--fetch-all-uri", "--pretend"])
9949 _bad_resume_opts = set(["--ask", "--changelog",
9950 "--resume", "--skipfirst"])
# Shared log for parallel-fetch downloads.
9952 _fetch_log = "/var/log/emerge-fetch.log"
# Callback bundle handed to build/merge tasks (dblink hooks, fetch
# scheduling, poll registration).
9954 class _iface_class(SlotObject):
9955 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9956 "dblinkElog", "fetch", "register", "schedule",
9957 "scheduleSetup", "scheduleUnpack", "scheduleYield",
# Minimal interface given to fetcher tasks.
9960 class _fetch_iface_class(SlotObject):
9961 __slots__ = ("log_file", "schedule")
# One SequentialTaskQueue per named queue (merge/jobs/fetch/unpack).
9963 _task_queues_class = slot_dict_class(
9964 ("merge", "jobs", "fetch", "unpack"), prefix="")
# Boolean views of the relevant emerge command-line options.
9966 class _build_opts_class(SlotObject):
9967 __slots__ = ("buildpkg", "buildpkgonly",
9968 "fetch_all_uri", "fetchonly", "pretend")
9970 class _binpkg_opts_class(SlotObject):
9971 __slots__ = ("fetchonly", "getbinpkg", "pretend")
# Progress counter pair: current merge number / total merges.
9973 class _pkg_count_class(SlotObject):
9974 __slots__ = ("curval", "maxval")
9976 class _emerge_log_class(SlotObject):
9977 __slots__ = ("xterm_titles",)
9979 def log(self, *pargs, **kwargs):
9980 if not self.xterm_titles:
9981 # Avoid interference with the scheduler's status display.
9982 kwargs.pop("short_msg", None)
9983 emergelog(self.xterm_titles, *pargs, **kwargs)
# Record of a failed package build for the final summary.
9985 class _failed_pkg(SlotObject):
9986 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9988 class _ConfigPool(object):
9989 """Interface for a task to temporarily allocate a config
9990 instance from a pool. This allows a task to be constructed
9991 long before the config instance actually becomes needed, like
9992 when prefetchers are constructed for the whole merge list."""
9993 __slots__ = ("_root", "_allocate", "_deallocate")
9994 def __init__(self, root, allocate, deallocate):
# allocate/deallocate are callables supplied by the Scheduler that
# check config instances out of / back into the per-root pool.
9996 self._allocate = allocate
9997 self._deallocate = deallocate
9999 return self._allocate(self._root)
10000 def deallocate(self, settings):
10001 self._deallocate(settings)
10003 class _unknown_internal_error(portage.exception.PortageException):
10005 Used internally to terminate scheduling. The specific reason for
10006 the failure should have been dumped to stderr.
10008 def __init__(self, value=""):
10009 portage.exception.PortageException.__init__(self, value)
# Wire up the merge scheduler: option parsing, task queues, status
# display, per-root config pools, and parallel-fetch setup.
10011 def __init__(self, settings, trees, mtimedb, myopts,
10012 spinner, mergelist, favorites, digraph):
10013 PollScheduler.__init__(self)
10014 self.settings = settings
10015 self.target_root = settings["ROOT"]
10017 self.myopts = myopts
10018 self._spinner = spinner
10019 self._mtimedb = mtimedb
10020 self._mergelist = mergelist
10021 self._favorites = favorites
10022 self._args_set = InternalPackageSet(favorites)
# Derive each boolean option attribute from the matching
# "--option-name" in myopts (slot name with "_" -> "-").
10023 self._build_opts = self._build_opts_class()
10024 for k in self._build_opts.__slots__:
10025 setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10026 self._binpkg_opts = self._binpkg_opts_class()
10027 for k in self._binpkg_opts.__slots__:
10028 setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10031 self._logger = self._emerge_log_class()
10032 self._task_queues = self._task_queues_class()
10033 for k in self._task_queues.allowed_keys:
10034 setattr(self._task_queues, k,
10035 SequentialTaskQueue())
10037 # Holds merges that will wait to be executed when no builds are
10038 # executing. This is useful for system packages since dependencies
10039 # on system packages are frequently unspecified.
10040 self._merge_wait_queue = []
10041 # Holds merges that have been transferred from the merge_wait_queue to
10042 # the actual merge queue. They are removed from this list upon
10043 # completion. Other packages can start building only when this list is
10045 self._merge_wait_scheduled = []
10047 # Holds system packages and their deep runtime dependencies. Before
10048 # being merged, these packages go to merge_wait_queue, to be merged
10049 # when no other packages are building.
10050 self._deep_system_deps = set()
10052 # Holds packages to merge which will satisfy currently unsatisfied
10053 # deep runtime dependencies of system packages. If this is not empty
10054 # then no parallel builds will be spawned until it is empty. This
10055 # minimizes the possibility that a build will fail due to the system
10056 # being in a fragile state. For example, see bug #259954.
10057 self._unsatisfied_system_deps = set()
10059 self._status_display = JobStatusDisplay()
10060 self._max_load = myopts.get("--load-average")
10061 max_jobs = myopts.get("--jobs")
10062 if max_jobs is None:
10064 self._set_max_jobs(max_jobs)
10066 # The root where the currently running
10067 # portage instance is installed.
10068 self._running_root = trees["/"]["root_config"]
10070 if settings.get("PORTAGE_DEBUG", "") == "1":
# Per-root state: package settings, reusable config instances, and
# blocker databases.
10072 self.pkgsettings = {}
10073 self._config_pool = {}
10074 self._blocker_db = {}
10076 self._config_pool[root] = []
10077 self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10079 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10080 schedule=self._schedule_fetch)
10081 self._sched_iface = self._iface_class(
10082 dblinkEbuildPhase=self._dblink_ebuild_phase,
10083 dblinkDisplayMerge=self._dblink_display_merge,
10084 dblinkElog=self._dblink_elog,
10085 fetch=fetch_iface, register=self._register,
10086 schedule=self._schedule_wait,
10087 scheduleSetup=self._schedule_setup,
10088 scheduleUnpack=self._schedule_unpack,
10089 scheduleYield=self._schedule_yield,
10090 unregister=self._unregister)
# Weak refs so finished prefetchers can be garbage collected.
10092 self._prefetchers = weakref.WeakValueDictionary()
10093 self._pkg_queue = []
10094 self._completed_tasks = set()
10096 self._failed_pkgs = []
10097 self._failed_pkgs_all = []
10098 self._failed_pkgs_die_msgs = []
10099 self._post_mod_echo_msgs = []
10100 self._parallel_fetch = False
10101 merge_count = len([x for x in mergelist \
10102 if isinstance(x, Package) and x.operation == "merge"])
10103 self._pkg_count = self._pkg_count_class(
10104 curval=0, maxval=merge_count)
10105 self._status_display.maxval = self._pkg_count.maxval
10107 # The load average takes some time to respond when new
10108 # jobs are added, so we need to limit the rate of adding
10110 self._job_delay_max = 10
10111 self._job_delay_factor = 1.0
10112 self._job_delay_exp = 1.5
10113 self._previous_job_start_time = None
10115 self._set_digraph(digraph)
10117 # This is used to memoize the _choose_pkg() result when
10118 # no packages can be chosen until one of the existing
10120 self._choose_pkg_return_early = False
# parallel-fetch requires distlocks and is pointless for a
# single-package merge list or fetch-only runs.
10122 features = self.settings.features
10123 if "parallel-fetch" in features and \
10124 not ("--pretend" in self.myopts or \
10125 "--fetch-all-uri" in self.myopts or \
10126 "--fetchonly" in self.myopts):
10127 if "distlocks" not in features:
10128 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10129 portage.writemsg(red("!!!")+" parallel-fetching " + \
10130 "requires the distlocks feature enabled"+"\n",
10132 portage.writemsg(red("!!!")+" you have it disabled, " + \
10133 "thus parallel-fetching is being disabled"+"\n",
10135 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10136 elif len(mergelist) > 1:
10137 self._parallel_fetch = True
10139 if self._parallel_fetch:
10140 # clear out existing fetch log if it exists
10142 open(self._fetch_log, 'w')
10143 except EnvironmentError:
# Identify the installed portage package of the running instance,
# used elsewhere to detect when emerge must restart itself.
10146 self._running_portage = None
10147 portage_match = self._running_root.trees["vartree"].dbapi.match(
10148 portage.const.PORTAGE_PACKAGE_ATOM)
10150 cpv = portage_match.pop()
10151 self._running_portage = self._pkg(cpv, "installed",
10152 self._running_root, installed=True)
# Delegates to PollScheduler._poll; this view is missing the lines
# between the def and the call (presumably extra bookkeeping).
10154 def _poll(self, timeout=None):
10156 PollScheduler._poll(self, timeout=timeout)
10158 def _set_max_jobs(self, max_jobs):
10159 self._max_jobs = max_jobs
10160 self._task_queues.jobs.max_jobs = max_jobs
10162 def _background_mode(self):
10164 Check if background mode is enabled and adjust states as necessary.
10167 @returns: True if background mode is enabled, False otherwise.
# Background mode requires parallelism (or --quiet) and none of the
# options that force foreground output.
10169 background = (self._max_jobs is True or \
10170 self._max_jobs > 1 or "--quiet" in self.myopts) and \
10171 not bool(self._opts_no_background.intersection(self.myopts))
# Interactive packages need the user's terminal, so drop back to
# --jobs=1 and stdio output when any are present.
10174 interactive_tasks = self._get_interactive_tasks()
10175 if interactive_tasks:
10177 writemsg_level(">>> Sending package output to stdio due " + \
10178 "to interactive package(s):\n",
10179 level=logging.INFO, noiselevel=-1)
10181 for pkg in interactive_tasks:
10182 pkg_str = " " + colorize("INFORM", str(pkg.cpv))
10183 if pkg.root != "/":
10184 pkg_str += " for " + pkg.root
10185 msg.append(pkg_str)
10187 writemsg_level("".join("%s\n" % (l,) for l in msg),
10188 level=logging.INFO, noiselevel=-1)
10189 if self._max_jobs is True or self._max_jobs > 1:
10190 self._set_max_jobs(1)
10191 writemsg_level(">>> Setting --jobs=1 due " + \
10192 "to the above interactive package(s)\n",
10193 level=logging.INFO, noiselevel=-1)
10195 self._status_display.quiet = \
10196 not background or \
10197 ("--quiet" in self.myopts and \
10198 "--verbose" not in self.myopts)
10200 self._logger.xterm_titles = \
10201 "notitles" not in self.settings.features and \
10202 self._status_display.quiet
# Return the merge-list packages whose PROPERTIES include
# "interactive" (they need the user's terminal during the build).
10206 def _get_interactive_tasks(self):
10207 from portage import flatten
10208 from portage.dep import use_reduce, paren_reduce
10209 interactive_tasks = []
10210 for task in self._mergelist:
10211 if not (isinstance(task, Package) and \
10212 task.operation == "merge"):
# Evaluate PROPERTIES conditionals against the package's enabled
# USE flags.
10215 properties = flatten(use_reduce(paren_reduce(
10216 task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10217 except portage.exception.InvalidDependString, e:
10218 show_invalid_depstring_notice(task,
10219 task.metadata["PROPERTIES"], str(e))
# A malformed PROPERTIES string aborts scheduling entirely.
10220 raise self._unknown_internal_error()
10221 if "interactive" in properties:
10222 interactive_tasks.append(task)
10223 return interactive_tasks
# Install the dependency graph; it's only kept (and analyzed) when
# parallel scheduling can actually use it.
10225 def _set_digraph(self, digraph):
10226 if "--nodeps" in self.myopts or \
10227 (self._max_jobs is not True and self._max_jobs < 2):
10229 self._digraph = None
10232 self._digraph = digraph
10233 self._find_system_deps()
10234 self._prune_digraph()
10235 self._prevent_builddir_collisions()
10237 def _find_system_deps(self):
10239 Find system packages and their deep runtime dependencies. Before being
10240 merged, these packages go to merge_wait_queue, to be merged when no
10241 other packages are building.
10243 deep_system_deps = self._deep_system_deps
10244 deep_system_deps.clear()
10245 deep_system_deps.update(
10246 _find_deep_system_runtime_deps(self._digraph))
# Only packages actually being merged need the serialized treatment.
10247 deep_system_deps.difference_update([pkg for pkg in \
10248 deep_system_deps if pkg.operation != "merge"])
	def _prune_digraph(self):
		"""
		Prune any root nodes that are irrelevant.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		# NOTE(review): upstream repeats this scan in a loop, since removing
		# roots can expose new prunable roots; the loop header is not
		# visible in this excerpt -- confirm before editing.
		for node in graph.root_nodes():
			# Roots that are not packages, are installed no-ops, or are
			# already completed contribute nothing to future decisions.
			if not isinstance(node, Package) or \
				(node.installed and node.operation == "nomerge") or \
				node in completed_tasks:
				removed_nodes.add(node)
		graph.difference_update(removed_nodes)
		if not removed_nodes:
			# [a 'break' out of the (elided) outer loop likely belongs here]
			removed_nodes.clear()
10271 def _prevent_builddir_collisions(self):
10273 When building stages, sometimes the same exact cpv needs to be merged
10274 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10275 in the builddir. Currently, normal file locks would be inappropriate
10276 for this purpose since emerge holds all of it's build dir locks from
10280 for pkg in self._mergelist:
10281 if not isinstance(pkg, Package):
10282 # a satisfied blocker
10286 if pkg.cpv not in cpv_map:
10287 cpv_map[pkg.cpv] = [pkg]
10289 for earlier_pkg in cpv_map[pkg.cpv]:
10290 self._digraph.add(earlier_pkg, pkg,
10291 priority=DepPriority(buildtime=True))
10292 cpv_map[pkg.cpv].append(pkg)
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			# First positional arg, when given, is the failure status.
			# NOTE(review): upstream guards this with 'if pargs:' -- the
			# guard line is not visible in this excerpt.
			self.status = pargs[0]
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.
		"""
		# addFront() gives fetchers for currently-building packages
		# priority over queued prefetchers.
		self._task_queues.fetch.addFront(fetcher)
	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		self._task_queues.merge.addFront(setup_phase)
10320 def _schedule_unpack(self, unpack_phase):
10322 Schedule an unpack phase on the unpack queue, in order
10323 to serialize $DISTDIR access for live ebuilds.
10325 self._task_queues.unpack.add(unpack_phase)
	def _find_blockers(self, new_pkg):
		"""
		Returns a callable which should be called only when
		the vdb lock has been acquired.
		"""
		def get_blockers():
			# Deferred so the expensive lookup runs under the caller's
			# vdb lock (acquire_lock=0: the lock is already held).
			return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
		return get_blockers
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		# Collect dblink objects for installed packages that block new_pkg;
		# an empty result means nothing must be unmerged first.
		if self._opts_ignore_blockers.intersection(self.myopts):
			# [early return elided in this excerpt]

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).
		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# A package does not block its own slot/version replacement;
			# ['continue' bodies elided in this excerpt].
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		return blocker_dblinks
10364 def _dblink_pkg(self, pkg_dblink):
10365 cpv = pkg_dblink.mycpv
10366 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10367 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10368 installed = type_name == "installed"
10369 return self._pkg(cpv, type_name, root_config, installed=installed)
10371 def _append_to_log_path(self, log_path, msg):
10372 f = open(log_path, 'a')
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		# Route elog messages generated during a dblink merge phase:
		# when running in the background with a log file, send them to
		# the log instead of the tty.  Several connective lines
		# (out/log_file setup, message joining, close) are elided in
		# this excerpt -- confirm against upstream before editing.
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background

		if background and log_path is not None:
			log_file = open(log_path, 'a')

		func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)

		if log_file is not None:
	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		# Display (or log) a message produced during a dblink merge.
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background

		if log_path is None:
			# No log file: write to the tty unless we are backgrounded
			# and the message is below WARN severity.
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
		# [foreground/else branching partially elided in this excerpt;
		# the following echoes to the tty and appends to the log]
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)
			self._append_to_log_path(log_path, msg)
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.
		"""
		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")

		ebuild_phase = EbuildPhase(background=background,
			pkg=pkg, phase=phase, scheduler=scheduler,
			settings=settings, tree=pkg_dblink.treetype)
		ebuild_phase.start()
		# Block until the phase completes; the scheduler's event loop
		# keeps servicing other tasks while we wait.
		ebuild_phase.wait()

		return ebuild_phase.returncode
	def _check_manifests(self):
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:
			# Nothing to verify in these modes.
			# [early 'return os.EX_OK' elided in this excerpt]

		shown_verifying_msg = False
		quiet_settings = {}
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			# Clone a quiet config per root so digestcheck output
			# does not flood the display.
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config

		for x in self._mergelist:
			# Only ebuilds have manifests to verify.
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":
				# ['continue' elided in this excerpt]

			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")

			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
				# [failure return and final 'return os.EX_OK' elided]
	def _add_prefetchers(self):
		# Queue background fetchers for everything after the first
		# package, so downloads overlap with building.
		if not self._parallel_fetch:
			# [early 'return' elided in this excerpt]

		if self._parallel_fetch:
			self._status_msg("Starting parallel fetch")

			prefetchers = self._prefetchers
			getbinpkg = "--getbinpkg" in self.myopts

			# In order to avoid "waiting for lock" messages
			# at the beginning, which annoy users, never
			# spawn a prefetcher for the first package.
			for pkg in self._mergelist[1:]:
				prefetcher = self._create_prefetcher(pkg)
				if prefetcher is not None:
					self._task_queues.fetch.add(prefetcher)
					prefetchers[pkg] = prefetcher
	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable
		"""
		# ['prefetcher = None' default elided in this excerpt]
		if not isinstance(pkg, Package):
			# satisfied blockers etc. never need fetching

		elif pkg.type_name == "ebuild":
			# Source packages: prefetch SRC_URI distfiles.
			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
					self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)

		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):
			# Remote binary packages can be downloaded ahead of time.
			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)

		# ['return prefetcher' elided in this excerpt]
	def _is_restart_scheduled(self):
		"""
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.
		@returns: True if a restart is scheduled, False otherwise.
		"""
		if self._opts_no_restart.intersection(self.myopts):
			# ['return False' elided in this excerpt]

		mergelist = self._mergelist

		for i, pkg in enumerate(mergelist):
			# A restart only happens when the portage upgrade is not
			# the final entry in the list.
			if self._is_restart_necessary(pkg) and \
				i != len(mergelist) - 1:
				# ['return True' / final 'return False' elided]
	def _is_restart_necessary(self, pkg):
		"""
		@return: True if merging the given package
		requires restart, False otherwise.
		"""
		# Figure out if we need a restart.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			if self._running_portage:
				# Only a version change of the running portage matters.
				return pkg.cpv != self._running_portage.cpv
			# [remaining 'return True' / 'return False' elided]
	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.
		"""
		if self._opts_no_restart.intersection(self.myopts):
			# ['return' elided in this excerpt]

		if not self._is_restart_necessary(pkg):
			# ['return' elided]

		if pkg == self._mergelist[-1]:
			# Nothing left to merge afterwards; no restart needed.
			# ['return' elided]

		self._main_loop_cleanup()

		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts

		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
			"portage version.")

		# Drop the just-merged portage from the resume list so the
		# restarted emerge does not merge it again.
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				# [flag-vs-value branching ('if myarg is True:'/'else:')
				# elided in this excerpt]
				mynewargv.append(myopt)
				mynewargv.append(myopt +"="+ str(myarg))

		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
		# NOTE(review): this span is the interior of the Scheduler merge()
		# entry point; its 'def' header, try/finally wrappers and several
		# connective lines are elided from this excerpt. Indentation below
		# is best-effort -- confirm against upstream before editing.
		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

		# ['try:' elided]
		self._background = self._background_mode()
		except self._unknown_internal_error:
			# [failure return elided]

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()
				# [error display and return elided]

			if self._background:
				# Propagate background mode into each root's settings so
				# spawned phases log instead of writing to the tty.
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		rval = self._check_manifests()
		if rval != os.EX_OK:
			# ['return rval' elided]

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		# [--keep-going retry loop header elided; each guard below
		# originally breaks out of that loop]
		rval = self._merge()
		if rval == os.EX_OK or fetchonly or not keep_going:
		if "resume" not in mtimedb:
		mergelist = self._mtimedb["resume"].get("mergelist")
		if not failed_pkgs:

		for failed_pkg in failed_pkgs:
			mergelist.remove(list(failed_pkg.pkg))

		self._failed_pkgs_all.extend(failed_pkgs)

		if not self._calc_resume_list():

		clear_caches(self.trees)
		if not self._mergelist:

		self._save_resume_list()
		self._pkg_count.curval = 0
		self._pkg_count.maxval = len([x for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

		self._failed_pkgs_all.extend(failed_pkgs)

		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show it's
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			build_dir = failed_pkg.build_dir
			log_paths = [failed_pkg.build_log]

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				log_file = open(log_path)

			if log_file is not None:
				for line in log_file:
					writemsg_level(line, noiselevel=-1)
				# [log_file.close() likely elided here]
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			printer = portage.output.EOutput()
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				# [root_msg default elided]
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))

				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
						# ['continue' elided]
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
				# [msg() invocation elided]

		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and "--keep-going" in self.myopts):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
			# ['else:' elided]
				msg = "The following package has " + \
					"failed to build or install:"
			prefix = bad(" * ")
			writemsg(prefix + "\n", noiselevel=-1)
			from textwrap import wrap
			for line in wrap(msg, 72):
				writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)
			for failed_pkg in self._failed_pkgs_all:
				writemsg("%s\t%s\n" % (prefix,
					colorize("INFORM", str(failed_pkg.pkg))),
			writemsg(prefix + "\n", noiselevel=-1)
10770 def _elog_listener(self, mysettings, key, logentries, fulltext):
10771 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10773 self._failed_pkgs_die_msgs.append(
10774 (mysettings, key, errors))
	def _locate_failure_log(self, failed_pkg):
		# Locate a build log for failed_pkg, checking that it exists and
		# is non-empty; returns None otherwise.  Guard lines (None checks,
		# the empty-log skip and the returns) are elided in this excerpt.
		build_dir = failed_pkg.build_dir
		log_paths = [failed_pkg.build_log]

		for log_path in log_paths:
			log_size = os.stat(log_path).st_size
	def _add_packages(self):
		# Seed the scheduler's package queue from the merge list;
		# blockers are handled separately rather than queued.
		pkg_queue = self._pkg_queue
		for pkg in self._mergelist:
			if isinstance(pkg, Package):
				pkg_queue.append(pkg)
			elif isinstance(pkg, Blocker):
				# [blocker bookkeeping elided in this excerpt]
	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
		"""
		graph = self._digraph
		# [graph None guard elided in this excerpt]
		pkg = merge.merge.pkg

		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root != '/':
			# ['return' elided]

		completed_tasks = self._completed_tasks
		unsatisfied = self._unsatisfied_system_deps

		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			if isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post):
				# ['return False' / 'return True' elided]

		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for child in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
				# ['continue' elided]
			if child.operation == 'merge' and \
				child not in completed_tasks:
				unsatisfied.add(child)
10848 def _merge_wait_exit_handler(self, task):
10849 self._merge_wait_scheduled.remove(task)
10850 self._merge_exit(task)
	def _merge_exit(self, merge):
		# Common exit bookkeeping for a completed PackageMerge.
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			# Only newly merged packages advance the progress counter.
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
		# [follow-up scheduling call elided in this excerpt]
	def _do_merge_exit(self, merge):
		# Record failure or completion of a merge and keep the resume
		# list in sync on disk.
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			# [additional _failed_pkg kwargs elided in this excerpt]
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

			self._status_display.failed = len(self._failed_pkgs)
			# ['return' elided]

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark it's uninstall
			# task complete (if any).
			uninst_hash_key = \
				("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)

		self._restart_if_necessary(pkg)

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
		# [mtimedb.commit() call elided in this excerpt]
	def _build_exit(self, build):
		# Exit listener for build jobs: on success queue the merge step,
		# on failure record the failed package.
		if build.returncode == os.EX_OK:
			# [job-count decrement elided in this excerpt]
			merge = PackageMerge(merge=build)
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			# ['else:' elided -- ordinary merges go straight to the queue]
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
		# ['else:' elided -- the build failed; record it below]
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			# [additional _failed_pkg kwargs elided in this excerpt]
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

			self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)
		self._status_display.running = self._jobs
		# [follow-up scheduling call elided in this excerpt]
	def _extract_exit(self, build):
		# Binary packages skip the build phase; completion of extraction
		# is handled exactly like a finished build.
		self._build_exit(build)
10934 def _task_complete(self, pkg):
10935 self._completed_tasks.add(pkg)
10936 self._unsatisfied_system_deps.discard(pkg)
10937 self._choose_pkg_return_early = False
		# NOTE(review): this span is the interior of the Scheduler _merge()
		# helper; its 'def' header and try/finally wrapper are elided from
		# this excerpt -- confirm against upstream before editing.
		self._add_prefetchers()
		self._add_packages()
		pkg_queue = self._pkg_queue
		failed_pkgs = self._failed_pkgs
		# Quiet lock messages and capture elog ERROR output while merging.
		portage.locks._quiet = self._background
		portage.elog._emerge_elog_listener = self._elog_listener
		# [main loop invocation elided]
		self._main_loop_cleanup()
		portage.locks._quiet = False
		portage.elog._emerge_elog_listener = None
		# [success check elided]
		rval = failed_pkgs[-1].returncode
		# ['return rval' elided]
10960 def _main_loop_cleanup(self):
10961 del self._pkg_queue[:]
10962 self._completed_tasks.clear()
10963 self._deep_system_deps.clear()
10964 self._unsatisfied_system_deps.clear()
10965 self._choose_pkg_return_early = False
10966 self._status_display.reset()
10967 self._digraph = None
10968 self._task_queues.fetch.clear()
	def _choose_pkg(self):
		"""
		Choose a task that has all it's dependencies satisfied.
		"""
		if self._choose_pkg_return_early:
			# [early 'return None' elided -- wait for a running job first]

		if self._digraph is None:
			# Without a graph we cannot reason about readiness; only pop
			# serially unless unrestricted parallel --nodeps was requested.
			if (self._jobs or self._task_queues.merge) and \
				not ("--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)):
				self._choose_pkg_return_early = True
				# ['return None' elided]
			return self._pkg_queue.pop(0)

		if not (self._jobs or self._task_queues.merge):
			# Nothing is running: the queue head is always safe to take.
			return self._pkg_queue.pop(0)

		self._prune_digraph()

		# ['chosen_pkg = None' elided]
		later = set(self._pkg_queue)
		for pkg in self._pkg_queue:
			# [later.remove(pkg) elided]
			if not self._dependent_on_scheduled_merges(pkg, later):
				# ['chosen_pkg = pkg; break' elided]

		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)

		if chosen_pkg is None:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			# [comment tail elided] completes.
			self._choose_pkg_return_early = True

		# ['return chosen_pkg' elided]
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given packages deep dependencies
		to see if it contains any scheduled merges.
		@param pkg: a package to check dependencies for
		@param later: packages for which dependence should be ignored
			since they will be merged later than pkg anyway and therefore
			delaying the merge of pkg will not result in a more optimal
			merge order
		@returns: True if the package is dependent, False otherwise.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks

		# ['dependent = False' and 'while node_stack:' loop header elided]
		traversed_nodes = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		node_stack = direct_deps
		direct_deps = frozenset(direct_deps)
		node = node_stack.pop()
		if node in traversed_nodes:
			# ['continue' elided]
		traversed_nodes.add(node)
		# Installed no-ops, indirect uninstalls, completed tasks and
		# later-scheduled packages don't count as blocking dependencies.
		if not ((node.installed and node.operation == "nomerge") or \
			(node.operation == "uninstall" and \
			node not in direct_deps) or \
			node in completed_tasks or \
			# [condition tail and 'dependent = True; break' elided]
		node_stack.extend(graph.child_nodes(node))
		# ['return dependent' elided]
	def _allocate_config(self, root):
		"""
		Allocate a unique config instance for a task in order
		to prevent interference between parallel tasks.
		"""
		if self._config_pool[root]:
			# Reuse a pooled instance when one is available.
			temp_settings = self._config_pool[root].pop()
		# ['else:' elided -- clone a fresh config when the pool is empty]
			temp_settings = portage.config(clone=self.pkgsettings[root])
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
		# performance reasons, call it here to make sure all settings from the
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
		temp_settings.reload()
		temp_settings.reset()
		return temp_settings
11065 def _deallocate_config(self, settings):
11066 self._config_pool[settings["ROOT"]].append(settings)
	def _main_loop(self):
		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)

		merge_queue = self._task_queues.merge

		while self._schedule():
			# Service poll events between scheduling passes.
			if self._poll_event_handlers:
				# [poll-loop invocation elided in this excerpt]

		# Drain remaining work; loop structure is partially elided here.
		if not (self._jobs or merge_queue):
		if self._poll_event_handlers:
			# [poll-loop invocation elided in this excerpt]
11089 def _keep_scheduling(self):
11090 return bool(self._pkg_queue and \
11091 not (self._failed_pkgs and not self._build_opts.fetchonly))
	def _schedule_tasks(self):
		# One pass of the scheduler: flush waiting merges, start new
		# jobs, refresh the display, and report whether to keep going.
		# When the number of jobs drops to zero, process all waiting merges.
		if not self._jobs and self._merge_wait_queue:
			for task in self._merge_wait_queue:
				task.addExitListener(self._merge_wait_exit_handler)
				self._task_queues.merge.add(task)
			self._status_display.merges = len(self._task_queues.merge)
			self._merge_wait_scheduled.extend(self._merge_wait_queue)
			del self._merge_wait_queue[:]

		self._schedule_tasks_imp()
		self._status_display.display()

		# [state-change bookkeeping elided in this excerpt]
		for q in self._task_queues.values():
			# [per-queue scheduling elided]

		# Cancel prefetchers if they're the only reason
		# the main poll loop is still running.
		if self._failed_pkgs and not self._build_opts.fetchonly and \
			not (self._jobs or self._task_queues.merge) and \
			self._task_queues.fetch:
			self._task_queues.fetch.clear()

		self._schedule_tasks_imp()
		self._status_display.display()

		return self._keep_scheduling()
	def _job_delay(self):
		"""
		@returns: True if job scheduling should be delayed, False otherwise.
		"""
		if self._jobs and self._max_load is not None:

			current_time = time.time()

			# Back off exponentially in the number of running jobs,
			# capped at _job_delay_max seconds.
			delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
			if delay > self._job_delay_max:
				delay = self._job_delay_max
			if (current_time - self._previous_job_start_time) < delay:
				# ['return True' / final 'return False' elided]
	def _schedule_tasks_imp(self):
		"""
		@returns: True if state changed, False otherwise.
		"""
		# ['state_change = 0' and scheduling loop header elided in this
		# excerpt -- the guards below originally break out of that loop]
		if not self._keep_scheduling():
			return bool(state_change)

		if self._choose_pkg_return_early or \
			self._merge_wait_scheduled or \
			(self._jobs and self._unsatisfied_system_deps) or \
			not self._can_add_job() or \
			# [job-delay term of the condition elided]
			return bool(state_change)

		pkg = self._choose_pkg()
		# [pkg None check elided]
		return bool(state_change)

		# [state-change increment elided]
		if not pkg.installed:
			self._pkg_count.curval += 1

		task = self._task(pkg)

		# [branch header elided: installed packages merge directly]
		merge = PackageMerge(merge=task)
		merge.addExitListener(self._merge_exit)
		self._task_queues.merge.add(merge)

		# [branch header elided: built packages are extracted as jobs]
		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._extract_exit)
		self._task_queues.jobs.add(task)

		# [branch header elided: source packages are built as jobs]
		self._previous_job_start_time = time.time()
		self._status_display.running = self._jobs
		task.addExitListener(self._build_exit)
		self._task_queues.jobs.add(task)

		return bool(state_change)
	def _task(self, pkg):
		# Build a MergeListItem for pkg, locating the installed package
		# in the same slot (if any) that it will replace.
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = vardb.match(pkg.slot_atom)
			# [guard for an empty match list elided in this excerpt]
			previous_cpv = previous_cpv.pop()
			pkg_to_replace = self._pkg(previous_cpv,
				"installed", pkg.root_config, installed=True)

		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)
		# ['return task' elided in this excerpt]
	def _failed_pkg_msg(self, failed_pkg, action, preposition):
		# Emit a status message describing a failed package, plus the
		# log file path on a second line when one can be located.
		pkg = failed_pkg.pkg
		msg = "%s to %s %s" % \
			(bad("Failed"), action, colorize("INFORM", pkg.cpv))
		if pkg.root != "/":
			msg += " %s %s" % (preposition, pkg.root)

		log_path = self._locate_failure_log(failed_pkg)
		if log_path is not None:
			msg += ", Log file:"
		self._status_msg(msg)

		if log_path is not None:
			self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the resposibility of generating \r and \n control characters,
		to guarantee that lines are created or erased when necessary and
		[remainder of docstring elided in this excerpt]
		@param msg: a brief status message (no newlines allowed)
		"""
		if not self._background:
			# A leading newline keeps the message clear of any partial
			# status line already on the tty.
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
	def _save_resume_list(self):
		"""
		Do this before verifying the ebuild Manifests since it might
		be possible for the user to use --resume --skipfirst get past
		a non-essential package with a broken digest.
		"""
		mtimedb = self._mtimedb
		# Only Package merge entries belong in the resume list.
		mtimedb["resume"]["mergelist"] = [list(x) \
			for x in self._mergelist \
			if isinstance(x, Package) and x.operation == "merge"]
		# [mtimedb.commit() call elided in this excerpt]
	def _calc_resume_list(self):
		"""
		Use the current resume list to calculate a new one,
		dropping any packages with unsatisfied deps.
		@returns: True if successful, False otherwise.
		"""
		print colorize("GOOD", "*** Resuming merge...")

		if self._show_list():
			if "--tree" in self.myopts:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be merged, in reverse order:\n\n"))
			# ['else:' elided in this excerpt]
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be merged, in order:\n\n"))

		show_spinner = "--quiet" not in self.myopts and \
			"--nodeps" not in self.myopts

		# ['if show_spinner:' elided]
		print "Calculating dependencies  ",

		myparams = create_depgraph_params(self.myopts, None)
		# ['try:' elided]
		success, mydepgraph, dropped_tasks = resume_depgraph(
			self.settings, self.trees, self._mtimedb, self.myopts,
			myparams, self._spinner)
		except depgraph.UnsatisfiedResumeDep, exc:
			# rename variable to avoid python-3.0 error:
			# SyntaxError: can not delete variable 'e' referenced in nested
			# [scope -- 'e = exc; del exc' assignment elided]
			mydepgraph = e.depgraph
			dropped_tasks = set()

		# ['if show_spinner:' elided]
		print "\b\b... done!"

		# [guard around the unsatisfied-dep handler elided]
		def unsatisfied_resume_dep_msg():
			# Deferred so it prints after mod_echo output is flushed.
			mydepgraph.display_problems()
			out = portage.output.EOutput()
			out.eerror("One or more packages are either masked or " + \
				"have missing dependencies:")
			# [blank eerror and indent setup elided]
			show_parents = set()
			for dep in e.value:
				if dep.parent in show_parents:
					# ['continue' elided]
				show_parents.add(dep.parent)
				if dep.atom is None:
					out.eerror(indent + "Masked package:")
					out.eerror(2 * indent + str(dep.parent))
				# ['else:' elided]
					out.eerror(indent + str(dep.atom) + " pulled in by:")
					out.eerror(2 * indent + str(dep.parent))
			msg = "The resume list contains packages " + \
				"that are either masked or have " + \
				"unsatisfied dependencies. " + \
				"Please restart/continue " + \
				"the operation manually, or use --skipfirst " + \
				"to skip the first package in the list and " + \
				"any other packages that may be " + \
				"masked or have missing dependencies."
			for line in textwrap.wrap(msg, 72):
				# [out.eerror(line) elided]
		self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
		# ['return False' elided]

		if success and self._show_list():
			mylist = mydepgraph.altlist()
			# [reverse for --tree display elided]
			if "--tree" in self.myopts:
				mydepgraph.display(mylist, favorites=self._favorites)

		# [failure branch header elided]
		self._post_mod_echo_msgs.append(mydepgraph.display_problems)
		# ['return False' elided]
		mydepgraph.display_problems()

		# Adopt the recalculated merge list and its scheduler graph.
		mylist = mydepgraph.altlist()
		mydepgraph.break_refs(mylist)
		mydepgraph.break_refs(dropped_tasks)
		self._mergelist = mylist
		self._set_digraph(mydepgraph.schedulerGraph())

		# [msg_width setup elided]
		for task in dropped_tasks:
			if not (isinstance(task, Package) and task.operation == "merge"):
				# ['continue' elided]
			# ['pkg = task' and message head tail elided below]
			msg = "emerge --keep-going:" + \
			if pkg.root != "/":
				msg += " for %s" % (pkg.root,)
			msg += " dropped due to unsatisfied dependency."
			for line in textwrap.wrap(msg, msg_width):
				eerror(line, phase="other", key=pkg.cpv)
			settings = self.pkgsettings[pkg.root]
			# Ensure that log collection from $T is disabled inside
			# elog_process(), since any logs that might exist are
			# [comment tail elided]
			settings.pop("T", None)
			portage.elog.elog_process(pkg.cpv, settings)
			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))

		# ['return True' elided]
11385 def _show_list(self):
11386 myopts = self.myopts
11387 if "--quiet" not in myopts and \
11388 ("--ask" in myopts or "--tree" in myopts or \
11389 "--verbose" in myopts):
	def _world_atom(self, pkg):
		"""
		Add the package to the world file, but only if
		it's supposed to be added. Otherwise, do nothing.
		"""
		# [additional option names in this set are elided in this excerpt]
		if set(("--buildpkgonly", "--fetchonly",
			"--oneshot", "--onlydeps",
			"--pretend")).intersection(self.myopts):
			# ['return' elided]

		if pkg.root != self.target_root:
			# ['return' elided]

		args_set = self._args_set
		if not args_set.findAtomForPackage(pkg):
			# ['return' elided]

		logger = self._logger
		pkg_count = self._pkg_count
		root_config = pkg.root_config
		world_set = root_config.sets["world"]
		world_locked = False
		if hasattr(world_set, "lock"):
			# [world_set.lock() call elided]
			world_locked = True

		# ['try:' elided; unlock in a 'finally:' is also elided below]
		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		atom = create_world_atom(pkg, args_set, root_config)
		# [guard for a None atom elided]
		if hasattr(world_set, "add"):
			self._status_msg(('Recording %s in "world" ' + \
				'favorites file...') % atom)
			logger.log(" === (%s of %s) Updating world file (%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv))
			world_set.add(atom)
		# ['else:' elided -- the set type cannot record atoms]
			writemsg_level('\n!!! Unable to record %s in "world"\n' % \
				(atom,), level=logging.WARN, noiselevel=-1)
11440 def _pkg(self, cpv, type_name, root_config, installed=False):
11442 Get a package instance from the cache, or create a new
11443 one if necessary. Raises KeyError from aux_get if it
11444 failures for some reason (package does not exist or is
11447 operation = "merge"
11449 operation = "nomerge"
11451 if self._digraph is not None:
11452 # Reuse existing instance when available.
11453 pkg = self._digraph.get(
11454 (type_name, root_config.root, cpv, operation))
11455 if pkg is not None:
11458 tree_type = depgraph.pkg_tree_map[type_name]
11459 db = root_config.trees[tree_type].dbapi
11460 db_keys = list(self.trees[root_config.root][
11461 tree_type].dbapi._aux_cache_keys)
11462 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11463 pkg = Package(cpv=cpv, metadata=metadata,
11464 root_config=root_config, installed=installed)
11465 if type_name == "ebuild":
11466 settings = self.pkgsettings[root_config.root]
11467 settings.setcpv(pkg)
11468 pkg.metadata["USE"] = settings["PORTAGE_USE"]
11469 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# Regenerates the ebuild metadata cache for every package in the portdb,
# scheduling one metadata process per ebuild through the PollScheduler
# job machinery (bounded by max_jobs/max_load).
# NOTE(review): listing is elided throughout -- default-value branches,
# try:/continue lines and some loop headers are missing from this extract.
11473 class MetadataRegen(PollScheduler):
11475 def __init__(self, portdb, max_jobs=None, max_load=None):
11476 PollScheduler.__init__(self)
11477 self._portdb = portdb
11479 if max_jobs is None:
11482 self._max_jobs = max_jobs
11483 self._max_load = max_load
# Scheduler interface handed to each child metadata process.
11484 self._sched_iface = self._sched_iface_class(
11485 register=self._register,
11486 schedule=self._schedule_wait,
11487 unregister=self._unregister)
11489 self._valid_pkgs = set()
11490 self._process_iter = self._iter_metadata_processes()
11491 self.returncode = os.EX_OK
11492 self._error_count = 0
# Generator yielding one EbuildMetadataPhase-style process per cpv that
# actually needs its cache regenerated (None results are skipped).
11494 def _iter_metadata_processes(self):
11495 portdb = self._portdb
11496 valid_pkgs = self._valid_pkgs
11497 every_cp = portdb.cp_all()
# Reverse-sorted so pop() walks categories in ascending order.
11498 every_cp.sort(reverse=True)
11501 cp = every_cp.pop()
11502 portage.writemsg_stdout("Processing %s\n" % cp)
11503 cpv_list = portdb.cp_list(cp)
11504 for cpv in cpv_list:
11505 valid_pkgs.add(cpv)
11506 ebuild_path, repo_path = portdb.findname2(cpv)
11507 metadata_process = portdb._metadata_process(
11508 cpv, ebuild_path, repo_path)
11509 if metadata_process is None:
11511 yield metadata_process
# Main entry point (name elided in this extract): drives the schedule
# loop, then prunes cache entries for packages that no longer exist.
11515 portdb = self._portdb
11516 from portage.cache.cache_errors import CacheError
11519 for mytree in portdb.porttrees:
11521 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11522 except CacheError, e:
11523 portage.writemsg("Error listing cache entries for " + \
11524 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11529 while self._schedule():
# Anything still present on disk is not dead; discard it from the prune set.
11536 for y in self._valid_pkgs:
11537 for mytree in portdb.porttrees:
11538 if portdb.findname2(y, mytree=mytree)[0]:
11539 dead_nodes[mytree].discard(y)
11541 for mytree, nodes in dead_nodes.iteritems():
11542 auxdb = portdb.auxdb[mytree]
# Best-effort cache deletion: missing keys and cache errors are ignored.
11546 except (KeyError, CacheError):
11549 def _schedule_tasks(self):
11552 @returns: True if there may be remaining tasks to schedule,
# Start as many metadata processes as the job/load limits allow.
11555 while self._can_add_job():
11557 metadata_process = self._process_iter.next()
11558 except StopIteration:
11562 metadata_process.scheduler = self._sched_iface
11563 metadata_process.addExitListener(self._metadata_exit)
11564 metadata_process.start()
# Exit listener: count failures and drop failed cpvs from _valid_pkgs so
# their stale cache entries get pruned later.
11567 def _metadata_exit(self, metadata_process):
11569 if metadata_process.returncode != os.EX_OK:
11570 self.returncode = 1
11571 self._error_count += 1
11572 self._valid_pkgs.discard(metadata_process.cpv)
11573 portage.writemsg("Error processing %s, continuing...\n" % \
11574 (metadata_process.cpv,))
# Exception raised by unmerge() below when portage.unmerge() returns a
# nonzero status; .status carries that exit code.
# NOTE(review): elided -- the guard before "self.status = pargs[0]"
# (presumably "if pargs:") is missing from this extract.
11577 class UninstallFailure(portage.exception.PortageException):
11579 An instance of this class is raised by unmerge() when
11580 an uninstallation fails.
11583 def __init__(self, *pargs):
11584 portage.exception.PortageException.__init__(self, pargs)
11586 self.status = pargs[0]
# Top-level uninstall driver for "emerge unmerge/prune/clean/depclean".
# Builds pkgmap (per-argument dicts of protected/selected/omitted cpvs),
# filters out packages protected by the system profile or by package sets,
# displays the plan, and finally calls portage.unmerge() per selection.
# NOTE(review): this listing is elided throughout (embedded line numbers
# skip) -- try:/else:/return/continue lines and several loop headers are
# missing from this extract; comments below are hedged accordingly.
11588 def unmerge(root_config, myopts, unmerge_action,
11589 unmerge_files, ldpath_mtimes, autoclean=0,
11590 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11591 scheduler=None, writemsg_level=portage.util.writemsg_level):
11593 quiet = "--quiet" in myopts
11594 settings = root_config.settings
11595 sets = root_config.sets
11596 vartree = root_config.trees["vartree"]
11597 candidate_catpkgs=[]
11599 xterm_titles = "notitles" not in settings.features
11600 out = portage.output.EOutput()
11602 db_keys = list(vartree.dbapi._aux_cache_keys)
# Local helper (header elided): memoized Package construction for an
# installed cpv, backed by pkg_cache.
11605 pkg = pkg_cache.get(cpv)
11607 pkg = Package(cpv=cpv, installed=True,
11608 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11609 root_config=root_config,
11610 type_name="installed")
11611 pkg_cache[cpv] = pkg
# Lock the vdb so concurrent merges don't race with us.
11614 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11616 # At least the parent needs to exist for the lock file.
11617 portage.util.ensure_dirs(vdb_path)
11618 except portage.exception.PortageException:
11622 if os.access(vdb_path, os.W_OK):
11623 vdb_lock = portage.locks.lockdir(vdb_path)
# Build syslist: system-profile atoms, resolving virtuals to their single
# installed provider when unambiguous.
11624 realsyslist = sets["system"].getAtoms()
11626 for x in realsyslist:
11627 mycp = portage.dep_getkey(x)
11628 if mycp in settings.getvirtuals():
11630 for provider in settings.getvirtuals()[mycp]:
11631 if vartree.dbapi.match(provider):
11632 providers.append(provider)
11633 if len(providers) == 1:
11634 syslist.extend(providers)
11636 syslist.append(mycp)
11638 mysettings = portage.config(clone=settings)
11640 if not unmerge_files:
11641 if unmerge_action == "unmerge":
11643 print bold("emerge unmerge") + " can only be used with specific package names"
11649 localtree = vartree
11650 # process all arguments and add all
11651 # valid db entries to candidate_catpkgs
11653 if not unmerge_files:
11654 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11656 #we've got command-line arguments
11657 if not unmerge_files:
11658 print "\nNo packages to unmerge have been provided.\n"
11660 for x in unmerge_files:
11661 arg_parts = x.split('/')
11662 if x[0] not in [".","/"] and \
11663 arg_parts[-1][-7:] != ".ebuild":
11664 #possible cat/pkg or dep; treat as such
11665 candidate_catpkgs.append(x)
11666 elif unmerge_action in ["prune","clean"]:
11667 print "\n!!! Prune and clean do not accept individual" + \
11668 " ebuilds as arguments;\n skipping.\n"
11671 # it appears that the user is specifying an installed
11672 # ebuild and we're in "unmerge" mode, so it's ok.
11673 if not os.path.exists(x):
11674 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Translate a filesystem path inside the vdb into an "=cat/pkg-ver" atom.
11677 absx = os.path.abspath(x)
11678 sp_absx = absx.split("/")
11679 if sp_absx[-1][-7:] == ".ebuild":
11681 absx = "/".join(sp_absx)
11683 sp_absx_len = len(sp_absx)
11685 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11686 vdb_len = len(vdb_path)
11688 sp_vdb = vdb_path.split("/")
11689 sp_vdb_len = len(sp_vdb)
11691 if not os.path.exists(absx+"/CONTENTS"):
11692 print "!!! Not a valid db dir: "+str(absx)
11695 if sp_absx_len <= sp_vdb_len:
11696 # The Path is shorter... so it can't be inside the vdb.
11699 print "\n!!!",x,"cannot be inside "+ \
11700 vdb_path+"; aborting.\n"
11703 for idx in range(0,sp_vdb_len):
11704 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11707 print "\n!!!", x, "is not inside "+\
11708 vdb_path+"; aborting.\n"
11711 print "="+"/".join(sp_absx[sp_vdb_len:])
11712 candidate_catpkgs.append(
11713 "="+"/".join(sp_absx[sp_vdb_len:]))
11716 if (not "--quiet" in myopts):
11718 if settings["ROOT"] != "/":
11719 writemsg_level(darkgreen(newline+ \
11720 ">>> Using system located in ROOT tree %s\n" % \
11723 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11724 not ("--quiet" in myopts):
11725 writemsg_level(darkgreen(newline+\
11726 ">>> These are the packages that would be unmerged:\n"))
11728 # Preservation of order is required for --depclean and --prune so
11729 # that dependencies are respected. Use all_selected to eliminate
11730 # duplicate packages since the same package may be selected by
11733 all_selected = set()
# Classify each candidate's installed matches into protected/selected
# depending on the action (unmerge vs prune vs clean).
11734 for x in candidate_catpkgs:
11735 # cycle through all our candidate deps and determine
11736 # what will and will not get unmerged
11738 mymatch = vartree.dbapi.match(x)
11739 except portage.exception.AmbiguousPackageName, errpkgs:
11740 print "\n\n!!! The short ebuild name \"" + \
11741 x + "\" is ambiguous. Please specify"
11742 print "!!! one of the following fully-qualified " + \
11743 "ebuild names instead:\n"
11744 for i in errpkgs[0]:
11745 print " " + green(i)
11749 if not mymatch and x[0] not in "<>=~":
11750 mymatch = localtree.dep_match(x)
11752 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11753 (x, unmerge_action), noiselevel=-1)
11757 {"protected": set(), "selected": set(), "omitted": set()})
11758 mykey = len(pkgmap) - 1
11759 if unmerge_action=="unmerge":
11761 if y not in all_selected:
11762 pkgmap[mykey]["selected"].add(y)
11763 all_selected.add(y)
11764 elif unmerge_action == "prune":
11765 if len(mymatch) == 1:
# prune: keep the best version per the slot/counter heuristic below,
# select everything else for removal.
11767 best_version = mymatch[0]
11768 best_slot = vartree.getslot(best_version)
11769 best_counter = vartree.dbapi.cpv_counter(best_version)
11770 for mypkg in mymatch[1:]:
11771 myslot = vartree.getslot(mypkg)
11772 mycounter = vartree.dbapi.cpv_counter(mypkg)
11773 if (myslot == best_slot and mycounter > best_counter) or \
11774 mypkg == portage.best([mypkg, best_version]):
11775 if myslot == best_slot:
11776 if mycounter < best_counter:
11777 # On slot collision, keep the one with the
11778 # highest counter since it is the most
11779 # recently installed.
11781 best_version = mypkg
11783 best_counter = mycounter
11784 pkgmap[mykey]["protected"].add(best_version)
11785 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11786 if mypkg != best_version and mypkg not in all_selected)
11787 all_selected.update(pkgmap[mykey]["selected"])
11789 # unmerge_action == "clean"
# clean: bucket installed versions by slot/counter, protect the
# highest-counter (most recently merged) entry in each slot.
11791 for mypkg in mymatch:
11792 if unmerge_action == "clean":
11793 myslot = localtree.getslot(mypkg)
11795 # since we're pruning, we don't care about slots
11796 # and put all the pkgs in together
11798 if myslot not in slotmap:
11799 slotmap[myslot] = {}
11800 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11802 for mypkg in vartree.dbapi.cp_list(
11803 portage.dep_getkey(mymatch[0])):
11804 myslot = vartree.getslot(mypkg)
11805 if myslot not in slotmap:
11806 slotmap[myslot] = {}
11807 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11809 for myslot in slotmap:
11810 counterkeys = slotmap[myslot].keys()
11811 if not counterkeys:
11814 pkgmap[mykey]["protected"].add(
11815 slotmap[myslot][counterkeys[-1]])
11816 del counterkeys[-1]
11818 for counter in counterkeys[:]:
11819 mypkg = slotmap[myslot][counter]
11820 if mypkg not in mymatch:
11821 counterkeys.remove(counter)
11822 pkgmap[mykey]["protected"].add(
11823 slotmap[myslot][counter])
11825 #be pretty and get them in order of merge:
11826 for ckey in counterkeys:
11827 mypkg = slotmap[myslot][ckey]
11828 if mypkg not in all_selected:
11829 pkgmap[mykey]["selected"].add(mypkg)
11830 all_selected.add(mypkg)
11831 # ok, now the last-merged package
11832 # is protected, and the rest are selected
11833 numselected = len(all_selected)
11834 if global_unmerge and not numselected:
11835 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11838 if not numselected:
11839 portage.writemsg_stdout(
11840 "\n>>> No packages selected for removal by " + \
11841 unmerge_action + "\n")
11845 vartree.dbapi.flush_cache()
11846 portage.locks.unlockdir(vdb_lock)
# Protection pass: never let this code unmerge portage itself, and keep
# packages still referenced by world or user-editable package sets.
11848 from portage.sets.base import EditablePackageSet
11850 # generate a list of package sets that are directly or indirectly listed in "world",
11851 # as there is no persistent list of "installed" sets
11852 installed_sets = ["world"]
11857 pos = len(installed_sets)
11858 for s in installed_sets[pos - 1:]:
11861 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11864 installed_sets += candidates
11865 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11868 # we don't want to unmerge packages that are still listed in user-editable package sets
11869 # listed in "world" as they would be remerged on the next update of "world" or the
11870 # relevant package sets.
11871 unknown_sets = set()
11872 for cp in xrange(len(pkgmap)):
11873 for cpv in pkgmap[cp]["selected"].copy():
11877 # It could have been uninstalled
11878 # by a concurrent process.
11881 if unmerge_action != "clean" and \
11882 root_config.root == "/" and \
11883 portage.match_from_list(
11884 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11885 msg = ("Not unmerging package %s since there is no valid " + \
11886 "reason for portage to unmerge itself.") % (pkg.cpv,)
11887 for line in textwrap.wrap(msg, 75):
11889 # adjust pkgmap so the display output is correct
11890 pkgmap[cp]["selected"].remove(cpv)
11891 all_selected.remove(cpv)
11892 pkgmap[cp]["protected"].add(cpv)
11896 for s in installed_sets:
11897 # skip sets that the user requested to unmerge, and skip world
11898 # unless we're unmerging a package set (as the package would be
11899 # removed from "world" later on)
11900 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11904 if s in unknown_sets:
11906 unknown_sets.add(s)
11907 out = portage.output.EOutput()
11908 out.eerror(("Unknown set '@%s' in " + \
11909 "%svar/lib/portage/world_sets") % \
11910 (s, root_config.root))
11913 # only check instances of EditablePackageSet as other classes are generally used for
11914 # special purposes and can be ignored here (and are usually generated dynamically, so the
11915 # user can't do much about them anyway)
11916 if isinstance(sets[s], EditablePackageSet):
11918 # This is derived from a snippet of code in the
11919 # depgraph._iter_atoms_for_pkg() method.
11920 for atom in sets[s].iterAtomsForPackage(pkg):
11921 inst_matches = vartree.dbapi.match(atom)
11922 inst_matches.reverse() # descending order
11924 for inst_cpv in inst_matches:
11926 inst_pkg = _pkg(inst_cpv)
11928 # It could have been uninstalled
11929 # by a concurrent process.
11932 if inst_pkg.cp != atom.cp:
11934 if pkg >= inst_pkg:
11935 # This is descending order, and we're not
11936 # interested in any versions <= pkg given.
11938 if pkg.slot_atom != inst_pkg.slot_atom:
11939 higher_slot = inst_pkg
11941 if higher_slot is None:
11945 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11946 #print colorize("WARN", "but still listed in the following package sets:")
11947 #print " %s\n" % ", ".join(parents)
11948 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11949 print colorize("WARN", "still referenced by the following package sets:")
11950 print " %s\n" % ", ".join(parents)
11951 # adjust pkgmap so the display output is correct
11952 pkgmap[cp]["selected"].remove(cpv)
11953 all_selected.remove(cpv)
11954 pkgmap[cp]["protected"].add(cpv)
11958 numselected = len(all_selected)
11959 if not numselected:
11961 "\n>>> No packages selected for removal by " + \
11962 unmerge_action + "\n")
# When order doesn't matter, merge per-argument entries into one dict per
# category/package so the display is deduplicated and sorted.
11965 # Unmerge order only matters in some cases
11969 selected = d["selected"]
11972 cp = portage.cpv_getkey(iter(selected).next())
11973 cp_dict = unordered.get(cp)
11974 if cp_dict is None:
11976 unordered[cp] = cp_dict
11979 for k, v in d.iteritems():
11980 cp_dict[k].update(v)
11981 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Display pass: print each cp with its selected/protected/omitted versions,
# warning loudly when a system-profile package is about to be removed.
11983 for x in xrange(len(pkgmap)):
11984 selected = pkgmap[x]["selected"]
11987 for mytype, mylist in pkgmap[x].iteritems():
11988 if mytype == "selected":
11990 mylist.difference_update(all_selected)
11991 cp = portage.cpv_getkey(iter(selected).next())
11992 for y in localtree.dep_match(cp):
11993 if y not in pkgmap[x]["omitted"] and \
11994 y not in pkgmap[x]["selected"] and \
11995 y not in pkgmap[x]["protected"] and \
11996 y not in all_selected:
11997 pkgmap[x]["omitted"].add(y)
11998 if global_unmerge and not pkgmap[x]["selected"]:
11999 #avoid cluttering the preview printout with stuff that isn't getting unmerged
12001 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12002 writemsg_level(colorize("BAD","\a\n\n!!! " + \
12003 "'%s' is part of your system profile.\n" % cp),
12004 level=logging.WARNING, noiselevel=-1)
12005 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12006 "be damaging to your system.\n\n"),
12007 level=logging.WARNING, noiselevel=-1)
12008 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12009 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12010 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12012 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12014 writemsg_level(bold(cp) + ": ", noiselevel=-1)
12015 for mytype in ["selected","protected","omitted"]:
12017 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12018 if pkgmap[x][mytype]:
12019 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12020 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12021 for pn, ver, rev in sorted_pkgs:
12025 myversion = ver + "-" + rev
12026 if mytype == "selected":
12028 colorize("UNMERGE_WARN", myversion + " "),
12032 colorize("GOOD", myversion + " "), noiselevel=-1)
12034 writemsg_level("none ", noiselevel=-1)
12036 writemsg_level("\n", noiselevel=-1)
12038 writemsg_level("\n", noiselevel=-1)
12040 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12041 " packages are slated for removal.\n")
12042 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12043 " and " + colorize("GOOD", "'omitted'") + \
12044 " packages will not be removed.\n\n")
12046 if "--pretend" in myopts:
12047 #we're done... return
12049 if "--ask" in myopts:
12050 if userquery("Would you like to unmerge these packages?")=="No":
12051 # enter pretend mode for correct formatting of results
12052 myopts["--pretend"] = True
# Execution pass: actually unmerge each selected cpv, logging progress and
# raising UninstallFailure (or presumably returning, when raise_on_error
# is unset -- line elided) on a nonzero portage.unmerge() status.
12057 #the real unmerging begins, after a short delay....
12058 if clean_delay and not autoclean:
12059 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12061 for x in xrange(len(pkgmap)):
12062 for y in pkgmap[x]["selected"]:
12063 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12064 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12065 mysplit = y.split("/")
12067 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12068 mysettings, unmerge_action not in ["clean","prune"],
12069 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12070 scheduler=scheduler)
12072 if retval != os.EX_OK:
12073 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12075 raise UninstallFailure(retval)
12078 if clean_world and hasattr(sets["world"], "cleanPackage"):
12079 sets["world"].cleanPackage(vartree.dbapi, y)
12080 emergelog(xterm_titles, " >>> unmerge success: "+y)
12081 if clean_world and hasattr(sets["world"], "remove"):
12082 for s in root_config.setconfig.active:
12083 sets["world"].remove(SETPREFIX+s)
# Regenerate the GNU info "dir" index for any info directory whose mtime
# changed since the previous run (tracked in prev_mtimes), by invoking
# /usr/bin/install-info per info file.  Errors are collected and reported
# at the end.
# NOTE(review): this listing is elided -- try:/continue/break lines and the
# initialization of icount/badcount/errmsg are missing from this extract.
12086 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12088 if os.path.exists("/usr/bin/install-info"):
12089 out = portage.output.EOutput()
# Collect directories whose mtime differs from the stored value.
12094 inforoot=normpath(root+z)
12095 if os.path.isdir(inforoot):
12096 infomtime = long(os.stat(inforoot).st_mtime)
12097 if inforoot not in prev_mtimes or \
12098 prev_mtimes[inforoot] != infomtime:
12099 regen_infodirs.append(inforoot)
12101 if not regen_infodirs:
12102 portage.writemsg_stdout("\n")
12103 out.einfo("GNU info directory index is up-to-date.")
12105 portage.writemsg_stdout("\n")
12106 out.einfo("Regenerating GNU info directory index...")
12108 dir_extensions = ("", ".gz", ".bz2")
12112 for inforoot in regen_infodirs:
12116 if not os.path.isdir(inforoot) or \
12117 not os.access(inforoot, os.W_OK):
12120 file_list = os.listdir(inforoot)
12122 dir_file = os.path.join(inforoot, "dir")
12123 moved_old_dir = False
12124 processed_count = 0
12125 for x in file_list:
# Skip hidden entries, subdirectories, and the dir/dir.old index files
# themselves.
12126 if x.startswith(".") or \
12127 os.path.isdir(os.path.join(inforoot, x)):
12129 if x.startswith("dir"):
12131 for ext in dir_extensions:
12132 if x == "dir" + ext or \
12133 x == "dir" + ext + ".old":
# Before processing the first file, stash the existing dir index aside so
# install-info rebuilds it from scratch.
12138 if processed_count == 0:
12139 for ext in dir_extensions:
12141 os.rename(dir_file + ext, dir_file + ext + ".old")
12142 moved_old_dir = True
12143 except EnvironmentError, e:
12144 if e.errno != errno.ENOENT:
12147 processed_count += 1
12148 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12149 existsstr="already exists, for file `"
12151 if re.search(existsstr,myso):
12152 # Already exists... Don't increment the count for this.
12154 elif myso[:44]=="install-info: warning: no info dir entry in ":
12155 # This info file doesn't contain a DIR-header: install-info produces this
12156 # (harmless) warning (the --quiet switch doesn't seem to work).
12157 # Don't increment the count for this.
12160 badcount=badcount+1
12161 errmsg += myso + "\n"
12164 if moved_old_dir and not os.path.exists(dir_file):
12165 # We didn't generate a new dir file, so put the old file
12166 # back where it was originally found.
12167 for ext in dir_extensions:
12169 os.rename(dir_file + ext + ".old", dir_file + ext)
12170 except EnvironmentError, e:
12171 if e.errno != errno.ENOENT:
12175 # Clean dir.old cruft so that they don't prevent
12176 # unmerge of otherwise empty directories.
12177 for ext in dir_extensions:
12179 os.unlink(dir_file + ext + ".old")
12180 except EnvironmentError, e:
12181 if e.errno != errno.ENOENT:
12185 #update mtime so we can potentially avoid regenerating.
12186 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
# Summary: error count and collected install-info output, or success line.
12189 out.eerror("Processed %d info files; %d errors." % \
12190 (icount, badcount))
12191 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12194 out.einfo("Processed %d info files." % (icount,))
# Check each configured repository for unread GLEP 42 news items and print
# a reminder to run "eselect news" if any were found.  Item counters are
# updated (marked) unless running with --pretend.
# NOTE(review): listing is elided -- the "if unreadItems:" style guard
# before the per-repo print lines is missing from this extract.
12197 def display_news_notification(root_config, myopts):
12198 target_root = root_config.root
12199 trees = root_config.trees
12200 settings = trees["vartree"].settings
12201 portdb = trees["porttree"].dbapi
12202 vardb = trees["vartree"].dbapi
12203 NEWS_PATH = os.path.join("metadata", "news")
12204 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12205 newsReaderDisplay = False
# --pretend must not mark news as counted/seen.
12206 update = "--pretend" not in myopts
12208 for repo in portdb.getRepositories():
12209 unreadItems = checkUpdatedNewsItems(
12210 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12212 if not newsReaderDisplay:
12213 newsReaderDisplay = True
12215 print colorize("WARN", " * IMPORTANT:"),
12216 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12219 if newsReaderDisplay:
12220 print colorize("WARN", " *"),
12221 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
# Report libraries preserved by the preserved-libs mechanism: for each
# registered package, list the preserved files (grouping hardlink/samefile
# aliases) and up to MAX_DISPLAY of their consumers with owning packages.
# NOTE(review): listing is elided -- MAX_DISPLAY's definition, linkmap
# rebuild call, and several loop/guard lines are missing from this extract.
12224 def display_preserved_libs(vardbapi):
12227 # Ensure the registry is consistent with existing files.
12228 vardbapi.plib_registry.pruneNonExisting()
12230 if vardbapi.plib_registry.hasEntries():
12232 print colorize("WARN", "!!!") + " existing preserved libs:"
12233 plibdata = vardbapi.plib_registry.getPreservedLibs()
12234 linkmap = vardbapi.linkmap
12237 linkmap_broken = False
# If the linkmap cannot be rebuilt (scanelf missing), consumer data is
# unavailable and linkmap_broken suppresses that part of the report.
12241 except portage.exception.CommandNotFound, e:
12242 writemsg_level("!!! Command Not Found: %s\n" % (e,),
12243 level=logging.ERROR, noiselevel=-1)
12245 linkmap_broken = True
12247 search_for_owners = set()
12248 for cpv in plibdata:
12249 internal_plib_keys = set(linkmap._obj_key(f) \
12250 for f in plibdata[cpv])
12251 for f in plibdata[cpv]:
12252 if f in consumer_map:
12255 for c in linkmap.findConsumers(f):
12256 # Filter out any consumers that are also preserved libs
12257 # belonging to the same package as the provider.
12258 if linkmap._obj_key(c) not in internal_plib_keys:
12259 consumers.append(c)
12261 consumer_map[f] = consumers
# Only the first MAX_DISPLAY+1 consumers need owner lookups.
12262 search_for_owners.update(consumers[:MAX_DISPLAY+1])
12264 owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12266 for cpv in plibdata:
12267 print colorize("WARN", ">>>") + " package: %s" % cpv
# Group preserved files that resolve to the same on-disk object so
# hardlinks/symlinks display together.
12269 for f in plibdata[cpv]:
12270 obj_key = linkmap._obj_key(f)
12271 alt_paths = samefile_map.get(obj_key)
12272 if alt_paths is None:
12274 samefile_map[obj_key] = alt_paths
12277 for alt_paths in samefile_map.itervalues():
12278 alt_paths = sorted(alt_paths)
12279 for p in alt_paths:
12280 print colorize("WARN", " * ") + " - %s" % (p,)
12282 consumers = consumer_map.get(f, [])
12283 for c in consumers[:MAX_DISPLAY]:
12284 print colorize("WARN", " * ") + " used by %s (%s)" % \
12285 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
# Exactly one extra consumer: show it rather than "1 other files".
12286 if len(consumers) == MAX_DISPLAY + 1:
12287 print colorize("WARN", " * ") + " used by %s (%s)" % \
12288 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12289 for x in owners.get(consumers[MAX_DISPLAY], [])))
12290 elif len(consumers) > MAX_DISPLAY:
12291 print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
12292 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
# Flush any queued elog mod_echo messages immediately so later
# notifications appear after them; returns whether anything was shown.
# NOTE(review): listing is elided -- the try:/else: lines around the
# import are missing from this extract.
12295 def _flush_elog_mod_echo():
12297 Dump the mod_echo output now so that our other
12298 notifications are shown last.
12300 @returns: True if messages were shown, False otherwise.
12302 messages_shown = False
12304 from portage.elog import mod_echo
12305 except ImportError:
12306 pass # happens during downgrade to a version without the module
12308 messages_shown = bool(mod_echo._items)
12309 mod_echo.finalize()
12310 return messages_shown
# End-of-merge housekeeping: log the exit status, flush elog output,
# regenerate the info index, report CONFIG_PROTECT updates, show news and
# preserved-libs notices.  Per its docstring, ends by calling
# sys.exit(retval) (the call itself is elided from this extract).
# NOTE(review): listing is elided -- several try:/finally: and return
# lines are missing here.
12312 def post_emerge(root_config, myopts, mtimedb, retval):
12314 Misc. things to run at the end of a merge session.
12317 Update Config Files
12320 Display preserved libs warnings
12323 @param trees: A dictionary mapping each ROOT to it's package databases
12325 @param mtimedb: The mtimeDB to store data needed across merge invocations
12326 @type mtimedb: MtimeDB class instance
12327 @param retval: Emerge's return value
12331 1. Calls sys.exit(retval)
12334 target_root = root_config.root
12335 trees = { target_root : root_config.trees }
12336 vardbapi = trees[target_root]["vartree"].dbapi
12337 settings = vardbapi.settings
12338 info_mtimes = mtimedb["info"]
12340 # Load the most current variables from ${ROOT}/etc/profile.env
12343 settings.regenerate()
12346 config_protect = settings.get("CONFIG_PROTECT","").split()
12347 infodirs = settings.get("INFOPATH","").split(":") + \
12348 settings.get("INFODIR","").split(":")
12352 if retval == os.EX_OK:
12353 exit_msg = " *** exiting successfully."
12355 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12356 emergelog("notitles" not in settings.features, exit_msg)
12358 _flush_elog_mod_echo()
# If the vdb counter hash is unchanged, nothing was merged/unmerged, so
# only the news notification is needed.
12360 counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12361 if "--pretend" in myopts or (counter_hash is not None and \
12362 counter_hash == vardbapi._counter_hash()):
12363 display_news_notification(root_config, myopts)
12364 # If vdb state has not changed then there's nothing else to do.
12367 vdb_path = os.path.join(target_root, portage.VDB_PATH)
12368 portage.util.ensure_dirs(vdb_path)
12370 if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12371 vdb_lock = portage.locks.lockdir(vdb_path)
12375 if "noinfo" not in settings.features:
12376 chk_updated_info_files(target_root,
12377 infodirs, info_mtimes, retval)
12381 portage.locks.unlockdir(vdb_lock)
12383 chk_updated_cfg_files(target_root, config_protect)
12385 display_news_notification(root_config, myopts)
12386 if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12387 display_preserved_libs(vardbapi)
# Scan each CONFIG_PROTECT location under target_root for pending
# ._cfg????_* update files (via find(1)) and tell the user how many
# config files need updating.
# NOTE(review): listing is elided -- the protect-count accumulator, the
# OSError handling around lstat, and continue lines are missing here.
12392 def chk_updated_cfg_files(target_root, config_protect):
12394 #number of directories with some protect files in them
12396 for x in config_protect:
12397 x = os.path.join(target_root, x.lstrip(os.path.sep))
12398 if not os.access(x, os.W_OK):
12399 # Avoid Permission denied errors generated
12403 mymode = os.lstat(x).st_mode
12406 if stat.S_ISLNK(mymode):
12407 # We want to treat it like a directory if it
12408 # is a symlink to an existing directory.
12410 real_mode = os.stat(x).st_mode
12411 if stat.S_ISDIR(real_mode):
# Directories are searched recursively; single files use -maxdepth 1 with
# the filename spliced into the ._cfg pattern.
12415 if stat.S_ISDIR(mymode):
12416 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12418 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12419 os.path.split(x.rstrip(os.path.sep))
12420 mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12421 a = commands.getstatusoutput(mycommand)
12423 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12425 # Show the error message alone, sending stdout to /dev/null.
12426 os.system(mycommand + " 1>/dev/null")
# -print0 output: NUL-separated path list.
12428 files = a[1].split('\0')
12429 # split always produces an empty string as the last element
12430 if files and not files[-1]:
12434 print "\n"+colorize("WARN", " * IMPORTANT:"),
12435 if stat.S_ISDIR(mymode):
12436 print "%d config files in '%s' need updating." % \
12439 print "config file '%s' needs updating." % x
12442 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12443 " section of the " + bold("emerge")
12444 print " "+yellow("*")+" man page to learn how to update config files."
# Thin wrapper around portage.news.NewsManager: count unread, relevant
# news items for one repository.
# NOTE(review): the signature continuation line (presumably
# "update=False):") is elided from this extract.
12446 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12449 Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12450 Returns the number of unread (yet relevent) items.
12452 @param portdb: a portage tree database
12453 @type portdb: pordbapi
12454 @param vardb: an installed package database
12455 @type vardb: vardbapi
12458 @param UNREAD_PATH:
12464 1. The number of unread but relevant news items.
12467 from portage.news import NewsManager
12468 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12469 return manager.getUnreadItems( repo_id, update=update )
# Insert "category/" in front of the package-name portion of an atom,
# preserving any leading operator characters (e.g. ">=", "=", "~").
# NOTE(review): elided -- the "if alphanum:" guard and the no-match
# branch (presumably ret = None) plus "return ret" are missing here.
12471 def insert_category_into_atom(atom, category):
12472 alphanum = re.search(r'\w', atom)
12474 ret = atom[:alphanum.start()] + "%s/" % category + \
12475 atom[alphanum.start():]
# Validate a user-supplied atom, tolerating a missing category by
# temporarily prefixing a dummy "cat/" before calling portage.isvalidatom.
# NOTE(review): elided -- the "if alphanum:" guard between search and the
# rewrite is missing from this extract.
12480 def is_valid_package_atom(x):
12482 alphanum = re.search(r'\w', x)
12484 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12485 return portage.isvalidatom(x)
# Print a pointer to the Gentoo Handbook section about blocked packages.
# NOTE(review): elided -- blank-line print statements between these lines
# are missing from this extract.
12487 def show_blocker_docs_link():
12489 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12490 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12492 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12495 def show_mask_docs():
12496 print "For more information, see the MASKED PACKAGES section in the emerge"
12497 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
	"""Synchronize PORTDIR via the protocol encoded in SYNC (git, rsync
	or cvs), or only regenerate metadata for "emerge --metadata", then run
	the post-sync steps: cache transfer, config reload, /etc/portage
	post_sync hook, portage-update nag and news notification.
	NOTE(review): this view of the function has lines elided (several
	if/else/try headers are missing); "# (elided ...)" comments below are
	best-effort reconstructions -- confirm against the full source.
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === sync")
	myportdir = settings.get("PORTDIR", None)
	out = portage.output.EOutput()
	# (elided guard: PORTDIR unset/empty)
	sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
	# Strip a single trailing slash for consistent path handling below.
	if myportdir[-1]=="/":
		myportdir=myportdir[:-1]
	# (elided try:) stat PORTDIR, creating it when missing.
	st = os.stat(myportdir)
	print ">>>",myportdir,"not found, creating it."
	os.makedirs(myportdir,0755)
	st = os.stat(myportdir)
	# (elided: spawn_kwargs initialization)
	spawn_kwargs["env"] = settings.environ()
	# With FEATURES=usersync, drop root privileges so the sync runs with
	# the uid/gid that already owns the tree.
	if 'usersync' in settings.features and \
		portage.data.secpass >= 2 and \
		(st.st_uid != os.getuid() and st.st_mode & 0700 or \
		st.st_gid != os.getgid() and st.st_mode & 0070):
		# (elided try:) resolve the tree owner's home dir for $HOME
		homedir = pwd.getpwuid(st.st_uid).pw_dir
		# Drop privileges when syncing, in order to match
		# existing uid/gid settings.
		spawn_kwargs["uid"] = st.st_uid
		spawn_kwargs["gid"] = st.st_gid
		spawn_kwargs["groups"] = [st.st_gid]
		spawn_kwargs["env"]["HOME"] = homedir
		# (elided: umask initialization) keep group-unwritable trees that way
		if not st.st_mode & 0020:
			umask = umask | 0020
		spawn_kwargs["umask"] = umask
	syncuri = settings.get("SYNC", "").strip()
	# (elided guard: SYNC unset/empty)
	writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
		noiselevel=-1, level=logging.ERROR)
	# Detect a checkout under revision control so rsync is not run on it.
	vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
	vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
	dosyncuri = syncuri
	updatecache_flg = False
	if myaction == "metadata":
		print "skipping sync"
		updatecache_flg = True
	elif ".git" in vcs_dirs:
		# Update existing git repository, and ignore the syncuri. We are
		# going to trust the user and assume that the user is in the branch
		# that he/she wants updated. We'll let the user manage branches with
		# git directly.  (elided continuation)
		if portage.process.find_binary("git") is None:
			msg = ["Command not found: git",
			"Type \"emerge dev-util/git\" to enable git support."]
			# (elided: for l in msg:)
			writemsg_level("!!! %s\n" % l,
				level=logging.ERROR, noiselevel=-1)
		msg = ">>> Starting git pull in %s..." % myportdir
		emergelog(xterm_titles, msg )
		writemsg_level(msg + "\n")
		exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
			(portage._shell_quote(myportdir),), **spawn_kwargs)
		if exitcode != os.EX_OK:
			msg = "!!! git pull error in %s." % myportdir
			emergelog(xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
		msg = ">>> Git pull in %s successful" % myportdir
		emergelog(xterm_titles, msg)
		writemsg_level(msg + "\n")
		# git discards mtimes, so resynchronize them from the cache.
		exitcode = git_sync_timestamps(settings, myportdir)
		if exitcode == os.EX_OK:
			updatecache_flg = True
	elif syncuri[:8]=="rsync://":
		# Refuse to rsync over a VCS checkout: --delete would destroy it.
		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
		if not os.path.exists("/usr/bin/rsync"):
			print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
			print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
		# (elided: rsync_opts / mytimeout initialization)
		if settings["PORTAGE_RSYNC_OPTS"] == "":
			portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
			rsync_opts.extend([
				"--recursive", # Recurse directories
				"--links", # Consider symlinks
				"--safe-links", # Ignore links outside of tree
				"--perms", # Preserve permissions
				"--times", # Preserve mod times
				"--compress", # Compress the data transmitted
				"--force", # Force deletion on non-empty dirs
				"--whole-file", # Don't do block transfers, only entire files
				"--delete", # Delete files that aren't in the master tree
				"--stats", # Show final statistics about what was transfered
				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
				"--exclude=/distfiles", # Exclude distfiles from consideration
				"--exclude=/local", # Exclude local from consideration
				"--exclude=/packages", # Exclude packages from consideration
		# The below validation is not needed when using the above hardcoded
		# defaults.  (elided continuation / else branch:)
			portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
			# (elided: rsync_opts.extend(...))
				shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
			for opt in ("--recursive", "--times"):
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + " adding required option " + \
						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
					rsync_opts.append(opt)
			for exclude in ("distfiles", "local", "packages"):
				opt = "--exclude=/%s" % exclude
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + \
						" adding required option %s not included in " % opt + \
						"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
					rsync_opts.append(opt)
			# Official gentoo.org mirrors: enforce sane timeout and
			# bandwidth-friendly options.
			if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
				def rsync_opt_startswith(opt_prefix):
					for x in rsync_opts:
						if x.startswith(opt_prefix):
							# (elided: return 1 / return 0)
				if not rsync_opt_startswith("--timeout="):
					rsync_opts.append("--timeout=%d" % mytimeout)
				for opt in ("--compress", "--whole-file"):
					if opt not in rsync_opts:
						portage.writemsg(yellow("WARNING:") + " adding required option " + \
							"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
						rsync_opts.append(opt)
		if "--quiet" in myopts:
			rsync_opts.append("--quiet") # Shut up a lot
		# (elided else:)
			rsync_opts.append("--verbose") # Print filelist
		if "--verbose" in myopts:
			rsync_opts.append("--progress") # Progress meter for each file
		if "--debug" in myopts:
			rsync_opts.append("--checksum") # Force checksum on all files
		# Real local timestamp file.
		servertimestampfile = os.path.join(
			myportdir, "metadata", "timestamp.chk")
		content = portage.util.grabfile(servertimestampfile)
		# (elided: mytimestamp = 0; if content: try:)
			mytimestamp = time.mktime(time.strptime(content[0],
				"%a, %d %b %Y %H:%M:%S +0000"))
		except (OverflowError, ValueError):
			# (elided: pass)
		# (elided try:)
		rsync_initial_timeout = \
			int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
		# (elided except ValueError:)
			rsync_initial_timeout = 15
		# (elided try:)
		maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit, e:
			raise # Needed else can't exit
		# (elided except:)
			maxretries=3 #default number of retries
		# Split the SYNC uri into optional user, hostname and port parts.
		user_name, hostname, port = re.split(
			"rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
		# (elided: normalize port to "")
		if user_name is None:
			# (elided: user_name = "")
		updatecache_flg=True
		all_rsync_opts = set(rsync_opts)
		extra_rsync_opts = shlex.split(
			settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
		all_rsync_opts.update(extra_rsync_opts)
		# Address family selection: IPv4 unless -6/--ipv6 was requested.
		family = socket.AF_INET
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6
		# Sentinel exit codes used below (never produced by rsync itself).
		SERVER_OUT_OF_DATE = -1
		EXCEEDED_MAX_RETRIES = -2
		# (elided: retry-loop header, ips = [], try:)
		# Resolve all addresses up front so retries can rotate mirrors.
		for addrinfo in socket.getaddrinfo(
			hostname, None, family, socket.SOCK_STREAM):
			if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
				# IPv6 addresses need to be enclosed in square brackets
				ips.append("[%s]" % addrinfo[4][0])
			# (elided else:)
				ips.append(addrinfo[4][0])
		from random import shuffle
		# (elided: shuffle(ips))
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print "Notice:",str(e)
		# (elided: if ips: try:) substitute a resolved IP into the uri
		dosyncuri = syncuri.replace(
			"//" + user_name + hostname + port + "/",
			"//" + user_name + ips[0] + port + "/", 1)
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print "Notice:",str(e)
		# (elided: first-attempt branch)
		if "--ask" in myopts:
			if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
				# (elided: abort the sync)
		emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
		if "--quiet" not in myopts:
			print ">>> Starting rsync with "+dosyncuri+"..."
		# (elided else: retry banner)
		emergelog(xterm_titles,
			">>> Starting retry %d of %d with %s" % \
			(retries,maxretries,dosyncuri))
		print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
		if mytimestamp != 0 and "--quiet" not in myopts:
			print ">>> Checking server timestamp ..."
		rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
		if "--debug" in myopts:
			# (elided: print rsynccommand)
		exitcode = os.EX_OK
		servertimestamp = 0
		# Even if there's no timestamp available locally, fetch the
		# timestamp anyway as an initial probe to verify that the server is
		# responsive. This protects us from hanging indefinitely on a
		# connection attempt to an unresponsive server which rsync's
		# --timeout option does not prevent.
		# Temporary file for remote server timestamp comparison.
		from tempfile import mkstemp
		fd, tmpservertimestampfile = mkstemp()
		# (elided: close fd / mypids = [] -- confirm)
		mycommand = rsynccommand[:]
		mycommand.append(dosyncuri.rstrip("/") + \
			"/metadata/timestamp.chk")
		mycommand.append(tmpservertimestampfile)
		# (elided try:)
		def timeout_handler(signum, frame):
			raise portage.exception.PortageException("timed out")
		signal.signal(signal.SIGALRM, timeout_handler)
		# Timeout here in case the server is unresponsive. The
		# --timeout rsync option doesn't apply to the initial
		# connection attempt.
		if rsync_initial_timeout:
			signal.alarm(rsync_initial_timeout)
		# (elided try:) spawn the probe rsync and collect its timestamp
		mypids.extend(portage.process.spawn(
			mycommand, env=settings.environ(), returnpid=True))
		exitcode = os.waitpid(mypids[0], 0)[1]
		content = portage.grabfile(tmpservertimestampfile)
		# (elided finally:) always cancel the alarm and drop the tempfile
		if rsync_initial_timeout:
			# (elided: signal.alarm(0))
		# (elided try:)
		os.unlink(tmpservertimestampfile)
		# (elided except OSError: pass)
		except portage.exception.PortageException, e:
			# Alarm fired: reap the probe rsync if it is still running.
			if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
				os.kill(mypids[0], signal.SIGTERM)
				os.waitpid(mypids[0], 0)
			# This is the same code rsync uses for timeout.
			# (elided: exitcode = 30; else branch:)
		if exitcode != os.EX_OK:
			# Translate the waitpid status into a shell-style exit code.
			if exitcode & 0xff:
				exitcode = (exitcode & 0xff) << 8
			# (elided else:)
				exitcode = exitcode >> 8
		# (elided: if mypids:)
		portage.process.spawned_pids.remove(mypids[0])
		# (elided: if content: try:)
		servertimestamp = time.mktime(time.strptime(
			content[0], "%a, %d %b %Y %H:%M:%S +0000"))
		except (OverflowError, ValueError):
			# (elided: pass)
		del mycommand, mypids, content
		if exitcode == os.EX_OK:
			if (servertimestamp != 0) and (servertimestamp == mytimestamp):
				emergelog(xterm_titles,
					">>> Cancelling sync -- Already current.")
				print ">>> Timestamps on the server and in the local repository are the same."
				print ">>> Cancelling all further sync action. You are already up to date."
				print ">>> In order to force sync, remove '%s'." % servertimestampfile
				# (elided: return os.EX_OK)
			elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
				emergelog(xterm_titles,
					">>> Server out of date: %s" % dosyncuri)
				print ">>> SERVER OUT OF DATE: %s" % dosyncuri
				print ">>> In order to force sync, remove '%s'." % servertimestampfile
				exitcode = SERVER_OUT_OF_DATE
			elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
				# Server is newer (or no timestamps available): real sync.
				mycommand = rsynccommand + [dosyncuri+"/", myportdir]
				exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
				if exitcode in [0,1,3,4,11,14,20,21]:
					# (elided: break out of retry loop)
				elif exitcode in [1,3,4,11,14,20,21]:
					# (elided: break out of retry loop)
		# Code 2 indicates protocol incompatibility, which is expected
		# for servers with protocol < 29 that don't support
		# --prune-empty-directories. Retry for a server that supports
		# at least rsync protocol version 29 (>=rsync-2.6.4).
		if retries<=maxretries:
			print ">>> Retrying..."
			# (elided: continue retry loop; else branch:)
		updatecache_flg=False
		exitcode = EXCEEDED_MAX_RETRIES
		# (elided: post-loop handling; if exitcode == os.EX_OK:)
		emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
		elif exitcode == SERVER_OUT_OF_DATE:
			# (elided: sys.exit(1))
		elif exitcode == EXCEEDED_MAX_RETRIES:
			# (elided: sys.stderr.write(...))
			">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
		# (elided: elif exitcode > 0: msg = []; map rsync exit codes to
		# user-facing advice; if exitcode == 1:)
		msg.append("Rsync has reported that there is a syntax error. Please ensure")
		msg.append("that your SYNC statement is proper.")
		msg.append("SYNC=" + settings["SYNC"])
		# (elided: elif exitcode == 11:)
		msg.append("Rsync has reported that there is a File IO error. Normally")
		msg.append("this means your disk is full, but can be caused by corruption")
		msg.append("on the filesystem that contains PORTDIR. Please investigate")
		msg.append("and try again after the problem has been fixed.")
		msg.append("PORTDIR=" + settings["PORTDIR"])
		# (elided: elif exitcode == 20:)
		msg.append("Rsync was killed before it finished.")
		# (elided else:)
		msg.append("Rsync has not successfully finished. It is recommended that you keep")
		msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
		msg.append("to use rsync due to firewall or other restrictions. This should be a")
		msg.append("temporary problem unless complications exist with your network")
		msg.append("(and possibly your system's filesystem) configuration.")
	elif syncuri[:6]=="cvs://":
		if not os.path.exists("/usr/bin/cvs"):
			print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
			print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
		cvsroot=syncuri[6:]
		cvsdir=os.path.dirname(myportdir)
		if not os.path.exists(myportdir+"/CVS"):
			# Initial checkout into the parent dir, renamed into place.
			print ">>> Starting initial cvs checkout with "+syncuri+"..."
			if os.path.exists(cvsdir+"/gentoo-x86"):
				print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
			# (elided try:) remove an empty PORTDIR placeholder
			os.rmdir(myportdir)
			# (elided except OSError, e:)
			if e.errno != errno.ENOENT:
				# (elided: sys.stderr.write(...))
				"!!! existing '%s' directory; exiting.\n" % myportdir)
			if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
				print "!!! cvs checkout error; exiting."
			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
		# (elided else:) existing checkout -> plain update
			print ">>> Starting cvs update with "+syncuri+"..."
			retval = portage.process.spawn_bash(
				"cd %s; cvs -z0 -q update -dP" % \
				(portage._shell_quote(myportdir),), **spawn_kwargs)
			if retval != os.EX_OK:
				# (elided: sys.exit(retval))
		dosyncuri = syncuri
	# (elided else:) unknown SYNC protocol
		writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
			noiselevel=-1, level=logging.ERROR)
	# Skip the cache transfer when FEATURES=metadata-transfer is disabled.
	if updatecache_flg and \
		myaction != "metadata" and \
		"metadata-transfer" not in settings.features:
		updatecache_flg = False
	# Reload the whole config from scratch.
	settings, trees, mtimedb = load_emerge_config(trees=trees)
	root_config = trees[settings["ROOT"]]["root_config"]
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
	if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
		action_metadata(settings, portdb, myopts)
	if portage._global_updates(trees, mtimedb["updates"]):
		# (elided: mtimedb.commit())
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi
		root_config = trees[settings["ROOT"]]["root_config"]
	# Compare the best visible portage version with the installed one so
	# we can nag the user to update portage first.
	mybestpv = portdb.xmatch("bestmatch-visible",
		portage.const.PORTAGE_PACKAGE_ATOM)
	mypvs = portage.best(
		trees[settings["ROOT"]]["vartree"].dbapi.match(
		portage.const.PORTAGE_PACKAGE_ATOM))
	chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
	if myaction != "metadata":
		# Run the user's executable post_sync hook, if present.
		if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
			retval = portage.process.spawn(
				[os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
				dosyncuri], env=settings.environ())
			if retval != os.EX_OK:
				print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
	if(mybestpv != mypvs) and not "--quiet" in myopts:
		print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
		print red(" * ")+"that you update portage now, before any other packages are updated."
		print red(" * ")+"To update portage, run 'emerge portage' now."
	display_news_notification(root_config, myopts)
def git_sync_timestamps(settings, portdir):
	"""
	Since git doesn't preserve timestamps, synchronize timestamps between
	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
	for a given file as long as the file in the working tree is not modified
	(relative to HEAD).

	NOTE(review): lines are elided from this view ("# (elided ...)" marks
	the gaps) -- confirm the reconstructed control flow.
	"""
	cache_dir = os.path.join(portdir, "metadata", "cache")
	if not os.path.isdir(cache_dir):
		# (elided: return os.EX_OK) -- nothing to do without a cache
	writemsg_level(">>> Synchronizing timestamps...\n")
	from portage.cache.cache_errors import CacheError
	# (elided try:)
	cache_db = settings.load_best_module("portdbapi.metadbmodule")(
		portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	except CacheError, e:
		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)
		# (elided: return error code)
	ec_dir = os.path.join(portdir, "eclass")
	# (elided try:)
	ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
		if f.endswith(".eclass"))
	# (elided except OSError, e:)
	writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
		level=logging.ERROR, noiselevel=-1)
	# Ask git which tracked files are locally modified; their cached
	# timestamps cannot be trusted.
	args = [portage.const.BASH_BINARY, "-c",
		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
		portage._shell_quote(portdir)]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
	modified_files = set(l.rstrip("\n") for l in proc.stdout)
	# (elided: rval = proc.wait())
	if rval != os.EX_OK:
		# (elided: return rval)
	modified_eclasses = set(ec for ec in ec_names \
		if os.path.join("eclass", ec + ".eclass") in modified_files)
	# Track mtimes already restored so inconsistencies can be detected.
	updated_ec_mtimes = {}
	for cpv in cache_db:
		cpv_split = portage.catpkgsplit(cpv)
		if cpv_split is None:
			writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
			# (elided: continue)
		cat, pn, ver, rev = cpv_split
		cat, pf = portage.catsplit(cpv)
		relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
		if relative_eb_path in modified_files:
			# (elided: continue) -- locally modified ebuild, skip it
		# (elided try:)
		cache_entry = cache_db[cpv]
		eb_mtime = cache_entry.get("_mtime_")
		ec_mtimes = cache_entry.get("_eclasses_")
		# (elided except KeyError:)
		writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
			level=logging.ERROR, noiselevel=-1)
		# (elided: continue)
		except CacheError, e:
			writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
				(cpv, e), level=logging.ERROR, noiselevel=-1)
			# (elided: continue)
		if eb_mtime is None:
			writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
			# (elided: continue)
		# (elided try:)
		eb_mtime = long(eb_mtime)
		# (elided except ValueError:)
		writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
			(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
		if ec_mtimes is None:
			writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		if modified_eclasses.intersection(ec_mtimes):
			# (elided: continue) -- depends on a locally modified eclass
		missing_eclasses = set(ec_mtimes).difference(ec_names)
		if missing_eclasses:
			writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
				(cpv, sorted(missing_eclasses)), level=logging.ERROR,
				# (elided: noiselevel=-1); continue
		eb_path = os.path.join(portdir, relative_eb_path)
		# (elided try:)
		current_eb_mtime = os.stat(eb_path)
		# (elided except OSError:)
		writemsg_level("!!! Missing ebuild: %s\n" % \
			(cpv,), level=logging.ERROR, noiselevel=-1)
		# An eclass whose restored mtime disagrees between cache entries
		# cannot be synchronized safely.
		inconsistent = False
		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			updated_mtime = updated_ec_mtimes.get(ec)
			if updated_mtime is not None and updated_mtime != ec_mtime:
				writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
					(cpv, ec), level=logging.ERROR, noiselevel=-1)
				inconsistent = True
				# (elided: break; if inconsistent: continue)
		if current_eb_mtime != eb_mtime:
			os.utime(eb_path, (eb_mtime, eb_mtime))
		for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
			if ec in updated_ec_mtimes:
				# (elided: continue)
			ec_path = os.path.join(ec_dir, ec + ".eclass")
			current_mtime = long(os.stat(ec_path).st_mtime)
			if current_mtime != ec_mtime:
				os.utime(ec_path, (ec_mtime, ec_mtime))
			updated_ec_mtimes[ec] = ec_mtime
	# (elided: return os.EX_OK)
def action_metadata(settings, portdb, myopts):
	"""Transfer metadata from ${PORTDIR}/metadata/cache into the local
	depcache (the FEATURES=metadata-transfer step), printing a percentage
	progress indicator unless --quiet was given.
	NOTE(review): lines are elided from this view; "# (elided ...)"
	comments mark the gaps -- confirm against the full source.
	"""
	portage.writemsg_stdout("\n>>> Updating Portage cache: ")
	old_umask = os.umask(0002)
	cachedir = os.path.normpath(settings.depcachedir)
	# Refuse to operate when depcachedir points at a critical system
	# directory -- a misconfiguration here could be destructive.
	if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
		"/lib", "/opt", "/proc", "/root", "/sbin",
		"/sys", "/tmp", "/usr", "/var"]:
		print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
			"ROOT DIRECTORY ON YOUR SYSTEM."
		print >> sys.stderr, \
			"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
		# (elided: sys.exit with error status)
	if not os.path.exists(cachedir):
		# (elided: os.makedirs(cachedir))
	ec = portage.eclass_cache.cache(portdb.porttree_root)
	myportdir = os.path.realpath(settings["PORTDIR"])
	cm = settings.load_best_module("portdbapi.metadbmodule")(
		myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	from portage.cache import util
	# Progress reporter: walks all cpvs and rewrites "NN%" in place.
	class percentage_noise_maker(util.quiet_mirroring):
		def __init__(self, dbapi):
			# (elided: self.dbapi = dbapi -- confirm)
			self.cp_all = dbapi.cp_all()
			l = len(self.cp_all)
			self.call_update_min = 100000000
			# Update the display roughly once per percent of packages.
			self.min_cp_all = l/100.0
			# (elided: counter / percentage-string initialization)
		def __iter__(self):
			for x in self.cp_all:
				# (elided: advance counter)
				if self.count > self.min_cp_all:
					self.call_update_min = 0
					# (elided: reset counter)
				for y in self.dbapi.cp_list(x):
					# (elided: yield y)
				self.call_update_mine = 0
		def update(self, *arg):
			try: self.pstr = int(self.pstr) + 1
			except ValueError: self.pstr = 1
			# Backspace over the previous percentage and print the new one.
			sys.stdout.write("%s%i%%" % \
				("\b" * (len(str(self.pstr))+1), self.pstr))
			# (elided: flush stdout)
			self.call_update_min = 10000000
		def finish(self, *arg):
			sys.stdout.write("\b\b\b\b100%\n")
			# (elided: flush stdout)
	if "--quiet" in myopts:
		# Quiet mode: no progress output, just a flat cpv generator.
		def quicky_cpv_generator(cp_all_list):
			for x in cp_all_list:
				for y in portdb.cp_list(x):
					# (elided: yield y)
		source = quicky_cpv_generator(portdb.cp_all())
		noise_maker = portage.cache.util.quiet_mirroring()
	# (elided else:)
		noise_maker = source = percentage_noise_maker(portdb)
	portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
		eclass_cache=ec, verbose_instance=noise_maker)
	# Restore the caller's umask.
	os.umask(old_umask)
def action_regen(settings, portdb, max_jobs, max_load):
	"""Regenerate the metadata cache for all ebuilds in PORTDIR
	(emerge --regen), running up to *max_jobs* parallel jobs subject to
	the *max_load* limit, and return the regenerator's exit status."""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === regen")
	#regenerate cache entries
	portage.writemsg_stdout("Regenerating cache entries...\n")
	# (elided try:) detach stdin so ebuild phases cannot block on it
	os.close(sys.stdin.fileno())
	except SystemExit, e:
		raise # Needed else can't exit
	# (elided except: pass)
	regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
	# (elided: regen.run())
	portage.writemsg_stdout("done!\n")
	return regen.returncode
def action_config(settings, trees, myopts, myfiles):
	"""Run the pkg_config() phase (emerge --config) for a single installed
	package selected by the one atom in *myfiles*.
	NOTE(review): lines are elided from this view; "# (elided ...)"
	comments mark the gaps -- confirm against the full source.
	"""
	if len(myfiles) != 1:
		print red("!!! config can only take a single package atom at this time\n")
		# (elided: sys.exit(1))
	if not is_valid_package_atom(myfiles[0]):
		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
			# (elided: noiselevel=-1))
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
		# (elided: sys.exit(1))
	# (elided try:)
	pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
	except portage.exception.AmbiguousPackageName, e:
		# Multiple matches thrown from cpv_expand
		# (elided: pkgs = e.args[0])
	# (elided: if len(pkgs) == 0:)
	print "No packages found.\n"
	# (elided: sys.exit(0))
	elif len(pkgs) > 1:
		if "--ask" in myopts:
			# Interactively let the user pick one of the matches.
			print "Please select a package to configure:"
			# (elided: build numbered options from pkgs)
			options.append(str(idx))
			print options[-1]+") "+pkg
			options.append("X")
			idx = userquery("Selection?", options)
			# (elided: if idx == "X": exit)
			pkg = pkgs[int(idx)-1]
		# (elided else:) non-interactive ambiguity is an error
			print "The following packages available:"
			# (elided: list pkgs)
			print "\nPlease use a specific atom or the --ask option."
			# (elided: sys.exit(1); else: pkg = pkgs[0])
	if "--ask" in myopts:
		if userquery("Ready to configure "+pkg+"?") == "No":
			# (elided: sys.exit(0); else branch:)
	print "Configuring pkg..."
	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
	mysettings = portage.config(clone=settings)
	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
		# (elided: mysettings,)
		# NOTE(review): this compares a string to int 1, so it is always
		# False; "1" (or the `debug` local above) was almost certainly
		# intended.
		debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
	if retval == os.EX_OK:
		portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
			mysettings, debug=debug, mydbapi=vardb, tree="vartree")
def action_info(settings, trees, myopts, myfiles):
	"""Implement "emerge --info": print the portage version banner, system
	settings, versions of relevant toolchain packages and, for any atoms
	given on the command line, the settings each installed package was
	built with (plus its pkg_info() phase output).
	NOTE(review): lines are elided from this view; "# (elided ...)"
	comments mark the gaps -- confirm against the full source.
	"""
	print getportageversion(settings["PORTDIR"], settings["ROOT"],
		settings.profile_path, settings["CHOST"],
		trees[settings["ROOT"]]["vartree"].dbapi)
	header_title = "System Settings"
	# (elided: header_width definition)
	print header_width * "="
	print header_title.rjust(int(header_width/2 + len(header_title)/2))
	print header_width * "="
	print "System uname: "+platform.platform(aliased=1)
	lastSync = portage.grabfile(os.path.join(
		settings["PORTDIR"], "metadata", "timestamp.chk"))
	print "Timestamp of tree:",
	# (elided: print lastSync[0] or "Unknown")
	# Report distcc / ccache versions and whether the feature is active.
	output=commands.getstatusoutput("distcc --version")
	# (elided: if command succeeded:)
	print str(output[1].split("\n",1)[0]),
	if "distcc" in settings.features:
		# (elided: print enabled/disabled marker)
	output=commands.getstatusoutput("ccache -V")
	print str(output[1].split("\n",1)[0]),
	if "ccache" in settings.features:
		# (elided: print enabled/disabled marker)
	# Versions of key toolchain packages, plus profile-defined extras.
	myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
		"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
	myvars = portage.util.unique_array(myvars)
	# (elided: sort; for x in myvars:)
	if portage.isvalidatom(x):
		pkg_matches = trees["/"]["vartree"].dbapi.match(x)
		pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
		pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
		# (elided: pkgs = [])
		for pn, ver, rev in pkg_matches:
			# (elided: include -rN only when rev != "r0")
			pkgs.append(ver + "-" + rev)
		# (elided: if pkgs:)
		pkgs = ", ".join(pkgs)
		print "%-20s %s" % (x+":", pkgs)
	# (elided else:)
		print "%-20s %s" % (x+":", "[NOT VALID]")
	libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
	# --verbose dumps every settings key; otherwise a curated list plus
	# profile-defined info_vars.
	if "--verbose" in myopts:
		myvars=settings.keys()
	# (elided else:)
		myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
			'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
			'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
			'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
		myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
	myvars = portage.util.unique_array(myvars)
	# (elided: unset_vars = []; sort; loop over myvars with membership test)
	print '%s="%s"' % (x, settings[x])
	# USE is special-cased: separate plain USE flags from USE_EXPAND values.
	use = set(settings["USE"].split())
	use_expand = settings["USE_EXPAND"].split()
	# (elided: sort use_expand)
	for varname in use_expand:
		flag_prefix = varname.lower() + "_"
		for f in list(use):
			if f.startswith(flag_prefix):
				# (elided: remove the expanded flag from `use`)
	# (elided: sort remaining flags)
	print 'USE="%s"' % " ".join(use),
	for varname in use_expand:
		myval = settings.get(varname)
		# (elided: if myval:)
		print '%s="%s"' % (varname, myval),
	# (elided: else branch of the myvars loop -- variable not set)
	unset_vars.append(x)
	# (elided: if unset_vars:)
	print "Unset: "+", ".join(unset_vars)
	if "--debug" in myopts:
		# Dump CVS id strings of all portage submodules.
		for x in dir(portage):
			module = getattr(portage, x)
			if "cvs_id_string" in dir(module):
				print "%s: %s" % (str(x), str(module.cvs_id_string))
	# See if we can find any packages installed matching the strings
	# passed on the command line
	# (elided: mypkgs = [])
	vardb = trees[settings["ROOT"]]["vartree"].dbapi
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
	# (elided: for x in myfiles:)
	mypkgs.extend(vardb.match(x))
	# If some packages were found...
	# (elided: if mypkgs:)
	# Get our global settings (we only print stuff if it varies from
	# the current config)
	mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
	auxkeys = mydesiredvars + [ "USE", "IUSE"]
	# (elided: global_vals = {})
	pkgsettings = portage.config(clone=settings)
	for myvar in mydesiredvars:
		global_vals[myvar] = set(settings.get(myvar, "").split())
	# Loop through each package
	# Only print settings if they differ from global settings
	header_title = "Package Settings"
	print header_width * "="
	print header_title.rjust(int(header_width/2 + len(header_title)/2))
	print header_width * "="
	from portage.output import EOutput
	# (elided: out = EOutput(); for pkg in mypkgs:)
	# Get all package specific variables
	auxvalues = vardb.aux_get(pkg, auxkeys)
	# (elided: valuesmap = {})
	for i in xrange(len(auxkeys)):
		valuesmap[auxkeys[i]] = set(auxvalues[i].split())
	# (elided: diff_values = {})
	for myvar in mydesiredvars:
		# If the package variable doesn't match the
		# current global variable, something has changed
		# so set diff_found so we know to print
		if valuesmap[myvar] != global_vals[myvar]:
			diff_values[myvar] = valuesmap[myvar]
	valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
	valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
	pkgsettings.reset()
	# If a matching ebuild is no longer available in the tree, maybe it
	# would make sense to compare against the flags for the best
	# available version with the same slot?
	# (elided: mydb = None)
	if portdb.cpv_exists(pkg):
		# (elided: mydb = portdb)
	pkgsettings.setcpv(pkg, mydb=mydb)
	if valuesmap["IUSE"].intersection(
		pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
		diff_values["USE"] = valuesmap["USE"]
	# If a difference was found, print the info for
	# this package.  (elided: if diff_values:)
	# Print package info
	print "%s was built with the following:" % pkg
	for myvar in mydesiredvars + ["USE"]:
		if myvar in diff_values:
			mylist = list(diff_values[myvar])
			# (elided: sort mylist)
			print "%s=\"%s\"" % (myvar, " ".join(mylist))
	print ">>> Attempting to run pkg_info() for '%s'" % pkg
	ebuildpath = vardb.findname(pkg)
	if not ebuildpath or not os.path.exists(ebuildpath):
		out.ewarn("No ebuild found for '%s'" % pkg)
		# (elided: continue; else branch:)
	# NOTE(review): `settings.get("PORTAGE_DEBUG", "") == 1` compares a
	# string to int 1 and is therefore always False; "1" was likely
	# intended (same issue exists in action_config).
	portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
		pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
		# (elided: tree="vartree"))
def action_search(root_config, myopts, myfiles, spinner):
	"""Implement "emerge --search": feed each term in *myfiles* through a
	search instance and print its results; regex errors in a term abort
	with a message.
	NOTE(review): lines are elided from this view -- confirm control flow.
	"""
	# (elided: if not myfiles:)
	print "emerge: no search terms provided."
	# (elided else:)
	searchinstance = search(root_config,
		spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	for mysearch in myfiles:
		# (elided try:)
		searchinstance.execute(mysearch)
		except re.error, comment:
			print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
			# (elided: exit with error)
		searchinstance.output()
13478 def action_depclean(settings, trees, ldpath_mtimes,
13479 myopts, action, myfiles, spinner):
13480 # Kill packages that aren't explicitly merged or are required as a
13481 # dependency of another package. World file is explicit.
13483 # Global depclean or prune operations are not very safe when there are
13484 # missing dependencies since it's unknown how badly incomplete
13485 # the dependency graph is, and we might accidentally remove packages
13486 # that should have been pulled into the graph. On the other hand, it's
13487 # relatively safe to ignore missing deps when only asked to remove
13488 # specific packages.
13489 allow_missing_deps = len(myfiles) > 0
13492 msg.append("Always study the list of packages to be cleaned for any obvious\n")
13493 msg.append("mistakes. Packages that are part of the world set will always\n")
13494 msg.append("be kept. They can be manually added to this set with\n")
13495 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
13496 msg.append("package.provided (see portage(5)) will be removed by\n")
13497 msg.append("depclean, even if they are part of the world set.\n")
13499 msg.append("As a safety measure, depclean will not remove any packages\n")
13500 msg.append("unless *all* required dependencies have been resolved. As a\n")
13501 msg.append("consequence, it is often necessary to run %s\n" % \
13502 good("`emerge --update"))
13503 msg.append(good("--newuse --deep @system @world`") + \
13504 " prior to depclean.\n")
13506 if action == "depclean" and "--quiet" not in myopts and not myfiles:
13507 portage.writemsg_stdout("\n")
13509 portage.writemsg_stdout(colorize("WARN", " * ") + x)
13511 xterm_titles = "notitles" not in settings.features
13512 myroot = settings["ROOT"]
13513 root_config = trees[myroot]["root_config"]
13514 getSetAtoms = root_config.setconfig.getSetAtoms
13515 vardb = trees[myroot]["vartree"].dbapi
13517 required_set_names = ("system", "world")
13521 for s in required_set_names:
13522 required_sets[s] = InternalPackageSet(
13523 initial_atoms=getSetAtoms(s))
13526 # When removing packages, use a temporary version of world
13527 # which excludes packages that are intended to be eligible for
13529 world_temp_set = required_sets["world"]
13530 system_set = required_sets["system"]
13532 if not system_set or not world_temp_set:
13535 writemsg_level("!!! You have no system list.\n",
13536 level=logging.ERROR, noiselevel=-1)
13538 if not world_temp_set:
13539 writemsg_level("!!! You have no world file.\n",
13540 level=logging.WARNING, noiselevel=-1)
13542 writemsg_level("!!! Proceeding is likely to " + \
13543 "break your installation.\n",
13544 level=logging.WARNING, noiselevel=-1)
13545 if "--pretend" not in myopts:
13546 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13548 if action == "depclean":
13549 emergelog(xterm_titles, " >>> depclean")
13552 args_set = InternalPackageSet()
13555 if not is_valid_package_atom(x):
13556 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13557 level=logging.ERROR, noiselevel=-1)
13558 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13561 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13562 except portage.exception.AmbiguousPackageName, e:
13563 msg = "The short ebuild name \"" + x + \
13564 "\" is ambiguous. Please specify " + \
13565 "one of the following " + \
13566 "fully-qualified ebuild names instead:"
13567 for line in textwrap.wrap(msg, 70):
13568 writemsg_level("!!! %s\n" % (line,),
13569 level=logging.ERROR, noiselevel=-1)
13571 writemsg_level(" %s\n" % colorize("INFORM", i),
13572 level=logging.ERROR, noiselevel=-1)
13573 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13576 matched_packages = False
13579 matched_packages = True
13581 if not matched_packages:
13582 writemsg_level(">>> No packages selected for removal by %s\n" % \
13586 writemsg_level("\nCalculating dependencies ")
13587 resolver_params = create_depgraph_params(myopts, "remove")
13588 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13589 vardb = resolver.trees[myroot]["vartree"].dbapi
13591 if action == "depclean":
13594 # Pull in everything that's installed but not matched
13595 # by an argument atom since we don't want to clean any
13596 # package if something depends on it.
13598 world_temp_set.clear()
13603 if args_set.findAtomForPackage(pkg) is None:
13604 world_temp_set.add("=" + pkg.cpv)
13606 except portage.exception.InvalidDependString, e:
13607 show_invalid_depstring_notice(pkg,
13608 pkg.metadata["PROVIDE"], str(e))
13610 world_temp_set.add("=" + pkg.cpv)
13613 elif action == "prune":
13615 # Pull in everything that's installed since we don't
13616 # to prune a package if something depends on it.
13617 world_temp_set.clear()
13618 world_temp_set.update(vardb.cp_all())
13622 # Try to prune everything that's slotted.
13623 for cp in vardb.cp_all():
13624 if len(vardb.cp_list(cp)) > 1:
13627 # Remove atoms from world that match installed packages
13628 # that are also matched by argument atoms, but do not remove
13629 # them if they match the highest installed version.
13632 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13633 if not pkgs_for_cp or pkg not in pkgs_for_cp:
13634 raise AssertionError("package expected in matches: " + \
13635 "cp = %s, cpv = %s matches = %s" % \
13636 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13638 highest_version = pkgs_for_cp[-1]
13639 if pkg == highest_version:
13640 # pkg is the highest version
13641 world_temp_set.add("=" + pkg.cpv)
13644 if len(pkgs_for_cp) <= 1:
13645 raise AssertionError("more packages expected: " + \
13646 "cp = %s, cpv = %s matches = %s" % \
13647 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13650 if args_set.findAtomForPackage(pkg) is None:
13651 world_temp_set.add("=" + pkg.cpv)
13653 except portage.exception.InvalidDependString, e:
13654 show_invalid_depstring_notice(pkg,
13655 pkg.metadata["PROVIDE"], str(e))
13657 world_temp_set.add("=" + pkg.cpv)
13661 for s, package_set in required_sets.iteritems():
13662 set_atom = SETPREFIX + s
13663 set_arg = SetArg(arg=set_atom, set=package_set,
13664 root_config=resolver.roots[myroot])
13665 set_args[s] = set_arg
13666 for atom in set_arg.set:
13667 resolver._dep_stack.append(
13668 Dependency(atom=atom, root=myroot, parent=set_arg))
13669 resolver.digraph.add(set_arg, None)
13671 success = resolver._complete_graph()
13672 writemsg_level("\b\b... done!\n")
13674 resolver.display_problems()
13679 def unresolved_deps():
# Report dependencies that could not be satisfied during graph completion.
# Returns (inferred from the visible call site `if unresolved_deps():`)
# a truthy value when unresolvable deps should abort the operation.
# NOTE(review): gappy numbered listing — absent line numbers (13680, 13686,
# 13688-13689, 13692, 13695, 13699, 13710) indicate statements missing from
# this excerpt (likely `msg = []` initialization and early `return` lines).
13681 unresolvable = set()
# Collect (atom, parent cpv) pairs for deps stronger than SOFT priority;
# soft deps are tolerated during unmerge ordering.
13682 for dep in resolver._initially_unsatisfied_deps:
13683 if isinstance(dep.parent, Package) and \
13684 (dep.priority > UnmergeDepPriority.SOFT):
13685 unresolvable.add((dep.atom, dep.parent.cpv))
13687 if not unresolvable:
# `allow_missing_deps` comes from the enclosing scope — presumably set by
# --nodeps / debug handling earlier in the function; not visible here.
13690 if unresolvable and not allow_missing_deps:
13691 prefix = bad(" * ")
13693 msg.append("Dependencies could not be completely resolved due to")
13694 msg.append("the following required packages not being installed:")
13696 for atom, parent in unresolvable:
13697 msg.append("  %s pulled in by:" % (atom,))
13698 msg.append("    %s" % (parent,))
# Advise the standard recovery procedure before depclean/prune.
13700 msg.append("Have you forgotten to run " + \
13701 good("`emerge --update --newuse --deep @system @world`") + " prior")
13702 msg.append(("to %s? It may be necessary to manually " + \
13703 "uninstall packages that no longer") % action)
13704 msg.append("exist in the portage tree since " + \
13705 "it may not be possible to satisfy their")
13706 msg.append("dependencies.  Also, be aware of " + \
13707 "the --with-bdeps option that is documented")
13708 msg.append("in " + good("`man emerge`") + ".")
13709 if action == "prune":
13711 msg.append("If you would like to ignore " + \
13712 "dependencies then use %s." % good("--nodeps"))
# Emit every line prefixed with the bad-colored " * " marker at ERROR level.
13713 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13714 level=logging.ERROR, noiselevel=-1)
13718 if unresolved_deps():
13721 graph = resolver.digraph.copy()
13722 required_pkgs_total = 0
13724 if isinstance(node, Package):
13725 required_pkgs_total += 1
13727 def show_parents(child_node):
# Print the reverse dependencies (parents in the depgraph) that keep
# child_node installed, for --verbose output.
# NOTE(review): listing gaps (13733-13734, 13737-13738, 13742) hide the
# early `return`, the `parent_strs = []` initialization and its sort, and
# the `msg = []` setup — confirm against the full source before editing.
13728 parent_nodes = graph.parent_nodes(child_node)
13729 if not parent_nodes:
13730 # With --prune, the highest version can be pulled in without any
13731 # real parent since all installed packages are pulled in.  In that
13732 # case there's nothing to show here.
# Fall back to str(node) for graph nodes that are not Packages (no cpv).
13735 for node in parent_nodes:
13736 parent_strs.append(str(getattr(node, "cpv", node)))
13739 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13740 for parent_str in parent_strs:
13741 msg.append("    %s\n" % (parent_str,))
13743 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13745 def cmp_pkg_cpv(pkg1, pkg2):
13746 """Sort Package instances by cpv."""
# Classic cmp-style comparator for use with cmp_sort_key (see the visible
# call sites `sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv))`).
# NOTE(review): the listing omits lines 13748 and 13750-13753 — the actual
# `return 1` / `return 0` / `return -1` bodies of these branches.
13747 if pkg1.cpv > pkg2.cpv:
13749 elif pkg1.cpv == pkg2.cpv:
13754 def create_cleanlist():
# Build the list of installed Packages eligible for removal: anything in
# vardb that is not reachable in the (already completed) dependency graph.
# Behavior differs per `action` ("depclean" vs "prune") from the enclosing
# scope.  Returns the list (possibly empty).
# NOTE(review): gappy listing — e.g. the `try:` before findAtomForPackage
# (13761-13762), the `continue` (13766), the show_parents() calls after the
# `--verbose` branches, and the writemsg_stdout calls around 13794-13804
# are partially missing from this excerpt.
13755 pkgs_to_remove = []
13757 if action == "depclean":
# With argument atoms: only candidates matched by an argument are removable.
13760 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13763 arg_atom = args_set.findAtomForPackage(pkg)
13764 except portage.exception.InvalidDependString:
13765 # this error has already been displayed by now
13769 if pkg not in graph:
13770 pkgs_to_remove.append(pkg)
13771 elif "--verbose" in myopts:
# Without arguments: every installed package absent from the graph goes.
13775 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13776 if pkg not in graph:
13777 pkgs_to_remove.append(pkg)
13778 elif "--verbose" in myopts:
13781 elif action == "prune":
13782 # Prune really uses all installed instead of world. It's not
13783 # a real reverse dependency so don't display it as such.
13784 graph.remove(set_args["world"])
13786 for atom in args_set:
13787 for pkg in vardb.match_pkgs(atom):
13788 if pkg not in graph:
13789 pkgs_to_remove.append(pkg)
13790 elif "--verbose" in myopts:
# Nothing selected: print guidance (--verbose for reverse deps; --nodeps
# for prune) instead of a clean list.
13793 if not pkgs_to_remove:
13795 ">>> No packages selected for removal by %s\n" % action)
13796 if "--verbose" not in myopts:
13798 ">>> To see reverse dependencies, use %s\n" % \
13800 if action == "prune":
13802 ">>> To ignore dependencies, use %s\n" % \
13805 return pkgs_to_remove
13807 cleanlist = create_cleanlist()
13810 clean_set = set(cleanlist)
13812 # Check if any of these package are the sole providers of libraries
13813 # with consumers that have not been selected for removal. If so, these
13814 # packages and any dependencies need to be added to the graph.
13815 real_vardb = trees[myroot]["vartree"].dbapi
13816 linkmap = real_vardb.linkmap
13817 liblist = linkmap.listLibraryObjects()
13818 consumer_cache = {}
13819 provider_cache = {}
13823 writemsg_level(">>> Checking for lib consumers...\n")
13825 for pkg in cleanlist:
13826 pkg_dblink = real_vardb._dblink(pkg.cpv)
13827 provided_libs = set()
13829 for lib in liblist:
13830 if pkg_dblink.isowner(lib, myroot):
13831 provided_libs.add(lib)
13833 if not provided_libs:
13837 for lib in provided_libs:
13838 lib_consumers = consumer_cache.get(lib)
13839 if lib_consumers is None:
13840 lib_consumers = linkmap.findConsumers(lib)
13841 consumer_cache[lib] = lib_consumers
13843 consumers[lib] = lib_consumers
13848 for lib, lib_consumers in consumers.items():
13849 for consumer_file in list(lib_consumers):
13850 if pkg_dblink.isowner(consumer_file, myroot):
13851 lib_consumers.remove(consumer_file)
13852 if not lib_consumers:
13858 for lib, lib_consumers in consumers.iteritems():
13860 soname = soname_cache.get(lib)
13862 soname = linkmap.getSoname(lib)
13863 soname_cache[lib] = soname
13865 consumer_providers = []
13866 for lib_consumer in lib_consumers:
13867 providers = provider_cache.get(lib)
13868 if providers is None:
13869 providers = linkmap.findProviders(lib_consumer)
13870 provider_cache[lib_consumer] = providers
13871 if soname not in providers:
13872 # Why does this happen?
13874 consumer_providers.append(
13875 (lib_consumer, providers[soname]))
13877 consumers[lib] = consumer_providers
13879 consumer_map[pkg] = consumers
13883 search_files = set()
13884 for consumers in consumer_map.itervalues():
13885 for lib, consumer_providers in consumers.iteritems():
13886 for lib_consumer, providers in consumer_providers:
13887 search_files.add(lib_consumer)
13888 search_files.update(providers)
13890 writemsg_level(">>> Assigning files to packages...\n")
13891 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13893 for pkg, consumers in consumer_map.items():
13894 for lib, consumer_providers in consumers.items():
13895 lib_consumers = set()
13897 for lib_consumer, providers in consumer_providers:
13898 owner_set = file_owners.get(lib_consumer)
13899 provider_dblinks = set()
13900 provider_pkgs = set()
13902 if len(providers) > 1:
13903 for provider in providers:
13904 provider_set = file_owners.get(provider)
13905 if provider_set is not None:
13906 provider_dblinks.update(provider_set)
13908 if len(provider_dblinks) > 1:
13909 for provider_dblink in provider_dblinks:
13910 pkg_key = ("installed", myroot,
13911 provider_dblink.mycpv, "nomerge")
13912 if pkg_key not in clean_set:
13913 provider_pkgs.add(vardb.get(pkg_key))
13918 if owner_set is not None:
13919 lib_consumers.update(owner_set)
13921 for consumer_dblink in list(lib_consumers):
13922 if ("installed", myroot, consumer_dblink.mycpv,
13923 "nomerge") in clean_set:
13924 lib_consumers.remove(consumer_dblink)
13928 consumers[lib] = lib_consumers
13932 del consumer_map[pkg]
13935 # TODO: Implement a package set for rebuilding consumer packages.
13937 msg = "In order to avoid breakage of link level " + \
13938 "dependencies, one or more packages will not be removed. " + \
13939 "This can be solved by rebuilding " + \
13940 "the packages that pulled them in."
13942 prefix = bad(" * ")
13943 from textwrap import wrap
13944 writemsg_level("".join(prefix + "%s\n" % line for \
13945 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13948 for pkg, consumers in consumer_map.iteritems():
13949 unique_consumers = set(chain(*consumers.values()))
13950 unique_consumers = sorted(consumer.mycpv \
13951 for consumer in unique_consumers)
13953 msg.append(" %s pulled in by:" % (pkg.cpv,))
13954 for consumer in unique_consumers:
13955 msg.append(" %s" % (consumer,))
13957 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13958 level=logging.WARNING, noiselevel=-1)
13960 # Add lib providers to the graph as children of lib consumers,
13961 # and also add any dependencies pulled in by the provider.
13962 writemsg_level(">>> Adding lib providers to graph...\n")
13964 for pkg, consumers in consumer_map.iteritems():
13965 for consumer_dblink in set(chain(*consumers.values())):
13966 consumer_pkg = vardb.get(("installed", myroot,
13967 consumer_dblink.mycpv, "nomerge"))
13968 if not resolver._add_pkg(pkg,
13969 Dependency(parent=consumer_pkg,
13970 priority=UnmergeDepPriority(runtime=True),
13972 resolver.display_problems()
13975 writemsg_level("\nCalculating dependencies ")
13976 success = resolver._complete_graph()
13977 writemsg_level("\b\b... done!\n")
13978 resolver.display_problems()
13981 if unresolved_deps():
13984 graph = resolver.digraph.copy()
13985 required_pkgs_total = 0
13987 if isinstance(node, Package):
13988 required_pkgs_total += 1
13989 cleanlist = create_cleanlist()
13992 clean_set = set(cleanlist)
13994 # Use a topological sort to create an unmerge order such that
13995 # each package is unmerged before it's dependencies. This is
13996 # necessary to avoid breaking things that may need to run
13997 # during pkg_prerm or pkg_postrm phases.
13999 # Create a new graph to account for dependencies between the
14000 # packages being unmerged.
14004 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14005 runtime = UnmergeDepPriority(runtime=True)
14006 runtime_post = UnmergeDepPriority(runtime_post=True)
14007 buildtime = UnmergeDepPriority(buildtime=True)
14009 "RDEPEND": runtime,
14010 "PDEPEND": runtime_post,
14011 "DEPEND": buildtime,
14014 for node in clean_set:
14015 graph.add(node, None)
14017 node_use = node.metadata["USE"].split()
14018 for dep_type in dep_keys:
14019 depstr = node.metadata[dep_type]
14023 portage.dep._dep_check_strict = False
14024 success, atoms = portage.dep_check(depstr, None, settings,
14025 myuse=node_use, trees=resolver._graph_trees,
14028 portage.dep._dep_check_strict = True
14030 # Ignore invalid deps of packages that will
14031 # be uninstalled anyway.
14034 priority = priority_map[dep_type]
14036 if not isinstance(atom, portage.dep.Atom):
14037 # Ignore invalid atoms returned from dep_check().
14041 matches = vardb.match_pkgs(atom)
14044 for child_node in matches:
14045 if child_node in clean_set:
14046 graph.add(child_node, node, priority=priority)
14049 if len(graph.order) == len(graph.root_nodes()):
14050 # If there are no dependencies between packages
14051 # let unmerge() group them by cat/pn.
14053 cleanlist = [pkg.cpv for pkg in graph.order]
14055 # Order nodes from lowest to highest overall reference count for
14056 # optimal root node selection.
14057 node_refcounts = {}
14058 for node in graph.order:
14059 node_refcounts[node] = len(graph.parent_nodes(node))
14060 def cmp_reference_count(node1, node2):
14061 return node_refcounts[node1] - node_refcounts[node2]
14062 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14064 ignore_priority_range = [None]
14065 ignore_priority_range.extend(
14066 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14067 while not graph.empty():
14068 for ignore_priority in ignore_priority_range:
14069 nodes = graph.root_nodes(ignore_priority=ignore_priority)
14073 raise AssertionError("no root nodes")
14074 if ignore_priority is not None:
14075 # Some deps have been dropped due to circular dependencies,
14076 # so only pop one node in order do minimize the number that
14081 cleanlist.append(node.cpv)
14083 unmerge(root_config, myopts, "unmerge", cleanlist,
14084 ldpath_mtimes, ordered=ordered)
14086 if action == "prune":
14089 if not cleanlist and "--quiet" in myopts:
14092 print "Packages installed: "+str(len(vardb.cpv_all()))
14093 print "Packages in world: " + \
14094 str(len(root_config.sets["world"].getAtoms()))
14095 print "Packages in system: " + \
14096 str(len(root_config.sets["system"].getAtoms()))
14097 print "Required packages: "+str(required_pkgs_total)
14098 if "--pretend" in myopts:
14099 print "Number to remove: "+str(len(cleanlist))
14101 print "Number removed: "+str(len(cleanlist))
14103 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# NOTE(review): gappy numbered listing.  Missing lines include 14114/14117
# (presumably the enclosing retry loop and the `try:` guarding
# loadResumeCommand), 14122-14123 (`raise` when skip_unsatisfied is False),
# 14132 (`continue`), 14142-14143, 14149, 14155, 14160, 14162, 14169,
# 14172-14174.  `skip_masked` is referenced but not assigned in the visible
# lines — confirm against the full source.  Code below is byte-identical.
14105 	Construct a depgraph for the given resume list. This will raise
14106 	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14108 	@returns: (success, depgraph, dropped_tasks)
14111 skip_unsatisfied = True
14112 mergelist = mtimedb["resume"]["mergelist"]
14113 dropped_tasks = set()
14115 mydepgraph = depgraph(settings, trees,
14116 myopts, myparams, spinner)
14118 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14119 skip_masked=skip_masked)
14120 except depgraph.UnsatisfiedResumeDep, e:
14121 if not skip_unsatisfied:
# Drop every mergelist entry whose package has unsatisfied deps, plus any
# parent whose own deps would become unsatisfied by that removal
# (transitive closure via a worklist).
14124 graph = mydepgraph.digraph
14125 unsatisfied_parents = dict((dep.parent, dep.parent) \
14126 for dep in e.value)
14127 traversed_nodes = set()
14128 unsatisfied_stack = list(unsatisfied_parents)
14129 while unsatisfied_stack:
14130 pkg = unsatisfied_stack.pop()
14131 if pkg in traversed_nodes:
14133 traversed_nodes.add(pkg)
14135 # If this package was pulled in by a parent
14136 # package scheduled for merge, removing this
14137 # package may cause the the parent package's
14138 # dependency to become unsatisfied.
14139 for parent_node in graph.parent_nodes(pkg):
14140 if not isinstance(parent_node, Package) \
14141 or parent_node.operation not in ("merge", "nomerge"):
# Soft deps are ignored when recomputing the parent's unsatisfied children.
14144 graph.child_nodes(parent_node,
14145 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14146 if pkg in unsatisfied:
14147 unsatisfied_parents[parent_node] = parent_node
14148 unsatisfied_stack.append(parent_node)
14150 pruned_mergelist = []
14151 for x in mergelist:
14152 if isinstance(x, list) and \
14153 tuple(x) not in unsatisfied_parents:
14154 pruned_mergelist.append(x)
14156 # If the mergelist doesn't shrink then this loop is infinite.
14157 if len(pruned_mergelist) == len(mergelist):
14158 # This happens if a package can't be dropped because
14159 # it's already installed, but it has unsatisfied PDEPEND.
# Mutate the resume mergelist in place so mtimedb sees the pruned list.
14161 mergelist[:] = pruned_mergelist
14163 # Exclude installed packages that have been removed from the graph due
14164 # to failure to build/install runtime dependencies after the dependent
14165 # package has already been installed.
14166 dropped_tasks.update(pkg for pkg in \
14167 unsatisfied_parents if pkg.operation != "nomerge")
# break_refs avoids memory retention via back-references before retrying.
14168 mydepgraph.break_refs(unsatisfied_parents)
14170 del e, graph, traversed_nodes, \
14171 unsatisfied_parents, unsatisfied_stack
14175 return (success, mydepgraph, dropped_tasks)
14177 def action_build(settings, trees, mtimedb,
14178 myopts, myaction, myfiles, spinner):
# Top-level driver for `emerge <targets>` / `emerge --resume`: validates
# any saved resume data, builds a depgraph (fresh or resumed), optionally
# displays/prompts, then hands the merge list to the Scheduler and finally
# auto-cleans.  Returns an exit status (the `return retval` at the end of
# the real function falls outside this excerpt).
# NOTE(review): gappy numbered listing — many absent line numbers (14179,
# 14184, 14187-14188, 14191-14192, 14195, 14200-14204, 14207-14208,
# 14211-14214, 14218, 14222, 14230, 14233, 14239, 14241, 14243-14244,
# 14253, 14274, 14278, 14280, 14282-14284, 14286, 14288-14290, 14292-14293,
# 14297-14298, 14301-14303, 14306, 14313-14318, 14325-14326, 14329-14330,
# 14334, 14337-14338, 14342-14343, 14347-14348, 14353-14354, 14357, 14367,
# 14371, 14377-14379, 14381-14383, 14387, 14391-14392, 14400-14403,
# 14406-14407, 14411, 14413, 14417, 14421-14422, 14424, 14426-14427, 14436,
# 14443, 14445, 14451-14452, 14455-14457, 14471, 14477-14478, 14480-14481,
# 14484, 14486, 14488-14491, 14494, 14500, 14507-14508, 14514, 14527-14528,
# 14541-14542, 14551, 14554, 14560, 14566, 14569, 14575, 14577, 14582-14583,
# 14591, 14594, 14601) hide `del` statements, `try:`/`else:` lines, early
# `return 1` paths, spinner start/stop calls, etc.  Code is byte-identical.
# --- Validate saved resume data so later code can assume its shape. ---
14180 # validate the state of the resume data
14181 # so that we can make assumptions later.
14182 for k in ("resume", "resume_backup"):
14183 if k not in mtimedb:
14185 resume_data = mtimedb[k]
14186 if not isinstance(resume_data, dict):
# Malformed entries are presumably deleted in the missing lines (14187-88
# etc.); each check guards one field of the persisted dict.
14189 mergelist = resume_data.get("mergelist")
14190 if not isinstance(mergelist, list):
14193 for x in mergelist:
14194 if not (isinstance(x, list) and len(x) == 4):
14196 pkg_type, pkg_root, pkg_key, pkg_action = x
14197 if pkg_root not in trees:
14198 # Current $ROOT setting differs,
14199 # so the list must be stale.
14205 resume_opts = resume_data.get("myopts")
14206 if not isinstance(resume_opts, (dict, list)):
14209 favorites = resume_data.get("favorites")
14210 if not isinstance(favorites, list):
# --- Resume: restore saved options (minus interactive/display ones). ---
14215 if "--resume" in myopts and \
14216 ("resume" in mtimedb or
14217 "resume_backup" in mtimedb):
14219 if "resume" not in mtimedb:
14220 mtimedb["resume"] = mtimedb["resume_backup"]
14221 del mtimedb["resume_backup"]
14223 # "myopts" is a list for backward compatibility.
14224 resume_opts = mtimedb["resume"].get("myopts", [])
14225 if isinstance(resume_opts, list):
14226 resume_opts = dict((k,True) for k in resume_opts)
# These options must not carry over into a resumed run.
14227 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14228 resume_opts.pop(opt, None)
14229 myopts.update(resume_opts)
14231 if "--debug" in myopts:
14232 writemsg_level("myopts %s\n" % (myopts,))
14234 # Adjust config according to options of the command being resumed.
14235 for myroot in trees:
14236 mysettings =  trees[myroot]["vartree"].settings
14237 mysettings.unlock()
14238 adjust_config(myopts, mysettings)
14240 del myroot, mysettings
# --- Cache frequently-tested option flags as locals. ---
14242 ldpath_mtimes = mtimedb["ldpath"]
14245 buildpkgonly = "--buildpkgonly" in myopts
14246 pretend = "--pretend" in myopts
14247 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14248 ask = "--ask" in myopts
14249 nodeps = "--nodeps" in myopts
14250 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14251 tree = "--tree" in myopts
14252 if nodeps and tree:
14254 del myopts["--tree"]
14255 portage.writemsg(colorize("WARN", " * ") + \
14256 "--tree is broken with --nodeps. Disabling...\n")
14257 debug = "--debug" in myopts
14258 verbose = "--verbose" in myopts
14259 quiet = "--quiet" in myopts
14260 if pretend or fetchonly:
14261 # make the mtimedb readonly
14262 mtimedb.filename = None
14263 if "--digest" in myopts:
# Warn that --digest can mask Manifest corruption; repoman is preferred.
14264 msg = "The --digest option can prevent corruption from being" + \
14265 " noticed. The `repoman manifest` command is the preferred" + \
14266 " way to generate manifests and it is capable of doing an" + \
14267 " entire repository or category at once."
14268 prefix = bad(" * ")
14269 writemsg(prefix + "\n")
14270 from textwrap import wrap
14271 for line in wrap(msg, 72):
14272 writemsg("%s%s\n" % (prefix, line))
14273 writemsg(prefix + "\n")
# --- Pre-calculation banner describing what will be shown. ---
14275 if "--quiet" not in myopts and \
14276 ("--pretend" in myopts or "--ask" in myopts or \
14277 "--tree" in myopts or "--verbose" in myopts):
14279 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14281 elif "--buildpkgonly" in myopts:
# NOTE(review): `action` here is a local presumably assigned in the missing
# lines 14278-14284 ("fetched"/"built"/"merged") — confirm in full source.
14285 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14287 print darkgreen("These are the packages that would be %s, in reverse order:") % action
14291 print darkgreen("These are the packages that would be %s, in order:") % action
14294 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14295 if not show_spinner:
14296 spinner.update = spinner.update_quiet
# --- Build the depgraph: resumed path first, then the fresh path. ---
14299 favorites = mtimedb["resume"].get("favorites")
14300 if not isinstance(favorites, list):
14304 print "Calculating dependencies  ",
14305 myparams = create_depgraph_params(myopts, myaction)
14307 resume_data = mtimedb["resume"]
14308 mergelist = resume_data["mergelist"]
14309 if mergelist and "--skipfirst" in myopts:
# Skip the first pending "merge" entry (the missing lines presumably slice
# the mergelist at index i).
14310 for i, task in enumerate(mergelist):
14311 if isinstance(task, list) and \
14312 task and task[-1] == "merge":
14319 success, mydepgraph, dropped_tasks = resume_depgraph(
14320 settings, trees, mtimedb, myopts, myparams, spinner)
14321 except (portage.exception.PackageNotFound,
14322 depgraph.UnsatisfiedResumeDep), e:
14323 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14324 mydepgraph = e.depgraph
# --- Resume failed: explain why and show the stale list if debugging. ---
14327 from textwrap import wrap
14328 from portage.output import EOutput
14331 resume_data = mtimedb["resume"]
14332 mergelist = resume_data.get("mergelist")
14333 if not isinstance(mergelist, list):
14335 if mergelist and debug or (verbose and not quiet):
14336 out.eerror("Invalid resume list:")
14339 for task in mergelist:
14340 if isinstance(task, list):
14341 out.eerror(indent + str(tuple(task)))
14344 if isinstance(e, depgraph.UnsatisfiedResumeDep):
14345 out.eerror("One or more packages are either masked or " + \
14346 "have missing dependencies:")
14349 for dep in e.value:
14350 if dep.atom is None:
14351 out.eerror(indent + "Masked package:")
14352 out.eerror(2 * indent + str(dep.parent))
14355 out.eerror(indent + str(dep.atom) + " pulled in by:")
14356 out.eerror(2 * indent + str(dep.parent))
14358 msg = "The resume list contains packages " + \
14359 "that are either masked or have " + \
14360 "unsatisfied dependencies. " + \
14361 "Please restart/continue " + \
14362 "the operation manually, or use --skipfirst " + \
14363 "to skip the first package in the list and " + \
14364 "any other packages that may be " + \
14365 "masked or have missing dependencies."
14366 for line in wrap(msg, 72):
14368 elif isinstance(e, portage.exception.PackageNotFound):
14369 out.eerror("An expected package is " + \
14370 "not available: %s" % str(e))
14372 msg = "The resume list contains one or more " + \
14373 "packages that are no longer " + \
14374 "available. Please restart/continue " + \
14375 "the operation manually."
14376 for line in wrap(msg, 72):
14380 print "\b\b... done!"
# Report tasks silently dropped from the resume list (masking / unsat deps).
14384 portage.writemsg("!!! One or more packages have been " + \
14385 "dropped due to\n" + \
14386 "!!! masking or unsatisfied dependencies:\n\n",
14388 for task in dropped_tasks:
14389 portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14390 portage.writemsg("\n", noiselevel=-1)
14393 if mydepgraph is not None:
14394 mydepgraph.display_problems()
14395 if not (ask or pretend):
14396 # delete the current list and also the backup
14397 # since it's probably stale too.
14398 for k in ("resume", "resume_backup"):
14399 mtimedb.pop(k, None)
14404 if ("--resume" in myopts):
14405 print darkgreen("emerge: It seems we have nothing to resume...")
# --- Fresh (non-resume) depgraph from the command-line targets. ---
14408 myparams = create_depgraph_params(myopts, myaction)
14409 if "--quiet" not in myopts and "--nodeps" not in myopts:
14410 print "Calculating dependencies  ",
14412 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14414 retval, favorites = mydepgraph.select_files(myfiles)
14415 except portage.exception.PackageNotFound, e:
14416 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14418 except portage.exception.PackageSetNotFound, e:
14419 root_config = trees[settings["ROOT"]]["root_config"]
14420 display_missing_pkg_set(root_config, e.value)
14423 print "\b\b... done!"
14425 mydepgraph.display_problems()
# --- Interactive display + --ask prompt path. ---
14428 if "--pretend" not in myopts and \
14429 ("--ask" in myopts or "--tree" in myopts or \
14430 "--verbose" in myopts) and \
14431 not ("--quiet" in myopts and "--ask" not in myopts):
14432 if "--resume" in myopts:
14433 mymergelist = mydepgraph.altlist()
14434 if len(mymergelist) == 0:
14435 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14437 favorites = mtimedb["resume"]["favorites"]
14438 retval = mydepgraph.display(
14439 mydepgraph.altlist(reversed=tree),
14440 favorites=favorites)
14441 mydepgraph.display_problems()
14442 if retval != os.EX_OK:
14444 prompt="Would you like to resume merging these packages?"
14446 retval = mydepgraph.display(
14447 mydepgraph.altlist(reversed=("--tree" in myopts)),
14448 favorites=favorites)
14449 mydepgraph.display_problems()
14450 if retval != os.EX_OK:
# Determine whether anything will actually be merged (vs. all nomerge).
14453 for x in mydepgraph.altlist():
14454 if isinstance(x, Package) and x.operation == "merge":
14458 sets = trees[settings["ROOT"]]["root_config"].sets
14459 world_candidates = None
14460 if "--noreplace" in myopts and \
14461 not oneshot and favorites:
14462 # Sets that are not world candidates are filtered
14463 # out here since the favorites list needs to be
14464 # complete for depgraph.loadResumeCommand() to
14465 # operate correctly.
14466 world_candidates = [x for x in favorites \
14467 if not (x.startswith(SETPREFIX) and \
14468 not sets[x[1:]].world_candidate)]
14469 if "--noreplace" in myopts and \
14470 not oneshot and world_candidates:
14472 for x in world_candidates:
14473 print " %s %s" % (good("*"), x)
14474 prompt="Would you like to add these packages to your world favorites?"
14475 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14476 prompt="Nothing to merge; would you like to auto-clean packages?"
14479 print "Nothing to merge; quitting."
14482 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14483 prompt="Would you like to fetch the source files for these packages?"
14485 prompt="Would you like to merge these packages?"
14487 if "--ask" in myopts and userquery(prompt) == "No":
14492 # Don't ask again (e.g. when auto-cleaning packages after merge)
14493 myopts.pop("--ask", None)
# --- Pure --pretend display path (no prompt). ---
14495 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14496 if ("--resume" in myopts):
14497 mymergelist = mydepgraph.altlist()
14498 if len(mymergelist) == 0:
14499 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14501 favorites = mtimedb["resume"]["favorites"]
14502 retval = mydepgraph.display(
14503 mydepgraph.altlist(reversed=tree),
14504 favorites=favorites)
14505 mydepgraph.display_problems()
14506 if retval != os.EX_OK:
14509 retval = mydepgraph.display(
14510 mydepgraph.altlist(reversed=("--tree" in myopts)),
14511 favorites=favorites)
14512 mydepgraph.display_problems()
14513 if retval != os.EX_OK:
# --buildpkgonly refuses to proceed unless the graph has no hard dep edges
# (hasallzeros with medium priorities ignored).  This check appears twice:
# once on the pretend path, once on the real-merge path below.
14515 if "--buildpkgonly" in myopts:
14516 graph_copy = mydepgraph.digraph.clone()
14517 removed_nodes = set()
14518 for node in graph_copy:
14519 if not isinstance(node, Package) or \
14520 node.operation == "nomerge":
14521 removed_nodes.add(node)
14522 graph_copy.difference_update(removed_nodes)
14523 if not graph_copy.hasallzeros(ignore_priority = \
14524 DepPrioritySatisfiedRange.ignore_medium):
14525 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14526 print "!!! You have to merge the dependencies before you can build this package.\n"
14529 if "--buildpkgonly" in myopts:
14530 graph_copy = mydepgraph.digraph.clone()
14531 removed_nodes = set()
14532 for node in graph_copy:
14533 if not isinstance(node, Package) or \
14534 node.operation == "nomerge":
14535 removed_nodes.add(node)
14536 graph_copy.difference_update(removed_nodes)
14537 if not graph_copy.hasallzeros(ignore_priority = \
14538 DepPrioritySatisfiedRange.ignore_medium):
14539 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14540 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# --- Actual merge: resumed list handed straight to the Scheduler. ---
14543 if ("--resume" in myopts):
14544 favorites=mtimedb["resume"]["favorites"]
14545 mymergelist = mydepgraph.altlist()
14546 mydepgraph.break_refs(mymergelist)
14547 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14548 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
# Free the depgraph before merging to reduce memory footprint.
14549 del mydepgraph, mymergelist
14550 clear_caches(trees)
14552 retval = mergetask.merge()
14553 merge_count = mergetask.curval
# --- Non-resume merge: persist resume state first, then schedule. ---
14555 if "resume" in mtimedb and \
14556 "mergelist" in mtimedb["resume"] and \
14557 len(mtimedb["resume"]["mergelist"]) > 1:
14558 mtimedb["resume_backup"] = mtimedb["resume"]
14559 del mtimedb["resume"]
14561 mtimedb["resume"]={}
14562 # Stored as a dict starting with portage-2.1.6_rc1, and supported
14563 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14564 # a list type for options.
14565 mtimedb["resume"]["myopts"] = myopts.copy()
14567 # Convert Atom instances to plain str.
14568 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
# --digest: regenerate Manifests for every ebuild about to be merged.
14570 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14571 for pkgline in mydepgraph.altlist():
14572 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14573 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14574 tmpsettings = portage.config(clone=settings)
14576 if settings.get("PORTAGE_DEBUG", "") == "1":
14578 retval = portage.doebuild(
14579 y, "digest", settings["ROOT"], tmpsettings, edebug,
14580 ("--pretend" in myopts),
14581 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14584 pkglist = mydepgraph.altlist()
14585 mydepgraph.saveNomergeFavorites()
14586 mydepgraph.break_refs(pkglist)
14587 mergetask = Scheduler(settings, trees, mtimedb, myopts,
14588 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14589 del mydepgraph, pkglist
14590 clear_caches(trees)
14592 retval = mergetask.merge()
14593 merge_count = mergetask.curval
# --- Post-merge: AUTOCLEAN unmerge or a warning that it is disabled. ---
14595 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14596 if "yes" == settings.get("AUTOCLEAN"):
14597 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14598 unmerge(trees[settings["ROOT"]]["root_config"],
14599 myopts, "clean", [],
14600 ldpath_mtimes, autoclean=1)
14602 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14603 + " AUTOCLEAN is disabled.  This can cause serious"
14604 + " problems due to overlapping packages.\n")
# Prune stale entries from the preserved-libs registry after cleaning.
14605 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14609 def multiple_actions(action1, action2):
# Report that two mutually exclusive emerge actions were requested.
# NOTE(review): the listing omits lines 14612-14613 — presumably the
# `sys.exit(1)` that aborts after the message; confirm in the full source.
14610 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14611 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14614 def insert_optional_args(args):
# NOTE(review): gappy numbered listing — missing lines include the `while`
# header driving `arg_stack.pop()` (around 14626), the `new_args = []` and
# `return new_args` lines, the `try/except ValueError` pairs around the
# int() conversions, and `continue` statements.  Code kept byte-identical.
14616 	Parse optional arguments and insert a value if one has
14617 	not been provided. This is done before feeding the args
14618 	to the optparse parser since that parser does not support
14619 	this feature natively.
14623 jobs_opts = ("-j", "--jobs")
# Process the arguments right-to-left via a reversed stack so lookahead at
# the next argument is a cheap peek at arg_stack[-1].
14624 arg_stack = args[:]
14625 arg_stack.reverse()
14627 arg = arg_stack.pop()
# Short option cluster containing "j" (e.g. "-vj"), but not a long option.
14629 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14630 if not (short_job_opt or arg in jobs_opts):
14631 new_args.append(arg)
14634 # Insert an empty placeholder in order to
14635 # satisfy the requirements of optparse.
14637 new_args.append("--jobs")
# "-j3" style: the count is glued onto the option; "-vj" style: other
# short options must be preserved and re-emitted after the count.
14640 if short_job_opt and len(arg) > 2:
14641 if arg[:2] == "-j":
14643 job_count = int(arg[2:])
14645 saved_opts = arg[2:]
14648 saved_opts = arg[1:].replace("j", "")
# "-j 3" style: consume the following argument as the count if numeric.
14650 if job_count is None and arg_stack:
14652 job_count = int(arg_stack[-1])
14656 # Discard the job count from the stack
14657 # since we're consuming it here.
14660 if job_count is None:
14661 # unlimited number of jobs
14662 new_args.append("True")
14664 new_args.append(str(job_count))
14666 if saved_opts is not None:
14667 new_args.append("-" + saved_opts)
# Parse the emerge command line into (myaction, myopts, myfiles):
# the single requested action, a dict of boolean/valued options keyed by
# their "--long" spelling, and the remaining package/set arguments.
# Relies on the module-level `actions`, `options` and `shortmapping`
# tables declared elsewhere in this file.
# NOTE(review): this listing elides many original lines (the
# argument_options table entries and the try/except bodies around the
# int()/float() conversions are incomplete here).
14671 def parse_opts(tmpcmdline, silent=False):
14676 global actions, options, shortmapping
14678 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, mapped to optparse add_option kwargs.
14679 argument_options = {
14681 "help":"specify the location for portage configuration files",
14685 "help":"enable or disable color output",
14687 "choices":("y", "n")
14692 "help" : "Specifies the number of packages to build " + \
14698 "--load-average": {
14700 "help" :"Specifies that no new builds should be started " + \
14701 "if there are other builds running and the load average " + \
14702 "is at least LOAD (a floating-point number).",
14708 "help":"include unnecessary build time dependencies",
14710 "choices":("y", "n")
14713 "help":"specify conditions to trigger package reinstallation",
14715 "choices":["changed-use"]
14719 from optparse import OptionParser
14720 parser = OptionParser()
# emerge implements its own --help action, so drop optparse's built-in.
14721 if parser.has_option("--help"):
14722 parser.remove_option("--help")
# Register every known action, boolean option, short-option mapping and
# long-option alias as a store_true option; dest is the snake_case name.
14724 for action_opt in actions:
14725 parser.add_option("--" + action_opt, action="store_true",
14726 dest=action_opt.replace("-", "_"), default=False)
14727 for myopt in options:
14728 parser.add_option(myopt, action="store_true",
14729 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14730 for shortopt, longopt in shortmapping.iteritems():
14731 parser.add_option("-" + shortopt, action="store_true",
14732 dest=longopt.lstrip("--").replace("-", "_"), default=False)
14733 for myalias, myopt in longopt_aliases.iteritems():
14734 parser.add_option(myalias, action="store_true",
14735 dest=myopt.lstrip("--").replace("-", "_"), default=False)
14737 for myopt, kwargs in argument_options.iteritems():
14738 parser.add_option(myopt,
14739 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# Normalize optional-valued options (-j/--jobs) before parsing.
14741 tmpcmdline = insert_optional_args(tmpcmdline)
14743 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# --jobs: "True" (inserted placeholder) means unlimited; otherwise it
# must parse as a positive int, else a warning is written.
14747 if myoptions.jobs == "True":
14751 jobs = int(myoptions.jobs)
14755 if jobs is not True and \
14759 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14760 (myoptions.jobs,), noiselevel=-1)
14762 myoptions.jobs = jobs
# --load-average: must parse as a positive float; non-positive or
# unparsable values are rejected with a warning.
14764 if myoptions.load_average:
14766 load_average = float(myoptions.load_average)
14770 if load_average <= 0.0:
14771 load_average = None
14773 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14774 (myoptions.load_average,), noiselevel=-1)
14776 myoptions.load_average = load_average
# Transfer parsed values back into a dict keyed by the "--long" form.
14778 for myopt in options:
14779 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14781 myopts[myopt] = True
14783 for myopt in argument_options:
14784 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# Exactly one action may be selected; a second one aborts via
# multiple_actions().
14788 for action_opt in actions:
14789 v = getattr(myoptions, action_opt.replace("-", "_"))
14792 multiple_actions(myaction, action_opt)
14794 myaction = action_opt
14798 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Run configuration validation on the vartree settings of every root.

	Iterates all roots in the trees mapping and calls validate() on each
	root's installed-package tree configuration, so problems surface
	before any build/merge work starts.
	"""
	for root_trees in trees.values():
		root_trees["vartree"].settings.validate()
def clear_caches(trees):
	"""Release memoized and auxiliary caches for every root's databases.

	For each root: un-freeze (melt) the portage tree dbapi and clear its
	aux cache, clear the binary-package dbapi caches, and drop the
	installed-package linkmap cache.  Finally flush portage's global
	directory-listing cache.
	"""
	for root_trees in trees.itervalues():
		portdb = root_trees["porttree"].dbapi
		bindb = root_trees["bintree"].dbapi
		portdb.melt()
		portdb._aux_cache.clear()
		bindb._aux_cache.clear()
		bindb._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
# Build (or refresh) the portage configuration for all roots and return
# the tuple (settings, trees, mtimedb) used throughout emerge.
# NOTE(review): this listing elides some original lines (e.g. the kwargs
# initialization before the environment loop and the bodies of the root
# selection loop) -- embedded numbering has gaps.
14815 def load_emerge_config(trees=None):
# Pick up root overrides from the environment for create_trees().
14817 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14818 v = os.environ.get(envvar, None)
14819 if v and v.strip():
14821 trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (settings + set configuration) to every root.
14823 for root, root_trees in trees.iteritems():
14824 settings = root_trees["vartree"].settings
14825 setconfig = load_default_config(settings, root_trees)
14826 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
# Select which root's settings to return; starts from "/" and may be
# overridden by the loop below (selection condition elided here).
14828 settings = trees["/"]["vartree"].settings
14830 for myroot in trees:
14832 settings = trees[myroot]["vartree"].settings
# The mtime database lives under the host's portage cache path.
14835 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14836 mtimedb = portage.MtimeDB(mtimedbfile)
14838 return settings, trees, mtimedb
# NOTE(review): this listing elides a number of original lines (try:
# headers for the int() conversions, some else: branches, etc.) --
# embedded numbering has gaps.
14840 def adjust_config(myopts, settings):
14841 """Make emerge specific adjustments to the config."""
14843 # To enhance usability, make some vars case insensitive by forcing them to
# (lower case; the continuation of this comment is elided in the listing.)
14845 for myvar in ("AUTOCLEAN", "NOCOLOR"):
14846 if myvar in settings:
14847 settings[myvar] = settings[myvar].lower()
14848 settings.backup_changes(myvar)
14851 # Kill noauto as it will break merges otherwise.
14852 if "noauto" in settings.features:
14853 while "noauto" in settings.features:
14854 settings.features.remove("noauto")
14855 settings["FEATURES"] = " ".join(settings.features)
14856 settings.backup_changes("FEATURES")
# Sanity-check CLEAN_DELAY: fall back to the module default and rewrite
# the setting if it is not a valid integer.
14860 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14861 except ValueError, e:
14862 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14863 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14864 settings["CLEAN_DELAY"], noiselevel=-1)
14865 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14866 settings.backup_changes("CLEAN_DELAY")
# Same integer sanity check for EMERGE_WARNING_DELAY (default 10s).
14868 EMERGE_WARNING_DELAY = 10
14870 EMERGE_WARNING_DELAY = int(settings.get(
14871 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14872 except ValueError, e:
14873 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14874 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14875 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14876 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14877 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate relevant command-line flags into the config environment so
# child ebuild processes see them.
14879 if "--quiet" in myopts:
14880 settings["PORTAGE_QUIET"]="1"
14881 settings.backup_changes("PORTAGE_QUIET")
14883 if "--verbose" in myopts:
14884 settings["PORTAGE_VERBOSE"] = "1"
14885 settings.backup_changes("PORTAGE_VERBOSE")
14887 # Set so that configs will be merged regardless of remembered status
14888 if ("--noconfmem" in myopts):
14889 settings["NOCONFMEM"]="1"
14890 settings.backup_changes("NOCONFMEM")
14892 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be exactly 0 or 1; anything else is reported.
14895 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14896 if PORTAGE_DEBUG not in (0, 1):
14897 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14898 PORTAGE_DEBUG, noiselevel=-1)
14899 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14902 except ValueError, e:
14903 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14904 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14905 settings["PORTAGE_DEBUG"], noiselevel=-1)
14907 if "--debug" in myopts:
14909 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14910 settings.backup_changes("PORTAGE_DEBUG")
# Resolve color output: NOCOLOR in the config, then an explicit --color
# option, then stdout tty auto-detection (in increasing precedence).
14912 if settings.get("NOCOLOR") not in ("yes","true"):
14913 portage.output.havecolor = 1
14915 """The explicit --color < y | n > option overrides the NOCOLOR environment
14916 variable and stdout auto-detection."""
14917 if "--color" in myopts:
14918 if "y" == myopts["--color"]:
14919 portage.output.havecolor = 1
14920 settings["NOCOLOR"] = "false"
14922 portage.output.havecolor = 0
14923 settings["NOCOLOR"] = "true"
14924 settings.backup_changes("NOCOLOR")
14925 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14926 portage.output.havecolor = 0
14927 settings["NOCOLOR"] = "true"
14928 settings.backup_changes("NOCOLOR")
# Apply process scheduling priorities from the config.  The body is
# elided in this listing (embedded numbering jumps 14930 -> 14934);
# presumably it delegates to the nice() and ionice() helpers defined
# below -- confirm against the full file.
14930 def apply_priorities(settings):
# Renice the current process according to PORTAGE_NICENESS, reporting
# (but not raising) any failure.
# NOTE(review): the try: line before os.nice() is elided in this listing
# (embedded numbering jumps 14934 -> 14936).
14934 def nice(settings):
14936 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# OSError: the nice() syscall was refused; ValueError: the setting was
# not an integer.  Either way, report and continue.
14937 except (OSError, ValueError), e:
14938 out = portage.output.EOutput()
14939 out.eerror("Failed to change nice value to '%s'" % \
14940 settings["PORTAGE_NICENESS"])
14941 out.eerror("%s\n" % str(e))
# Run the user-configured PORTAGE_IONICE_COMMAND (with ${PID} expanded to
# this process id) to adjust the I/O priority of the emerge process.
# NOTE(review): several lines are elided in this listing (the early
# return when the setting is unset, the try: before spawn, and the
# return after the CommandNotFound handler).
14943 def ionice(settings):
14945 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14947 ionice_cmd = shlex.split(ionice_cmd)
# Expand ${PID}-style variables in each argument of the command.
14951 from portage.util import varexpand
14952 variables = {"PID" : str(os.getpid())}
14953 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14956 rval = portage.process.spawn(cmd, env=os.environ)
14957 except portage.exception.CommandNotFound:
14958 # The OS kernel probably doesn't support ionice,
14959 # so return silently.
# A nonzero exit status from the command is reported to the user.
14962 if rval != os.EX_OK:
14963 out = portage.output.EOutput()
14964 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14965 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
# Print an ERROR-level message saying that the named package set does not
# exist, followed by the sorted list of sets that are defined.
# NOTE(review): the msg-list initialization line is elided in this
# listing (embedded numbering jumps 14967 -> 14970).
14967 def display_missing_pkg_set(root_config, set_name):
14970 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14971 "The following sets exist:") % \
14972 colorize("INFORM", set_name))
14975 for s in sorted(root_config.sets):
14976 msg.append(" %s" % s)
14979 writemsg_level("".join("%s\n" % l for l in msg),
14980 level=logging.ERROR, noiselevel=-1)
# Expand @set arguments found in myfiles into concrete package atoms (or
# keep them as set references for actions whose set expansion happens
# later, in the depgraph).  Supports per-set options via the
# "@name[key=value,...]" syntax and simple left-to-right set algebra with
# equal-length binary operators.  Returns (newargs, retval).
# NOTE(review): this listing elides many original lines (loop headers,
# try: lines, several assignments) -- embedded numbering has gaps.
14982 def expand_set_arguments(myfiles, myaction, root_config):
14984 setconfig = root_config.setconfig
14986 sets = setconfig.getSets()
14988 # In order to know exactly which atoms/sets should be added to the
14989 # world file, the depgraph performs set expansion later. It will get
14990 # confused about where the atoms came from if it's not allowed to
14991 # expand them itself.
14992 do_not_expand = (None, )
# Plain "system"/"world" arguments are rewritten to set references.
14995 if a in ("system", "world"):
14996 newargs.append(SETPREFIX+a)
15003 # separators for set arguments
15007 # WARNING: all operators must be of equal length
15009 DIFF_OPERATOR = "-@"
15010 UNION_OPERATOR = "+@"
# First pass: extract "@name[opts]" option blocks, feed the options into
# the set configuration, and strip them from the argument text.
15012 for i in range(0, len(myfiles)):
15013 if myfiles[i].startswith(SETPREFIX):
15016 x = myfiles[i][len(SETPREFIX):]
15019 start = x.find(ARG_START)
15020 end = x.find(ARG_END)
15021 if start > 0 and start < end:
15022 namepart = x[:start]
15023 argpart = x[start+1:end]
15025 # TODO: implement proper quoting
15026 args = argpart.split(",")
# "key=value" entries are split; bare entries become flag options.
15030 k, v = a.split("=", 1)
15033 options[a] = "True"
15034 setconfig.update(namepart, options)
15035 newset += (x[:start-len(namepart)]+namepart)
15036 x = x[end+len(ARG_END):]
15040 myfiles[i] = SETPREFIX+newset
# Reload the sets now that per-set options have been applied.
15042 sets = setconfig.getSets()
15044 # display errors that occured while loading the SetConfig instance
15045 for e in setconfig.errors:
15046 print colorize("BAD", "Error during set creation: %s" % e)
15048 # emerge relies on the existance of sets with names "world" and "system"
15049 required_sets = ("world", "system")
# Build a readable, comma-separated list of any missing required sets
# and report a probable sets.conf problem.
15052 for s in required_sets:
15054 missing_sets.append(s)
15056 if len(missing_sets) > 2:
15057 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15058 missing_sets_str += ', and "%s"' % missing_sets[-1]
15059 elif len(missing_sets) == 2:
15060 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15062 missing_sets_str = '"%s"' % missing_sets[-1]
15063 msg = ["emerge: incomplete set configuration, " + \
15064 "missing set(s): %s" % missing_sets_str]
15066 msg.append(" sets defined: %s" % ", ".join(sets))
15067 msg.append(" This usually means that '%s'" % \
15068 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15069 msg.append(" is missing or corrupt.")
15071 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15073 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
# Second pass: resolve each @set argument, evaluating any set-algebra
# expression right-to-left into (expr_sets, expr_ops) and then folding
# the operator list over the collected atom sets.
15076 if a.startswith(SETPREFIX):
15077 # support simple set operations (intersection, difference and union)
15078 # on the commandline. Expressions are evaluated strictly left-to-right
15079 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15080 expression = a[len(SETPREFIX):]
15083 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15084 is_pos = expression.rfind(IS_OPERATOR)
15085 diff_pos = expression.rfind(DIFF_OPERATOR)
15086 union_pos = expression.rfind(UNION_OPERATOR)
# The rightmost operator splits the expression; all operators have the
# same length, so IS_OPERATOR's length is used for slicing.
15087 op_pos = max(is_pos, diff_pos, union_pos)
15088 s1 = expression[:op_pos]
15089 s2 = expression[op_pos+len(IS_OPERATOR):]
15090 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15092 display_missing_pkg_set(root_config, s2)
15094 expr_sets.insert(0, s2)
15095 expr_ops.insert(0, op)
15097 if not expression in sets:
15098 display_missing_pkg_set(root_config, expression)
15100 expr_sets.insert(0, expression)
15101 result = set(setconfig.getSetAtoms(expression))
15102 for i in range(0, len(expr_ops)):
15103 s2 = setconfig.getSetAtoms(expr_sets[i+1])
15104 if expr_ops[i] == IS_OPERATOR:
15105 result.intersection_update(s2)
15106 elif expr_ops[i] == DIFF_OPERATOR:
15107 result.difference_update(s2)
15108 elif expr_ops[i] == UNION_OPERATOR:
15111 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15112 newargs.extend(result)
# Simple (non-expression) set reference: resolve its atoms directly.
15114 s = a[len(SETPREFIX):]
15116 display_missing_pkg_set(root_config, s)
15118 setconfig.active.append(s)
15120 set_atoms = setconfig.getSetAtoms(s)
15121 except portage.exception.PackageSetNotFound, e:
15122 writemsg_level(("emerge: the given set '%s' " + \
15123 "contains a non-existent set named '%s'.\n") % \
15124 (s, e), level=logging.ERROR, noiselevel=-1)
# Unmerge-type actions require the set to support unmerge operations.
15126 if myaction in unmerge_actions and \
15127 not sets[s].supportsOperation("unmerge"):
15128 sys.stderr.write("emerge: the given set '%s' does " % s + \
15129 "not support unmerge operations\n")
15131 elif not set_atoms:
15132 print "emerge: '%s' is an empty set" % s
# Only expand into atoms for actions outside do_not_expand; otherwise
# keep the set reference for the depgraph to expand itself.
15133 elif myaction not in do_not_expand:
15134 newargs.extend(set_atoms)
15136 newargs.append(SETPREFIX+s)
15137 for e in sets[s].errors:
15141 return (newargs, retval)
# Warn (at WARNING level) about repositories that lack a
# profiles/repo_name entry.  Returns True when any are missing.
# NOTE(review): a few lines are elided in this listing (the loop header
# over `repos` and the msg-list initialization).
15143 def repo_name_check(trees):
15144 missing_repo_names = set()
15145 for root, root_trees in trees.iteritems():
15146 if "porttree" in root_trees:
15147 portdb = root_trees["porttree"].dbapi
# Start from all configured tree paths, then discard those for which a
# repository name can be resolved.
15148 missing_repo_names.update(portdb.porttrees)
15149 repos = portdb.getRepositories()
15151 missing_repo_names.discard(portdb.getRepositoryPath(r))
15152 if portdb.porttree_root in missing_repo_names and \
15153 not os.path.exists(os.path.join(
15154 portdb.porttree_root, "profiles")):
15155 # This is normal if $PORTDIR happens to be empty,
15156 # so don't warn about it.
15157 missing_repo_names.remove(portdb.porttree_root)
15159 if missing_repo_names:
15161 msg.append("WARNING: One or more repositories " + \
15162 "have missing repo_name entries:")
15164 for p in missing_repo_names:
15165 msg.append("\t%s/profiles/repo_name" % (p,))
15167 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15168 "should be a plain text file containing a unique " + \
15169 "name for the repository on the first line.", 70))
15170 writemsg_level("".join("%s\n" % l for l in msg),
15171 level=logging.WARNING, noiselevel=-1)
15173 return bool(missing_repo_names)
# Warn when CONFIG_PROTECT is empty for any root, since config-file
# protection would then be disabled.
# NOTE(review): the condition that appends the per-root suffix is elided
# in this listing (embedded numbering jumps 15178 -> 15180).
15175 def config_protect_check(trees):
15176 for root, root_trees in trees.iteritems():
15177 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15178 msg = "!!! CONFIG_PROTECT is empty"
15180 msg += " for '%s'" % root
15181 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
# Report that a short (category-less) package name matched multiple
# category/package pairs.  In --quiet mode just list the candidates; in
# normal mode run a search so the user sees full package details.
# NOTE(review): several lines are elided in this listing (the search
# output call between 15199 and 15202, the s.addCP call, etc.).
15183 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15185 if "--quiet" in myopts:
15186 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15187 print "!!! one of the following fully-qualified ebuild names instead:\n"
15188 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15189 print " " + colorize("INFORM", cp)
# Non-quiet mode: run the search machinery over each candidate cp so the
# user gets descriptions along with the names.
15192 s = search(root_config, spinner, "--searchdesc" in myopts,
15193 "--quiet" not in myopts, "--usepkg" in myopts,
15194 "--usepkgonly" in myopts)
15195 null_cp = portage.dep_getkey(insert_category_into_atom(
15197 cat, atom_pn = portage.catsplit(null_cp)
15198 s.searchkey = atom_pn
15199 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15202 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15203 print "!!! one of the above fully-qualified ebuild names instead.\n"
# Verify that a profile is configured before actions that require one.
# "info"/"sync" and --version/--help are exempt.  On failure, an error
# explaining the allowed actions is written.
# NOTE(review): the return statements (os.EX_OK on the early exits and
# the failure code at the end) are elided in this listing.
15205 def profile_check(trees, myaction, myopts):
15206 if myaction in ("info", "sync"):
15208 elif "--version" in myopts or "--help" in myopts:
15210 for root, root_trees in trees.iteritems():
15211 if root_trees["root_config"].settings.profiles:
15213 # generate some profile related warning messages
15214 validate_ebuild_environment(trees)
15215 msg = "If you have just changed your profile configuration, you " + \
15216 "should revert back to the previous configuration. Due to " + \
15217 "your current profile being invalid, allowed actions are " + \
15218 "limited to --help, --info, --sync, and --version."
15219 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15220 level=logging.ERROR, noiselevel=-1)
15225 global portage # NFC why this is necessary now - genone
15226 portage._disable_legacy_globals()
15227 # Disable color until we're sure that it should be enabled (after
15228 # EMERGE_DEFAULT_OPTS has been parsed).
15229 portage.output.havecolor = 0
15230 # This first pass is just for options that need to be known as early as
15231 # possible, such as --config-root. They will be parsed again later,
15232 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15233 # the value of --config-root).
15234 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15235 if "--debug" in myopts:
15236 os.environ["PORTAGE_DEBUG"] = "1"
15237 if "--config-root" in myopts:
15238 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15240 # Portage needs to ensure a sane umask for the files it creates.
15242 settings, trees, mtimedb = load_emerge_config()
15243 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15244 rval = profile_check(trees, myaction, myopts)
15245 if rval != os.EX_OK:
15248 if portage._global_updates(trees, mtimedb["updates"]):
15250 # Reload the whole config from scratch.
15251 settings, trees, mtimedb = load_emerge_config(trees=trees)
15252 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15254 xterm_titles = "notitles" not in settings.features
15257 if "--ignore-default-opts" not in myopts:
15258 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15259 tmpcmdline.extend(sys.argv[1:])
15260 myaction, myopts, myfiles = parse_opts(tmpcmdline)
15262 if "--digest" in myopts:
15263 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15264 # Reload the whole config from scratch so that the portdbapi internal
15265 # config is updated with new FEATURES.
15266 settings, trees, mtimedb = load_emerge_config(trees=trees)
15267 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15269 for myroot in trees:
15270 mysettings = trees[myroot]["vartree"].settings
15271 mysettings.unlock()
15272 adjust_config(myopts, mysettings)
15273 if "--pretend" not in myopts:
15274 mysettings["PORTAGE_COUNTER_HASH"] = \
15275 trees[myroot]["vartree"].dbapi._counter_hash()
15276 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15278 del myroot, mysettings
15280 apply_priorities(settings)
15282 spinner = stdout_spinner()
15283 if "candy" in settings.features:
15284 spinner.update = spinner.update_scroll
15286 if "--quiet" not in myopts:
15287 portage.deprecated_profile_check(settings=settings)
15288 repo_name_check(trees)
15289 config_protect_check(trees)
15291 eclasses_overridden = {}
15292 for mytrees in trees.itervalues():
15293 mydb = mytrees["porttree"].dbapi
15294 # Freeze the portdbapi for performance (memoize all xmatch results).
15296 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
15299 if eclasses_overridden and \
15300 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
15301 prefix = bad(" * ")
15302 if len(eclasses_overridden) == 1:
15303 writemsg(prefix + "Overlay eclass overrides " + \
15304 "eclass from PORTDIR:\n", noiselevel=-1)
15306 writemsg(prefix + "Overlay eclasses override " + \
15307 "eclasses from PORTDIR:\n", noiselevel=-1)
15308 writemsg(prefix + "\n", noiselevel=-1)
15309 for eclass_name in sorted(eclasses_overridden):
15310 writemsg(prefix + " '%s/%s.eclass'\n" % \
15311 (eclasses_overridden[eclass_name], eclass_name),
15313 writemsg(prefix + "\n", noiselevel=-1)
15314 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
15315 "because it will trigger invalidation of cached ebuild metadata " + \
15316 "that is distributed with the portage tree. If you must " + \
15317 "override eclasses from PORTDIR then you are advised to add " + \
15318 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
15319 "`emerge --regen` after each time that you run `emerge --sync`. " + \
15320 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
15321 "you would like to disable this warning."
15322 from textwrap import wrap
15323 for line in wrap(msg, 72):
15324 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
15326 if "moo" in myfiles:
15329 Larry loves Gentoo (""" + platform.system() + """)
15331 _______________________
15332 < Have you mooed today? >
15333 -----------------------
15343 ext = os.path.splitext(x)[1]
15344 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15345 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15348 root_config = trees[settings["ROOT"]]["root_config"]
15349 if myaction == "list-sets":
15350 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15354 # only expand sets for actions taking package arguments
15355 oldargs = myfiles[:]
15356 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15357 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15358 if retval != os.EX_OK:
15361 # Need to handle empty sets specially, otherwise emerge will react
15362 # with the help message for empty argument lists
15363 if oldargs and not myfiles:
15364 print "emerge: no targets left after set expansion"
15367 if ("--tree" in myopts) and ("--columns" in myopts):
15368 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15371 if ("--quiet" in myopts):
15372 spinner.update = spinner.update_quiet
15373 portage.util.noiselimit = -1
15375 # Always create packages if FEATURES=buildpkg
15376 # Imply --buildpkg if --buildpkgonly
15377 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15378 if "--buildpkg" not in myopts:
15379 myopts["--buildpkg"] = True
15381 # Also allow -S to invoke search action (-sS)
15382 if ("--searchdesc" in myopts):
15383 if myaction and myaction != "search":
15384 myfiles.append(myaction)
15385 if "--search" not in myopts:
15386 myopts["--search"] = True
15387 myaction = "search"
15389 # Always try and fetch binary packages if FEATURES=getbinpkg
15390 if ("getbinpkg" in settings.features):
15391 myopts["--getbinpkg"] = True
15393 if "--buildpkgonly" in myopts:
15394 # --buildpkgonly will not merge anything, so
15395 # it cancels all binary package options.
15396 for opt in ("--getbinpkg", "--getbinpkgonly",
15397 "--usepkg", "--usepkgonly"):
15398 myopts.pop(opt, None)
15400 if "--fetch-all-uri" in myopts:
15401 myopts["--fetchonly"] = True
15403 if "--skipfirst" in myopts and "--resume" not in myopts:
15404 myopts["--resume"] = True
15406 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15407 myopts["--usepkgonly"] = True
15409 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15410 myopts["--getbinpkg"] = True
15412 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15413 myopts["--usepkg"] = True
15415 # Also allow -K to apply --usepkg/-k
15416 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15417 myopts["--usepkg"] = True
15419 # Allow -p to remove --ask
15420 if ("--pretend" in myopts) and ("--ask" in myopts):
15421 print ">>> --pretend disables --ask... removing --ask from options."
15422 del myopts["--ask"]
15424 # forbid --ask when not in a terminal
15425 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15426 if ("--ask" in myopts) and (not sys.stdin.isatty()):
15427 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15431 if settings.get("PORTAGE_DEBUG", "") == "1":
15432 spinner.update = spinner.update_quiet
15434 if "python-trace" in settings.features:
15435 import portage.debug
15436 portage.debug.set_trace(True)
15438 if not ("--quiet" in myopts):
15439 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15440 spinner.update = spinner.update_basic
15442 if myaction == 'version':
15443 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15444 settings.profile_path, settings["CHOST"],
15445 trees[settings["ROOT"]]["vartree"].dbapi)
15447 elif "--help" in myopts:
15448 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15451 if "--debug" in myopts:
15452 print "myaction", myaction
15453 print "myopts", myopts
15455 if not myaction and not myfiles and "--resume" not in myopts:
15456 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15459 pretend = "--pretend" in myopts
15460 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15461 buildpkgonly = "--buildpkgonly" in myopts
15463 # check if root user is the current user for the actions where emerge needs this
15464 if portage.secpass < 2:
15465 # We've already allowed "--version" and "--help" above.
15466 if "--pretend" not in myopts and myaction not in ("search","info"):
15467 need_superuser = not \
15469 (buildpkgonly and secpass >= 1) or \
15470 myaction in ("metadata", "regen") or \
15471 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15472 if portage.secpass < 1 or \
15475 access_desc = "superuser"
15477 access_desc = "portage group"
15478 # Always show portage_group_warning() when only portage group
15479 # access is required but the user is not in the portage group.
15480 from portage.data import portage_group_warning
15481 if "--ask" in myopts:
15482 myopts["--pretend"] = True
15483 del myopts["--ask"]
15484 print ("%s access is required... " + \
15485 "adding --pretend to options.\n") % access_desc
15486 if portage.secpass < 1 and not need_superuser:
15487 portage_group_warning()
15489 sys.stderr.write(("emerge: %s access is " + \
15490 "required.\n\n") % access_desc)
15491 if portage.secpass < 1 and not need_superuser:
15492 portage_group_warning()
15495 disable_emergelog = False
15496 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15498 disable_emergelog = True
15500 if myaction in ("search", "info"):
15501 disable_emergelog = True
15502 if disable_emergelog:
15503 """ Disable emergelog for everything except build or unmerge
15504 operations. This helps minimize parallel emerge.log entries that can
15505 confuse log parsers. We especially want it disabled during
15506 parallel-fetch, which uses --resume --fetchonly."""
15508 def emergelog(*pargs, **kargs):
15511 if not "--pretend" in myopts:
15512 emergelog(xterm_titles, "Started emerge on: "+\
15513 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15516 myelogstr=" ".join(myopts)
15518 myelogstr+=" "+myaction
15520 myelogstr += " " + " ".join(oldargs)
15521 emergelog(xterm_titles, " *** emerge " + myelogstr)
15524 def emergeexitsig(signum, frame):
15525 signal.signal(signal.SIGINT, signal.SIG_IGN)
15526 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15527 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15528 sys.exit(100+signum)
15529 signal.signal(signal.SIGINT, emergeexitsig)
15530 signal.signal(signal.SIGTERM, emergeexitsig)
15533 """This gets out final log message in before we quit."""
15534 if "--pretend" not in myopts:
15535 emergelog(xterm_titles, " *** terminating.")
15536 if "notitles" not in settings.features:
15538 portage.atexit_register(emergeexit)
15540 if myaction in ("config", "metadata", "regen", "sync"):
15541 if "--pretend" in myopts:
15542 sys.stderr.write(("emerge: The '%s' action does " + \
15543 "not support '--pretend'.\n") % myaction)
15546 if "sync" == myaction:
15547 return action_sync(settings, trees, mtimedb, myopts, myaction)
15548 elif "metadata" == myaction:
15549 action_metadata(settings, portdb, myopts)
15550 elif myaction=="regen":
15551 validate_ebuild_environment(trees)
15552 return action_regen(settings, portdb, myopts.get("--jobs"),
15553 myopts.get("--load-average"))
15555 elif "config"==myaction:
15556 validate_ebuild_environment(trees)
15557 action_config(settings, trees, myopts, myfiles)
15560 elif "search"==myaction:
15561 validate_ebuild_environment(trees)
15562 action_search(trees[settings["ROOT"]]["root_config"],
15563 myopts, myfiles, spinner)
15564 elif myaction in ("clean", "unmerge") or \
15565 (myaction == "prune" and "--nodeps" in myopts):
15566 validate_ebuild_environment(trees)
15568 # Ensure atoms are valid before calling unmerge().
15569 # For backward compat, leading '=' is not required.
15571 if is_valid_package_atom(x) or \
15572 is_valid_package_atom("=" + x):
15575 msg.append("'%s' is not a valid package atom." % (x,))
15576 msg.append("Please check ebuild(5) for full details.")
15577 writemsg_level("".join("!!! %s\n" % line for line in msg),
15578 level=logging.ERROR, noiselevel=-1)
15581 # When given a list of atoms, unmerge
15582 # them in the order given.
15583 ordered = myaction == "unmerge"
15584 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15585 mtimedb["ldpath"], ordered=ordered):
15586 if not (buildpkgonly or fetchonly or pretend):
15587 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15589 elif myaction in ("depclean", "info", "prune"):
15591 # Ensure atoms are valid before calling unmerge().
15592 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15595 if is_valid_package_atom(x):
15597 valid_atoms.append(
15598 portage.dep_expand(x, mydb=vardb, settings=settings))
15599 except portage.exception.AmbiguousPackageName, e:
15600 msg = "The short ebuild name \"" + x + \
15601 "\" is ambiguous. Please specify " + \
15602 "one of the following " + \
15603 "fully-qualified ebuild names instead:"
15604 for line in textwrap.wrap(msg, 70):
15605 writemsg_level("!!! %s\n" % (line,),
15606 level=logging.ERROR, noiselevel=-1)
15608 writemsg_level(" %s\n" % colorize("INFORM", i),
15609 level=logging.ERROR, noiselevel=-1)
15610 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15614 msg.append("'%s' is not a valid package atom." % (x,))
15615 msg.append("Please check ebuild(5) for full details.")
15616 writemsg_level("".join("!!! %s\n" % line for line in msg),
15617 level=logging.ERROR, noiselevel=-1)
15620 if myaction == "info":
15621 return action_info(settings, trees, myopts, valid_atoms)
15623 validate_ebuild_environment(trees)
15624 action_depclean(settings, trees, mtimedb["ldpath"],
15625 myopts, myaction, valid_atoms, spinner)
15626 if not (buildpkgonly or fetchonly or pretend):
15627 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15628 # "update", "system", or just process files:
15630 validate_ebuild_environment(trees)
15631 if "--pretend" not in myopts:
15632 display_news_notification(root_config, myopts)
15633 retval = action_build(settings, trees, mtimedb,
15634 myopts, myaction, myfiles, spinner)
15635 root_config = trees[settings["ROOT"]]["root_config"]
15636 post_emerge(root_config, myopts, mtimedb, retval)