2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
7 # This block ensures that ^C interrupts are handled quietly.
# Quiet SIGINT/SIGTERM handler: on first delivery, further INT/TERM are
# ignored so the shutdown path is not re-entered.
# NOTE(review): interior lines (the part of the handler that actually
# exits) are missing from this excerpt — confirm against the full file.
11 def exithandler(signum,frame):
12 signal.signal(signal.SIGINT, signal.SIG_IGN)
13 signal.signal(signal.SIGTERM, signal.SIG_IGN)
# Install the quiet handler for INT/TERM; SIGPIPE is reset to the
# default so broken pipes terminate instead of raising IOError.
16 signal.signal(signal.SIGINT, exithandler)
17 signal.signal(signal.SIGTERM, exithandler)
18 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# NOTE(review): the matching try: for this except is not visible here.
20 except KeyboardInterrupt:
25 os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
29 from os import path as osp
30 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 del os.environ["PORTAGE_LEGACY_GLOBALS"]
33 from portage import digraph, portdbapi
34 from portage.const import NEWS_LIB_PATH, CACHE_PATH, PRIVATE_PATH, USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
37 import portage.xpak, commands, errno, re, socket, time, types
38 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
39 havecolor, nc_len, nocolor, red, teal, turquoise, white, xtermTitle, \
40 xtermTitleReset, yellow
41 from portage.output import create_color_func
42 good = create_color_func("GOOD")
43 bad = create_color_func("BAD")
44 # white looks bad on terminals with white background
45 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
59 from UserDict import DictMixin
64 import pickle as cPickle
# Progress indicator written to stdout while long operations (e.g. cache
# regeneration, dependency calculation) run. Three display modes are
# visible below: dots (update_basic), a scrolling message
# (update_scroll), and a twirling cursor (update_twirl); update_quiet
# outputs nothing.
66 class stdout_spinner(object):
68 "Gentoo Rocks ("+os.uname()[0]+")",
69 "Thank you for using Gentoo. :)",
70 "Are you actually trying to read this?",
71 "How many times have you stared at this?",
72 "We are generating the cache right now",
73 "You are paying too much attention.",
74 "A theory is better than its explanation.",
75 "Phasers locked on target, Captain.",
76 "Thrashing is just virtual crashing.",
77 "To be is to program.",
78 "Real Users hate Real Programmers.",
79 "When all else fails, read the instructions.",
80 "Functionality breeds Contempt.",
81 "The future lies ahead.",
82 "3.1415926535897932384626433832795028841971694",
83 "Sometimes insanity is the only alternative.",
84 "Inaccuracy saves a world of explanation.",
87 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default mode is the twirl; the scroll message is picked
# pseudo-randomly from the list above using the current time.
91 self.update = self.update_twirl
92 self.scroll_sequence = self.scroll_msgs[
93 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (rate limit).
95 self.min_display_latency = 0.05
97 def _return_early(self):
99 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
100 each update* method should return without doing any output when this
# Rate limiter: compares elapsed time against min_display_latency and
# records the time of the last accepted update.
103 cur_time = time.time()
104 if cur_time - self.last_update < self.min_display_latency:
106 self.last_update = cur_time
# Dot-based progress: one "." per call, grouped per 100 positions.
109 def update_basic(self):
110 self.spinpos = (self.spinpos + 1) % 500
111 if self._return_early():
113 if (self.spinpos % 100) == 0:
114 if self.spinpos == 0:
115 sys.stdout.write(". ")
117 sys.stdout.write(".")
# Scrolls the chosen message left then right across the line.
120 def update_scroll(self):
121 if self._return_early():
123 if(self.spinpos >= len(self.scroll_sequence)):
124 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
125 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
127 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
129 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
# Classic rotating /-\| cursor.
131 def update_twirl(self):
132 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
133 if self._return_early():
135 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used with --quiet / --nospinner.
138 def update_quiet(self):
# Interactive yes/no (or arbitrary-choice) prompt. An empty reply selects
# the first response; matching is case-insensitive prefix matching.
141 def userquery(prompt, responses=None, colours=None):
142 """Displays a prompt and a set of responses, then waits for a response
143 which is checked against the responses and the first to match is
144 returned. An empty response will match the first value in responses. The
145 input buffer is *not* cleared prior to the prompt!
148 responses: a List of Strings.
149 colours: a List of Functions taking and returning a String, used to
150 process the responses for display. Typically these will be functions
151 like red() but could be e.g. lambda x: "DisplayString".
152 If responses is omitted, defaults to ["Yes", "No"], [green, red].
153 If only colours is omitted, defaults to [bold, ...].
155 Returns a member of the List responses. (If called without optional
156 arguments, returns "Yes" or "No".)
157 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
159 if responses is None:
160 responses = ["Yes", "No"]
162 create_color_func("PROMPT_CHOICE_DEFAULT"),
163 create_color_func("PROMPT_CHOICE_OTHER")
165 elif colours is None:
# Cycle the colour list so there is one colour per response.
167 colours=(colours*len(responses))[:len(responses)]
171 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
172 for key in responses:
173 # An empty response will match the first value in responses.
174 if response.upper()==key[:len(response)].upper():
176 print "Sorry, response '%s' not understood." % response,
# Ctrl-C / Ctrl-D during the prompt ends the program quietly.
177 except (EOFError, KeyboardInterrupt):
# Module-level command tables (fragment): the list of emerge actions, the
# list of long options, and the short-option -> long-option map used by
# the argument parser. NOTE(review): several entries are missing from
# this excerpt relative to the original file.
182 "clean", "config", "depclean",
184 "prune", "regen", "search",
188 "--ask", "--alphabetical",
189 "--buildpkg", "--buildpkgonly",
190 "--changelog", "--columns",
195 "--fetchonly", "--fetch-all-uri",
196 "--getbinpkg", "--getbinpkgonly",
197 "--help", "--ignore-default-opts",
199 "--newuse", "--nocolor",
200 "--nodeps", "--noreplace",
201 "--nospinner", "--oneshot",
202 "--onlydeps", "--pretend",
203 "--quiet", "--resume",
204 "--searchdesc", "--selective",
208 "--usepkg", "--usepkgonly",
209 "--verbose", "--version"
# Short-option aliases, e.g. "emerge -pv" == "emerge --pretend --verbose".
215 "b":"--buildpkg", "B":"--buildpkgonly",
216 "c":"--clean", "C":"--unmerge",
217 "d":"--debug", "D":"--deep",
219 "f":"--fetchonly", "F":"--fetch-all-uri",
220 "g":"--getbinpkg", "G":"--getbinpkgonly",
222 "k":"--usepkg", "K":"--usepkgonly",
224 "n":"--noreplace", "N":"--newuse",
225 "o":"--onlydeps", "O":"--nodeps",
226 "p":"--pretend", "P":"--prune",
228 "s":"--search", "S":"--searchdesc",
231 "v":"--verbose", "V":"--version"
# Appends a timestamped entry to /var/log/emerge.log and optionally
# mirrors a short form of the message to the xterm title bar.
# Errors while logging are reported to stderr but never fatal.
234 def emergelog(xterm_titles, mystr, short_msg=None):
236 if short_msg == None:
238 if "HOSTNAME" in os.environ:
239 short_msg = os.environ["HOSTNAME"]+": "+short_msg
240 xtermTitle(short_msg)
242 file_path = "/var/log/emerge.log"
243 mylogfile = open(file_path, "a")
# Ensure the log stays owned/readable by the portage user/group.
244 portage.util.apply_secpass_permissions(file_path,
245 uid=portage.portage_uid, gid=portage.portage_gid,
# Lock the file so concurrent emerge processes don't interleave writes.
249 mylock = portage.locks.lockfile(mylogfile)
250 # seek because we may have gotten held up by the lock.
251 # if so, we may not be positioned at the end of the file.
# Timestamp is the integer part of time.time() (10 chars).
253 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257 portage.locks.unlockfile(mylock)
# Best-effort: any I/O or portage error is printed, not raised.
259 except (IOError,OSError,portage.exception.PortageException), e:
261 print >> sys.stderr, "emergelog():",e
# Prints a visible per-second countdown before a destructive operation,
# giving the user a window to press Control-C.
263 def countdown(secs=5, doing="Starting"):
265 print ">>> Waiting",secs,"seconds before starting..."
266 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275 # formats a size given in bytes nicely
# Returns a human-readable kB string; non-integer input is rejected
# (handling line not visible in this excerpt). Sizes are rounded UP to
# the next kB and thousands are separated with commas.
276 def format_size(mysize):
277 if type(mysize) not in [types.IntType,types.LongType]:
279 if 0 != mysize % 1024:
280 # Always round up to the next kB so that it doesn't show 0 kB when
281 # some small file still needs to be fetched.
282 mysize += 1024 - mysize % 1024
283 mystr=str(mysize/1024)
287 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determines the active gcc version, trying progressively less reliable
# sources: gcc-config, then ${CHOST}-gcc -dumpversion, then plain
# gcc -dumpversion. Returns "[unavailable]" (with a warning) otherwise.
291 def getgccversion(chost):
294 return: the current in-use gcc version
297 gcc_ver_command = 'gcc -dumpversion'
298 gcc_ver_prefix = 'gcc-'
300 gcc_not_found_error = red(
301 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
302 "!!! to update the environment of this terminal and possibly\n" +
303 "!!! other terminals also.\n"
# First choice: gcc-config reports the selected profile ("<chost>-<ver>").
306 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
307 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
308 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Second choice: the CHOST-prefixed compiler itself.
310 mystatus, myoutput = commands.getstatusoutput(
311 chost + "-" + gcc_ver_command)
312 if mystatus == os.EX_OK:
313 return gcc_ver_prefix + myoutput
# Last resort: whatever "gcc" is on PATH.
315 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
316 if mystatus == os.EX_OK:
317 return gcc_ver_prefix + myoutput
319 portage.writemsg(gcc_not_found_error, noiselevel=-1)
320 return "[unavailable]"
# Builds the "Portage X.Y (profile, gcc, libc, kernel arch)" banner shown
# by emerge --version / --info.
322 def getportageversion(portdir, target_root, profile, chost, vardb):
323 profilever = "unavailable"
# Express the profile as a path relative to $PORTDIR/profiles when it
# lives inside the tree; otherwise fall back to the raw symlink target.
325 realpath = os.path.realpath(profile)
326 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
327 if realpath.startswith(basepath):
328 profilever = realpath[1 + len(basepath):]
331 profilever = "!" + os.readlink(profile)
334 del realpath, basepath
# libc version comes from installed virtual/libc (or legacy
# virtual/glibc) packages; duplicates are collapsed.
337 libclist = vardb.match("virtual/libc")
338 libclist += vardb.match("virtual/glibc")
339 libclist = portage.util.unique_array(libclist)
341 xs=portage.catpkgsplit(x)
343 libcver+=","+"-".join(xs[1:])
345 libcver="-".join(xs[1:])
347 libcver="unavailable"
349 gccver = getgccversion(chost)
# uname release + machine, e.g. "2.6.20 x86_64".
350 unameout=os.uname()[2]+" "+os.uname()[4]
352 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translates command-line options into the depgraph parameter set.
# NOTE(review): lines adding "deep" and returning myparams are not
# visible in this excerpt.
354 def create_depgraph_params(myopts, myaction):
355 #configure emerge engine parameters
357 # self: include _this_ package regardless of if it is merged.
358 # selective: exclude the package if it is merged
359 # recurse: go into the dependencies
360 # deep: go into the dependencies of already merged packages
361 # empty: pretend nothing is merged
362 # complete: completely account for all known dependencies
363 myparams = set(["recurse"])
364 if "--update" in myopts or \
365 "--newuse" in myopts or \
366 "--reinstall" in myopts or \
367 "--noreplace" in myopts:
368 myparams.add("selective")
# --emptytree overrides selective: everything is treated as unmerged.
369 if "--emptytree" in myopts:
370 myparams.add("empty")
371 myparams.discard("selective")
372 if "--nodeps" in myopts:
373 myparams.discard("recurse")
374 if "--deep" in myopts:
376 if "--complete-graph" in myopts:
377 myparams.add("complete")
380 # search functionality
# Implements `emerge --search` / `--searchdesc`. Queries one or more
# package databases (port tree, binary packages, installed packages)
# through a fake portdb facade so results reflect the configured
# --usepkg/--usepkgonly mode. NOTE(review): this class body is heavily
# gapped in this excerpt; several statements (returns, try/except
# scaffolding, output() header) are missing.
381 class search(object):
392 def __init__(self, root_config, spinner, searchdesc,
393 verbose, usepkg, usepkgonly):
394 """Searches the available and installed packages for the supplied search key.
395 The list of available and installed packages is created at object instantiation.
396 This makes successive searches faster."""
397 self.settings = root_config.settings
398 self.vartree = root_config.trees["vartree"]
399 self.spinner = spinner
400 self.verbose = verbose
401 self.searchdesc = searchdesc
402 self.setconfig = root_config.setconfig
# The fake portdb delegates aux_get/cp_all/xmatch/findname/getfetchlist
# to this object's _-prefixed multiplexing implementations below.
406 self.portdb = fake_portdb
407 for attrib in ("aux_get", "cp_all",
408 "xmatch", "findname", "getfetchlist"):
409 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
413 portdb = root_config.trees["porttree"].dbapi
414 bindb = root_config.trees["bintree"].dbapi
415 vardb = root_config.trees["vartree"].dbapi
# Choose which backing databases participate in the search.
417 if not usepkgonly and portdb._have_root_eclass_dir:
418 self._dbs.append(portdb)
420 if (usepkg or usepkgonly) and bindb.cp_all():
421 self._dbs.append(bindb)
423 self._dbs.append(vardb)
424 self._portdb = portdb
# Union of category/package names across all participating dbs, sorted.
429 cp_all.update(db.cp_all())
430 return list(sorted(cp_all))
432 def _aux_get(self, *args, **kwargs):
435 return db.aux_get(*args, **kwargs)
440 def _findname(self, *args, **kwargs):
442 if db is not self._portdb:
443 # We don't want findname to return anything
444 # unless it's an ebuild in a portage tree.
445 # Otherwise, it's already built and we don't
448 func = getattr(db, "findname", None)
450 value = func(*args, **kwargs)
455 def _getfetchlist(self, *args, **kwargs):
457 func = getattr(db, "getfetchlist", None)
459 value = func(*args, **kwargs)
# Visibility check routed through the module-level visible() helper,
# wrapping the cpv in a Package instance of the appropriate type.
464 def _visible(self, db, cpv, metadata):
465 installed = db is self.vartree.dbapi
466 built = installed or db is not self._portdb
469 pkg_type = "installed"
472 return visible(self.settings,
473 Package(type_name=pkg_type, root=self.settings["ROOT"],
474 cpv=cpv, built=built, installed=installed, metadata=metadata))
# Multiplexed xmatch across all dbs for one ${CATEGORY}/${PN}.
476 def _xmatch(self, level, atom):
478 This method does not expand old-style virtuals because it
479 is restricted to returning matches for a single ${CATEGORY}/${PN}
480 and old-style virual matches unreliable for that when querying
481 multiple package databases. If necessary, old-style virtuals
482 can be performed on atoms prior to calling this method.
484 cp = portage.dep_getkey(atom)
485 if level == "match-all":
488 if hasattr(db, "xmatch"):
489 matches.update(db.xmatch(level, atom))
491 matches.update(db.match(atom))
492 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
493 db._cpv_sort_ascending(result)
494 elif level == "match-visible":
497 if hasattr(db, "xmatch"):
498 matches.update(db.xmatch(level, atom))
500 db_keys = list(db._aux_cache_keys)
501 for cpv in db.match(atom):
502 metadata = dict(izip(db_keys,
503 db.aux_get(cpv, db_keys)))
504 if not self._visible(db, cpv, metadata):
507 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508 db._cpv_sort_ascending(result)
509 elif level == "bestmatch-visible":
512 if hasattr(db, "xmatch"):
513 cpv = db.xmatch("bestmatch-visible", atom)
514 if not cpv or portage.cpv_getkey(cpv) != cp:
516 if not result or cpv == portage.best([cpv, result]):
519 db_keys = list(db._aux_cache_keys)
520 # break out of this loop with highest visible
521 # match, checked in descending order
522 for cpv in reversed(db.match(atom)):
523 if portage.cpv_getkey(cpv) != cp:
525 metadata = dict(izip(db_keys,
526 db.aux_get(cpv, db_keys)))
527 if not self._visible(db, cpv, metadata):
529 if not result or cpv == portage.best([cpv, result]):
# Any other match level is unsupported by this facade.
533 raise NotImplementedError(level)
536 def execute(self,searchkey):
537 """Performs the search for the supplied search key"""
539 self.searchkey=searchkey
540 self.packagematches = []
543 self.matches = {"pkg":[], "desc":[], "set":[]}
546 self.matches = {"pkg":[], "set":[]}
547 print "Searching... ",
# Leading '%' selects regex mode; leading '@' restricts to sets
# (per the surrounding startswith/strip logic).
550 if self.searchkey.startswith('%'):
552 self.searchkey = self.searchkey[1:]
553 if self.searchkey.startswith('@'):
555 self.searchkey = self.searchkey[1:]
557 self.searchre=re.compile(self.searchkey,re.I)
559 self.searchre=re.compile(re.escape(self.searchkey), re.I)
# Name matching pass over every category/package.
560 for package in self.portdb.cp_all():
561 self.spinner.update()
564 match_string = package[:]
566 match_string = package.split("/")[-1]
569 if self.searchre.search(match_string):
570 if not self.portdb.xmatch("match-visible", package):
572 self.matches["pkg"].append([package,masked])
573 elif self.searchdesc: # DESCRIPTION searching
574 full_package = self.portdb.xmatch("bestmatch-visible", package)
576 #no match found; we don't want to query description
577 full_package = portage.best(
578 self.portdb.xmatch("match-all", package))
584 full_desc = self.portdb.aux_get(
585 full_package, ["DESCRIPTION"])[0]
587 print "emerge: search: aux_get() failed, skipping"
589 if self.searchre.search(full_desc):
590 self.matches["desc"].append([full_package,masked])
# Package-set matching pass (sets are never "masked").
592 self.sdict = self.setconfig.getSets()
593 for setname in self.sdict:
594 self.spinner.update()
596 match_string = setname
598 match_string = setname.split("/")[-1]
600 if self.searchre.search(match_string):
601 self.matches["set"].append([setname, False])
602 elif self.searchdesc:
603 if self.searchre.search(
604 self.sdict[setname].getMetadata("DESCRIPTION")):
605 self.matches["set"].append([setname, False])
608 for mtype in self.matches:
609 self.matches[mtype].sort()
610 self.mlen += len(self.matches[mtype])
613 """Outputs the results of the search."""
614 print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
615 print "[ Applications found : "+white(str(self.mlen))+" ]"
617 vardb = self.vartree.dbapi
618 for mtype in self.matches:
619 for match,masked in self.matches[mtype]:
623 full_package = self.portdb.xmatch(
624 "bestmatch-visible", match)
626 #no match found; we don't want to query description
628 full_package = portage.best(
629 self.portdb.xmatch("match-all",match))
630 elif mtype == "desc":
632 match = portage.cpv_getkey(match)
634 print green("*")+" "+white(match)
635 print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
639 desc, homepage, license = self.portdb.aux_get(
640 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
642 print "emerge: search: aux_get() failed, skipping"
645 print green("*")+" "+white(match)+" "+red("[ Masked ]")
647 print green("*")+" "+white(match)
648 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
652 mycat = match.split("/")[0]
653 mypkg = match.split("/")[1]
654 mycpv = match + "-" + myversion
# Download-size calculation: for ebuilds, sum the distfile sizes
# recorded in the Manifest; for binary packages, stat the tbz2.
655 myebuild = self.portdb.findname(mycpv)
657 pkgdir = os.path.dirname(myebuild)
658 from portage import manifest
659 mf = manifest.Manifest(
660 pkgdir, self.settings["DISTDIR"])
661 fetchlist = self.portdb.getfetchlist(mycpv,
662 mysettings=self.settings, all=True)[1]
664 mysum[0] = mf.getDistfilesSize(fetchlist)
666 file_size_str = "Unknown (missing digest for %s)" % \
671 if db is not vardb and \
672 db.cpv_exists(mycpv):
674 if not myebuild and hasattr(db, "bintree"):
675 myebuild = db.bintree.getname(mycpv)
677 mysum[0] = os.stat(myebuild).st_size
682 if myebuild and file_size_str is None:
683 mystr = str(mysum[0] / 1024)
687 mystr = mystr[:mycount] + "," + mystr[mycount:]
688 file_size_str = mystr + " kB"
692 print " ", darkgreen("Latest version available:"),myversion
693 print " ", self.getInstallationStatus(mycat+'/'+mypkg)
696 (darkgreen("Size of files:"), file_size_str)
697 print " ", darkgreen("Homepage:")+" ",homepage
698 print " ", darkgreen("Description:")+" ",desc
699 print " ", darkgreen("License:")+" ",license
# Reports the best installed version of a package, if any.
705 def getInstallationStatus(self,package):
706 installed_package = self.vartree.dep_bestmatch(package)
708 version = self.getVersion(installed_package,search.VERSION_RELEASE)
710 result = darkgreen("Latest version installed:")+" "+version
712 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extracts the version (and -rN suffix when detail asks for the
# release and the revision is not r0) from a full cpv.
715 def getVersion(self,full_package,detail):
716 if len(full_package) > 1:
717 package_parts = portage.catpkgsplit(full_package)
718 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
719 result = package_parts[2]+ "-" + package_parts[3]
721 result = package_parts[2]
# Bundles the per-ROOT state (settings, trees, package sets) that the
# depgraph needs to track for each installation root.
726 class RootConfig(object):
727 """This is used internally by depgraph to track information about a
729 def __init__(self, settings, trees, setconfig):
731 self.settings = settings
732 self.root = self.settings["ROOT"]
733 self.setconfig = setconfig
734 self.sets = self.setconfig.getSets()
# Cache of visible packages, populated lazily by the depgraph.
735 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# Decides what atom (if any) to record in the world file for a package
# the user asked for: a slot atom when the argument pins a unique slot
# of a multi-slot package, a plain cp atom otherwise, and nothing when
# the package is already covered by the system set or existing world
# entries. NOTE(review): several interior lines are missing here.
737 def create_world_atom(pkg_key, metadata, args_set, root_config):
738 """Create a new atom for the world file if one does not exist. If the
739 argument atom is precise enough to identify a specific slot then a slot
740 atom will be returned. Atoms that are in the system set may also be stored
741 in world since system atoms can only match one slot while world atoms can
742 be greedy with respect to slots. Unslotted system packages will not be
744 arg_atom = args_set.findAtomForPackage(pkg_key, metadata)
747 cp = portage.dep_getkey(arg_atom)
749 sets = root_config.sets
750 portdb = root_config.trees["porttree"].dbapi
751 vardb = root_config.trees["vartree"].dbapi
# A package counts as "slotted" when more than one SLOT exists for its
# cp, or its single slot is not the default "0".
752 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
753 for cpv in portdb.match(cp))
754 slotted = len(available_slots) > 1 or \
755 (len(available_slots) == 1 and "0" not in available_slots)
757 # check the vdb in case this is multislot
758 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
759 for cpv in vardb.match(cp))
760 slotted = len(available_slots) > 1 or \
761 (len(available_slots) == 1 and "0" not in available_slots)
762 if slotted and arg_atom != cp:
763 # If the user gave a specific atom, store it as a
764 # slot atom in the world file.
765 slot_atom = "%s:%s" % (cp, metadata["SLOT"])
767 # For USE=multislot, there are a couple of cases to
770 # 1) SLOT="0", but the real SLOT spontaneously changed to some
771 # unknown value, so just record an unslotted atom.
773 # 2) SLOT comes from an installed package and there is no
774 # matching SLOT in the portage tree.
776 # Make sure that the slot atom is available in either the
777 # portdb or the vardb, since otherwise the user certainly
778 # doesn't want the SLOT atom recorded in the world file
779 # (case 1 above). If it's only available in the vardb,
780 # the user may be trying to prevent a USE=multislot
781 # package from being removed by --depclean (case 2 above).
784 if not portdb.match(slot_atom):
785 # SLOT seems to come from an installed multislot package
787 # If there is no installed package matching the SLOT atom,
788 # it probably changed SLOT spontaneously due to USE=multislot,
789 # so just record an unslotted atom.
790 if vardb.match(slot_atom):
791 # Now verify that the argument is precise
792 # enough to identify a specific slot.
793 matches = mydb.match(arg_atom)
794 matched_slots = set()
796 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
797 if len(matched_slots) == 1:
798 new_world_atom = slot_atom
800 if new_world_atom == sets["world"].findAtomForPackage(pkg_key, metadata):
801 # Both atoms would be identical, so there's nothing to add.
804 # Unlike world atoms, system atoms are not greedy for slots, so they
805 # can't be safely excluded from world if they are slotted.
806 system_atom = sets["system"].findAtomForPackage(pkg_key, metadata)
808 if not portage.dep_getkey(system_atom).startswith("virtual/"):
810 # System virtuals aren't safe to exclude from world since they can
811 # match multiple old-style virtuals but only one of them will be
812 # pulled in by update or depclean.
813 providers = portdb.mysettings.getvirtuals().get(
814 portage.dep_getkey(system_atom))
815 if providers and len(providers) == 1 and providers[0] == cp:
817 return new_world_atom
# Strips the +/- default-enable prefixes from IUSE flags (the yield/strip
# lines are not visible in this excerpt).
819 def filter_iuse_defaults(iuse):
821 if flag.startswith("+") or flag.startswith("-"):
# Lightweight base class: walks the MRO collecting __slots__ names and
# initializes each declared slot from the matching keyword argument
# (defaulting to None). Subclasses only declare __slots__.
826 class SlotObject(object):
827 __slots__ = ("__weakref__",)
829 def __init__(self, **kwargs):
830 classes = [self.__class__]
835 classes.extend(c.__bases__)
836 slots = getattr(c, "__slots__", None)
840 myvalue = kwargs.get(myattr, None)
841 setattr(self, myattr, myvalue)
# Base for dependency priorities: rich comparisons are delegated to the
# integer value computed by a subclass __int__() method.
843 class AbstractDepPriority(SlotObject):
844 __slots__ = ("buildtime", "runtime", "runtime_post")
846 def __lt__(self, other):
847 return self.__int__() < other
849 def __le__(self, other):
850 return self.__int__() <= other
852 def __eq__(self, other):
853 return self.__int__() == other
855 def __ne__(self, other):
856 return self.__int__() != other
858 def __gt__(self, other):
859 return self.__int__() > other
861 def __ge__(self, other):
862 return self.__int__() >= other
# NOTE(review): the def copy(self): line is missing from this excerpt.
866 return copy.copy(self)
868 class DepPriority(AbstractDepPriority):
870 This class generates an integer priority level based of various
871 attributes of the dependency relationship. Attributes can be assigned
872 at any time and the new integer value will be generated on calls to the
873 __int__() method. Rich comparison operators are supported.
875 The boolean attributes that affect the integer value are "satisfied",
876 "buildtime", "runtime", and "system". Various combinations of
877 attributes lead to the following priority levels:
879 Combination of properties Priority Category
881 not satisfied and buildtime 0 HARD
882 not satisfied and runtime -1 MEDIUM
883 not satisfied and runtime_post -2 MEDIUM_SOFT
884 satisfied and buildtime and rebuild -3 SOFT
885 satisfied and buildtime -4 SOFT
886 satisfied and runtime -5 SOFT
887 satisfied and runtime_post -6 SOFT
888 (none of the above) -6 SOFT
890 Several integer constants are defined for categorization of priority
893 MEDIUM The upper boundary for medium dependencies.
894 MEDIUM_SOFT The upper boundary for medium-soft dependencies.
895 SOFT The upper boundary for soft dependencies.
896 MIN The lower boundary for soft dependencies.
898 __slots__ = ("satisfied", "rebuild")
# __int__ maps the attribute combination to the table above; only
# fragments of it are visible in this excerpt.
905 if not self.satisfied:
910 if self.runtime_post:
918 if self.runtime_post:
# __str__ maps the integer value to a category name via the class
# boundary constants (MEDIUM / MEDIUM_SOFT / SOFT).
923 myvalue = self.__int__()
924 if myvalue > self.MEDIUM:
926 if myvalue > self.MEDIUM_SOFT:
928 if myvalue > self.SOFT:
# Priority used for blocker ("!atom") dependencies; a single shared
# instance is attached to the class.
932 class BlockerDepPriority(DepPriority):
937 BlockerDepPriority.instance = BlockerDepPriority()
# Priority scheme for unmerge (depclean/prune) ordering; smaller table
# than DepPriority, with only a SOFT boundary.
939 class UnmergeDepPriority(AbstractDepPriority):
942 Combination of properties Priority Category
947 (none of the above) -2 SOFT
957 if self.runtime_post:
964 myvalue = self.__int__()
965 if myvalue > self.SOFT:
969 class FakeVartree(portage.vartree):
970 """This is implements an in-memory copy of a vartree instance that provides
971 all the interfaces required for use by the depgraph. The vardb is locked
972 during the constructor call just long enough to read a copy of the
973 installed package information. This allows the depgraph to do it's
974 dependency calculations without holding a lock on the vardb. It also
975 allows things like vardb global updates to be done in memory so that the
976 user doesn't necessarily need write access to the vardb in cases where
977 global updates are necessary (updates are performed when necessary if there
978 is not a matching ebuild in the tree)."""
979 def __init__(self, real_vartree, portdb,
980 db_keys, pkg_cache, acquire_lock=1):
981 self.root = real_vartree.root
982 self.settings = real_vartree.settings
# COUNTER and SLOT are always fetched: COUNTER disambiguates multiple
# installs in one slot, SLOT keys the per-slot bookkeeping below.
984 for required_key in ("COUNTER", "SLOT"):
985 if required_key not in mykeys:
986 mykeys.append(required_key)
987 self._pkg_cache = pkg_cache
988 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
989 vdb_path = os.path.join(self.root, portage.VDB_PATH)
991 # At least the parent needs to exist for the lock file.
992 portage.util.ensure_dirs(vdb_path)
993 except portage.exception.PortageException:
# Lock the real vdb only while snapshotting it, and only when we can
# actually write the lock file.
997 if acquire_lock and os.access(vdb_path, os.W_OK):
998 vdb_lock = portage.locks.lockdir(vdb_path)
999 real_dbapi = real_vartree.dbapi
# Snapshot every installed cpv into the in-memory dbapi, reusing
# cached Package instances where available.
1001 for cpv in real_dbapi.cpv_all():
1002 cache_key = ("installed", self.root, cpv, "nomerge")
1003 pkg = self._pkg_cache.get(cache_key)
1005 metadata = pkg.metadata
1007 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1008 myslot = metadata["SLOT"]
1009 mycp = portage.dep_getkey(cpv)
1010 myslot_atom = "%s:%s" % (mycp, myslot)
1012 mycounter = long(metadata["COUNTER"])
1015 metadata["COUNTER"] = str(mycounter)
# Per slot, keep only the package with the highest COUNTER
# (i.e. the most recently merged one).
1016 other_counter = slot_counters.get(myslot_atom, None)
1017 if other_counter is not None:
1018 if other_counter > mycounter:
1020 slot_counters[myslot_atom] = mycounter
1022 pkg = Package(built=True, cpv=cpv,
1023 installed=True, metadata=metadata,
1024 root=self.root, type_name="installed")
1025 self._pkg_cache[pkg] = pkg
1026 self.dbapi.cpv_inject(pkg)
1027 real_dbapi.flush_cache()
1030 portage.locks.unlockdir(vdb_lock)
1031 # Populate the old-style virtuals using the cached values.
1032 if not self.settings.treeVirtuals:
1033 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1034 portage.getCPFromCPV, self.get_all_provides())
1036 # Intialize variables needed for lazy cache pulls of the live ebuild
1037 # metadata. This ensures that the vardb lock is released ASAP, without
1038 # being delayed in case cache generation is triggered.
1039 self._aux_get = self.dbapi.aux_get
1040 self.dbapi.aux_get = self._aux_get_wrapper
1041 self._match = self.dbapi.match
1042 self.dbapi.match = self._match_wrapper
1043 self._aux_get_history = set()
1044 self._portdb_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1045 self._portdb = portdb
1046 self._global_updates = None
1048 def _match_wrapper(self, cpv, use_cache=1):
1050 Make sure the metadata in Package instances gets updated for any
1051 cpv that is returned from a match() call, since the metadata can
1052 be accessed directly from the Package instance instead of via
1055 matches = self._match(cpv, use_cache=use_cache)
1057 if cpv in self._aux_get_history:
1059 self._aux_get_wrapper(cpv, [])
# Lazy refresh: on first aux_get for a pkg, pull live *DEPEND from the
# port tree; on failure, fall back to applying profile update entries.
1062 def _aux_get_wrapper(self, pkg, wants):
1063 if pkg in self._aux_get_history:
1064 return self._aux_get(pkg, wants)
1065 self._aux_get_history.add(pkg)
1067 # Use the live ebuild metadata if possible.
1068 live_metadata = dict(izip(self._portdb_keys,
1069 self._portdb.aux_get(pkg, self._portdb_keys)))
1070 self.dbapi.aux_update(pkg, live_metadata)
1071 except (KeyError, portage.exception.PortageException):
1072 if self._global_updates is None:
1073 self._global_updates = \
1074 grab_global_updates(self._portdb.porttree_root)
1075 perform_global_updates(
1076 pkg, self.dbapi, self._global_updates)
1077 return self._aux_get(pkg, wants)
# Reads and parses all update commands (pkg moves / slot moves) from
# $PORTDIR/profiles/updates. A missing directory is tolerated (the
# except branch's body is not visible in this excerpt).
1079 def grab_global_updates(portdir):
1080 from portage.update import grab_updates, parse_updates
1081 updpath = os.path.join(portdir, "profiles", "updates")
1083 rawupdates = grab_updates(updpath)
1084 except portage.exception.DirectoryNotFound:
1087 for mykey, mystat, mycontent in rawupdates:
1088 commands, errors = parse_updates(mycontent)
1089 upd_commands.extend(commands)
# Applies profile update commands to one package's *DEPEND metadata in
# the given db (used to fix stale dependency atoms in memory).
1092 def perform_global_updates(mycpv, mydb, mycommands):
1093 from portage.update import update_dbentries
1094 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1095 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1096 updates = update_dbentries(mycommands, aux_dict)
1098 mydb.aux_update(mycpv, updates)
# Series of masking checks; each failing check leads to a False return
# (the return lines are not visible in this excerpt).
1100 def visible(pkgsettings, pkg):
1102 Check if a package is visible. This can raise an InvalidDependString
1103 exception if LICENSE is invalid.
1104 TODO: optionally generate a list of masking reasons
1106 @returns: True if the package is visible, False otherwise.
1108 if not pkg.metadata["SLOT"]:
# Binary packages built for a different CHOST are hidden.
1110 if pkg.built and not pkg.installed:
1111 pkg_chost = pkg.metadata.get("CHOST")
1112 if pkg_chost and pkg_chost != pkgsettings["CHOST"]:
1114 if not portage.eapi_is_supported(pkg.metadata["EAPI"]):
1116 if not pkg.installed and \
1117 pkgsettings.getMissingKeywords(pkg.cpv, pkg.metadata):
1119 if pkgsettings.getMaskAtom(pkg.cpv, pkg.metadata):
1121 if pkgsettings.getProfileMaskAtom(pkg.cpv, pkg.metadata):
1124 if pkgsettings.getMissingLicenses(pkg.cpv, pkg.metadata):
1126 except portage.exception.InvalidDependString:
# Collects human-readable masking reasons for a package, augmenting
# portage.getmaskingstatus() with CHOST-mismatch and missing-SLOT checks
# that the core function does not cover.
1130 def get_masking_status(pkg, pkgsettings, root_config):
1132 mreasons = portage.getmaskingstatus(
1133 pkg, settings=pkgsettings,
1134 portdb=root_config.trees["porttree"].dbapi)
1136 if pkg.built and not pkg.installed:
1137 pkg_chost = pkg.metadata.get("CHOST")
1138 if pkg_chost and pkg_chost != pkgsettings["CHOST"]:
1139 mreasons.append("CHOST: %s" % \
1140 pkg.metadata["CHOST"])
1142 if not pkg.metadata["SLOT"]:
1143 mreasons.append("invalid: SLOT is undefined")
# Fetches a package's metadata from the given db and computes its mask
# reasons; unreadable metadata is reported as "corruption".
1147 def get_mask_info(root_config, cpv, pkgsettings,
1148 db, pkg_type, built, installed, db_keys):
1151 metadata = dict(izip(db_keys,
1152 db.aux_get(cpv, db_keys)))
# For ebuilds (not built/binary), compute USE for this cpv so the
# masking checks see the effective flags.
1155 if metadata and not built:
1156 pkgsettings.setcpv(cpv, mydb=metadata)
1157 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1158 if metadata is None:
1159 mreasons = ["corruption"]
1161 pkg = Package(type_name=pkg_type, root=root_config.root,
1162 cpv=cpv, built=built, installed=installed, metadata=metadata)
1163 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1164 return metadata, mreasons
# Prints the masked-package report: one line per cpv with its mask
# reasons, plus (deduplicated) package.mask comments and license-file
# locations. Returns whether any package was masked by unsupported EAPI.
1166 def show_masked_packages(masked_packages):
1167 shown_licenses = set()
1168 shown_comments = set()
1169 # Maybe there is both an ebuild and a binary. Only
1170 # show one of them to avoid redundant appearance.
1172 have_eapi_mask = False
1173 for (root_config, pkgsettings, cpv,
1174 metadata, mreasons) in masked_packages:
1175 if cpv in shown_cpvs:
1178 comment, filename = None, None
1179 if "package.mask" in mreasons:
1180 comment, filename = \
1181 portage.getmaskingreason(
1182 cpv, metadata=metadata,
1183 settings=pkgsettings,
1184 portdb=root_config.trees["porttree"].dbapi,
1185 return_location=True)
1186 missing_licenses = []
1188 if not portage.eapi_is_supported(metadata["EAPI"]):
1189 have_eapi_mask = True
1191 missing_licenses = \
1192 pkgsettings.getMissingLicenses(
1194 except portage.exception.InvalidDependString:
1195 # This will have already been reported
1196 # above via mreasons.
1199 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
# package.mask comments are shown at most once each.
1200 if comment and comment not in shown_comments:
1203 shown_comments.add(comment)
1204 portdb = root_config.trees["porttree"].dbapi
# Point the user at the license file for each missing license, once.
1205 for l in missing_licenses:
1206 l_path = portdb.findLicensePath(l)
1207 if l in shown_licenses:
1209 msg = ("A copy of the '%s' license" + \
1210 " is located at '%s'.") % (l, l_path)
1213 shown_licenses.add(l)
1214 return have_eapi_mask
class Task(SlotObject):
    """Base class for hashable graph nodes (packages, blockers).

    Identity is delegated to a tuple returned by _get_hash_key(), which
    subclasses compute lazily and cache in _hash_key. All container
    protocol methods (len/iter/getitem/contains/str) proxy to that tuple
    so a Task compares equal to its legacy tuple representation.

    NOTE(review): the excerpt truncated `return hash_key` and the
    `def __hash__/__len__/__iter__/__str__` header lines; restored from
    the visible method bodies.
    """
    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        # Subclasses must assign self._hash_key before this returns.
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            raise NotImplementedError(self)
        return hash_key

    def __eq__(self, other):
        # Compares against the hash-key tuple, so a Task equals the
        # equivalent plain tuple as well as another Task.
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

    def __hash__(self):
        # Cache the hash since the key tuple is immutable once set.
        hash_value = getattr(self, "_hash_value", None)
        if hash_value is None:
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

    def __len__(self):
        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

    def __iter__(self):
        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

    def __str__(self):
        return str(self._get_hash_key())
class Blocker(Task):
    """A blocker node: an atom (prefixed "!") that forbids a package.

    NOTE(review): the `self._hash_key = ...` assignment line was
    truncated in the excerpt; restored, since the visible
    `return self._hash_key` requires it.
    """
    __slots__ = ("root", "atom", "cp", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # cp is the category/package key of the blocked atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            self._hash_key = \
                ("blocks", self.root, self.atom)
        return self._hash_key
class Package(Task):
    """A package node in the dependency graph.

    Wraps a cpv plus its metadata dict and precomputes derived keys
    (cp, slot_atom, cpv_slot, category, pf, pv_split). The hash key is
    (type_name, root, cpv, operation) where operation defaults to
    "merge", or "nomerge" for onlydeps/installed packages.

    NOTE(review): the `metadata_keys = [` header line and the
    return True/False lines of the comparison operators were truncated
    in the excerpt; restored. The comparators only order packages that
    share the same cp — cross-cp comparisons return False.
    """
    __slots__ = ("built", "cpv", "depth",
        "installed", "metadata", "onlydeps", "operation",
        "root", "type_name",
        "category", "cp", "cpv_slot", "pf", "pv_split", "slot_atom")

    # Metadata keys that the depgraph needs for every package.
    metadata_keys = [
        "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
        "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
        "repository", "RESTRICT", "SLOT", "USE"]

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        self.cp = portage.cpv_getkey(self.cpv)
        self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
        self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
        self.category, self.pf = portage.catsplit(self.cpv)
        self.pv_split = portage.catpkgsplit(self.cpv)[1:]

    def _get_hash_key(self):
        hash_key = getattr(self, "_hash_key", None)
        if hash_key is None:
            if self.operation is None:
                self.operation = "merge"
                if self.onlydeps or self.installed:
                    self.operation = "nomerge"
            self._hash_key = \
                (self.type_name, self.root, self.cpv, self.operation)
        return self._hash_key

    def __lt__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
            return True
        return False

    def __le__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
            return True
        return False

    def __gt__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
            return True
        return False

    def __ge__(self, other):
        if other.cp != self.cp:
            return False
        if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
            return True
        return False
class DependencyArg(object):
    """Base class for command-line dependency arguments.

    Stores the raw argument string and the RootConfig it applies to.

    NOTE(review): the excerpt truncated the `self.arg = arg` line;
    restored, since the SetArg subclass visibly reads self.arg.
    """
    def __init__(self, arg=None, root_config=None):
        self.arg = arg
        self.root_config = root_config
class AtomArg(DependencyArg):
    """A dependency argument that is a single atom.

    NOTE(review): the excerpt truncated `self.atom = atom`; restored,
    since the visible `self.set = (self.atom, )` reads it.
    """
    def __init__(self, atom=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.atom = atom
        # Present the single atom as a one-element set for uniform
        # handling with SetArg.
        self.set = (self.atom, )
class PackageArg(DependencyArg):
    """A dependency argument that directly names a Package instance.

    The package is pinned with an exact "=" atom built from its cpv,
    exposed as a one-element set for uniform handling with SetArg.
    """
    def __init__(self, package=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.package = package
        # Exact-version atom for this package.
        self.atom = "=" + package.cpv
        self.set = (self.atom, )
class SetArg(DependencyArg):
    """A dependency argument that names a package set (e.g. @world).

    NOTE(review): the excerpt truncated the `self.set = set` line;
    restored, since the `set` parameter is otherwise unused and the
    sibling Arg classes all expose a .set attribute.
    """
    def __init__(self, set=None, **kwargs):
        DependencyArg.__init__(self, **kwargs)
        self.set = set
        # Strip the set prefix ("@") from the raw argument string.
        self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
    """One edge to resolve: an atom required by a parent package.

    priority defaults to a fresh DepPriority and depth to 0 when the
    caller does not supply them.

    NOTE(review): the excerpt truncated the body of the
    `if self.depth is None` branch; `self.depth = 0` restored by
    symmetry with the priority default above it.
    """
    __slots__ = ("atom", "blocker", "depth",
        "parent", "onlydeps", "priority", "root")
    def __init__(self, **kwargs):
        SlotObject.__init__(self, **kwargs)
        if self.priority is None:
            self.priority = DepPriority()
        if self.depth is None:
            self.depth = 0
1361 class BlockerCache(DictMixin):
1362 """This caches blockers of installed packages so that dep_check does not
1363 have to be done for every single installed package on every invocation of
1364 emerge. The cache is invalidated whenever it is detected that something
1365 has changed that might alter the results of dep_check() calls:
1366 1) the set of installed packages (including COUNTER) has changed
1367 2) the old-style virtuals have changed
# NOTE(review): excerpt is missing interior lines (try/except bodies,
# several branch bodies, method headers such as __iter__/flush/keys) —
# treat this view as incomplete and verify against upstream emerge.
1369 class BlockerData(object):
# Lightweight record pairing an installed package's COUNTER with its
# cached blocker atoms.
1371 __slots__ = ("__weakref__", "atoms", "counter")
1373 def __init__(self, counter, atoms):
1374 self.counter = counter
1377 def __init__(self, myroot, vardb):
1379 self._virtuals = vardb.settings.getvirtuals()
1380 self._cache_filename = os.path.join(myroot,
1381 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
1382 self._cache_version = "1"
1383 self._cache_data = None
1384 self._modified = False
# Best-effort load of the pickled cache; any I/O or unpickling error
# simply leaves the cache to be rebuilt below.
1389 f = open(self._cache_filename)
1390 mypickle = cPickle.Unpickler(f)
# find_global = None disables resolution of arbitrary globals while
# unpickling, restricting what a crafted cache file can instantiate.
1391 mypickle.find_global = None
1392 self._cache_data = mypickle.load()
1395 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
1397 cache_valid = self._cache_data and \
1398 isinstance(self._cache_data, dict) and \
1399 self._cache_data.get("version") == self._cache_version and \
1400 isinstance(self._cache_data.get("blockers"), dict)
1402 # Validate all the atoms and counters so that
1403 # corruption is detected as soon as possible.
1404 invalid_items = set()
1405 for k, v in self._cache_data["blockers"].iteritems():
1406 if not isinstance(k, basestring):
1407 invalid_items.add(k)
1410 if portage.catpkgsplit(k) is None:
1411 invalid_items.add(k)
1413 except portage.exception.InvalidData:
1414 invalid_items.add(k)
1416 if not isinstance(v, tuple) or \
1418 invalid_items.add(k)
1421 if not isinstance(counter, (int, long)):
1422 invalid_items.add(k)
1424 if not isinstance(atoms, list):
1425 invalid_items.add(k)
1427 invalid_atom = False
1429 if not isinstance(atom, basestring):
1432 if atom[:1] != "!" or \
1433 not portage.isvalidatom(
1434 atom, allow_blockers=True):
1438 invalid_items.add(k)
1441 for k in invalid_items:
1442 del self._cache_data["blockers"][k]
1443 if not self._cache_data["blockers"]:
# Fresh empty cache structure used when the on-disk cache is absent,
# stale, or fails validation.
1447 self._cache_data = {"version":self._cache_version}
1448 self._cache_data["blockers"] = {}
1449 self._cache_data["virtuals"] = self._virtuals
1450 self._modified = False
1453 """If the current user has permission and the internal blocker cache
1454 been updated, save it to disk and mark it unmodified. This is called
1455 by emerge after it has proccessed blockers for all installed packages.
1456 Currently, the cache is only written if the user has superuser
1457 privileges (since that's required to obtain a lock), but all users
1458 have read access and benefit from faster blocker lookups (as long as
1459 the entire cache is still valid). The cache is stored as a pickled
1460 dict object with the following format:
1464 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
1465 "virtuals" : vardb.settings.getvirtuals()
1468 if self._modified and \
1471 f = portage.util.atomic_ofstream(self._cache_filename)
1472 cPickle.dump(self._cache_data, f, -1)
1474 portage.util.apply_secpass_permissions(
1475 self._cache_filename, gid=portage.portage_gid, mode=0644)
# Write failures (e.g. insufficient privileges) are tolerated; the
# cache is an optimization, not required for correctness.
1476 except (IOError, OSError), e:
1478 self._modified = False
1480 def __setitem__(self, cpv, blocker_data):
1482 Update the cache and mark it as modified for a future call to
1485 @param cpv: Package for which to cache blockers.
1487 @param blocker_data: An object with counter and atoms attributes.
1488 @type blocker_data: BlockerData
1490 self._cache_data["blockers"][cpv] = \
1491 (blocker_data.counter, blocker_data.atoms)
1492 self._modified = True
1495 return iter(self._cache_data["blockers"])
1497 def __delitem__(self, cpv):
1498 del self._cache_data["blockers"][cpv]
1499 self._modified = True
1501 def __getitem__(self, cpv):
1504 @returns: An object with counter and atoms attributes.
1506 return self.BlockerData(*self._cache_data["blockers"][cpv])
1509 """This needs to be implemented so that self.__repr__() doesn't raise
1510 an AttributeError."""
1513 class BlockerDB(object):
# Finds installed packages that block a new package (and vice versa),
# using BlockerCache to avoid re-running dep_check per installed package.
# NOTE(review): excerpt is missing interior lines (try/finally around the
# _dep_check_strict toggles, some assignments and continue/raise lines) —
# verify against upstream emerge before editing.
1515 def __init__(self, vartree, portdb):
1516 self._vartree = vartree
1517 self._portdb = portdb
1518 self._blocker_cache = \
1519 BlockerCache(self._vartree.root, vartree.dbapi)
1520 self._dep_check_trees = { self._vartree.root : {
1521 "porttree" : self._vartree,
1522 "vartree" : self._vartree,
1525 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
# Returns the set of installed Package instances that block new_pkg,
# checking both directions (installed blocks new, new blocks installed).
1526 blocker_cache = self._blocker_cache
1527 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1528 dep_check_trees = self._dep_check_trees
1529 settings = self._vartree.settings
# Any cpv remaining in stale_cache after the scan below is no longer
# installed and its cache entry is purged.
1530 stale_cache = set(blocker_cache)
1532 FakeVartree(self._vartree,
1533 self._portdb, Package.metadata_keys, {},
1534 acquire_lock=acquire_lock)
1535 vardb = fake_vartree.dbapi
1536 installed_pkgs = list(vardb)
1538 for inst_pkg in installed_pkgs:
1539 stale_cache.discard(inst_pkg.cpv)
1540 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A COUNTER mismatch means the installed instance changed since the
# cache entry was written, so the entry is discarded.
1541 if cached_blockers is not None and \
1542 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
1543 cached_blockers = None
1544 if cached_blockers is not None:
1545 blocker_atoms = cached_blockers.atoms
1547 myuse = inst_pkg.metadata["USE"].split()
1548 # Use aux_get() to trigger FakeVartree global
1549 # updates on *DEPEND when appropriate.
1550 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
1552 portage.dep._dep_check_strict = False
1553 success, atoms = portage.dep_check(depstr,
1554 vardb, settings, myuse=myuse,
1555 trees=dep_check_trees, myroot=inst_pkg.root)
1557 portage.dep._dep_check_strict = True
1559 pkg_location = os.path.join(inst_pkg.root,
1560 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
1561 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
1562 (pkg_location, atoms), noiselevel=-1)
1565 blocker_atoms = [atom for atom in atoms \
1566 if atom.startswith("!")]
1567 blocker_atoms.sort()
1568 counter = long(inst_pkg.metadata["COUNTER"])
1569 blocker_cache[inst_pkg.cpv] = \
1570 blocker_cache.BlockerData(counter, blocker_atoms)
1571 for cpv in stale_cache:
1572 del blocker_cache[cpv]
1573 blocker_cache.flush()
1575 blocker_parents = digraph()
1577 for pkg in installed_pkgs:
1578 for blocker_atom in self._blocker_cache[pkg.cpv].atoms:
# Strip the leading "!" to get the plain atom being blocked.
1579 blocker_atom = blocker_atom[1:]
1580 blocker_atoms.append(blocker_atom)
1581 blocker_parents.add(blocker_atom, pkg)
1583 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1584 blocking_pkgs = set()
1585 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
1586 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
1588 # Check for blockers in the other direction.
1589 myuse = new_pkg.metadata["USE"].split()
1590 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
1592 portage.dep._dep_check_strict = False
1593 success, atoms = portage.dep_check(depstr,
1594 vardb, settings, myuse=myuse,
1595 trees=dep_check_trees, myroot=new_pkg.root)
1597 portage.dep._dep_check_strict = True
1599 # We should never get this far with invalid deps.
1600 show_invalid_depstring_notice(new_pkg, depstr, atoms)
1603 blocker_atoms = [atom[1:] for atom in atoms \
1604 if atom.startswith("!")]
1606 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1607 for inst_pkg in installed_pkgs:
# next() on the iterator is used as an existence probe: StopIteration
# means no blocker atom matches this installed package.
1609 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
1610 except (portage.exception.InvalidDependString, StopIteration):
1612 blocking_pkgs.add(inst_pkg)
1614 return blocking_pkgs
1616 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
# Prints a word-wrapped explanation of a corrupt *DEPEND string, with
# remediation advice that differs for installed vs. to-be-installed
# packages. NOTE(review): excerpt is missing interior lines (msg init,
# the print of depstring/error_msg, the else header) — incomplete view.
1618 from formatter import AbstractFormatter, DumbWriter
1619 f = AbstractFormatter(DumbWriter(maxcol=72))
1621 print "\n\n!!! Invalid or corrupt dependency specification: "
# parent_node is unpacked as the legacy 4-tuple hash key
# (type_name, root, cpv, operation).
1629 p_type, p_root, p_key, p_status = parent_node
1631 if p_status == "nomerge":
1632 category, pf = portage.catsplit(p_key)
1633 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
1634 msg.append("Portage is unable to process the dependencies of the ")
1635 msg.append("'%s' package. " % p_key)
1636 msg.append("In order to correct this problem, the package ")
1637 msg.append("should be uninstalled, reinstalled, or upgraded. ")
1638 msg.append("As a temporary workaround, the --nodeps option can ")
1639 msg.append("be used to ignore all dependencies. For reference, ")
1640 msg.append("the problematic dependencies can be found in the ")
1641 msg.append("*DEPEND files located in '%s/'." % pkg_location)
1643 msg.append("This package can not be installed. ")
1644 msg.append("Please notify the '%s' package maintainer " % p_key)
1645 msg.append("about this problem.")
1648 f.add_flowing_data(x)
1651 class PackageVirtualDbapi(portage.dbapi):
1653 A dbapi-like interface class that represents the state of the installed
1654 package database as new packages are installed, replacing any packages
1655 that previously existed in the same slot. The main difference between
1656 this class and fakedbapi is that this one uses Package instances
1657 internally (passed in via cpv_inject() and cpv_remove() calls).
# NOTE(review): excerpt is missing interior lines (copy()/__iter__/cp_all/
# cpv_all method headers, several returns and branch bodies) — verify
# against upstream emerge before editing.
1659 def __init__(self, settings):
1660 portage.dbapi.__init__(self)
1661 self.settings = settings
# Memoizes match() results; cleared whenever contents change.
1662 self._match_cache = {}
# Deep-ish copy: the per-cp lists are duplicated so mutations of the
# copy do not leak back into the original.
1667 obj = PackageVirtualDbapi(self.settings)
1668 obj._match_cache = self._match_cache.copy()
1669 obj._cp_map = self._cp_map.copy()
1670 for k, v in obj._cp_map.iteritems():
1671 obj._cp_map[k] = v[:]
1672 obj._cpv_map = self._cpv_map.copy()
1676 return self._cpv_map.itervalues()
1678 def __contains__(self, item):
1679 existing = self._cpv_map.get(item.cpv)
1680 if existing is not None and \
1685 def match_pkgs(self, atom):
1686 return [self._cpv_map[cpv] for cpv in self.match(atom)]
1688 def _clear_cache(self):
1689 if self._categories is not None:
1690 self._categories = None
1691 if self._match_cache:
1692 self._match_cache = {}
1694 def match(self, origdep, use_cache=1):
# Cache hit returns the previously computed list; miss delegates to
# the base dbapi implementation and memoizes.
1695 result = self._match_cache.get(origdep)
1696 if result is not None:
1698 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
1699 self._match_cache[origdep] = result
1702 def cpv_exists(self, cpv):
1703 return cpv in self._cpv_map
1705 def cp_list(self, mycp, use_cache=1):
1706 cachelist = self._match_cache.get(mycp)
1707 # cp_list() doesn't expand old-style virtuals
1708 if cachelist and cachelist[0].startswith(mycp):
1710 cpv_list = self._cp_map.get(mycp)
1711 if cpv_list is None:
1714 cpv_list = [pkg.cpv for pkg in cpv_list]
1715 self._cpv_sort_ascending(cpv_list)
# Empty results for virtual/* are not cached, presumably because
# virtuals can be expanded later — TODO confirm against upstream.
1716 if not (not cpv_list and mycp.startswith("virtual/")):
1717 self._match_cache[mycp] = cpv_list
1721 return list(self._cp_map)
1724 return list(self._cpv_map)
1726 def cpv_inject(self, pkg):
# Inserts pkg, displacing any existing package with the same cpv or
# occupying the same slot.
1727 cp_list = self._cp_map.get(pkg.cp)
1730 self._cp_map[pkg.cp] = cp_list
1731 e_pkg = self._cpv_map.get(pkg.cpv)
1732 if e_pkg is not None:
1735 self.cpv_remove(e_pkg)
1736 for e_pkg in cp_list:
1737 if e_pkg.slot_atom == pkg.slot_atom:
1740 self.cpv_remove(e_pkg)
1743 self._cpv_map[pkg.cpv] = pkg
1746 def cpv_remove(self, pkg):
1747 old_pkg = self._cpv_map.get(pkg.cpv)
1750 self._cp_map[pkg.cp].remove(pkg)
1751 del self._cpv_map[pkg.cpv]
1754 def aux_get(self, cpv, wants):
1755 metadata = self._cpv_map[cpv].metadata
# Missing keys are returned as empty strings, matching dbapi style.
1756 return [metadata.get(x, "") for x in wants]
1758 def aux_update(self, cpv, values):
1759 self._cpv_map[cpv].metadata.update(values)
1762 class depgraph(object):
# Core dependency-graph builder for emerge. The class continues past
# this excerpt. NOTE(review): the dict literal below is truncated (its
# opening line and the "binary" entry are missing from this view); it
# maps package type_name to the tree name that stores that type.
1765 "ebuild":"porttree",
1767 "installed":"vartree"}
# Metadata keys fetched for every package considered by the graph.
1769 _mydbapi_keys = Package.metadata_keys
1771 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1773 def __init__(self, settings, trees, myopts, myparams, spinner):
# Builds per-root working state: FakeVartree wrappers, cloned
# pkgsettings, the fake vdb (mydbapi) modelling post-merge state, and
# the db search order for package selection. NOTE(review): excerpt is
# missing interior lines (self.trees/self.roots/self.mydbapi init,
# edebug, graph_tree definition, dbs list init, self._sets init) —
# treat this view as incomplete.
1774 self.settings = settings
1775 self.target_root = settings["ROOT"]
1776 self.myopts = myopts
1777 self.myparams = myparams
1779 if settings.get("PORTAGE_DEBUG", "") == "1":
1781 self.spinner = spinner
1782 self.pkgsettings = {}
1783 # Maps slot atom to package for each Package added to the graph.
1784 self._slot_pkg_map = {}
1785 # Maps nodes to the reasons they were selected for reinstallation.
1786 self._reinstall_nodes = {}
1789 self._trees_orig = trees
1791 # Contains a filtered view of preferred packages that are selected
1792 # from available repositories.
1793 self._filtered_trees = {}
1794 # Contains installed packages and new packages that have been added
1796 self._graph_trees = {}
1797 # All Package instances
1798 self._pkg_cache = self._package_cache(self)
1799 for myroot in trees:
1800 self.trees[myroot] = {}
1801 # Create a RootConfig instance that references
1802 # the FakeVartree instead of the real one.
1803 self.roots[myroot] = RootConfig(
1804 trees[myroot]["vartree"].settings,
1806 trees[myroot]["root_config"].setconfig)
1807 for tree in ("porttree", "bintree"):
1808 self.trees[myroot][tree] = trees[myroot][tree]
1809 self.trees[myroot]["vartree"] = \
1810 FakeVartree(trees[myroot]["vartree"],
1811 trees[myroot]["porttree"].dbapi,
1812 self._mydbapi_keys, self._pkg_cache)
1813 self.pkgsettings[myroot] = portage.config(
1814 clone=self.trees[myroot]["vartree"].settings)
1815 self._slot_pkg_map[myroot] = {}
1816 vardb = self.trees[myroot]["vartree"].dbapi
1817 preload_installed_pkgs = "--nodeps" not in self.myopts and \
1818 "--buildpkgonly" not in self.myopts
1819 # This fakedbapi instance will model the state that the vdb will
1820 # have after new packages have been installed.
1821 fakedb = PackageVirtualDbapi(vardb.settings)
1822 if preload_installed_pkgs:
1823 for cpv in vardb.cpv_all():
1824 self.spinner.update()
1825 metadata = dict(izip(self._mydbapi_keys,
1826 vardb.aux_get(cpv, self._mydbapi_keys)))
1827 pkg = Package(built=True, cpv=cpv,
1828 installed=True, metadata=metadata,
1829 root=myroot, type_name="installed")
1830 self._pkg_cache[pkg] = pkg
1831 fakedb.cpv_inject(pkg)
1832 self.mydbapi[myroot] = fakedb
1835 graph_tree.dbapi = fakedb
1836 self._graph_trees[myroot] = {}
1837 self._filtered_trees[myroot] = {}
1838 # Substitute the graph tree for the vartree in dep_check() since we
1839 # want atom selections to be consistent with package selections
1840 # have already been made.
1841 self._graph_trees[myroot]["porttree"] = graph_tree
1842 self._graph_trees[myroot]["vartree"] = graph_tree
1843 def filtered_tree():
1845 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
1846 self._filtered_trees[myroot]["porttree"] = filtered_tree
1848 # Passing in graph_tree as the vartree here could lead to better
1849 # atom selections in some cases by causing atoms for packages that
1850 # have been added to the graph to be preferred over other choices.
1851 # However, it can trigger atom selections that result in
1852 # unresolvable direct circular dependencies. For example, this
1853 # happens with gwydion-dylan which depends on either itself or
1854 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
1855 # gwydion-dylan-bin needs to be selected in order to avoid a
1856 # an unresolvable direct circular dependency.
1858 # To solve the problem described above, pass in "graph_db" so that
1859 # packages that have been added to the graph are distinguishable
1860 # from other available packages and installed packages. Also, pass
1861 # the parent package into self._select_atoms() calls so that
1862 # unresolvable direct circular dependencies can be detected and
1863 # avoided when possible.
1864 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
1865 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
1868 portdb = self.trees[myroot]["porttree"].dbapi
1869 bindb = self.trees[myroot]["bintree"].dbapi
1870 vardb = self.trees[myroot]["vartree"].dbapi
1871 # (db, pkg_type, built, installed, db_keys)
1872 if "--usepkgonly" not in self.myopts:
1873 db_keys = list(portdb._aux_cache_keys)
1874 dbs.append((portdb, "ebuild", False, False, db_keys))
1875 if "--usepkg" in self.myopts:
1876 db_keys = list(bindb._aux_cache_keys)
1877 dbs.append((bindb, "binary", True, False, db_keys))
1878 db_keys = self._mydbapi_keys
1879 dbs.append((vardb, "installed", True, True, db_keys))
1880 self._filtered_trees[myroot]["dbs"] = dbs
1881 if "--usepkg" in self.myopts:
1882 self.trees[myroot]["bintree"].populate(
1883 "--getbinpkg" in self.myopts,
1884 "--getbinpkgonly" in self.myopts)
1887 self.digraph=portage.digraph()
1888 # contains all sets added to the graph
1890 # contains atoms given as arguments
1891 self._sets["args"] = InternalPackageSet()
1892 # contains all atoms from all sets added to the graph, including
1893 # atoms given as arguments
1894 self._set_atoms = InternalPackageSet()
1895 self._atom_arg_map = {}
1896 # contains all nodes pulled in by self._set_atoms
1897 self._set_nodes = set()
1898 # Contains only Blocker -> Uninstall edges
1899 self._blocker_uninstalls = digraph()
1900 # Contains only Package -> Blocker edges
1901 self._blocker_parents = digraph()
1902 # Contains only irrelevant Package -> Blocker edges
1903 self._irrelevant_blockers = digraph()
1904 # Contains only unsolvable Package -> Blocker edges
1905 self._unsolvable_blockers = digraph()
1906 self._slot_collision_info = set()
1907 # Slot collision nodes are not allowed to block other packages since
1908 # blocker validation is only able to account for one package per slot.
1909 self._slot_collision_nodes = set()
1910 self._serialized_tasks_cache = None
1911 self._displayed_list = None
1912 self._pprovided_args = []
1913 self._missing_args = []
1914 self._masked_installed = []
1915 self._unsatisfied_deps_for_display = []
1916 self._unsatisfied_blockers_for_display = None
1917 self._circular_deps_for_display = None
1918 self._dep_stack = []
1919 self._unsatisfied_deps = []
1920 self._ignored_deps = []
1921 self._required_set_names = set(["system", "world"])
1922 self._select_atoms = self._select_atoms_highest_available
1923 self._select_package = self._select_pkg_highest_available
1924 self._highest_pkg_cache = {}
1926 def _show_slot_collision_notice(self):
1927 """Show an informational message advising the user to mask one of the
1928 the packages. In some cases it may be possible to resolve this
1929 automatically, but support for backtracking (removal nodes that have
1930 already been selected) will be required in order to handle all possible
# NOTE(review): excerpt is missing interior lines (msg list init,
# max_parents value, slot_nodes/pruned_list init, indent definition,
# several print/return lines) — treat this view as incomplete.
1933 if not self._slot_collision_info:
1936 self._show_merge_list()
1939 msg.append("\n!!! Multiple versions within a single " + \
1940 "package slot have been pulled\n")
1941 msg.append("!!! into the dependency graph, resulting" + \
1942 " in a slot conflict:\n\n")
1944 # Max number of parents shown, to avoid flooding the display.
1946 for slot_atom, root in self._slot_collision_info:
1947 msg.append(slot_atom)
1950 for node in self._slot_collision_nodes:
1951 if node.slot_atom == slot_atom:
1952 slot_nodes.append(node)
# The package currently occupying the slot is shown alongside the
# colliding candidates.
1953 slot_nodes.append(self._slot_pkg_map[root][slot_atom])
1954 for node in slot_nodes:
1956 msg.append(str(node))
1957 parents = self.digraph.parent_nodes(node)
1960 if len(parents) > max_parents:
1962 # When generating the pruned list, prefer instances
1963 # of DependencyArg over instances of Package.
1964 for parent in parents:
1965 if isinstance(parent, DependencyArg):
1966 pruned_list.append(parent)
1967 # Prefer Packages instances that themselves have been
1968 # pulled into collision slots.
1969 for parent in parents:
1970 if isinstance(parent, Package) and \
1971 (parent.slot_atom, parent.root) \
1972 in self._slot_collision_info:
1973 pruned_list.append(parent)
1974 for parent in parents:
1975 if len(pruned_list) >= max_parents:
1977 if not isinstance(parent, DependencyArg) and \
1978 parent not in pruned_list:
1979 pruned_list.append(parent)
1980 omitted_parents = len(parents) - len(pruned_list)
1981 parents = pruned_list
1982 msg.append(" pulled in by\n")
1983 for parent in parents:
1984 msg.append(2*indent)
1985 msg.append(str(parent))
1988 msg.append(2*indent)
1989 msg.append("(and %d more)\n" % omitted_parents)
1991 msg.append(" (no parents)\n")
1994 sys.stderr.write("".join(msg))
1997 if "--quiet" in self.myopts:
2001 msg.append("It may be possible to solve this problem ")
2002 msg.append("by using package.mask to prevent one of ")
2003 msg.append("those packages from being selected. ")
2004 msg.append("However, it is also possible that conflicting ")
2005 msg.append("dependencies exist such that they are impossible to ")
2006 msg.append("satisfy simultaneously. If such a conflict exists in ")
2007 msg.append("the dependencies of two different packages, then those ")
2008 msg.append("packages can not be installed simultaneously.")
2010 from formatter import AbstractFormatter, DumbWriter
2011 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
2013 f.add_flowing_data(x)
2017 msg.append("For more information, see MASKED PACKAGES ")
2018 msg.append("section in the emerge man page or refer ")
2019 msg.append("to the Gentoo Handbook.")
2021 f.add_flowing_data(x)
2025 def _reinstall_for_flags(self, forced_flags,
2026 orig_use, orig_iuse, cur_use, cur_iuse):
2027 """Return a set of flags that trigger reinstallation, or None if there
2028 are no such flags."""
2029 if "--newuse" in self.myopts:
2030 flags = orig_iuse.symmetric_difference(
2031 cur_iuse).difference(forced_flags)
2032 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2033 cur_iuse.intersection(cur_use)))
2036 elif "changed-use" == self.myopts.get("--reinstall"):
2037 flags = orig_iuse.intersection(orig_use).symmetric_difference(
2038 cur_iuse.intersection(cur_use))
2043 def _create_graph(self, allow_unsatisfied=False):
2044 dep_stack = self._dep_stack
2046 self.spinner.update()
2047 dep = dep_stack.pop()
2048 if isinstance(dep, Package):
2049 if not self._add_pkg_deps(dep,
2050 allow_unsatisfied=allow_unsatisfied):
2053 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
2057 def _add_dep(self, dep, allow_unsatisfied=False):
# Resolves one Dependency: blocker atoms become Blocker nodes; normal
# atoms are matched to a package via _select_package and handed to
# _add_pkg. NOTE(review): excerpt is missing interior lines (the
# blocker-atom test, returns, try headers) — incomplete view.
2058 debug = "--debug" in self.myopts
2059 buildpkgonly = "--buildpkgonly" in self.myopts
2060 nodeps = "--nodeps" in self.myopts
2061 empty = "empty" in self.myparams
2062 deep = "deep" in self.myparams
# --update only applies to direct dependencies of arguments (depth <= 1).
2063 update = "--update" in self.myopts and dep.depth <= 1
2065 if not buildpkgonly and \
2067 dep.parent not in self._slot_collision_nodes:
2068 if dep.parent.onlydeps:
2069 # It's safe to ignore blockers if the
2070 # parent is an --onlydeps node.
2072 # The blocker applies to the root where
2073 # the parent is or will be installed.
2074 blocker = Blocker(atom=dep.atom, root=dep.parent.root)
2075 self._blocker_parents.add(blocker, dep.parent)
2077 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2078 onlydeps=dep.onlydeps)
2080 if allow_unsatisfied:
2081 self._unsatisfied_deps.append(dep)
2083 self._unsatisfied_deps_for_display.append(
2084 ((dep.root, dep.atom), {"myparent":dep.parent}))
2086 # In some cases, dep_check will return deps that shouldn't
2087 # be proccessed any further, so they are identified and
2088 # discarded here. Try to discard as few as possible since
2089 # discarded dependencies reduce the amount of information
2090 # available for optimization of merge order.
2091 if dep.priority.satisfied and \
2092 not (existing_node or empty or deep or update):
2094 if dep.root == self.target_root:
# next() probes whether any argument atom matches dep_pkg;
# StopIteration means it is not an argument package.
2096 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2097 except StopIteration:
2099 except portage.exception.InvalidDependString:
2100 if not dep_pkg.installed:
2101 # This shouldn't happen since the package
2102 # should have been masked.
2105 self._ignored_deps.append(dep)
2108 if not self._add_pkg(dep_pkg, dep.parent,
2109 priority=dep.priority, depth=dep.depth):
2113 def _add_pkg(self, pkg, myparent, priority=None, depth=0):
2114 if priority is None:
2115 priority = DepPriority()
# NOTE(review): excerpt is missing interior lines (docstring quotes,
# try headers, returns, edge-priority arguments) — incomplete view.
2117 Fills the digraph with nodes comprised of packages to merge.
2118 mybigkey is the package spec of the package to merge.
2119 myparent is the package depending on mybigkey ( or None )
2120 addme = Should we add this package to the digraph or are we just looking at it's deps?
2121 Think --onlydeps, we need to ignore packages in that case.
2124 #IUSE-aware emerge -> USE DEP aware depgraph
2125 #"no downgrade" emerge
2128 # select the correct /var database that we'll be checking against
2129 vardbapi = self.trees[pkg.root]["vartree"].dbapi
2130 pkgsettings = self.pkgsettings[pkg.root]
2136 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
2137 except portage.exception.InvalidDependString, e:
2138 if not pkg.installed:
2139 show_invalid_depstring_notice(
2140 pkg, pkg.metadata["PROVIDE"], str(e))
2144 args = [arg for arg, atom in arg_atoms]
2146 if not pkg.onlydeps:
2147 if not pkg.installed and \
2148 "empty" not in self.myparams and \
2149 vardbapi.match(pkg.slot_atom):
2150 # Increase the priority of dependencies on packages that
2151 # are being rebuilt. This optimizes merge order so that
2152 # dependencies are rebuilt/updated as soon as possible,
2153 # which is needed especially when emerge is called by
2154 # revdep-rebuild since dependencies may be affected by ABI
2155 # breakage that has rendered them useless. Don't adjust
2156 # priority here when in "empty" mode since all packages
2157 # are being merged in that case.
2158 priority.rebuild = True
2160 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
2161 slot_collision = False
2163 if pkg.cpv == existing_node.cpv:
2164 # The existing node can be reused.
2167 self.digraph.add(existing_node, arg,
2169 # If a direct circular dependency is not an unsatisfied
2170 # buildtime dependency then drop it here since otherwise
2171 # it can skew the merge order calculation in an unwanted
2173 if existing_node != myparent or \
2174 (priority.buildtime and not priority.satisfied):
2175 self.digraph.addnode(existing_node, myparent,
2179 if pkg in self._slot_collision_nodes:
2181 # A slot collision has occurred. Sometimes this coincides
2182 # with unresolvable blockers, so the slot collision will be
2183 # shown later if there are no unresolvable blockers.
2184 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
2185 self._slot_collision_nodes.add(pkg)
2186 slot_collision = True
2189 # Now add this node to the graph so that self.display()
2190 # can show use flags and --tree portage.output. This node is
2191 # only being partially added to the graph. It must not be
2192 # allowed to interfere with the other nodes that have been
2193 # added. Do not overwrite data for existing nodes in
2194 # self.mydbapi since that data will be used for blocker
2196 # Even though the graph is now invalid, continue to process
2197 # dependencies so that things like --fetchonly can still
2198 # function despite collisions.
2201 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
2202 self.mydbapi[pkg.root].cpv_inject(pkg)
2204 self.digraph.addnode(pkg, myparent, priority=priority)
2206 if not pkg.installed:
2207 # Allow this package to satisfy old-style virtuals in case it
2208 # doesn't already. Any pre-existing providers will be preferred
2211 pkgsettings.setinst(pkg.cpv, pkg.metadata)
2212 # For consistency, also update the global virtuals.
2213 settings = self.roots[pkg.root].settings
2215 settings.setinst(pkg.cpv, pkg.metadata)
2217 except portage.exception.InvalidDependString, e:
2218 show_invalid_depstring_notice(
2219 pkg, pkg.metadata["PROVIDE"], str(e))
2224 # Warn if an installed package is masked and it
2225 # is pulled into the graph.
2226 if not visible(pkgsettings, pkg):
2227 self._masked_installed.append((pkg, pkgsettings))
2230 self._set_nodes.add(pkg)
2232 # Do this even when addme is False (--onlydeps) so that the
2233 # parent/child relationship is always known in case
2234 # self._show_slot_collision_notice() needs to be called later.
2236 self.digraph.add(pkg, myparent, priority=priority)
2239 self.digraph.add(pkg, arg, priority=priority)
2241 """ This section determines whether we go deeper into dependencies or not.
2242 We want to go deeper on a few occasions:
2243 Installing package A, we need to make sure package A's deps are met.
2244 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
2245 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
2247 dep_stack = self._dep_stack
2248 if "recurse" not in self.myparams:
# Installed packages are only recursed into with --deep; otherwise
# their deps go to _ignored_deps.
2250 elif pkg.installed and \
2251 "deep" not in self.myparams:
2252 dep_stack = self._ignored_deps
2254 self.spinner.update()
2259 dep_stack.append(pkg)
# Queue this package's DEPEND/RDEPEND/PDEPEND atoms as Dependency objects
# via self._add_dep().  Build-time deps are resolved against the "/" root
# while runtime deps resolve against the package's own root (myroot).
# NOTE(review): the embedded line numbers in this listing are not
# contiguous, so some statements of the original method are missing from
# this view; only the visible lines are annotated.
2262 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
2264 mytype = pkg.type_name
2267 metadata = pkg.metadata
2268 myuse = metadata["USE"].split()
2270 depth = pkg.depth + 1
2273 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
2275 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty only needs build-time deps, so the
# runtime dep strings are emptied for packages that are not yet built.
2277 if not pkg.built and \
2278 "--buildpkgonly" in self.myopts and \
2279 "deep" not in self.myparams and \
2280 "empty" not in self.myparams:
2281 edepend["RDEPEND"] = ""
2282 edepend["PDEPEND"] = ""
2283 bdeps_satisfied = False
2284 if mytype in ("installed", "binary"):
2285 if self.myopts.get("--with-bdeps", "n") == "y":
2286 # Pull in build time deps as requested, but mark them as
2287 # "satisfied" since they are not strictly required. This allows
2288 # more freedom in the merge order calculation for solving
2289 # circular dependencies. Don't convert to PDEPEND since that
2290 # could make --with-bdeps=y less effective if it is used to
2291 # adjust merge order to prevent built_with_use() calls from
2293 bdeps_satisfied = True
2295 # built packages do not have build time dependencies.
2296 edepend["DEPEND"] = ""
2299 ("/", edepend["DEPEND"],
2300 DepPriority(buildtime=True, satisfied=bdeps_satisfied)),
2301 (myroot, edepend["RDEPEND"], DepPriority(runtime=True)),
2302 (myroot, edepend["PDEPEND"], DepPriority(runtime_post=True))
2305 debug = "--debug" in self.myopts
# Installed packages get relaxed dep-string parsing (strict=False).
2306 strict = mytype != "installed"
2308 for dep_root, dep_string, dep_priority in deps:
2310 # Decrease priority so that --buildpkgonly
2311 # hasallzeros() works correctly.
2312 dep_priority = DepPriority()
2317 print "Parent: ", jbigkey
2318 print "Depstring:", dep_string
2319 print "Priority:", dep_priority
2320 vardb = self.roots[dep_root].trees["vartree"].dbapi
2322 selected_atoms = self._select_atoms(dep_root,
2323 dep_string, myuse=myuse, parent=pkg, strict=strict)
2324 except portage.exception.InvalidDependString, e:
2325 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
2328 print "Candidates:", selected_atoms
2329 for atom in selected_atoms:
2330 blocker = atom.startswith("!")
# An atom already satisfied by an installed package gets a "satisfied"
# priority copy, giving the merge-order solver more freedom.
2333 mypriority = dep_priority.copy()
2334 if not blocker and vardb.match(atom):
2335 mypriority.satisfied = True
2336 if not self._add_dep(Dependency(atom=atom,
2337 blocker=blocker, depth=depth, parent=pkg,
2338 priority=mypriority, root=dep_root),
2339 allow_unsatisfied=allow_unsatisfied):
2342 print "Exiting...", jbigkey
# A ValueError whose first arg is a list apparently signals an
# ambiguous (not fully-qualified) atom with multiple matches; the
# raising site is outside this view — TODO confirm.
2343 except ValueError, e:
2344 if not e.args or not isinstance(e.args[0], list) or \
2348 portage.writemsg("\n\n!!! An atom in the dependencies " + \
2349 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
2351 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
2352 portage.writemsg("\n", noiselevel=-1)
2353 if mytype == "binary":
2355 "!!! This binary package cannot be installed: '%s'\n" % \
2356 mykey, noiselevel=-1)
2357 elif mytype == "ebuild":
2358 portdb = self.roots[myroot].trees["porttree"].dbapi
2359 myebuild, mylocation = portdb.findname2(mykey)
2360 portage.writemsg("!!! This ebuild cannot be installed: " + \
2361 "'%s'\n" % myebuild, noiselevel=-1)
2362 portage.writemsg("!!! Please notify the package maintainer " + \
2363 "that atoms must be fully-qualified.\n", noiselevel=-1)
# Expand a category-less atom into qualified "cat/pn" atoms by scanning
# every package db configured for this root.
2367 def _dep_expand(self, root_config, atom_without_category):
2369 @param root_config: a root config instance
2370 @type root_config: RootConfig
2371 @param atom_without_category: an atom without a category component
2372 @type atom_without_category: String
2374 @returns: a list of atoms containing categories (possibly empty)
# Insert the dummy "null" category just to make the atom parseable so
# the package-name part can be extracted via catsplit().
2376 null_cp = portage.dep_getkey(insert_category_into_atom(
2377 atom_without_category, "null"))
2378 cat, atom_pn = portage.catsplit(null_cp)
2381 for db, pkg_type, built, installed, db_keys in \
2382 self._filtered_trees[root_config.root]["dbs"]:
2383 cp_set.update(db.cp_all())
2384 for cp in list(cp_set):
2385 cat, pn = portage.catsplit(cp)
# presumably cp_set was filtered above to cps whose pn matches atom_pn
# — the filtering lines are omitted from this numbered listing.
2390 cat, pn = portage.catsplit(cp)
2391 deps.append(insert_category_into_atom(
2392 atom_without_category, cat))
# Report whether any configured package db for `root` actually carries
# packages under atom_cp (i.e. a new-style virtual provider exists).
2395 def _have_new_virt(self, root, atom_cp):
2397 for db, pkg_type, built, installed, db_keys in \
2398 self._filtered_trees[root]["dbs"]:
2399 if db.cp_list(atom_cp):
# presumably returns True here (and False after the loop) — those
# lines are omitted from this numbered listing; TODO confirm.
# Iterate over the command-line argument atoms that match `pkg`,
# skipping atoms redirected to new-style virtuals and (apparently)
# atoms satisfiable by a higher version in another slot — the yield
# statements themselves are omitted from this numbered listing.
2404 def _iter_atoms_for_pkg(self, pkg):
2405 # TODO: add multiple $ROOT support
2406 if pkg.root != self.target_root:
2408 atom_arg_map = self._atom_arg_map
2409 root_config = self.roots[pkg.root]
2410 for atom in self._set_atoms.iterAtomsForPackage(pkg):
2411 atom_cp = portage.dep_getkey(atom)
# If the atom's cp differs from the package's and a new-style virtual
# exists for it, the old-style match is not used.
2412 if atom_cp != pkg.cp and \
2413 self._have_new_virt(pkg.root, atom_cp):
2415 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
2416 visible_pkgs.reverse() # descending order
2418 for visible_pkg in visible_pkgs:
2419 if visible_pkg.cp != atom_cp:
2421 if pkg >= visible_pkg:
2422 # This is descending order, and we're not
2423 # interested in any versions <= pkg given.
2425 if pkg.slot_atom != visible_pkg.slot_atom:
2426 higher_slot = visible_pkg
2428 if higher_slot is not None:
2430 for arg in atom_arg_map[(atom, pkg.root)]:
2431 if isinstance(arg, PackageArg) and \
# Entry point for resolving command-line arguments: classifies each item
# (.tbz2 binary, .ebuild file, absolute path, named set, or dep atom),
# builds Arg wrappers, seeds the dependency graph, and returns a
# (success, favorites) pair.  NOTE(review): the embedded line numbers in
# this listing skip values, so parts of the original method are missing
# from this view.
2436 def select_files(self, myfiles):
2437 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
2438 appropriate depgraph and return a favorite list."""
2439 root_config = self.roots[self.target_root]
2440 sets = root_config.sets
2441 getSetAtoms = root_config.setconfig.getSetAtoms
2443 myroot = self.target_root
2444 dbs = self._filtered_trees[myroot]["dbs"]
2445 vardb = self.trees[myroot]["vartree"].dbapi
2446 portdb = self.trees[myroot]["porttree"].dbapi
2447 bindb = self.trees[myroot]["bintree"].dbapi
2448 pkgsettings = self.pkgsettings[myroot]
2450 onlydeps = "--onlydeps" in self.myopts
# --- Case: binary package file (.tbz2), searched under PKGDIR ---
2452 ext = os.path.splitext(x)[1]
2454 if not os.path.exists(x):
2456 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2457 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2458 elif os.path.exists(
2459 os.path.join(pkgsettings["PKGDIR"], x)):
2460 x = os.path.join(pkgsettings["PKGDIR"], x)
2462 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
2463 print "!!! Please ensure the tbz2 exists as specified.\n"
2464 return 0, myfavorites
2465 mytbz2=portage.xpak.tbz2(x)
2466 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The given file must be the same file the bintree would resolve for
# this cpv, otherwise PKGDIR is pointing somewhere else.
2467 if os.path.realpath(x) != \
2468 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
2469 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
2470 return 0, myfavorites
2471 metadata = dict(izip(self._mydbapi_keys,
2472 bindb.aux_get(mykey, self._mydbapi_keys)))
2473 pkg = Package(type_name="binary", root=myroot,
2474 cpv=mykey, built=True, metadata=metadata,
2476 self._pkg_cache[pkg] = pkg
2477 args.append(PackageArg(arg=x, package=pkg,
2478 root_config=root_config))
# --- Case: raw .ebuild file path inside a portage tree ---
2479 elif ext==".ebuild":
2480 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2481 pkgdir = os.path.dirname(ebuild_path)
2482 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2483 cp = pkgdir[len(tree_root)+1:]
2484 e = portage.exception.PackageNotFound(
2485 ("%s is not in a valid portage tree " + \
2486 "hierarchy or does not exist") % x)
2487 if not portage.isvalidatom(cp):
2489 cat = portage.catsplit(cp)[0]
2490 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2491 if not portage.isvalidatom("="+mykey):
2493 ebuild_path = portdb.findname(mykey)
2495 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2496 cp, os.path.basename(ebuild_path)):
2497 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
2498 return 0, myfavorites
2499 if mykey not in portdb.xmatch(
2500 "match-visible", portage.dep_getkey(mykey)):
2501 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
2502 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
2503 print colorize("BAD", "*** page for details.")
2504 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
2507 raise portage.exception.PackageNotFound(
2508 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2509 metadata = dict(izip(self._mydbapi_keys,
2510 portdb.aux_get(mykey, self._mydbapi_keys)))
2511 pkgsettings.setcpv(mykey, mydb=metadata)
2512 metadata["USE"] = pkgsettings["PORTAGE_USE"]
2513 pkg = Package(type_name="ebuild", root=myroot,
2514 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
2515 self._pkg_cache[pkg] = pkg
2516 args.append(PackageArg(arg=x, package=pkg,
2517 root_config=root_config))
# --- Case: absolute filesystem path; map it to the owning package ---
2518 elif x.startswith(os.path.sep):
2519 if not x.startswith(myroot):
2520 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2521 " $ROOT.\n") % x, noiselevel=-1)
2523 relative_path = x[len(myroot):]
2524 vartree = self._trees_orig[myroot]["vartree"]
# Linear scan of all installed packages asking each dblink whether it
# owns the path; potentially slow, hence the spinner updates.
2526 for cpv in vardb.cpv_all():
2527 self.spinner.update()
2528 cat, pf = portage.catsplit(cpv)
2529 if portage.dblink(cat, pf, myroot,
2530 pkgsettings, vartree=vartree).isowner(
2531 relative_path, myroot):
2534 if owner_cpv is None:
2535 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2536 "by any package.\n") % x, noiselevel=-1)
2538 slot = vardb.aux_get(owner_cpv, ["SLOT"])[0]
2540 # portage now masks packages with missing slot, but it's
2541 # possible that one was installed by an older version
2542 atom = portage.cpv_getkey(owner_cpv)
2544 atom = "%s:%s" % (portage.cpv_getkey(owner_cpv), slot)
2545 args.append(AtomArg(arg=atom, atom=atom,
2546 root_config=root_config))
# --- Case: named package sets (system/world and @set syntax) ---
2548 if x in ("system", "world"):
2550 if x.startswith(SETPREFIX):
2551 s = x[len(SETPREFIX):]
2553 raise portage.exception.PackageNotFound(
2554 "emerge: there are no sets to satisfy '%s'." % s)
2557 # Recursively expand sets so that containment tests in
2558 # self._get_parent_sets() properly match atoms in nested
2559 # sets (like if world contains system).
2560 expanded_set = InternalPackageSet(
2561 initial_atoms=getSetAtoms(s))
2562 self._sets[s] = expanded_set
2563 args.append(SetArg(arg=x, set=expanded_set,
2564 root_config=root_config))
2565 myfavorites.append(x)
# --- Case: plain dependency atom ---
2567 if not is_valid_package_atom(x):
2568 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2570 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2571 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2573 # Don't expand categories or old-style virtuals here unless
2574 # necessary. Expansion of old-style virtuals here causes at
2575 # least the following problems:
2576 # 1) It's more difficult to determine which set(s) an atom
2577 # came from, if any.
2578 # 2) It takes away freedom from the resolver to choose other
2579 # possible expansions when necessary.
2581 args.append(AtomArg(arg=x, atom=x,
2582 root_config=root_config))
2584 expanded_atoms = self._dep_expand(root_config, x)
2585 installed_cp_set = set()
2586 for atom in expanded_atoms:
2587 atom_cp = portage.dep_getkey(atom)
2588 if vardb.cp_list(atom_cp):
2589 installed_cp_set.add(atom_cp)
# Ambiguity is resolved in favor of the single already-installed cp.
2590 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2591 installed_cp = iter(installed_cp_set).next()
2592 expanded_atoms = [atom for atom in expanded_atoms \
2593 if portage.dep_getkey(atom) == installed_cp]
2595 if len(expanded_atoms) > 1:
2596 print "\n\n!!! The short ebuild name \"" + x + "\" is ambiguous. Please specify"
2597 print "!!! one of the following fully-qualified ebuild names instead:\n"
2598 expanded_atoms = set(portage.dep_getkey(atom) \
2599 for atom in expanded_atoms)
2600 for i in sorted(expanded_atoms):
2601 print " " + green(i)
2603 return False, myfavorites
2605 atom = expanded_atoms[0]
2607 null_atom = insert_category_into_atom(x, "null")
2608 null_cp = portage.dep_getkey(null_atom)
2609 cat, atom_pn = portage.catsplit(null_cp)
2610 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2612 # Allow the depgraph to choose which virtual.
2613 atom = insert_category_into_atom(x, "virtual")
2615 atom = insert_category_into_atom(x, "null")
2617 args.append(AtomArg(arg=x, atom=atom,
2618 root_config=root_config))
2620 if "--update" in self.myopts:
2621 # Enable greedy SLOT atoms for atoms given as arguments.
2622 # This is currently disabled for sets since greedy SLOT
2623 # atoms could be a property of the set itself.
2626 # In addition to any installed slots, also try to pull
2627 # in the latest new slot that may be available.
2628 greedy_atoms.append(arg)
2629 if not isinstance(arg, (AtomArg, PackageArg)):
2631 atom_cp = portage.dep_getkey(arg.atom)
2633 for cpv in vardb.match(arg.atom):
2634 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2636 greedy_atoms.append(
2637 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
2638 root_config=root_config))
2642 # Create the "args" package set from atoms and
2643 # packages given as arguments.
2644 args_set = self._sets["args"]
2646 if not isinstance(arg, (AtomArg, PackageArg)):
2649 if myatom in args_set:
2651 args_set.add(myatom)
2652 myfavorites.append(myatom)
2653 self._set_atoms.update(chain(*self._sets.itervalues()))
2654 atom_arg_map = self._atom_arg_map
2656 for atom in arg.set:
2657 atom_key = (atom, myroot)
2658 refs = atom_arg_map.get(atom_key)
2661 atom_arg_map[atom_key] = refs
2664 pprovideddict = pkgsettings.pprovideddict
2665 # Order needs to be preserved since a feature of --nodeps
2666 # is to allow the user to force a specific merge order.
# --- Seed the graph with a package selection for each argument atom ---
2670 for atom in arg.set:
2671 self.spinner.update()
2672 atom_cp = portage.dep_getkey(atom)
2674 pprovided = pprovideddict.get(portage.dep_getkey(atom))
2675 if pprovided and portage.match_from_list(atom, pprovided):
2676 # A provided package has been specified on the command line.
2677 self._pprovided_args.append((arg, atom))
2679 if isinstance(arg, PackageArg):
2680 if not self._add_pkg(arg.package, arg) or \
2681 not self._create_graph():
2682 sys.stderr.write(("\n\n!!! Problem resolving " + \
2683 "dependencies for %s\n") % arg.arg)
2684 return 0, myfavorites
2686 pkg, existing_node = self._select_package(
2687 myroot, atom, onlydeps=onlydeps)
2689 if not (isinstance(arg, SetArg) and \
2690 arg.name in ("system", "world")):
2691 self._unsatisfied_deps_for_display.append(
2692 ((myroot, atom), {}))
2693 return 0, myfavorites
2694 self._missing_args.append((arg, atom))
2696 if atom_cp != pkg.cp:
2697 # For old-style virtuals, we need to repeat the
2698 # package.provided check against the selected package.
2699 expanded_atom = atom.replace(atom_cp, pkg.cp)
2700 pprovided = pprovideddict.get(pkg.cp)
2702 portage.match_from_list(expanded_atom, pprovided):
2703 # A provided package has been
2704 # specified on the command line.
2705 self._pprovided_args.append((arg, atom))
2707 if pkg.installed and "selective" not in self.myparams:
2708 self._unsatisfied_deps_for_display.append(
2709 ((myroot, atom), {}))
2710 # Previous behavior was to bail out in this case, but
2711 # since the dep is satisfied by the installed package,
2712 # it's more friendly to continue building the graph
2713 # and just show a warning message. Therefore, only bail
2714 # out here if the atom is not from either the system or
2716 if not (isinstance(arg, SetArg) and \
2717 arg.name in ("system", "world")):
2718 return 0, myfavorites
2720 dep = Dependency(atom=atom, onlydeps=onlydeps,
2721 root=myroot, parent=arg)
2723 # Add the selected package to the graph as soon as possible
2724 # so that later dep_check() calls can use it as feedback
2725 # for making more consistent atom selections.
2726 if not self._add_pkg(pkg, dep.parent,
2727 priority=dep.priority, depth=dep.depth):
2728 if isinstance(arg, SetArg):
2729 sys.stderr.write(("\n\n!!! Problem resolving " + \
2730 "dependencies for %s from %s\n") % \
2733 sys.stderr.write(("\n\n!!! Problem resolving " + \
2734 "dependencies for %s\n") % atom)
2735 return 0, myfavorites
2737 except portage.exception.MissingSignature, e:
2738 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
2739 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
2740 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
2741 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
2742 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
2743 return 0, myfavorites
2744 except portage.exception.InvalidSignature, e:
2745 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
2746 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
2747 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
2748 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
2749 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
2750 return 0, myfavorites
2751 except SystemExit, e:
2752 raise # Needed else can't exit
2753 except Exception, e:
2754 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
2755 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
2758 # Now that the root packages have been added to the graph,
2759 # process the dependencies.
2760 if not self._create_graph():
2761 return 0, myfavorites
# --usepkgonly sanity pass: flag graph nodes scheduled for merge that
# are not binary packages.
2764 if "--usepkgonly" in self.myopts:
2765 for xs in self.digraph.all_nodes():
2766 if not isinstance(xs, Package):
2768 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
2772 print "Missing binary for:",xs[2]
2776 except self._unknown_internal_error:
2777 return False, myfavorites
2779 # We're true here unless we are missing binaries.
2780 return (not missing,myfavorites)
# Thin wrapper: atom selection backed by the graph trees instead of the
# filtered trees.  NOTE(review): the docstring delimiters (original
# lines 2783/2787) are omitted from this numbered listing.
2782 def _select_atoms_from_graph(self, *pargs, **kwargs):
2784 Prefer atoms matching packages that have already been
2785 added to the graph or those that are installed and have
2786 not been scheduled for replacement.
2788 kwargs["trees"] = self._graph_trees
2789 return self._select_atoms_highest_available(*pargs, **kwargs)
# Run portage.dep_check() on a dep string and return the selected atoms.
2791 def _select_atoms_highest_available(self, root, depstring,
2792 myuse=None, parent=None, strict=True, trees=None):
2793 """This will raise InvalidDependString if necessary. If trees is
2794 None then self._filtered_trees is used."""
2795 pkgsettings = self.pkgsettings[root]
2797 trees = self._filtered_trees
# The parent package is smuggled to dep_check() via the trees dict and
# removed again afterwards (presumably in a try/finally — those
# structural lines are omitted from this numbered listing).
2800 if parent is not None:
2801 trees[root]["parent"] = parent
# NOTE: temporarily flips the module-global strictness flag; it is
# restored to True below, matching the module-level default set at
# import time.
2803 portage.dep._dep_check_strict = False
2804 mycheck = portage.dep_check(depstring, None,
2805 pkgsettings, myuse=myuse,
2806 myroot=root, trees=trees)
2808 if parent is not None:
2809 trees[root].pop("parent")
2810 portage.dep._dep_check_strict = True
2812 raise portage.exception.InvalidDependString(mycheck[1])
2813 selected_atoms = mycheck[1]
2814 return selected_atoms
# Print a human-readable explanation for an unsatisfiable atom: either
# all candidates are masked (list them, with an EAPI-upgrade hint) or
# no ebuild exists at all.
2816 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
2817 xinfo = '"%s"' % atom
2820 # Discard null/ from failed cpv_expand category expansion.
2821 xinfo = xinfo.replace("null/", "")
2823 xfrom = '(dependency required by '+ \
2824 green('"%s"' % myparent[2]) + \
2825 red(' [%s]' % myparent[0]) + ')'
2826 masked_packages = []
2827 missing_licenses = []
2828 have_eapi_mask = False
2829 pkgsettings = self.pkgsettings[root]
2830 root_config = self.roots[root]
2831 portdb = self.roots[root].trees["porttree"].dbapi
2832 dbs = self._filtered_trees[root]["dbs"]
2833 for db, pkg_type, built, installed, db_keys in dbs:
# xmatch("match-all") also returns masked candidates, which is what we
# want here; plain match() is the fallback for dbs without xmatch.
2837 if hasattr(db, "xmatch"):
2838 cpv_list = db.xmatch("match-all", atom)
2840 cpv_list = db.match(atom)
2843 for cpv in cpv_list:
2844 metadata, mreasons = get_mask_info(root_config, cpv,
2845 pkgsettings, db, pkg_type, built, installed, db_keys)
2846 masked_packages.append(
2847 (root_config, pkgsettings, cpv, metadata, mreasons))
2850 print "\n!!! "+red("All ebuilds that could satisfy ")+green(xinfo)+red(" have been masked.")
2851 print "!!! One of the following masked packages is required to complete your request:"
2852 have_eapi_mask = show_masked_packages(masked_packages)
2855 msg = ("The current version of portage supports " + \
2856 "EAPI '%s'. You must upgrade to a newer version" + \
2857 " of portage before EAPI masked packages can" + \
2858 " be installed.") % portage.const.EAPI
2859 from textwrap import wrap
2860 for line in wrap(msg, 75):
2865 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
# Memoizing wrapper around _select_pkg_highest_available_imp(), keyed on
# (root, atom, onlydeps).  A cache hit whose package has since been added
# to the graph is refreshed so callers see the graph's slot entry.
2870 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2871 cache_key = (root, atom, onlydeps)
2872 ret = self._highest_pkg_cache.get(cache_key)
2875 if pkg and not existing:
2876 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
2877 if existing and existing == pkg:
2878 # Update the cache to reflect that the
2879 # package has been added to the graph.
2881 self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store.
2883 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2884 self._highest_pkg_cache[cache_key] = ret
# Core package-selection routine: scan all dbs for the root (two passes,
# first preferring nodes already in the graph), collect acceptable
# matches, then return (best_package, existing_graph_node).
# NOTE(review): the embedded line numbers in this listing skip values,
# so some statements of the original method are missing from this view.
2887 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2888 pkgsettings = self.pkgsettings[root]
2889 dbs = self._filtered_trees[root]["dbs"]
2890 vardb = self.roots[root].trees["vartree"].dbapi
2891 portdb = self.roots[root].trees["porttree"].dbapi
2892 # List of acceptable packages, ordered by type preference.
2893 matched_packages = []
2894 highest_version = None
2895 atom_cp = portage.dep_getkey(atom)
2896 existing_node = None
2898 usepkgonly = "--usepkgonly" in self.myopts
2899 empty = "empty" in self.myparams
2900 selective = "selective" in self.myparams
2902 noreplace = "--noreplace" in self.myopts
2903 # Behavior of the "selective" parameter depends on
2904 # whether or not a package matches an argument atom.
2905 # If an installed package provides an old-style
2906 # virtual that is no longer provided by an available
2907 # package, the installed package may match an argument
2908 # atom even though none of the available packages do.
2909 # Therefore, "selective" logic does not consider
2910 # whether or not an installed package matches an
2911 # argument atom. It only considers whether or not
2912 # available packages match argument atoms, which is
2913 # represented by the found_available_arg flag.
2914 found_available_arg = False
2915 for find_existing_node in True, False:
2918 for db, pkg_type, built, installed, db_keys in dbs:
2921 if installed and not find_existing_node:
2922 want_reinstall = reinstall or empty or \
2923 (found_available_arg and not selective)
2924 if want_reinstall and matched_packages:
2926 if hasattr(db, "xmatch"):
2927 cpv_list = db.xmatch("match-all", atom)
2929 cpv_list = db.match(atom)
2931 # USE=multislot can make an installed package appear as if
2932 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2933 # won't do any good as long as USE=multislot is enabled since
2934 # the newly built package still won't have the expected slot.
2935 # Therefore, assume that such SLOT dependencies are already
2936 # satisfied rather than forcing a rebuild.
2937 if installed and not cpv_list and matched_packages \
2938 and portage.dep.dep_getslot(atom):
2939 for pkg in matched_packages:
2940 if vardb.cpv_exists(pkg.cpv):
2941 cpv_list = [pkg.cpv]
2946 pkg_status = "merge"
2947 if installed or onlydeps:
2948 pkg_status = "nomerge"
2951 for cpv in cpv_list:
2952 # Make --noreplace take precedence over --newuse.
2953 if not installed and noreplace and \
2954 cpv in vardb.match(atom):
2955 # If the installed version is masked, it may
2956 # be necessary to look at lower versions,
2957 # in case there is a visible downgrade.
2959 reinstall_for_flags = None
2960 cache_key = (pkg_type, root, cpv, pkg_status)
2961 calculated_use = True
2962 pkg = self._pkg_cache.get(cache_key)
2964 calculated_use = False
2966 metadata = dict(izip(self._mydbapi_keys,
2967 db.aux_get(cpv, self._mydbapi_keys)))
2970 if not built and ("?" in metadata["LICENSE"] or \
2971 "?" in metadata["PROVIDE"]):
2972 # This is avoided whenever possible because
2973 # it's expensive. It only needs to be done here
2974 # if it has an effect on visibility.
2975 pkgsettings.setcpv(cpv, mydb=metadata)
2976 metadata["USE"] = pkgsettings["PORTAGE_USE"]
2977 calculated_use = True
2978 pkg = Package(built=built, cpv=cpv,
2979 installed=installed, metadata=metadata,
2980 onlydeps=onlydeps, root=root, type_name=pkg_type)
2981 self._pkg_cache[pkg] = pkg
2983 if root == self.target_root:
2985 myarg = self._iter_atoms_for_pkg(pkg).next()
2986 except StopIteration:
2988 except portage.exception.InvalidDependString:
2990 # masked by corruption
2992 if not installed and myarg:
2993 found_available_arg = True
2994 if not installed or (installed and matched_packages):
2995 # Only enforce visibility on installed packages
2996 # if there is at least one other visible package
2997 # available. By filtering installed masked packages
2998 # here, packages that have been masked since they
2999 # were installed can be automatically downgraded
3000 # to an unmasked version.
3002 if not visible(pkgsettings, pkg):
3004 except portage.exception.InvalidDependString:
3008 # Enable upgrade or downgrade to a version
3009 # with visible KEYWORDS when the installed
3010 # version is masked by KEYWORDS, but never
3011 # reinstall the same exact version only due
3012 # to a KEYWORDS mask.
3013 if installed and matched_packages and \
3014 pkgsettings.getMissingKeywords(
3015 pkg.cpv, pkg.metadata):
3016 different_version = None
3017 for avail_pkg in matched_packages:
3018 if not portage.dep.cpvequal(
3019 pkg.cpv, avail_pkg.cpv):
3020 different_version = avail_pkg
3022 if different_version is not None:
3023 # Only reinstall for KEYWORDS if
3024 # it's not the same version.
3027 if not built and not calculated_use:
3028 # This is avoided whenever possible because
3030 pkgsettings.setcpv(cpv, mydb=pkg.metadata)
3031 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
3032 if pkg.cp == atom_cp:
3033 if highest_version is None:
3034 highest_version = pkg
3035 elif pkg > highest_version:
3036 highest_version = pkg
3037 # At this point, we've found the highest visible
3038 # match from the current repo. Any lower versions
3039 # from this repo are ignored, so the loop
3040 # will always end with a break statement below
3042 if find_existing_node:
3043 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
3046 cpv_slot = "%s:%s" % \
3047 (e_pkg.cpv, e_pkg.metadata["SLOT"])
3048 if portage.dep.match_from_list(atom, [cpv_slot]):
3049 if highest_version and \
3050 e_pkg.cp == atom_cp and \
3051 e_pkg < highest_version and \
3052 e_pkg.slot_atom != highest_version.slot_atom:
3053 # There is a higher version available in a
3054 # different slot, so this existing node is
3058 matched_packages.append(e_pkg)
3059 existing_node = e_pkg
3061 # Compare built package to current config and
3062 # reject the built package if necessary.
3063 if built and not installed and \
3064 ("--newuse" in self.myopts or \
3065 "--reinstall" in self.myopts):
3066 iuses = set(filter_iuse_defaults(
3067 pkg.metadata["IUSE"].split()))
3068 old_use = pkg.metadata["USE"].split()
3070 if myeb and not usepkgonly:
3073 pkgsettings.setcpv(myeb, mydb=mydb)
3075 pkgsettings.setcpv(cpv, mydb=mydb)
3076 now_use = pkgsettings["PORTAGE_USE"].split()
3077 forced_flags = set()
3078 forced_flags.update(pkgsettings.useforce)
3079 forced_flags.update(pkgsettings.usemask)
3081 if myeb and not usepkgonly:
3082 cur_iuse = set(filter_iuse_defaults(
3083 portdb.aux_get(myeb,
3084 ["IUSE"])[0].split()))
3085 if self._reinstall_for_flags(forced_flags,
3089 # Compare current config to installed package
3090 # and do not reinstall if possible.
3091 if not installed and \
3092 ("--newuse" in self.myopts or \
3093 "--reinstall" in self.myopts) and \
3094 cpv in vardb.match(atom):
3095 pkgsettings.setcpv(cpv, mydb=pkg.metadata)
3096 forced_flags = set()
3097 forced_flags.update(pkgsettings.useforce)
3098 forced_flags.update(pkgsettings.usemask)
3099 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3100 old_iuse = set(filter_iuse_defaults(
3101 vardb.aux_get(cpv, ["IUSE"])[0].split()))
3102 cur_use = pkgsettings["PORTAGE_USE"].split()
3103 cur_iuse = set(filter_iuse_defaults(
3104 pkg.metadata["IUSE"].split()))
3105 reinstall_for_flags = \
3106 self._reinstall_for_flags(
3107 forced_flags, old_use, old_iuse,
3109 if reinstall_for_flags:
3113 matched_packages.append(pkg)
3114 if reinstall_for_flags:
3115 self._reinstall_nodes[pkg] = \
3119 if not matched_packages:
3122 if "--debug" in self.myopts:
3123 for pkg in matched_packages:
3124 print (pkg.type_name + ":").rjust(10), pkg.cpv
3126 # Filter out any old-style virtual matches if they are
3127 # mixed with new-style virtual matches.
3128 cp = portage.dep_getkey(atom)
3129 if len(matched_packages) > 1 and \
3130 "virtual" == portage.catsplit(cp)[0]:
3131 for pkg in matched_packages:
3134 # Got a new-style virtual, so filter
3135 # out any old-style virtuals.
3136 matched_packages = [pkg for pkg in matched_packages \
3140 if len(matched_packages) > 1:
3141 bestmatch = portage.best(
3142 [pkg.cpv for pkg in matched_packages])
3143 matched_packages = [pkg for pkg in matched_packages \
3144 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3146 # ordered by type preference ("ebuild" type is the last resort)
3147 return matched_packages[-1], existing_node
# Graph-restricted package selection: only packages already in the graph
# trees are eligible; returns the cached Package for the highest match.
# NOTE(review): the docstring delimiters are omitted from this numbered
# listing.
3149 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3151 Select packages that have already been added to the graph or
3152 those that are installed and have not been scheduled for
3155 graph_db = self._graph_trees[root]["porttree"].dbapi
3156 matches = graph_db.match(atom)
3159 cpv = matches[-1] # highest match
3160 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
3161 graph_db.aux_get(cpv, ["SLOT"])[0])
3162 e_pkg = self._slot_pkg_map[root].get(slot_atom)
3165 # Since this cpv exists in the graph_db,
3166 # we must have a cached Package instance.
3167 cache_key = ("installed", root, cpv, "nomerge")
3168 return (self._pkg_cache[cache_key], None)
# NOTE(review): the docstring delimiters and some statements are omitted
# from this numbered listing; only the visible lines are annotated.
3170 def _complete_graph(self):
3172 Add any deep dependencies of required sets (args, system, world) that
3173 have not been pulled into the graph yet. This ensures that the graph
3174 is consistent such that initially satisfied deep dependencies are not
3175 broken in the new graph. Initially unsatisfied dependencies are
3176 irrelevant since we only want to avoid breaking dependencies that are
3179 Since this method can consume enough time to disturb users, it is
3180 currently only enabled by the --complete-graph option.
3182 if "complete" not in self.myparams:
3183 # Skip this to avoid consuming enough time to disturb users.
3186 if "--buildpkgonly" in self.myopts or \
3187 "recurse" not in self.myparams:
3190 # Put the depgraph into a mode that causes it to only
3191 # select packages that have already been added to the
3192 # graph or those that are installed and have not been
3193 # scheduled for replacement. Also, toggle the "deep"
3194 # parameter so that all dependencies are traversed and
3196 self._select_atoms = self._select_atoms_from_graph
3197 self._select_package = self._select_pkg_from_graph
3198 already_deep = "deep" in self.myparams
3199 if not already_deep:
3200 self.myparams.add("deep")
3202 for root in self.roots:
3203 required_set_names = self._required_set_names.copy()
3204 if root == self.target_root and \
3205 (already_deep or "empty" in self.myparams):
3206 required_set_names.difference_update(self._sets)
3207 if not required_set_names and not self._ignored_deps:
3209 root_config = self.roots[root]
3210 setconfig = root_config.setconfig
3212 # Reuse existing SetArg instances when available.
3213 for arg in self.digraph.root_nodes():
3214 if not isinstance(arg, SetArg):
3216 if arg.root_config != root_config:
3218 if arg.name in required_set_names:
3220 required_set_names.remove(arg.name)
3221 # Create new SetArg instances only when necessary.
3222 for s in required_set_names:
3223 expanded_set = InternalPackageSet(
3224 initial_atoms=setconfig.getSetAtoms(s))
3225 atom = SETPREFIX + s
3226 args.append(SetArg(arg=atom, set=expanded_set,
3227 root_config=root_config))
3228 vardb = root_config.trees["vartree"].dbapi
# Re-queue each set atom as a Dependency so the deep traversal sees it.
3230 for atom in arg.set:
3231 self._dep_stack.append(
3232 Dependency(atom=atom, root=root, parent=arg))
3233 if self._ignored_deps:
3234 self._dep_stack.extend(self._ignored_deps)
3235 self._ignored_deps = []
3236 if not self._create_graph(allow_unsatisfied=True):
3238 # Check the unsatisfied deps to see if any initially satisfied deps
3239 # will become unsatisfied due to an upgrade. Initially unsatisfied
3240 # deps are irrelevant since we only want to avoid breaking deps
3241 # that are initially satisfied.
3242 while self._unsatisfied_deps:
3243 dep = self._unsatisfied_deps.pop()
3244 matches = vardb.match_pkgs(dep.atom)
3246 # Initially unsatisfied.
3248 # A scheduled installation broke a deep dependency.
3249 # Add the installed package to the graph so that it
3250 # will be appropriately reported as a slot collision
3251 # (possibly solvable via backtracking).
3252 pkg = matches[-1] # highest match
3253 if not self._add_pkg(pkg, dep.parent,
3254 priority=dep.priority, depth=dep.depth):
3256 if not self._create_graph(allow_unsatisfied=True):
# NOTE(review): mangled numbered dump with non-contiguous line numbers —
# several statements (e.g. between 3284 and 3287, 3289-3301) are missing
# from this view. Code below is reproduced verbatim; only comments added.
3260 def validate_blockers(self):
3261 """Remove any blockers from the digraph that do not match any of the
3262 packages within the graph. If necessary, create hard deps to ensure
3263 correct merge order such that mutually blocking packages are never
3264 installed simultaneously."""
3266 if "--buildpkgonly" in self.myopts or \
3267 "--nodeps" in self.myopts:
3270 #if "deep" in self.myparams:
3272 # Pull in blockers from all installed packages that haven't already
3273 # been pulled into the depgraph. This is not enabled by default
3274 # due to the performance penalty that is incurred by all the
3275 # additional dep_check calls that are required.
3277 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
3278 for myroot in self.trees:
3279 vardb = self.trees[myroot]["vartree"].dbapi
3280 portdb = self.trees[myroot]["porttree"].dbapi
3281 pkgsettings = self.pkgsettings[myroot]
3282 final_db = self.mydbapi[myroot]
# Per-root persistent cache of blocker atoms, keyed by cpv; entries whose
# cpv is not seen during this pass are collected in stale_cache and purged.
3283 blocker_cache = BlockerCache(myroot, vardb)
3284 stale_cache = set(blocker_cache)
3287 stale_cache.discard(cpv)
3288 blocker_atoms = None
3290 if self.digraph.contains(pkg):
3294 self._blocker_parents.child_nodes(pkg))
3299 self._irrelevant_blockers.child_nodes(pkg))
3302 if blockers is not None:
# Normalize graph-derived blockers back to "!"-prefixed atom strings
# so they can be compared against cached BlockerData.atoms.
3303 blockers = set("!" + blocker.atom \
3304 for blocker in blockers)
3306 # If this node has any blockers, create a "nomerge"
3307 # node for it so that they can be enforced.
3308 self.spinner.update()
3309 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cached entry belongs to a different
# build of the same cpv, so it cannot be trusted.
3310 if blocker_data is not None and \
3311 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3314 # If blocker data from the graph is available, use
3315 # it to validate the cache and update the cache if
3317 if blocker_data is not None and \
3318 blockers is not None:
3319 if not blockers.symmetric_difference(
3320 blocker_data.atoms):
3324 if blocker_data is None and \
3325 blockers is not None:
3326 # Re-use the blockers from the graph.
3327 blocker_atoms = sorted(blockers)
3328 counter = long(pkg.metadata["COUNTER"])
3330 blocker_cache.BlockerData(counter, blocker_atoms)
3331 blocker_cache[pkg.cpv] = blocker_data
3335 blocker_atoms = blocker_data.atoms
3337 myuse = pkg.metadata["USE"].split()
3338 # Use aux_get() to trigger FakeVartree global
3339 # updates on *DEPEND when appropriate.
3340 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3341 # It is crucial to pass in final_db here in order to
3342 # optimize dep_check calls by eliminating atoms via
3343 # dep_wordreduce and dep_eval calls.
# Strict checking is disabled around dep_check and restored at 3362;
# installed packages may carry dep strings that would not pass strict
# validation.
3345 portage.dep._dep_check_strict = False
3347 success, atoms = portage.dep_check(depstr,
3348 final_db, pkgsettings, myuse=myuse,
3349 trees=self._graph_trees, myroot=myroot)
3350 except Exception, e:
3351 if isinstance(e, SystemExit):
3353 # This is helpful, for example, if a ValueError
3354 # is thrown from cpv_expand due to multiple
3355 # matches (this can happen if an atom lacks a
3357 show_invalid_depstring_notice(
3358 pkg, depstr, str(e))
3362 portage.dep._dep_check_strict = True
3364 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3365 if replacement_pkg and \
3366 replacement_pkg[0].operation == "merge":
3367 # This package is being replaced anyway, so
3368 # ignore invalid dependencies so as not to
3369 # annoy the user too much (otherwise they'd be
3370 # forced to manually unmerge it first).
3372 show_invalid_depstring_notice(pkg, depstr, atoms)
3374 blocker_atoms = [myatom for myatom in atoms \
3375 if myatom.startswith("!")]
3376 blocker_atoms.sort()
3377 counter = long(pkg.metadata["COUNTER"])
3378 blocker_cache[cpv] = \
3379 blocker_cache.BlockerData(counter, blocker_atoms)
3381 for myatom in blocker_atoms:
# Strip the leading "!" to get the plain atom for the Blocker node.
3382 blocker = Blocker(atom=myatom[1:], root=myroot)
3383 self._blocker_parents.add(blocker, pkg)
3384 for cpv in stale_cache:
3385 del blocker_cache[cpv]
3386 blocker_cache.flush()
3389 # Discard any "uninstall" tasks scheduled by previous calls
3390 # to this method, since those tasks may not make sense given
3391 # the current graph state.
3392 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
3393 if previous_uninstall_tasks:
3394 self._blocker_uninstalls = digraph()
3395 self.digraph.difference_update(previous_uninstall_tasks)
3397 for blocker in self._blocker_parents.leaf_nodes():
3398 self.spinner.update()
3399 root_config = self.roots[blocker.root]
3400 virtuals = root_config.settings.getvirtuals()
3401 mytype, myroot, mydep = blocker
3402 initial_db = self.trees[myroot]["vartree"].dbapi
3403 final_db = self.mydbapi[myroot]
# Expand a blocker on a virtual into one atom per provider, unless a
# new-style virt replaces it.
3405 provider_virtual = False
3406 if blocker.cp in virtuals and \
3407 not self._have_new_virt(blocker.root, blocker.cp):
3408 provider_virtual = True
3410 if provider_virtual:
3412 for provider_entry in virtuals[blocker.cp]:
3414 portage.dep_getkey(provider_entry)
3415 atoms.append(blocker.atom.replace(
3416 blocker.cp, provider_cp))
3418 atoms = [blocker.atom]
3420 blocked_initial = []
3422 blocked_initial.extend(initial_db.match_pkgs(atom))
3426 blocked_final.extend(final_db.match_pkgs(atom))
3428 if not blocked_initial and not blocked_final:
3429 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
3430 self._blocker_parents.remove(blocker)
3431 # Discard any parents that don't have any more blockers.
3432 for pkg in parent_pkgs:
3433 self._irrelevant_blockers.add(blocker, pkg)
3434 if not self._blocker_parents.child_nodes(pkg):
3435 self._blocker_parents.remove(pkg)
3437 for parent in self._blocker_parents.parent_nodes(blocker):
3438 unresolved_blocks = False
3439 depends_on_order = set()
3440 for pkg in blocked_initial:
3441 if pkg.slot_atom == parent.slot_atom:
3442 # TODO: Support blocks within slots in cases where it
3443 # might make sense. For example, a new version might
3444 # require that the old version be uninstalled at build
3447 if parent.installed:
3448 # Two currently installed packages conflict with
3449 # eachother. Ignore this case since the damage
3450 # is already done and this would be likely to
3451 # confuse users if displayed like a normal blocker.
3453 if parent.operation == "merge":
3454 # Maybe the blocked package can be replaced or simply
3455 # unmerged to resolve this block.
3456 depends_on_order.add((pkg, parent))
3458 # None of the above blocker resolutions techniques apply,
3459 # so apparently this one is unresolvable.
3460 unresolved_blocks = True
3461 for pkg in blocked_final:
3462 if pkg.slot_atom == parent.slot_atom:
3463 # TODO: Support blocks within slots.
3465 if parent.operation == "nomerge" and \
3466 pkg.operation == "nomerge":
3467 # This blocker will be handled the next time that a
3468 # merge of either package is triggered.
3471 # Maybe the blocking package can be
3472 # unmerged to resolve this block.
3473 if parent.operation == "merge" and pkg.installed:
3474 depends_on_order.add((pkg, parent))
3476 elif parent.operation == "nomerge":
3477 depends_on_order.add((parent, pkg))
3479 # None of the above blocker resolutions techniques apply,
3480 # so apparently this one is unresolvable.
3481 unresolved_blocks = True
3483 # Make sure we don't unmerge any package that have been pulled
3485 if not unresolved_blocks and depends_on_order:
3486 for inst_pkg, inst_task in depends_on_order:
3487 if self.digraph.contains(inst_pkg) and \
3488 self.digraph.parent_nodes(inst_pkg):
3489 unresolved_blocks = True
3492 if not unresolved_blocks and depends_on_order:
3493 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package as an explicit "uninstall" task node.
3494 uninst_task = Package(built=inst_pkg.built,
3495 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3496 metadata=inst_pkg.metadata,
3497 operation="uninstall", root=inst_pkg.root,
3498 type_name=inst_pkg.type_name)
3499 self._pkg_cache[uninst_task] = uninst_task
3500 # Enforce correct merge order with a hard dep.
3501 self.digraph.addnode(uninst_task, inst_task,
3502 priority=BlockerDepPriority.instance)
3503 # Count references to this blocker so that it can be
3504 # invalidated after nodes referencing it have been
3506 self._blocker_uninstalls.addnode(uninst_task, blocker)
3507 if not unresolved_blocks and not depends_on_order:
3508 self._irrelevant_blockers.add(blocker, parent)
3509 self._blocker_parents.remove_edge(blocker, parent)
3510 if not self._blocker_parents.parent_nodes(blocker):
3511 self._blocker_parents.remove(blocker)
3512 if not self._blocker_parents.child_nodes(parent):
3513 self._blocker_parents.remove(parent)
3514 if unresolved_blocks:
3515 self._unsolvable_blockers.add(blocker, parent)
# Decide whether blocker conflicts may be tolerated: scans myopts for any of
# the listed "no real merge happens" options. NOTE(review): the dump is
# missing the lines before/after the loop (3520, 3524-3527), presumably the
# result variable initialization and the return — confirm against the
# original file.
3519 def _accept_blocker_conflicts(self):
3521 for x in ("--buildpkgonly", "--fetchonly",
3522 "--fetch-all-uri", "--nodeps", "--pretend"):
3523 if x in self.myopts:
# Sorts mygraph.order in place so nodes with more parents come first.
# NOTE(review): line 3531 is missing from this dump — presumably the
# `node_info` dict initialization; confirm against the original file.
3528 def _merge_order_bias(self, mygraph):
3529 """Order nodes from highest to lowest overall reference count for
3530 optimal leaf node selection."""
3532 for node in mygraph.order:
# Reference count = number of parent nodes in the graph.
3533 node_info[node] = len(mygraph.parent_nodes(node))
3534 def cmp_merge_preference(node1, node2):
# Python 2 cmp function: negative result sorts node1 first, so
# higher counts sort toward the front.
3535 return node_info[node2] - node_info[node1]
3536 mygraph.order.sort(cmp_merge_preference)
# Returns (a copy of) the serialized merge list, computing and caching it on
# first use. Retries serialization whenever _serialize_tasks signals
# _serialize_tasks_retry. NOTE(review): this dump is missing lines 3542
# (presumably the `try:`), 3545-3546, and 3548-3551 (presumably the
# `reversed` handling and the return) — confirm against the original file.
3538 def altlist(self, reversed=False):
3540 while self._serialized_tasks_cache is None:
# _resolve_conflicts() completes the graph and validates blockers,
# raising on unrecoverable problems (see its visible body below).
3541 self._resolve_conflicts()
3543 self._serialized_tasks_cache = self._serialize_tasks()
3544 except self._serialize_tasks_retry:
# Copy so callers cannot mutate the cached list.
3547 retlist = self._serialized_tasks_cache[:]
def _resolve_conflicts(self):
	"""Complete the graph and validate blockers, raising an
	_unknown_internal_error as soon as either step fails."""
	# Order matters: the graph must be completed before blockers
	# are validated against it.
	for step in (self._complete_graph, self.validate_blockers):
		if not step():
			raise self._unknown_internal_error()
# NOTE(review): mangled numbered dump — the embedded numbering has many gaps
# (e.g. 3566, 3571, 3575, 3589-3592, 3606-3607, 3617-3619, 3627-3632, the
# initializations of `retlist`, `asap_nodes`, `prefer_asap`, `running_root`,
# `atoms`, `parent_deps`, `uninst_task`, and most loop `break`/`continue`
# lines are absent from this view). Code is reproduced verbatim; only
# comments are added. Purpose: compute a merge-ordered task list from the
# dependency graph, scheduling blocker-driven uninstalls and detecting
# circular dependencies.
3559 def _serialize_tasks(self):
3560 mygraph=self.digraph.copy()
3561 # Prune "nomerge" root nodes if nothing depends on them, since
3562 # otherwise they slow down merge order calculation. Don't remove
3563 # non-root nodes since they help optimize merge order in some cases
3564 # such as revdep-rebuild.
3565 removed_nodes = set()
3567 for node in mygraph.root_nodes():
3568 if not isinstance(node, Package) or \
3569 node.installed or node.onlydeps:
3570 removed_nodes.add(node)
3572 self.spinner.update()
3573 mygraph.difference_update(removed_nodes)
3574 if not removed_nodes:
3576 removed_nodes.clear()
3577 self._merge_order_bias(mygraph)
# Python 2 cmp function used to order nodes inside a circular-dependency
# group (visible use at 3825).
3578 def cmp_circular_bias(n1, n2):
3580 RDEPEND is stronger than PDEPEND and this function
3581 measures such a strength bias within a circular
3582 dependency relationship.
3584 n1_n2_medium = n2 in mygraph.child_nodes(n1,
3585 ignore_priority=DepPriority.MEDIUM_SOFT)
3586 n2_n1_medium = n1 in mygraph.child_nodes(n2,
3587 ignore_priority=DepPriority.MEDIUM_SOFT)
3588 if n1_n2_medium == n2_n1_medium:
3593 myblocker_uninstalls = self._blocker_uninstalls.copy()
3595 # Contains uninstall tasks that have been scheduled to
3596 # occur after overlapping blockers have been installed.
3597 scheduled_uninstalls = set()
3598 # Contains any Uninstall tasks that have been ignored
3599 # in order to avoid the circular deps code path. These
3600 # correspond to blocker conflicts that could not be
3602 ignored_uninstall_tasks = set()
3603 have_uninstall_task = False
3604 complete = "complete" in self.myparams
3605 myblocker_parents = self._blocker_parents.copy()
3608 def get_nodes(**kwargs):
3610 Returns leaf nodes excluding Uninstall instances
3611 since those should be executed as late as possible.
3613 return [node for node in mygraph.leaf_nodes(**kwargs) \
3614 if isinstance(node, Package) and \
3615 (node.operation != "uninstall" or \
3616 node in scheduled_uninstalls)]
3618 # sys-apps/portage needs special treatment if ROOT="/"
3620 from portage.const import PORTAGE_PACKAGE_ATOM
3621 runtime_deps = InternalPackageSet(
3622 initial_atoms=[PORTAGE_PACKAGE_ATOM])
3623 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
3624 PORTAGE_PACKAGE_ATOM)
3625 replacement_portage = self.mydbapi[running_root].match_pkgs(
3626 PORTAGE_PACKAGE_ATOM)
3629 running_portage = running_portage[0]
3631 running_portage = None
3633 if replacement_portage:
3634 replacement_portage = replacement_portage[0]
3636 replacement_portage = None
3638 if replacement_portage == running_portage:
3639 replacement_portage = None
3641 if replacement_portage is not None:
3642 # update from running_portage to replacement_portage asap
3643 asap_nodes.append(replacement_portage)
3645 if running_portage is not None:
3647 portage_rdepend = self._select_atoms_highest_available(
3648 running_root, running_portage.metadata["RDEPEND"],
3649 myuse=running_portage.metadata["USE"].split(),
3650 parent=running_portage, strict=False)
3651 except portage.exception.InvalidDependString, e:
3652 portage.writemsg("!!! Invalid RDEPEND in " + \
3653 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3654 (running_root, running_portage.cpv, e), noiselevel=-1)
3656 portage_rdepend = []
# Track portage's own non-blocker runtime deps so they are protected
# from uninstallation later (see the runtime_dep_atoms check below).
3657 runtime_deps.update(atom for atom in portage_rdepend \
3658 if not atom.startswith("!"))
3660 ignore_priority_soft_range = [None]
3661 ignore_priority_soft_range.extend(
3662 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
3663 tree_mode = "--tree" in self.myopts
3664 # Tracks whether or not the current iteration should prefer asap_nodes
3665 # if available. This is set to False when the previous iteration
3666 # failed to select any nodes. It is reset whenever nodes are
3667 # successfully selected.
3670 # By default, try to avoid selecting root nodes whenever possible. This
3671 # helps ensure that the maximimum possible number of soft dependencies
3672 # have been removed from the graph before their parent nodes have
3673 # selected. This is especially important when those dependencies are
3674 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
3675 # CHOST has been changed (like when building a stage3 from a stage2).
3676 accept_root_node = False
3678 # State of prefer_asap and accept_root_node flags for successive
3679 # iterations that loosen the criteria for node selection.
3681 # iteration prefer_asap accept_root_node
3686 # If no nodes are selected on the 3rd iteration, it is due to
3687 # unresolved blockers or circular dependencies.
# Main selection loop: repeatedly pick leaf nodes from mygraph until the
# graph is empty, loosening criteria when nothing is selectable.
3689 while not mygraph.empty():
3690 self.spinner.update()
3691 selected_nodes = None
3692 ignore_priority = None
3693 if prefer_asap and asap_nodes:
3694 """ASAP nodes are merged before their soft deps."""
3695 asap_nodes = [node for node in asap_nodes \
3696 if mygraph.contains(node)]
3697 for node in asap_nodes:
3698 if not mygraph.child_nodes(node,
3699 ignore_priority=DepPriority.SOFT):
3700 selected_nodes = [node]
3701 asap_nodes.remove(node)
3703 if not selected_nodes and \
3704 not (prefer_asap and asap_nodes):
3705 for ignore_priority in ignore_priority_soft_range:
3706 nodes = get_nodes(ignore_priority=ignore_priority)
3710 if ignore_priority is None and not tree_mode:
3711 # Greedily pop all of these nodes since no relationship
3712 # has been ignored. This optimization destroys --tree
3713 # output, so it's disabled in reversed mode. If there
3714 # is a mix of merge and uninstall nodes, save the
3715 # uninstall nodes from later since sometimes a merge
3716 # node will render an install node unnecessary, and
3717 # we want to avoid doing a separate uninstall task in
3719 merge_nodes = [node for node in nodes \
3720 if node.operation == "merge"]
3722 selected_nodes = merge_nodes
3724 selected_nodes = nodes
3726 # For optimal merge order:
3727 # * Only pop one node.
3728 # * Removing a root node (node without a parent)
3729 # will not produce a leaf node, so avoid it.
3731 if mygraph.parent_nodes(node):
3732 # found a non-root node
3733 selected_nodes = [node]
3735 if not selected_nodes and \
3736 (accept_root_node or ignore_priority is None):
3737 # settle for a root node
3738 selected_nodes = [nodes[0]]
3740 if not selected_nodes:
3741 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
3743 """Recursively gather a group of nodes that RDEPEND on
3744 eachother. This ensures that they are merged as a group
3745 and get their RDEPENDs satisfied as soon as possible."""
3746 def gather_deps(ignore_priority,
3747 mergeable_nodes, selected_nodes, node):
3748 if node in selected_nodes:
3750 if node not in mergeable_nodes:
3752 if node == replacement_portage and \
3753 mygraph.child_nodes(node,
3754 ignore_priority=DepPriority.MEDIUM_SOFT):
3755 # Make sure that portage always has all of it's
3756 # RDEPENDs installed first.
3758 selected_nodes.add(node)
3759 for child in mygraph.child_nodes(node,
3760 ignore_priority=ignore_priority):
3761 if not gather_deps(ignore_priority,
3762 mergeable_nodes, selected_nodes, child):
3765 mergeable_nodes = set(nodes)
3766 if prefer_asap and asap_nodes:
3768 for ignore_priority in xrange(DepPriority.SOFT,
3769 DepPriority.MEDIUM_SOFT + 1):
3771 if nodes is not asap_nodes and \
3772 not accept_root_node and \
3773 not mygraph.parent_nodes(node):
3775 selected_nodes = set()
3776 if gather_deps(ignore_priority,
3777 mergeable_nodes, selected_nodes, node):
3780 selected_nodes = None
3784 # If any nodes have been selected here, it's always
3785 # possible that anything up to a MEDIUM_SOFT priority
3786 # relationship has been ignored. This state is recorded
3787 # in ignore_priority so that relevant nodes will be
3788 # added to asap_nodes when appropriate.
3790 ignore_priority = DepPriority.MEDIUM_SOFT
3792 if prefer_asap and asap_nodes and not selected_nodes:
3793 # We failed to find any asap nodes to merge, so ignore
3794 # them for the next iteration.
3798 if not selected_nodes and not accept_root_node:
3799 # Maybe there are only root nodes left, so accept them
3800 # for the next iteration.
3801 accept_root_node = True
3804 if selected_nodes and ignore_priority > DepPriority.SOFT:
3805 # Try to merge ignored medium deps as soon as possible.
3806 for node in selected_nodes:
3807 children = set(mygraph.child_nodes(node))
3808 soft = children.difference(
3809 mygraph.child_nodes(node,
3810 ignore_priority=DepPriority.SOFT))
3811 medium_soft = children.difference(
3812 mygraph.child_nodes(node,
3813 ignore_priority=DepPriority.MEDIUM_SOFT))
3814 medium_soft.difference_update(soft)
3815 for child in medium_soft:
3816 if child in selected_nodes:
3818 if child in asap_nodes:
3820 asap_nodes.append(child)
3822 if selected_nodes and len(selected_nodes) > 1:
3823 if not isinstance(selected_nodes, list):
3824 selected_nodes = list(selected_nodes)
3825 selected_nodes.sort(cmp_circular_bias)
3827 if not selected_nodes and not myblocker_uninstalls.is_empty():
3828 # An Uninstall task needs to be executed in order to
3829 # avoid conflict if possible.
3830 min_parent_deps = None
3832 for task in myblocker_uninstalls.leaf_nodes():
3833 # Do some sanity checks so that system or world packages
3834 # don't get uninstalled inappropriately here (only really
3835 # necessary when --complete-graph has not been enabled).
3837 if task in ignored_uninstall_tasks:
3840 if task in scheduled_uninstalls:
3841 # It's been scheduled but it hasn't
3842 # been executed yet due to dependence
3843 # on installation of blocking packages.
3846 root_config = self.roots[task.root]
3847 inst_pkg = self._pkg_cache[
3848 ("installed", task.root, task.cpv, "nomerge")]
3850 if self.digraph.contains(inst_pkg):
3853 if running_root == task.root:
3854 # Never uninstall sys-apps/portage or it's essential
3855 # dependencies, except through replacement.
3857 runtime_dep_atoms = \
3858 list(runtime_deps.iterAtomsForPackage(task))
3859 except portage.exception.InvalidDependString, e:
3860 portage.writemsg("!!! Invalid PROVIDE in " + \
3861 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3862 (task.root, task.cpv, e), noiselevel=-1)
3866 # Don't uninstall a runtime dep if it appears
3867 # to be the only suitable one installed.
3869 vardb = root_config.trees["vartree"].dbapi
3870 for atom in runtime_dep_atoms:
3871 other_version = None
3872 for pkg in vardb.match_pkgs(atom):
3873 if pkg.cpv == task.cpv and \
3874 pkg.metadata["COUNTER"] == \
3875 task.metadata["COUNTER"]:
3879 if other_version is None:
3885 # For packages in the system set, don't take
3886 # any chances. If the conflict can't be resolved
3887 # by a normal replacement operation then abort.
3890 for atom in root_config.sets[
3891 "system"].iterAtomsForPackage(task):
3894 except portage.exception.InvalidDependString, e:
3895 portage.writemsg("!!! Invalid PROVIDE in " + \
3896 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3897 (task.root, task.cpv, e), noiselevel=-1)
3903 # Note that the world check isn't always
3904 # necessary since self._complete_graph() will
3905 # add all packages from the system and world sets to the
3906 # graph. This just allows unresolved conflicts to be
3907 # detected as early as possible, which makes it possible
3908 # to avoid calling self._complete_graph() when it is
3909 # unnecessary due to blockers triggering an abortion.
3911 # For packages in the world set, go ahead an uninstall
3912 # when necessary, as long as the atom will be satisfied
3913 # in the final state.
3914 graph_db = self.mydbapi[task.root]
3917 for atom in root_config.sets[
3918 "world"].iterAtomsForPackage(task):
3920 for pkg in graph_db.match_pkgs(atom):
3928 except portage.exception.InvalidDependString, e:
3929 portage.writemsg("!!! Invalid PROVIDE in " + \
3930 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3931 (task.root, task.cpv, e), noiselevel=-1)
3937 # Check the deps of parent nodes to ensure that
3938 # the chosen task produces a leaf node. Maybe
3939 # this can be optimized some more to make the
3940 # best possible choice, but the current algorithm
3941 # is simple and should be near optimal for most
3944 for parent in mygraph.parent_nodes(task):
3945 parent_deps.update(mygraph.child_nodes(parent,
3946 ignore_priority=DepPriority.MEDIUM_SOFT))
3947 parent_deps.remove(task)
3948 if min_parent_deps is None or \
3949 len(parent_deps) < min_parent_deps:
3950 min_parent_deps = len(parent_deps)
3953 if uninst_task is not None:
3954 # The uninstall is performed only after blocking
3955 # packages have been merged on top of it. File
3956 # collisions between blocking packages are detected
3957 # and removed from the list of files to be uninstalled.
3958 scheduled_uninstalls.add(uninst_task)
3959 parent_nodes = mygraph.parent_nodes(uninst_task)
3961 # Reverse the parent -> uninstall edges since we want
3962 # to do the uninstall after blocking packages have
3963 # been merged on top of it.
3964 mygraph.remove(uninst_task)
3965 for blocked_pkg in parent_nodes:
3966 mygraph.add(blocked_pkg, uninst_task,
3967 priority=BlockerDepPriority.instance)
3969 # None of the Uninstall tasks are acceptable, so
3970 # the corresponding blockers are unresolvable.
3971 # We need to drop an Uninstall task here in order
3972 # to avoid the circular deps code path, but the
3973 # blocker will still be counted as an unresolved
3975 for node in myblocker_uninstalls.leaf_nodes():
3977 mygraph.remove(node)
3981 ignored_uninstall_tasks.add(node)
3984 # After dropping an Uninstall task, reset
3985 # the state variables for leaf node selection and
3986 # continue trying to select leaf nodes.
3988 accept_root_node = False
3991 if not selected_nodes:
# Nothing selectable and no uninstall escape hatch: circular deps.
3992 self._circular_deps_for_display = mygraph
3993 raise self._unknown_internal_error()
3995 # At this point, we've succeeded in selecting one or more nodes, so
3996 # it's now safe to reset the prefer_asap and accept_root_node flags
3997 # to their default states.
3999 accept_root_node = False
4001 mygraph.difference_update(selected_nodes)
4003 for node in selected_nodes:
4004 if isinstance(node, Package) and \
4005 node.operation == "nomerge":
4008 # Handle interactions between blockers
4009 # and uninstallation tasks.
4010 solved_blockers = set()
4012 if isinstance(node, Package) and \
4013 "uninstall" == node.operation:
4014 have_uninstall_task = True
4017 vardb = self.trees[node.root]["vartree"].dbapi
4018 previous_cpv = vardb.match(node.slot_atom)
4020 # The package will be replaced by this one, so remove
4021 # the corresponding Uninstall task if necessary.
4022 previous_cpv = previous_cpv[0]
4024 ("installed", node.root, previous_cpv, "uninstall")
4026 mygraph.remove(uninst_task)
4030 if uninst_task is not None and \
4031 uninst_task not in ignored_uninstall_tasks and \
4032 myblocker_uninstalls.contains(uninst_task):
4033 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
4034 myblocker_uninstalls.remove(uninst_task)
4035 # Discard any blockers that this Uninstall solves.
4036 for blocker in blocker_nodes:
4037 if not myblocker_uninstalls.child_nodes(blocker):
4038 myblocker_uninstalls.remove(blocker)
4039 solved_blockers.add(blocker)
4041 retlist.append(node)
4043 if (isinstance(node, Package) and \
4044 "uninstall" == node.operation) or \
4045 (uninst_task is not None and \
4046 uninst_task in scheduled_uninstalls):
4047 # Include satisfied blockers in the merge list
4048 # since the user might be interested and also
4049 # it serves as an indicator that blocking packages
4050 # will be temporarily installed simultaneously.
4051 for blocker in solved_blockers:
4052 retlist.append(Blocker(atom=blocker.atom,
4053 root=blocker.root, satisfied=True))
4055 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
4056 for node in myblocker_uninstalls.root_nodes():
4057 unsolvable_blockers.add(node)
4059 for blocker in unsolvable_blockers:
4060 retlist.append(blocker)
4062 # If any Uninstall tasks need to be executed in order
4063 # to avoid a conflict, complete the graph with any
4064 # dependencies that may have been initially
4065 # neglected (to ensure that unsafe Uninstall tasks
4066 # are properly identified and blocked from execution).
4067 if have_uninstall_task and \
4069 not unsolvable_blockers:
4070 self.myparams.add("complete")
4071 raise self._serialize_tasks_retry("")
4073 if unsolvable_blockers and \
4074 not self._accept_blocker_conflicts():
4075 self._unsatisfied_blockers_for_display = unsolvable_blockers
4076 self._serialized_tasks_cache = retlist[:]
4077 raise self._unknown_internal_error()
4079 if self._slot_collision_info and \
4080 not self._accept_blocker_conflicts():
4081 self._serialized_tasks_cache = retlist[:]
4082 raise self._unknown_internal_error()
# Diagnostic output for the circular-dependency failure path: shrink the
# graph to the cycle members, display them with --tree formatting forced on,
# then print the debug graph and advice. NOTE(review): the dump is missing
# lines 4091, 4094-4095, 4100, 4104, 4106-4107, 4119, 4127 (loop scaffolding,
# `display_order` initialization and a noiselevel argument, presumably) —
# confirm against the original file.
4086 def _show_circular_deps(self, mygraph):
4087 # No leaf nodes are available, so we have a circular
4088 # dependency panic situation. Reduce the noise level to a
4089 # minimum via repeated elimination of root nodes since they
4090 # have no parents and thus can not be part of a cycle.
4092 root_nodes = mygraph.root_nodes(
4093 ignore_priority=DepPriority.MEDIUM_SOFT)
4096 mygraph.difference_update(root_nodes)
4097 # Display the USE flags that are enabled on nodes that are part
4098 # of dependency cycles in case that helps the user decide to
4099 # disable some of them.
4101 tempgraph = mygraph.copy()
4102 while not tempgraph.empty():
4103 nodes = tempgraph.leaf_nodes()
4105 node = tempgraph.order[0]
4108 display_order.append(node)
4109 tempgraph.remove(node)
4110 display_order.reverse()
# Force verbose tree output regardless of the user's options so the
# cycle is actually visible.
4111 self.myopts.pop("--quiet", None)
4112 self.myopts.pop("--verbose", None)
4113 self.myopts["--tree"] = True
4114 portage.writemsg("\n\n", noiselevel=-1)
4115 self.display(display_order)
4116 prefix = colorize("BAD", " * ")
4117 portage.writemsg("\n", noiselevel=-1)
4118 portage.writemsg(prefix + "Error: circular dependencies:\n",
4120 portage.writemsg("\n", noiselevel=-1)
4121 mygraph.debug_print()
4122 portage.writemsg("\n", noiselevel=-1)
4123 portage.writemsg(prefix + "Note that circular dependencies " + \
4124 "can often be avoided by temporarily\n", noiselevel=-1)
4125 portage.writemsg(prefix + "disabling USE flags that trigger " + \
4126 "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
	"""Display the cached serialized task list, unless there is no
	cache yet or it is the exact list that was already displayed."""
	tasks = self._serialized_tasks_cache
	if tasks is None or tasks == self._displayed_list:
		return
	# Work on a copy so the cache itself is never mutated.
	shown = list(tasks)
	if "--tree" in self.myopts:
		# Tree output is rendered depth-first, so reverse for display.
		shown.reverse()
	self.display(shown)
def _show_unsatisfied_blockers(self, blockers):
	"""Show the merge list and then explain that it contains mutually
	blocking packages; point at the docs unless --quiet is set."""
	from textwrap import wrap
	self._show_merge_list()
	msg = ("Error: The above package list contains "
		"packages which cannot be installed "
		"at the same time on the same system.")
	prefix = colorize("BAD", " * ")
	portage.writemsg("\n", noiselevel=-1)
	for wrapped_line in wrap(msg, 70):
		portage.writemsg("%s%s\n" % (prefix, wrapped_line),
			noiselevel=-1)
	if "--quiet" not in self.myopts:
		show_blocker_docs_link()
    def display(self, mylist, favorites=[], verbosity=None):
        """Render the merge list ("emerge --pretend" style output).

        NOTE(review): this excerpt is non-contiguous -- many original lines
        are elided and the original indentation was lost; indentation below
        is a best-effort reconstruction with "[elided]" markers at the gaps.
        Verify control flow against the complete source.

        @param mylist: ordered list of Package/Blocker nodes (later rebuilt
            into (node, depth, ordered) tuples for tree display)
        @param favorites: atoms requested on the command line, used for
            world-file annotation.  NOTE(review): mutable default argument
            -- only read here as far as visible, but fragile style.
        @param verbosity: 1 (quiet), 2 (normal) or 3 (verbose); derived
            from --quiet/--verbose when None.
        """
        # This is used to prevent display_problems() from
        # redundantly displaying this exact same merge list
        # again via _show_merge_list().
        self._displayed_list = mylist
        if verbosity is None:
            # "and/or" chain emulates a conditional expression (pre-2.5 idiom):
            # --quiet -> 1, --verbose -> 3, otherwise 2.
            verbosity = ("--quiet" in self.myopts and 1 or \
                "--verbose" in self.myopts and 3 or 2)
        favorites_set = InternalPackageSet(favorites)
        oneshot = "--oneshot" in self.myopts or \
            "--onlydeps" in self.myopts
        counters = PackageCounters()
        if verbosity == 1 and "--verbose" not in self.myopts:
            # Quiet mode: USE string rendering suppressed.
            def create_use_string(*args):
                # [elided: body]
        # [elided: else: branch header]
            # Normal/verbose mode: build the colorized USE="..." string.
            def create_use_string(name, cur_iuse, iuse_forced, cur_use,
                # [elided: additional parameters]
                is_new, reinst_flags,
                all_flags=(verbosity == 3 or "--quiet" in self.myopts),
                alphabetical=("--alphabetical" in self.myopts)):
                # [elided: local accumulator setup]
                cur_iuse = set(cur_iuse)
                enabled_flags = cur_iuse.intersection(cur_use)
                removed_iuse = set(old_iuse).difference(cur_iuse)
                any_iuse = cur_iuse.union(old_iuse)
                any_iuse = list(any_iuse)
                # [elided: sorting of any_iuse]
                for flag in any_iuse:
                    # [elided: flag_str initialization]
                    reinst_flag = reinst_flags and flag in reinst_flags
                    if flag in enabled_flags:
                        # [elided: intermediate branch header]
                        if is_new or flag in old_use and \
                            (all_flags or reinst_flag):
                            flag_str = red(flag)
                        elif flag not in old_iuse:
                            # "%" marks a flag added to IUSE, "*" a USE change.
                            flag_str = yellow(flag) + "%*"
                        elif flag not in old_use:
                            flag_str = green(flag) + "*"
                    elif flag in removed_iuse:
                        if all_flags or reinst_flag:
                            flag_str = yellow("-" + flag) + "%"
                            # [elided: "*" suffix when previously enabled]
                            flag_str = "(" + flag_str + ")"
                            removed.append(flag_str)
                    # [elided: disabled-flag branch header]
                        if is_new or flag in old_iuse and \
                            flag not in old_use and \
                            (all_flags or reinst_flag):
                            flag_str = blue("-" + flag)
                        elif flag not in old_iuse:
                            flag_str = yellow("-" + flag)
                            if flag not in iuse_forced:
                                # [elided: "%" suffix]
                        elif flag in old_use:
                            flag_str = green("-" + flag) + "*"
                    # [elided: guard on flag_str]
                    if flag in iuse_forced:
                        # Parentheses denote a forced/masked flag.
                        flag_str = "(" + flag_str + ")"
                    # [elided: enabled/disabled dispatch headers]
                        enabled.append(flag_str)
                        disabled.append(flag_str)
                # [elided: alphabetical vs. grouped ordering branch headers]
                ret = " ".join(enabled)
                ret = " ".join(enabled + disabled + removed)
                # [elided]
                ret = '%s="%s" ' % (name, ret)
                # [elided: return ret]
        repo_display = RepoDisplay(self.roots)
        # [elided]
        mygraph = self.digraph.copy()
        # If there are any Uninstall instances, add the corresponding
        # blockers to the digraph (useful for --tree display).
        executed_uninstalls = set(node for node in mylist \
            if isinstance(node, Package) and node.operation == "unmerge")
        for uninstall in self._blocker_uninstalls.leaf_nodes():
            uninstall_parents = \
                self._blocker_uninstalls.parent_nodes(uninstall)
            if not uninstall_parents:
                # [elided: continue]
            # Remove the corresponding "nomerge" node and substitute
            # the Uninstall node.
            inst_pkg = self._pkg_cache[
                ("installed", uninstall.root, uninstall.cpv, "nomerge")]
            # [elided: try:]
            mygraph.remove(inst_pkg)
            # [elided: exception handlers / try:]
            inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
            # [elided: except KeyError:]
            inst_pkg_blockers = []
            # Break the Package -> Uninstall edges.
            mygraph.remove(uninstall)
            # Resolution of a package's blockers
            # depend on it's own uninstallation.
            for blocker in inst_pkg_blockers:
                mygraph.add(uninstall, blocker)
            # Expand Package -> Uninstall edges into
            # Package -> Blocker -> Uninstall edges.
            for blocker in uninstall_parents:
                mygraph.add(uninstall, blocker)
                for parent in self._blocker_parents.parent_nodes(blocker):
                    if parent != inst_pkg:
                        mygraph.add(blocker, parent)
            # If the uninstall task did not need to be executed because
            # of an upgrade, display Blocker -> Upgrade edges since the
            # corresponding Blocker -> Uninstall edges will not be shown.
            # [elided: upgrade_node assignment target]
                self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
            if upgrade_node is not None and \
                uninstall not in executed_uninstalls:
                for blocker in uninstall_parents:
                    mygraph.add(upgrade_node, blocker)
        unsatisfied_blockers = []
        # [elided: tree_nodes/display_list/shown_edges setup and the
        #  enclosing "for x in mylist:" loop header]
            if isinstance(x, Blocker) and not x.satisfied:
                unsatisfied_blockers.append(x)
            # [elided]
            if "--tree" in self.myopts:
                depth = len(tree_nodes)
                while depth and graph_key not in \
                    mygraph.child_nodes(tree_nodes[depth-1]):
                    # [elided: depth decrement]
                # [elided: if depth:]
                tree_nodes = tree_nodes[:depth]
                tree_nodes.append(graph_key)
                display_list.append((x, depth, True))
                shown_edges.add((graph_key, tree_nodes[depth-1]))
                # [elided: else branch header]
                traversed_nodes = set() # prevent endless circles
                traversed_nodes.add(graph_key)
                def add_parents(current_node, ordered):
                    # [elided: parent_nodes initialization]
                    # Do not traverse to parents if this node is an
                    # an argument or a direct member of a set that has
                    # been specified as an argument (system or world).
                    if current_node not in self._set_nodes:
                        parent_nodes = mygraph.parent_nodes(current_node)
                    # [elided: if parent_nodes:]
                    child_nodes = set(mygraph.child_nodes(current_node))
                    selected_parent = None
                    # First, try to avoid a direct cycle.
                    for node in parent_nodes:
                        if not isinstance(node, (Blocker, Package)):
                            # [elided: continue]
                        if node not in traversed_nodes and \
                            node not in child_nodes:
                            edge = (current_node, node)
                            if edge in shown_edges:
                                # [elided: continue]
                            selected_parent = node
                            # [elided: break]
                    if not selected_parent:
                        # A direct cycle is unavoidable.
                        for node in parent_nodes:
                            if not isinstance(node, (Blocker, Package)):
                                # [elided: continue]
                            if node not in traversed_nodes:
                                edge = (current_node, node)
                                if edge in shown_edges:
                                    # [elided: continue]
                                selected_parent = node
                                # [elided: break]
                    # [elided: if selected_parent:]
                    shown_edges.add((current_node, selected_parent))
                    traversed_nodes.add(selected_parent)
                    add_parents(selected_parent, False)
                    display_list.append((current_node,
                        len(tree_nodes), ordered))
                    tree_nodes.append(current_node)
                # [elided]
                add_parents(graph_key, True)
            # [elided: else branch (no --tree)]
                display_list.append((x, depth, True))
        mylist = display_list
        for x in unsatisfied_blockers:
            mylist.append((x, 0, True))
        # [elided]
        last_merge_depth = 0
        # Walk backwards to prune redundant/duplicate display entries.
        for i in xrange(len(mylist)-1,-1,-1):
            graph_key, depth, ordered = mylist[i]
            if not ordered and depth == 0 and i > 0 \
                and graph_key == mylist[i-1][0] and \
                mylist[i-1][1] == 0:
                # An ordered node got a consecutive duplicate when the tree was
                # [elided: rest of comment and duplicate removal]
            if ordered and graph_key[-1] != "nomerge":
                last_merge_depth = depth
                # [elided: continue]
            if depth >= last_merge_depth or \
                i < len(mylist) - 1 and \
                depth >= mylist[i+1][1]:
                # [elided: removal of the entry]
        from portage import flatten
        from portage.dep import use_reduce, paren_reduce
        # files to fetch list - avoids counting a same file twice
        # in size display (verbose mode)
        # [elided: myfetchlist initialization]
        for mylist_index in xrange(len(mylist)):
            x, depth, ordered = mylist[mylist_index]
            # [elided: unpack pkg_type/myroot/pkg_key from x]
            portdb = self.trees[myroot]["porttree"].dbapi
            bindb = self.trees[myroot]["bintree"].dbapi
            vardb = self.trees[myroot]["vartree"].dbapi
            vartree = self.trees[myroot]["vartree"]
            pkgsettings = self.pkgsettings[myroot]
            # [elided]
            indent = " " * depth
            # [elided]
            if isinstance(x, Blocker):
                # [elided: if x.satisfied:]
                blocker_style = "PKG_BLOCKER_SATISFIED"
                addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
                # [elided: else:]
                blocker_style = "PKG_BLOCKER"
                addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
                # [elided: if ordered:]
                counters.blocks += 1
                # [elided: if x.satisfied:]
                counters.blocks_satisfied += 1
                resolved = portage.key_expand(
                    pkg_key, mydb=vardb, settings=pkgsettings)
                if "--columns" in self.myopts and "--quiet" in self.myopts:
                    addl += " " + colorize(blocker_style, resolved)
                # [elided: else:]
                addl = "[%s %s] %s%s" % \
                    (colorize(blocker_style, "blocks"),
                    addl, indent, colorize(blocker_style, resolved))
                block_parents = self._blocker_parents.parent_nodes(x)
                block_parents = set([pnode[2] for pnode in block_parents])
                block_parents = ", ".join(block_parents)
                # [elided: branch on resolved vs. raw atom]
                addl += colorize(blocker_style,
                    " (\"%s\" is blocking %s)") % \
                    (pkg_key, block_parents)
                # [elided: else:]
                addl += colorize(blocker_style,
                    " (is blocking %s)") % block_parents
                if isinstance(x, Blocker) and x.satisfied:
                    # [elided]
                    blockers.append(addl)
            # [elided: else branch (Package node); pkg_status unpacking]
                pkg_merge = ordered and pkg_status == "merge"
                if not pkg_merge and pkg_status == "merge":
                    pkg_status = "nomerge"
                built = pkg_type != "ebuild"
                installed = pkg_type == "installed"
                # [elided]
                metadata = pkg.metadata
                # [elided]
                repo_name = metadata["repository"]
                if pkg_type == "ebuild":
                    ebuild_path = portdb.findname(pkg_key)
                    if not ebuild_path: # shouldn't happen
                        raise portage.exception.PackageNotFound(pkg_key)
                    # Repository root is three levels above the ebuild file.
                    repo_path_real = os.path.dirname(os.path.dirname(
                        os.path.dirname(ebuild_path)))
                # [elided: else:]
                    repo_path_real = portdb.getRepositoryPath(repo_name)
                pkg_use = metadata["USE"].split()
                # [elided: try:]
                restrict = flatten(use_reduce(paren_reduce(
                    pkg.metadata["RESTRICT"]), uselist=pkg_use))
                except portage.exception.InvalidDependString, e:
                    if not pkg.installed:
                        show_invalid_depstring_notice(x,
                            pkg.metadata["RESTRICT"], str(e))
                        # [elided: early return / cleanup]
                if "ebuild" == pkg_type and x[3] != "nomerge" and \
                    "fetch" in restrict:
                    # [elided: fetch marker assignment; if ordered:]
                    counters.restrict_fetch += 1
                    if portdb.fetch_check(pkg_key, pkg_use):
                        # [elided: satisfied-fetch marker; if ordered:]
                        counters.restrict_fetch_satisfied += 1
                #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
                #param is used for -u, where you still *do* want to see when something is being upgraded.
                # [elided]
                installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
                if vardb.cpv_exists(pkg_key):
                    # Exact version already installed: re-merge ("R").
                    addl=" "+yellow("R")+fetch+" "
                    # [elided: ordered/merge guards]
                    counters.reinst += 1
                elif pkg_status == "uninstall":
                    counters.uninst += 1
                # filter out old-style virtual matches
                elif installed_versions and \
                    portage.cpv_getkey(installed_versions[0]) == \
                    portage.cpv_getkey(pkg_key):
                    myinslotlist = vardb.match(pkg.slot_atom)
                    # If this is the first install of a new-style virtual, we
                    # need to filter out old-style virtual matches.
                    if myinslotlist and \
                        portage.cpv_getkey(myinslotlist[0]) != \
                        portage.cpv_getkey(pkg_key):
                        # [elided: reset myinslotlist]
                    # [elided: if myinslotlist:]
                        myoldbest = myinslotlist[:]
                        # [elided]
                        if not portage.dep.cpvequal(pkg_key,
                            portage.best([pkg_key] + myoldbest)):
                            # Downgrade within the slot ("UD").
                            addl += turquoise("U")+blue("D")
                            # [elided: if ordered:]
                            counters.downgrades += 1
                        # [elided: else: upgrade within the slot]
                            addl += turquoise("U") + " "
                            # [elided: if ordered:]
                            counters.upgrades += 1
                    # [elided: else:]
                        # New slot, mark it new.
                        addl = " " + green("NS") + fetch + " "
                        myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
                        # [elided: if ordered:]
                        counters.newslot += 1
                    if "--changelog" in self.myopts:
                        inst_matches = vardb.match(pkg.slot_atom)
                        # [elided: guard on inst_matches]
                        changelogs.extend(self.calc_changelog(
                            portdb.findname(pkg_key),
                            inst_matches[0], pkg_key))
                # [elided: else: brand new package ("N")]
                    addl = " " + green("N") + " " + fetch + " "
                    # [elided: counters.new accounting / verbosity checks]
                cur_iuse = list(filter_iuse_defaults(
                    pkg.metadata["IUSE"].split()))
                # [elided]
                forced_flags = set()
                pkgsettings.setcpv(pkg.cpv, mydb=pkg.metadata) # for package.use.{mask,force}
                forced_flags.update(pkgsettings.useforce)
                forced_flags.update(pkgsettings.usemask)
                # [elided]
                cur_iuse = portage.unique_array(cur_iuse)
                # [elided: sorting; cur_use assignment]
                cur_use = [flag for flag in cur_use if flag in cur_iuse]
                # [elided]
                if myoldbest and myinslotlist:
                    previous_cpv = myoldbest[0]
                # [elided: else:]
                    previous_cpv = pkg.cpv
                if vardb.cpv_exists(previous_cpv):
                    old_iuse, old_use = vardb.aux_get(
                        previous_cpv, ["IUSE", "USE"])
                    old_iuse = list(set(
                        filter_iuse_defaults(old_iuse.split())))
                    # [elided: sorting]
                    old_use = old_use.split()
                    # [elided: is_new determination; else branch resetting
                    #  old_iuse/old_use for fresh installs]
                old_use = [flag for flag in old_use if flag in old_iuse]
                # [elided]
                use_expand = pkgsettings["USE_EXPAND"].lower().split()
                # [elided: sorting]
                use_expand.reverse()
                use_expand_hidden = \
                    pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
                # [elided]
                def map_to_use_expand(myvals, forcedFlags=False,
                    # [elided: removeHidden parameter; ret/forced init]
                    for exp in use_expand:
                        # [elided: per-expand-variable bucket init]
                        for val in myvals[:]:
                            if val.startswith(exp.lower()+"_"):
                                if val in forced_flags:
                                    forced[exp].add(val[len(exp)+1:])
                                ret[exp].append(val[len(exp)+1:])
                                # [elided: removal from myvals]
                    # [elided: plain-USE bucket assignment]
                    forced["USE"] = [val for val in myvals \
                        if val in forced_flags]
                    # [elided: if removeHidden:]
                    for exp in use_expand_hidden:
                        # [elided: hidden-bucket removal; return handling]
                # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
                # are the only thing that triggered reinstallation.
                reinst_flags_map = {}
                reinstall_for_flags = self._reinstall_nodes.get(pkg)
                reinst_expand_map = None
                if reinstall_for_flags:
                    reinst_flags_map = map_to_use_expand(
                        list(reinstall_for_flags), removeHidden=False)
                    for k in list(reinst_flags_map):
                        if not reinst_flags_map[k]:
                            del reinst_flags_map[k]
                    if not reinst_flags_map.get("USE"):
                        reinst_expand_map = reinst_flags_map.copy()
                        reinst_expand_map.pop("USE", None)
                if reinst_expand_map and \
                    not set(reinst_expand_map).difference(
                    # [elided: use_expand_hidden argument]
                    use_expand_hidden = \
                        set(use_expand_hidden).difference(
                        # [elided: reinst_expand_map argument]
                cur_iuse_map, iuse_forced = \
                    map_to_use_expand(cur_iuse, forcedFlags=True)
                cur_use_map = map_to_use_expand(cur_use)
                old_iuse_map = map_to_use_expand(old_iuse)
                old_use_map = map_to_use_expand(old_use)
                # [elided: sorting]
                use_expand.insert(0, "USE")
                # [elided]
                for key in use_expand:
                    if key in use_expand_hidden:
                        # [elided: continue]
                    verboseadd += create_use_string(key.upper(),
                        cur_iuse_map[key], iuse_forced[key],
                        cur_use_map[key], old_iuse_map[key],
                        old_use_map[key], is_new,
                        reinst_flags_map.get(key))
                # [elided: verbose size-accounting setup]
                if pkg_type == "ebuild" and pkg_merge:
                    # [elided: try:]
                    myfilesdict = portdb.getfetchsizes(pkg_key,
                        useflags=pkg_use, debug=self.edebug)
                    except portage.exception.InvalidDependString, e:
                        src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
                        show_invalid_depstring_notice(x, src_uri, str(e))
                        # [elided: early return]
                    if myfilesdict is None:
                        myfilesdict="[empty/missing/bad digest]"
                    # [elided: else:]
                    # Count each distfile only once across the whole list.
                    for myfetchfile in myfilesdict:
                        if myfetchfile not in myfetchlist:
                            mysize+=myfilesdict[myfetchfile]
                            myfetchlist.append(myfetchfile)
                    # [elided: if ordered:]
                    counters.totalsize += mysize
                    verboseadd+=format_size(mysize)+" "
                # [elided: verbose repository-display section header]
                # assign index for a previous version in the same slot
                has_previous = False
                repo_name_prev = None
                slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
                    # [elided: SLOT argument]
                slot_matches = vardb.match(slot_atom)
                # [elided: if slot_matches: has_previous = True]
                repo_name_prev = vardb.aux_get(slot_matches[0],
                    # [elided: ["repository"] argument]
                # now use the data to generate output
                # [elided]
                if pkg.installed or not has_previous:
                    repoadd = repo_display.repoStr(repo_path_real)
                # [elided: else:]
                    repo_path_prev = None
                    # [elided: guard on repo_name_prev]
                    repo_path_prev = portdb.getRepositoryPath(
                        # [elided: repo_name_prev argument]
                    if repo_path_prev == repo_path_real:
                        repoadd = repo_display.repoStr(repo_path_real)
                    # [elided: else: repository transition display]
                    repoadd = "%s=>%s" % (
                        repo_display.repoStr(repo_path_prev),
                        repo_display.repoStr(repo_path_real))
                if repoadd and repoadd != "0":
                    # [elided]
                    verboseadd += teal("[%s]" % repoadd)
                # [elided]
                xs = [portage.cpv_getkey(pkg_key)] + \
                    list(portage.catpkgsplit(pkg_key)[2:])
                # [elided: -r0 suppression; default column width]
                if "COLUMNWIDTH" in self.settings:
                    # [elided: try:]
                    mywidth = int(self.settings["COLUMNWIDTH"])
                    except ValueError, e:
                        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
                        # [elided: writemsg call target]
                        "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
                            self.settings["COLUMNWIDTH"], noiselevel=-1)
                        # [elided: cleanup]
                oldlp = mywidth - 30
                # [elided: newlp assignment]
                # Convert myoldbest from a list to a string.
                # [elided: empty-list handling]
                for pos, key in enumerate(myoldbest):
                    key = portage.catpkgsplit(key)[2] + \
                        "-" + portage.catpkgsplit(key)[3]
                    if key[-3:] == "-r0":
                        # [elided: strip "-r0" suffix]
                    myoldbest[pos] = key
                myoldbest = blue("["+", ".join(myoldbest)+"]")
                # [elided]
                root_config = self.roots[myroot]
                system_set = root_config.sets["system"]
                world_set = root_config.sets["world"]
                # [elided: pkg_system/pkg_world defaults; try:]
                pkg_system = system_set.findAtomForPackage(pkg_key, metadata)
                pkg_world = world_set.findAtomForPackage(pkg_key, metadata)
                if not (oneshot or pkg_world) and \
                    myroot == self.target_root and \
                    favorites_set.findAtomForPackage(pkg_key, metadata):
                    # Maybe it will be added to world now.
                    if create_world_atom(pkg_key, metadata,
                        favorites_set, root_config):
                        # [elided: mark as world package]
                except portage.exception.InvalidDependString:
                    # This is reported elsewhere if relevant.
                    # [elided: pass]
                def pkgprint(pkg_str):
                    # Colorize according to merge status and set membership.
                    # [elided: if pkg_merge: / if pkg_system:]
                    return colorize("PKG_MERGE_SYSTEM", pkg_str)
                    # [elided: elif pkg_world:]
                    return colorize("PKG_MERGE_WORLD", pkg_str)
                    # [elided: else:]
                    return colorize("PKG_MERGE", pkg_str)
                    elif pkg_status == "uninstall":
                        return colorize("PKG_UNINSTALL", pkg_str)
                    # [elided: else: / if pkg_system:]
                    return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
                    # [elided: elif pkg_world:]
                    return colorize("PKG_NOMERGE_WORLD", pkg_str)
                    # [elided: else:]
                    return colorize("PKG_NOMERGE", pkg_str)
                # [elided: branch for non-default ROOT (x[1] != "/")]
                if "--columns" in self.myopts:
                    if "--quiet" in self.myopts:
                        myprint=addl+" "+indent+pkgprint(pkg_cp)
                        myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
                        myprint=myprint+myoldbest
                        myprint=myprint+darkgreen("to "+x[1])
                    # [elided: else: / if not pkg_merge:]
                    myprint = "[%s] %s%s" % \
                        (pkgprint(pkg_status.ljust(13)),
                        indent, pkgprint(pkg.cp))
                    # [elided: else:]
                    myprint = "[%s %s] %s%s" % \
                        (pkgprint(pkg.type_name), addl,
                        indent, pkgprint(pkg.cp))
                    if (newlp-nc_len(myprint)) > 0:
                        myprint=myprint+(" "*(newlp-nc_len(myprint)))
                    myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
                    if (oldlp-nc_len(myprint)) > 0:
                        myprint=myprint+" "*(oldlp-nc_len(myprint))
                    myprint=myprint+myoldbest
                    myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd
                # [elided: else (no --columns): / if not pkg_merge:]
                    myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
                    # [elided: else:]
                    myprint = "[" + pkg_type + " " + addl + "] "
                    myprint += indent + pkgprint(pkg_key) + " " + \
                        myoldbest + darkgreen("to " + myroot) + " " + \
                        # [elided: verboseadd operand]
                # [elided: branch for default ROOT ("/")]
                if "--columns" in self.myopts:
                    if "--quiet" in self.myopts:
                        myprint=addl+" "+indent+pkgprint(pkg_cp)
                        myprint=myprint+" "+green(xs[1]+xs[2])+" "
                        myprint=myprint+myoldbest
                    # [elided: else: / if not pkg_merge:]
                    myprint = "[%s] %s%s" % \
                        (pkgprint(pkg_status.ljust(13)),
                        indent, pkgprint(pkg.cp))
                    # [elided: else:]
                    myprint = "[%s %s] %s%s" % \
                        (pkgprint(pkg.type_name), addl,
                        indent, pkgprint(pkg.cp))
                    if (newlp-nc_len(myprint)) > 0:
                        myprint=myprint+(" "*(newlp-nc_len(myprint)))
                    myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
                    if (oldlp-nc_len(myprint)) > 0:
                        myprint=myprint+(" "*(oldlp-nc_len(myprint)))
                    myprint=myprint+myoldbest+" "+verboseadd
                # [elided: else: / if not pkg_merge:]
                    myprint = "[%s] %s%s %s %s" % \
                        (pkgprint(pkg_status.ljust(13)),
                        indent, pkgprint(pkg.cpv),
                        myoldbest, verboseadd)
                    # [elided: else:]
                    myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
                # [elided: appending myprint to output list]
                mysplit = [portage.cpv_getkey(pkg_key)] + \
                    list(portage.catpkgsplit(pkg_key)[2:])
                # Special-case a pending portage self-upgrade warning.
                if "--tree" not in self.myopts and mysplit and \
                    len(mysplit) == 3 and mysplit[0] == "sys-apps/portage" and \
                    # [elided: ROOT condition]
                    if mysplit[2] == "r0":
                        myversion = mysplit[1]
                    # [elided: else:]
                        myversion = "%s-%s" % (mysplit[1], mysplit[2])
                    # [elided]
                    if myversion != portage.VERSION and "--quiet" not in self.myopts:
                        if mylist_index < len(mylist) - 1:
                            p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
                            p.append(colorize("WARN", " then resume the merge."))
        # [elided: final output loop printing accumulated lines/blockers]
        sys.stdout.write(str(repo_display))
        # [elided]
        if "--changelog" in self.myopts:
            # [elided]
            for revision,text in changelogs:
                print bold('*'+revision)
                sys.stdout.write(text)
        # [elided: counters summary output and return]
    def display_problems(self):
        """
        Display problems with the dependency graph such as slot collisions.
        This is called internally by display() to show the problems _after_
        the merge list where it is most likely to be seen, but if display()
        is not going to be called then this method should be called explicitly
        to ensure that the user is notified of problems with the graph.

        NOTE(review): excerpt is non-contiguous; elided lines marked below
        and indentation reconstructed.
        """
        if self._circular_deps_for_display is not None:
            self._show_circular_deps(
                self._circular_deps_for_display)
        # The user is only notified of a slot conflict if
        # there are no unresolvable blocker conflicts.
        if self._unsatisfied_blockers_for_display is not None:
            self._show_unsatisfied_blockers(
                self._unsatisfied_blockers_for_display)
        # [elided: elif branch header for slot collisions]
            self._show_slot_collision_notice()
        # TODO: Add generic support for "set problem" handlers so that
        # the below warnings aren't special cases for world only.
        # [elided]
        if self._missing_args:
            world_problems = False
            if "world" in self._sets:
                for arg, atom in self._missing_args:
                    if arg.name == "world":
                        world_problems = True
                        # [elided: break]
            # [elided: if world_problems:]
            sys.stderr.write("\n!!! Problems have been " + \
                "detected with your world file\n")
            sys.stderr.write("!!! Please run " + \
                green("emaint --check world")+"\n\n")
        if self._missing_args:
            sys.stderr.write("\n" + colorize("BAD", "!!!") + \
                " Ebuilds for the following packages are either all\n")
            sys.stderr.write(colorize("BAD", "!!!") + \
                " masked or don't exist:\n")
            sys.stderr.write(" ".join(atom for arg, atom in \
                self._missing_args) + "\n")
        if self._pprovided_args:
            # [elided: arg_refs initialization]
            for arg, atom in self._pprovided_args:
                if isinstance(arg, SetArg):
                    # [elided: parent assignment]
                    arg_atom = (atom, atom)
                # [elided: else: parent assignment]
                    arg_atom = (arg.arg, atom)
                refs = arg_refs.setdefault(arg_atom, [])
                if parent not in refs:
                    # [elided: refs.append(parent)]
            # [elided: msg initialization]
            msg.append(bad("\nWARNING: "))
            if len(self._pprovided_args) > 1:
                msg.append("Requested packages will not be " + \
                    "merged because they are listed in\n")
            # [elided: else:]
                msg.append("A requested package will not be " + \
                    "merged because it is listed in\n")
            msg.append("package.provided:\n\n")
            problems_sets = set()
            for (arg, atom), refs in arg_refs.iteritems():
                # [elided: ref_string default; guard on refs]
                problems_sets.update(refs)
                # [elided: sorting]
                ref_string = ", ".join(["'%s'" % name for name in refs])
                ref_string = " pulled in by " + ref_string
                msg.append(" %s%s\n" % (colorize("INFORM", arg), ref_string))
            # [elided]
            if "world" in problems_sets:
                msg.append("This problem can be solved in one of the following ways:\n\n")
                msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
                msg.append(" B) Uninstall offending packages (cleans them from world).\n")
                msg.append(" C) Remove offending entries from package.provided.\n\n")
                msg.append("The best course of action depends on the reason that an offending\n")
                msg.append("package.provided entry exists.\n\n")
            sys.stderr.write("".join(msg))
        # [elided]
        masked_packages = []
        for pkg, pkgsettings in self._masked_installed:
            root_config = self.roots[pkg.root]
            mreasons = get_masking_status(pkg, pkgsettings, root_config)
            masked_packages.append((root_config, pkgsettings,
                pkg.cpv, pkg.metadata, mreasons))
        # [elided: guard on masked_packages]
        sys.stderr.write("\n" + colorize("BAD", "!!!") + \
            " The following installed packages are masked:\n")
        show_masked_packages(masked_packages)
        # [elided]
        for pargs, kwargs in self._unsatisfied_deps_for_display:
            self._show_unsatisfied_dep(*pargs, **kwargs)
    def calc_changelog(self,ebuildpath,current,next):
        """Collect ChangeLog entries between versions *current* and *next*.

        Returns a list of (revision, text) tuples as produced by
        find_changelog_tags() (empty on any problem, per the elided
        early returns).

        NOTE(review): excerpt is non-contiguous; elided lines marked below.
        """
        if ebuildpath == None or not os.path.exists(ebuildpath):
            # [elided: early return]
        # Normalize versions: drop category, strip trivial "-r0" revision.
        current = '-'.join(portage.catpkgsplit(current)[1:])
        if current.endswith('-r0'):
            current = current[:-3]
        next = '-'.join(portage.catpkgsplit(next)[1:])
        if next.endswith('-r0'):
            # [elided: strip "-r0" from next]
        changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
        # [elided: try:]
        changelog = open(changelogpath).read()
        except SystemExit, e:
            raise # Needed else can't exit
        # [elided: broad except returning empty result]
        divisions = self.find_changelog_tags(changelog)
        #print 'XX from',current,'to',next
        #for div,text in divisions: print 'XX',div
        # skip entries for all revisions above the one we are about to emerge
        for i in range(len(divisions)):
            if divisions[i][0]==next:
                divisions = divisions[i:]
                # [elided: break]
        # find out how many entries we are going to display
        for i in range(len(divisions)):
            if divisions[i][0]==current:
                divisions = divisions[:i]
                # [elided: break]
        # [elided: else branch]
            # couldnt find the current revision in the list. display nothing
            # [elided: empty return]
        # [elided: return divisions]
    def find_changelog_tags(self,changelog):
        """Split a ChangeLog body into (release, text) chunks, newest first.

        Matches "*<version>" headers via regex; strips ".ebuild" and "-r0"
        suffixes from the release token.

        NOTE(review): excerpt is non-contiguous -- the accumulator setup
        and the enclosing loop header are elided; indentation reconstructed.
        """
        # [elided: divs/release initialization and loop header]
        match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
        # [elided: no-match branch header]
        if release is not None:
            # Remainder of the changelog belongs to the last seen release.
            divs.append((release,changelog))
            # [elided: return]
        if release is not None:
            divs.append((release,changelog[:match.start()]))
        changelog = changelog[match.end():]
        release = match.group(1)
        if release.endswith('.ebuild'):
            release = release[:-7]
        if release.endswith('-r0'):
            release = release[:-3]
    def saveNomergeFavorites(self):
        """Find atoms in favorites that are not in the mergelist and add them
        to the world file if necessary.

        NOTE(review): excerpt is non-contiguous; elided lines marked below
        and indentation reconstructed.
        """
        # Abort for option modes that must not modify the world file.
        for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
            "--oneshot", "--onlydeps", "--pretend"):
            if x in self.myopts:
                # [elided: return]
        root_config = self.roots[self.target_root]
        world_set = root_config.sets["world"]
        # [elided]
        world_set.load() # maybe it's changed on disk
        args_set = self._sets["args"]
        portdb = self.trees[self.target_root]["porttree"].dbapi
        added_favorites = set()
        for x in self._set_nodes:
            pkg_type, root, pkg_key, pkg_status = x
            if pkg_status != "nomerge":
                # [elided: continue]
            metadata = dict(izip(self._mydbapi_keys,
                self.mydbapi[root].aux_get(pkg_key, self._mydbapi_keys)))
            # [elided: try:]
            myfavkey = create_world_atom(pkg_key, metadata,
                args_set, root_config)
            # [elided: guard on myfavkey]
            if myfavkey in added_favorites:
                # [elided: continue]
            added_favorites.add(myfavkey)
            except portage.exception.InvalidDependString, e:
                writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
                    (pkg_key, str(e)), noiselevel=-1)
                writemsg("!!! see '%s'\n\n" % os.path.join(
                    root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
                # [elided: cleanup]
        # [elided: all_added initialization]
        for k in self._sets:
            if k in ("args", "world") or not root_config.sets[k].world_candidate:
                # [elided: continue]
            # [elided]
            all_added.append(SETPREFIX + k)
        all_added.extend(added_favorites)
        # [elided: sorting and per-entry loop header]
            print ">>> Recording %s in \"world\" favorites file..." % \
                colorize("INFORM", a)
        # [elided: guard on all_added]
        world_set.update(all_added)
        # [elided: persisting the world set]
    def loadResumeCommand(self, resume_data):
        """
        Add a resume command to the graph and validate it in the process. This
        will raise a PackageNotFound exception if a package is not available.

        NOTE(review): excerpt is non-contiguous; elided lines marked below
        and indentation reconstructed.
        """
        # [elided]
        if not isinstance(resume_data, dict):
            # [elided: early return]
        mergelist = resume_data.get("mergelist")
        if not isinstance(mergelist, list):
            # [elided: default assignment]
        if mergelist and "--skipfirst" in self.myopts:
            # Drop the first pending merge task from the resume list.
            for i, task in enumerate(mergelist):
                if isinstance(task, list) and \
                    task and task[-1] == "merge":
                    # [elided: removal and break]
        fakedb = self.mydbapi
        # [elided: trees alias]
        serialized_tasks = []
        # [elided: loop header over mergelist]
            if not (isinstance(x, list) and len(x) == 4):
                # [elided: continue]
            pkg_type, myroot, pkg_key, action = x
            if pkg_type not in self.pkg_tree_map:
                # [elided: continue]
            if action != "merge":
                # [elided: continue]
            mydb = trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
            # [elided: try:]
            metadata = dict(izip(self._mydbapi_keys,
                mydb.aux_get(pkg_key, self._mydbapi_keys)))
            # [elided: except KeyError:]
                # It does no exist or it is corrupt.
                if action == "uninstall":
                    # [elided: continue]
                raise portage.exception.PackageNotFound(pkg_key)
            if pkg_type == "ebuild":
                pkgsettings = self.pkgsettings[myroot]
                pkgsettings.setcpv(pkg_key, mydb=metadata)
                metadata["USE"] = pkgsettings["PORTAGE_USE"]
            installed = action == "uninstall"
            built = pkg_type != "ebuild"
            pkg = Package(built=built, cpv=pkg_key,
                installed=installed, metadata=metadata,
                operation=action, root=myroot,
                # [elided: remaining constructor arguments]
            self._pkg_cache[pkg] = pkg
            # [elided]
            root_config = self.roots[pkg.root]
            if "merge" == pkg.operation and \
                not visible(root_config.settings, pkg):
                self._unsatisfied_deps_for_display.append(
                    ((pkg.root, "="+pkg.cpv), {"myparent":None}))
            # [elided]
            fakedb[myroot].cpv_inject(pkg)
            serialized_tasks.append(pkg)
            self.spinner.update()
        # [elided]
        if self._unsatisfied_deps_for_display:
            # [elided: failure return]
        if not serialized_tasks or "--nodeps" in self.myopts:
            self._serialized_tasks_cache = serialized_tasks
        # [elided: else branch header]
            self._select_package = self._select_pkg_from_graph
            self.myparams.add("selective")
            # [elided]
            favorites = resume_data.get("favorites")
            if isinstance(favorites, list):
                args = self._load_favorites(favorites)
            # [elided: else branch; try:]
            for task in serialized_tasks:
                if isinstance(task, Package) and \
                    task.operation == "merge":
                    if not self._add_pkg(task, None):
                        # [elided: failure return]
            # Packages for argument atoms need to be explicitly
            # added via _add_pkg() so that they are included in the
            # digraph (needed at least for --tree display).
            # [elided: loop header over args]
                for atom in arg.set:
                    # [elided: try:]
                    pkg, existing_node = self._select_package(
                        arg.root_config.root, atom)
                    if existing_node is None and \
                        # [elided: remainder of condition]
                        if not self._add_pkg(pkg, arg):
                            # [elided: failure return]
            # Allow unsatisfied deps here to avoid showing a masking
            # message for an unsatisfied dep that isn't necessarily
            # [elided: remainder of comment]
            if not self._create_graph(allow_unsatisfied=True):
                # [elided: failure return]
            if self._unsatisfied_deps:
                # This probably means that a required package
                # was dropped via --skipfirst. It makes the
                # resume list invalid, so convert it to a
                # UnsatisfiedResumeDep exception.
                raise self.UnsatisfiedResumeDep(
                    self._unsatisfied_deps)
            self._serialized_tasks_cache = None
            # [elided: try: task serialization]
            except self._unknown_internal_error:
                # [elided: failure return]
        # [elided: success return]
    def _load_favorites(self, favorites):
        """
        Use a list of favorites to resume state from a
        previous select_files() call. This creates similar
        DependencyArg instances to those that would have
        been created by the original select_files() call.
        This allows Package instances to be matched with
        DependencyArg instances during graph creation.

        NOTE(review): excerpt is non-contiguous; elided lines marked below
        and indentation reconstructed.
        """
        root_config = self.roots[self.target_root]
        getSetAtoms = root_config.setconfig.getSetAtoms
        sets = root_config.sets
        # [elided: args initialization and loop header over favorites]
            if not isinstance(x, basestring):
                # [elided: continue]
            if x in ("system", "world"):
                # [elided: normalization to SETPREFIX form]
            if x.startswith(SETPREFIX):
                s = x[len(SETPREFIX):]
                # [elided: guards for unknown/already-loaded sets]
                # Recursively expand sets so that containment tests in
                # self._get_parent_sets() properly match atoms in nested
                # sets (like if world contains system).
                expanded_set = InternalPackageSet(
                    initial_atoms=getSetAtoms(s))
                self._sets[s] = expanded_set
                args.append(SetArg(arg=x, set=expanded_set,
                    root_config=root_config))
            # [elided: else branch header]
                if not portage.isvalidatom(x):
                    # [elided: continue]
                args.append(AtomArg(arg=x, atom=x,
                    root_config=root_config))
        # [elided]
        # Create the "args" package set from atoms and
        # packages given as arguments.
        args_set = self._sets["args"]
        # [elided: loop header over args]
            if not isinstance(arg, (AtomArg, PackageArg)):
                # [elided: continue]
            # [elided: myatom assignment]
            if myatom in args_set:
                # [elided: continue]
            args_set.add(myatom)
        self._set_atoms.update(chain(*self._sets.itervalues()))
        atom_arg_map = self._atom_arg_map
        # [elided: loop header over args]
            for atom in arg.set:
                atom_key = (atom, arg.root_config.root)
                refs = atom_arg_map.get(atom_key)
                # [elided: refs default initialization]
                atom_arg_map[atom_key] = refs
                # [elided: appending arg to refs]
        # [elided: return args]
    class UnsatisfiedResumeDep(portage.exception.PortageException):
        """
        A dependency of a resume list is not installed. This
        can occur when a required package is dropped from the
        merge list via --skipfirst.
        """
    class _internal_exception(portage.exception.PortageException):
        """Base class for exceptions used internally by the depgraph."""
        def __init__(self, value=""):
            # Explicit base-class call (Python 2 style; avoids super() on
            # a potentially old-style exception class).
            portage.exception.PortageException.__init__(self, value)
    class _unknown_internal_error(_internal_exception):
        """
        Used by the depgraph internally to terminate graph creation.
        The specific reason for the failure should have been dumped
        to stderr, unfortunately, the exact reason for the failure
        """
        # NOTE(review): tail of this docstring is elided in this excerpt.
    class _serialize_tasks_retry(_internal_exception):
        """
        This is raised by the _serialize_tasks() method when it needs to
        be called again for some reason. The only case that it's currently
        used for is when neglected dependencies need to be added to the
        graph in order to avoid making a potentially unsafe decision.
        """
5270 class _dep_check_composite_db(portage.dbapi):
5272 A dbapi-like interface that is optimized for use in dep_check() calls.
5273 This is built on top of the existing depgraph package selection logic.
5274 Some packages that have been added to the graph may be masked from this
5275 view in order to influence the atom preference selection that occurs
5278 def __init__(self, depgraph, root):
5279 portage.dbapi.__init__(self)
5280 self._depgraph = depgraph
5282 self._match_cache = {}
5283 self._cpv_pkg_map = {}
5285 def match(self, atom):
5286 ret = self._match_cache.get(atom)
5291 atom = self._dep_expand(atom)
5292 pkg, existing = self._depgraph._select_package(self._root, atom)
5296 # Return the highest available from select_package() as well as
5297 # any matching slots in the graph db.
5299 slots.add(pkg.metadata["SLOT"])
5300 atom_cp = portage.dep_getkey(atom)
5301 if pkg.cp.startswith("virtual/"):
5302 # For new-style virtual lookahead that occurs inside
5303 # dep_check(), examine all slots. This is needed
5304 # so that newer slots will not unnecessarily be pulled in
5305 # when a satisfying lower slot is already installed. For
5306 # example, if virtual/jdk-1.4 is satisfied via kaffe then
5307 # there's no need to pull in a newer slot to satisfy a
5308 # virtual/jdk dependency.
5309 for db, pkg_type, built, installed, db_keys in \
5310 self._depgraph._filtered_trees[self._root]["dbs"]:
5311 for cpv in db.match(atom):
5312 if portage.cpv_getkey(cpv) != pkg.cp:
5314 slots.add(db.aux_get(cpv, ["SLOT"])[0])
5316 if self._visible(pkg):
5317 self._cpv_pkg_map[pkg.cpv] = pkg
5319 slots.remove(pkg.metadata["SLOT"])
5321 slot_atom = "%s:%s" % (atom_cp, slots.pop())
5322 pkg, existing = self._depgraph._select_package(
5323 self._root, slot_atom)
5326 if not self._visible(pkg):
5328 self._cpv_pkg_map[pkg.cpv] = pkg
5331 self._cpv_sort_ascending(ret)
5332 self._match_cache[orig_atom] = ret
5335 def _visible(self, pkg):
5336 if pkg.installed and "selective" not in self._depgraph.myparams:
5338 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
5339 except (StopIteration, portage.exception.InvalidDependString):
5346 self._depgraph.pkgsettings[pkg.root], pkg):
5348 except portage.exception.InvalidDependString:
# Fully qualify a possibly category-less atom, emulating cpv_expand().
# NOTE(review): elided listing -- the docstring delimiters (5353,
# 5360-5361), several blank/structural lines and the final
# "return atom" (presumably 5388) are missing.
5352 def _dep_expand(self, atom):
5354 This is only needed for old installed packages that may
5355 contain atoms that are not fully qualified with a specific
5356 category. Emulate the cpv_expand() function that's used by
5357 dbapi.match() in cases like this. If there are multiple
5358 matches, it's often due to a new-style virtual that has
5359 been added, so try to filter those out to avoid raising
5362 root_config = self._depgraph.roots[self._root]
5364 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
# Ambiguity resolution: prefer the single non-virtual expansion when
# exactly one exists; otherwise the ambiguity is a hard error.
5365 if len(expanded_atoms) > 1:
5366 non_virtual_atoms = []
5367 for x in expanded_atoms:
5368 if not portage.dep_getkey(x).startswith("virtual/"):
5369 non_virtual_atoms.append(x)
5370 if len(non_virtual_atoms) == 1:
5371 expanded_atoms = non_virtual_atoms
5372 if len(expanded_atoms) > 1:
5373 # compatible with portage.cpv_expand()
5374 raise ValueError([portage.dep_getkey(x) \
5375 for x in expanded_atoms])
5377 atom = expanded_atoms[0]
# No expansion matched: fall back to a "null" category, then check
# the package-name virtuals table to decide between a "virtual/"
# or "null/" qualification.
5379 null_atom = insert_category_into_atom(atom, "null")
5380 null_cp = portage.dep_getkey(null_atom)
5381 cat, atom_pn = portage.catsplit(null_cp)
5382 virts_p = root_config.settings.get_virts_p().get(atom_pn)
5384 # Allow the resolver to choose which virtual.
5385 atom = insert_category_into_atom(atom, "virtual")
5387 atom = insert_category_into_atom(atom, "null")
def aux_get(self, cpv, wants):
	"""Emulate dbapi.aux_get() for packages already recorded in
	self._cpv_pkg_map: look up the Package object for cpv and
	return one metadata value per requested key, substituting an
	empty string for any key that is absent."""
	metadata = self._cpv_pkg_map[cpv].metadata
	values = []
	for key in wants:
		values.append(metadata.get(key, ""))
	return values
# Dict keyed by package identity; every insertion is mirrored into the
# root's visible_pkgs index when the package passes the global visible()
# check.  NOTE(review): elided listing -- original lines 5396 and 5398
# are missing (likely blanks, but possibly a dict.__init__ call; confirm
# against upstream before relying on this).
5394 class _package_cache(dict):
5395 def __init__(self, depgraph):
5397 self._depgraph = depgraph
5399 def __setitem__(self, k, v):
5400 dict.__setitem__(self, k, v)
# Side effect: visible packages are injected into the per-root
# visible_pkgs database as they are cached.
5401 root_config = self._depgraph.roots[v.root]
5402 if visible(root_config.settings, v):
5403 root_config.visible_pkgs.cpv_inject(v)
# Maps repository paths (PORTDIR + overlays, across all roots) to short
# display indexes like "[0]", "[1]", "[?]" for the merge-list printout.
# NOTE(review): elided listing -- numerous original lines are missing
# (5409, 5412, 5415, 5421, 5426, 5428, 5430-5431, 5434, 5436, 5441,
# 5444-5448, 5456, 5459, 5463), including the repo_paths set creation
# and parts of repoStr()/__str__().
5405 class RepoDisplay(object):
5406 def __init__(self, roots):
5407 self._shown_repos = {}
5408 self._unknown_repo = False
# Collect PORTDIR and every PORTDIR_OVERLAY entry from each root.
5410 for root_config in roots.itervalues():
5411 portdir = root_config.settings.get("PORTDIR")
5413 repo_paths.add(portdir)
5414 overlays = root_config.settings.get("PORTDIR_OVERLAY")
5416 repo_paths.update(overlays.split())
5417 repo_paths = list(repo_paths)
5418 self._repo_paths = repo_paths
# Real (symlink-resolved) paths are what repoStr() is queried with.
5419 self._repo_paths_real = [ os.path.realpath(repo_path) \
5420 for repo_path in repo_paths ]
5422 # pre-allocate index for PORTDIR so that it always has index 0.
5423 for root_config in roots.itervalues():
5424 portdb = root_config.trees["porttree"].dbapi
5425 portdir = portdb.porttree_root
5427 self.repoStr(portdir)
# Return the short display token for a resolved repository path,
# assigning the next free index on first sight.
5429 def repoStr(self, repo_path_real):
# NOTE(review): list.index() raises ValueError on a miss rather than
# returning -1; the elided lines 5430-5431 presumably initialize
# real_index / guard this call -- confirm against upstream.
5432 real_index = self._repo_paths_real.index(repo_path_real)
5433 if real_index == -1:
5435 self._unknown_repo = True
5437 shown_repos = self._shown_repos
5438 repo_paths = self._repo_paths
5439 repo_path = repo_paths[real_index]
5440 index = shown_repos.get(repo_path)
5442 index = len(shown_repos)
5443 shown_repos[repo_path] = index
# __str__-style legend: one line per shown repo, in index order,
# plus a "[?]" note when any repo could not be determined.
5449 shown_repos = self._shown_repos
5450 unknown_repo = self._unknown_repo
5451 if shown_repos or self._unknown_repo:
5452 output.append("Portage tree and overlays:\n")
# Invert the path->index mapping into an index-ordered list.
5453 show_repo_paths = list(shown_repos)
5454 for repo_path, repo_index in shown_repos.iteritems():
5455 show_repo_paths[repo_index] = repo_path
5457 for index, repo_path in enumerate(show_repo_paths):
5458 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
5460 output.append(" "+teal("[?]") + \
5461 " indicates that the source repository could not be determined\n")
5462 return "".join(output)
# Accumulates merge-list statistics (upgrades, downgrades, new slots,
# reinstalls, uninstalls, blockers, fetch restrictions, download size)
# and renders the "Total: N packages (...)" summary line.
# NOTE(review): elided listing -- most of __init__ (original lines
# 5465-5473, 5475, 5478-5479 etc.) and several pluralization /
# counter lines inside the string builder are missing.
5464 class PackageCounters(object):
5474 self.blocks_satisfied = 0
5476 self.restrict_fetch = 0
5477 self.restrict_fetch_satisfied = 0
# Summary rendering: builds the output piecewise so plural "s" and
# the parenthesized detail list are only added when needed.
5480 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
5483 myoutput.append("Total: %s package" % total_installs)
5484 if total_installs != 1:
5485 myoutput.append("s")
5486 if total_installs != 0:
5487 myoutput.append(" (")
5488 if self.upgrades > 0:
5489 details.append("%s upgrade" % self.upgrades)
5490 if self.upgrades > 1:
5492 if self.downgrades > 0:
5493 details.append("%s downgrade" % self.downgrades)
5494 if self.downgrades > 1:
5497 details.append("%s new" % self.new)
5498 if self.newslot > 0:
5499 details.append("%s in new slot" % self.newslot)
5500 if self.newslot > 1:
5503 details.append("%s reinstall" % self.reinst)
5507 details.append("%s uninstall" % self.uninst)
5510 myoutput.append(", ".join(details))
5511 if total_installs != 0:
5512 myoutput.append(")")
5513 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
# Fetch-restricted packages get their own line, flagging how many
# still lack the required distfiles.
5514 if self.restrict_fetch:
5515 myoutput.append("\nFetch Restriction: %s package" % \
5516 self.restrict_fetch)
5517 if self.restrict_fetch > 1:
5518 myoutput.append("s")
5519 if self.restrict_fetch_satisfied < self.restrict_fetch:
5520 myoutput.append(bad(" (%s unsatisfied)") % \
5521 (self.restrict_fetch - self.restrict_fetch_satisfied))
# Blocker conflicts are reported last, highlighting unresolved ones.
5523 myoutput.append("\nConflict: %s block" % \
5526 myoutput.append("s")
5527 if self.blocks_satisfied < self.blocks:
5528 myoutput.append(bad(" (%s unsatisfied)") % \
5529 (self.blocks - self.blocks_satisfied))
5530 return "".join(myoutput)
# Drives the actual merge phase: fetch/build/install each package in
# the resolved merge list (see _merge below).
5532 class MergeTask(object):
# Options under which installed blockers are not searched for --
# nothing is really merged in these modes, so blockers are moot.
5534 _opts_ignore_blockers = \
5535 frozenset(["--buildpkgonly",
5536 "--fetchonly", "--fetch-all-uri",
5537 "--nodeps", "--pretend"])
# Snapshot config per root and build a BlockerDB for each root.
# NOTE(review): elided listing -- lines 5542, 5544, 5546 and 5549
# (including the "for root in trees" loop header) are missing.
5539 def __init__(self, settings, trees, myopts):
5540 self.settings = settings
5541 self.target_root = settings["ROOT"]
5543 self.myopts = myopts
# Debug flag is presumably latched from PORTAGE_DEBUG here (the
# assignment line itself is elided).
5545 if settings.get("PORTAGE_DEBUG", "") == "1":
5547 self.pkgsettings = {}
5548 self._blocker_db = {}
# Per-root: a private clone of the vartree config, plus a blocker
# database combining vartree and porttree.
5550 self.pkgsettings[root] = portage.config(
5551 clone=trees[root]["vartree"].settings)
5552 self._blocker_db[root] = BlockerDB(
5553 trees[root]["vartree"],
5554 trees[root]["porttree"].dbapi)
# Child pids (e.g. the parallel-fetch process) to reap later.
5556 self._spawned_pids = []
# Thin wrapper around _find_blockers_with_lock with acquire_lock=0.
# NOTE(review): elided listing -- docstring delimiters (5559,
# 5562-5563) are missing; the surviving docstring text mentions a
# callable, which may not match the direct return seen at 5564.
5558 def _find_blockers(self, new_pkg):
5560 Returns a callable which should be called only when
5561 the vdb lock has been acquired.
5564 return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
# Collect dblink objects for installed packages that block new_pkg,
# skipping the slot/version new_pkg itself replaces.
# NOTE(review): elided listing -- lines 5569-5570 (early return for
# ignored-blockers modes), 5576, 5578 and 5583 are missing.
5567 def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
# In pretend/fetch-only/etc. modes blockers are irrelevant.
5568 if self._opts_ignore_blockers.intersection(self.myopts):
5571 blocker_dblinks = []
5572 for blocking_pkg in self._blocker_db[
5573 new_pkg.root].findInstalledBlockers(new_pkg,
5574 acquire_lock=acquire_lock):
# A package in the same slot (or the identical cpv) is being
# replaced by this merge, so it is not treated as a blocker.
5575 if new_pkg.slot_atom == blocking_pkg.slot_atom:
5577 if new_pkg.cpv == blocking_pkg.cpv:
5579 blocker_dblinks.append(portage.dblink(
5580 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
5581 self.pkgsettings[blocking_pkg.root], treetype="vartree",
5582 vartree=self.trees[blocking_pkg.root]["vartree"]))
5584 return blocker_dblinks
# Public entry point: run _merge(), then hand any still-running child
# pids to portage.process for global cleanup.  NOTE(review): elided
# listing -- the try: (5587) and finally: (5589) lines are missing,
# but the structure implies the pid handover runs unconditionally.
5586 def merge(self, mylist, favorites, mtimedb):
5588 return self._merge(mylist, favorites, mtimedb)
5590 if self._spawned_pids:
5591 from portage import process
5592 process.spawned_pids.extend(self._spawned_pids)
5593 self._spawned_pids = []
# Reap zombie children between merges without blocking.
# NOTE(review): elided listing -- docstring delimiters, the early
# return (5603), the try: (5605) and the OSError except clause
# (5607-5608, 5611) are missing.
5595 def _poll_child_processes(self):
5597 After each merge, collect status from child processes
5598 in order to clean up zombies (such as the parallel-fetch
5601 spawned_pids = self._spawned_pids
5602 if not spawned_pids:
5604 for pid in list(spawned_pids):
# waitpid with WNOHANG returns (0, 0) while the child is still
# running -- such pids stay on the list.
5606 if os.waitpid(pid, os.WNOHANG) == (0, 0):
5609 # This pid has been cleaned up elsewhere,
5610 # so remove it from our list.
5612 spawned_pids.remove(pid)
# Main merge loop: for each entry in the resolved list, fetch/build/
# install it (ebuild), fetch+install it (binary), or uninstall it,
# updating the world file and the --resume state as it goes.
# NOTE(review): heavily elided listing -- many original lines are
# missing throughout (loop headers, else/except/finally clauses,
# early returns); read the embedded numbering gaps before assuming
# control flow.
5614 def _merge(self, mylist, favorites, mtimedb):
5615 from portage.elog import elog_process
5616 from portage.elog.filtering import filter_mergephases
5617 buildpkgonly = "--buildpkgonly" in self.myopts
5619 fetchonly = "--fetchonly" in self.myopts or \
5620 "--fetch-all-uri" in self.myopts
5621 oneshot = "--oneshot" in self.myopts or \
5622 "--onlydeps" in self.myopts
5623 pretend = "--pretend" in self.myopts
5624 ldpath_mtimes = mtimedb["ldpath"]
5625 xterm_titles = "notitles" not in self.settings.features
5627 if "--resume" in self.myopts:
5629 print colorize("GOOD", "*** Resuming merge...")
5630 emergelog(xterm_titles, " *** Resuming merge...")
# Persist the pending merge list early so --resume survives a
# digest failure below.
5632 # Do this before verifying the ebuild Manifests since it might
5633 # be possible for the user to use --resume --skipfirst get past
5634 # a non-essential package with a broken digest.
5635 mtimedb["resume"]["mergelist"] = [list(x) for x in mylist \
5636 if isinstance(x, Package) and x.operation == "merge"]
5639 # Verify all the manifests now so that the user is notified of failure
5640 # as soon as possible.
5641 if "--fetchonly" not in self.myopts and \
5642 "--fetch-all-uri" not in self.myopts and \
5643 "strict" in self.settings.features:
5644 shown_verifying_msg = False
# Quiet per-root configs so digestcheck output stays terse.
5646 for myroot, pkgsettings in self.pkgsettings.iteritems():
5647 quiet_config = portage.config(clone=pkgsettings)
5648 quiet_config["PORTAGE_QUIET"] = "1"
5649 quiet_config.backup_changes("PORTAGE_QUIET")
5650 quiet_settings[myroot] = quiet_config
5653 if x[0] != "ebuild" or x[-1] == "nomerge":
5655 if not shown_verifying_msg:
5656 shown_verifying_msg = True
5657 print ">>> Verifying ebuild Manifests..."
5658 mytype, myroot, mycpv, mystatus = x
5659 portdb = self.trees[myroot]["porttree"].dbapi
5660 quiet_config = quiet_settings[myroot]
5661 quiet_config["O"] = os.path.dirname(portdb.findname(mycpv))
5662 if not portage.digestcheck([], quiet_config, strict=True):
5664 del x, mytype, myroot, mycpv, mystatus, quiet_config
5665 del shown_verifying_msg, quiet_settings
5667 root_config = self.trees[self.target_root]["root_config"]
5668 system_set = root_config.sets["system"]
5669 args_set = InternalPackageSet(favorites)
5670 world_set = root_config.sets["world"]
5672 mymergelist = mylist
5673 myfeat = self.settings.features[:]
5674 bad_resume_opts = set(["--ask", "--changelog", "--skipfirst",
# Parallel fetch: spawn a background "--resume --fetchonly --nodeps"
# emerge that downloads ahead of the build loop.
5676 if "parallel-fetch" in myfeat and \
5677 not ("--pretend" in self.myopts or \
5678 "--fetch-all-uri" in self.myopts or \
5679 "--fetchonly" in self.myopts):
5680 if "distlocks" not in myfeat:
5682 print red("!!!")+" parallel-fetching requires the distlocks feature enabled"
5683 print red("!!!")+" you have it disabled, thus parallel-fetching is being disabled"
5685 elif len(mymergelist) > 1:
5686 fetch_log = "/var/log/emerge-fetch.log"
5687 logfile = open(fetch_log, "w")
5688 fd_pipes = {1:logfile.fileno(), 2:logfile.fileno()}
5689 portage.util.apply_secpass_permissions(fetch_log,
5690 uid=portage.portage_uid, gid=portage.portage_gid,
5692 fetch_env = os.environ.copy()
5693 fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs"
5694 fetch_env["PORTAGE_NICENESS"] = "0"
5695 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
5696 fetch_args = [sys.argv[0], "--resume",
5697 "--fetchonly", "--nodeps"]
5698 resume_opts = self.myopts.copy()
5699 # For automatic resume, we need to prevent
5700 # any of bad_resume_opts from leaking in
5701 # via EMERGE_DEFAULT_OPTS.
5702 resume_opts["--ignore-default-opts"] = True
5703 for myopt, myarg in resume_opts.iteritems():
5704 if myopt not in bad_resume_opts:
5706 fetch_args.append(myopt)
5708 fetch_args.append(myopt +"="+ myarg)
5709 self._spawned_pids.extend(
5710 portage.process.spawn(
5711 fetch_args, env=fetch_env,
5712 fd_pipes=fd_pipes, returnpid=True))
5713 logfile.close() # belongs to the spawned process
5714 del fetch_log, logfile, fd_pipes, fetch_env, fetch_args, \
5716 print ">>> starting parallel fetching pid %d" % \
5717 self._spawned_pids[-1]
5719 metadata_keys = [k for k in portage.auxdbkeys \
5720 if not k.startswith("UNUSED_")] + ["USE"]
5722 task_list = mymergelist
5723 # Filter mymergelist so that all the len(mymergelist) calls
5724 # below (for display) do not count Uninstall instances.
5725 mymergelist = [x for x in mymergelist if x[-1] == "merge"]
# Main per-task loop (its "for x in task_list:" header is among the
# elided lines).  Each x is (pkg_type, root, cpv, operation).
5728 if x[0] == "blocks":
5730 pkg_type, myroot, pkg_key, operation = x
5732 built = pkg_type != "ebuild"
5733 installed = pkg_type == "installed"
5734 portdb = self.trees[myroot]["porttree"].dbapi
5735 bindb = self.trees[myroot]["bintree"].dbapi
5736 vartree = self.trees[myroot]["vartree"]
5737 vardb = vartree.dbapi
5738 root_config = self.trees[myroot]["root_config"]
5739 pkgsettings = self.pkgsettings[myroot]
5740 if pkg_type == "blocks":
5742 elif pkg_type == "ebuild":
5745 if pkg_type == "binary":
5747 elif pkg_type == "installed":
5750 raise AssertionError("Package type: '%s'" % pkg_type)
5754 metadata = pkg.metadata
# "installed" entries in the list mean an uninstall operation.
5757 if not (buildpkgonly or fetchonly or pretend):
5759 unmerge(root_config, self.myopts, "unmerge",
5760 [pkg.cpv], mtimedb["ldpath"], clean_world=0,
5762 except UninstallFailure, e:
5768 y = portdb.findname(pkg_key)
5769 if "--pretend" not in self.myopts:
5770 print "\n>>> Emerging (" + \
5771 colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \
5772 colorize("MERGE_LIST_PROGRESS", str(len(mymergelist))) + ") " + \
5773 colorize("GOOD", x[pkgindex]) + " to " + x[1]
5774 emergelog(xterm_titles, " >>> emerge ("+\
5775 str(mergecount)+" of "+str(len(mymergelist))+\
5776 ") "+x[pkgindex]+" to "+x[1])
5778 pkgsettings["EMERGE_FROM"] = x[0]
5779 pkgsettings.backup_changes("EMERGE_FROM")
5782 #buildsyspkg: Check if we need to _force_ binary package creation
5783 issyspkg = ("buildsyspkg" in myfeat) \
5784 and x[0] != "blocks" \
5785 and system_set.findAtomForPackage(pkg_key, metadata) \
5786 and "--buildpkg" not in self.myopts
# --- ebuild branch: fetch, clean, build/package, then merge ---
5787 if x[0] in ["ebuild","blocks"]:
5788 if x[0] == "blocks" and "--fetchonly" not in self.myopts:
5789 raise Exception, "Merging a blocker"
5790 elif "--fetchonly" in self.myopts or \
5791 "--fetch-all-uri" in self.myopts:
5792 if "--fetch-all-uri" in self.myopts:
5793 retval = portage.doebuild(y, "fetch", myroot,
5794 pkgsettings, self.edebug,
5795 "--pretend" in self.myopts, fetchonly=1,
5796 fetchall=1, mydbapi=portdb, tree="porttree")
5798 retval = portage.doebuild(y, "fetch", myroot,
5799 pkgsettings, self.edebug,
5800 "--pretend" in self.myopts, fetchonly=1,
5801 mydbapi=portdb, tree="porttree")
5802 if (retval is None) or retval:
5804 print "!!! Fetch for",y,"failed, continuing..."
5806 failed_fetches.append(pkg_key)
# Set up and lock the category/build directories before building.
5810 portage.doebuild_environment(y, "setup", myroot,
5811 pkgsettings, self.edebug, 1, portdb)
5812 catdir = os.path.dirname(pkgsettings["PORTAGE_BUILDDIR"])
5813 portage.util.ensure_dirs(os.path.dirname(catdir),
5814 uid=portage.portage_uid, gid=portage.portage_gid,
5816 builddir_lock = None
5819 catdir_lock = portage.locks.lockdir(catdir)
5820 portage.util.ensure_dirs(catdir,
5821 gid=portage.portage_gid,
5823 builddir_lock = portage.locks.lockdir(
5824 pkgsettings["PORTAGE_BUILDDIR"])
5826 portage.locks.unlockdir(catdir_lock)
5829 msg = " === (%s of %s) Cleaning (%s::%s)" % \
5830 (mergecount, len(mymergelist), pkg_key, y)
5831 short_msg = "emerge: (%s of %s) %s Clean" % \
5832 (mergecount, len(mymergelist), pkg_key)
5833 emergelog(xterm_titles, msg, short_msg=short_msg)
5834 retval = portage.doebuild(y, "clean", myroot,
5835 pkgsettings, self.edebug, cleanup=1,
5836 mydbapi=portdb, tree="porttree")
5837 if retval != os.EX_OK:
# Either build a binary package (--buildpkg / forced system
# rescue tarball) and inject/merge it...
5839 if "--buildpkg" in self.myopts or issyspkg:
5841 print ">>> This is a system package, " + \
5842 "let's pack a rescue tarball."
5843 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
5844 (mergecount, len(mymergelist), pkg_key, y)
5845 short_msg = "emerge: (%s of %s) %s Compile" % \
5846 (mergecount, len(mymergelist), pkg_key)
5847 emergelog(xterm_titles, msg, short_msg=short_msg)
5848 self.trees[myroot]["bintree"].prevent_collision(pkg_key)
5849 binpkg_tmpfile = os.path.join(pkgsettings["PKGDIR"],
5850 pkg_key + ".tbz2." + str(os.getpid()))
5851 pkgsettings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
5852 pkgsettings.backup_changes("PORTAGE_BINPKG_TMPFILE")
5853 retval = portage.doebuild(y, "package", myroot,
5854 pkgsettings, self.edebug, mydbapi=portdb,
5856 del pkgsettings["PORTAGE_BINPKG_TMPFILE"]
5857 if retval != os.EX_OK or \
5858 "--buildpkgonly" in self.myopts:
5859 elog_process(pkg_key, pkgsettings, phasefilter=filter_mergephases)
5860 if retval != os.EX_OK:
5862 bintree = self.trees[myroot]["bintree"]
5863 bintree.inject(pkg_key, filename=binpkg_tmpfile)
5865 if "--buildpkgonly" not in self.myopts:
5866 msg = " === (%s of %s) Merging (%s::%s)" % \
5867 (mergecount, len(mymergelist), pkg_key, y)
5868 short_msg = "emerge: (%s of %s) %s Merge" % \
5869 (mergecount, len(mymergelist), pkg_key)
5870 emergelog(xterm_titles, msg, short_msg=short_msg)
5872 retval = portage.merge(pkgsettings["CATEGORY"],
5873 pkgsettings["PF"], pkgsettings["D"],
5874 os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
5875 "build-info"), myroot, pkgsettings,
5876 myebuild=pkgsettings["EBUILD"],
5877 mytree="porttree", mydbapi=portdb,
5878 vartree=vartree, prev_mtimes=ldpath_mtimes,
5879 blockers=self._find_blockers(pkg))
5880 if retval != os.EX_OK:
5882 elif "noclean" not in pkgsettings.features:
5883 portage.doebuild(y, "clean", myroot,
5884 pkgsettings, self.edebug, mydbapi=portdb,
# ...or do a plain install phase followed by a live merge.
5887 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
5888 (mergecount, len(mymergelist), pkg_key, y)
5889 short_msg = "emerge: (%s of %s) %s Compile" % \
5890 (mergecount, len(mymergelist), pkg_key)
5891 emergelog(xterm_titles, msg, short_msg=short_msg)
5892 retval = portage.doebuild(y, "install", myroot,
5893 pkgsettings, self.edebug, vartree=vartree,
5894 mydbapi=portdb, tree="porttree",
5895 prev_mtimes=ldpath_mtimes)
5896 if retval != os.EX_OK:
5899 retval = portage.merge(pkgsettings["CATEGORY"],
5900 pkgsettings["PF"], pkgsettings["D"],
5901 os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
5902 "build-info"), myroot, pkgsettings,
5903 myebuild=pkgsettings["EBUILD"],
5904 mytree="porttree", mydbapi=portdb,
5905 vartree=vartree, prev_mtimes=ldpath_mtimes,
5906 blockers=self._find_blockers(pkg))
5907 if retval != os.EX_OK:
# Release the build dir lock; remove the category dir if empty
# (ENOENT/ENOTEMPTY/EEXIST are expected and tolerated).
5911 portage.locks.unlockdir(builddir_lock)
5914 # Lock catdir for removal if empty.
5915 catdir_lock = portage.locks.lockdir(catdir)
5921 if e.errno not in (errno.ENOENT,
5922 errno.ENOTEMPTY, errno.EEXIST):
5925 portage.locks.unlockdir(catdir_lock)
# --- binary branch: fetch the tbz2 (with distlock), verify, merge ---
5927 elif x[0]=="binary":
5929 mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
5930 if "--getbinpkg" in self.myopts:
5933 if "distlocks" in pkgsettings.features and \
5934 os.access(pkgsettings["PKGDIR"], os.W_OK):
5935 portage.util.ensure_dirs(os.path.dirname(mytbz2))
5936 tbz2_lock = portage.locks.lockfile(mytbz2,
5938 if self.trees[myroot]["bintree"].isremote(pkg_key):
5939 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
5940 (mergecount, len(mymergelist), pkg_key, mytbz2)
5941 short_msg = "emerge: (%s of %s) %s Fetch" % \
5942 (mergecount, len(mymergelist), pkg_key)
5943 emergelog(xterm_titles, msg, short_msg=short_msg)
5945 self.trees[myroot]["bintree"].gettbz2(pkg_key)
5946 except portage.exception.FileNotFound:
5947 writemsg("!!! Fetching Binary failed " + \
5948 "for '%s'\n" % pkg_key, noiselevel=-1)
5951 failed_fetches.append(pkg_key)
5952 except portage.exception.DigestException, e:
5953 writemsg("\n!!! Digest verification failed:\n",
5955 writemsg("!!! %s\n" % e.value[0],
5957 writemsg("!!! Reason: %s\n" % e.value[1],
5959 writemsg("!!! Got: %s\n" % e.value[2],
5961 writemsg("!!! Expected: %s\n" % e.value[3],
5966 failed_fetches.append(pkg_key)
5969 portage.locks.unlockfile(tbz2_lock)
5971 if "--fetchonly" in self.myopts or \
5972 "--fetch-all-uri" in self.myopts:
5976 short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Merge Binary"
5977 emergelog(xterm_titles, " === ("+str(mergecount)+\
5978 " of "+str(len(mymergelist))+") Merging Binary ("+\
5979 x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg)
5980 retval = portage.pkgmerge(mytbz2, x[1], pkgsettings,
5982 vartree=self.trees[myroot]["vartree"],
5983 prev_mtimes=ldpath_mtimes,
5984 blockers=self._find_blockers(pkg))
5985 if retval != os.EX_OK:
5987 #need to check for errors
# Record a newly merged, explicitly requested package in "world"
# (skipped for oneshot/fetch/pretend modes).
5988 if not buildpkgonly:
5989 if not (fetchonly or oneshot or pretend) and \
5990 args_set.findAtomForPackage(pkg_key, metadata):
5992 world_set.load() # maybe it's changed on disk
5993 myfavkey = create_world_atom(pkg_key, metadata,
5994 args_set, root_config)
5996 print ">>> Recording",myfavkey,"in \"world\" favorites file..."
5997 emergelog(xterm_titles, " === ("+\
5998 str(mergecount)+" of "+\
5999 str(len(mymergelist))+\
6000 ") Updating world file ("+x[pkgindex]+")")
6001 world_set.add(myfavkey)
# After merging a new sys-apps/portage into "/", re-exec emerge
# with --resume so the rest of the list uses the new code.
6004 if "--pretend" not in self.myopts and \
6005 "--fetchonly" not in self.myopts and \
6006 "--fetch-all-uri" not in self.myopts:
6008 # Figure out if we need a restart.
6009 if myroot == "/" and pkg.cp == "sys-apps/portage":
6010 if len(mymergelist) > mergecount:
6011 emergelog(xterm_titles,
6012 " ::: completed emerge ("+ \
6013 str(mergecount)+" of "+ \
6014 str(len(mymergelist))+") "+ \
6016 emergelog(xterm_titles, " *** RESTARTING " + \
6017 "emerge via exec() after change of " + \
6019 del mtimedb["resume"]["mergelist"][0]
6021 portage.run_exitfuncs()
6022 mynewargv=[sys.argv[0],"--resume"]
6023 resume_opts = self.myopts.copy()
6024 # For automatic resume, we need to prevent
6025 # any of bad_resume_opts from leaking in
6026 # via EMERGE_DEFAULT_OPTS.
6027 resume_opts["--ignore-default-opts"] = True
6028 for myopt, myarg in resume_opts.iteritems():
6029 if myopt not in bad_resume_opts:
6031 mynewargv.append(myopt)
6033 mynewargv.append(myopt +"="+ myarg)
6034 # priority only needs to be adjusted on the first run
6035 os.environ["PORTAGE_NICENESS"] = "0"
6036 os.execv(mynewargv[0], mynewargv)
# Per-package epilogue: optional post-build clean, log completion,
# and pop the finished entry off the resume list.
6038 if "--pretend" not in self.myopts and \
6039 "--fetchonly" not in self.myopts and \
6040 "--fetch-all-uri" not in self.myopts:
6041 if "noclean" not in self.settings.features:
6042 short_msg = "emerge: (%s of %s) %s Clean Post" % \
6043 (mergecount, len(mymergelist), x[pkgindex])
6044 emergelog(xterm_titles, (" === (%s of %s) " + \
6045 "Post-Build Cleaning (%s::%s)") % \
6046 (mergecount, len(mymergelist), x[pkgindex], y),
6047 short_msg=short_msg)
6048 emergelog(xterm_titles, " ::: completed emerge ("+\
6049 str(mergecount)+" of "+str(len(mymergelist))+") "+\
6052 # Unsafe for parallel merges
6053 del mtimedb["resume"]["mergelist"][0]
6054 # Commit after each merge so that --resume may still work in
6055 # in the event that portage is not allowed to exit normally
6056 # due to power failure, SIGKILL, etc...
6059 self._poll_child_processes()
6061 if "--pretend" not in self.myopts:
6062 emergelog(xterm_titles, " *** Finished. Cleaning up...")
6064 # We're out of the loop... We're done. Delete the resume data.
6065 if mtimedb.has_key("resume"):
6066 del mtimedb["resume"]
6069 #by doing an exit this way, --fetchonly can continue to try to
6070 #fetch everything even if a particular download fails.
6071 if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts:
6073 sys.stderr.write("\n\n!!! Some fetch errors were " + \
6074 "encountered. Please see above for details.\n\n")
6075 for cpv in failed_fetches:
6076 sys.stderr.write(" ")
6077 sys.stderr.write(cpv)
6078 sys.stderr.write("\n")
6079 sys.stderr.write("\n")
# NOTE(review): elided listing -- the docstring delimiters (6086,
# 6089) and lines 6090/6093 are missing; 6093 presumably guards the
# pargs[0] access (e.g. "if pargs:"), and a default "status"
# attribute may also be elided -- confirm against upstream.
6085 class UninstallFailure(portage.exception.PortageException):
6087 An instance of this class is raised by unmerge() when
6088 an uninstallation fails.
6091 def __init__(self, *pargs):
6092 portage.exception.PortageException.__init__(self, pargs)
# First positional arg, when given, is the failing exit status.
6094 self.status = pargs[0]
6096 def unmerge(root_config, myopts, unmerge_action,
6097 unmerge_files, ldpath_mtimes, autoclean=0,
6098 clean_world=1, ordered=0, raise_on_error=0):
6099 settings = root_config.settings
6100 sets = root_config.sets
6101 vartree = root_config.trees["vartree"]
6102 candidate_catpkgs=[]
6104 xterm_titles = "notitles" not in settings.features
6106 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
6108 # At least the parent needs to exist for the lock file.
6109 portage.util.ensure_dirs(vdb_path)
6110 except portage.exception.PortageException:
6114 if os.access(vdb_path, os.W_OK):
6115 vdb_lock = portage.locks.lockdir(vdb_path)
6116 realsyslist = sets["system"].getAtoms()
6118 for x in realsyslist:
6119 mycp = portage.dep_getkey(x)
6120 if mycp in settings.getvirtuals():
6122 for provider in settings.getvirtuals()[mycp]:
6123 if vartree.dbapi.match(provider):
6124 providers.append(provider)
6125 if len(providers) == 1:
6126 syslist.extend(providers)
6128 syslist.append(mycp)
6130 mysettings = portage.config(clone=settings)
6132 if not unmerge_files:
6133 if unmerge_action == "unmerge":
6135 print bold("emerge unmerge") + " can only be used with specific package names"
6142 # process all arguments and add all
6143 # valid db entries to candidate_catpkgs
6145 if not unmerge_files:
6146 candidate_catpkgs.extend(vartree.dbapi.cp_all())
6148 #we've got command-line arguments
6149 if not unmerge_files:
6150 print "\nNo packages to unmerge have been provided.\n"
6152 for x in unmerge_files:
6153 arg_parts = x.split('/')
6154 if x[0] not in [".","/"] and \
6155 arg_parts[-1][-7:] != ".ebuild":
6156 #possible cat/pkg or dep; treat as such
6157 candidate_catpkgs.append(x)
6158 elif unmerge_action in ["prune","clean"]:
6159 print "\n!!! Prune and clean do not accept individual" + \
6160 " ebuilds as arguments;\n skipping.\n"
6163 # it appears that the user is specifying an installed
6164 # ebuild and we're in "unmerge" mode, so it's ok.
6165 if not os.path.exists(x):
6166 print "\n!!! The path '"+x+"' doesn't exist.\n"
6169 absx = os.path.abspath(x)
6170 sp_absx = absx.split("/")
6171 if sp_absx[-1][-7:] == ".ebuild":
6173 absx = "/".join(sp_absx)
6175 sp_absx_len = len(sp_absx)
6177 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
6178 vdb_len = len(vdb_path)
6180 sp_vdb = vdb_path.split("/")
6181 sp_vdb_len = len(sp_vdb)
6183 if not os.path.exists(absx+"/CONTENTS"):
6184 print "!!! Not a valid db dir: "+str(absx)
6187 if sp_absx_len <= sp_vdb_len:
6188 # The Path is shorter... so it can't be inside the vdb.
6191 print "\n!!!",x,"cannot be inside "+ \
6192 vdb_path+"; aborting.\n"
6195 for idx in range(0,sp_vdb_len):
6196 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
6199 print "\n!!!", x, "is not inside "+\
6200 vdb_path+"; aborting.\n"
6203 print "="+"/".join(sp_absx[sp_vdb_len:])
6204 candidate_catpkgs.append(
6205 "="+"/".join(sp_absx[sp_vdb_len:]))
6208 if (not "--quiet" in myopts):
6210 if settings["ROOT"] != "/":
6211 print darkgreen(newline+ \
6212 ">>> Using system located in ROOT tree "+settings["ROOT"])
6213 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
6214 not ("--quiet" in myopts):
6215 print darkgreen(newline+\
6216 ">>> These are the packages that would be unmerged:")
6218 # Preservation of order is required for --depclean and --prune so
6219 # that dependencies are respected. Use all_selected to eliminate
6220 # duplicate packages since the same package may be selected by
6223 all_selected = set()
6224 for x in candidate_catpkgs:
6225 # cycle through all our candidate deps and determine
6226 # what will and will not get unmerged
6228 mymatch=localtree.dep_match(x)
6231 except ValueError, errpkgs:
6232 print "\n\n!!! The short ebuild name \"" + \
6233 x + "\" is ambiguous. Please specify"
6234 print "!!! one of the following fully-qualified " + \
6235 "ebuild names instead:\n"
6236 for i in errpkgs[0]:
6237 print " " + green(i)
6241 if not mymatch and x[0] not in "<>=~":
6242 #add a "=" if missing
6243 mymatch=localtree.dep_match("="+x)
6245 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
6246 (x, unmerge_action), noiselevel=-1)
6250 {"protected": set(), "selected": set(), "omitted": set()})
6251 mykey = len(pkgmap) - 1
6252 if unmerge_action=="unmerge":
6254 if y not in all_selected:
6255 pkgmap[mykey]["selected"].add(y)
6257 elif unmerge_action == "prune":
6258 if len(mymatch) == 1:
6260 best_version = mymatch[0]
6261 best_slot = vartree.getslot(best_version)
6262 best_counter = vartree.dbapi.cpv_counter(best_version)
6263 for mypkg in mymatch[1:]:
6264 myslot = vartree.getslot(mypkg)
6265 mycounter = vartree.dbapi.cpv_counter(mypkg)
6266 if (myslot == best_slot and mycounter > best_counter) or \
6267 mypkg == portage.best([mypkg, best_version]):
6268 if myslot == best_slot:
6269 if mycounter < best_counter:
6270 # On slot collision, keep the one with the
6271 # highest counter since it is the most
6272 # recently installed.
6274 best_version = mypkg
6276 best_counter = mycounter
6277 pkgmap[mykey]["protected"].add(best_version)
6278 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
6279 if mypkg != best_version and mypkg not in all_selected)
6280 all_selected.update(pkgmap[mykey]["selected"])
6282 # unmerge_action == "clean"
6284 for mypkg in mymatch:
6285 if unmerge_action == "clean":
6286 myslot = localtree.getslot(mypkg)
6288 # since we're pruning, we don't care about slots
6289 # and put all the pkgs in together
6291 if not slotmap.has_key(myslot):
6292 slotmap[myslot] = {}
6293 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
6295 for myslot in slotmap:
6296 counterkeys = slotmap[myslot].keys()
6300 pkgmap[mykey]["protected"].add(
6301 slotmap[myslot][counterkeys[-1]])
6303 #be pretty and get them in order of merge:
6304 for ckey in counterkeys:
6305 mypkg = slotmap[myslot][ckey]
6306 if mypkg not in all_selected:
6307 pkgmap[mykey]["selected"].add(mypkg)
6308 all_selected.add(mypkg)
6309 # ok, now the last-merged package
6310 # is protected, and the rest are selected
6311 numselected = len(all_selected)
6312 if global_unmerge and not numselected:
6313 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
6317 portage.writemsg_stdout(
6318 "\n>>> No packages selected for removal by " + \
6319 unmerge_action + "\n")
6323 portage.locks.unlockdir(vdb_lock)
6325 from portage.sets.base import EditablePackageSet
6327 # generate a list of package sets that are directly or indirectly listed in "world",
6328 # as there is no persistent list of "installed" sets
6329 installed_sets = ["world"]
6334 pos = len(installed_sets)
6335 for s in installed_sets[pos - 1:]:
6336 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
6339 installed_sets += candidates
6340 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
6343 # we don't want to unmerge packages that are still listed in user-editable package sets
6344 # listed in "world" as they would be remerged on the next update of "world" or the
6345 # relevant package sets.
6346 for cp in xrange(len(pkgmap)):
6347 for cpv in pkgmap[cp]["selected"].copy():
6349 for s in installed_sets:
6350 # skip sets that the user requested to unmerge, and skip world
6351 # unless we're unmerging a package set (as the package would be
6352 # removed from "world" later on)
6353 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
6355 # only check instances of EditablePackageSet as other classes are generally used for
6356 # special purposes and can be ignored here (and are usually generated dynamically, so the
6357 # user can't do much about them anyway)
6358 elif sets[s].containsCPV(cpv) \
6359 and isinstance(sets[s], EditablePackageSet):
6362 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
6363 #print colorize("WARN", "but still listed in the following package sets:")
6364 #print " %s\n" % ", ".join(parents)
6365 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
6366 print colorize("WARN", "still referenced by the following package sets:")
6367 print " %s\n" % ", ".join(parents)
6368 # adjust pkgmap so the display output is correct
6369 pkgmap[cp]["selected"].remove(cpv)
6370 pkgmap[cp]["protected"].add(cpv)
6374 # Unmerge order only matters in some cases
6378 selected = d["selected"]
6381 cp = portage.cpv_getkey(iter(selected).next())
6382 cp_dict = unordered.get(cp)
6385 unordered[cp] = cp_dict
6388 for k, v in d.iteritems():
6389 cp_dict[k].update(v)
6390 pkgmap = [unordered[cp] for cp in sorted(unordered)]
6392 for x in xrange(len(pkgmap)):
6393 selected = pkgmap[x]["selected"]
6396 for mytype, mylist in pkgmap[x].iteritems():
6397 if mytype == "selected":
6399 mylist.difference_update(all_selected)
6400 cp = portage.cpv_getkey(iter(selected).next())
6401 for y in localtree.dep_match(cp):
6402 if y not in pkgmap[x]["omitted"] and \
6403 y not in pkgmap[x]["selected"] and \
6404 y not in pkgmap[x]["protected"] and \
6405 y not in all_selected:
6406 pkgmap[x]["omitted"].add(y)
6407 if global_unmerge and not pkgmap[x]["selected"]:
6408 #avoid cluttering the preview printout with stuff that isn't getting unmerged
6410 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
6411 print colorize("BAD","\a\n\n!!! '%s' is part of your system profile." % cp)
6412 print colorize("WARN","\a!!! Unmerging it may be damaging to your system.\n")
6413 if "--pretend" not in myopts and "--ask" not in myopts:
6414 countdown(int(settings["EMERGE_WARNING_DELAY"]),
6415 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
6416 if "--quiet" not in myopts:
6417 print "\n "+bold(cp)
6419 print bold(cp)+": ",
6420 for mytype in ["selected","protected","omitted"]:
6421 if "--quiet" not in myopts:
6422 portage.writemsg_stdout((mytype + ": ").rjust(14), noiselevel=-1)
6423 if pkgmap[x][mytype]:
6424 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
6425 sorted_pkgs.sort(portage.pkgcmp)
6426 for pn, ver, rev in sorted_pkgs:
6430 myversion = ver + "-" + rev
6431 if mytype == "selected":
6432 portage.writemsg_stdout(
6433 colorize("UNMERGE_WARN", myversion + " "), noiselevel=-1)
6435 portage.writemsg_stdout(
6436 colorize("GOOD", myversion + " "), noiselevel=-1)
6438 portage.writemsg_stdout("none ", noiselevel=-1)
6439 if "--quiet" not in myopts:
6440 portage.writemsg_stdout("\n", noiselevel=-1)
6441 if "--quiet" in myopts:
6442 portage.writemsg_stdout("\n", noiselevel=-1)
6444 portage.writemsg_stdout("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
6445 " packages are slated for removal.\n")
6446 portage.writemsg_stdout(">>> " + colorize("GOOD", "'Protected'") + \
6447 " and " + colorize("GOOD", "'omitted'") + \
6448 " packages will not be removed.\n\n")
6450 if "--pretend" in myopts:
6451 #we're done... return
6453 if "--ask" in myopts:
6454 if userquery("Would you like to unmerge these packages?")=="No":
6455 # enter pretend mode for correct formatting of results
6456 myopts["--pretend"] = True
6461 #the real unmerging begins, after a short delay....
6463 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
6465 for x in xrange(len(pkgmap)):
6466 for y in pkgmap[x]["selected"]:
6467 print ">>> Unmerging "+y+"..."
6468 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
6469 mysplit = y.split("/")
6471 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
6472 mysettings, unmerge_action not in ["clean","prune"],
6473 vartree=vartree, ldpath_mtimes=ldpath_mtimes)
6474 if retval != os.EX_OK:
6475 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
6477 raise UninstallFailure(retval)
6481 sets["world"].cleanPackage(vartree.dbapi, y)
6482 emergelog(xterm_titles, " >>> unmerge success: "+y)
6484 for s in root_config.setconfig.active:
6485 sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
	"""Regenerate GNU info directory index ("dir") files for any info
	directory under `root` whose mtime differs from the value recorded in
	prev_mtimes, reporting progress and errors on stdout.

	NOTE(review): this copy of the function appears to be missing several
	lines (loop headers, try statements, else branches); the most obvious
	gaps are flagged inline and should be restored from a pristine copy.
	"""
	if os.path.exists("/usr/bin/install-info"):
		# NOTE(review): regen_infodirs initialization and the
		# `for z in infodirs:` loop header that binds `z` appear missing.
			inforoot=normpath(root+z)
			if os.path.isdir(inforoot):
				infomtime = long(os.stat(inforoot).st_mtime)
				# Regenerate when there is no recorded mtime or it changed.
				if inforoot not in prev_mtimes or \
					prev_mtimes[inforoot] != infomtime:
					regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n "+green("*")+" GNU info directory index is up-to-date.\n")
		# NOTE(review): an `else:` presumably precedes the next line.
		portage.writemsg_stdout("\n "+green("*")+" Regenerating GNU info directory index...\n")
		# Known compressed/uncompressed suffixes of the "dir" index file.
		dir_extensions = ("", ".gz", ".bz2")
		for inforoot in regen_infodirs:
			if not os.path.isdir(inforoot):
				# NOTE(review): a `continue` body appears to be missing here.
			file_list = os.listdir(inforoot)
			dir_file = os.path.join(inforoot, "dir")
			moved_old_dir = False
			# NOTE(review): processed_count init and the `for x in file_list:`
			# loop header appear to be missing here.
				# Skip hidden files and subdirectories.
				if x.startswith(".") or \
					os.path.isdir(os.path.join(inforoot, x)):
				# Skip the "dir" index file itself and its backups.
				if x.startswith("dir"):
					for ext in dir_extensions:
						if x == "dir" + ext or \
							x == "dir" + ext + ".old":
				if processed_count == 0:
					# First real info file: move any existing "dir" index
					# aside so install-info rebuilds it from scratch.
					for ext in dir_extensions:
						# NOTE(review): a `try:` presumably wraps the rename.
						os.rename(dir_file + ext, dir_file + ext + ".old")
						moved_old_dir = True
						except EnvironmentError, e:
							# A missing "dir" file is expected (ENOENT);
							# anything else should propagate.
							if e.errno != errno.ENOENT:
				processed_count += 1
				myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
				existsstr="already exists, for file `"
				if re.search(existsstr,myso):
					# Already exists... Don't increment the count for this.
				elif myso[:44]=="install-info: warning: no info dir entry in ":
					# This info file doesn't contain a DIR-header: install-info produces this
					# (harmless) warning (the --quiet switch doesn't seem to work).
					# Don't increment the count for this.
				# NOTE(review): an else branch collecting errors presumably
				# preceded the next line (badcount increment etc.).
				errmsg += myso + "\n"
			if moved_old_dir and not os.path.exists(dir_file):
				# We didn't generate a new dir file, so put the old file
				# back where it was originally found.
				for ext in dir_extensions:
					# NOTE(review): a `try:` presumably wraps the rename.
					os.rename(dir_file + ext + ".old", dir_file + ext)
					except EnvironmentError, e:
						if e.errno != errno.ENOENT:

			# Clean dir.old cruft so that they don't prevent
			# unmerge of otherwise empty directories.
			for ext in dir_extensions:
				# NOTE(review): a `try:` presumably wraps the unlink.
				os.unlink(dir_file + ext + ".old")
				except EnvironmentError, e:
					if e.errno != errno.ENOENT:

			#update mtime so we can potentially avoid regenerating.
			prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

		# NOTE(review): the `if badcount:`/`else:` summary selection appears
		# to be missing around the next two lines.
		print " "+yellow("*")+" Processed",icount,"info files;",badcount,"errors."
		print " "+green("*")+" Processed",icount,"info files."
def display_news_notification(trees):
	"""Print a warning for each repository that has unread news items, and
	a final pointer to `eselect news` when anything was reported.

	NOTE(review): a few lines appear to be missing from this copy (the
	multi-ROOT loop body and the `if unreadItems:` guard); flagged inline.
	"""
	for target_root in trees:
		if len(trees) > 1 and target_root != "/":
			# NOTE(review): the loop body is missing here (presumably
			# `break` to select the non-"/" ROOT) -- confirm upstream.
	settings = trees[target_root]["vartree"].settings
	portdb = trees[target_root]["porttree"].dbapi
	vardb = trees[target_root]["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo)
		# NOTE(review): an `if unreadItems:` guard presumably belongs here.
		if not newsReaderDisplay:
			newsReaderDisplay = True
		# Trailing comma keeps both fragments on one output line.
		print colorize("WARN", " * IMPORTANT:"),
		print "%s news items need reading for repository '%s'." % (unreadItems, repo)

	if newsReaderDisplay:
		print colorize("WARN", " *"),
		print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def post_emerge(trees, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session: update info files,
	update config-file notices, show news notifications, and warn about
	preserved libraries.

	@param trees: A dictionary mapping each ROOT to it's package databases
	@type trees: dict
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	@type retval: Int
	@rtype: None
	@returns:
	1. Calls sys.exit(retval)

	NOTE(review): several lines appear to be missing from this copy
	(try/finally wrappers, else branches, the final sys.exit); flagged inline.
	"""
	for target_root in trees:
		if len(trees) > 1 and target_root != "/":
			# NOTE(review): loop body missing here (presumably `break`).
	vardbapi = trees[target_root]["vartree"].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	# NOTE(review): settings.unlock()/reload() presumably preceded this,
	# with settings.lock() afterwards.
	settings.regenerate()

	config_protect = settings.get("CONFIG_PROTECT","").split()
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
	# NOTE(review): an `else:` is missing before the next assignment.
	exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	# Dump the mod_echo output now so that our other notifications are shown
	# NOTE(review): a try/except ImportError wrapper appears to be missing
	# around the import below.
	from portage.elog import mod_echo
	pass # happens during downgrade to a version without the module

	vdb_path = os.path.join(target_root, portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	# Hold the vdb lock while the info index is refreshed.
	vdb_lock = portage.locks.lockdir(vdb_path)
	# NOTE(review): a try/finally presumably wrapped the next two statements.
	if "noinfo" not in settings.features:
		chk_updated_info_files(target_root, infodirs, info_mtimes, retval)
	portage.locks.unlockdir(vdb_lock)

	chk_updated_cfg_files(target_root, config_protect)

	display_news_notification(trees)

	# Warn about libraries preserved from unmerged packages so the user
	# can rebuild their consumers.
	if vardbapi.plib_registry.hasEntries():
		print colorize("WARN", "!!!") + " existing preserved libs:"
		plibdata = vardbapi.plib_registry.getPreservedLibs()
		for cpv in plibdata:
			print colorize("WARN", ">>>") + " package: %s" % cpv
			for f in plibdata[cpv]:
				print colorize("WARN", " * ") + " - %s" % f
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def chk_updated_cfg_files(target_root, config_protect):
	"""Scan CONFIG_PROTECT paths under target_root for pending ._cfg????_*
	update files and tell the user how many need attention.

	NOTE(review): several lines appear to be missing from this copy
	(procount initialization, try/except wrappers, else branches, the
	count accumulation); flagged inline.
	"""
	#number of directories with some protect files in them
	for x in config_protect:
		x = os.path.join(target_root, x.lstrip(os.path.sep))
		if not os.access(x, os.W_OK):
			# Avoid Permission denied errors generated
			# later by `find`.
			# NOTE(review): a `continue` appears to be missing here.
		# NOTE(review): a try/except OSError presumably wraps the lstat.
		mymode = os.lstat(x).st_mode
		if stat.S_ISLNK(mymode):
			# We want to treat it like a directory if it
			# is a symlink to an existing directory.
			real_mode = os.stat(x).st_mode
			if stat.S_ISDIR(real_mode):
				# NOTE(review): `mymode = real_mode` appears to be missing.
		if stat.S_ISDIR(mymode):
			# Protected directory: search the whole tree for candidates.
			mycommand = "find '%s' -iname '._cfg????_*'" % x
		# NOTE(review): an `else:` is missing before the next statement
		# (single protected file: search only its directory).
		mycommand = "find '%s' -maxdepth 1 -iname '._cfg????_%s'" % \
			os.path.split(x.rstrip(os.path.sep))
		# Exclude editor backups from the candidate list.
		mycommand += " ! -iname '.*~' ! -iname '.*.bak' -print0"
		a = commands.getstatusoutput(mycommand)
		# NOTE(review): an `if a[0] != 0:` guard appears to be missing here.
		sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
		# Show the error message alone, sending stdout to /dev/null.
		os.system(mycommand + " 1>/dev/null")
		files = a[1].split('\0')
		# split always produces an empty string as the last element
		if files and not files[-1]:
			# NOTE(review): `del files[-1]` appears to be missing here.
		# NOTE(review): an `if files:` guard and procount increment
		# presumably precede the report below.
		print "\n"+colorize("WARN", " * IMPORTANT:"),
		# NOTE(review): the argument continuation `(len(files), x)` for the
		# next print appears to be missing.
		if stat.S_ISDIR(mymode):
			print "%d config files in '%s' need updating." % \
		# NOTE(review): an `else:` is missing before the next statement.
		print "config file '%s' needs updating." % x

	# NOTE(review): an `if procount:` guard presumably wraps the summary.
	print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
		" section of the " + bold("emerge")
	print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id):
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find
	unread items; returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: portdbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@rtype: Integer
	@returns:
	1. The number of unread but relevant news items.
	"""
	from portage.news import NewsManager
	news_manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return news_manager.getUnreadItems(repo_id, update=True)
def insert_category_into_atom(atom, category):
	"""Insert "category/" into a category-less package atom.

	The category is spliced in immediately before the first word character,
	so any version-operator prefix is preserved:
	">=foo-1.0" -> ">=category/foo-1.0".

	@param atom: a package atom, possibly lacking a category
	@param category: the category name to insert
	@returns: the atom with the category inserted, or None when the atom
		contains no word character to anchor the insertion to.
	"""
	alphanum = re.search(r'\w', atom)
	# Guard against atoms without any word character: re.search returns
	# None in that case and .start() would raise AttributeError.
	if alphanum:
		ret = atom[:alphanum.start()] + "%s/" % category + \
			atom[alphanum.start():]
	else:
		ret = None
	return ret
def is_valid_package_atom(x):
	"""Return True if x is a valid package atom.

	A category-less atom is validated by temporarily prefixing a dummy
	"cat/" category before handing it to portage.isvalidatom().

	@param x: candidate package atom string
	@rtype: Boolean
	"""
	if "/" not in x:
		# Only fabricate the dummy category when none is present;
		# prefixing an atom that already has a category would produce
		# "cat/real-cat/pkg" and wrongly reject valid atoms.
		alphanum = re.search(r'\w', x)
		# re.search returns None for atoms with no word characters;
		# guard so .start() is not called on None.
		if alphanum:
			x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
	return portage.isvalidatom(x)
6796 def show_blocker_docs_link():
6798 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
6799 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
6801 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
6804 def show_mask_docs():
6805 print "For more information, see the MASKED PACKAGES section in the emerge"
6806 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
	"""Synchronize the local Portage tree via rsync or cvs (or just refresh
	metadata when myaction == "metadata"), then reload the configuration,
	transfer the metadata cache, apply global package updates, and run the
	user's post_sync hook.

	NOTE(review): this copy of the function appears to be missing many
	lines (try/else headers, loop headers, sys.exit calls, variable
	initializations); the most obvious gaps are flagged inline and should
	be restored from a pristine copy of this file.
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === sync")
	myportdir = settings.get("PORTDIR", None)
	# NOTE(review): an `if not myportdir:` guard presumably precedes this,
	# followed by a sys.exit call.
		sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
	if myportdir[-1]=="/":
		myportdir=myportdir[:-1]
	if not os.path.exists(myportdir):
		print ">>>",myportdir,"not found, creating it."
		os.makedirs(myportdir,0755)
	syncuri=settings["SYNC"].rstrip()

	updatecache_flg = False
	if myaction == "metadata":
		print "skipping sync"
		updatecache_flg = True
	elif syncuri[:8]=="rsync://":
		if not os.path.exists("/usr/bin/rsync"):
			print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
			print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
			# NOTE(review): a sys.exit call presumably follows here.
		import shlex, StringIO
		if settings["PORTAGE_RSYNC_OPTS"] == "":
			portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
			# NOTE(review): the `rsync_opts.extend([` opening (and the
			# rsync_opts/mytimeout initializations) appear to be missing.
				"--recursive", # Recurse directories
				"--links", # Consider symlinks
				"--safe-links", # Ignore links outside of tree
				"--perms", # Preserve permissions
				"--times", # Preserve mod times
				"--compress", # Compress the data transmitted
				"--force", # Force deletion on non-empty dirs
				"--whole-file", # Don't do block transfers, only entire files
				"--delete", # Delete files that aren't in the master tree
				"--stats", # Show final statistics about what was transferred
				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
				"--exclude=/distfiles", # Exclude distfiles from consideration
				"--exclude=/local", # Exclude local from consideration
				"--exclude=/packages", # Exclude packages from consideration

		# The below validation is not needed when using the above hardcoded
		# NOTE(review): an `else:` branch header appears to be missing here.
			portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
			lexer = shlex.shlex(StringIO.StringIO(
				settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
			lexer.whitespace_split = True
			rsync_opts.extend(lexer)

			# Ensure rsync always recurses and preserves mtimes.
			for opt in ("--recursive", "--times"):
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + " adding required option " + \
						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
					rsync_opts.append(opt)

			for exclude in ("distfiles", "local", "packages"):
				opt = "--exclude=/%s" % exclude
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + \
						" adding required option %s not included in " % opt + \
						"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
					rsync_opts.append(opt)

			if settings["RSYNC_TIMEOUT"] != "":
				portage.writemsg("WARNING: usage of RSYNC_TIMEOUT is deprecated, " + \
					"use PORTAGE_RSYNC_EXTRA_OPTS instead\n")
				# NOTE(review): a `try:` presumably wraps the conversion below.
				mytimeout = int(settings["RSYNC_TIMEOUT"])
				rsync_opts.append("--timeout=%d" % mytimeout)
				except ValueError, e:
					portage.writemsg("!!! %s\n" % str(e))

		# TODO: determine options required for official servers
		if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):

			def rsync_opt_startswith(opt_prefix):
				# True if any configured rsync option starts with opt_prefix.
				for x in rsync_opts:
					if x.startswith(opt_prefix):
						# NOTE(review): the return statements of this helper
						# appear to be missing.

			if not rsync_opt_startswith("--timeout="):
				rsync_opts.append("--timeout=%d" % mytimeout)

			for opt in ("--compress", "--whole-file"):
				if opt not in rsync_opts:
					portage.writemsg(yellow("WARNING:") + " adding required option " + \
						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
					rsync_opts.append(opt)

		if "--quiet" in myopts:
			rsync_opts.append("--quiet") # Shut up a lot
		# NOTE(review): an `else:` appears to be missing before the next line.
			rsync_opts.append("--verbose") # Print filelist

		if "--verbose" in myopts:
			rsync_opts.append("--progress") # Progress meter for each file

		if "--debug" in myopts:
			rsync_opts.append("--checksum") # Force checksum on all files

		if settings["RSYNC_EXCLUDEFROM"] != "":
			portage.writemsg(yellow("WARNING:") + \
				" usage of RSYNC_EXCLUDEFROM is deprecated, use " + \
				"PORTAGE_RSYNC_EXTRA_OPTS instead\n")
			if os.path.exists(settings["RSYNC_EXCLUDEFROM"]):
				rsync_opts.append("--exclude-from=%s" % \
					settings["RSYNC_EXCLUDEFROM"])
			# NOTE(review): an `else:` appears to be missing here.
				portage.writemsg("!!! RSYNC_EXCLUDEFROM specified," + \
					" but file does not exist.\n")

		if settings["RSYNC_RATELIMIT"] != "":
			portage.writemsg(yellow("WARNING:") + \
				" usage of RSYNC_RATELIMIT is deprecated, use " + \
				"PORTAGE_RSYNC_EXTRA_OPTS instead")
			rsync_opts.append("--bwlimit=%s" % \
				settings["RSYNC_RATELIMIT"])

		# Real local timestamp file.
		servertimestampfile = os.path.join(
			myportdir, "metadata", "timestamp.chk")

		content = portage.util.grabfile(servertimestampfile)
		# NOTE(review): mytimestamp init and an `if content:`/`try:` wrapper
		# appear to be missing around the parse below.
			mytimestamp = time.mktime(time.strptime(content[0],
				"%a, %d %b %Y %H:%M:%S +0000"))
			except (OverflowError, ValueError):
				# NOTE(review): the mytimestamp fallback appears missing.

		# NOTE(review): a `try:` presumably wraps the lookup below, with
		# `except ValueError:` before the fallback assignment.
		rsync_initial_timeout = \
			int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
			rsync_initial_timeout = 15

		if settings.has_key("RSYNC_RETRIES"):
			print yellow("WARNING:")+" usage of RSYNC_RETRIES is deprecated, use PORTAGE_RSYNC_RETRIES instead"
			maxretries=int(settings["RSYNC_RETRIES"])
		# NOTE(review): the `else:` branch and an enclosing `try:` appear to
		# be missing around the next assignment.
			maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit, e:
			raise # Needed else can't exit
		# NOTE(review): an `except ValueError:` appears to be missing here.
			maxretries=3 #default number of retries

		# Split the uri into optional user name, host name and optional port.
		user_name, hostname, port = re.split(
			"rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
		# NOTE(review): the normalization of user_name/port from None to ""
		# appears to be missing in/after the next guard.
		if user_name is None:
		updatecache_flg=True
		all_rsync_opts = set(rsync_opts)
		lexer = shlex.shlex(StringIO.StringIO(
			settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
		lexer.whitespace_split = True
		extra_rsync_opts = list(lexer)

		all_rsync_opts.update(extra_rsync_opts)
		# Pick the address family requested via -4/--ipv4 or -6/--ipv6.
		family = socket.AF_INET
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6

		# Sentinel exit codes used below (outside rsync's own code space).
		SERVER_OUT_OF_DATE = -1
		EXCEEDED_MAX_RETRIES = -2
		# NOTE(review): ips list init, the retry loop header, and a `try:`
		# appear to be missing before the address resolution below.
			for addrinfo in socket.getaddrinfo(
				hostname, None, family, socket.SOCK_STREAM):
				if addrinfo[0] == socket.AF_INET6:
					# IPv6 addresses need to be enclosed in square brackets
					ips.append("[%s]" % addrinfo[4][0])
				# NOTE(review): an `else:` appears to be missing here.
					ips.append(addrinfo[4][0])
			from random import shuffle
			# NOTE(review): the shuffle(ips) call appears to be missing.
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print "Notice:",str(e)

			# Substitute a resolved (and shuffled) address for the host name.
			dosyncuri = syncuri.replace(
				"//" + user_name + hostname + port + "/",
				"//" + user_name + ips[0] + port + "/", 1)
		except SystemExit, e:
			raise # Needed else can't exit
		except Exception, e:
			print "Notice:",str(e)

			if "--ask" in myopts:
				if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
					# NOTE(review): the quit handling appears to be missing.

			emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
			if "--quiet" not in myopts:
				print ">>> Starting rsync with "+dosyncuri+"..."
			# NOTE(review): an `else:` (retry > 0 branch) appears missing.
			emergelog(xterm_titles,
				">>> Starting retry %d of %d with %s" % \
				(retries,maxretries,dosyncuri))
			print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)

			if mytimestamp != 0 and "--quiet" not in myopts:
				print ">>> Checking server timestamp ..."

			rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts

			if "--debug" in myopts:
				# NOTE(review): the debug print of rsynccommand is missing.

			# Even if there's no timestamp available locally, fetch the
			# timestamp anyway as an initial probe to verify that the server is
			# responsive. This protects us from hanging indefinitely on a
			# connection attempt to an unresponsive server which rsync's
			# --timeout option does not prevent.

			# Temporary file for remote server timestamp comparison.
			from tempfile import mkstemp
			fd, tmpservertimestampfile = mkstemp()
			# NOTE(review): mypids init and an enclosing `try:` appear to be
			# missing before the probe command construction.
			mycommand = rsynccommand[:]
			mycommand.append(dosyncuri.rstrip("/") + \
				"/metadata/timestamp.chk")
			mycommand.append(tmpservertimestampfile)

			def timeout_handler(signum, frame):
				# Abort the probe when the alarm fires.
				raise portage.exception.PortageException("timed out")
			signal.signal(signal.SIGALRM, timeout_handler)
			# Timeout here in case the server is unresponsive. The
			# --timeout rsync option doesn't apply to the initial
			# connection attempt.
			if rsync_initial_timeout:
				signal.alarm(rsync_initial_timeout)
			# NOTE(review): an inner `try:` appears to be missing here.
			mypids.extend(portage.process.spawn(
				mycommand, env=settings.environ(), returnpid=True))
			exitcode = os.waitpid(mypids[0], 0)[1]
			content = portage.grabfile(tmpservertimestampfile)
			# NOTE(review): a `finally:` cleanup header (cancelling the
			# alarm, restoring the handler) appears to be missing.
			if rsync_initial_timeout:
			os.unlink(tmpservertimestampfile)
			except portage.exception.PortageException, e:
				# Probe timed out; kill the stray rsync if still running.
				if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
					os.kill(mypids[0], signal.SIGTERM)
					os.waitpid(mypids[0], 0)
				# This is the same code rsync uses for timeout.
				# NOTE(review): the exitcode assignment appears missing.
			if exitcode != os.EX_OK:
				# Decode the waitpid status into a plain exit/signal code.
				# NOTE(review): the if/else selecting between the next two
				# statements appears to be damaged.
				exitcode = (exitcode & 0xff) << 8
				exitcode = exitcode >> 8
			# NOTE(review): an `if mypids:` guard appears to be missing.
			portage.process.spawned_pids.remove(mypids[0])
			# NOTE(review): servertimestamp init and an `if content:`/`try:`
			# wrapper appear to be missing around the parse below.
			servertimestamp = time.mktime(time.strptime(
				content[0], "%a, %d %b %Y %H:%M:%S +0000"))
			except (OverflowError, ValueError):
				# NOTE(review): the servertimestamp fallback appears missing.
			del mycommand, mypids, content
			if exitcode == os.EX_OK:
				if (servertimestamp != 0) and (servertimestamp == mytimestamp):
					emergelog(xterm_titles,
						">>> Cancelling sync -- Already current.")
					print ">>> Timestamps on the server and in the local repository are the same."
					print ">>> Cancelling all further sync action. You are already up to date."
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
					# NOTE(review): an exit/return appears to be missing here.
				elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
					emergelog(xterm_titles,
						">>> Server out of date: %s" % dosyncuri)
					print ">>> SERVER OUT OF DATE: %s" % dosyncuri
					print ">>> In order to force sync, remove '%s'." % servertimestampfile
					exitcode = SERVER_OUT_OF_DATE
				elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
					# Server is newer (or no timestamp): do the real sync.
					mycommand = rsynccommand + [dosyncuri+"/", myportdir]
					exitcode = portage.process.spawn(mycommand,
						env=settings.environ())
					if exitcode in [0,1,3,4,11,14,20,21]:
						# NOTE(review): a `break` appears to be missing here.
			elif exitcode in [1,3,4,11,14,20,21]:
				# NOTE(review): a `break` appears to be missing here.
			# Code 2 indicates protocol incompatibility, which is expected
			# for servers with protocol < 29 that don't support
			# --prune-empty-directories. Retry for a server that supports
			# at least rsync protocol version 29 (>=rsync-2.6.4).

			if retries<=maxretries:
				print ">>> Retrying..."
				# NOTE(review): the sleep and retries increment are missing.
			# NOTE(review): an `else:` appears to be missing before the
			# give-up branch below.
				updatecache_flg=False
				exitcode = EXCEEDED_MAX_RETRIES
				# NOTE(review): a `break` appears to be missing here.

		# NOTE(review): an `if (exitcode==0):` guard appears to be missing.
			emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
		elif exitcode == SERVER_OUT_OF_DATE:
			# NOTE(review): the exit handling for this branch is missing.
		elif exitcode == EXCEEDED_MAX_RETRIES:
			# NOTE(review): the sys.stderr.write( opening line is missing.
				">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
		# NOTE(review): the `elif (exitcode>0):` branch and the per-exit-code
		# selection of the messages below appear to be missing.
			print darkred("!!!")+green(" Rsync has reported that there is a syntax error. Please ensure")
			print darkred("!!!")+green(" that your SYNC statement is proper.")
			print darkred("!!!")+green(" SYNC="+settings["SYNC"])
			print darkred("!!!")+green(" Rsync has reported that there is a File IO error. Normally")
			print darkred("!!!")+green(" this means your disk is full, but can be caused by corruption")
			print darkred("!!!")+green(" on the filesystem that contains PORTDIR. Please investigate")
			print darkred("!!!")+green(" and try again after the problem has been fixed.")
			print darkred("!!!")+green(" PORTDIR="+settings["PORTDIR"])
			print darkred("!!!")+green(" Rsync was killed before it finished.")
			print darkred("!!!")+green(" Rsync has not successfully finished. It is recommended that you keep")
			print darkred("!!!")+green(" trying or that you use the 'emerge-webrsync' option if you are unable")
			print darkred("!!!")+green(" to use rsync due to firewall or other restrictions. This should be a")
			print darkred("!!!")+green(" temporary problem unless complications exist with your network")
			print darkred("!!!")+green(" (and possibly your system's filesystem) configuration.")

	elif syncuri[:6]=="cvs://":
		if not os.path.exists("/usr/bin/cvs"):
			print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
			print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
			# NOTE(review): a sys.exit call presumably follows here.
		# NOTE(review): the cvsroot parsing from syncuri appears missing.
		cvsdir=os.path.dirname(myportdir)
		if not os.path.exists(myportdir+"/CVS"):
			#initial checkout
			print ">>> Starting initial cvs checkout with "+syncuri+"..."
			if os.path.exists(cvsdir+"/gentoo-x86"):
				print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
				# NOTE(review): an exit and a try/os.stat wrapper appear to
				# be missing around the ENOENT check below.
				if e.errno != errno.ENOENT:
					"!!! existing '%s' directory; exiting.\n" % myportdir)
			if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
				print "!!! cvs checkout error; exiting."
				# NOTE(review): a sys.exit call presumably follows here.
			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
		# NOTE(review): an `else:` (update path) appears missing before this.
			#cvs update
			print ">>> Starting cvs update with "+syncuri+"..."
			retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
				myportdir, settings, free=1)
			if retval != os.EX_OK:
				# NOTE(review): sys.exit(retval) presumably goes here.
	# NOTE(review): the final `else:` for an unrecognized SYNC setting
	# appears to be missing before the next line.
		print "!!! rsync setting: ",syncuri,"not recognized; exiting."

	# Metadata transfer is pointless when the feature is disabled.
	if updatecache_flg and \
		myaction != "metadata" and \
		"metadata-transfer" not in settings.features:
		updatecache_flg = False

	# Reload the whole config from scratch.
	settings, trees, mtimedb = load_emerge_config(trees=trees)
	portdb = trees[settings["ROOT"]]["porttree"].dbapi

	if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
		action_metadata(settings, portdb, myopts)

	if portage._global_updates(trees, mtimedb["updates"]):
		# NOTE(review): mtimedb.commit() appears to be missing here.
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi

	mybestpv = portdb.xmatch("bestmatch-visible", "sys-apps/portage")
	mypvs = portage.best(
		trees[settings["ROOT"]]["vartree"].dbapi.match("sys-apps/portage"))

	chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())

	if myaction != "metadata":
		# Run the user's post-sync hook if present and executable.
		if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
			retval = portage.process.spawn(
				[os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
				dosyncuri], env=settings.environ())
			if retval != os.EX_OK:
				print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")

	# Suggest a portage upgrade when a newer version is visible.
	if(mybestpv != mypvs) and not "--quiet" in myopts:
		print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
		print red(" * ")+"that you update portage now, before any other packages are updated."
		print red(" * ")+"To update portage, run 'emerge portage' now."

	display_news_notification(trees)
def action_metadata(settings, portdb, myopts):
	"""Transfer the metadata cache from ${PORTDIR}/metadata/cache into the
	local depcachedir, showing a percentage progress readout unless
	--quiet was given.

	NOTE(review): several lines appear to be missing from this copy
	(sys.exit on a bad depcachedir, makedirs, `yield` statements, an
	`else:` branch); flagged inline.
	"""
	portage.writemsg_stdout("\n>>> Updating Portage cache: ")
	old_umask = os.umask(0002)
	cachedir = os.path.normpath(settings.depcachedir)
	# Refuse to operate when depcachedir points at a primary system dir.
	if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
		"/lib", "/opt", "/proc", "/root", "/sbin",
		"/sys", "/tmp", "/usr", "/var"]:
		print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
			"ROOT DIRECTORY ON YOUR SYSTEM."
		print >> sys.stderr, \
			"!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
		# NOTE(review): a sys.exit call presumably follows here.
	if not os.path.exists(cachedir):
		# NOTE(review): os.makedirs(cachedir) appears to be missing here.

	ec = portage.eclass_cache.cache(portdb.porttree_root)
	myportdir = os.path.realpath(settings["PORTDIR"])
	cm = settings.load_best_module("portdbapi.metadbmodule")(
		myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)

	from portage.cache import util

	class percentage_noise_maker(util.quiet_mirroring):
		# Wraps the quiet mirroring driver with a percentage progress readout.
		def __init__(self, dbapi):
			# NOTE(review): the self.dbapi assignment appears to be missing.
			self.cp_all = dbapi.cp_all()
			l = len(self.cp_all)
			self.call_update_min = 100000000
			# Allow one visual update per percent of packages processed.
			self.min_cp_all = l/100.0
			# NOTE(review): counter/pstr initialization appears missing.

		# NOTE(review): the `__iter__` method header for the generator loop
		# below appears to be missing.
			for x in self.cp_all:
				if self.count > self.min_cp_all:
					self.call_update_min = 0
				for y in self.dbapi.cp_list(x):
					# NOTE(review): a `yield y` appears to be missing here.
				self.call_update_mine = 0

		def update(self, *arg):
			# Advance the printed percentage, erasing the previous value
			# with backspaces before writing the new one.
			try: self.pstr = int(self.pstr) + 1
			except ValueError: self.pstr = 1
			sys.stdout.write("%s%i%%" % \
				("\b" * (len(str(self.pstr))+1), self.pstr))
			self.call_update_min = 10000000

		def finish(self, *arg):
			# Overwrite the last percentage with a final "100%".
			sys.stdout.write("\b\b\b\b100%\n")

	if "--quiet" in myopts:
		def quicky_cpv_generator(cp_all_list):
			# Flatten cp_all into a stream of CPVs without progress output.
			for x in cp_all_list:
				for y in portdb.cp_list(x):
					# NOTE(review): a `yield y` appears to be missing here.
		source = quicky_cpv_generator(portdb.cp_all())
		noise_maker = portage.cache.util.quiet_mirroring()
	# NOTE(review): an `else:` appears to be missing before the next line.
		noise_maker = source = percentage_noise_maker(portdb)
	portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
		eclass_cache=ec, verbose_instance=noise_maker)
	# NOTE(review): the final flush and os.umask(old_umask) restoration
	# appear to be missing here.
def action_regen(settings, portdb):
	"""Regenerate the ebuild metadata cache by running aux_get on every CPV
	in every porttree, then discard cache entries whose ebuilds no longer
	exist.

	NOTE(review): try/except wrappers, some loop headers, and the
	dead_nodes initialization appear to be missing from this copy;
	flagged inline.
	"""
	xterm_titles = "notitles" not in settings.features
	emergelog(xterm_titles, " === regen")
	#regenerate cache entries
	portage.writemsg_stdout("Regenerating cache entries...\n")
	# NOTE(review): a `try:` presumably wraps the close below.
	os.close(sys.stdin.fileno())
	except SystemExit, e:
		raise # Needed else can't exit
	# NOTE(review): a broad except/pass presumably completed this wrapper.

	mynodes = portdb.cp_all()
	from portage.cache.cache_errors import CacheError
	# NOTE(review): the dead_nodes dict initialization appears missing.
	for mytree in portdb.porttrees:
		# Snapshot every existing cache key; survivors are removed as we
		# revisit them, leaving only dead entries behind.
		# NOTE(review): a `try:` presumably wraps the next statement.
		dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
		except CacheError, e:
			portage.writemsg("Error listing cache entries for " + \
				"'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
			# NOTE(review): the dead_nodes invalidation appears missing.

	# NOTE(review): the `for x in mynodes:` loop header appears missing.
		mymatches = portdb.cp_list(x)
		portage.writemsg_stdout("Processing %s\n" % x)
		# NOTE(review): the `for y in mymatches:` loop and a `try:` appear
		# to be missing before the aux_get call below.
			# aux_get populates the cache entry as a side effect.
			foo = portdb.aux_get(y,["DEPEND"])
		except (KeyError, portage.exception.PortageException), e:
			# NOTE(review): the portage.writemsg( opening line appears
			# to be missing before these argument lines.
				"Error processing %(cpv)s, continuing... (%(e)s)\n" % \
				{"cpv":y,"e":str(e)}, noiselevel=-1)
		# This CPV still has an ebuild, so its cache entry is alive.
		for mytree in portdb.porttrees:
			if portdb.findname2(y, mytree=mytree)[0]:
				dead_nodes[mytree].discard(y)
	# NOTE(review): an `if dead_nodes:` guard appears to be missing.
	for mytree, nodes in dead_nodes.iteritems():
		auxdb = portdb.auxdb[mytree]
		# NOTE(review): the loop deleting nodes from auxdb (inside a try)
		# appears to be missing before the handler below.
		except (KeyError, CacheError):
			# Entries that vanished concurrently are fine to ignore.
	portage.writemsg_stdout("done!\n")
def action_config(settings, trees, myopts, myfiles):
	"""Run the pkg_config phase for one installed package matching the
	given atom, prompting for a selection (with --ask) when multiple
	packages match.

	NOTE(review): some lines (sys.exit calls, else branches, the prompt
	loop bodies) appear to be missing from this copy; flagged inline.
	"""
	if len(myfiles) != 1:
		print red("!!! config can only take a single package atom at this time\n")
		# NOTE(review): a sys.exit call presumably follows here.
	if not is_valid_package_atom(myfiles[0]):
		# NOTE(review): the closing argument line (noiselevel) of the next
		# call appears to be missing.
		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
		# NOTE(review): a sys.exit call presumably follows here.
	# NOTE(review): a `try:` presumably wraps the match below.
	pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
	except ValueError, e:
		# Multiple matches thrown from cpv_expand
		# NOTE(review): the pkgs recovery from e.args appears missing.
	# NOTE(review): an emptiness guard appears to be missing before this.
		print "No packages found.\n"
	# NOTE(review): a `len(pkgs) > 1` guard appears to be missing here.
	if "--ask" in myopts:
		# Let the user pick one of the matching packages by number.
		print "Please select a package to configure:"
		# NOTE(review): the options list init and the enumeration loop
		# header appear to be missing before the next lines.
			options.append(str(idx))
			print options[-1]+") "+pkg
		# NOTE(review): the cancel option lines appear to be missing.
		idx = userquery("Selection?", options)
		# NOTE(review): cancel handling appears to be missing here.
		pkg = pkgs[int(idx)-1]
	# NOTE(review): an `else:` branch header appears to be missing here.
		print "The following packages available:"
		# NOTE(review): the loop printing each package appears missing.
		print "\nPlease use a specific atom or the --ask option."
	# NOTE(review): the single-match `pkg = pkgs[0]` path appears missing.
	if "--ask" in myopts:
		if userquery("Ready to configure "+pkg+"?") == "No":
			# NOTE(review): the abort handling appears to be missing here.
	print "Configuring pkg..."
	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
	mysettings = portage.config(clone=settings)
	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
	debug = mysettings.get("PORTAGE_DEBUG") == "1"
	# NOTE(review): the mysettings argument line of this doebuild call
	# appears to be missing.  Also suspicious: the comparison below tests
	# a string against int 1 (`== 1`), unlike `debug` above which compares
	# against "1" -- likely always False; confirm upstream.
	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
		debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
	if retval == os.EX_OK:
		portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
			mysettings, debug=debug, mydbapi=vardb, tree="vartree")
7449 def action_info(settings, trees, myopts, myfiles):
# Implement `emerge --info`: print the Portage version banner, system-wide
# settings, and (for any package atoms in myfiles) per-package build-time
# settings that differ from the current global configuration.
# NOTE(review): this excerpt elides many interior lines; comments below
# describe only the code that is visible here.
7450 	unameout=commands.getstatusoutput("uname -mrp")[1]
7451 	print getportageversion(settings["PORTDIR"], settings["ROOT"],
7452 		settings.profile_path, settings["CHOST"],
7453 		trees[settings["ROOT"]]["vartree"].dbapi)
# Centered "System Settings" banner between two "=" rules.
7455 	header_title = "System Settings"
7457 	print header_width * "="
7458 	print header_title.rjust(int(header_width/2 + len(header_title)/2))
7459 	print header_width * "="
7460 	print "System uname: "+unameout
# Tree timestamp comes from metadata/timestamp.chk inside PORTDIR.
7462 	lastSync = portage.grabfile(os.path.join(
7463 		settings["PORTDIR"], "metadata", "timestamp.chk"))
7464 	print "Timestamp of tree:",
# Report distcc/ccache versions and whether the FEATURES flag is enabled.
7470 	output=commands.getstatusoutput("distcc --version")
7472 		print str(output[1].split("\n",1)[0]),
7473 		if "distcc" in settings.features:
7478 	output=commands.getstatusoutput("ccache -V")
7480 		print str(output[1].split("\n",1)[0]),
7481 		if "ccache" in settings.features:
# Key toolchain packages, extended by the profile's info_pkgs list,
# de-duplicated, then each printed with its installed version(s).
7486 	myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
7487 	          "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
7488 	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
7489 	myvars  = portage.util.unique_array(myvars)
7493 		if portage.isvalidatom(x):
7494 			pkg_matches = trees["/"]["vartree"].dbapi.match(x)
7495 			pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
7496 			pkg_matches.sort(portage.pkgcmp)
7498 			for pn, ver, rev in pkg_matches:
7500 					pkgs.append(ver + "-" + rev)
7504 				pkgs = ", ".join(pkgs)
7505 				print "%-20s %s" % (x+":", pkgs)
7507 			print "%-20s %s" % (x+":", "[NOT VALID]")
7509 	libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# --verbose dumps every config key; otherwise a curated list plus the
# profile's info_vars additions.
7511 	if "--verbose" in myopts:
7512 		myvars=settings.keys()
7514 		myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
7515 		          'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
7516 		          'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
7517 		          'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
7519 		myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
7521 	myvars = portage.util.unique_array(myvars)
7527 				print '%s="%s"' % (x, settings[x])
# USE is printed specially: USE_EXPAND-derived flags (e.g. video_cards_*)
# are filtered out of the plain USE line and printed per-variable instead.
7529 				use = set(settings["USE"].split())
7530 				use_expand = settings["USE_EXPAND"].split()
7532 				for varname in use_expand:
7533 					flag_prefix = varname.lower() + "_"
7535 						if f.startswith(flag_prefix):
7539 				print 'USE="%s"' % " ".join(use),
7540 				for varname in use_expand:
7541 					myval = settings.get(varname)
7543 						print '%s="%s"' % (varname, myval),
7546 			unset_vars.append(x)
7548 		print "Unset:  "+", ".join(unset_vars)
# --debug additionally dumps the cvs_id_string of every portage submodule.
7551 	if "--debug" in myopts:
7552 		for x in dir(portage):
7553 			module = getattr(portage, x)
7554 			if "cvs_id_string" in dir(module):
7555 				print "%s: %s" % (str(x), str(module.cvs_id_string))
7557 	# See if we can find any packages installed matching the strings
7558 	# passed on the command line
7560 	vardb = trees[settings["ROOT"]]["vartree"].dbapi
7561 	portdb = trees[settings["ROOT"]]["porttree"].dbapi
7563 		mypkgs.extend(vardb.match(x))
7565 	# If some packages were found...
7567 		# Get our global settings (we only print stuff if it varies from
7568 		# the current config)
7569 		mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS' ]
7570 		auxkeys = mydesiredvars + [ "USE", "IUSE"]
7572 		pkgsettings = portage.config(clone=settings)
7574 		for myvar in mydesiredvars:
7575 			global_vals[myvar] = set(settings.get(myvar, "").split())
7577 		# Loop through each package
7578 		# Only print settings if they differ from global settings
7579 		header_title = "Package Settings"
7580 		print header_width * "="
7581 		print header_title.rjust(int(header_width/2 + len(header_title)/2))
7582 		print header_width * "="
7583 		from portage.output import EOutput
7586 			# Get all package specific variables
7587 			auxvalues = vardb.aux_get(pkg, auxkeys)
7589 			for i in xrange(len(auxkeys)):
7590 				valuesmap[auxkeys[i]] = set(auxvalues[i].split())
7592 			for myvar in mydesiredvars:
7593 				# If the package variable doesn't match the
7594 				# current global variable, something has changed
7595 				# so set diff_found so we know to print
7596 				if valuesmap[myvar] != global_vals[myvar]:
7597 					diff_values[myvar] = valuesmap[myvar]
# Only USE flags that are actually in IUSE are considered meaningful.
7598 			valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
7599 			valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
7601 			# If a matching ebuild is no longer available in the tree, maybe it
7602 			# would make sense to compare against the flags for the best
7603 			# available version with the same slot?
7605 			if portdb.cpv_exists(pkg):
7607 				pkgsettings.setcpv(pkg, mydb=mydb)
7608 				if valuesmap["IUSE"].intersection(
7609 					pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
7610 					diff_values["USE"] = valuesmap["USE"]
7611 			# If a difference was found, print the info for
7614 				# Print package info
7615 				print "%s was built with the following:" % pkg
7616 				for myvar in mydesiredvars + ["USE"]:
7617 					if myvar in diff_values:
7618 						mylist = list(diff_values[myvar])
7620 						print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Run the ebuild's pkg_info() phase for each matched package.
7622 			print ">>> Attempting to run pkg_info() for '%s'" % pkg
7623 			ebuildpath = vardb.findname(pkg)
7624 			if not ebuildpath or not os.path.exists(ebuildpath):
7625 				out.ewarn("No ebuild found for '%s'" % pkg)
7627 			portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
# NOTE(review): PORTAGE_DEBUG is stored as a string, so `== 1` compares
# str to int and is always False here — likely should be == "1"; confirm
# against the upstream emerge source before changing.
7628 				pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
7629 				mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
7632 def action_search(root_config, myopts, myfiles, spinner):
# Implement `emerge --search`: build one search instance configured from
# the command-line options, run it for every search term, then print the
# accumulated results. NOTE(review): some interior lines are elided in
# this excerpt.
7634 		print "emerge: no search terms provided."
# Option flags map to search() constructor arguments: description search,
# verbosity, and whether binary packages are consulted.
7636 		searchinstance = search(root_config,
7637 			spinner, "--searchdesc" in myopts,
7638 			"--quiet" not in myopts, "--usepkg" in myopts,
7639 			"--usepkgonly" in myopts)
7640 		for mysearch in myfiles:
7642 				searchinstance.execute(mysearch)
# Search terms may be regular expressions; report bad patterns per-term
# instead of aborting the whole search.
7643 			except re.error, comment:
7644 				print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
7646 			searchinstance.output()
7648 def action_depclean(settings, trees, ldpath_mtimes,
7649 	myopts, action, myfiles, spinner):
# Implement `emerge --depclean` / `--prune`: compute the set of installed
# packages that nothing (system set, world set, or another installed
# package) depends on, then unmerge them in dependency order.
# NOTE(review): this excerpt elides many interior lines; comments below
# describe only the visible code.
7650 	# Kill packages that aren't explicitly merged or are required as a
7651 	# dependency of another package. World file is explicit.
7653 	# Global depclean or prune operations are not very safe when there are
7654 	# missing dependencies since it's unknown how badly incomplete
7655 	# the dependency graph is, and we might accidentally remove packages
7656 	# that should have been pulled into the graph. On the other hand, it's
7657 	# relatively safe to ignore missing deps when only asked to remove
7658 	# specific packages.
7659 	allow_missing_deps = len(myfiles) > 0
# Standing warning text shown before a global depclean.
7662 	msg.append("Depclean may break link level dependencies.  Thus, it is\n")
7663 	msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
7664 	msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
7666 	msg.append("Also study the list of packages to be cleaned for any obvious\n")
7667 	msg.append("mistakes. Packages that are part of the world set will always\n")
7668 	msg.append("be kept. They can be manually added to this set with\n")
7669 	msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
7670 	msg.append("package.provided (see portage(5)) will be removed by\n")
7671 	msg.append("depclean, even if they are part of the world set.\n")
7673 	msg.append("As a safety measure, depclean will not remove any packages\n")
7674 	msg.append("unless *all* required dependencies have been resolved.  As a\n")
7675 	msg.append("consequence, it is often necessary to run\n")
7676 	msg.append(good("`emerge --update --newuse --deep world`") + " prior to depclean.\n")
# The warning is only shown for a full (argument-less) depclean.
7678 	if action == "depclean" and "--quiet" not in myopts and not myfiles:
7679 		portage.writemsg_stdout("\n")
7681 			portage.writemsg_stdout(colorize("BAD", "*** WARNING *** ") + x)
7683 	xterm_titles = "notitles" not in settings.features
7684 	myroot = settings["ROOT"]
7685 	portdb = trees[myroot]["porttree"].dbapi
# Dependency checking is done against a fake vartree so that only
# installed packages can satisfy atoms (porttree aliases the vartree).
7687 	dep_check_trees = {}
7688 	dep_check_trees[myroot] = {}
7689 	dep_check_trees[myroot]["vartree"] = \
7690 		FakeVartree(trees[myroot]["vartree"],
7691 		trees[myroot]["porttree"].dbapi,
7692 		depgraph._mydbapi_keys, pkg_cache)
7693 	vardb = dep_check_trees[myroot]["vartree"].dbapi
7694 	# Constrain dependency selection to the installed packages.
7695 	dep_check_trees[myroot]["porttree"] = dep_check_trees[myroot]["vartree"]
7696 	root_config = trees[myroot]["root_config"]
7697 	setconfig = root_config.setconfig
7698 	syslist = setconfig.getSetAtoms("system")
7699 	worldlist = setconfig.getSetAtoms("world")
7700 	args_set = InternalPackageSet()
# fakedb accumulates every package proven to be required; anything not in
# it at the end is a removal candidate.
7701 	fakedb = portage.fakedbapi(settings=settings)
7702 	myvarlist = vardb.cpv_all()
# Sanity checks: refuse to proceed silently with a broken system/world
# list or an empty installed-package database.
7705 		print "\n!!! You have no system list.",
7707 		print "\n!!! You have no world file.",
7709 		print "\n!!! You have no installed package database (%s)." % portage.VDB_PATH,
7711 	if not (syslist and worldlist and myvarlist):
7712 		print "\n!!! Proceeding "+(syslist and myvarlist and "may" or "will")
7713 		print " break your installation.\n"
7714 		if "--pretend" not in myopts:
7715 			countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
7717 	if action == "depclean":
7718 		emergelog(xterm_titles, " >>> depclean")
# Validate and expand any package atoms given on the command line.
7721 		if not is_valid_package_atom(x):
7722 			portage.writemsg("!!! '%s' is not a valid package atom.\n" % x,
7724 			portage.writemsg("!!! Please check ebuild(5) for full details.\n")
7727 			atom = portage.dep_expand(x, mydb=vardb, settings=settings)
7728 		except ValueError, e:
7729 			print "!!! The short ebuild name \"" + x + "\" is ambiguous.  Please specify"
7730 			print "!!! one of the following fully-qualified ebuild names instead:\n"
7732 				print "    " + colorize("INFORM", i)
7736 		matched_packages = False
7739 			matched_packages = True
7741 	if not matched_packages:
7742 		portage.writemsg_stdout(
7743 			">>> No packages selected for removal by %s\n" % action)
7746 	if "--quiet" not in myopts:
7747 		print "\nCalculating dependencies  ",
# Dependency types map to unmerge priorities: RDEPEND/PDEPEND are
# runtime, DEPEND is build-time (optionally ignored via --with-bdeps=n).
7749 	runtime = UnmergeDepPriority(runtime=True)
7750 	runtime_post = UnmergeDepPriority(runtime_post=True)
7751 	buildtime = UnmergeDepPriority(buildtime=True)
7755 		"PDEPEND": runtime_post,
7756 		"DEPEND": buildtime,
# Seed the worklist of (atom, parent, priority) with what must be kept.
7759 	remaining_atoms = []
7760 	if action == "depclean":
7761 		for atom in syslist:
7762 			if vardb.match(atom):
7763 				remaining_atoms.append((atom, 'system', runtime))
7765 			# Pull in everything that's installed since we don't want
7766 			# to clean any package if something depends on it.
7767 			remaining_atoms.extend(
7768 				("="+cpv, 'world', runtime) for cpv in vardb.cpv_all())
7770 			for atom in worldlist:
7771 				if vardb.match(atom):
7772 					remaining_atoms.append((atom, 'world', runtime))
7773 	elif action == "prune":
7774 		for atom in syslist:
7775 			if vardb.match(atom):
7776 				remaining_atoms.append((atom, 'system', runtime))
7777 		# Pull in everything that's installed since we don't want to prune a
7778 		# package if something depends on it.
7779 		remaining_atoms.extend(
7780 			(atom, 'world', runtime) for atom in vardb.cp_all())
7782 			# Try to prune everything that's slotted.
7783 			for cp in vardb.cp_all():
7784 				if len(vardb.cp_list(cp)) > 1:
7788 	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
7789 	metadata_keys = depgraph._mydbapi_keys
7791 	with_bdeps = myopts.get("--with-bdeps", "y") == "y"
# Main reachability loop: pop an atom, mark its best installed match as
# required, and push that package's own dependency atoms.
7793 	while remaining_atoms:
7794 		atom, parent, priority = remaining_atoms.pop()
7795 		pkgs = vardb.match(atom)
# An unmatched hard (non-soft) dependency is recorded as unresolveable.
7797 			if priority > UnmergeDepPriority.SOFT:
7798 				unresolveable.setdefault(atom, []).append(parent)
7800 		if action == "depclean" and parent == "world" and myfiles:
7801 			# Filter out packages given as arguments since the user wants
7805 				metadata = dict(izip(metadata_keys,
7806 					vardb.aux_get(pkg, metadata_keys)))
7809 					arg_atom = args_set.findAtomForPackage(pkg, metadata)
7810 				except portage.exception.InvalidDependString, e:
7811 					file_path = os.path.join(
7812 						myroot, portage.VDB_PATH, pkg, "PROVIDE")
7813 					portage.writemsg("\n\nInvalid PROVIDE: %s\n" % str(e),
7815 					portage.writemsg("See '%s'\n" % file_path,
7819 					filtered_pkgs.append(pkg)
7820 			pkgs = filtered_pkgs
7822 			# For consistency with the update algorithm, keep the highest
7823 			# visible version and prune any versions that are old or masked.
7824 			for cpv in reversed(pkgs):
7825 				if visible(settings,
7826 					pkg_cache[("installed", myroot, cpv, "nomerge")]):
7830 				# They're all masked, so just keep the highest version.
7833 			graph.add(pkg, parent, priority=priority)
# Skip packages already processed; otherwise record them as required and
# expand their dependency strings.
7834 			if fakedb.cpv_exists(pkg):
7837 			fakedb.cpv_inject(pkg)
7838 			myaux = dict(izip(aux_keys, vardb.aux_get(pkg, aux_keys)))
7841 			usedef = vardb.aux_get(pkg, ["USE"])[0].split()
7842 			for dep_type, depstr in myaux.iteritems():
7847 				if not with_bdeps and dep_type == "DEPEND":
7850 				priority = priority_map[dep_type]
7851 				if "--debug" in myopts:
7853 					print "Parent:   ", pkg
7854 					print "Depstring:", depstr
7855 					print "Priority:", priority
# Strict dep checking is relaxed around dep_check since installed
# packages may carry dependency strings that are invalid by current rules.
7858 					portage.dep._dep_check_strict = False
7859 					success, atoms = portage.dep_check(depstr, None, settings,
7860 						myuse=usedef, trees=dep_check_trees, myroot=myroot)
7862 					portage.dep._dep_check_strict = True
7864 					show_invalid_depstring_notice(
7865 						("installed", myroot, pkg, "nomerge"),
7869 				if "--debug" in myopts:
7870 					print "Candidates:", atoms
# Blocker atoms ("!...") are not dependencies to follow.
7873 					if atom.startswith("!"):
7875 					remaining_atoms.append((atom, pkg, priority))
7877 	if "--quiet" not in myopts:
7878 		print "\b\b... done!\n"
# Abort (unless removing specific packages) when required deps are missing.
7880 	if unresolveable and not allow_missing_deps:
7881 		print "Dependencies could not be completely resolved due to"
7882 		print "the following required packages not being installed:"
7884 		for atom in unresolveable:
7885 			print atom, "required by", " ".join(unresolveable[atom])
7886 	if unresolveable and not allow_missing_deps:
7888 		print "Have you forgotten to run " + good("`emerge --update --newuse --deep world`") + " prior to"
7889 		print "%s?  It may be necessary to manually uninstall packages that no longer" % action
7890 		print "exist in the portage tree since it may not be possible to satisfy their"
7891 		print "dependencies.  Also, be aware of the --with-bdeps option that is documented"
7892 		print "in " + good("`man emerge`") + "."
7894 		if action == "prune":
7895 			print "If you would like to ignore dependencies then use %s." % \
# Helper: show which parents pulled a kept package into the graph
# (used for --verbose reporting).
7899 	def show_parents(child_node):
7900 		parent_nodes = graph.parent_nodes(child_node)
7901 		if not parent_nodes:
7902 			# With --prune, the highest version can be pulled in without any
7903 			# real parent since all installed packages are pulled in.  In that
7904 			# case there's nothing to show here.
7908 		msg.append("  %s pulled in by:\n" % str(child_node))
7909 		for parent_node in parent_nodes:
7910 			msg.append("    %s\n" % str(parent_node))
7912 		portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Build cleanlist: every installed package not recorded in fakedb.
7915 	if action == "depclean":
7917 			for pkg in vardb.cpv_all():
7918 				metadata = dict(izip(metadata_keys,
7919 					vardb.aux_get(pkg, metadata_keys)))
7922 					arg_atom = args_set.findAtomForPackage(pkg, metadata)
7923 				except portage.exception.InvalidDependString:
7924 					# this error has already been displayed by now
7927 					if not fakedb.cpv_exists(pkg):
7928 						cleanlist.append(pkg)
7929 					elif "--verbose" in myopts:
7932 			for pkg in vardb.cpv_all():
7933 				if not fakedb.cpv_exists(pkg):
7934 					cleanlist.append(pkg)
7935 				elif "--verbose" in myopts:
7937 	elif action == "prune":
7938 		# Prune really uses all installed instead of world. It's not a real
7939 		# reverse dependency so don't display it as such.
7940 		if graph.contains("world"):
7941 			graph.remove("world")
7942 		for atom in args_set:
7943 			for pkg in vardb.match(atom):
7944 				if not fakedb.cpv_exists(pkg):
7945 					cleanlist.append(pkg)
7946 				elif "--verbose" in myopts:
7950 		portage.writemsg_stdout(
7951 			">>> No packages selected for removal by %s\n" % action)
7952 		if "--verbose" not in myopts:
7953 			portage.writemsg_stdout(
7954 				">>> To see reverse dependencies, use %s\n" % \
7956 		if action == "prune":
7957 			portage.writemsg_stdout(
7958 				">>> To ignore dependencies, use %s\n" % \
7962 		# Use a topological sort to create an unmerge order such that
7963 		# each package is unmerged before it's dependencies. This is
7964 		# necessary to avoid breaking things that may need to run
7965 		# during pkg_prerm or pkg_postrm phases.
7967 		# Create a new graph to account for dependencies between the
7968 		# packages being unmerged.
7970 		clean_set = set(cleanlist)
7972 		for node in clean_set:
7973 			graph.add(node, None)
7974 			myaux = dict(izip(aux_keys, vardb.aux_get(node, aux_keys)))
7976 			usedef = vardb.aux_get(node, ["USE"])[0].split()
7977 			for dep_type, depstr in myaux.iteritems():
7981 					portage.dep._dep_check_strict = False
7982 					success, atoms = portage.dep_check(depstr, None, settings,
7983 						myuse=usedef, trees=dep_check_trees, myroot=myroot)
7985 					portage.dep._dep_check_strict = True
7987 					show_invalid_depstring_notice(
7988 						("installed", myroot, node, "nomerge"),
7992 				priority = priority_map[dep_type]
7994 					if atom.startswith("!"):
7996 					matches = vardb.match(atom)
# Only edges between two packages that are both being removed matter
# for ordering the unmerge.
8000 						if cpv in clean_set:
8001 							graph.add(cpv, node, priority=priority)
8004 		if len(graph.order) == len(graph.root_nodes()):
8005 			# If there are no dependencies between packages
8006 			# let unmerge() group them by cat/pn.
8008 			cleanlist = graph.all_nodes()
8010 			# Order nodes from lowest to highest overall reference count for
8011 			# optimal root node selection.
8013 			for node in graph.order:
8014 				node_refcounts[node] = len(graph.parent_nodes(node))
8015 			def cmp_reference_count(node1, node2):
8016 				return node_refcounts[node1] - node_refcounts[node2]
8017 			graph.order.sort(cmp_reference_count)
# Repeatedly peel off root nodes, progressively ignoring higher dep
# priorities to break circular dependencies when no true root exists.
8019 			ignore_priority_range = [None]
8020 			ignore_priority_range.extend(
8021 				xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
8022 			while not graph.empty():
8023 				for ignore_priority in ignore_priority_range:
8024 					nodes = graph.root_nodes(ignore_priority=ignore_priority)
8028 					raise AssertionError("no root nodes")
8029 				if ignore_priority is not None:
8030 					# Some deps have been dropped due to circular dependencies,
8031 					# so only pop one node in order do minimize the number that
8036 					cleanlist.append(node)
8038 		unmerge(root_config, myopts, "unmerge", cleanlist,
8039 			ldpath_mtimes, ordered=ordered)
8041 	if action == "prune":
8044 	if not cleanlist and "--quiet" in myopts:
# Final summary counters.
8047 	print "Packages installed:   "+str(len(myvarlist))
8048 	print "Packages in world:    "+str(len(worldlist))
8049 	print "Packages in system:   "+str(len(syslist))
8050 	print "Unique package names: "+str(len(myvarlist))
8051 	print "Required packages:    "+str(len(fakedb.cpv_all()))
8052 	if "--pretend" in myopts:
8053 		print "Number to remove:     "+str(len(cleanlist))
8055 		print "Number removed:       "+str(len(cleanlist))
8057 def action_build(settings, trees, mtimedb,
8058 	myopts, myaction, myfiles, spinner):
# The main merge driver: validates any --resume state, builds a depgraph
# (either from the resume list or from myfiles), optionally displays it
# and prompts the user, then runs the MergeTask and post-merge cleanup.
# NOTE(review): this excerpt elides many interior lines; comments below
# describe only the visible code.
8060 	# validate the state of the resume data
8061 	# so that we can make assumptions later.
8062 	for k in ("resume", "resume_backup"):
8063 		if k not in mtimedb:
8065 		resume_data = mtimedb[k]
# Each resume entry must be a dict with a list "mergelist", dict/list
# "myopts", and list "favorites"; malformed entries are handled (handling
# lines elided here).
8066 		if not isinstance(resume_data, dict):
8069 		mergelist = resume_data.get("mergelist")
8070 		if not isinstance(mergelist, list):
8073 		resume_opts = resume_data.get("myopts")
8074 		if not isinstance(resume_opts, (dict, list)):
8077 		favorites = resume_data.get("favorites")
8078 		if not isinstance(favorites, list):
8083 	if "--resume" in myopts and \
8084 		("resume" in mtimedb or
8085 		"resume_backup" in mtimedb):
# Fall back to the backup resume list when the primary one is gone.
8087 		if "resume" not in mtimedb:
8088 			mtimedb["resume"] = mtimedb["resume_backup"]
8089 			del mtimedb["resume_backup"]
8091 		# "myopts" is a list for backward compatibility.
8092 		resume_opts = mtimedb["resume"].get("myopts", [])
8093 		if isinstance(resume_opts, list):
8094 			resume_opts = dict((k,True) for k in resume_opts)
# One-shot options must not carry over into the resumed run.
8095 		for opt in ("--skipfirst", "--ask", "--tree"):
8096 			resume_opts.pop(opt, None)
8097 		myopts.update(resume_opts)
8098 		# Adjust config according to options of the command being resumed.
8099 		for myroot in trees:
8100 			mysettings =  trees[myroot]["vartree"].settings
8102 			adjust_config(myopts, mysettings)
8104 		del myroot, mysettings
8106 	ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags as booleans.
8109 	buildpkgonly = "--buildpkgonly" in myopts
8110 	pretend = "--pretend" in myopts
8111 	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
8112 	ask = "--ask" in myopts
8113 	nodeps = "--nodeps" in myopts
8114 	oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
8115 	tree = "--tree" in myopts
8118 		del myopts["--tree"]
8119 		portage.writemsg(colorize("WARN", " * ") + \
8120 			"--tree is broken with --nodeps. Disabling...\n")
8121 	debug = "--debug" in myopts
8122 	verbose = "--verbose" in myopts
8123 	quiet = "--quiet" in myopts
8124 	if pretend or fetchonly:
8125 		# make the mtimedb readonly
8126 		mtimedb.filename = None
# Warn that --digest can mask corruption; repoman manifest is preferred.
8127 	if "--digest" in myopts:
8128 		msg = "The --digest option can prevent corruption from being" + \
8129 			" noticed. The `repoman manifest` command is the preferred" + \
8130 			" way to generate manifests and it is capable of doing an" + \
8131 			" entire repository or category at once."
8133 		writemsg(prefix + "\n")
8134 		from textwrap import wrap
8135 		for line in wrap(msg, 72):
8136 			writemsg("%s%s\n" % (prefix, line))
8137 		writemsg(prefix + "\n")
# Announce what kind of run this is (merge/fetch/build) for verbose modes.
8139 	if "--quiet" not in myopts and \
8140 		("--pretend" in myopts or "--ask" in myopts or \
8141 		"--tree" in myopts or "--verbose" in myopts):
8143 		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
8145 		elif "--buildpkgonly" in myopts:
8149 		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
8151 			print darkgreen("These are the packages that would be %s, in reverse order:") % action
8155 			print darkgreen("These are the packages that would be %s, in order:") % action
8158 	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
8159 	if not show_spinner:
8160 		spinner.update = spinner.update_quiet
# Resume path: rebuild the depgraph from the saved mergelist.
8163 		favorites = mtimedb["resume"].get("favorites")
8164 		if not isinstance(favorites, list):
8168 			print "Calculating dependencies  ",
8169 		myparams = create_depgraph_params(myopts, myaction)
8170 		mydepgraph = depgraph(settings, trees,
8171 			myopts, myparams, spinner)
8174 			success = mydepgraph.loadResumeCommand(mtimedb["resume"])
8175 		except (portage.exception.PackageNotFound,
8176 			mydepgraph.UnsatisfiedResumeDep), e:
# Resume failed: explain why (stale list, missing package, or
# uninstalled expected dependency) before dropping the resume data.
8179 			from textwrap import wrap
8180 			from portage.output import EOutput
8183 			resume_data = mtimedb["resume"]
8184 			mergelist = resume_data.get("mergelist")
8185 			if not isinstance(mergelist, list):
8187 			if mergelist and debug or (verbose and not quiet):
8188 				out.eerror("Invalid resume list:")
8191 				for task in mergelist:
8192 					if isinstance(task, list):
8193 						out.eerror(indent + str(tuple(task)))
8196 			if isinstance(e, mydepgraph.UnsatisfiedResumeDep):
8197 				out.eerror("One or more expected dependencies " + \
8198 					"are not installed:")
8202 					out.eerror(indent + str(dep.atom) + " pulled in by:")
8203 					out.eerror(2 * indent + str(dep.parent))
8205 				msg = "The resume list contains packages " + \
8206 					"with dependencies that have not been " + \
8207 					"installed yet. Please restart/continue " + \
8208 					"the operation manually."
8209 				for line in wrap(msg, 72):
8211 			elif isinstance(e, portage.exception.PackageNotFound):
8212 				out.eerror("An expected package is " + \
8213 					"not available: %s" % str(e))
8215 				msg = "The resume list contains one or more " + \
8216 					"packages that are no longer " + \
8217 					"available. Please restart/continue " + \
8218 					"the operation manually."
8219 				for line in wrap(msg, 72):
8223 				print "\b\b... done!"
8226 			mydepgraph.display_problems()
8227 			if not (ask or pretend):
8228 				# delete the current list and also the backup
8229 				# since it's probably stale too.
8230 				for k in ("resume", "resume_backup"):
8231 					mtimedb.pop(k, None)
8236 		if ("--resume" in myopts):
8237 			print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build a fresh depgraph from the requested files/atoms.
8240 		myparams = create_depgraph_params(myopts, myaction)
8241 		if "--quiet" not in myopts and "--nodeps" not in myopts:
8242 			print "Calculating dependencies  ",
8244 		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
8246 			retval, favorites = mydepgraph.select_files(myfiles)
8247 		except portage.exception.PackageNotFound, e:
8248 			portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
8251 			print "\b\b... done!"
8253 		mydepgraph.display_problems()
# Interactive display branch: show the merge list and prompt (--ask) or
# just display it (--tree/--verbose) before merging.
8256 	if "--pretend" not in myopts and \
8257 		("--ask" in myopts or "--tree" in myopts or \
8258 		"--verbose" in myopts) and \
8259 		not ("--quiet" in myopts and "--ask" not in myopts):
8260 		if "--resume" in myopts:
8261 			mymergelist = mydepgraph.altlist()
8262 			if len(mymergelist) == 0:
8263 				print colorize("INFORM", "emerge: It seems we have nothing to resume...")
8265 			favorites = mtimedb["resume"]["favorites"]
8266 			retval = mydepgraph.display(
8267 				mydepgraph.altlist(reversed=tree),
8268 				favorites=favorites)
8269 			mydepgraph.display_problems()
8270 			if retval != os.EX_OK:
8272 			prompt="Would you like to resume merging these packages?"
8274 			retval = mydepgraph.display(
8275 				mydepgraph.altlist(reversed=("--tree" in myopts)),
8276 				favorites=favorites)
8277 			mydepgraph.display_problems()
8278 			if retval != os.EX_OK:
8281 			for x in mydepgraph.altlist():
8282 				if isinstance(x, Package) and x.operation == "merge":
# When nothing will actually merge, offer world-favorite additions or
# auto-clean instead of a merge prompt.
8286 			sets = trees[settings["ROOT"]]["root_config"].sets
8287 			world_candidates = None
8288 			if "--noreplace" in myopts and \
8289 				not oneshot and favorites:
8290 				# Sets that are not world candidates are filtered
8291 				# out here since the favorites list needs to be
8292 				# complete for depgraph.loadResumeCommand() to
8293 				# operate correctly.
8294 				world_candidates = [x for x in favorites \
8295 					if not (x.startswith(SETPREFIX) and \
8296 					not sets[x[1:]].world_candidate)]
8297 			if "--noreplace" in myopts and \
8298 				not oneshot and world_candidates:
8300 				for x in world_candidates:
8301 					print " %s %s" % (good("*"), x)
8302 				prompt="Would you like to add these packages to your world favorites?"
8303 			elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
8304 				prompt="Nothing to merge; would you like to auto-clean packages?"
8307 				print "Nothing to merge; quitting."
8310 		elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
8311 			prompt="Would you like to fetch the source files for these packages?"
8313 			prompt="Would you like to merge these packages?"
8315 		if "--ask" in myopts and userquery(prompt) == "No":
8320 		# Don't ask again (e.g. when auto-cleaning packages after merge)
8321 		myopts.pop("--ask", None)
# --pretend (without fetch): display only, never merge.
8323 	if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
8324 		if ("--resume" in myopts):
8325 			mymergelist = mydepgraph.altlist()
8326 			if len(mymergelist) == 0:
8327 				print colorize("INFORM", "emerge: It seems we have nothing to resume...")
8329 			favorites = mtimedb["resume"]["favorites"]
8330 			retval = mydepgraph.display(
8331 				mydepgraph.altlist(reversed=tree),
8332 				favorites=favorites)
8333 			mydepgraph.display_problems()
8334 			if retval != os.EX_OK:
8337 			retval = mydepgraph.display(
8338 				mydepgraph.altlist(reversed=("--tree" in myopts)),
8339 				favorites=favorites)
8340 			mydepgraph.display_problems()
8341 			if retval != os.EX_OK:
# --buildpkgonly refuses to run unless all deps are already merged
# (graph must have no non-trivial edges); checked on both branches.
8343 			if "--buildpkgonly" in myopts:
8344 				graph_copy = mydepgraph.digraph.clone()
8345 				for node in list(graph_copy.order):
8346 					if not isinstance(node, Package):
8347 						graph_copy.remove(node)
8348 				if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
8349 					print "\n!!! --buildpkgonly requires all dependencies to be merged."
8350 					print "!!! You have to merge the dependencies before you can build this package.\n"
8353 		if "--buildpkgonly" in myopts:
8354 			graph_copy = mydepgraph.digraph.clone()
8355 			for node in list(graph_copy.order):
8356 				if not isinstance(node, Package):
8357 					graph_copy.remove(node)
8358 			if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
8359 				print "\n!!! --buildpkgonly requires all dependencies to be merged."
8360 				print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Actual merge path.
8363 		if ("--resume" in myopts):
8364 			favorites=mtimedb["resume"]["favorites"]
8365 			mergetask = MergeTask(settings, trees, myopts)
8366 			if "PORTAGE_PARALLEL_FETCHONLY" in settings:
8367 				""" parallel-fetch uses --resume --fetchonly and we don't want
8368 				it to write the mtimedb"""
8369 				mtimedb.filename = None
8370 				time.sleep(3) # allow the parent to have first fetch
8371 			mymergelist = mydepgraph.altlist()
8373 			retval = mergetask.merge(mymergelist, favorites, mtimedb)
8374 			merge_count = mergetask.curval
# Rotate the finished resume list into resume_backup when appropriate.
8376 			if "resume" in mtimedb and \
8377 			"mergelist" in mtimedb["resume"] and \
8378 			len(mtimedb["resume"]["mergelist"]) > 1:
8379 				mtimedb["resume_backup"] = mtimedb["resume"]
8380 				del mtimedb["resume"]
8382 			mtimedb["resume"]={}
8383 			# XXX: Stored as a list for backward compatibility.
8384 			mtimedb["resume"]["myopts"] = \
8385 				[k for k in myopts if myopts[k] is True]
8386 			mtimedb["resume"]["favorites"]=favorites
# --digest (without fetch): regenerate digests for each ebuild to merge.
8387 			if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
8388 				for pkgline in mydepgraph.altlist():
8389 					if pkgline[0]=="ebuild" and pkgline[3]=="merge":
8390 						y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
8391 						tmpsettings = portage.config(clone=settings)
8393 						if settings.get("PORTAGE_DEBUG", "") == "1":
8395 						retval = portage.doebuild(
8396 							y, "digest", settings["ROOT"], tmpsettings, edebug,
8397 							("--pretend" in myopts),
8398 							mydbapi=trees[pkgline[1]]["porttree"].dbapi,
8401 			pkglist = mydepgraph.altlist()
8402 			mydepgraph.saveNomergeFavorites()
8404 			mergetask = MergeTask(settings, trees, myopts)
8405 			retval = mergetask.merge(pkglist, favorites, mtimedb)
8406 			merge_count = mergetask.curval
# Post-merge: auto-clean (if AUTOCLEAN=yes) or warn, then prune the
# preserved-libs registry and run post_emerge housekeeping.
8408 		if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
8409 			if "yes" == settings.get("AUTOCLEAN"):
8410 				portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
8411 				unmerge(trees[settings["ROOT"]]["root_config"],
8412 					myopts, "clean", [],
8413 					ldpath_mtimes, autoclean=1)
8415 				portage.writemsg_stdout(colorize("WARN", "WARNING:")
8416 					+ " AUTOCLEAN is disabled.  This can cause serious"
8417 					+ " problems due to overlapping packages.\n")
8418 			trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
8420 		if merge_count and not (buildpkgonly or fetchonly or pretend):
8421 			post_emerge(trees, mtimedb, retval)
8424 def multiple_actions(action1, action2):
# Report that two mutually-exclusive actions were requested on one
# command line. NOTE(review): the terminating statement (presumably a
# sys.exit) is elided from this excerpt — confirm against the full file.
8425 	sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
8426 	sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
8429 def parse_opts(tmpcmdline, silent=False):
# Parse the emerge command line with optparse into (myaction, myopts,
# myfiles): one action, a dict of boolean/valued options, and the
# remaining arguments. NOTE(review): interior lines are elided in this
# excerpt; comments describe only the visible code.
8434 	global actions, options, shortmapping
# Aliases map legacy spellings onto their canonical long options.
8436 	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, with their help text and valid choices.
8437 	argument_options = {
8439 			"help":"specify the location for portage configuration files",
8443 			"help":"enable or disable color output",
8445 			"choices":("y", "n")
8448 			"help":"include unnecessary build time dependencies",
8450 			"choices":("y", "n")
8453 			"help":"specify conditions to trigger package reinstallation",
8455 			"choices":["changed-use"]
8459 	from optparse import OptionParser
8460 	parser = OptionParser()
# emerge provides its own help action, so optparse's built-in --help is
# removed to avoid a conflict.
8461 	if parser.has_option("--help"):
8462 		parser.remove_option("--help")
# Register every action, boolean option, short option, alias, and
# argument-taking option; dest names replace "-" with "_".
8464 	for action_opt in actions:
8465 		parser.add_option("--" + action_opt, action="store_true",
8466 			dest=action_opt.replace("-", "_"), default=False)
8467 	for myopt in options:
8468 		parser.add_option(myopt, action="store_true",
8469 			dest=myopt.lstrip("--").replace("-", "_"), default=False)
8470 	for shortopt, longopt in shortmapping.iteritems():
8471 		parser.add_option("-" + shortopt, action="store_true",
8472 			dest=longopt.lstrip("--").replace("-", "_"), default=False)
8473 	for myalias, myopt in longopt_aliases.iteritems():
8474 		parser.add_option(myalias, action="store_true",
8475 			dest=myopt.lstrip("--").replace("-", "_"), default=False)
8477 	for myopt, kwargs in argument_options.iteritems():
8478 		parser.add_option(myopt,
8479 			dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
8481 	myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Collect set options back into the myopts dict keyed by option string.
8483 	for myopt in options:
8484 		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
8486 			myopts[myopt] = True
8488 	for myopt in argument_options:
8489 		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# Exactly one action is allowed; a second one triggers multiple_actions().
8493 	for action_opt in actions:
8494 		v = getattr(myoptions, action_opt.replace("-", "_"))
8497 				multiple_actions(myaction, action_opt)
8499 			myaction = action_opt
# Legacy positional actions (e.g. `emerge sync`) are still accepted but
# deprecated in favor of the --option form.
8502 		if x in actions and myaction != "search":
8504 				print red("*** Deprecated use of action '%s', use '--%s' instead" % (x,x))
8505 			# special case "search" so people can search for action terms, e.g. emerge -s sync
8507 				multiple_actions(myaction, x)
# Translate deprecated --nocolor into the equivalent --color=n.
8513 	if "--nocolor" in myopts:
8515 			sys.stderr.write("*** Deprecated use of '--nocolor', " + \
8516 				"use '--color=n' instead.\n")
8517 		del myopts["--nocolor"]
8518 		myopts["--color"] = "n"
8520 	return myaction, myopts, myfiles
8522 def validate_ebuild_environment(trees):
# Walk every configured root and fetch its vartree settings.
# NOTE(review): the statement that acts on `settings` (presumably a
# validation call) is elided from this excerpt — confirm in the full file.
8523 	for myroot in trees:
8524 		settings = trees[myroot]["vartree"].settings
8527 def load_emerge_config(trees=None):
# Build (settings, trees, mtimedb) for an emerge run: create the portage
# trees from PORTAGE_CONFIGROOT/ROOT, attach a RootConfig (with its set
# configuration) to each root, and open the mtime database.
# NOTE(review): some interior lines are elided in this excerpt.
8529 	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8530 		kwargs[k] = os.environ.get(envvar, None)
8531 	trees = portage.create_trees(trees=trees, **kwargs)
8533 	for root, root_trees in trees.iteritems():
8534 		settings = root_trees["vartree"].settings
8535 		setconfig = load_default_config(settings, root_trees)
8536 		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
# Default to the "/" root's settings before any target-root override.
8538 	settings = trees["/"]["vartree"].settings
8540 		for myroot in trees:
8542 				settings = trees[myroot]["vartree"].settings
# The mtimedb lives under CACHE_PATH relative to "/".
8545 	mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
8546 	mtimedb = portage.MtimeDB(mtimedbfile)
8548 	return settings, trees, mtimedb
8550 def adjust_config(myopts, settings):
8551 """Make emerge specific adjustments to the config.
8551
8551 Mutates `settings` in place based on the parsed emerge options in
8551 `myopts`: normalizes case-insensitive variables, strips the `noauto`
8551 feature, validates integer delay/debug settings (falling back to the
8551 module-level defaults on parse errors), and resolves color handling.
8551 Each change is preserved via settings.backup_changes().
8551 NOTE(review): several original lines are elided from this view (e.g.
8551 8552, 8554, 8559-8560, 8567-8569); comments cover only visible lines.
8551 """
8553 # To enhance usability, make some vars case insensitive by forcing them to
# lowercase (the code below calls .lower() on each value).
8555 for myvar in ("AUTOCLEAN", "NOCOLOR"):
8556 if myvar in settings:
8557 settings[myvar] = settings[myvar].lower()
8558 settings.backup_changes(myvar)
8561 # Kill noauto as it will break merges otherwise.
8562 if "noauto" in settings.features:
8563 while "noauto" in settings.features:
8564 settings.features.remove("noauto")
8565 settings["FEATURES"] = " ".join(settings.features)
8566 settings.backup_changes("FEATURES")
# Validate CLEAN_DELAY as an integer; on failure, report and restore the
# default (the enclosing try: on elided line(s) 8567-8569 is not visible).
8570 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
8571 except ValueError, e:
8572 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8573 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
8574 settings["CLEAN_DELAY"], noiselevel=-1)
8575 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
8576 settings.backup_changes("CLEAN_DELAY")
# Same pattern for EMERGE_WARNING_DELAY: default of 10, integer-validated.
8578 EMERGE_WARNING_DELAY = 10
8580 EMERGE_WARNING_DELAY = int(settings.get(
8581 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
8582 except ValueError, e:
8583 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8584 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
8585 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
8586 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
8587 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate command-line quiet/noconfmem flags into the config so ebuild
# helpers see them.
8589 if "--quiet" in myopts:
8590 settings["PORTAGE_QUIET"]="1"
8591 settings.backup_changes("PORTAGE_QUIET")
8593 # Set so that configs will be merged regardless of remembered status
8594 if ("--noconfmem" in myopts):
8595 settings["NOCONFMEM"]="1"
8596 settings.backup_changes("NOCONFMEM")
8598 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must parse as an int and be exactly 0 or 1; anything else
# is reported (the surrounding try: / reset logic is partly elided).
8601 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
8602 if PORTAGE_DEBUG not in (0, 1):
8603 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
8604 PORTAGE_DEBUG, noiselevel=-1)
8605 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
8608 except ValueError, e:
8609 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8610 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
8611 settings["PORTAGE_DEBUG"], noiselevel=-1)
# --debug on the command line forces PORTAGE_DEBUG on (value set on the
# elided original line 8614 — presumably PORTAGE_DEBUG = 1; confirm).
8613 if "--debug" in myopts:
8615 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
8616 settings.backup_changes("PORTAGE_DEBUG")
# Color resolution: NOCOLOR from config, then an explicit --color=y/n
# override, then a no-tty auto-disable as the fallback.
8618 if settings.get("NOCOLOR") not in ("yes","true"):
8619 portage.output.havecolor = 1
8621 """The explicit --color < y | n > option overrides the NOCOLOR environment
8622 variable and stdout auto-detection."""
8623 if "--color" in myopts:
8624 if "y" == myopts["--color"]:
8625 portage.output.havecolor = 1
8626 settings["NOCOLOR"] = "false"
# (the matching else: for --color=n sits on the elided original line 8627)
8628 portage.output.havecolor = 0
8629 settings["NOCOLOR"] = "true"
8630 settings.backup_changes("NOCOLOR")
8631 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
# stdout is not a terminal: disable color unless NOCOLOR explicitly says "no".
8632 portage.output.havecolor = 0
8633 settings["NOCOLOR"] = "true"
8634 settings.backup_changes("NOCOLOR")
8637 global portage # NFC why this is necessary now - genone
8638 # Disable color until we're sure that it should be enabled (after
8639 # EMERGE_DEFAULT_OPTS has been parsed).
8640 portage.output.havecolor = 0
8641 # This first pass is just for options that need to be known as early as
8642 # possible, such as --config-root. They will be parsed again later,
8643 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
8644 # the value of --config-root).
8645 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
8646 if "--debug" in myopts:
8647 os.environ["PORTAGE_DEBUG"] = "1"
8648 if "--config-root" in myopts:
8649 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
8651 # Portage needs to ensure a sane umask for the files it creates.
8653 settings, trees, mtimedb = load_emerge_config()
8654 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8657 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
8658 except (OSError, ValueError), e:
8659 portage.writemsg("!!! Failed to change nice value to '%s'\n" % \
8660 settings["PORTAGE_NICENESS"])
8661 portage.writemsg("!!! %s\n" % str(e))
8664 if portage._global_updates(trees, mtimedb["updates"]):
8666 # Reload the whole config from scratch.
8667 settings, trees, mtimedb = load_emerge_config(trees=trees)
8668 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8670 xterm_titles = "notitles" not in settings.features
8673 if "--ignore-default-opts" not in myopts:
8674 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
8675 tmpcmdline.extend(sys.argv[1:])
8676 myaction, myopts, myfiles = parse_opts(tmpcmdline)
8678 if "--digest" in myopts:
8679 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
8680 # Reload the whole config from scratch so that the portdbapi internal
8681 # config is updated with new FEATURES.
8682 settings, trees, mtimedb = load_emerge_config(trees=trees)
8683 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8685 for myroot in trees:
8686 mysettings = trees[myroot]["vartree"].settings
8688 adjust_config(myopts, mysettings)
8690 del myroot, mysettings
8692 spinner = stdout_spinner()
8693 if "candy" in settings.features:
8694 spinner.update = spinner.update_scroll
8696 if "--quiet" not in myopts:
8697 portage.deprecated_profile_check()
8699 eclasses_overridden = {}
8700 for mytrees in trees.itervalues():
8701 mydb = mytrees["porttree"].dbapi
8702 # Freeze the portdbapi for performance (memoize all xmatch results).
8704 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
8707 if eclasses_overridden and \
8708 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
8710 if len(eclasses_overridden) == 1:
8711 writemsg(prefix + "Overlay eclass overrides " + \
8712 "eclass from PORTDIR:\n", noiselevel=-1)
8714 writemsg(prefix + "Overlay eclasses override " + \
8715 "eclasses from PORTDIR:\n", noiselevel=-1)
8716 writemsg(prefix + "\n", noiselevel=-1)
8717 for eclass_name in sorted(eclasses_overridden):
8718 writemsg(prefix + " '%s/%s.eclass'\n" % \
8719 (eclasses_overridden[eclass_name], eclass_name),
8721 writemsg(prefix + "\n", noiselevel=-1)
8722 msg = "It is best to avoid overridding eclasses from PORTDIR " + \
8723 "because it will trigger invalidation of cached ebuild metadata " + \
8724 "that is distributed with the portage tree. If you must " + \
8725 "override eclasses from PORTDIR then you are advised to add " + \
8726 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
8727 "`emerge --regen` after each time that you run `emerge --sync`. " + \
8728 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
8729 "you would like to disable this warning."
8730 from textwrap import wrap
8731 for line in wrap(msg, 72):
8732 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
8734 if "moo" in myfiles:
8737 Larry loves Gentoo (""" + os.uname()[0] + """)
8739 _______________________
8740 < Have you mooed today? >
8741 -----------------------
8751 ext = os.path.splitext(x)[1]
8752 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
8753 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
8756 # only expand sets for actions taking package arguments
8757 oldargs = myfiles[:]
8758 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
8759 root_config = trees[settings["ROOT"]]["root_config"]
8760 setconfig = root_config.setconfig
8761 # display errors that occurred while loading the SetConfig instance
8762 for e in setconfig.errors:
8763 print colorize("BAD", "Error during set creation: %s" % e)
8765 sets = setconfig.getSets()
8766 # emerge relies on the existence of sets with names "world" and "system"
8767 required_sets = ("world", "system")
8768 for s in required_sets:
8770 msg = ["emerge: incomplete set configuration, " + \
8771 "no \"%s\" set defined" % s]
8772 msg.append(" sets defined: %s" % ", ".join(sets))
8774 sys.stderr.write(line + "\n")
8776 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
8778 # In order to know exactly which atoms/sets should be added to the
8779 # world file, the depgraph performs set expansion later. It will get
8780 # confused about where the atoms came from if it's not allowed to
8781 # expand them itself.
8782 do_not_expand = (None, )
8785 if a in ("system", "world"):
8786 newargs.append(SETPREFIX+a)
8793 if a.startswith(SETPREFIX):
8794 s = a[len(SETPREFIX):]
8796 print "emerge: there are no sets to satisfy %s." % \
8797 colorize("INFORM", s)
8799 setconfig.active.append(s)
8800 if myaction in unmerge_actions and \
8801 not sets[s].supportsOperation("unmerge"):
8802 sys.stderr.write("emerge: the given set %s does " + \
8803 "not support unmerge operations\n" % s)
8805 if not setconfig.getSetAtoms(s):
8806 print "emerge: '%s' is an empty set" % s
8807 elif myaction not in do_not_expand:
8808 newargs.extend(setconfig.getSetAtoms(s))
8810 newargs.append(SETPREFIX+s)
8811 for e in sets[s].errors:
8817 # Need to handle empty sets specially, otherwise emerge will react
8818 # with the help message for empty argument lists
8819 if oldargs and not myfiles:
8820 print "emerge: no targets left after set expansion"
8823 if ("--tree" in myopts) and ("--columns" in myopts):
8824 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
8827 if ("--quiet" in myopts):
8828 spinner.update = spinner.update_quiet
8829 portage.util.noiselimit = -1
8831 # Always create packages if FEATURES=buildpkg
8832 # Imply --buildpkg if --buildpkgonly
8833 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
8834 if "--buildpkg" not in myopts:
8835 myopts["--buildpkg"] = True
8837 # Also allow -S to invoke search action (-sS)
8838 if ("--searchdesc" in myopts):
8839 if myaction and myaction != "search":
8840 myfiles.append(myaction)
8841 if "--search" not in myopts:
8842 myopts["--search"] = True
8845 # Always try and fetch binary packages if FEATURES=getbinpkg
8846 if ("getbinpkg" in settings.features):
8847 myopts["--getbinpkg"] = True
8849 if "--buildpkgonly" in myopts:
8850 # --buildpkgonly will not merge anything, so
8851 # it cancels all binary package options.
8852 for opt in ("--getbinpkg", "--getbinpkgonly",
8853 "--usepkg", "--usepkgonly"):
8854 myopts.pop(opt, None)
8856 if "--skipfirst" in myopts and "--resume" not in myopts:
8857 myopts["--resume"] = True
8859 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
8860 myopts["--usepkgonly"] = True
8862 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
8863 myopts["--getbinpkg"] = True
8865 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
8866 myopts["--usepkg"] = True
8868 # Also allow -K to apply --usepkg/-k
8869 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
8870 myopts["--usepkg"] = True
8872 # Allow -p to remove --ask
8873 if ("--pretend" in myopts) and ("--ask" in myopts):
8874 print ">>> --pretend disables --ask... removing --ask from options."
8877 # forbid --ask when not in a terminal
8878 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
8879 if ("--ask" in myopts) and (not sys.stdin.isatty()):
8880 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
8884 if settings.get("PORTAGE_DEBUG", "") == "1":
8885 spinner.update = spinner.update_quiet
8887 if "python-trace" in settings.features:
8888 import portage.debug
8889 portage.debug.set_trace(True)
8891 if not ("--quiet" in myopts):
8892 if not sys.stdout.isatty() or ("--nospinner" in myopts):
8893 spinner.update = spinner.update_basic
8895 if "--version" in myopts:
8896 print getportageversion(settings["PORTDIR"], settings["ROOT"],
8897 settings.profile_path, settings["CHOST"],
8898 trees[settings["ROOT"]]["vartree"].dbapi)
8900 elif "--help" in myopts:
8901 _emerge.help.help(myaction, myopts, portage.output.havecolor)
8904 if "--debug" in myopts:
8905 print "myaction", myaction
8906 print "myopts", myopts
8908 if not myaction and not myfiles and "--resume" not in myopts:
8909 _emerge.help.help(myaction, myopts, portage.output.havecolor)
8912 pretend = "--pretend" in myopts
8913 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
8914 buildpkgonly = "--buildpkgonly" in myopts
8916 # check if root user is the current user for the actions where emerge needs this
8917 if portage.secpass < 2:
8918 # We've already allowed "--version" and "--help" above.
8919 if "--pretend" not in myopts and myaction not in ("search","info"):
8920 need_superuser = not \
8922 (buildpkgonly and secpass >= 1) or \
8923 myaction in ("metadata", "regen") or \
8924 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
8925 if portage.secpass < 1 or \
8928 access_desc = "superuser"
8930 access_desc = "portage group"
8931 # Always show portage_group_warning() when only portage group
8932 # access is required but the user is not in the portage group.
8933 from portage.data import portage_group_warning
8934 if "--ask" in myopts:
8935 myopts["--pretend"] = True
8937 print ("%s access is required... " + \
8938 "adding --pretend to options.\n") % access_desc
8939 if portage.secpass < 1 and not need_superuser:
8940 portage_group_warning()
8942 sys.stderr.write(("emerge: %s access is " + \
8943 "required.\n\n") % access_desc)
8944 if portage.secpass < 1 and not need_superuser:
8945 portage_group_warning()
8948 disable_emergelog = False
8949 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
8951 disable_emergelog = True
8953 if myaction in ("search", "info"):
8954 disable_emergelog = True
8955 if disable_emergelog:
8956 """ Disable emergelog for everything except build or unmerge
8957 operations. This helps minimize parallel emerge.log entries that can
8958 confuse log parsers. We especially want it disabled during
8959 parallel-fetch, which uses --resume --fetchonly."""
8961 def emergelog(*pargs, **kargs):
8964 if not "--pretend" in myopts:
8965 emergelog(xterm_titles, "Started emerge on: "+\
8966 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
8969 myelogstr=" ".join(myopts)
8971 myelogstr+=" "+myaction
8973 myelogstr += " " + " ".join(oldargs)
8974 emergelog(xterm_titles, " *** emerge " + myelogstr)
8977 def emergeexitsig(signum, frame):
8978 signal.signal(signal.SIGINT, signal.SIG_IGN)
8979 signal.signal(signal.SIGTERM, signal.SIG_IGN)
8980 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
8981 sys.exit(100+signum)
8982 signal.signal(signal.SIGINT, emergeexitsig)
8983 signal.signal(signal.SIGTERM, emergeexitsig)
8986 """This gets out final log message in before we quit."""
8987 if "--pretend" not in myopts:
8988 emergelog(xterm_titles, " *** terminating.")
8989 if "notitles" not in settings.features:
8991 portage.atexit_register(emergeexit)
8993 if myaction in ("config", "metadata", "regen", "sync"):
8994 if "--pretend" in myopts:
8995 sys.stderr.write(("emerge: The '%s' action does " + \
8996 "not support '--pretend'.\n") % myaction)
8998 if "sync" == myaction:
8999 action_sync(settings, trees, mtimedb, myopts, myaction)
9000 elif "metadata" == myaction:
9001 action_metadata(settings, portdb, myopts)
9002 elif myaction=="regen":
9003 validate_ebuild_environment(trees)
9004 action_regen(settings, portdb)
9006 elif "config"==myaction:
9007 validate_ebuild_environment(trees)
9008 action_config(settings, trees, myopts, myfiles)
9011 elif "info"==myaction:
9012 action_info(settings, trees, myopts, myfiles)
9015 elif "search"==myaction:
9016 validate_ebuild_environment(trees)
9017 action_search(trees[settings["ROOT"]]["root_config"],
9018 myopts, myfiles, spinner)
9019 elif myaction in ("clean", "unmerge") or \
9020 (myaction == "prune" and "--nodeps" in myopts):
9021 validate_ebuild_environment(trees)
9022 root_config = trees[settings["ROOT"]]["root_config"]
9023 # When given a list of atoms, unmerge
9024 # them in the order given.
9025 ordered = myaction == "unmerge"
9026 if 1 == unmerge(root_config, myopts, myaction, myfiles,
9027 mtimedb["ldpath"], ordered=ordered):
9028 if not (buildpkgonly or fetchonly or pretend):
9029 post_emerge(trees, mtimedb, os.EX_OK)
9031 elif myaction in ("depclean", "prune"):
9032 validate_ebuild_environment(trees)
9033 action_depclean(settings, trees, mtimedb["ldpath"],
9034 myopts, myaction, myfiles, spinner)
9035 if not (buildpkgonly or fetchonly or pretend):
9036 post_emerge(trees, mtimedb, os.EX_OK)
9037 # "update", "system", or just process files:
9039 validate_ebuild_environment(trees)
9040 if "--pretend" not in myopts:
9041 display_news_notification(trees)
9042 retval = action_build(settings, trees, mtimedb,
9043 myopts, myaction, myfiles, spinner)
9044 # if --pretend was not enabled then display_news_notification
9045 # was already called by post_emerge
9046 if "--pretend" in myopts:
9047 display_news_notification(trees)