2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
# This block ensures that ^C interrupts are handled quietly.
# NOTE(review): this excerpt elides surrounding lines (the enclosing try:
# and the handler's exit call); the visible code is kept verbatim.
def exithandler(signum,frame):
	# Ignore further INT/TERM so the handler is not re-entered while
	# shutting down; presumably followed by an exit call in elided lines.
	signal.signal(signal.SIGINT, signal.SIG_IGN)
	signal.signal(signal.SIGTERM, signal.SIG_IGN)

signal.signal(signal.SIGINT, exithandler)
signal.signal(signal.SIGTERM, exithandler)

# SIGPIPE gets the default disposition so writes to a closed pipe terminate
# the process quietly instead of raising IOError all over the place.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)

except KeyboardInterrupt:
25 os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
29 from os import path as osp
30 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 del os.environ["PORTAGE_LEGACY_GLOBALS"]
33 from portage import digraph, portdbapi
34 from portage.const import NEWS_LIB_PATH, CACHE_PATH, PRIVATE_PATH, USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
37 import portage.xpak, commands, errno, re, socket, time, types
38 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
39 havecolor, nc_len, nocolor, red, teal, turquoise, white, xtermTitle, \
40 xtermTitleReset, yellow
41 from portage.output import create_color_func
42 good = create_color_func("GOOD")
43 bad = create_color_func("BAD")
44 # white looks bad on terminals with white background
45 from portage.output import bold as white
48 portage.dep._dep_check_strict = True
51 import portage.exception
52 from portage.data import secpass
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
58 from itertools import chain, izip
59 from UserDict import DictMixin
64 import pickle as cPickle
class stdout_spinner(object):
	"""Animated progress indicator written to stdout; the active style is
	selected by assigning one of the update_* methods to self.update."""
	# NOTE(review): this excerpt elides several original lines (the
	# scroll_msgs list header/footer, the __init__ header, return
	# statements and else branches); visible code is kept verbatim.

	# Humorous status messages cycled through by update_scroll().
		"Gentoo Rocks ("+os.uname()[0]+")",
		"Thank you for using Gentoo. :)",
		"Are you actually trying to read this?",
		"How many times have you stared at this?",
		"We are generating the cache right now",
		"You are paying too much attention.",
		"A theory is better than its explanation.",
		"Phasers locked on target, Captain.",
		"Thrashing is just virtual crashing.",
		"To be is to program.",
		"Real Users hate Real Programmers.",
		"When all else fails, read the instructions.",
		"Functionality breeds Contempt.",
		"The future lies ahead.",
		"3.1415926535897932384626433832795028841971694",
		"Sometimes insanity is the only alternative.",
		"Inaccuracy saves a world of explanation.",

	# Frames of the twirling-cursor animation.
	twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"

		# Default style; pick a scroll message pseudo-randomly from the
		# current time.
		self.update = self.update_twirl
		self.scroll_sequence = self.scroll_msgs[
			int(time.time() * 100) % len(self.scroll_msgs)]
		# Minimum seconds between terminal writes (see _return_early).
		self.min_display_latency = 0.05

	def _return_early(self):
		"""
		Flushing output to the tty too frequently wastes cpu time. Therefore,
		each update* method should return without doing any output when this
		method returns True.
		"""
		cur_time = time.time()
		if cur_time - self.last_update < self.min_display_latency:
		self.last_update = cur_time

	def update_basic(self):
		# Print a dot roughly once per 100 calls.
		self.spinpos = (self.spinpos + 1) % 500
		if self._return_early():
		if (self.spinpos % 100) == 0:
			if self.spinpos == 0:
				sys.stdout.write(". ")
				sys.stdout.write(".")

	def update_scroll(self):
		# Scroll the chosen message across the terminal, forward then back.
		if self._return_early():
		if(self.spinpos >= len(self.scroll_sequence)):
			sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
				len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
			sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
		self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))

	def update_twirl(self):
		# Advance the twirl cursor one frame.
		self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
		if self._return_early():
		sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])

	def update_quiet(self):
def userquery(prompt, responses=None, colours=None):
	"""Displays a prompt and a set of responses, then waits for a response
	which is checked against the responses and the first to match is
	returned. An empty response will match the first value in responses. The
	input buffer is *not* cleared prior to the prompt!

	responses: a List of Strings.
	colours: a List of Functions taking and returning a String, used to
	process the responses for display. Typically these will be functions
	like red() but could be e.g. lambda x: "DisplayString".
	If responses is omitted, defaults to ["Yes", "No"], [green, red].
	If only colours is omitted, defaults to [bold, ...].

	Returns a member of the List responses. (If called without optional
	arguments, returns "Yes" or "No".)
	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
	printed."""
	# NOTE(review): this excerpt elides several lines (the colours list
	# brackets, the prompt print, the try/while framing, returns and the
	# SystemExit conversion); visible code is kept verbatim.
	if responses is None:
		responses = ["Yes", "No"]
			create_color_func("PROMPT_CHOICE_DEFAULT"),
			create_color_func("PROMPT_CHOICE_OTHER")
	elif colours is None:
	# Repeat the colour list so every response gets a colour function.
	colours=(colours*len(responses))[:len(responses)]
		response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
		for key in responses:
			# An empty response will match the first value in responses.
			if response.upper()==key[:len(response)].upper():
		print "Sorry, response '%s' not understood." % response,
	except (EOFError, KeyboardInterrupt):
182 "clean", "config", "depclean",
184 "prune", "regen", "search",
188 "--ask", "--alphabetical",
189 "--buildpkg", "--buildpkgonly",
190 "--changelog", "--columns",
195 "--fetchonly", "--fetch-all-uri",
196 "--getbinpkg", "--getbinpkgonly",
197 "--help", "--ignore-default-opts",
199 "--newuse", "--nocolor",
200 "--nodeps", "--noreplace",
201 "--nospinner", "--oneshot",
202 "--onlydeps", "--pretend",
203 "--quiet", "--resume",
204 "--searchdesc", "--selective",
208 "--usepkg", "--usepkgonly",
209 "--verbose", "--version"
215 "b":"--buildpkg", "B":"--buildpkgonly",
216 "c":"--clean", "C":"--unmerge",
217 "d":"--debug", "D":"--deep",
219 "f":"--fetchonly", "F":"--fetch-all-uri",
220 "g":"--getbinpkg", "G":"--getbinpkgonly",
222 "k":"--usepkg", "K":"--usepkgonly",
224 "n":"--noreplace", "N":"--newuse",
225 "o":"--onlydeps", "O":"--nodeps",
226 "p":"--pretend", "P":"--prune",
228 "s":"--search", "S":"--searchdesc",
231 "v":"--verbose", "V":"--version"
def emergelog(xterm_titles, mystr, short_msg=None):
	"""Append a timestamped entry to /var/log/emerge.log, optionally
	updating the xterm title; I/O errors are printed to stderr, not raised."""
	# NOTE(review): this excerpt elides several lines (the try: framing,
	# apply_secpass_permissions trailing arguments, the seek call and log
	# close); visible code is kept verbatim.
	if short_msg == None:
	if "HOSTNAME" in os.environ:
		short_msg = os.environ["HOSTNAME"]+": "+short_msg
	xtermTitle(short_msg)
	file_path = "/var/log/emerge.log"
	mylogfile = open(file_path, "a")
	# Keep the log owned/permissioned for the portage user and group.
	portage.util.apply_secpass_permissions(file_path,
		uid=portage.portage_uid, gid=portage.portage_gid,
	mylock = portage.locks.lockfile(mylogfile)
	# seek because we may have gotten held up by the lock.
	# if so, we may not be positioned at the end of the file.
	mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
	portage.locks.unlockfile(mylock)
	except (IOError,OSError,portage.exception.PortageException), e:
		print >> sys.stderr, "emergelog():",e
def countdown(secs=5, doing="Starting"):
	# Print a visible countdown before proceeding so the user can ^C.
	# NOTE(review): the loop header, sleep and guard lines are elided in
	# this excerpt; visible code is kept verbatim.
	print ">>> Waiting",secs,"seconds before starting..."
	print ">>> (Control-C to abort)...\n"+doing+" in: ",
		sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
# formats a size given in bytes nicely
def format_size(mysize):
	# NOTE(review): this excerpt elides the non-integer fallback branch,
	# the mycount computation and the final return; visible code is kept
	# verbatim.
	if type(mysize) not in [types.IntType,types.LongType]:
	if 0 != mysize % 1024:
		# Always round up to the next kB so that it doesn't show 0 kB when
		# some small file still needs to be fetched.
		mysize += 1024 - mysize % 1024
	mystr=str(mysize/1024)
	# Insert a thousands separator into the kB figure.
	mystr=mystr[:mycount]+","+mystr[mycount:]
def getgccversion(chost):
	"""
	return: the current in-use gcc version
	"""
	# NOTE(review): this excerpt elides a few lines (docstring framing,
	# the closing paren of gcc_not_found_error and blank separators);
	# visible code is kept verbatim. The detection strategies below are
	# tried in order, most specific first.
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"

	# 1) eselect-compiler, which reports "CHOST/version".
	mystatus, myoutput = commands.getstatusoutput("eselect compiler show")
	if mystatus == os.EX_OK and len(myoutput.split("/")) == 2:
		part1, part2 = myoutput.split("/")
		if part1.startswith(chost + "-"):
			return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# 2) gcc-config's current profile.
	mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# 3) The CHOST-prefixed gcc binary itself.
	mystatus, myoutput = commands.getstatusoutput(
		chost + "-" + gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	# 4) Plain "gcc" as a last resort.
	mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
	if mystatus == os.EX_OK:
		return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""Return a one-line Portage version banner including the active
	profile, gcc version, libc version and uname kernel/arch fields."""
	# NOTE(review): this excerpt elides try/except framing, the libc loop
	# header and branch keywords; visible code is kept verbatim.
	profilever = "unavailable"
	realpath = os.path.realpath(profile)
	basepath = os.path.realpath(os.path.join(portdir, "profiles"))
	if realpath.startswith(basepath):
		# Report the profile path relative to $PORTDIR/profiles.
		profilever = realpath[1 + len(basepath):]
		# Otherwise fall back to the raw symlink target, marked with "!".
		profilever = "!" + os.readlink(profile)
	del realpath, basepath

	# Determine the installed libc version via the virtuals.
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
	xs=portage.catpkgsplit(x)
	libcver+=","+"-".join(xs[1:])
	libcver="-".join(xs[1:])
	libcver="unavailable"

	gccver = getgccversion(chost)
	unameout=os.uname()[2]+" "+os.uname()[4]

	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
def create_depgraph_params(myopts, myaction):
	#configure emerge engine parameters
	#
	# self: include _this_ package regardless of if it is merged.
	# selective: exclude the package if it is merged
	# recurse: go into the dependencies
	# deep: go into the dependencies of already merged packages
	# empty: pretend nothing is merged
	# complete: completely account for all known dependencies
	myparams = set(["recurse"])
	if "--update" in myopts or \
		"--newuse" in myopts or \
		"--reinstall" in myopts or \
		"--noreplace" in myopts:
		myparams.add("selective")
	if "--emptytree" in myopts:
		myparams.add("empty")
		# --emptytree supersedes selective mode.
		myparams.discard("selective")
	if "--nodeps" in myopts:
		myparams.discard("recurse")
	if "--deep" in myopts:
	# NOTE(review): the "--deep" body and the final return of myparams are
	# elided in this excerpt.
	if "--complete-graph" in myopts:
		myparams.add("complete")
# search functionality
class search(object):
	"""Implements `emerge --search`: matches package names (and optionally
	descriptions) across the configured package databases and prints the
	results."""
	# NOTE(review): this excerpt elides many original lines throughout the
	# class (class constants such as VERSION_RELEASE, some method and loop
	# headers, else/try/except branches and return statements); all
	# visible code is kept verbatim.

	def __init__(self, root_config, spinner, searchdesc,
		verbose, usepkg, usepkgonly):
		"""Searches the available and installed packages for the supplied search key.
		The list of available and installed packages is created at object instantiation.
		This makes successive searches faster."""
		self.settings = root_config.settings
		self.vartree = root_config.trees["vartree"]
		self.spinner = spinner
		self.verbose = verbose
		self.searchdesc = searchdesc
		self.setconfig = root_config.setconfig

		# Route dbapi-style calls through this object's _-prefixed wrappers
		# so one interface can consult several databases.
		self.portdb = fake_portdb
		for attrib in ("aux_get", "cp_all",
			"xmatch", "findname", "getfetchlist"):
			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))

		portdb = root_config.trees["porttree"].dbapi
		bindb = root_config.trees["bintree"].dbapi
		vardb = root_config.trees["vartree"].dbapi
		# Choose which databases participate, based on usepkg/usepkgonly.
		if not usepkgonly and portdb._have_root_eclass_dir:
			self._dbs.append(portdb)
		if (usepkg or usepkgonly) and bindb.cp_all():
			self._dbs.append(bindb)
		self._dbs.append(vardb)
		self._portdb = portdb

	# Union of cp_all() over every configured database (the method header
	# and loop header are elided in this excerpt).
		cp_all.update(db.cp_all())
		return list(sorted(cp_all))

	def _aux_get(self, *args, **kwargs):
		# Ask each database in turn; the first that answers wins.
			return db.aux_get(*args, **kwargs)

	def _findname(self, *args, **kwargs):
		if db is not self._portdb:
			# We don't want findname to return anything
			# unless it's an ebuild in a portage tree.
			# Otherwise, it's already built and we don't
		func = getattr(db, "findname", None)
		value = func(*args, **kwargs)

	def _getfetchlist(self, *args, **kwargs):
		# Delegate to the first database that implements getfetchlist.
		func = getattr(db, "getfetchlist", None)
		value = func(*args, **kwargs)

	def _visible(self, db, cpv, metadata):
		# Visibility check that knows whether cpv came from the vardb
		# (installed), a binary tree (built) or the portage tree.
		installed = db is self.vartree.dbapi
		built = installed or db is not self._portdb
		pkg_type = "installed"
		return visible(self.settings,
			Package(type_name=pkg_type, root=self.settings["ROOT"],
			cpv=cpv, built=built, installed=installed, metadata=metadata))

	def _xmatch(self, level, atom):
		"""
		This method does not expand old-style virtuals because it
		is restricted to returning matches for a single ${CATEGORY}/${PN}
		and old-style virtual matches are unreliable for that when querying
		multiple package databases. If necessary, old-style virtuals
		can be performed on atoms prior to calling this method.
		"""
		cp = portage.dep_getkey(atom)
		if level == "match-all":
			# Union of matches from every database; use xmatch when the
			# database provides it, plain match otherwise.
			if hasattr(db, "xmatch"):
				matches.update(db.xmatch(level, atom))
				matches.update(db.match(atom))
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "match-visible":
			if hasattr(db, "xmatch"):
				matches.update(db.xmatch(level, atom))
				# Without xmatch, filter match() results by visibility.
				db_keys = list(db._aux_cache_keys)
				for cpv in db.match(atom):
					metadata = dict(izip(db_keys,
						db.aux_get(cpv, db_keys)))
					if not self._visible(db, cpv, metadata):
			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
			db._cpv_sort_ascending(result)
		elif level == "bestmatch-visible":
			if hasattr(db, "xmatch"):
				cpv = db.xmatch("bestmatch-visible", atom)
				if not cpv or portage.cpv_getkey(cpv) != cp:
				if not result or cpv == portage.best([cpv, result]):
				db_keys = list(db._aux_cache_keys)
				# break out of this loop with highest visible
				# match, checked in descending order
				for cpv in reversed(db.match(atom)):
					if portage.cpv_getkey(cpv) != cp:
					metadata = dict(izip(db_keys,
						db.aux_get(cpv, db_keys)))
					if not self._visible(db, cpv, metadata):
					if not result or cpv == portage.best([cpv, result]):
			raise NotImplementedError(level)

	def execute(self,searchkey):
		"""Performs the search for the supplied search key"""
		self.searchkey=searchkey
		self.packagematches = []
		self.matches = {"pkg":[], "desc":[], "set":[]}
		self.matches = {"pkg":[], "set":[]}
		print "Searching... ",

		# Leading '%' selects regex matching; leading '@' restricts the
		# match to the package-name component only.
		if self.searchkey.startswith('%'):
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			self.searchkey = self.searchkey[1:]
		self.searchre=re.compile(self.searchkey,re.I)
		self.searchre=re.compile(re.escape(self.searchkey), re.I)
		for package in self.portdb.cp_all():
			self.spinner.update()

			match_string = package[:]
			match_string = package.split("/")[-1]

			if self.searchre.search(match_string):
				if not self.portdb.xmatch("match-visible", package):
				self.matches["pkg"].append([package,masked])
			elif self.searchdesc: # DESCRIPTION searching
				full_package = self.portdb.xmatch("bestmatch-visible", package)
				#no match found; we don't want to query description
				full_package = portage.best(
					self.portdb.xmatch("match-all", package))
				full_desc = self.portdb.aux_get(
					full_package, ["DESCRIPTION"])[0]
				print "emerge: search: aux_get() failed, skipping"
				if self.searchre.search(full_desc):
					self.matches["desc"].append([full_package,masked])

		# Package sets are matched by name and, with --searchdesc, by
		# their DESCRIPTION metadata.
		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self.spinner.update()
			match_string = setname
			match_string = setname.split("/")[-1]

			if self.searchre.search(match_string):
				self.matches["set"].append([setname, False])
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					self.matches["set"].append([setname, False])

		# Sort each result category and total up the match count.
		for mtype in self.matches:
			self.matches[mtype].sort()
			self.mlen += len(self.matches[mtype])

	# The "def output(self):" header is elided in this excerpt.
		"""Outputs the results of the search."""
		print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]"
		print "[ Applications found : "+white(str(self.mlen))+" ]"
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
				full_package = self.portdb.xmatch(
					"bestmatch-visible", match)
				#no match found; we don't want to query description
				full_package = portage.best(
					self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					match = portage.cpv_getkey(match)
				print green("*")+" "+white(match)
				print " ", darkgreen("Description:")+" ", self.sdict[match].getMetadata("DESCRIPTION")
				desc, homepage, license = self.portdb.aux_get(
					full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
				print "emerge: search: aux_get() failed, skipping"
				print green("*")+" "+white(match)+" "+red("[ Masked ]")
				print green("*")+" "+white(match)
				myversion = self.getVersion(full_package, search.VERSION_RELEASE)

				mycat = match.split("/")[0]
				mypkg = match.split("/")[1]
				mycpv = match + "-" + myversion
				# Compute the total download size from the Manifest.
				myebuild = self.portdb.findname(mycpv)
				pkgdir = os.path.dirname(myebuild)
				from portage import manifest
				mf = manifest.Manifest(
					pkgdir, self.settings["DISTDIR"])
				fetchlist = self.portdb.getfetchlist(mycpv,
					mysettings=self.settings, all=True)[1]
				mysum[0] = mf.getDistfilesSize(fetchlist)
				file_size_str = "Unknown (missing digest for %s)" % \
				if db is not vardb and \
					db.cpv_exists(mycpv):
					# For binary packages, report the package file size.
					if not myebuild and hasattr(db, "bintree"):
						myebuild = db.bintree.getname(mycpv)
					mysum[0] = os.stat(myebuild).st_size
				if myebuild and file_size_str is None:
					# Render the size in kB with a thousands separator.
					mystr = str(mysum[0] / 1024)
					mystr = mystr[:mycount] + "," + mystr[mycount:]
					file_size_str = mystr + " kB"

				print " ", darkgreen("Latest version available:"),myversion
				print " ", self.getInstallationStatus(mycat+'/'+mypkg)
					(darkgreen("Size of files:"), file_size_str)
				print " ", darkgreen("Homepage:")+" ",homepage
				print " ", darkgreen("Description:")+" ",desc
				print " ", darkgreen("License:")+" ",license

	def getInstallationStatus(self,package):
		# Describe whether (and which version of) package is installed.
		installed_package = self.vartree.dep_bestmatch(package)
		version = self.getVersion(installed_package,search.VERSION_RELEASE)
		result = darkgreen("Latest version installed:")+" "+version
		result = darkgreen("Latest version installed:")+" [ Not Installed ]"

	def getVersion(self,full_package,detail):
		# Extract the version (and revision, at VERSION_RELEASE detail)
		# from a full cpv string.
		if len(full_package) > 1:
			package_parts = portage.catpkgsplit(full_package)
			# Append the revision unless it is the default r0.
			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
				result = package_parts[2]+ "-" + package_parts[3]
			result = package_parts[2]
class RootConfig(object):
	"""This is used internally by depgraph to track information about a
	particular root (settings, trees and configured package sets)."""
	def __init__(self, settings, trees, setconfig):
		# NOTE(review): one or more assignment lines (presumably
		# self.trees = trees) are elided in this excerpt.
		self.settings = settings
		self.root = self.settings["ROOT"]
		self.setconfig = setconfig
		self.sets = self.setconfig.getSets()
		self.visible_pkgs = PackageVirtualDbapi(self.settings)
def create_world_atom(pkg_key, metadata, args_set, root_config):
	"""Create a new atom for the world file if one does not exist. If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots. Unslotted system packages will not be
	stored in world."""
	# NOTE(review): this excerpt elides several lines (early returns,
	# new_world_atom initialization, loop headers and else branches);
	# visible code is kept verbatim.
	arg_atom = args_set.findAtomForPackage(pkg_key, metadata)
	cp = portage.dep_getkey(arg_atom)
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	# Decide whether the package is slotted: more than one SLOT available,
	# or a single SLOT that is not "0".
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	# check the vdb in case this is multislot
	available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in vardb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = "%s:%s" % (cp, metadata["SLOT"])

		# For USE=multislot, there are a couple of cases to
		# handle:
		#
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		# unknown value, so just record an unslotted atom.
		#
		# 2) SLOT comes from an installed package and there is no
		# matching SLOT in the portage tree.
		#
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above). If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).

		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			# If there is no installed package matching the SLOT atom,
			# it probably changed SLOT spontaneously due to USE=multislot,
			# so just record an unslotted atom.
			if vardb.match(slot_atom):
				# Now verify that the argument is precise
				# enough to identify a specific slot.
				matches = mydb.match(arg_atom)
				matched_slots = set()
				matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
				if len(matched_slots) == 1:
					new_world_atom = slot_atom

	if new_world_atom == sets["world"].findAtomForPackage(pkg_key, metadata):
		# Both atoms would be identical, so there's nothing to add.

	# Unlike world atoms, system atoms are not greedy for slots, so they
	# can't be safely excluded from world if they are slotted.
	system_atom = sets["system"].findAtomForPackage(pkg_key, metadata)
	if not portage.dep_getkey(system_atom).startswith("virtual/"):
		# System virtuals aren't safe to exclude from world since they can
		# match multiple old-style virtuals but only one of them will be
		# pulled in by update or depclean.
		providers = portdb.mysettings.getvirtuals().get(
			portage.dep_getkey(system_atom))
		if providers and len(providers) == 1 and providers[0] == cp:
	return new_world_atom
def filter_iuse_defaults(iuse):
	# Strip the leading +/- default markers from IUSE flags.
	# NOTE(review): the loop header and yield statements are elided in
	# this excerpt.
		if flag.startswith("+") or flag.startswith("-"):
class SlotObject(object):
	"""Base class whose constructor assigns keyword arguments onto the
	__slots__ declared across the class hierarchy."""
	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		# Traverse this class and its bases, collecting each class's
		# declared __slots__ and initializing them from kwargs.
		# NOTE(review): the traversal loop headers are elided in this
		# excerpt; visible code is kept verbatim.
		classes = [self.__class__]
			classes.extend(c.__bases__)
			slots = getattr(c, "__slots__", None)
				myvalue = kwargs.get(myattr, None)
				setattr(self, myattr, myvalue)
class AbstractDepPriority(SlotObject):
	"""Comparable dependency priority; all rich comparisons delegate to the
	integer produced by the subclass-provided __int__()."""
	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return self.__int__() < other

	def __le__(self, other):
		return self.__int__() <= other

	def __eq__(self, other):
		return self.__int__() == other

	def __ne__(self, other):
		return self.__int__() != other

	def __gt__(self, other):
		return self.__int__() > other

	def __ge__(self, other):
		return self.__int__() >= other

	# NOTE(review): the enclosing method header (presumably a copy()
	# method) is elided in this excerpt.
		return copy.copy(self)
class DepPriority(AbstractDepPriority):
	"""
	This class generates an integer priority level based of various
	attributes of the dependency relationship. Attributes can be assigned
	at any time and the new integer value will be generated on calls to the
	__int__() method. Rich comparison operators are supported.

	The boolean attributes that affect the integer value are "satisfied",
	"buildtime", "runtime", and "system". Various combinations of
	attributes lead to the following priority levels:

	Combination of properties             Priority  Category

	not satisfied and buildtime               0     HARD
	not satisfied and runtime                -1     MEDIUM
	not satisfied and runtime_post           -2     MEDIUM_SOFT
	satisfied and buildtime and rebuild      -3     SOFT
	satisfied and buildtime                  -4     SOFT
	satisfied and runtime                    -5     SOFT
	satisfied and runtime_post               -6     SOFT
	(none of the above)                      -6     SOFT

	Several integer constants are defined for categorization of priority
	levels:

	MEDIUM       The upper boundary for medium dependencies.
	MEDIUM_SOFT  The upper boundary for medium-soft dependencies.
	SOFT         The upper boundary for soft dependencies.
	MIN          The lower boundary for soft dependencies.
	"""
	__slots__ = ("satisfied", "rebuild")
	# NOTE(review): the category constants and the __int__()/__str__()
	# method headers are elided in this excerpt; visible fragments of
	# those methods follow, kept verbatim.
		if not self.satisfied:
			if self.runtime_post:
		if self.runtime_post:
		myvalue = self.__int__()
		if myvalue > self.MEDIUM:
		if myvalue > self.MEDIUM_SOFT:
		if myvalue > self.SOFT:
# Priority subclass for blockers, exposed as a shared singleton via the
# class attribute assigned below. NOTE(review): the class body is elided
# in this excerpt.
class BlockerDepPriority(DepPriority):
BlockerDepPriority.instance = BlockerDepPriority()
class UnmergeDepPriority(AbstractDepPriority):
	"""
	Combination of properties             Priority  Category
	(none of the above)                      -2     SOFT
	"""
	# NOTE(review): most of the class body (remaining docstring rows,
	# constants, and the __int__()/__str__() method headers) is elided in
	# this excerpt; visible fragments follow, kept verbatim.
		if self.runtime_post:
		myvalue = self.__int__()
		if myvalue > self.SOFT:
class FakeVartree(portage.vartree):
	"""This implements an in-memory copy of a vartree instance that provides
	all the interfaces required for use by the depgraph. The vardb is locked
	during the constructor call just long enough to read a copy of the
	installed package information. This allows the depgraph to do its
	dependency calculations without holding a lock on the vardb. It also
	allows things like vardb global updates to be done in memory so that the
	user doesn't necessarily need write access to the vardb in cases where
	global updates are necessary (updates are performed when necessary if there
	is not a matching ebuild in the tree)."""
	# NOTE(review): this excerpt elides a number of lines (mykeys and
	# slot_counters initialization, try/finally framing, loop else
	# branches, continue statements and returns); visible code is kept
	# verbatim.
	def __init__(self, real_vartree, portdb, db_keys, pkg_cache):
		self.root = real_vartree.root
		self.settings = real_vartree.settings
		# COUNTER and SLOT are always needed for the slot bookkeeping
		# below, regardless of the caller's requested db_keys.
		for required_key in ("COUNTER", "SLOT"):
			if required_key not in mykeys:
				mykeys.append(required_key)
		self._pkg_cache = pkg_cache
		self.dbapi = PackageVirtualDbapi(real_vartree.settings)
		vdb_path = os.path.join(self.root, portage.VDB_PATH)
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
		except portage.exception.PortageException:
		if os.access(vdb_path, os.W_OK):
			vdb_lock = portage.locks.lockdir(vdb_path)
		real_dbapi = real_vartree.dbapi
		# Snapshot every installed package into the in-memory dbapi.
		for cpv in real_dbapi.cpv_all():
			cache_key = ("installed", self.root, cpv, "nomerge")
			pkg = self._pkg_cache.get(cache_key)
			metadata = pkg.metadata
			metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
			myslot = metadata["SLOT"]
			mycp = portage.dep_getkey(cpv)
			myslot_atom = "%s:%s" % (mycp, myslot)
			mycounter = long(metadata["COUNTER"])
			metadata["COUNTER"] = str(mycounter)
			# Track the highest COUNTER seen per slot atom.
			other_counter = slot_counters.get(myslot_atom, None)
			if other_counter is not None:
				if other_counter > mycounter:
			slot_counters[myslot_atom] = mycounter
			pkg = Package(built=True, cpv=cpv,
				installed=True, metadata=metadata,
				root=self.root, type_name="installed")
			self._pkg_cache[pkg] = pkg
			self.dbapi.cpv_inject(pkg)
		real_dbapi.flush_cache()
		portage.locks.unlockdir(vdb_lock)
		# Populate the old-style virtuals using the cached values.
		if not self.settings.treeVirtuals:
			self.settings.treeVirtuals = portage.util.map_dictlist_vals(
				portage.getCPFromCPV, self.get_all_provides())

		# Initialize variables needed for lazy cache pulls of the live ebuild
		# metadata. This ensures that the vardb lock is released ASAP, without
		# being delayed in case cache generation is triggered.
		self._aux_get = self.dbapi.aux_get
		self.dbapi.aux_get = self._aux_get_wrapper
		self._match = self.dbapi.match
		self.dbapi.match = self._match_wrapper
		self._aux_get_history = set()
		self._portdb_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
		self._portdb = portdb
		self._global_updates = None

	def _match_wrapper(self, cpv, use_cache=1):
		"""
		Make sure the metadata in Package instances gets updated for any
		cpv that is returned from a match() call, since the metadata can
		be accessed directly from the Package instance instead of via
		aux_get().
		"""
		matches = self._match(cpv, use_cache=use_cache)
		if cpv in self._aux_get_history:
		self._aux_get_wrapper(cpv, [])

	def _aux_get_wrapper(self, pkg, wants):
		# Only pull live metadata once per pkg; afterwards serve cached.
		if pkg in self._aux_get_history:
			return self._aux_get(pkg, wants)
		self._aux_get_history.add(pkg)
		# Use the live ebuild metadata if possible.
		live_metadata = dict(izip(self._portdb_keys,
			self._portdb.aux_get(pkg, self._portdb_keys)))
		self.dbapi.aux_update(pkg, live_metadata)
		except (KeyError, portage.exception.PortageException):
			# No matching ebuild: apply global updates to stored metadata.
			if self._global_updates is None:
				self._global_updates = \
					grab_global_updates(self._portdb.porttree_root)
			perform_global_updates(
				pkg, self.dbapi, self._global_updates)
		return self._aux_get(pkg, wants)
def grab_global_updates(portdir):
	"""Read and parse the update commands under $PORTDIR/profiles/updates."""
	# NOTE(review): this excerpt elides the upd_commands initialization,
	# the DirectoryNotFound handler body and the final return; visible
	# code is kept verbatim.
	from portage.update import grab_updates, parse_updates
	updpath = os.path.join(portdir, "profiles", "updates")
	rawupdates = grab_updates(updpath)
	except portage.exception.DirectoryNotFound:
	for mykey, mystat, mycontent in rawupdates:
		commands, errors = parse_updates(mycontent)
		upd_commands.extend(commands)
def perform_global_updates(mycpv, mydb, mycommands):
	"""Apply global update commands to the *DEPEND metadata of a single
	package entry in mydb."""
	# NOTE(review): a guard line before the aux_update call is elided in
	# this excerpt.
	from portage.update import update_dbentries
	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
	updates = update_dbentries(mycommands, aux_dict)
	mydb.aux_update(mycpv, updates)
def visible(pkgsettings, pkg):
	"""
	Check if a package is visible. This can raise an InvalidDependString
	exception if LICENSE is invalid.
	TODO: optionally generate a list of masking reasons
	@returns: True if the package is visible, False otherwise.
	"""
	# NOTE(review): the early-return line of each check and the try:
	# framing around the license check are elided in this excerpt;
	# visible code is kept verbatim.
	if not pkg.metadata["SLOT"]:
	if pkg.built and not pkg.installed:
		# Binary packages built for a different CHOST are not visible.
		pkg_chost = pkg.metadata.get("CHOST")
		if pkg_chost and pkg_chost != pkgsettings["CHOST"]:
	if not portage.eapi_is_supported(pkg.metadata["EAPI"]):
	if not pkg.installed and \
		pkgsettings.getMissingKeywords(pkg.cpv, pkg.metadata):
	if pkgsettings.getMaskAtom(pkg.cpv, pkg.metadata):
	if pkgsettings.getProfileMaskAtom(pkg.cpv, pkg.metadata):
	if pkgsettings.getMissingLicenses(pkg.cpv, pkg.metadata):
	except portage.exception.InvalidDependString:
def get_masking_status(pkg, pkgsettings, root_config):
	# Collect human-readable masking reasons for pkg.
	# NOTE(review): framing lines (mreasons post-processing and the final
	# return) are elided in this excerpt; visible code is kept verbatim.
	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)

	if pkg.built and not pkg.installed:
		# A binary package built for a different CHOST is reported masked.
		pkg_chost = pkg.metadata.get("CHOST")
		if pkg_chost and pkg_chost != pkgsettings["CHOST"]:
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])

	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	# Fetch metadata for cpv from db and compute its mask reasons; if the
	# metadata cannot be read, mreasons is ["corruption"].
	# NOTE(review): try/except framing and else branches are elided in
	# this excerpt; visible code is kept verbatim.
	metadata = dict(izip(db_keys,
		db.aux_get(cpv, db_keys)))
	if metadata and not built:
		# Calculate USE for ebuilds so masking checks see the real flags.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
	if metadata is None:
		mreasons = ["corruption"]
		pkg = Package(type_name=pkg_type, root=root_config.root,
			cpv=cpv, built=built, installed=installed, metadata=metadata)
		mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""Print masked packages with their mask reasons, package.mask comments
	and license file locations; returns True if any EAPI mask was seen."""
	# NOTE(review): this excerpt elides several lines (shown_cpvs
	# initialization, continue statements, comment printing and try:
	# framing); visible code is kept verbatim.
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
		comment, filename = None, None
		if "package.mask" in mreasons:
			# Pull the explanatory comment from the mask file itself.
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if not portage.eapi_is_supported(metadata["EAPI"]):
			have_eapi_mask = True
			missing_licenses = \
				pkgsettings.getMissingLicenses(
		except portage.exception.InvalidDependString:
			# This will have already been reported
			# above via mreasons.
		print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
		if comment and comment not in shown_comments:
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		# Point the user at each missing license file, once per license.
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.") % (l, l_path)
			shown_licenses.add(l)
	return have_eapi_mask
class Task(SlotObject):
	"""Base class for hashable graph nodes identified by a tuple key.

	Subclasses provide the key by assigning self._hash_key; equality,
	hashing, length, indexing, iteration, containment and str() all
	delegate to that key, so a Task compares equal to its raw tuple.
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses must populate self._hash_key lazily before use.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			raise NotImplementedError(self)
		return hash_key

	def __eq__(self, other):
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

	def __hash__(self):
		# Cache the hash since the key tuple never changes once set.
		hash_value = getattr(self, "_hash_value", None)
		if hash_value is None:
			self._hash_value = hash(self._get_hash_key())
		return self._hash_value

	def __len__(self):
		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

	def __iter__(self):
		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

	def __str__(self):
		return str(self._get_hash_key())
class Blocker(Task):
	"""A blocking atom pulled into the dependency graph.

	Keyed as ("blocks", root, atom) so distinct roots/atoms hash apart.
	"""
	__slots__ = ("root", "atom", "cp", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# Cache the category/package key of the blocked atom.
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			self._hash_key = \
				("blocks", self.root, self.atom)
		return self._hash_key
class Package(Task):
	"""A concrete package node (ebuild, binary or installed).

	Keyed as (type_name, root, cpv, operation); derives cp, slot_atom,
	cpv_slot, category, pv_split and pf from cpv and metadata.
	"""
	__slots__ = ("built", "cpv", "depth",
		"installed", "metadata", "onlydeps", "operation",
		"root", "type_name",
		"category", "cp", "cpv_slot", "pf", "pv_split", "slot_atom")

	# Metadata keys that must be fetched to build a usable instance.
	metadata_keys = [
		"CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
		"repository", "RESTRICT", "SLOT", "USE"]

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		self.cp = portage.cpv_getkey(self.cpv)
		self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
		self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
		cpv_parts = portage.catpkgsplit(self.cpv)
		self.category = cpv_parts[0]
		self.pv_split = cpv_parts[1:]
		self.pf = self.cpv.replace(self.category + "/", "", 1)

	def _get_hash_key(self):
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			# Normalize the operation before freezing the key.
			if self.operation is None:
				self.operation = "merge"
				if self.onlydeps or self.installed:
					self.operation = "nomerge"
			self._hash_key = \
				(self.type_name, self.root, self.cpv, self.operation)
		return self._hash_key

	# Version comparisons are only meaningful between packages that
	# share the same category/package name; otherwise they are False.
	def __lt__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
			return True
		return False

	def __le__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
			return True
		return False

	def __gt__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
			return True
		return False

	def __ge__(self, other):
		if other.cp != self.cp:
			return False
		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
			return True
		return False
class DependencyArg(object):
	"""Base class for command-line dependency arguments (atoms, package
	names, sets), remembering the raw argument and its root config."""
	def __init__(self, arg=None, root_config=None):
		self.arg = arg
		self.root_config = root_config

	def __str__(self):
		return str(self.arg)
class AtomArg(DependencyArg):
	"""A command-line argument that is a plain dependency atom."""
	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.atom = atom
		# Expose the single atom as a one-element pseudo-set.
		self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A command-line argument that names one specific package instance."""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Pin the exact version of this package with an "=" atom.
		exact_atom = "=" + package.cpv
		self.atom = exact_atom
		self.set = (exact_atom, )
class SetArg(DependencyArg):
	"""A command-line argument that names a package set (@world etc.)."""
	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.set = set
		# Strip the set prefix to get the bare set name.
		self.name = self.arg[len(SETPREFIX):]
class Dependency(SlotObject):
	"""A dependency edge awaiting resolution: parent package -> atom (or
	blocker) within a given root, carrying priority and recursion depth."""
	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")
	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# Default to an ordinary priority and top-level depth.
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
			self.depth = 0
1368 class BlockerCache(DictMixin):
1369 """This caches blockers of installed packages so that dep_check does not
1370 have to be done for every single installed package on every invocation of
1371 emerge. The cache is invalidated whenever it is detected that something
1372 has changed that might alter the results of dep_check() calls:
1373 1) the set of installed packages (including COUNTER) has changed
1374 2) the old-style virtuals have changed
1376 class BlockerData(object):
1378 __slots__ = ("__weakref__", "atoms", "counter")
1380 def __init__(self, counter, atoms):
1381 self.counter = counter
1384 def __init__(self, myroot, vardb):
1386 self._virtuals = vardb.settings.getvirtuals()
1387 self._cache_filename = os.path.join(myroot,
1388 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
1389 self._cache_version = "1"
1390 self._cache_data = None
1391 self._modified = False
1396 f = open(self._cache_filename)
1397 mypickle = cPickle.Unpickler(f)
1398 mypickle.find_global = None
1399 self._cache_data = mypickle.load()
1402 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
1404 cache_valid = self._cache_data and \
1405 isinstance(self._cache_data, dict) and \
1406 self._cache_data.get("version") == self._cache_version and \
1407 isinstance(self._cache_data.get("blockers"), dict)
1409 # Validate all the atoms and counters so that
1410 # corruption is detected as soon as possible.
1411 invalid_items = set()
1412 for k, v in self._cache_data["blockers"].iteritems():
1413 if not isinstance(k, basestring):
1414 invalid_items.add(k)
1417 if portage.catpkgsplit(k) is None:
1418 invalid_items.add(k)
1420 except portage.exception.InvalidData:
1421 invalid_items.add(k)
1423 if not isinstance(v, tuple) or \
1425 invalid_items.add(k)
1428 if not isinstance(counter, (int, long)):
1429 invalid_items.add(k)
1431 if not isinstance(atoms, list):
1432 invalid_items.add(k)
1434 invalid_atom = False
1436 if not isinstance(atom, basestring):
1439 if atom[:1] != "!" or \
1440 not portage.isvalidatom(
1441 atom, allow_blockers=True):
1445 invalid_items.add(k)
1448 for k in invalid_items:
1449 del self._cache_data["blockers"][k]
1450 if not self._cache_data["blockers"]:
1454 self._cache_data = {"version":self._cache_version}
1455 self._cache_data["blockers"] = {}
1456 self._cache_data["virtuals"] = self._virtuals
1457 self._modified = False
1460 """If the current user has permission and the internal blocker cache
1461 been updated, save it to disk and mark it unmodified. This is called
1462 by emerge after it has proccessed blockers for all installed packages.
1463 Currently, the cache is only written if the user has superuser
1464 privileges (since that's required to obtain a lock), but all users
1465 have read access and benefit from faster blocker lookups (as long as
1466 the entire cache is still valid). The cache is stored as a pickled
1467 dict object with the following format:
1471 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
1472 "virtuals" : vardb.settings.getvirtuals()
1475 if self._modified and \
1478 f = portage.util.atomic_ofstream(self._cache_filename)
1479 cPickle.dump(self._cache_data, f, -1)
1481 portage.util.apply_secpass_permissions(
1482 self._cache_filename, gid=portage.portage_gid, mode=0644)
1483 except (IOError, OSError), e:
1485 self._modified = False
1487 def __setitem__(self, cpv, blocker_data):
1489 Update the cache and mark it as modified for a future call to
1492 @param cpv: Package for which to cache blockers.
1494 @param blocker_data: An object with counter and atoms attributes.
1495 @type blocker_data: BlockerData
1497 self._cache_data["blockers"][cpv] = \
1498 (blocker_data.counter, blocker_data.atoms)
1499 self._modified = True
1502 return iter(self._cache_data["blockers"])
1504 def __delitem__(self, cpv):
1505 del self._cache_data["blockers"][cpv]
1506 self._modified = True
1508 def __getitem__(self, cpv):
1511 @returns: An object with counter and atoms attributes.
1513 return self.BlockerData(*self._cache_data["blockers"][cpv])
1516 """This needs to be implemented so that self.__repr__() doesn't raise
1517 an AttributeError."""
class BlockerDB(object):
	"""Computes which installed packages block (or are blocked by) a
	candidate package, using BlockerCache to avoid repeated dep_check."""

	def __init__(self, vartree, portdb):
		self._vartree = vartree
		self._portdb = portdb
		self._blocker_cache = \
			BlockerCache(self._vartree.root, vartree.dbapi)
		self._dep_check_trees = { self._vartree.root : {
			"porttree" : self._vartree,
			"vartree" : self._vartree,
		}}

	def findInstalledBlockers(self, new_pkg):
		"""Return the set of installed packages that either block new_pkg
		or are blocked by it."""
		blocker_cache = self._blocker_cache
		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
		dep_check_trees = self._dep_check_trees
		settings = self._vartree.settings
		stale_cache = set(blocker_cache)
		fake_vartree = \
			FakeVartree(self._vartree,
			self._portdb, Package.metadata_keys, {})
		vardb = fake_vartree.dbapi
		installed_pkgs = list(vardb)

		# Refresh the blocker cache for every installed package whose
		# COUNTER no longer matches the cached entry.
		for inst_pkg in installed_pkgs:
			stale_cache.discard(inst_pkg.cpv)
			cached_blockers = blocker_cache.get(inst_pkg.cpv)
			if cached_blockers is not None and \
				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
				cached_blockers = None
			if cached_blockers is not None:
				blocker_atoms = cached_blockers.atoms
			else:
				myuse = inst_pkg.metadata["USE"].split()
				# Use aux_get() to trigger FakeVartree global
				# updates on *DEPEND when appropriate.
				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
				try:
					portage.dep._dep_check_strict = False
					success, atoms = portage.dep_check(depstr,
						vardb, settings, myuse=myuse,
						trees=dep_check_trees, myroot=inst_pkg.root)
				finally:
					portage.dep._dep_check_strict = True
				if not success:
					pkg_location = os.path.join(inst_pkg.root,
						portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
					portage.writemsg("!!! %s/*DEPEND: %s\n" % \
						(pkg_location, atoms), noiselevel=-1)
					continue

				blocker_atoms = [atom for atom in atoms \
					if atom.startswith("!")]
				blocker_atoms.sort()
				counter = long(inst_pkg.metadata["COUNTER"])
				blocker_cache[inst_pkg.cpv] = \
					blocker_cache.BlockerData(counter, blocker_atoms)
		for cpv in stale_cache:
			del blocker_cache[cpv]
		blocker_cache.flush()

		# Map each blocked atom back to the installed packages that
		# declared it, then match them against new_pkg.
		blocker_parents = digraph()
		blocker_atoms = []
		for pkg in installed_pkgs:
			for blocker_atom in self._blocker_cache[pkg.cpv].atoms:
				blocker_atom = blocker_atom[1:]
				blocker_atoms.append(blocker_atom)
				blocker_parents.add(blocker_atom, pkg)

		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
		blocking_pkgs = set()
		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
			blocking_pkgs.update(blocker_parents.parent_nodes(atom))

		# Check for blockers in the other direction.
		myuse = new_pkg.metadata["USE"].split()
		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
		try:
			portage.dep._dep_check_strict = False
			success, atoms = portage.dep_check(depstr,
				vardb, settings, myuse=myuse,
				trees=dep_check_trees, myroot=new_pkg.root)
		finally:
			portage.dep._dep_check_strict = True
		if not success:
			# We should never get this far with invalid deps.
			show_invalid_depstring_notice(new_pkg, depstr, atoms)
			assert False

		blocker_atoms = [atom[1:] for atom in atoms \
			if atom.startswith("!")]
		if blocker_atoms:
			blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
			for inst_pkg in installed_pkgs:
				try:
					blocker_atoms.iterAtomsForPackage(inst_pkg).next()
				except (portage.exception.InvalidDependString, StopIteration):
					continue
				blocking_pkgs.add(inst_pkg)

		return blocking_pkgs
1622 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
1624 from formatter import AbstractFormatter, DumbWriter
1625 f = AbstractFormatter(DumbWriter(maxcol=72))
1627 print "\n\n!!! Invalid or corrupt dependency specification: "
1635 p_type, p_root, p_key, p_status = parent_node
1637 if p_status == "nomerge":
1638 category, pf = portage.catsplit(p_key)
1639 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
1640 msg.append("Portage is unable to process the dependencies of the ")
1641 msg.append("'%s' package. " % p_key)
1642 msg.append("In order to correct this problem, the package ")
1643 msg.append("should be uninstalled, reinstalled, or upgraded. ")
1644 msg.append("As a temporary workaround, the --nodeps option can ")
1645 msg.append("be used to ignore all dependencies. For reference, ")
1646 msg.append("the problematic dependencies can be found in the ")
1647 msg.append("*DEPEND files located in '%s/'." % pkg_location)
1649 msg.append("This package can not be installed. ")
1650 msg.append("Please notify the '%s' package maintainer " % p_key)
1651 msg.append("about this problem.")
1654 f.add_flowing_data(x)
class PackageVirtualDbapi(portage.dbapi):
	"""
	A dbapi-like interface class that represents the state of the installed
	package database as new packages are installed, replacing any packages
	that previously existed in the same slot. The main difference between
	this class and fakedbapi is that this one uses Package instances
	internally (passed in via cpv_inject() and cpv_remove() calls).
	"""
	def __init__(self, settings):
		portage.dbapi.__init__(self)
		self.settings = settings
		self._match_cache = {}
		# cp -> list of Package; cpv -> Package
		self._cp_map = {}
		self._cpv_map = {}

	def copy(self):
		# Deep enough copy that injections into the clone do not affect
		# this instance (the per-cp lists are duplicated).
		obj = PackageVirtualDbapi(self.settings)
		obj._match_cache = self._match_cache.copy()
		obj._cp_map = self._cp_map.copy()
		for k, v in obj._cp_map.iteritems():
			obj._cp_map[k] = v[:]
		obj._cpv_map = self._cpv_map.copy()
		return obj

	def __iter__(self):
		return self._cpv_map.itervalues()

	def __contains__(self, item):
		existing = self._cpv_map.get(item.cpv)
		if existing is not None and \
			existing == item:
			return True
		return False

	def match_pkgs(self, atom):
		return [self._cpv_map[cpv] for cpv in self.match(atom)]

	def _clear_cache(self):
		# Invalidate memoized category and match data after mutation.
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		result = self._match_cache.get(origdep)
		if result is not None:
			return result[:]
		result = portage.dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = result
		return result[:]

	def cpv_exists(self, cpv):
		return cpv in self._cpv_map

	def cp_list(self, mycp, use_cache=1):
		cachelist = self._match_cache.get(mycp)
		# cp_list() doesn't expand old-style virtuals
		if cachelist and cachelist[0].startswith(mycp):
			return cachelist[:]
		cpv_list = self._cp_map.get(mycp)
		if cpv_list is None:
			cpv_list = []
		else:
			cpv_list = [pkg.cpv for pkg in cpv_list]
		self._cpv_sort_ascending(cpv_list)
		if not (not cpv_list and mycp.startswith("virtual/")):
			self._match_cache[mycp] = cpv_list
		return cpv_list[:]

	def cp_all(self):
		return list(self._cp_map)

	def cpv_all(self):
		return list(self._cpv_map)

	def cpv_inject(self, pkg):
		# Insert pkg, displacing any existing package in the same slot.
		cp_list = self._cp_map.get(pkg.cp)
		if cp_list is None:
			cp_list = []
			self._cp_map[pkg.cp] = cp_list
		e_pkg = self._cpv_map.get(pkg.cpv)
		if e_pkg is not None:
			if e_pkg == pkg:
				return
			self.cpv_remove(e_pkg)
		for e_pkg in cp_list:
			if e_pkg.slot_atom == pkg.slot_atom:
				if e_pkg == pkg:
					return
				self.cpv_remove(e_pkg)
				break
		cp_list.append(pkg)
		self._cpv_map[pkg.cpv] = pkg
		self._clear_cache()

	def cpv_remove(self, pkg):
		old_pkg = self._cpv_map.get(pkg.cpv)
		if old_pkg is not pkg:
			raise KeyError(pkg)
		self._cp_map[pkg.cp].remove(pkg)
		del self._cpv_map[pkg.cpv]
		self._clear_cache()

	def aux_get(self, cpv, wants):
		metadata = self._cpv_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self._cpv_map[cpv].metadata.update(values)
		self._clear_cache()
class depgraph(object):

	# Maps each package type to the tree that provides it.
	pkg_tree_map = {
		"ebuild":"porttree",
		"binary":"bintree",
		"installed":"vartree"}

	# Metadata keys fetched for every package considered by the resolver.
	_mydbapi_keys = Package.metadata_keys

	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1779 def __init__(self, settings, trees, myopts, myparams, spinner):
1780 self.settings = settings
1781 self.target_root = settings["ROOT"]
1782 self.myopts = myopts
1783 self.myparams = myparams
1785 if settings.get("PORTAGE_DEBUG", "") == "1":
1787 self.spinner = spinner
1788 self.pkgsettings = {}
1789 # Maps slot atom to package for each Package added to the graph.
1790 self._slot_pkg_map = {}
1791 # Maps nodes to the reasons they were selected for reinstallation.
1792 self._reinstall_nodes = {}
1795 self._trees_orig = trees
1797 # Contains a filtered view of preferred packages that are selected
1798 # from available repositories.
1799 self._filtered_trees = {}
1800 # Contains installed packages and new packages that have been added
1802 self._graph_trees = {}
1803 # All Package instances
1804 self._pkg_cache = self._package_cache(self)
1805 for myroot in trees:
1806 self.trees[myroot] = {}
1807 # Create a RootConfig instance that references
1808 # the FakeVartree instead of the real one.
1809 self.roots[myroot] = RootConfig(
1810 trees[myroot]["vartree"].settings,
1812 trees[myroot]["root_config"].setconfig)
1813 for tree in ("porttree", "bintree"):
1814 self.trees[myroot][tree] = trees[myroot][tree]
1815 self.trees[myroot]["vartree"] = \
1816 FakeVartree(trees[myroot]["vartree"],
1817 trees[myroot]["porttree"].dbapi,
1818 self._mydbapi_keys, self._pkg_cache)
1819 self.pkgsettings[myroot] = portage.config(
1820 clone=self.trees[myroot]["vartree"].settings)
1821 self._slot_pkg_map[myroot] = {}
1822 vardb = self.trees[myroot]["vartree"].dbapi
1823 preload_installed_pkgs = "--nodeps" not in self.myopts and \
1824 "--buildpkgonly" not in self.myopts
1825 # This fakedbapi instance will model the state that the vdb will
1826 # have after new packages have been installed.
1827 fakedb = PackageVirtualDbapi(vardb.settings)
1828 if preload_installed_pkgs:
1829 for cpv in vardb.cpv_all():
1830 self.spinner.update()
1831 metadata = dict(izip(self._mydbapi_keys,
1832 vardb.aux_get(cpv, self._mydbapi_keys)))
1833 pkg = Package(built=True, cpv=cpv,
1834 installed=True, metadata=metadata,
1835 root=myroot, type_name="installed")
1836 self._pkg_cache[pkg] = pkg
1837 fakedb.cpv_inject(pkg)
1838 self.mydbapi[myroot] = fakedb
1841 graph_tree.dbapi = fakedb
1842 self._graph_trees[myroot] = {}
1843 self._filtered_trees[myroot] = {}
1844 # Substitute the graph tree for the vartree in dep_check() since we
1845 # want atom selections to be consistent with package selections
1846 # have already been made.
1847 self._graph_trees[myroot]["porttree"] = graph_tree
1848 self._graph_trees[myroot]["vartree"] = graph_tree
1849 def filtered_tree():
1851 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
1852 self._filtered_trees[myroot]["porttree"] = filtered_tree
1854 # Passing in graph_tree as the vartree here could lead to better
1855 # atom selections in some cases by causing atoms for packages that
1856 # have been added to the graph to be preferred over other choices.
1857 # However, it can trigger atom selections that result in
1858 # unresolvable direct circular dependencies. For example, this
1859 # happens with gwydion-dylan which depends on either itself or
1860 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
1861 # gwydion-dylan-bin needs to be selected in order to avoid a
1862 # an unresolvable direct circular dependency.
1864 # To solve the problem described above, pass in "graph_db" so that
1865 # packages that have been added to the graph are distinguishable
1866 # from other available packages and installed packages. Also, pass
1867 # the parent package into self._select_atoms() calls so that
1868 # unresolvable direct circular dependencies can be detected and
1869 # avoided when possible.
1870 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
1871 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
1874 portdb = self.trees[myroot]["porttree"].dbapi
1875 bindb = self.trees[myroot]["bintree"].dbapi
1876 vardb = self.trees[myroot]["vartree"].dbapi
1877 # (db, pkg_type, built, installed, db_keys)
1878 if "--usepkgonly" not in self.myopts:
1879 db_keys = list(portdb._aux_cache_keys)
1880 dbs.append((portdb, "ebuild", False, False, db_keys))
1881 if "--usepkg" in self.myopts:
1882 db_keys = list(bindb._aux_cache_keys)
1883 dbs.append((bindb, "binary", True, False, db_keys))
1884 db_keys = self._mydbapi_keys
1885 dbs.append((vardb, "installed", True, True, db_keys))
1886 self._filtered_trees[myroot]["dbs"] = dbs
1887 if "--usepkg" in self.myopts:
1888 self.trees[myroot]["bintree"].populate(
1889 "--getbinpkg" in self.myopts,
1890 "--getbinpkgonly" in self.myopts)
1893 self.digraph=portage.digraph()
1894 # contains all sets added to the graph
1896 # contains atoms given as arguments
1897 self._sets["args"] = InternalPackageSet()
1898 # contains all atoms from all sets added to the graph, including
1899 # atoms given as arguments
1900 self._set_atoms = InternalPackageSet()
1901 self._atom_arg_map = {}
1902 # contains all nodes pulled in by self._set_atoms
1903 self._set_nodes = set()
1904 # Contains only Blocker -> Uninstall edges
1905 self._blocker_uninstalls = digraph()
1906 # Contains only Package -> Blocker edges
1907 self._blocker_parents = digraph()
1908 # Contains only irrelevant Package -> Blocker edges
1909 self._irrelevant_blockers = digraph()
1910 # Contains only unsolvable Package -> Blocker edges
1911 self._unsolvable_blockers = digraph()
1912 self._slot_collision_info = set()
1913 # Slot collision nodes are not allowed to block other packages since
1914 # blocker validation is only able to account for one package per slot.
1915 self._slot_collision_nodes = set()
1916 self._serialized_tasks_cache = None
1917 self._pprovided_args = []
1918 self._missing_args = []
1919 self._masked_installed = []
1920 self._unsatisfied_deps_for_display = []
1921 self._unsatisfied_blockers_for_display = None
1922 self._circular_deps_for_display = None
1923 self._dep_stack = []
1924 self._unsatisfied_deps = []
1925 self._ignored_deps = []
1926 self._required_set_names = set(["system", "world"])
1927 self._select_atoms = self._select_atoms_highest_available
1928 self._select_package = self._select_pkg_highest_available
1929 self._highest_pkg_cache = {}
1931 def _show_slot_collision_notice(self):
1932 """Show an informational message advising the user to mask one of the
1933 the packages. In some cases it may be possible to resolve this
1934 automatically, but support for backtracking (removal nodes that have
1935 already been selected) will be required in order to handle all possible
1938 if not self._slot_collision_info:
1941 self._show_merge_list()
1944 msg.append("\n!!! Multiple versions within a single " + \
1945 "package slot have been pulled\n")
1946 msg.append("!!! into the dependency graph, resulting" + \
1947 " in a slot conflict:\n\n")
1949 # Max number of parents shown, to avoid flooding the display.
1951 for slot_atom, root in self._slot_collision_info:
1952 msg.append(slot_atom)
1955 for node in self._slot_collision_nodes:
1956 if node.slot_atom == slot_atom:
1957 slot_nodes.append(node)
1958 slot_nodes.append(self._slot_pkg_map[root][slot_atom])
1959 for node in slot_nodes:
1961 msg.append(str(node))
1962 parents = self.digraph.parent_nodes(node)
1965 if len(parents) > max_parents:
1967 # When generating the pruned list, prefer instances
1968 # of DependencyArg over instances of Package.
1969 for parent in parents:
1970 if isinstance(parent, DependencyArg):
1971 pruned_list.append(parent)
1972 # Prefer Packages instances that themselves have been
1973 # pulled into collision slots.
1974 for parent in parents:
1975 if isinstance(parent, Package) and \
1976 (parent.slot_atom, parent.root) \
1977 in self._slot_collision_info:
1978 pruned_list.append(parent)
1979 for parent in parents:
1980 if len(pruned_list) >= max_parents:
1982 if not isinstance(parent, DependencyArg) and \
1983 parent not in pruned_list:
1984 pruned_list.append(parent)
1985 omitted_parents = len(parents) - len(pruned_list)
1986 parents = pruned_list
1987 msg.append(" pulled in by\n")
1988 for parent in parents:
1989 msg.append(2*indent)
1990 msg.append(str(parent))
1993 msg.append(2*indent)
1994 msg.append("(and %d more)\n" % omitted_parents)
1996 msg.append(" (no parents)\n")
1999 sys.stderr.write("".join(msg))
2002 if "--quiet" in self.myopts:
2006 msg.append("It may be possible to solve this problem ")
2007 msg.append("by using package.mask to prevent one of ")
2008 msg.append("those packages from being selected. ")
2009 msg.append("However, it is also possible that conflicting ")
2010 msg.append("dependencies exist such that they are impossible to ")
2011 msg.append("satisfy simultaneously. If such a conflict exists in ")
2012 msg.append("the dependencies of two different packages, then those ")
2013 msg.append("packages can not be installed simultaneously.")
2015 from formatter import AbstractFormatter, DumbWriter
2016 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
2018 f.add_flowing_data(x)
2022 msg.append("For more information, see MASKED PACKAGES ")
2023 msg.append("section in the emerge man page or refer ")
2024 msg.append("to the Gentoo Handbook.")
2026 f.add_flowing_data(x)
2030 def _reinstall_for_flags(self, forced_flags,
2031 orig_use, orig_iuse, cur_use, cur_iuse):
2032 """Return a set of flags that trigger reinstallation, or None if there
2033 are no such flags."""
2034 if "--newuse" in self.myopts:
2035 flags = orig_iuse.symmetric_difference(
2036 cur_iuse).difference(forced_flags)
2037 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2038 cur_iuse.intersection(cur_use)))
2041 elif "changed-use" == self.myopts.get("--reinstall"):
2042 flags = orig_iuse.intersection(orig_use).symmetric_difference(
2043 cur_iuse.intersection(cur_use))
2048 def _create_graph(self, allow_unsatisfied=False):
2049 dep_stack = self._dep_stack
2051 self.spinner.update()
2052 dep = dep_stack.pop()
2053 if isinstance(dep, Package):
2054 if not self._add_pkg_deps(dep,
2055 allow_unsatisfied=allow_unsatisfied):
2058 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
2062 def _add_dep(self, dep, allow_unsatisfied=False):
2063 debug = "--debug" in self.myopts
2064 buildpkgonly = "--buildpkgonly" in self.myopts
2065 nodeps = "--nodeps" in self.myopts
2066 empty = "empty" in self.myparams
2067 deep = "deep" in self.myparams
2068 update = "--update" in self.myopts and dep.depth <= 1
2070 if not buildpkgonly and \
2072 dep.parent not in self._slot_collision_nodes:
2073 if dep.parent.onlydeps:
2074 # It's safe to ignore blockers if the
2075 # parent is an --onlydeps node.
2077 # The blocker applies to the root where
2078 # the parent is or will be installed.
2079 blocker = Blocker(atom=dep.atom, root=dep.parent.root)
2080 self._blocker_parents.add(blocker, dep.parent)
2082 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2083 onlydeps=dep.onlydeps)
2085 if allow_unsatisfied:
2086 self._unsatisfied_deps.append(dep)
2088 self._unsatisfied_deps_for_display.append(
2089 ((dep.root, dep.atom), {"myparent":dep.parent}))
2091 # In some cases, dep_check will return deps that shouldn't
2092 # be proccessed any further, so they are identified and
2093 # discarded here. Try to discard as few as possible since
2094 # discarded dependencies reduce the amount of information
2095 # available for optimization of merge order.
2096 if dep.priority.satisfied and \
2097 not (existing_node or empty or deep or update):
2099 if dep.root == self.target_root:
2101 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2102 except StopIteration:
2104 except portage.exception.InvalidDependString:
2105 if not dep_pkg.installed:
2106 # This shouldn't happen since the package
2107 # should have been masked.
2110 self._ignored_deps.append(dep)
2113 if not self._add_pkg(dep_pkg, dep.parent,
2114 priority=dep.priority, depth=dep.depth):
2118 def _add_pkg(self, pkg, myparent, priority=None, depth=0):
2119 if priority is None:
2120 priority = DepPriority()
2122 Fills the digraph with nodes comprised of packages to merge.
2123 mybigkey is the package spec of the package to merge.
2124 myparent is the package depending on mybigkey ( or None )
2125 addme = Should we add this package to the digraph or are we just looking at it's deps?
2126 Think --onlydeps, we need to ignore packages in that case.
2129 #IUSE-aware emerge -> USE DEP aware depgraph
2130 #"no downgrade" emerge
2133 # select the correct /var database that we'll be checking against
2134 vardbapi = self.trees[pkg.root]["vartree"].dbapi
2135 pkgsettings = self.pkgsettings[pkg.root]
2141 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
2142 except portage.exception.InvalidDependString, e:
2143 if not pkg.installed:
2144 show_invalid_depstring_notice(
2145 pkg, pkg.metadata["PROVIDE"], str(e))
2149 args = [arg for arg, atom in arg_atoms]
2151 if not pkg.onlydeps:
2152 if not pkg.installed and \
2153 "empty" not in self.myparams and \
2154 vardbapi.match(pkg.slot_atom):
2155 # Increase the priority of dependencies on packages that
2156 # are being rebuilt. This optimizes merge order so that
2157 # dependencies are rebuilt/updated as soon as possible,
2158 # which is needed especially when emerge is called by
2159 # revdep-rebuild since dependencies may be affected by ABI
2160 # breakage that has rendered them useless. Don't adjust
2161 # priority here when in "empty" mode since all packages
2162 # are being merged in that case.
2163 priority.rebuild = True
2165 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
2166 slot_collision = False
2168 if pkg.cpv == existing_node.cpv:
2169 # The existing node can be reused.
2172 self.digraph.add(existing_node, arg,
2174 # If a direct circular dependency is not an unsatisfied
2175 # buildtime dependency then drop it here since otherwise
2176 # it can skew the merge order calculation in an unwanted
2178 if existing_node != myparent or \
2179 (priority.buildtime and not priority.satisfied):
2180 self.digraph.addnode(existing_node, myparent,
2184 if pkg in self._slot_collision_nodes:
2186 # A slot collision has occurred. Sometimes this coincides
2187 # with unresolvable blockers, so the slot collision will be
2188 # shown later if there are no unresolvable blockers.
2189 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
2190 self._slot_collision_nodes.add(pkg)
2191 slot_collision = True
2194 # Now add this node to the graph so that self.display()
2195 # can show use flags and --tree portage.output. This node is
2196 # only being partially added to the graph. It must not be
2197 # allowed to interfere with the other nodes that have been
2198 # added. Do not overwrite data for existing nodes in
2199 # self.mydbapi since that data will be used for blocker
2201 # Even though the graph is now invalid, continue to process
2202 # dependencies so that things like --fetchonly can still
2203 # function despite collisions.
2206 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
2207 self.mydbapi[pkg.root].cpv_inject(pkg)
2209 self.digraph.addnode(pkg, myparent, priority=priority)
2211 if not pkg.installed:
2212 # Allow this package to satisfy old-style virtuals in case it
2213 # doesn't already. Any pre-existing providers will be preferred
2216 pkgsettings.setinst(pkg.cpv, pkg.metadata)
2217 # For consistency, also update the global virtuals.
2218 settings = self.roots[pkg.root].settings
2220 settings.setinst(pkg.cpv, pkg.metadata)
2222 except portage.exception.InvalidDependString, e:
2223 show_invalid_depstring_notice(
2224 pkg, pkg.metadata["PROVIDE"], str(e))
2229 # Warn if an installed package is masked and it
2230 # is pulled into the graph.
2231 if not visible(pkgsettings, pkg):
2232 self._masked_installed.append((pkg, pkgsettings))
2235 self._set_nodes.add(pkg)
2237 # Do this even when addme is False (--onlydeps) so that the
2238 # parent/child relationship is always known in case
2239 # self._show_slot_collision_notice() needs to be called later.
2241 self.digraph.add(pkg, myparent, priority=priority)
2244 self.digraph.add(pkg, arg, priority=priority)
2246 """ This section determines whether we go deeper into dependencies or not.
2247 We want to go deeper on a few occasions:
2248 Installing package A, we need to make sure package A's deps are met.
2249 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
2250 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
2252 dep_stack = self._dep_stack
2253 if "recurse" not in self.myparams:
2255 elif pkg.installed and \
2256 "deep" not in self.myparams:
2257 dep_stack = self._ignored_deps
2259 self.spinner.update()
2264 dep_stack.append(pkg)
2267 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
# Queue every DEPEND/RDEPEND/PDEPEND atom of `pkg` as a Dependency via
# self._add_dep(), returning a false value on failure so the caller can
# abort graph construction.
# NOTE(review): several original lines are elided from this view; the
# comments below describe only what the visible code shows.
2269 mytype = pkg.type_name
2272 metadata = pkg.metadata
2273 myuse = metadata["USE"].split()
# Children are one level deeper than the package being processed.
2275 depth = pkg.depth + 1
2278 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
2280 edepend[k] = metadata[k]
# With --buildpkgonly and no deep/empty recursion, only build-time deps
# of packages we will actually build matter, so drop the runtime deps.
2282 if not pkg.built and \
2283 "--buildpkgonly" in self.myopts and \
2284 "deep" not in self.myparams and \
2285 "empty" not in self.myparams:
2286 edepend["RDEPEND"] = ""
2287 edepend["PDEPEND"] = ""
2288 bdeps_satisfied = False
2289 if mytype in ("installed", "binary"):
2290 if self.myopts.get("--with-bdeps", "n") == "y":
2291 # Pull in build time deps as requested, but mark them as
2292 # "satisfied" since they are not strictly required. This allows
2293 # more freedom in the merge order calculation for solving
2294 # circular dependencies. Don't convert to PDEPEND since that
2295 # could make --with-bdeps=y less effective if it is used to
2296 # adjust merge order to prevent built_with_use() calls from
2298 bdeps_satisfied = True
2300 # built packages do not have build time dependencies.
2301 edepend["DEPEND"] = ""
# (root, depstring, priority) triples: DEPEND applies to "/" (build
# host root), RDEPEND/PDEPEND apply to the package's install root.
2304 ("/", edepend["DEPEND"],
2305 DepPriority(buildtime=True, satisfied=bdeps_satisfied)),
2306 (myroot, edepend["RDEPEND"], DepPriority(runtime=True)),
2307 (myroot, edepend["PDEPEND"], DepPriority(runtime_post=True))
2310 debug = "--debug" in self.myopts
# Installed packages may have stale depstrings; don't be strict there.
2311 strict = mytype != "installed"
2313 for dep_root, dep_string, dep_priority in deps:
2315 # Decrease priority so that --buildpkgonly
2316 # hasallzeros() works correctly.
2317 dep_priority = DepPriority()
2322 print "Parent: ", jbigkey
2323 print "Depstring:", dep_string
2324 print "Priority:", dep_priority
2325 vardb = self.roots[dep_root].trees["vartree"].dbapi
# _select_atoms raises InvalidDependString for malformed depstrings.
2327 selected_atoms = self._select_atoms(dep_root,
2328 dep_string, myuse=myuse, parent=pkg, strict=strict)
2329 except portage.exception.InvalidDependString, e:
2330 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
2333 print "Candidates:", selected_atoms
2334 for atom in selected_atoms:
2335 blocker = atom.startswith("!")
2338 mypriority = dep_priority.copy()
# A dep already satisfied by an installed package gets relaxed
# priority, which gives the merge-order solver more freedom.
2339 if not blocker and vardb.match(atom):
2340 mypriority.satisfied = True
2341 if not self._add_dep(Dependency(atom=atom,
2342 blocker=blocker, depth=depth, parent=pkg,
2343 priority=mypriority, root=dep_root),
2344 allow_unsatisfied=allow_unsatisfied):
2347 print "Exiting...", jbigkey
# dep_check signals ambiguous (not fully-qualified) atoms via a
# ValueError whose first arg is the list of matching cpvs.
2348 except ValueError, e:
2349 if not e.args or not isinstance(e.args[0], list) or \
2353 portage.writemsg("\n\n!!! An atom in the dependencies " + \
2354 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
2356 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
2357 portage.writemsg("\n", noiselevel=-1)
2358 if mytype == "binary":
2360 "!!! This binary package cannot be installed: '%s'\n" % \
2361 mykey, noiselevel=-1)
2362 elif mytype == "ebuild":
2363 portdb = self.roots[myroot].trees["porttree"].dbapi
2364 myebuild, mylocation = portdb.findname2(mykey)
2365 portage.writemsg("!!! This ebuild cannot be installed: " + \
2366 "'%s'\n" % myebuild, noiselevel=-1)
2367 portage.writemsg("!!! Please notify the package maintainer " + \
2368 "that atoms must be fully-qualified.\n", noiselevel=-1)
2372 def _dep_expand(self, root_config, atom_without_category):
# Expand a category-less atom into candidate fully-qualified atoms by
# scanning every cp in the configured dbs for a matching package name.
# NOTE(review): some lines (including the docstring delimiters) are
# elided from this view.
2374 @param root_config: a root config instance
2375 @type root_config: RootConfig
2376 @param atom_without_category: an atom without a category component
2377 @type atom_without_category: String
2379 @returns: a list of atoms containing categories (possibly empty)
# Insert a fake "null" category so dep_getkey/catsplit can isolate the
# package-name part of the operator/version syntax.
2381 null_cp = portage.dep_getkey(insert_category_into_atom(
2382 atom_without_category, "null"))
2383 cat, atom_pn = portage.catsplit(null_cp)
2386 for db, pkg_type, built, installed, db_keys in \
2387 self._filtered_trees[root_config.root]["dbs"]:
2388 cp_set.update(db.cp_all())
2389 for cp in list(cp_set):
2390 cat, pn = portage.catsplit(cp)
# For each surviving cp, rebuild the original atom with the real
# category substituted in.
2395 cat, pn = portage.catsplit(cp)
2396 deps.append(insert_category_into_atom(
2397 atom_without_category, cat))
2400 def _have_new_virt(self, root, atom_cp):
# Report whether any configured db for `root` carries an ebuild/binary
# under category/package `atom_cp` (i.e. a new-style virtual exists).
# NOTE(review): the return statements are elided from this view.
2402 for db, pkg_type, built, installed, db_keys in \
2403 self._filtered_trees[root]["dbs"]:
2404 if db.cp_list(atom_cp):
2409 def _iter_atoms_for_pkg(self, pkg):
# Generator: yield each argument atom (from self._set_atoms) that
# matches `pkg`, skipping atoms that are better served by a new-style
# virtual or by a visible package in a higher slot.
2410 # TODO: add multiple $ROOT support
2411 if pkg.root != self.target_root:
2413 atom_arg_map = self._atom_arg_map
2414 root_config = self.roots[pkg.root]
2415 for atom in self._set_atoms.iterAtomsForPackage(pkg):
2416 atom_cp = portage.dep_getkey(atom)
# If the atom's cp differs from the package's, the match is via an
# old-style virtual; prefer the new-style virtual when one exists.
2417 if atom_cp != pkg.cp and \
2418 self._have_new_virt(pkg.root, atom_cp):
2420 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
2421 visible_pkgs.reverse() # descending order
2423 for visible_pkg in visible_pkgs:
2424 if visible_pkg.cp != atom_cp:
2426 if pkg >= visible_pkg:
2427 # This is descending order, and we're not
2428 # interested in any versions <= pkg given.
2430 if pkg.slot_atom != visible_pkg.slot_atom:
2431 higher_slot = visible_pkg
2433 if higher_slot is not None:
# PackageArg pins an exact package, which overrides the
# higher-slot preference logic above.
2435 for arg in atom_arg_map[(atom, pkg.root)]:
2436 if isinstance(arg, PackageArg) and \
2441 def select_files(self, myfiles):
2442 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
2443 appropriate depgraph and return a favorite list."""
# Returns a (success, myfavorites) pair; 0/False on failure. Each
# command-line item is classified (.tbz2, .ebuild, owned path, set,
# atom) and wrapped in an *Arg object before graph construction.
# NOTE(review): many original lines are elided from this view; the
# comments below describe only the visible code.
2444 root_config = self.roots[self.target_root]
2445 sets = root_config.sets
2446 getSetAtoms = root_config.setconfig.getSetAtoms
2448 myroot = self.target_root
2449 dbs = self._filtered_trees[myroot]["dbs"]
2450 vardb = self.trees[myroot]["vartree"].dbapi
2451 portdb = self.trees[myroot]["porttree"].dbapi
2452 bindb = self.trees[myroot]["bintree"].dbapi
2453 pkgsettings = self.pkgsettings[myroot]
2455 onlydeps = "--onlydeps" in self.myopts
2457 ext = os.path.splitext(x)[1]
# --- Binary package (.tbz2) argument: resolve the file, possibly
# under $PKGDIR or $PKGDIR/All, and verify it lives inside PKGDIR.
2459 if not os.path.exists(x):
2461 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2462 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2463 elif os.path.exists(
2464 os.path.join(pkgsettings["PKGDIR"], x)):
2465 x = os.path.join(pkgsettings["PKGDIR"], x)
2467 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
2468 print "!!! Please ensure the tbz2 exists as specified.\n"
2469 return 0, myfavorites
# The cpv key comes from the CATEGORY xpak element plus the
# file's basename (without the .tbz2 extension).
2470 mytbz2=portage.xpak.tbz2(x)
2471 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
2472 if os.path.realpath(x) != \
2473 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
2474 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
2475 return 0, myfavorites
2476 metadata = dict(izip(self._mydbapi_keys,
2477 bindb.aux_get(mykey, self._mydbapi_keys)))
2478 pkg = Package(type_name="binary", root=myroot,
2479 cpv=mykey, built=True, metadata=metadata,
2481 self._pkg_cache[pkg] = pkg
2482 args.append(PackageArg(arg=x, package=pkg,
2483 root_config=root_config))
# --- Ebuild file argument: derive cp/cpv from the path and verify
# the file really belongs to the configured tree.
2484 elif ext==".ebuild":
2485 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2486 pkgdir = os.path.dirname(ebuild_path)
2487 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2488 cp = pkgdir[len(tree_root)+1:]
2489 e = portage.exception.PackageNotFound(
2490 ("%s is not in a valid portage tree " + \
2491 "hierarchy or does not exist") % x)
2492 if not portage.isvalidatom(cp):
2494 cat = portage.catsplit(cp)[0]
# Strip the ".ebuild" suffix (7 chars) to get the pv part.
2495 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2496 if not portage.isvalidatom("="+mykey):
2498 ebuild_path = portdb.findname(mykey)
2500 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2501 cp, os.path.basename(ebuild_path)):
2502 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
2503 return 0, myfavorites
# Warn (with a countdown) when directly emerging a masked ebuild.
2504 if mykey not in portdb.xmatch(
2505 "match-visible", portage.dep_getkey(mykey)):
2506 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
2507 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
2508 print colorize("BAD", "*** page for details.")
2509 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
2512 raise portage.exception.PackageNotFound(
2513 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2514 metadata = dict(izip(self._mydbapi_keys,
2515 portdb.aux_get(mykey, self._mydbapi_keys)))
2516 pkgsettings.setcpv(mykey, mydb=metadata)
2517 metadata["USE"] = pkgsettings["PORTAGE_USE"]
2518 pkg = Package(type_name="ebuild", root=myroot,
2519 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
2520 self._pkg_cache[pkg] = pkg
2521 args.append(PackageArg(arg=x, package=pkg,
2522 root_config=root_config))
# --- Absolute filesystem path argument: find the installed package
# that owns the file and emerge that package (slot-qualified).
2523 elif x.startswith(os.path.sep):
2524 if not x.startswith(myroot):
2525 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2526 " $ROOT.\n") % x, noiselevel=-1)
2528 relative_path = x[len(myroot):]
2529 vartree = self._trees_orig[myroot]["vartree"]
2531 for cpv in vardb.cpv_all():
2532 self.spinner.update()
2533 cat, pf = portage.catsplit(cpv)
2534 if portage.dblink(cat, pf, myroot,
2535 pkgsettings, vartree=vartree).isowner(
2536 relative_path, myroot):
2539 if owner_cpv is None:
2540 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2541 "by any package.\n") % x, noiselevel=-1)
2543 slot = vardb.aux_get(owner_cpv, ["SLOT"])[0]
2545 # portage now masks packages with missing slot, but it's
2546 # possible that one was installed by an older version
2547 atom = portage.cpv_getkey(owner_cpv)
2549 atom = "%s:%s" % (portage.cpv_getkey(owner_cpv), slot)
2550 args.append(AtomArg(arg=atom, atom=atom,
2551 root_config=root_config))
# --- Set arguments ("system"/"world" or @set syntax).
2553 if x in ("system", "world"):
2555 if x.startswith(SETPREFIX):
2556 s = x[len(SETPREFIX):]
2558 raise portage.exception.PackageNotFound(
2559 "emerge: there are no sets to satisfy '%s'." % s)
2562 # Recursively expand sets so that containment tests in
2563 # self._get_parent_sets() properly match atoms in nested
2564 # sets (like if world contains system).
2565 expanded_set = InternalPackageSet(
2566 initial_atoms=getSetAtoms(s))
2567 self._sets[s] = expanded_set
2568 args.append(SetArg(arg=x, set=expanded_set,
2569 root_config=root_config))
2570 myfavorites.append(x)
# --- Plain atom arguments: validate, then expand short names.
2572 if not is_valid_package_atom(x):
2573 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2575 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2576 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2578 # Don't expand categories or old-style virtuals here unless
2579 # necessary. Expansion of old-style virtuals here causes at
2580 # least the following problems:
2581 # 1) It's more difficult to determine which set(s) an atom
2582 # came from, if any.
2583 # 2) It takes away freedom from the resolver to choose other
2584 # possible expansions when necessary.
2586 args.append(AtomArg(arg=x, atom=x,
2587 root_config=root_config))
2589 expanded_atoms = self._dep_expand(root_config, x)
2590 installed_cp_set = set()
2591 for atom in expanded_atoms:
2592 atom_cp = portage.dep_getkey(atom)
2593 if vardb.cp_list(atom_cp):
2594 installed_cp_set.add(atom_cp)
# Exactly one expansion is already installed: disambiguate to it.
2595 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2596 installed_cp = iter(installed_cp_set).next()
2597 expanded_atoms = [atom for atom in expanded_atoms \
2598 if portage.dep_getkey(atom) == installed_cp]
2600 if len(expanded_atoms) > 1:
2601 print "\n\n!!! The short ebuild name \"" + x + "\" is ambiguous. Please specify"
2602 print "!!! one of the following fully-qualified ebuild names instead:\n"
2603 expanded_atoms = set(portage.dep_getkey(atom) \
2604 for atom in expanded_atoms)
2605 for i in sorted(expanded_atoms):
2606 print " " + green(i)
2608 return False, myfavorites
2610 atom = expanded_atoms[0]
# No expansion matched: fall back to PROVIDE-based virtuals, else
# keep a "null/" category so the failure is reported downstream.
2612 null_atom = insert_category_into_atom(x, "null")
2613 null_cp = portage.dep_getkey(null_atom)
2614 cat, atom_pn = portage.catsplit(null_cp)
2615 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2617 # Allow the depgraph to choose which virtual.
2618 atom = insert_category_into_atom(x, "virtual")
2620 atom = insert_category_into_atom(x, "null")
2622 args.append(AtomArg(arg=x, atom=atom,
2623 root_config=root_config))
2625 if "--update" in self.myopts:
2626 # Enable greedy SLOT atoms for atoms given as arguments.
2627 # This is currently disabled for sets since greedy SLOT
2628 # atoms could be a property of the set itself.
2631 # In addition to any installed slots, also try to pull
2632 # in the latest new slot that may be available.
2633 greedy_atoms.append(arg)
2634 if not isinstance(arg, (AtomArg, PackageArg)):
2636 atom_cp = portage.dep_getkey(arg.atom)
2638 for cpv in vardb.match(arg.atom):
2639 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2641 greedy_atoms.append(
2642 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
2643 root_config=root_config))
2647 # Create the "args" package set from atoms and
2648 # packages given as arguments.
2649 args_set = self._sets["args"]
2651 if not isinstance(arg, (AtomArg, PackageArg)):
2654 if myatom in args_set:
2656 args_set.add(myatom)
2657 myfavorites.append(myatom)
2658 self._set_atoms.update(chain(*self._sets.itervalues()))
# Map each (atom, root) back to the argument(s) that produced it.
2659 atom_arg_map = self._atom_arg_map
2661 for atom in arg.set:
2662 atom_key = (atom, myroot)
2663 refs = atom_arg_map.get(atom_key)
2666 atom_arg_map[atom_key] = refs
2669 pprovideddict = pkgsettings.pprovideddict
2670 # Order needs to be preserved since a feature of --nodeps
2671 # is to allow the user to force a specific merge order.
2675 for atom in arg.set:
2676 self.spinner.update()
2677 atom_cp = portage.dep_getkey(atom)
2679 pprovided = pprovideddict.get(portage.dep_getkey(atom))
2680 if pprovided and portage.match_from_list(atom, pprovided):
2681 # A provided package has been specified on the command line.
2682 self._pprovided_args.append((arg, atom))
2684 if isinstance(arg, PackageArg):
2685 if not self._add_pkg(arg.package, arg) or \
2686 not self._create_graph():
2687 sys.stderr.write(("\n\n!!! Problem resolving " + \
2688 "dependencies for %s\n") % arg.arg)
2689 return 0, myfavorites
2691 pkg, existing_node = self._select_package(
2692 myroot, atom, onlydeps=onlydeps)
# Unsatisfied atoms from system/world are tolerated (recorded as
# missing); explicit user atoms abort immediately.
2694 if not (isinstance(arg, SetArg) and \
2695 arg.name in ("system", "world")):
2696 self._unsatisfied_deps_for_display.append(
2697 ((myroot, atom), {}))
2698 return 0, myfavorites
2699 self._missing_args.append((arg, atom))
2701 if atom_cp != pkg.cp:
2702 # For old-style virtuals, we need to repeat the
2703 # package.provided check against the selected package.
2704 expanded_atom = atom.replace(atom_cp, pkg.cp)
2705 pprovided = pprovideddict.get(pkg.cp)
2707 portage.match_from_list(expanded_atom, pprovided):
2708 # A provided package has been
2709 # specified on the command line.
2710 self._pprovided_args.append((arg, atom))
2712 if pkg.installed and "selective" not in self.myparams:
2713 self._unsatisfied_deps_for_display.append(
2714 ((myroot, atom), {}))
2715 # Previous behavior was to bail out in this case, but
2716 # since the dep is satisfied by the installed package,
2717 # it's more friendly to continue building the graph
2718 # and just show a warning message. Therefore, only bail
2719 # out here if the atom is not from either the system or
2721 if not (isinstance(arg, SetArg) and \
2722 arg.name in ("system", "world")):
2723 return 0, myfavorites
2725 dep = Dependency(atom=atom, onlydeps=onlydeps,
2726 root=myroot, parent=arg)
2728 # Add the selected package to the graph as soon as possible
2729 # so that later dep_check() calls can use it as feedback
2730 # for making more consistent atom selections.
2731 if not self._add_pkg(pkg, dep.parent,
2732 priority=dep.priority, depth=dep.depth):
2733 if isinstance(arg, SetArg):
2734 sys.stderr.write(("\n\n!!! Problem resolving " + \
2735 "dependencies for %s from %s\n") % \
2738 sys.stderr.write(("\n\n!!! Problem resolving " + \
2739 "dependencies for %s\n") % atom)
2740 return 0, myfavorites
# GPG signature failures are reported and abort resolution.
2742 except portage.exception.MissingSignature, e:
2743 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
2744 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
2745 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
2746 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
2747 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
2748 return 0, myfavorites
2749 except portage.exception.InvalidSignature, e:
2750 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
2751 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
2752 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
2753 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
2754 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
2755 return 0, myfavorites
2756 except SystemExit, e:
2757 raise # Needed else can't exit
2758 except Exception, e:
2759 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
2760 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
2763 # Now that the root packages have been added to the graph,
2764 # process the dependencies.
2765 if not self._create_graph():
2766 return 0, myfavorites
# --usepkgonly sanity check: every node scheduled for merge must be
# a binary package, otherwise report what is missing.
2769 if "--usepkgonly" in self.myopts:
2770 for xs in self.digraph.all_nodes():
2771 if not isinstance(xs, Package):
2773 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
2777 print "Missing binary for:",xs[2]
2781 except self._unknown_internal_error:
2782 return False, myfavorites
2784 # We're true here unless we are missing binaries.
2785 return (not missing,myfavorites)
2787 def _select_atoms_from_graph(self, *pargs, **kwargs):
# Thin wrapper: force atom selection to run against self._graph_trees
# so the dep_check machinery only sees graph/installed packages.
# NOTE(review): the docstring delimiters are elided from this view.
2789 Prefer atoms matching packages that have already been
2790 added to the graph or those that are installed and have
2791 not been scheduled for replacement.
2793 kwargs["trees"] = self._graph_trees
2794 return self._select_atoms_highest_available(*pargs, **kwargs)
2796 def _select_atoms_highest_available(self, root, depstring,
2797 myuse=None, parent=None, strict=True, trees=None):
2798 """This will raise InvalidDependString if necessary. If trees is
2799 None then self._filtered_trees is used."""
# Returns the atoms chosen by portage.dep_check() for `depstring`.
# NOTE(review): some lines (e.g. the try/finally around dep_check and
# the strict toggle) are elided from this view.
2800 pkgsettings = self.pkgsettings[root]
2802 trees = self._filtered_trees
2805 if parent is not None:
# Pass the parent package to dep_check via the trees dict; it is
# removed again below so the shared dict stays clean.
2806 trees[root]["parent"] = parent
# Temporarily disable module-global strict checking for this call.
2808 portage.dep._dep_check_strict = False
2809 mycheck = portage.dep_check(depstring, None,
2810 pkgsettings, myuse=myuse,
2811 myroot=root, trees=trees)
2813 if parent is not None:
2814 trees[root].pop("parent")
2815 portage.dep._dep_check_strict = True
2817 raise portage.exception.InvalidDependString(mycheck[1])
2818 selected_atoms = mycheck[1]
2819 return selected_atoms
2821 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
# Print a user-facing explanation of why `atom` could not be
# satisfied: either list the masked candidates (with reasons) or
# report that no ebuild exists at all.
# NOTE(review): several original lines are elided from this view.
2822 xinfo = '"%s"' % atom
2825 # Discard null/ from failed cpv_expand category expansion.
2826 xinfo = xinfo.replace("null/", "")
2828 xfrom = '(dependency required by '+ \
2829 green('"%s"' % myparent[2]) + \
2830 red(' [%s]' % myparent[0]) + ')'
2831 masked_packages = []
2832 missing_licenses = []
2833 have_eapi_mask = False
2834 pkgsettings = self.pkgsettings[root]
2835 root_config = self.roots[root]
2836 portdb = self.roots[root].trees["porttree"].dbapi
2837 dbs = self._filtered_trees[root]["dbs"]
2838 for db, pkg_type, built, installed, db_keys in dbs:
# xmatch("match-all", ...) ignores visibility, so masked
# candidates are collected too; plain match() otherwise.
2842 if hasattr(db, "xmatch"):
2843 cpv_list = db.xmatch("match-all", atom)
2845 cpv_list = db.match(atom)
2848 for cpv in cpv_list:
2849 metadata, mreasons = get_mask_info(root_config, cpv,
2850 pkgsettings, db, pkg_type, built, installed, db_keys)
2851 masked_packages.append(
2852 (root_config, pkgsettings, cpv, metadata, mreasons))
2855 print "\n!!! "+red("All ebuilds that could satisfy ")+green(xinfo)+red(" have been masked.")
2856 print "!!! One of the following masked packages is required to complete your request:"
2857 have_eapi_mask = show_masked_packages(masked_packages)
# A mask caused by an unsupported EAPI means the user must upgrade
# portage itself; explain that explicitly.
2860 msg = ("The current version of portage supports " + \
2861 "EAPI '%s'. You must upgrade to a newer version" + \
2862 " of portage before EAPI masked packages can" + \
2863 " be installed.") % portage.const.EAPI
2864 from textwrap import wrap
2865 for line in wrap(msg, 75):
2870 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
2875 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
# Memoizing front-end for _select_pkg_highest_available_imp(); results
# are cached per (root, atom, onlydeps) in self._highest_pkg_cache.
# NOTE(review): the cache-hit return path is partly elided here.
2876 cache_key = (root, atom, onlydeps)
2877 ret = self._highest_pkg_cache.get(cache_key)
2880 if pkg and not existing:
# A cached result may be stale if the package has since been
# added to the graph; refresh the (pkg, existing_node) pair.
2881 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
2882 if existing and existing == pkg:
2883 # Update the cache to reflect that the
2884 # package has been added to the graph.
2886 self._highest_pkg_cache[cache_key] = ret
2888 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2889 self._highest_pkg_cache[cache_key] = ret
2892 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
# Core package-selection routine: scan the configured dbs (graph
# nodes, binaries, ebuilds, installed) for packages matching `atom`,
# apply visibility/--newuse/--reinstall/--noreplace policy, and return
# a (package, existing_graph_node) pair; the last entry of
# matched_packages wins (ordered by type preference).
# NOTE(review): many original lines are elided from this view; the
# comments below describe only the visible code.
2893 pkgsettings = self.pkgsettings[root]
2894 dbs = self._filtered_trees[root]["dbs"]
2895 vardb = self.roots[root].trees["vartree"].dbapi
2896 portdb = self.roots[root].trees["porttree"].dbapi
2897 # List of acceptable packages, ordered by type preference.
2898 matched_packages = []
2899 highest_version = None
2900 atom_cp = portage.dep_getkey(atom)
2901 existing_node = None
2903 usepkgonly = "--usepkgonly" in self.myopts
2904 empty = "empty" in self.myparams
2905 selective = "selective" in self.myparams
2907 noreplace = "--noreplace" in self.myopts
2908 # Behavior of the "selective" parameter depends on
2909 # whether or not a package matches an argument atom.
2910 # If an installed package provides an old-style
2911 # virtual that is no longer provided by an available
2912 # package, the installed package may match an argument
2913 # atom even though none of the available packages do.
2914 # Therefore, "selective" logic does not consider
2915 # whether or not an installed package matches an
2916 # argument atom. It only considers whether or not
2917 # available packages match argument atoms, which is
2918 # represented by the found_available_arg flag.
2919 found_available_arg = False
# Two passes: first look for an existing graph node, then fall back
# to a normal db scan.
2920 for find_existing_node in True, False:
2923 for db, pkg_type, built, installed, db_keys in dbs:
2926 if installed and not find_existing_node:
2927 want_reinstall = reinstall or empty or \
2928 (found_available_arg and not selective)
2929 if want_reinstall and matched_packages:
2931 if hasattr(db, "xmatch"):
2932 cpv_list = db.xmatch("match-all", atom)
2934 cpv_list = db.match(atom)
2936 # USE=multislot can make an installed package appear as if
2937 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
2938 # won't do any good as long as USE=multislot is enabled since
2939 # the newly built package still won't have the expected slot.
2940 # Therefore, assume that such SLOT dependencies are already
2941 # satisfied rather than forcing a rebuild.
2942 if installed and not cpv_list and matched_packages \
2943 and portage.dep.dep_getslot(atom):
2944 for pkg in matched_packages:
2945 if vardb.cpv_exists(pkg.cpv):
2946 cpv_list = [pkg.cpv]
2951 pkg_status = "merge"
2952 if installed or onlydeps:
2953 pkg_status = "nomerge"
2956 for cpv in cpv_list:
2957 # Make --noreplace take precedence over --newuse.
2958 if not installed and noreplace and \
2959 cpv in vardb.match(atom):
2960 # If the installed version is masked, it may
2961 # be necessary to look at lower versions,
2962 # in case there is a visible downgrade.
2964 reinstall_for_flags = None
2965 cache_key = (pkg_type, root, cpv, pkg_status)
2966 calculated_use = True
2967 pkg = self._pkg_cache.get(cache_key)
2969 calculated_use = False
2971 metadata = dict(izip(self._mydbapi_keys,
2972 db.aux_get(cpv, self._mydbapi_keys)))
2975 if not built and ("?" in metadata["LICENSE"] or \
2976 "?" in metadata["PROVIDE"]):
2977 # This is avoided whenever possible because
2978 # it's expensive. It only needs to be done here
2979 # if it has an effect on visibility.
2980 pkgsettings.setcpv(cpv, mydb=metadata)
2981 metadata["USE"] = pkgsettings["PORTAGE_USE"]
2982 calculated_use = True
2983 pkg = Package(built=built, cpv=cpv,
2984 installed=installed, metadata=metadata,
2985 onlydeps=onlydeps, root=root, type_name=pkg_type)
2986 self._pkg_cache[pkg] = pkg
# Check whether this candidate satisfies any command-line
# argument atom (only meaningful for the target root).
2988 if root == self.target_root:
2990 myarg = self._iter_atoms_for_pkg(pkg).next()
2991 except StopIteration:
2993 except portage.exception.InvalidDependString:
2995 # masked by corruption
2997 if not installed and myarg:
2998 found_available_arg = True
2999 if not installed or (installed and matched_packages):
3000 # Only enforce visibility on installed packages
3001 # if there is at least one other visible package
3002 # available. By filtering installed masked packages
3003 # here, packages that have been masked since they
3004 # were installed can be automatically downgraded
3005 # to an unmasked version.
3007 if not visible(pkgsettings, pkg):
3009 except portage.exception.InvalidDependString:
3013 # Enable upgrade or downgrade to a version
3014 # with visible KEYWORDS when the installed
3015 # version is masked by KEYWORDS, but never
3016 # reinstall the same exact version only due
3017 # to a KEYWORDS mask.
3018 if installed and matched_packages and \
3019 pkgsettings.getMissingKeywords(
3020 pkg.cpv, pkg.metadata):
3021 different_version = None
3022 for avail_pkg in matched_packages:
3023 if not portage.dep.cpvequal(
3024 pkg.cpv, avail_pkg.cpv):
3025 different_version = avail_pkg
3027 if different_version is not None:
3028 # Only reinstall for KEYWORDS if
3029 # it's not the same version.
3032 if not built and not calculated_use:
3033 # This is avoided whenever possible because
# it's expensive (see the LICENSE/PROVIDE case above).
3035 pkgsettings.setcpv(cpv, mydb=pkg.metadata)
3036 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
3037 if pkg.cp == atom_cp:
3038 if highest_version is None:
3039 highest_version = pkg
3040 elif pkg > highest_version:
3041 highest_version = pkg
3042 # At this point, we've found the highest visible
3043 # match from the current repo. Any lower versions
3044 # from this repo are ignored, so the loop
3045 # will always end with a break statement below
3047 if find_existing_node:
3048 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
3051 cpv_slot = "%s:%s" % \
3052 (e_pkg.cpv, e_pkg.metadata["SLOT"])
3053 if portage.dep.match_from_list(atom, [cpv_slot]):
3054 if highest_version and \
3055 e_pkg.cp == atom_cp and \
3056 e_pkg < highest_version and \
3057 e_pkg.slot_atom != highest_version.slot_atom:
3058 # There is a higher version available in a
3059 # different slot, so this existing node is
3063 matched_packages.append(e_pkg)
3064 existing_node = e_pkg
3066 # Compare built package to current config and
3067 # reject the built package if necessary.
3068 if built and not installed and \
3069 ("--newuse" in self.myopts or \
3070 "--reinstall" in self.myopts):
3071 iuses = set(filter_iuse_defaults(
3072 pkg.metadata["IUSE"].split()))
3073 old_use = pkg.metadata["USE"].split()
3075 if myeb and not usepkgonly:
3078 pkgsettings.setcpv(myeb, mydb=mydb)
3080 pkgsettings.setcpv(cpv, mydb=mydb)
3081 now_use = pkgsettings["PORTAGE_USE"].split()
3082 forced_flags = set()
3083 forced_flags.update(pkgsettings.useforce)
3084 forced_flags.update(pkgsettings.usemask)
3086 if myeb and not usepkgonly:
3087 cur_iuse = set(filter_iuse_defaults(
3088 portdb.aux_get(myeb,
3089 ["IUSE"])[0].split()))
3090 if self._reinstall_for_flags(forced_flags,
3094 # Compare current config to installed package
3095 # and do not reinstall if possible.
3096 if not installed and \
3097 ("--newuse" in self.myopts or \
3098 "--reinstall" in self.myopts) and \
3099 cpv in vardb.match(atom):
3100 pkgsettings.setcpv(cpv, mydb=pkg.metadata)
3101 forced_flags = set()
3102 forced_flags.update(pkgsettings.useforce)
3103 forced_flags.update(pkgsettings.usemask)
3104 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3105 old_iuse = set(filter_iuse_defaults(
3106 vardb.aux_get(cpv, ["IUSE"])[0].split()))
3107 cur_use = pkgsettings["PORTAGE_USE"].split()
3108 cur_iuse = set(filter_iuse_defaults(
3109 pkg.metadata["IUSE"].split()))
3110 reinstall_for_flags = \
3111 self._reinstall_for_flags(
3112 forced_flags, old_use, old_iuse,
3114 if reinstall_for_flags:
3118 matched_packages.append(pkg)
3119 if reinstall_for_flags:
# Remember which USE-flag changes forced the reinstall so
# they can be highlighted in the merge-list display.
3120 self._reinstall_nodes[pkg] = \
3124 if not matched_packages:
3127 if "--debug" in self.myopts:
3128 for pkg in matched_packages:
3129 print (pkg.type_name + ":").rjust(10), pkg.cpv
3131 # Filter out any old-style virtual matches if they are
3132 # mixed with new-style virtual matches.
3133 cp = portage.dep_getkey(atom)
3134 if len(matched_packages) > 1 and \
3135 "virtual" == portage.catsplit(cp)[0]:
3136 for pkg in matched_packages:
3139 # Got a new-style virtual, so filter
3140 # out any old-style virtuals.
3141 matched_packages = [pkg for pkg in matched_packages \
3145 if len(matched_packages) > 1:
# Multiple candidates remain (e.g. same cpv from different
# dbs): keep only those equal to the overall best version.
3146 bestmatch = portage.best(
3147 [pkg.cpv for pkg in matched_packages])
3148 matched_packages = [pkg for pkg in matched_packages \
3149 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3151 # ordered by type preference ("ebuild" type is the last resort)
3152 return matched_packages[-1], existing_node
3154 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
# Resolve `atom` strictly against packages already in the graph (or
# installed and not scheduled for replacement); used by
# _complete_graph() to avoid pulling in anything new.
# NOTE(review): the docstring delimiters and some fallback lines are
# elided from this view.
3156 Select packages that have already been added to the graph or
3157 those that are installed and have not been scheduled for
3160 graph_db = self._graph_trees[root]["porttree"].dbapi
3161 matches = graph_db.match(atom)
3164 cpv = matches[-1] # highest match
3165 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
3166 graph_db.aux_get(cpv, ["SLOT"])[0])
3167 e_pkg = self._slot_pkg_map[root].get(slot_atom)
3170 # Since this cpv exists in the graph_db,
3171 # we must have a cached Package instance.
3172 cache_key = ("installed", root, cpv, "nomerge")
3173 return (self._pkg_cache[cache_key], None)
3175 def _complete_graph(self):
# NOTE(review): the docstring delimiters are elided from this view.
3177 Add any deep dependencies of required sets (args, system, world) that
3178 have not been pulled into the graph yet. This ensures that the graph
3179 is consistent such that initially satisfied deep dependencies are not
3180 broken in the new graph. Initially unsatisfied dependencies are
3181 irrelevant since we only want to avoid breaking dependencies that are
3184 Since this method can consume enough time to disturb users, it is
3185 currently only enabled by the --complete-graph option.
3187 if "complete" not in self.myparams:
3188 # Skip this to avoid consuming enough time to disturb users.
3191 if "--buildpkgonly" in self.myopts or \
3192 "recurse" not in self.myparams:
3195 # Put the depgraph into a mode that causes it to only
3196 # select packages that have already been added to the
3197 # graph or those that are installed and have not been
3198 # scheduled for replacement. Also, toggle the "deep"
3199 # parameter so that all dependencies are traversed and
3201 self._select_atoms = self._select_atoms_from_graph
3202 self._select_package = self._select_pkg_from_graph
3203 already_deep = "deep" in self.myparams
3204 if not already_deep:
3205 self.myparams.add("deep")
3207 for root in self.roots:
3208 required_set_names = self._required_set_names.copy()
# Sets already processed for the target root need not be redone
# when deep/empty traversal has covered them.
3209 if root == self.target_root and \
3210 (already_deep or "empty" in self.myparams):
3211 required_set_names.difference_update(self._sets)
3212 if not required_set_names and not self._ignored_deps:
3214 root_config = self.roots[root]
3215 setconfig = root_config.setconfig
3217 # Reuse existing SetArg instances when available.
3218 for arg in self.digraph.root_nodes():
3219 if not isinstance(arg, SetArg):
3221 if arg.root_config != root_config:
3223 if arg.name in required_set_names:
3225 required_set_names.remove(arg.name)
3226 # Create new SetArg instances only when necessary.
3227 for s in required_set_names:
3228 expanded_set = InternalPackageSet(
3229 initial_atoms=setconfig.getSetAtoms(s))
3230 atom = SETPREFIX + s
3231 args.append(SetArg(arg=atom, set=expanded_set,
3232 root_config=root_config))
3233 vardb = root_config.trees["vartree"].dbapi
# Queue every set atom for (re)processing by _create_graph().
3235 for atom in arg.set:
3236 self._dep_stack.append(
3237 Dependency(atom=atom, root=root, parent=arg))
3238 if self._ignored_deps:
3239 self._dep_stack.extend(self._ignored_deps)
3240 self._ignored_deps = []
3241 if not self._create_graph(allow_unsatisfied=True):
3243 # Check the unsatisfied deps to see if any initially satisfied deps
3244 # will become unsatisfied due to an upgrade. Initially unsatisfied
3245 # deps are irrelevant since we only want to avoid breaking deps
3246 # that are initially satisfied.
3247 while self._unsatisfied_deps:
3248 dep = self._unsatisfied_deps.pop()
3249 matches = vardb.match_pkgs(dep.atom)
3251 # Initially unsatisfied.
3253 # A scheduled installation broke a deep dependency.
3254 # Add the installed package to the graph so that it
3255 # will be appropriately reported as a slot collision
3256 # (possibly solvable via backtracking).
3257 pkg = matches[-1] # highest match
3258 if not self._add_pkg(pkg, dep.parent,
3259 priority=dep.priority, depth=dep.depth):
3261 if not self._create_graph(allow_unsatisfied=True):
# Phase 1: collect blocker atoms (cached per-package by COUNTER in
# BlockerCache) for every package in each root's final db. Phase 2:
# resolve each blocker against the initial (installed) and final
# (planned) dbs, scheduling "uninstall" tasks or recording unsolvable
# blockers as needed.
# NOTE(review): non-contiguous embedded line numbers — the outer package
# loop header, several `try:`/`continue` lines and return statements are
# elided from this listing; code kept byte-identical.
3265 def validate_blockers(self):
3266 """Remove any blockers from the digraph that do not match any of the
3267 packages within the graph. If necessary, create hard deps to ensure
3268 correct merge order such that mutually blocking packages are never
3269 installed simultaneously."""
3271 if "--buildpkgonly" in self.myopts or \
3272 "--nodeps" in self.myopts:
3275 #if "deep" in self.myparams:
3277 # Pull in blockers from all installed packages that haven't already
3278 # been pulled into the depgraph. This is not enabled by default
3279 # due to the performance penalty that is incurred by all the
3280 # additional dep_check calls that are required.
3282 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
3283 for myroot in self.trees:
3284 vardb = self.trees[myroot]["vartree"].dbapi
3285 portdb = self.trees[myroot]["porttree"].dbapi
3286 pkgsettings = self.pkgsettings[myroot]
3287 final_db = self.mydbapi[myroot]
# stale_cache tracks cached cpvs not seen this pass so they can be pruned.
3288 blocker_cache = BlockerCache(myroot, vardb)
3289 stale_cache = set(blocker_cache)
3292 stale_cache.discard(cpv)
3293 blocker_atoms = None
3295 if self.digraph.contains(pkg):
3299 self._blocker_parents.child_nodes(pkg))
3304 self._irrelevant_blockers.child_nodes(pkg))
3307 if blockers is not None:
# Normalize graph blockers back to "!"-prefixed atom strings for
# comparison with the cached form.
3308 blockers = set("!" + blocker.atom \
3309 for blocker in blockers)
3311 # If this node has any blockers, create a "nomerge"
3312 # node for it so that they can be enforced.
3313 self.spinner.update()
3314 blocker_data = blocker_cache.get(cpv)
# A COUNTER mismatch means the cached entry is for a different
# install of the same cpv, so it must be discarded.
3315 if blocker_data is not None and \
3316 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3319 # If blocker data from the graph is available, use
3320 # it to validate the cache and update the cache if
3322 if blocker_data is not None and \
3323 blockers is not None:
3324 if not blockers.symmetric_difference(
3325 blocker_data.atoms):
3329 if blocker_data is None and \
3330 blockers is not None:
3331 # Re-use the blockers from the graph.
3332 blocker_atoms = sorted(blockers)
3333 counter = long(pkg.metadata["COUNTER"])
3335 blocker_cache.BlockerData(counter, blocker_atoms)
3336 blocker_cache[pkg.cpv] = blocker_data
3340 blocker_atoms = blocker_data.atoms
3342 myuse = pkg.metadata["USE"].split()
3343 # Use aux_get() to trigger FakeVartree global
3344 # updates on *DEPEND when appropriate.
3345 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3346 # It is crucial to pass in final_db here in order to
3347 # optimize dep_check calls by eliminating atoms via
3348 # dep_wordreduce and dep_eval calls.
# Strictness is relaxed for installed packages, then restored below.
3350 portage.dep._dep_check_strict = False
3352 success, atoms = portage.dep_check(depstr,
3353 final_db, pkgsettings, myuse=myuse,
3354 trees=self._graph_trees, myroot=myroot)
3355 except Exception, e:
3356 if isinstance(e, SystemExit):
3358 # This is helpful, for example, if a ValueError
3359 # is thrown from cpv_expand due to multiple
3360 # matches (this can happen if an atom lacks a
3362 show_invalid_depstring_notice(
3363 pkg, depstr, str(e))
3367 portage.dep._dep_check_strict = True
3369 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3370 if replacement_pkg and \
3371 replacement_pkg[0].operation == "merge":
3372 # This package is being replaced anyway, so
3373 # ignore invalid dependencies so as not to
3374 # annoy the user too much (otherwise they'd be
3375 # forced to manually unmerge it first).
3377 show_invalid_depstring_notice(pkg, depstr, atoms)
3379 blocker_atoms = [myatom for myatom in atoms \
3380 if myatom.startswith("!")]
3381 blocker_atoms.sort()
3382 counter = long(pkg.metadata["COUNTER"])
3383 blocker_cache[cpv] = \
3384 blocker_cache.BlockerData(counter, blocker_atoms)
# Strip the leading "!" when constructing Blocker nodes.
3386 for myatom in blocker_atoms:
3387 blocker = Blocker(atom=myatom[1:], root=myroot)
3388 self._blocker_parents.add(blocker, pkg)
3389 for cpv in stale_cache:
3390 del blocker_cache[cpv]
3391 blocker_cache.flush()
3394 # Discard any "uninstall" tasks scheduled by previous calls
3395 # to this method, since those tasks may not make sense given
3396 # the current graph state.
3397 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
3398 if previous_uninstall_tasks:
3399 self._blocker_uninstalls = digraph()
3400 self.digraph.difference_update(previous_uninstall_tasks)
3402 for blocker in self._blocker_parents.leaf_nodes():
3403 self.spinner.update()
3404 root_config = self.roots[blocker.root]
3405 virtuals = root_config.settings.getvirtuals()
3406 mytype, myroot, mydep = blocker
3407 initial_db = self.trees[myroot]["vartree"].dbapi
3408 final_db = self.mydbapi[myroot]
# A blocker on an old-style virtual is expanded to one atom per
# provider, unless a new-style virtual shadows it.
3410 provider_virtual = False
3411 if blocker.cp in virtuals and \
3412 not self._have_new_virt(blocker.root, blocker.cp):
3413 provider_virtual = True
3415 if provider_virtual:
3417 for provider_entry in virtuals[blocker.cp]:
3419 portage.dep_getkey(provider_entry)
3420 atoms.append(blocker.atom.replace(
3421 blocker.cp, provider_cp))
3423 atoms = [blocker.atom]
3425 blocked_initial = []
3427 blocked_initial.extend(initial_db.match_pkgs(atom))
3431 blocked_final.extend(final_db.match_pkgs(atom))
3433 if not blocked_initial and not blocked_final:
3434 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
3435 self._blocker_parents.remove(blocker)
3436 # Discard any parents that don't have any more blockers.
3437 for pkg in parent_pkgs:
3438 self._irrelevant_blockers.add(blocker, pkg)
3439 if not self._blocker_parents.child_nodes(pkg):
3440 self._blocker_parents.remove(pkg)
3442 for parent in self._blocker_parents.parent_nodes(blocker):
3443 unresolved_blocks = False
3444 depends_on_order = set()
3445 for pkg in blocked_initial:
3446 if pkg.slot_atom == parent.slot_atom:
3447 # TODO: Support blocks within slots in cases where it
3448 # might make sense. For example, a new version might
3449 # require that the old version be uninstalled at build
3452 if parent.installed:
3453 # Two currently installed packages conflict with
3454 # eachother. Ignore this case since the damage
3455 # is already done and this would be likely to
3456 # confuse users if displayed like a normal blocker.
3458 if parent.operation == "merge":
3459 # Maybe the blocked package can be replaced or simply
3460 # unmerged to resolve this block.
3461 depends_on_order.add((pkg, parent))
3463 # None of the above blocker resolutions techniques apply,
3464 # so apparently this one is unresolvable.
3465 unresolved_blocks = True
3466 for pkg in blocked_final:
3467 if pkg.slot_atom == parent.slot_atom:
3468 # TODO: Support blocks within slots.
3470 if parent.operation == "nomerge" and \
3471 pkg.operation == "nomerge":
3472 # This blocker will be handled the next time that a
3473 # merge of either package is triggered.
3476 # Maybe the blocking package can be
3477 # unmerged to resolve this block.
3478 if parent.operation == "merge" and pkg.installed:
3479 depends_on_order.add((pkg, parent))
3481 elif parent.operation == "nomerge":
3482 depends_on_order.add((parent, pkg))
3484 # None of the above blocker resolutions techniques apply,
3485 # so apparently this one is unresolvable.
3486 unresolved_blocks = True
3488 # Make sure we don't unmerge any package that have been pulled
3490 if not unresolved_blocks and depends_on_order:
3491 for inst_pkg, inst_task in depends_on_order:
3492 if self.digraph.contains(inst_pkg) and \
3493 self.digraph.parent_nodes(inst_pkg):
3494 unresolved_blocks = True
3497 if not unresolved_blocks and depends_on_order:
3498 for inst_pkg, inst_task in depends_on_order:
# Clone the installed package into an explicit "uninstall" task.
3499 uninst_task = Package(built=inst_pkg.built,
3500 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3501 metadata=inst_pkg.metadata,
3502 operation="uninstall", root=inst_pkg.root,
3503 type_name=inst_pkg.type_name)
3504 self._pkg_cache[uninst_task] = uninst_task
3505 # Enforce correct merge order with a hard dep.
3506 self.digraph.addnode(uninst_task, inst_task,
3507 priority=BlockerDepPriority.instance)
3508 # Count references to this blocker so that it can be
3509 # invalidated after nodes referencing it have been
3511 self._blocker_uninstalls.addnode(uninst_task, blocker)
3512 if not unresolved_blocks and not depends_on_order:
3513 self._irrelevant_blockers.add(blocker, parent)
3514 self._blocker_parents.remove_edge(blocker, parent)
3515 if not self._blocker_parents.parent_nodes(blocker):
3516 self._blocker_parents.remove(blocker)
3517 if not self._blocker_parents.child_nodes(parent):
3518 self._blocker_parents.remove(parent)
3519 if unresolved_blocks:
3520 self._unsolvable_blockers.add(blocker, parent)
# True when blocker conflicts are tolerable because no actual merge to
# the live system will occur (pretend/fetch/binary-only modes).
# NOTE(review): the return statements are elided from this listing
# (non-contiguous embedded line numbers); code kept byte-identical.
3524 def _accept_blocker_conflicts(self):
3526 for x in ("--buildpkgonly", "--fetchonly",
3527 "--fetch-all-uri", "--nodeps", "--pretend"):
3528 if x in self.myopts:
# Sorts mygraph.order in place so nodes with more parents come first
# (cmp-style comparator, Python 2 list.sort(cmp)).
# NOTE(review): `node_info` is used below without a visible initialization
# — the `node_info = {}` line appears to be elided from this listing
# (non-contiguous embedded line numbers); confirm against the original.
3533 def _merge_order_bias(self, mygraph):
3534 """Order nodes from highest to lowest overall reference count for
3535 optimal leaf node selection."""
3537 for node in mygraph.order:
3538 node_info[node] = len(mygraph.parent_nodes(node))
3539 def cmp_merge_preference(node1, node2):
# Descending by parent count: node2's count minus node1's.
3540 return node_info[node2] - node_info[node1]
3541 mygraph.order.sort(cmp_merge_preference)
# Public accessor for the serialized merge list; computes and caches it
# on first use, retrying while _serialize_tasks signals a retry.
# NOTE(review): the `try:` line and the final return (and any use of the
# `reversed` flag, which shadows the builtin but is part of the public
# interface) are elided from this listing; code kept byte-identical.
3543 def altlist(self, reversed=False):
3545 while self._serialized_tasks_cache is None:
3546 self._resolve_conflicts()
3548 self._serialized_tasks_cache = self._serialize_tasks()
3549 except self._serialize_tasks_retry:
# Return a copy so callers cannot mutate the cache.
3552 retlist = self._serialized_tasks_cache[:]
# Run graph completion and blocker validation; either failing indicates
# an internal inconsistency, surfaced via _unknown_internal_error.
3557 def _resolve_conflicts(self):
3558 if not self._complete_graph():
3559 raise self._unknown_internal_error()
3561 if not self.validate_blockers():
3562 raise self._unknown_internal_error()
# Core merge-order scheduler: repeatedly selects leaf nodes from a copy
# of the depgraph (progressively relaxing ignore-priority and root-node
# criteria), interleaves scheduled Uninstall tasks after their blocking
# packages, and returns the ordered task list. Raises
# _serialize_tasks_retry to request a re-run with "complete" mode, and
# _unknown_internal_error on circular deps / unresolved blockers.
# NOTE(review): this listing has many elided lines (non-contiguous
# embedded line numbers): loop headers, `continue`/`break`/`return`
# statements, `try:` lines and some assignments (e.g. retlist,
# prefer_asap, asap_nodes initialization) are missing from view; code
# kept byte-identical.
3564 def _serialize_tasks(self):
3565 mygraph=self.digraph.copy()
3566 # Prune "nomerge" root nodes if nothing depends on them, since
3567 # otherwise they slow down merge order calculation. Don't remove
3568 # non-root nodes since they help optimize merge order in some cases
3569 # such as revdep-rebuild.
3570 removed_nodes = set()
3572 for node in mygraph.root_nodes():
3573 if not isinstance(node, Package) or \
3574 node.installed or node.onlydeps:
3575 removed_nodes.add(node)
3577 self.spinner.update()
3578 mygraph.difference_update(removed_nodes)
3579 if not removed_nodes:
3581 removed_nodes.clear()
3582 self._merge_order_bias(mygraph)
3583 def cmp_circular_bias(n1, n2):
3585 RDEPEND is stronger than PDEPEND and this function
3586 measures such a strength bias within a circular
3587 dependency relationship.
3589 n1_n2_medium = n2 in mygraph.child_nodes(n1,
3590 ignore_priority=DepPriority.MEDIUM_SOFT)
3591 n2_n1_medium = n1 in mygraph.child_nodes(n2,
3592 ignore_priority=DepPriority.MEDIUM_SOFT)
3593 if n1_n2_medium == n2_n1_medium:
3598 myblocker_uninstalls = self._blocker_uninstalls.copy()
3600 # Contains uninstall tasks that have been scheduled to
3601 # occur after overlapping blockers have been installed.
3602 scheduled_uninstalls = set()
3603 # Contains any Uninstall tasks that have been ignored
3604 # in order to avoid the circular deps code path. These
3605 # correspond to blocker conflicts that could not be
3607 ignored_uninstall_tasks = set()
3608 have_uninstall_task = False
3609 complete = "complete" in self.myparams
3610 myblocker_parents = self._blocker_parents.copy()
3613 def get_nodes(**kwargs):
3615 Returns leaf nodes excluding Uninstall instances
3616 since those should be executed as late as possible.
3618 return [node for node in mygraph.leaf_nodes(**kwargs) \
3619 if isinstance(node, Package) and \
3620 (node.operation != "uninstall" or \
3621 node in scheduled_uninstalls)]
3623 # sys-apps/portage needs special treatment if ROOT="/"
3625 from portage.const import PORTAGE_PACKAGE_ATOM
3626 runtime_deps = InternalPackageSet(
3627 initial_atoms=[PORTAGE_PACKAGE_ATOM])
3628 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
3629 PORTAGE_PACKAGE_ATOM)
3630 replacement_portage = self.mydbapi[running_root].match_pkgs(
3631 PORTAGE_PACKAGE_ATOM)
3634 running_portage = running_portage[0]
3636 running_portage = None
3638 if replacement_portage:
3639 replacement_portage = replacement_portage[0]
3641 replacement_portage = None
3643 if replacement_portage == running_portage:
3644 replacement_portage = None
3646 if running_portage is not None:
# Collect portage's own runtime deps so they are never uninstalled
# out from under the running instance.
3648 portage_rdepend = self._select_atoms_highest_available(
3649 running_root, running_portage.metadata["RDEPEND"],
3650 myuse=running_portage.metadata["USE"].split(),
3651 parent=running_portage, strict=False)
3652 except portage.exception.InvalidDependString, e:
3653 portage.writemsg("!!! Invalid RDEPEND in " + \
3654 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3655 (running_root, running_portage.cpv, e), noiselevel=-1)
3657 portage_rdepend = []
3658 runtime_deps.update(atom for atom in portage_rdepend \
3659 if not atom.startswith("!"))
# Selection criteria, from strictest (None) to loosest.
3661 ignore_priority_soft_range = [None]
3662 ignore_priority_soft_range.extend(
3663 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
3664 tree_mode = "--tree" in self.myopts
3665 # Tracks whether or not the current iteration should prefer asap_nodes
3666 # if available. This is set to False when the previous iteration
3667 # failed to select any nodes. It is reset whenever nodes are
3668 # successfully selected.
3671 # By default, try to avoid selecting root nodes whenever possible. This
3672 # helps ensure that the maximimum possible number of soft dependencies
3673 # have been removed from the graph before their parent nodes have
3674 # selected. This is especially important when those dependencies are
3675 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
3676 # CHOST has been changed (like when building a stage3 from a stage2).
3677 accept_root_node = False
3679 # State of prefer_asap and accept_root_node flags for successive
3680 # iterations that loosen the criteria for node selection.
3682 # iteration prefer_asap accept_root_node
3687 # If no nodes are selected on the 3rd iteration, it is due to
3688 # unresolved blockers or circular dependencies.
3690 while not mygraph.empty():
3691 self.spinner.update()
3692 selected_nodes = None
3693 ignore_priority = None
3694 if prefer_asap and asap_nodes:
3695 """ASAP nodes are merged before their soft deps."""
3696 asap_nodes = [node for node in asap_nodes \
3697 if mygraph.contains(node)]
3698 for node in asap_nodes:
3699 if not mygraph.child_nodes(node,
3700 ignore_priority=DepPriority.SOFT):
3701 selected_nodes = [node]
3702 asap_nodes.remove(node)
3704 if not selected_nodes and \
3705 not (prefer_asap and asap_nodes):
3706 for ignore_priority in ignore_priority_soft_range:
3707 nodes = get_nodes(ignore_priority=ignore_priority)
3711 if ignore_priority is None and not tree_mode:
3712 # Greedily pop all of these nodes since no relationship
3713 # has been ignored. This optimization destroys --tree
3714 # output, so it's disabled in reversed mode.
3715 selected_nodes = nodes
3717 # For optimal merge order:
3718 # * Only pop one node.
3719 # * Removing a root node (node without a parent)
3720 # will not produce a leaf node, so avoid it.
3722 if mygraph.parent_nodes(node):
3723 # found a non-root node
3724 selected_nodes = [node]
3726 if not selected_nodes and \
3727 (accept_root_node or ignore_priority is None):
3728 # settle for a root node
3729 selected_nodes = [nodes[0]]
3731 if not selected_nodes:
3732 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
3734 """Recursively gather a group of nodes that RDEPEND on
3735 eachother. This ensures that they are merged as a group
3736 and get their RDEPENDs satisfied as soon as possible."""
3737 def gather_deps(ignore_priority,
3738 mergeable_nodes, selected_nodes, node):
3739 if node in selected_nodes:
3741 if node not in mergeable_nodes:
3743 if node == replacement_portage and \
3744 mygraph.child_nodes(node,
3745 ignore_priority=DepPriority.MEDIUM_SOFT):
3746 # Make sure that portage always has all of it's
3747 # RDEPENDs installed first.
3749 selected_nodes.add(node)
3750 for child in mygraph.child_nodes(node,
3751 ignore_priority=ignore_priority):
3752 if not gather_deps(ignore_priority,
3753 mergeable_nodes, selected_nodes, child):
3756 mergeable_nodes = set(nodes)
3757 if prefer_asap and asap_nodes:
3759 for ignore_priority in xrange(DepPriority.SOFT,
3760 DepPriority.MEDIUM_SOFT + 1):
3762 if nodes is not asap_nodes and \
3763 not accept_root_node and \
3764 not mygraph.parent_nodes(node):
3766 selected_nodes = set()
3767 if gather_deps(ignore_priority,
3768 mergeable_nodes, selected_nodes, node):
3771 selected_nodes = None
3775 # If any nodes have been selected here, it's always
3776 # possible that anything up to a MEDIUM_SOFT priority
3777 # relationship has been ignored. This state is recorded
3778 # in ignore_priority so that relevant nodes will be
3779 # added to asap_nodes when appropriate.
3781 ignore_priority = DepPriority.MEDIUM_SOFT
3783 if prefer_asap and asap_nodes and not selected_nodes:
3784 # We failed to find any asap nodes to merge, so ignore
3785 # them for the next iteration.
3789 if not selected_nodes and not accept_root_node:
3790 # Maybe there are only root nodes left, so accept them
3791 # for the next iteration.
3792 accept_root_node = True
3795 if selected_nodes and ignore_priority > DepPriority.SOFT:
3796 # Try to merge ignored medium deps as soon as possible.
3797 for node in selected_nodes:
3798 children = set(mygraph.child_nodes(node))
3799 soft = children.difference(
3800 mygraph.child_nodes(node,
3801 ignore_priority=DepPriority.SOFT))
3802 medium_soft = children.difference(
3803 mygraph.child_nodes(node,
3804 ignore_priority=DepPriority.MEDIUM_SOFT))
3805 medium_soft.difference_update(soft)
3806 for child in medium_soft:
3807 if child in selected_nodes:
3809 if child in asap_nodes:
3811 asap_nodes.append(child)
3813 if selected_nodes and len(selected_nodes) > 1:
3814 if not isinstance(selected_nodes, list):
3815 selected_nodes = list(selected_nodes)
3816 selected_nodes.sort(cmp_circular_bias)
3818 if not selected_nodes and not myblocker_uninstalls.is_empty():
3819 # An Uninstall task needs to be executed in order to
3820 # avoid conflict if possible.
3821 min_parent_deps = None
3823 for task in myblocker_uninstalls.leaf_nodes():
3824 # Do some sanity checks so that system or world packages
3825 # don't get uninstalled inappropriately here (only really
3826 # necessary when --complete-graph has not been enabled).
3828 if task in ignored_uninstall_tasks:
3831 root_config = self.roots[task.root]
3832 inst_pkg = self._pkg_cache[
3833 ("installed", task.root, task.cpv, "nomerge")]
3835 if self.digraph.contains(inst_pkg):
3838 if running_root == task.root:
3839 # Never uninstall sys-apps/portage or it's essential
3840 # dependencies, except through replacement.
3842 runtime_dep_atoms = \
3843 list(runtime_deps.iterAtomsForPackage(task))
3844 except portage.exception.InvalidDependString, e:
3845 portage.writemsg("!!! Invalid PROVIDE in " + \
3846 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3847 (task.root, task.cpv, e), noiselevel=-1)
3851 # Don't uninstall a runtime dep if it appears
3852 # to be the only suitable one installed.
3854 vardb = root_config.trees["vartree"].dbapi
3855 for atom in runtime_dep_atoms:
3856 other_version = None
3857 for pkg in vardb.match_pkgs(atom):
# Same cpv *and* same COUNTER identifies the task's own
# installed instance — not an alternative provider.
3858 if pkg.cpv == task.cpv and \
3859 pkg.metadata["COUNTER"] == \
3860 task.metadata["COUNTER"]:
3864 if other_version is None:
3870 # For packages in the system set, don't take
3871 # any chances. If the conflict can't be resolved
3872 # by a normal replacement operation then abort.
3875 for atom in root_config.sets[
3876 "system"].iterAtomsForPackage(task):
3879 except portage.exception.InvalidDependString, e:
3880 portage.writemsg("!!! Invalid PROVIDE in " + \
3881 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3882 (task.root, task.cpv, e), noiselevel=-1)
3888 # Note that the world check isn't always
3889 # necessary since self._complete_graph() will
3890 # add all packages from the system and world sets to the
3891 # graph. This just allows unresolved conflicts to be
3892 # detected as early as possible, which makes it possible
3893 # to avoid calling self._complete_graph() when it is
3894 # unnecessary due to blockers triggering an abortion.
3896 # For packages in the world set, go ahead an uninstall
3897 # when necessary, as long as the atom will be satisfied
3898 # in the final state.
3899 graph_db = self.mydbapi[task.root]
3902 for atom in root_config.sets[
3903 "world"].iterAtomsForPackage(task):
3905 for pkg in graph_db.match_pkgs(atom):
3913 except portage.exception.InvalidDependString, e:
3914 portage.writemsg("!!! Invalid PROVIDE in " + \
3915 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3916 (task.root, task.cpv, e), noiselevel=-1)
3922 # Check the deps of parent nodes to ensure that
3923 # the chosen task produces a leaf node. Maybe
3924 # this can be optimized some more to make the
3925 # best possible choice, but the current algorithm
3926 # is simple and should be near optimal for most
3929 for parent in mygraph.parent_nodes(task):
3930 parent_deps.update(mygraph.child_nodes(parent,
3931 ignore_priority=DepPriority.MEDIUM_SOFT))
3932 parent_deps.remove(task)
3933 if min_parent_deps is None or \
3934 len(parent_deps) < min_parent_deps:
3935 min_parent_deps = len(parent_deps)
3938 if uninst_task is not None:
3939 # The uninstall is performed only after blocking
3940 # packages have been merged on top of it. File
3941 # collisions between blocking packages are detected
3942 # and removed from the list of files to be uninstalled.
3943 scheduled_uninstalls.add(uninst_task)
3944 parent_nodes = mygraph.parent_nodes(uninst_task)
3946 # Reverse the parent -> uninstall edges since we want
3947 # to do the uninstall after blocking packages have
3948 # been merged on top of it.
3949 mygraph.remove(uninst_task)
3950 for blocked_pkg in parent_nodes:
3951 mygraph.add(blocked_pkg, uninst_task,
3952 priority=BlockerDepPriority.instance)
3954 # None of the Uninstall tasks are acceptable, so
3955 # the corresponding blockers are unresolvable.
3956 # We need to drop an Uninstall task here in order
3957 # to avoid the circular deps code path, but the
3958 # blocker will still be counted as an unresolved
3960 for node in myblocker_uninstalls.leaf_nodes():
3962 mygraph.remove(node)
3966 ignored_uninstall_tasks.add(node)
3969 # After dropping an Uninstall task, reset
3970 # the state variables for leaf node selection and
3971 # continue trying to select leaf nodes.
3973 accept_root_node = False
3976 if not selected_nodes:
3977 self._circular_deps_for_display = mygraph
3978 raise self._unknown_internal_error()
3980 # At this point, we've succeeded in selecting one or more nodes, so
3981 # it's now safe to reset the prefer_asap and accept_root_node flags
3982 # to their default states.
3984 accept_root_node = False
3986 mygraph.difference_update(selected_nodes)
3988 for node in selected_nodes:
3989 if isinstance(node, Package) and \
3990 node.operation == "nomerge":
3993 # Handle interactions between blockers
3994 # and uninstallation tasks.
3995 solved_blockers = set()
3997 if isinstance(node, Package) and \
3998 "uninstall" == node.operation:
3999 have_uninstall_task = True
4001 scheduled_uninstalls.remove(uninst_task)
4003 vardb = self.trees[node.root]["vartree"].dbapi
4004 previous_cpv = vardb.match(node.slot_atom)
4006 # The package will be replaced by this one, so remove
4007 # the corresponding Uninstall task if necessary.
4008 previous_cpv = previous_cpv[0]
4010 ("installed", node.root, previous_cpv, "uninstall")
4012 mygraph.remove(uninst_task)
4015 scheduled_uninstalls.discard(uninst_task)
4017 if uninst_task is not None and \
4018 uninst_task not in ignored_uninstall_tasks and \
4019 myblocker_uninstalls.contains(uninst_task):
4020 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
4021 myblocker_uninstalls.remove(uninst_task)
4022 # Discard any blockers that this Uninstall solves.
4023 for blocker in blocker_nodes:
4024 if not myblocker_uninstalls.child_nodes(blocker):
4025 myblocker_uninstalls.remove(blocker)
4026 solved_blockers.add(blocker)
4028 retlist.append(node)
4030 if isinstance(node, Package) and \
4031 "uninstall" == node.operation:
4032 # Include satisfied blockers in the merge list so
4033 # that the user can see why the package had to be
4034 # uninstalled in advance rather than through
4036 for blocker in solved_blockers:
4037 retlist.append(Blocker(atom=blocker.atom,
4038 root=blocker.root, satisfied=True))
4040 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
4041 for node in myblocker_uninstalls.root_nodes():
4042 unsolvable_blockers.add(node)
4044 for blocker in unsolvable_blockers:
4045 retlist.append(blocker)
4047 # If any Uninstall tasks need to be executed in order
4048 # to avoid a conflict, complete the graph with any
4049 # dependencies that may have been initially
4050 # neglected (to ensure that unsafe Uninstall tasks
4051 # are properly identified and blocked from execution).
4052 if have_uninstall_task and \
4054 not unsolvable_blockers:
4055 self.myparams.add("complete")
4056 raise self._serialize_tasks_retry("")
4058 if unsolvable_blockers and \
4059 not self._accept_blocker_conflicts():
4060 self._unsatisfied_blockers_for_display = unsolvable_blockers
4061 self._serialized_tasks_cache = retlist[:]
4062 raise self._unknown_internal_error()
4064 if self._slot_collision_info and \
4065 not self._accept_blocker_conflicts():
4066 self._serialized_tasks_cache = retlist[:]
4067 raise self._unknown_internal_error()
# Report a circular-dependency failure: prune acyclic root nodes, force
# --tree display of the remaining cycle members, then print the graph
# debug dump plus a hint about USE-conditional deps.
# NOTE(review): non-contiguous embedded line numbers — the pruning loop
# header, display_order initialization and some conditionals are elided
# from this listing; code kept byte-identical.
4071 def _show_circular_deps(self, mygraph):
4072 # No leaf nodes are available, so we have a circular
4073 # dependency panic situation. Reduce the noise level to a
4074 # minimum via repeated elimination of root nodes since they
4075 # have no parents and thus can not be part of a cycle.
4077 root_nodes = mygraph.root_nodes(
4078 ignore_priority=DepPriority.MEDIUM_SOFT)
4081 mygraph.difference_update(root_nodes)
4082 # Display the USE flags that are enabled on nodes that are part
4083 # of dependency cycles in case that helps the user decide to
4084 # disable some of them.
4086 tempgraph = mygraph.copy()
4087 while not tempgraph.empty():
4088 nodes = tempgraph.leaf_nodes()
4090 node = tempgraph.order[0]
4093 display_order.append(node)
4094 tempgraph.remove(node)
4095 display_order.reverse()
# Force verbose-neutral tree output for the cycle display.
4096 self.myopts.pop("--quiet", None)
4097 self.myopts.pop("--verbose", None)
4098 self.myopts["--tree"] = True
4099 portage.writemsg("\n\n", noiselevel=-1)
4100 self.display(display_order)
4101 prefix = colorize("BAD", " * ")
4102 portage.writemsg("\n", noiselevel=-1)
4103 portage.writemsg(prefix + "Error: circular dependencies:\n",
4105 portage.writemsg("\n", noiselevel=-1)
4106 mygraph.debug_print()
4107 portage.writemsg("\n", noiselevel=-1)
4108 portage.writemsg(prefix + "Note that circular dependencies " + \
4109 "can often be avoided by temporarily\n", noiselevel=-1)
4110 portage.writemsg(prefix + "disabling USE flags that trigger " + \
4111 "optional dependencies.\n", noiselevel=-1)
# Re-display the cached serialized task list, reversing it for --tree
# mode (tree output is rendered depth-first from the end).
4113 def _show_merge_list(self):
4114 if self._serialized_tasks_cache is not None:
4115 display_list = self._serialized_tasks_cache[:]
4116 if "--tree" in self.myopts:
4117 display_list.reverse()
4118 self.display(display_list)
# Show the merge list followed by a wrapped error message explaining the
# blocker conflict; point non-quiet users at the blocker documentation.
# (The `blockers` argument is not referenced in the visible lines.)
4120 def _show_unsatisfied_blockers(self, blockers):
4121 self._show_merge_list()
4122 msg = "Error: The above package list contains " + \
4123 "packages which cannot be installed " + \
4124 "at the same time on the same system."
4125 prefix = colorize("BAD", " * ")
4126 from textwrap import wrap
4127 portage.writemsg("\n", noiselevel=-1)
# Wrap at 70 columns so the " * " prefix keeps lines under 80.
4128 for line in wrap(msg, 70):
4129 portage.writemsg(prefix + line + "\n", noiselevel=-1)
4130 if "--quiet" not in self.myopts:
4131 show_blocker_docs_link()
4133 def display(self, mylist, favorites=[], verbosity=None):
4134 if verbosity is None:
4135 verbosity = ("--quiet" in self.myopts and 1 or \
4136 "--verbose" in self.myopts and 3 or 2)
4137 favorites_set = InternalPackageSet(favorites)
4138 oneshot = "--oneshot" in self.myopts or \
4139 "--onlydeps" in self.myopts
4144 counters = PackageCounters()
4146 if verbosity == 1 and "--verbose" not in self.myopts:
4147 def create_use_string(*args):
4150 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
4152 is_new, reinst_flags,
4153 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
4154 alphabetical=("--alphabetical" in self.myopts)):
4162 cur_iuse = set(cur_iuse)
4163 enabled_flags = cur_iuse.intersection(cur_use)
4164 removed_iuse = set(old_iuse).difference(cur_iuse)
4165 any_iuse = cur_iuse.union(old_iuse)
4166 any_iuse = list(any_iuse)
4168 for flag in any_iuse:
4171 reinst_flag = reinst_flags and flag in reinst_flags
4172 if flag in enabled_flags:
4174 if is_new or flag in old_use and \
4175 (all_flags or reinst_flag):
4176 flag_str = red(flag)
4177 elif flag not in old_iuse:
4178 flag_str = yellow(flag) + "%*"
4179 elif flag not in old_use:
4180 flag_str = green(flag) + "*"
4181 elif flag in removed_iuse:
4182 if all_flags or reinst_flag:
4183 flag_str = yellow("-" + flag) + "%"
4186 flag_str = "(" + flag_str + ")"
4187 removed.append(flag_str)
4190 if is_new or flag in old_iuse and \
4191 flag not in old_use and \
4192 (all_flags or reinst_flag):
4193 flag_str = blue("-" + flag)
4194 elif flag not in old_iuse:
4195 flag_str = yellow("-" + flag)
4196 if flag not in iuse_forced:
4198 elif flag in old_use:
4199 flag_str = green("-" + flag) + "*"
4201 if flag in iuse_forced:
4202 flag_str = "(" + flag_str + ")"
4204 enabled.append(flag_str)
4206 disabled.append(flag_str)
4209 ret = " ".join(enabled)
4211 ret = " ".join(enabled + disabled + removed)
4213 ret = '%s="%s" ' % (name, ret)
4216 repo_display = RepoDisplay(self.roots)
4221 mygraph = self.digraph.copy()
4223 # If there are any Uninstall instances, add the corresponding
4224 # blockers to the digraph (useful for --tree display).
4225 for uninstall in self._blocker_uninstalls.leaf_nodes():
4226 uninstall_parents = \
4227 self._blocker_uninstalls.parent_nodes(uninstall)
4228 if not uninstall_parents:
4231 # Remove the corresponding "nomerge" node and substitute
4232 # the Uninstall node.
4233 inst_pkg = self._pkg_cache[
4234 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
4236 mygraph.remove(inst_pkg)
4241 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
4243 inst_pkg_blockers = []
4245 # Break the Package -> Uninstall edges.
4246 mygraph.remove(uninstall)
4248 # Resolution of a package's blockers
4249 # depend on it's own uninstallation.
4250 for blocker in inst_pkg_blockers:
4251 mygraph.add(uninstall, blocker)
4253 # Expand Package -> Uninstall edges into
4254 # Package -> Blocker -> Uninstall edges.
4255 for blocker in uninstall_parents:
4256 mygraph.add(uninstall, blocker)
4257 for parent in self._blocker_parents.parent_nodes(blocker):
4258 if parent != inst_pkg:
4259 mygraph.add(blocker, parent)
4261 unsatisfied_blockers = []
4266 if isinstance(x, Blocker) and not x.satisfied:
4267 unsatisfied_blockers.append(x)
4270 if "--tree" in self.myopts:
4271 depth = len(tree_nodes)
4272 while depth and graph_key not in \
4273 mygraph.child_nodes(tree_nodes[depth-1]):
4276 tree_nodes = tree_nodes[:depth]
4277 tree_nodes.append(graph_key)
4278 display_list.append((x, depth, True))
4279 shown_edges.add((graph_key, tree_nodes[depth-1]))
4281 traversed_nodes = set() # prevent endless circles
4282 traversed_nodes.add(graph_key)
def add_parents(current_node, ordered):
	# Walk up the dependency graph from current_node, appending each
	# visited ancestor to display_list so --tree output shows the chain
	# of packages that pulled current_node in.
	# NOTE(review): this excerpt is elided -- several guard, continue,
	# break and branch lines are missing; compare against the full
	# source before editing.
	# Do not traverse to parents if this node is an
	# an argument or a direct member of a set that has
	# been specified as an argument (system or world).
	if current_node not in self._set_nodes:
		parent_nodes = mygraph.parent_nodes(current_node)
	child_nodes = set(mygraph.child_nodes(current_node))
	selected_parent = None
	# First, try to avoid a direct cycle.
	for node in parent_nodes:
		if not isinstance(node, (Blocker, Package)):
		if node not in traversed_nodes and \
			node not in child_nodes:
			edge = (current_node, node)
			if edge in shown_edges:
			selected_parent = node
	if not selected_parent:
		# A direct cycle is unavoidable.
		for node in parent_nodes:
			if not isinstance(node, (Blocker, Package)):
			if node not in traversed_nodes:
				edge = (current_node, node)
				if edge in shown_edges:
				selected_parent = node
	# Record the chosen edge and recurse before emitting this node so
	# ancestors appear above their children in the display list.
	shown_edges.add((current_node, selected_parent))
	traversed_nodes.add(selected_parent)
	add_parents(selected_parent, False)
	display_list.append((current_node,
		len(tree_nodes), ordered))
	tree_nodes.append(current_node)
4323 add_parents(graph_key, True)
4325 display_list.append((x, depth, True))
4326 mylist = display_list
4327 for x in unsatisfied_blockers:
4328 mylist.append((x, 0, True))
4330 last_merge_depth = 0
4331 for i in xrange(len(mylist)-1,-1,-1):
4332 graph_key, depth, ordered = mylist[i]
4333 if not ordered and depth == 0 and i > 0 \
4334 and graph_key == mylist[i-1][0] and \
4335 mylist[i-1][1] == 0:
4336 # An ordered node got a consecutive duplicate when the tree was
4340 if ordered and graph_key[-1] != "nomerge":
4341 last_merge_depth = depth
4343 if depth >= last_merge_depth or \
4344 i < len(mylist) - 1 and \
4345 depth >= mylist[i+1][1]:
4348 from portage import flatten
4349 from portage.dep import use_reduce, paren_reduce
4350 # files to fetch list - avoids counting a same file twice
4351 # in size display (verbose mode)
4354 for mylist_index in xrange(len(mylist)):
4355 x, depth, ordered = mylist[mylist_index]
4359 portdb = self.trees[myroot]["porttree"].dbapi
4360 bindb = self.trees[myroot]["bintree"].dbapi
4361 vardb = self.trees[myroot]["vartree"].dbapi
4362 vartree = self.trees[myroot]["vartree"]
4363 pkgsettings = self.pkgsettings[myroot]
4366 indent = " " * depth
4368 if isinstance(x, Blocker):
4370 blocker_style = "PKG_BLOCKER_SATISFIED"
4371 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
4373 blocker_style = "PKG_BLOCKER"
4374 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
4376 counters.blocks += 1
4378 counters.blocks_satisfied += 1
4379 resolved = portage.key_expand(
4380 pkg_key, mydb=vardb, settings=pkgsettings)
4381 if "--columns" in self.myopts and "--quiet" in self.myopts:
4382 addl += " " + colorize(blocker_style, resolved)
4384 addl = "[%s %s] %s%s" % \
4385 (colorize(blocker_style, "blocks"),
4386 addl, indent, colorize(blocker_style, resolved))
4387 block_parents = self._blocker_parents.parent_nodes(x)
4388 block_parents = set([pnode[2] for pnode in block_parents])
4389 block_parents = ", ".join(block_parents)
4391 addl += colorize(blocker_style,
4392 " (\"%s\" is blocking %s)") % \
4393 (pkg_key, block_parents)
4395 addl += colorize(blocker_style,
4396 " (is blocking %s)") % block_parents
4397 if isinstance(x, Blocker) and x.satisfied:
4400 blockers.append(addl)
4403 pkg_merge = ordered and pkg_status == "merge"
4404 if not pkg_merge and pkg_status == "merge":
4405 pkg_status = "nomerge"
4406 built = pkg_type != "ebuild"
4407 installed = pkg_type == "installed"
4409 metadata = pkg.metadata
4411 repo_name = metadata["repository"]
4412 if pkg_type == "ebuild":
4413 ebuild_path = portdb.findname(pkg_key)
4414 if not ebuild_path: # shouldn't happen
4415 raise portage.exception.PackageNotFound(pkg_key)
4416 repo_path_real = os.path.dirname(os.path.dirname(
4417 os.path.dirname(ebuild_path)))
4419 repo_path_real = portdb.getRepositoryPath(repo_name)
4420 pkg_use = metadata["USE"].split()
4422 restrict = flatten(use_reduce(paren_reduce(
4423 pkg.metadata["RESTRICT"]), uselist=pkg_use))
4424 except portage.exception.InvalidDependString, e:
4425 if not pkg.installed:
4426 show_invalid_depstring_notice(x,
4427 pkg.metadata["RESTRICT"], str(e))
4431 if "ebuild" == pkg_type and x[3] != "nomerge" and \
4432 "fetch" in restrict:
4435 counters.restrict_fetch += 1
4436 if portdb.fetch_check(pkg_key, pkg_use):
4439 counters.restrict_fetch_satisfied += 1
4441 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
4442 #param is used for -u, where you still *do* want to see when something is being upgraded.
4445 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4446 if vardb.cpv_exists(pkg_key):
4447 addl=" "+yellow("R")+fetch+" "
4450 counters.reinst += 1
4451 elif pkg_status == "uninstall":
4452 counters.uninst += 1
4453 # filter out old-style virtual matches
4454 elif installed_versions and \
4455 portage.cpv_getkey(installed_versions[0]) == \
4456 portage.cpv_getkey(pkg_key):
4457 myinslotlist = vardb.match(pkg.slot_atom)
4458 # If this is the first install of a new-style virtual, we
4459 # need to filter out old-style virtual matches.
4460 if myinslotlist and \
4461 portage.cpv_getkey(myinslotlist[0]) != \
4462 portage.cpv_getkey(pkg_key):
4465 myoldbest = myinslotlist[:]
4467 if not portage.dep.cpvequal(pkg_key,
4468 portage.best([pkg_key] + myoldbest)):
4470 addl += turquoise("U")+blue("D")
4472 counters.downgrades += 1
4475 addl += turquoise("U") + " "
4477 counters.upgrades += 1
4479 # New slot, mark it new.
4480 addl = " " + green("NS") + fetch + " "
4481 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4483 counters.newslot += 1
4485 if "--changelog" in self.myopts:
4486 inst_matches = vardb.match(pkg.slot_atom)
4488 changelogs.extend(self.calc_changelog(
4489 portdb.findname(pkg_key),
4490 inst_matches[0], pkg_key))
4492 addl = " " + green("N") + " " + fetch + " "
4500 cur_iuse = list(filter_iuse_defaults(
4501 pkg.metadata["IUSE"].split()))
4503 forced_flags = set()
4504 pkgsettings.setcpv(pkg.cpv, mydb=pkg.metadata) # for package.use.{mask,force}
4505 forced_flags.update(pkgsettings.useforce)
4506 forced_flags.update(pkgsettings.usemask)
4508 cur_iuse = portage.unique_array(cur_iuse)
4511 cur_use = [flag for flag in cur_use if flag in cur_iuse]
4513 if myoldbest and myinslotlist:
4514 previous_cpv = myoldbest[0]
4516 previous_cpv = pkg.cpv
4517 if vardb.cpv_exists(previous_cpv):
4518 old_iuse, old_use = vardb.aux_get(
4519 previous_cpv, ["IUSE", "USE"])
4520 old_iuse = list(set(
4521 filter_iuse_defaults(old_iuse.split())))
4523 old_use = old_use.split()
4530 old_use = [flag for flag in old_use if flag in old_iuse]
4532 use_expand = pkgsettings["USE_EXPAND"].lower().split()
4534 use_expand.reverse()
4535 use_expand_hidden = \
4536 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
# Split a flat USE-flag list into a dict keyed by USE_EXPAND variable
# name (flags matching "<var>_" prefixes from use_expand are grouped
# under that variable, with the prefix stripped); flags present in
# forced_flags are additionally collected into a "forced" mapping.
# NOTE(review): excerpt elided -- the rest of the signature, the
# dict initialisation and the return statements are missing here.
def map_to_use_expand(myvals, forcedFlags=False,
	for exp in use_expand:
		for val in myvals[:]:
			if val.startswith(exp.lower()+"_"):
				if val in forced_flags:
					forced[exp].add(val[len(exp)+1:])
				ret[exp].append(val[len(exp)+1:])
	# Flags that matched no USE_EXPAND prefix stay under the "USE" key.
	forced["USE"] = [val for val in myvals \
		if val in forced_flags]
	# Presumably drops USE_EXPAND_HIDDEN groups when removeHidden is
	# set -- the guarded body is elided; confirm against full source.
	for exp in use_expand_hidden:
4561 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4562 # are the only thing that triggered reinstallation.
4563 reinst_flags_map = {}
4564 reinstall_for_flags = self._reinstall_nodes.get(pkg)
4565 reinst_expand_map = None
4566 if reinstall_for_flags:
4567 reinst_flags_map = map_to_use_expand(
4568 list(reinstall_for_flags), removeHidden=False)
4569 for k in list(reinst_flags_map):
4570 if not reinst_flags_map[k]:
4571 del reinst_flags_map[k]
4572 if not reinst_flags_map.get("USE"):
4573 reinst_expand_map = reinst_flags_map.copy()
4574 reinst_expand_map.pop("USE", None)
4575 if reinst_expand_map and \
4576 not set(reinst_expand_map).difference(
4578 use_expand_hidden = \
4579 set(use_expand_hidden).difference(
4582 cur_iuse_map, iuse_forced = \
4583 map_to_use_expand(cur_iuse, forcedFlags=True)
4584 cur_use_map = map_to_use_expand(cur_use)
4585 old_iuse_map = map_to_use_expand(old_iuse)
4586 old_use_map = map_to_use_expand(old_use)
4589 use_expand.insert(0, "USE")
4591 for key in use_expand:
4592 if key in use_expand_hidden:
4594 verboseadd += create_use_string(key.upper(),
4595 cur_iuse_map[key], iuse_forced[key],
4596 cur_use_map[key], old_iuse_map[key],
4597 old_use_map[key], is_new,
4598 reinst_flags_map.get(key))
4603 if pkg_type == "ebuild" and pkg_merge:
4605 myfilesdict = portdb.getfetchsizes(pkg_key,
4606 useflags=pkg_use, debug=self.edebug)
4607 except portage.exception.InvalidDependString, e:
4608 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4609 show_invalid_depstring_notice(x, src_uri, str(e))
4612 if myfilesdict is None:
4613 myfilesdict="[empty/missing/bad digest]"
4615 for myfetchfile in myfilesdict:
4616 if myfetchfile not in myfetchlist:
4617 mysize+=myfilesdict[myfetchfile]
4618 myfetchlist.append(myfetchfile)
4620 counters.totalsize += mysize
4621 verboseadd+=format_size(mysize)+" "
4624 # assign index for a previous version in the same slot
4625 has_previous = False
4626 repo_name_prev = None
4627 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4629 slot_matches = vardb.match(slot_atom)
4632 repo_name_prev = vardb.aux_get(slot_matches[0],
4635 # now use the data to generate output
4637 if pkg.installed or not has_previous:
4638 repoadd = repo_display.repoStr(repo_path_real)
4640 repo_path_prev = None
4642 repo_path_prev = portdb.getRepositoryPath(
4644 if repo_path_prev == repo_path_real:
4645 repoadd = repo_display.repoStr(repo_path_real)
4647 repoadd = "%s=>%s" % (
4648 repo_display.repoStr(repo_path_prev),
4649 repo_display.repoStr(repo_path_real))
4650 if repoadd and repoadd != "0":
4652 verboseadd += teal("[%s]" % repoadd)
4654 xs = [portage.cpv_getkey(pkg_key)] + \
4655 list(portage.catpkgsplit(pkg_key)[2:])
4662 if "COLUMNWIDTH" in self.settings:
4664 mywidth = int(self.settings["COLUMNWIDTH"])
4665 except ValueError, e:
4666 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4668 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4669 self.settings["COLUMNWIDTH"], noiselevel=-1)
4671 oldlp = mywidth - 30
4674 # Convert myoldbest from a list to a string.
4678 for pos, key in enumerate(myoldbest):
4679 key = portage.catpkgsplit(key)[2] + \
4680 "-" + portage.catpkgsplit(key)[3]
4681 if key[-3:] == "-r0":
4683 myoldbest[pos] = key
4684 myoldbest = blue("["+", ".join(myoldbest)+"]")
4687 root_config = self.roots[myroot]
4688 system_set = root_config.sets["system"]
4689 world_set = root_config.sets["world"]
4694 pkg_system = system_set.findAtomForPackage(pkg_key, metadata)
4695 pkg_world = world_set.findAtomForPackage(pkg_key, metadata)
4696 if not (oneshot or pkg_world) and \
4697 myroot == self.target_root and \
4698 favorites_set.findAtomForPackage(pkg_key, metadata):
4699 # Maybe it will be added to world now.
4700 if create_world_atom(pkg_key, metadata,
4701 favorites_set, root_config):
4703 except portage.exception.InvalidDependString:
4704 # This is reported elsewhere if relevant.
# NOTE(review): excerpt elided -- the if/elif scaffolding that selects
# among these returns (pkg_merge / pkg_status / pkg_system / pkg_world
# tests) is partially missing; compare against the full source.
def pkgprint(pkg_str):
	# Colorize a package string according to its merge status and
	# whether it belongs to the system or world set.
	return colorize("PKG_MERGE_SYSTEM", pkg_str)
	return colorize("PKG_MERGE_WORLD", pkg_str)
	return colorize("PKG_MERGE", pkg_str)
	elif pkg_status == "uninstall":
		return colorize("PKG_UNINSTALL", pkg_str)
	return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
	return colorize("PKG_NOMERGE_WORLD", pkg_str)
	return colorize("PKG_NOMERGE", pkg_str)
4728 if "--columns" in self.myopts:
4729 if "--quiet" in self.myopts:
4730 myprint=addl+" "+indent+pkgprint(pkg_cp)
4731 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4732 myprint=myprint+myoldbest
4733 myprint=myprint+darkgreen("to "+x[1])
4736 myprint = "[%s] %s%s" % \
4737 (pkgprint(pkg_status.ljust(13)),
4738 indent, pkgprint(pkg.cp))
4740 myprint = "[%s %s] %s%s" % \
4741 (pkgprint(pkg.type_name), addl,
4742 indent, pkgprint(pkg.cp))
4743 if (newlp-nc_len(myprint)) > 0:
4744 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4745 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4746 if (oldlp-nc_len(myprint)) > 0:
4747 myprint=myprint+" "*(oldlp-nc_len(myprint))
4748 myprint=myprint+myoldbest
4749 myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd
4752 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4754 myprint = "[" + pkg_type + " " + addl + "] "
4755 myprint += indent + pkgprint(pkg_key) + " " + \
4756 myoldbest + darkgreen("to " + myroot) + " " + \
4759 if "--columns" in self.myopts:
4760 if "--quiet" in self.myopts:
4761 myprint=addl+" "+indent+pkgprint(pkg_cp)
4762 myprint=myprint+" "+green(xs[1]+xs[2])+" "
4763 myprint=myprint+myoldbest
4766 myprint = "[%s] %s%s" % \
4767 (pkgprint(pkg_status.ljust(13)),
4768 indent, pkgprint(pkg.cp))
4770 myprint = "[%s %s] %s%s" % \
4771 (pkgprint(pkg.type_name), addl,
4772 indent, pkgprint(pkg.cp))
4773 if (newlp-nc_len(myprint)) > 0:
4774 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4775 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4776 if (oldlp-nc_len(myprint)) > 0:
4777 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4778 myprint=myprint+myoldbest+" "+verboseadd
4781 myprint = "[%s] %s%s %s %s" % \
4782 (pkgprint(pkg_status.ljust(13)),
4783 indent, pkgprint(pkg.cpv),
4784 myoldbest, verboseadd)
4786 myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
4789 mysplit = [portage.cpv_getkey(pkg_key)] + \
4790 list(portage.catpkgsplit(pkg_key)[2:])
4791 if "--tree" not in self.myopts and mysplit and \
4792 len(mysplit) == 3 and mysplit[0] == "sys-apps/portage" and \
4795 if mysplit[2] == "r0":
4796 myversion = mysplit[1]
4798 myversion = "%s-%s" % (mysplit[1], mysplit[2])
4800 if myversion != portage.VERSION and "--quiet" not in self.myopts:
4801 if mylist_index < len(mylist) - 1:
4802 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4803 p.append(colorize("WARN", " then resume the merge."))
4816 sys.stdout.write(str(repo_display))
4818 if "--changelog" in self.myopts:
4820 for revision,text in changelogs:
4821 print bold('*'+revision)
4822 sys.stdout.write(text)
def display_problems(self):
	"""
	Display problems with the dependency graph such as slot collisions.
	This is called internally by display() to show the problems _after_
	the merge list where it is most likely to be seen, but if display()
	is not going to be called then this method should be called explicitly
	to ensure that the user is notified of problems with the graph.
	"""
	# NOTE(review): this excerpt is elided -- some guard lines, loop
	# breaks and list initialisations are missing relative to the full
	# source; confirm before editing.

	# Circular dependencies are reported first.
	if self._circular_deps_for_display is not None:
		self._show_circular_deps(
			self._circular_deps_for_display)

	# The user is only notified of a slot conflict if
	# there are no unresolvable blocker conflicts.
	if self._unsatisfied_blockers_for_display is not None:
		self._show_unsatisfied_blockers(
			self._unsatisfied_blockers_for_display)
		self._show_slot_collision_notice()

	# TODO: Add generic support for "set problem" handlers so that
	# the below warnings aren't special cases for world only.

	if self._missing_args:
		world_problems = False
		if "world" in self._sets:
			# Flag world problems only when a missing argument
			# actually came from the world set.
			for arg, atom in self._missing_args:
				if arg.name == "world":
					world_problems = True
			sys.stderr.write("\n!!! Problems have been " + \
				"detected with your world file\n")
			sys.stderr.write("!!! Please run " + \
				green("emaint --check world")+"\n\n")

	if self._missing_args:
		sys.stderr.write("\n" + colorize("BAD", "!!!") + \
			" Ebuilds for the following packages are either all\n")
		sys.stderr.write(colorize("BAD", "!!!") + \
			" masked or don't exist:\n")
		sys.stderr.write(" ".join(atom for arg, atom in \
			self._missing_args) + "\n")

	if self._pprovided_args:
		# Group offending atoms by the argument (or set) that pulled
		# them in, so each one is reported with its origin.
		for arg, atom in self._pprovided_args:
			if isinstance(arg, SetArg):
				arg_atom = (atom, atom)
				arg_atom = (arg.arg, atom)
			refs = arg_refs.setdefault(arg_atom, [])
			if parent not in refs:
		msg.append(bad("\nWARNING: "))
		if len(self._pprovided_args) > 1:
			msg.append("Requested packages will not be " + \
				"merged because they are listed in\n")
			msg.append("A requested package will not be " + \
				"merged because it is listed in\n")
		msg.append("package.provided:\n\n")
		problems_sets = set()
		for (arg, atom), refs in arg_refs.iteritems():
			problems_sets.update(refs)
			ref_string = ", ".join(["'%s'" % name for name in refs])
			ref_string = " pulled in by " + ref_string
			msg.append("  %s%s\n" % (colorize("INFORM", arg), ref_string))
		# Only suggest world-file remedies when world is implicated.
		if "world" in problems_sets:
			msg.append("This problem can be solved in one of the following ways:\n\n")
			msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
			msg.append("  B) Uninstall offending packages (cleans them from world).\n")
			msg.append("  C) Remove offending entries from package.provided.\n\n")
			msg.append("The best course of action depends on the reason that an offending\n")
			msg.append("package.provided entry exists.\n\n")
		sys.stderr.write("".join(msg))

	# Report installed packages that are masked under current settings.
	masked_packages = []
	for pkg, pkgsettings in self._masked_installed:
		root_config = self.roots[pkg.root]
		mreasons = get_masking_status(pkg, pkgsettings, root_config)
		masked_packages.append((root_config, pkgsettings,
			pkg.cpv, pkg.metadata, mreasons))
	sys.stderr.write("\n" + colorize("BAD", "!!!") + \
		" The following installed packages are masked:\n")
	show_masked_packages(masked_packages)

	# Finally, replay any queued unsatisfied-dependency notices.
	for pargs, kwargs in self._unsatisfied_deps_for_display:
		self._show_unsatisfied_dep(*pargs, **kwargs)
def calc_changelog(self,ebuildpath,current,next):
	# Extract the ChangeLog entries between the currently installed
	# version ("current") and the version about to be merged ("next").
	# NOTE(review): excerpt elided -- early returns, an except fallback
	# and loop breaks are missing relative to the full source.
	if ebuildpath == None or not os.path.exists(ebuildpath):
	# Strip the category so versions compare against ChangeLog tags,
	# and drop a trailing "-r0" to match the tag format.
	current = '-'.join(portage.catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(portage.catpkgsplit(next)[1:])
	if next.endswith('-r0'):
	changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
	try:
		changelog = open(changelogpath).read()
	except SystemExit, e:
		raise # Needed else can't exit
	divisions = self.find_changelog_tags(changelog)
	#print 'XX from',current,'to',next
	#for div,text in divisions: print 'XX',div
	# skip entries for all revisions above the one we are about to emerge
	for i in range(len(divisions)):
		if divisions[i][0]==next:
			divisions = divisions[i:]
	# find out how many entries we are going to display
	for i in range(len(divisions)):
		if divisions[i][0]==current:
			divisions = divisions[:i]
	# couldnt find the current revision in the list. display nothing
def find_changelog_tags(self,changelog):
	# Split a ChangeLog into (release, text) chunks by scanning for
	# "*<version>" header lines (multiline regex below).
	# NOTE(review): excerpt elided -- the loop setup, the no-match
	# guard and the return statement are missing here.
	match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
	if release is not None:
		divs.append((release,changelog))
	if release is not None:
		divs.append((release,changelog[:match.start()]))
	# Advance past the matched header and remember its release tag.
	changelog = changelog[match.end():]
	release = match.group(1)
	# Normalize the tag: drop an ".ebuild" suffix and a "-r0" revision.
	if release.endswith('.ebuild'):
		release = release[:-7]
	if release.endswith('-r0'):
		release = release[:-3]
def saveNomergeFavorites(self):
	"""Find atoms in favorites that are not in the mergelist and add them
	to the world file if necessary."""
	# NOTE(review): excerpt elided -- returns/continues, try scaffolding
	# and some assignments are missing relative to the full source.
	# These options imply that nothing will actually be merged, so the
	# world file must not be modified.
	for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
		"--oneshot", "--onlydeps", "--pretend"):
		if x in self.myopts:
	root_config = self.roots[self.target_root]
	world_set = root_config.sets["world"]
	world_set.load() # maybe it's changed on disk
	args_set = self._sets["args"]
	portdb = self.trees[self.target_root]["porttree"].dbapi
	added_favorites = set()
	# Collect world atoms for packages that stayed "nomerge".
	for x in self._set_nodes:
		pkg_type, root, pkg_key, pkg_status = x
		if pkg_status != "nomerge":
		metadata = dict(izip(self._mydbapi_keys,
			self.mydbapi[root].aux_get(pkg_key, self._mydbapi_keys)))
		myfavkey = create_world_atom(pkg_key, metadata,
			args_set, root_config)
		if myfavkey in added_favorites:
		added_favorites.add(myfavkey)
		except portage.exception.InvalidDependString, e:
			writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
				(pkg_key, str(e)), noiselevel=-1)
			writemsg("!!! see '%s'\n\n" % os.path.join(
				root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
	# Also record qualifying named sets (besides args/world) with the
	# set prefix, then write everything to the world file.
	for k in self._sets:
		if k in ("args", "world") or not root_config.sets[k].world_candidate:
		all_added.append(SETPREFIX + k)
	all_added.extend(added_favorites)
	print ">>> Recording %s in \"world\" favorites file..." % \
		colorize("INFORM", a)
	world_set.update(all_added)
def loadResumeCommand(self, resume_data):
	"""
	Add a resume command to the graph and validate it in the process. This
	will raise a PackageNotFound exception if a package is not available.
	"""
	# NOTE(review): excerpt elided -- several guard/else/return lines,
	# the mergelist loop header and try scaffolding are missing
	# relative to the full source.
	if not isinstance(resume_data, dict):

	mergelist = resume_data.get("mergelist")
	if not isinstance(mergelist, list):

	# With --skipfirst, drop the first "merge" task from the list.
	if mergelist and "--skipfirst" in self.myopts:
		for i, task in enumerate(mergelist):
			if isinstance(task, list) and \
				task and task[-1] == "merge":

	fakedb = self.mydbapi
	serialized_tasks = []
	# Validate each 4-tuple mergelist entry and turn it into a Package.
	if not (isinstance(x, list) and len(x) == 4):
	pkg_type, myroot, pkg_key, action = x
	if pkg_type not in self.pkg_tree_map:
	if action != "merge":
	mydb = trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
	metadata = dict(izip(self._mydbapi_keys,
		mydb.aux_get(pkg_key, self._mydbapi_keys)))
	# It does no exist or it is corrupt.
	if action == "uninstall":
	raise portage.exception.PackageNotFound(pkg_key)
	if pkg_type == "ebuild":
		# Recalculate USE for ebuilds so resumed builds reflect the
		# current configuration.
		pkgsettings = self.pkgsettings[myroot]
		pkgsettings.setcpv(pkg_key, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
	installed = action == "uninstall"
	built = pkg_type != "ebuild"
	pkg = Package(built=built, cpv=pkg_key,
		installed=installed, metadata=metadata,
		operation=action, root=myroot,
	self._pkg_cache[pkg] = pkg
	root_config = self.roots[pkg.root]
	# Queue a masking notice when a to-be-merged package is no
	# longer visible under current settings.
	if "merge" == pkg.operation and \
		not visible(root_config.settings, pkg):
		self._unsatisfied_deps_for_display.append(
			((pkg.root, "="+pkg.cpv), {"myparent":None}))
	fakedb[myroot].cpv_inject(pkg)
	serialized_tasks.append(pkg)
	self.spinner.update()

	if self._unsatisfied_deps_for_display:

	if not serialized_tasks or "--nodeps" in self.myopts:
		self._serialized_tasks_cache = serialized_tasks
	self._select_package = self._select_pkg_from_graph
	self.myparams.add("selective")

	favorites = resume_data.get("favorites")
	if isinstance(favorites, list):
		args = self._load_favorites(favorites)

	# Re-add each resumed merge task to the dependency graph.
	for task in serialized_tasks:
		if isinstance(task, Package) and \
			task.operation == "merge":
			if not self._add_pkg(task, None):

	# Packages for argument atoms need to be explicitly
	# added via _add_pkg() so that they are included in the
	# digraph (needed at least for --tree display).
	for atom in arg.set:
		pkg, existing_node = self._select_package(
			arg.root_config.root, atom)
		if existing_node is None and \
			if not self._add_pkg(pkg, arg):

	# Allow unsatisfied deps here to avoid showing a masking
	# message for an unsatisfied dep that isn't necessarily
	# masked.
	if not self._create_graph(allow_unsatisfied=True):
	if self._unsatisfied_deps:
		# This probably means that a required package
		# was dropped via --skipfirst. It makes the
		# resume list invalid, so convert it to a
		# UnsatisfiedResumeDep exception.
		raise self.UnsatisfiedResumeDep(
			self._unsatisfied_deps)
	self._serialized_tasks_cache = None
	except self._unknown_internal_error:
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.
	"""
	# NOTE(review): excerpt elided -- loop headers, guards and the
	# return statement are missing relative to the full source.
	root_config = self.roots[self.target_root]
	getSetAtoms = root_config.setconfig.getSetAtoms
	sets = root_config.sets
	if not isinstance(x, basestring):
	if x in ("system", "world"):
	# Set-prefixed favorites become SetArg instances...
	if x.startswith(SETPREFIX):
		s = x[len(SETPREFIX):]
		# Recursively expand sets so that containment tests in
		# self._get_parent_sets() properly match atoms in nested
		# sets (like if world contains system).
		expanded_set = InternalPackageSet(
			initial_atoms=getSetAtoms(s))
		self._sets[s] = expanded_set
		args.append(SetArg(arg=x, set=expanded_set,
			root_config=root_config))
	# ...while plain (valid) atoms become AtomArg instances.
	if not portage.isvalidatom(x):
	args.append(AtomArg(arg=x, atom=x,
		root_config=root_config))

	# Create the "args" package set from atoms and
	# packages given as arguments.
	args_set = self._sets["args"]
	if not isinstance(arg, (AtomArg, PackageArg)):
	if myatom in args_set:
	args_set.add(myatom)
	self._set_atoms.update(chain(*self._sets.itervalues()))
	# Index every atom by (atom, root) so packages can be traced back
	# to the argument(s) that pulled them in.
	atom_arg_map = self._atom_arg_map
	for atom in arg.set:
		atom_key = (atom, arg.root_config.root)
		refs = atom_arg_map.get(atom_key)
		atom_arg_map[atom_key] = refs
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph."""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr, unfortunately, the exact reason for the failure
	"""
	# NOTE(review): the docstring is cut off in this excerpt.
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _dep_check_composite_db(portage.dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	"""
	# NOTE(review): excerpt elided throughout this class -- guards,
	# assignments and returns are missing relative to the full source.

	def __init__(self, depgraph, root):
		portage.dbapi.__init__(self)
		self._depgraph = depgraph
		# _match_cache caches match() results per atom;
		# _cpv_pkg_map remembers the Package behind each returned cpv
		# so aux_get() can serve metadata without another lookup.
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def match(self, atom):
		# Serve repeated queries from the cache when possible.
		ret = self._match_cache.get(atom)
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
		# Resolve the best visible package for each remaining slot.
		slot_atom = "%s:%s" % (atom_cp, slots.pop())
		pkg, existing = self._depgraph._select_package(
			self._root, slot_atom)
		if not self._visible(pkg):
		self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret

	def _visible(self, pkg):
		# Hide installed packages unless running in selective mode or
		# they match an argument atom; also apply normal visibility
		# filtering (masking) -- parts of this logic are elided here.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
		except (StopIteration, portage.exception.InvalidDependString):
			self._depgraph.pkgsettings[pkg.root], pkg):
		except portage.exception.InvalidDependString:

	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		"""
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			# Prefer the single non-virtual match, if exactly one exists.
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise ValueError([portage.dep_getkey(x) \
				for x in expanded_atoms])
		atom = expanded_atoms[0]
		# Presumably the no-match fallback: pick a "virtual" category
		# when a PROVIDE-based virtual exists for this package name,
		# otherwise "null" -- guard lines elided; confirm against
		# full source.
		null_atom = insert_category_into_atom(atom, "null")
		null_cp = portage.dep_getkey(null_atom)
		cat, atom_pn = portage.catsplit(null_cp)
		virts_p = root_config.settings.get_virts_p().get(atom_pn)
		# Allow the resolver to choose which virtual.
		atom = insert_category_into_atom(atom, "virtual")
		atom = insert_category_into_atom(atom, "null")

	def aux_get(self, cpv, wants):
		# Metadata comes straight from the Package cached by match().
		metadata = self._cpv_pkg_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]
class _package_cache(dict):
	"""Package cache that mirrors visible packages into each root's
	visible_pkgs database as they are added."""
	def __init__(self, depgraph):
		self._depgraph = depgraph

	def __setitem__(self, k, v):
		dict.__setitem__(self, k, v)
		# Keep the per-root visible-packages db in sync with every
		# package inserted into the cache.
		root_config = self._depgraph.roots[v.root]
		if visible(root_config.settings, v):
			root_config.visible_pkgs.cpv_inject(v)
class RepoDisplay(object):
	"""Collect the repositories referenced during merge-list display and
	render a numbered legend ("Portage tree and overlays")."""
	# NOTE(review): excerpt elided -- guards, the "?" fallback and
	# return statements are missing relative to the full source.

	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		# Gather PORTDIR and all overlays from every root's settings.
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Resolve symlinks so lookups by realpath succeed in repoStr().
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Return the display index assigned to this repository path,
		# allocating the next free index on first sight.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			# NOTE(review): list.index() raises ValueError rather than
			# returning -1; presumably an elided guard covers the
			# not-found case -- confirm against the full source.
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		index = len(shown_repos)
		shown_repos[repo_path] = index

	# (method header elided in this excerpt -- the lines below render
	# the repository legend; presumably __str__.)
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the path->index mapping into an index-ordered list.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""Tallies for the merge list (upgrades, downgrades, new installs,
	new slots, reinstalls, uninstalls, blockers, fetch restrictions)
	and the human-readable summary built from them."""
	# NOTE(review): the __init__ header and the first counter
	# assignments fall in an elided span of this excerpt.
		self.blocks_satisfied = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0

	# NOTE(review): body of the summary-string method (its def header
	# is elided); it appends to myoutput/details, which are initialized
	# in elided lines.
		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			# (pluralization line elided)
			if self.upgrades > 1:
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
		# (guards for the new/new-slot/reinstall/uninstall counters are
		# partially elided below)
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
			details.append("%s reinstall" % self.reinst)
			details.append("%s uninstall" % self.uninst)
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		# blocker summary (the "if self.blocks" guard and the string's
		# continuation are elided)
			myoutput.append("\nConflict: %s block" % \
				myoutput.append("s")
		if self.blocks_satisfied < self.blocks:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class MergeTask(object):
	"""Executes a resolved merge list: fetches, builds, packages and
	merges each entry, updates the world file and resume data, and
	re-execs emerge after portage replaces itself."""

	# Under any of these options nothing is merged to the live root
	# filesystem, so installed blockers are not collected (the set is
	# consulted by _find_blockers).
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])
	def __init__(self, settings, trees, myopts):
		# settings: the global portage.config; trees: the per-root tree
		# dictionary; myopts: parsed command-line option mapping.
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		# NOTE(review): the assignment guarded by this test falls in an
		# elided span of this excerpt (presumably enables debug output).
		if settings.get("PORTAGE_DEBUG", "") == "1":
		self.pkgsettings = {}
		self._blocker_db = {}
		# Per-root cloned config and installed-blocker database.
		# NOTE(review): the enclosing "for root in ..." header is elided.
			self.pkgsettings[root] = portage.config(
				clone=trees[root]["vartree"].settings)
			self._blocker_db[root] = BlockerDB(
				trees[root]["vartree"],
				trees[root]["porttree"].dbapi)
		# pids of helper processes we spawned (e.g. parallel-fetch child)
		self._spawned_pids = []
	def _find_blockers(self, new_pkg):
		"""Collect portage.dblink instances for installed packages that
		block new_pkg.  NOTE(review): the early-return body and the
		loop's continue lines fall in elided spans of this excerpt."""
		# Nothing touches the live root under these options, so blockers
		# need not be enforced.
		if self._opts_ignore_blockers.intersection(self.myopts):
		blocker_dblinks = []
		for blocking_pkg in self._blocker_db[
			new_pkg.root].findInstalledBlockers(new_pkg):
			# the package being replaced in the same slot, or the very
			# same cpv, is not treated as a real conflict
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
			if new_pkg.cpv == blocking_pkg.cpv:
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))
		return blocker_dblinks
	def merge(self, mylist, favorites, mtimedb):
		"""Public entry point: run _merge and afterwards hand any
		still-running helper pids to portage.process so they are
		cleaned up at exit.  NOTE(review): the try/finally framing
		lines are elided in this excerpt."""
			return self._merge(mylist, favorites, mtimedb)
		if self._spawned_pids:
			from portage import process
			process.spawned_pids.extend(self._spawned_pids)
			self._spawned_pids = []
	def _poll_child_processes(self):
		"""
		After each merge, collect status from child processes
		in order to clean up zombies (such as the parallel-fetch
		process), without blocking on children still running.
		"""
		spawned_pids = self._spawned_pids
		if not spawned_pids:
		for pid in list(spawned_pids):
			# (0, 0) from a WNOHANG waitpid means the child is still
			# running, so it stays in the list
			if os.waitpid(pid, os.WNOHANG) == (0, 0):
			# This pid has been cleaned up elsewhere,
			# so remove it from our list.
			spawned_pids.remove(pid)
	def _merge(self, mylist, favorites, mtimedb):
		"""Worker for merge(): verify Manifests, optionally spawn a
		parallel-fetch child, then fetch/build/package/merge every entry
		of the merge list, maintaining world-file and resume state.
		NOTE(review): this excerpt elides many lines (loop headers, else
		branches, try/except framing); visible code is unchanged.
		"""
		from portage.elog import elog_process
		from portage.elog.filtering import filter_mergephases
		buildpkgonly = "--buildpkgonly" in self.myopts
		fetchonly = "--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts
		oneshot = "--oneshot" in self.myopts or \
			"--onlydeps" in self.myopts
		pretend = "--pretend" in self.myopts
		ldpath_mtimes = mtimedb["ldpath"]
		xterm_titles = "notitles" not in self.settings.features

		if "--resume" in self.myopts:
			print colorize("GOOD", "*** Resuming merge...")
			emergelog(xterm_titles, " *** Resuming merge...")

		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		if "--fetchonly" not in self.myopts and \
			"--fetch-all-uri" not in self.myopts and \
			"strict" in self.settings.features:
			shown_verifying_msg = False
			# build quiet per-root configs so digestcheck output is terse
			for myroot, pkgsettings in self.pkgsettings.iteritems():
				quiet_config = portage.config(clone=pkgsettings)
				quiet_config["PORTAGE_QUIET"] = "1"
				quiet_config.backup_changes("PORTAGE_QUIET")
				quiet_settings[myroot] = quiet_config
			# NOTE(review): the "for x in mylist" header is elided here.
				if x[0] != "ebuild" or x[-1] == "nomerge":
				if not shown_verifying_msg:
					shown_verifying_msg = True
					print ">>> Verifying ebuild Manifests..."
				mytype, myroot, mycpv, mystatus = x
				portdb = self.trees[myroot]["porttree"].dbapi
				quiet_config = quiet_settings[myroot]
				quiet_config["O"] = os.path.dirname(portdb.findname(mycpv))
				if not portage.digestcheck([], quiet_config, strict=True):
				del x, mytype, myroot, mycpv, mystatus, quiet_config
			del shown_verifying_msg, quiet_settings

		root_config = self.trees[self.target_root]["root_config"]
		system_set = root_config.sets["system"]
		args_set = InternalPackageSet(favorites)
		world_set = root_config.sets["world"]
		# seed resume data with the "merge" operations only
		mtimedb["resume"]["mergelist"] = [list(x) for x in mylist \
			if isinstance(x, Package) and x.operation == "merge"]

		mymergelist = mylist
		myfeat = self.settings.features[:]
		# options that must not leak into an automatic --resume invocation
		bad_resume_opts = set(["--ask", "--changelog", "--skipfirst",
		if "parallel-fetch" in myfeat and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in myfeat:
				print red("!!!")+" parallel-fetching requires the distlocks feature enabled"
				print red("!!!")+" you have it disabled, thus parallel-fetching is being disabled"
			elif len(mymergelist) > 1:
				# spawn a background "--resume --fetchonly --nodeps"
				# emerge that pre-fetches distfiles while we build
				fetch_log = "/var/log/emerge-fetch.log"
				logfile = open(fetch_log, "w")
				fd_pipes = {1:logfile.fileno(), 2:logfile.fileno()}
				portage.util.apply_secpass_permissions(fetch_log,
					uid=portage.portage_uid, gid=portage.portage_gid,
				fetch_env = os.environ.copy()
				fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs"
				fetch_env["PORTAGE_NICENESS"] = "0"
				fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
				fetch_args = [sys.argv[0], "--resume",
					"--fetchonly", "--nodeps"]
				resume_opts = self.myopts.copy()
				# For automatic resume, we need to prevent
				# any of bad_resume_opts from leaking in
				# via EMERGE_DEFAULT_OPTS.
				resume_opts["--ignore-default-opts"] = True
				for myopt, myarg in resume_opts.iteritems():
					if myopt not in bad_resume_opts:
						fetch_args.append(myopt)
						fetch_args.append(myopt +"="+ myarg)
				self._spawned_pids.extend(
					portage.process.spawn(
						fetch_args, env=fetch_env,
						fd_pipes=fd_pipes, returnpid=True))
				logfile.close() # belongs to the spawned process
				del fetch_log, logfile, fd_pipes, fetch_env, fetch_args, \
				print ">>> starting parallel fetching pid %d" % \
					self._spawned_pids[-1]

		metadata_keys = [k for k in portage.auxdbkeys \
			if not k.startswith("UNUSED_")] + ["USE"]

		task_list = mymergelist
		# Filter mymergelist so that all the len(mymergelist) calls
		# below (for display) do not count Uninstall instances.
		mymergelist = [x for x in mymergelist if x[-1] == "merge"]
		# NOTE(review): the main "for x in task_list" loop header and the
		# mergecount bookkeeping are elided; the following lines are the
		# per-entry loop body.
			if x[0] == "blocks":
			pkg_type, myroot, pkg_key, operation = x
			built = pkg_type != "ebuild"
			installed = pkg_type == "installed"
			portdb = self.trees[myroot]["porttree"].dbapi
			bindb = self.trees[myroot]["bintree"].dbapi
			vartree = self.trees[myroot]["vartree"]
			vardb = vartree.dbapi
			root_config = self.trees[myroot]["root_config"]
			pkgsettings = self.pkgsettings[myroot]
			# resolve the Package instance from the appropriate db
			# (bodies of these branches are elided)
			if pkg_type == "blocks":
			elif pkg_type == "ebuild":
			if pkg_type == "binary":
			elif pkg_type == "installed":
				raise AssertionError("Package type: '%s'" % pkg_type)
			metadata = pkg.metadata
			# uninstall operations are delegated to unmerge()
			if not (buildpkgonly or fetchonly or pretend):
				unmerge(root_config, self.myopts, "unmerge",
					[pkg.cpv], mtimedb["ldpath"], clean_world=0)
			y = portdb.findname(pkg_key)
			if "--pretend" not in self.myopts:
				print "\n>>> Emerging (" + \
					colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \
					colorize("MERGE_LIST_PROGRESS", str(len(mymergelist))) + ") " + \
					colorize("GOOD", x[pkgindex]) + " to " + x[1]
				emergelog(xterm_titles, " >>> emerge ("+\
					str(mergecount)+" of "+str(len(mymergelist))+\
					") "+x[pkgindex]+" to "+x[1])

			pkgsettings["EMERGE_FROM"] = x[0]
			pkgsettings.backup_changes("EMERGE_FROM")

			#buildsyspkg: Check if we need to _force_ binary package creation
			issyspkg = ("buildsyspkg" in myfeat) \
				and x[0] != "blocks" \
				and system_set.findAtomForPackage(pkg_key, metadata) \
				and "--buildpkg" not in self.myopts
			if x[0] in ["ebuild","blocks"]:
				if x[0] == "blocks" and "--fetchonly" not in self.myopts:
					raise Exception, "Merging a blocker"
				elif "--fetchonly" in self.myopts or \
					"--fetch-all-uri" in self.myopts:
					if "--fetch-all-uri" in self.myopts:
						retval = portage.doebuild(y, "fetch", myroot,
							pkgsettings, self.edebug,
							"--pretend" in self.myopts, fetchonly=1,
							fetchall=1, mydbapi=portdb, tree="porttree")
					# (else branch header elided)
						retval = portage.doebuild(y, "fetch", myroot,
							pkgsettings, self.edebug,
							"--pretend" in self.myopts, fetchonly=1,
							mydbapi=portdb, tree="porttree")
					if (retval is None) or retval:
						print "!!! Fetch for",y,"failed, continuing..."
						failed_fetches.append(pkg_key)
				# full build path: set up build dir with locking
				portage.doebuild_environment(y, "setup", myroot,
					pkgsettings, self.edebug, 1, portdb)
				catdir = os.path.dirname(pkgsettings["PORTAGE_BUILDDIR"])
				portage.util.ensure_dirs(os.path.dirname(catdir),
					uid=portage.portage_uid, gid=portage.portage_gid,
				builddir_lock = None
				# hold the category-dir lock only while creating catdir
				# and taking the build-dir lock
				catdir_lock = portage.locks.lockdir(catdir)
				portage.util.ensure_dirs(catdir,
					gid=portage.portage_gid,
				builddir_lock = portage.locks.lockdir(
					pkgsettings["PORTAGE_BUILDDIR"])
				portage.locks.unlockdir(catdir_lock)
				msg = " === (%s of %s) Cleaning (%s::%s)" % \
					(mergecount, len(mymergelist), pkg_key, y)
				short_msg = "emerge: (%s of %s) %s Clean" % \
					(mergecount, len(mymergelist), pkg_key)
				emergelog(xterm_titles, msg, short_msg=short_msg)
				retval = portage.doebuild(y, "clean", myroot,
					pkgsettings, self.edebug, cleanup=1,
					mydbapi=portdb, tree="porttree")
				if retval != os.EX_OK:
				if "--buildpkg" in self.myopts or issyspkg:
					print ">>> This is a system package, " + \
						"let's pack a rescue tarball."
					msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
						(mergecount, len(mymergelist), pkg_key, y)
					short_msg = "emerge: (%s of %s) %s Compile" % \
						(mergecount, len(mymergelist), pkg_key)
					emergelog(xterm_titles, msg, short_msg=short_msg)
					self.trees[myroot]["bintree"].prevent_collision(pkg_key)
					# build the binary package into a pid-suffixed temp
					# file, then inject it atomically below
					binpkg_tmpfile = os.path.join(pkgsettings["PKGDIR"],
						pkg_key + ".tbz2." + str(os.getpid()))
					pkgsettings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
					pkgsettings.backup_changes("PORTAGE_BINPKG_TMPFILE")
					retval = portage.doebuild(y, "package", myroot,
						pkgsettings, self.edebug, mydbapi=portdb,
					del pkgsettings["PORTAGE_BINPKG_TMPFILE"]
					if retval != os.EX_OK or \
						"--buildpkgonly" in self.myopts:
						elog_process(pkg_key, pkgsettings, phasefilter=filter_mergephases)
					if retval != os.EX_OK:
					bintree = self.trees[myroot]["bintree"]
					bintree.inject(pkg_key, filename=binpkg_tmpfile)
					if "--buildpkgonly" not in self.myopts:
						msg = " === (%s of %s) Merging (%s::%s)" % \
							(mergecount, len(mymergelist), pkg_key, y)
						short_msg = "emerge: (%s of %s) %s Merge" % \
							(mergecount, len(mymergelist), pkg_key)
						emergelog(xterm_titles, msg, short_msg=short_msg)
						retval = portage.merge(pkgsettings["CATEGORY"],
							pkgsettings["PF"], pkgsettings["D"],
							os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
							"build-info"), myroot, pkgsettings,
							myebuild=pkgsettings["EBUILD"],
							mytree="porttree", mydbapi=portdb,
							vartree=vartree, prev_mtimes=ldpath_mtimes,
							blockers=self._find_blockers(pkg))
						if retval != os.EX_OK:
					elif "noclean" not in pkgsettings.features:
						portage.doebuild(y, "clean", myroot,
							pkgsettings, self.edebug, mydbapi=portdb,
				# (else branch: build without --buildpkg; header elided)
					msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
						(mergecount, len(mymergelist), pkg_key, y)
					short_msg = "emerge: (%s of %s) %s Compile" % \
						(mergecount, len(mymergelist), pkg_key)
					emergelog(xterm_titles, msg, short_msg=short_msg)
					retval = portage.doebuild(y, "install", myroot,
						pkgsettings, self.edebug, vartree=vartree,
						mydbapi=portdb, tree="porttree",
						prev_mtimes=ldpath_mtimes)
					if retval != os.EX_OK:
					retval = portage.merge(pkgsettings["CATEGORY"],
						pkgsettings["PF"], pkgsettings["D"],
						os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
						"build-info"), myroot, pkgsettings,
						myebuild=pkgsettings["EBUILD"],
						mytree="porttree", mydbapi=portdb,
						vartree=vartree, prev_mtimes=ldpath_mtimes,
						blockers=self._find_blockers(pkg))
					if retval != os.EX_OK:
				portage.locks.unlockdir(builddir_lock)
				# Lock catdir for removal if empty.
				catdir_lock = portage.locks.lockdir(catdir)
				# (try/except framing around rmdir is elided; these errno
				# values mean "not empty or already gone" and are benign)
				if e.errno not in (errno.ENOENT,
					errno.ENOTEMPTY, errno.EEXIST):
				portage.locks.unlockdir(catdir_lock)

			elif x[0]=="binary":
				#merge the tbz2
				mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
				if "--getbinpkg" in self.myopts:
				if "distlocks" in pkgsettings.features and \
					os.access(pkgsettings["PKGDIR"], os.W_OK):
					portage.util.ensure_dirs(os.path.dirname(mytbz2))
					tbz2_lock = portage.locks.lockfile(mytbz2,
				if self.trees[myroot]["bintree"].isremote(pkg_key):
					msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
						(mergecount, len(mymergelist), pkg_key, mytbz2)
					short_msg = "emerge: (%s of %s) %s Fetch" % \
						(mergecount, len(mymergelist), pkg_key)
					emergelog(xterm_titles, msg, short_msg=short_msg)
					# (try header elided)
					self.trees[myroot]["bintree"].gettbz2(pkg_key)
					except portage.exception.FileNotFound:
						writemsg("!!! Fetching Binary failed " + \
							"for '%s'\n" % pkg_key, noiselevel=-1)
						failed_fetches.append(pkg_key)
					except portage.exception.DigestException, e:
						# (noiselevel arguments continue on elided lines)
						writemsg("\n!!! Digest verification failed:\n",
						writemsg("!!! %s\n" % e.value[0],
						writemsg("!!! Reason: %s\n" % e.value[1],
						writemsg("!!! Got: %s\n" % e.value[2],
						writemsg("!!! Expected: %s\n" % e.value[3],
						failed_fetches.append(pkg_key)
					portage.locks.unlockfile(tbz2_lock)
				if "--fetchonly" in self.myopts or \
					"--fetch-all-uri" in self.myopts:
				short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Merge Binary"
				emergelog(xterm_titles, " === ("+str(mergecount)+\
					" of "+str(len(mymergelist))+") Merging Binary ("+\
					x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg)
				retval = portage.pkgmerge(mytbz2, x[1], pkgsettings,
					vartree=self.trees[myroot]["vartree"],
					prev_mtimes=ldpath_mtimes,
					blockers=self._find_blockers(pkg))
				if retval != os.EX_OK:
			#need to check for errors
			if not buildpkgonly:
				# record explicitly-requested packages in the world file
				if not (fetchonly or oneshot or pretend) and \
					args_set.findAtomForPackage(pkg_key, metadata):
					world_set.load() # maybe it's changed on disk
					myfavkey = create_world_atom(pkg_key, metadata,
						args_set, root_config)
					print ">>> Recording",myfavkey,"in \"world\" favorites file..."
					emergelog(xterm_titles, " === ("+\
						str(mergecount)+" of "+\
						str(len(mymergelist))+\
						") Updating world file ("+x[pkgindex]+")")
					world_set.add(myfavkey)

				if "--pretend" not in self.myopts and \
					"--fetchonly" not in self.myopts and \
					"--fetch-all-uri" not in self.myopts:
					# Figure out if we need a restart.
					if myroot == "/" and pkg.cp == "sys-apps/portage":
						if len(mymergelist) > mergecount:
							emergelog(xterm_titles,
								" ::: completed emerge ("+ \
								str(mergecount)+" of "+ \
								str(len(mymergelist))+") "+ \
							emergelog(xterm_titles, " *** RESTARTING " + \
								"emerge via exec() after change of " + \
							del mtimedb["resume"]["mergelist"][0]
							portage.run_exitfuncs()
							mynewargv=[sys.argv[0],"--resume"]
							resume_opts = self.myopts.copy()
							# For automatic resume, we need to prevent
							# any of bad_resume_opts from leaking in
							# via EMERGE_DEFAULT_OPTS.
							resume_opts["--ignore-default-opts"] = True
							for myopt, myarg in resume_opts.iteritems():
								if myopt not in bad_resume_opts:
									mynewargv.append(myopt)
									mynewargv.append(myopt +"="+ myarg)
							# priority only needs to be adjusted on the first run
							os.environ["PORTAGE_NICENESS"] = "0"
							os.execv(mynewargv[0], mynewargv)

			if "--pretend" not in self.myopts and \
				"--fetchonly" not in self.myopts and \
				"--fetch-all-uri" not in self.myopts:
				if "noclean" not in self.settings.features:
					short_msg = "emerge: (%s of %s) %s Clean Post" % \
						(mergecount, len(mymergelist), x[pkgindex])
					emergelog(xterm_titles, (" === (%s of %s) " + \
						"Post-Build Cleaning (%s::%s)") % \
						(mergecount, len(mymergelist), x[pkgindex], y),
						short_msg=short_msg)
				emergelog(xterm_titles, " ::: completed emerge ("+\
					str(mergecount)+" of "+str(len(mymergelist))+") "+\

			# Unsafe for parallel merges
			del mtimedb["resume"]["mergelist"][0]
			# Commit after each merge so that --resume may still work
			# in the event that portage is not allowed to exit normally
			# due to power failure, SIGKILL, etc...
			self._poll_child_processes()

		if "--pretend" not in self.myopts:
			emergelog(xterm_titles, " *** Finished. Cleaning up...")

		# We're out of the loop... We're done. Delete the resume data.
		if mtimedb.has_key("resume"):
			del mtimedb["resume"]

		#by doing an exit this way, --fetchonly can continue to try to
		#fetch everything even if a particular download fails.
		if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts:
			sys.stderr.write("\n\n!!! Some fetch errors were " + \
				"encountered. Please see above for details.\n\n")
			for cpv in failed_fetches:
				sys.stderr.write(" ")
				sys.stderr.write(cpv)
				sys.stderr.write("\n")
			sys.stderr.write("\n")
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, ordered=0):
	"""Unmerge installed packages for the unmerge/prune/clean actions:
	resolve arguments against the installed-package tree, partition
	matches into selected/protected/omitted, display the plan, then
	(unless pretending) call portage.unmerge on each selected package
	and clean the world file.  NOTE(review): many lines of this
	function fall in elided spans of this excerpt; visible code is
	reproduced unchanged.
	"""
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs=[]
	xterm_titles = "notitles" not in settings.features

	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
	# (try header elided)
	# At least the parent needs to exist for the lock file.
	portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
	# lock the vdb only when we can actually write to it
	if os.access(vdb_path, os.W_OK):
		vdb_lock = portage.locks.lockdir(vdb_path)
	realsyslist = sets["system"].getAtoms()
	# Expand system-set virtuals to their single installed provider so
	# we can later warn before unmerging system packages.
	for x in realsyslist:
		mycp = portage.dep_getkey(x)
		if mycp in settings.getvirtuals():
			for provider in settings.getvirtuals()[mycp]:
				if vartree.dbapi.match(provider):
					providers.append(provider)
			if len(providers) == 1:
				syslist.extend(providers)
			syslist.append(mycp)
	mysettings = portage.config(clone=settings)

	if not unmerge_files:
		if unmerge_action == "unmerge":
			print bold("emerge unmerge") + " can only be used with specific package names"

	# process all arguments and add all
	# valid db entries to candidate_catpkgs
	if not unmerge_files:
		candidate_catpkgs.extend(vartree.dbapi.cp_all())
	#we've got command-line arguments
	if not unmerge_files:
		print "\nNo packages to unmerge have been provided.\n"
	for x in unmerge_files:
		arg_parts = x.split('/')
		if x[0] not in [".","/"] and \
			arg_parts[-1][-7:] != ".ebuild":
			#possible cat/pkg or dep; treat as such
			candidate_catpkgs.append(x)
		elif unmerge_action in ["prune","clean"]:
			print "\n!!! Prune and clean do not accept individual" + \
				" ebuilds as arguments;\n skipping.\n"
		# (else branch: path argument pointing into the vdb; header elided)
			# it appears that the user is specifying an installed
			# ebuild and we're in "unmerge" mode, so it's ok.
			if not os.path.exists(x):
				print "\n!!! The path '"+x+"' doesn't exist.\n"
			absx = os.path.abspath(x)
			sp_absx = absx.split("/")
			if sp_absx[-1][-7:] == ".ebuild":
				absx = "/".join(sp_absx)
			sp_absx_len = len(sp_absx)
			vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
			vdb_len = len(vdb_path)
			sp_vdb = vdb_path.split("/")
			sp_vdb_len = len(sp_vdb)
			if not os.path.exists(absx+"/CONTENTS"):
				print "!!! Not a valid db dir: "+str(absx)
			if sp_absx_len <= sp_vdb_len:
				# The Path is shorter... so it can't be inside the vdb.
				print "\n!!!",x,"cannot be inside "+ \
					vdb_path+"; aborting.\n"
			# compare path components to confirm the dir lies in the vdb
			for idx in range(0,sp_vdb_len):
				if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
					print "\n!!!", x, "is not inside "+\
						vdb_path+"; aborting.\n"
			print "="+"/".join(sp_absx[sp_vdb_len:])
			candidate_catpkgs.append(
				"="+"/".join(sp_absx[sp_vdb_len:]))

	if (not "--quiet" in myopts):
		if settings["ROOT"] != "/":
			print darkgreen(newline+ \
				">>> Using system located in ROOT tree "+settings["ROOT"])
	if (("--pretend" in myopts) or ("--ask" in myopts)) and \
		not ("--quiet" in myopts):
		print darkgreen(newline+\
			">>> These are the packages that would be unmerged:")

	# Preservation of order is required for --depclean and --prune so
	# that dependencies are respected. Use all_selected to eliminate
	# duplicate packages since the same package may be selected by
	all_selected = set()
	for x in candidate_catpkgs:
		# cycle through all our candidate deps and determine
		# what will and will not get unmerged
		mymatch=localtree.dep_match(x)
		# ValueError from dep_match signals an ambiguous short name;
		# errpkgs[0] carries the fully-qualified candidates
		except ValueError, errpkgs:
			print "\n\n!!! The short ebuild name \"" + \
				x + "\" is ambiguous. Please specify"
			print "!!! one of the following fully-qualified " + \
				"ebuild names instead:\n"
			for i in errpkgs[0]:
				print " " + green(i)

		if not mymatch and x[0] not in "<>=~":
			#add a "=" if missing
			mymatch=localtree.dep_match("="+x)
			portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
				(x, unmerge_action), noiselevel=-1)
		# (pkgmap.append header elided; one entry per argument)
			{"protected": set(), "selected": set(), "omitted": set()})
		mykey = len(pkgmap) - 1
		if unmerge_action=="unmerge":
				if y not in all_selected:
					pkgmap[mykey]["selected"].add(y)
		elif unmerge_action == "prune":
			if len(mymatch) == 1:
			# keep only the best installed version per slot preference
			best_version = mymatch[0]
			best_slot = vartree.getslot(best_version)
			best_counter = vartree.dbapi.cpv_counter(best_version)
			for mypkg in mymatch[1:]:
				myslot = vartree.getslot(mypkg)
				mycounter = vartree.dbapi.cpv_counter(mypkg)
				if (myslot == best_slot and mycounter > best_counter) or \
					mypkg == portage.best([mypkg, best_version]):
					if myslot == best_slot:
						if mycounter < best_counter:
							# On slot collision, keep the one with the
							# highest counter since it is the most
							# recently installed.
					best_version = mypkg
					best_counter = mycounter
			pkgmap[mykey]["protected"].add(best_version)
			pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
				if mypkg != best_version and mypkg not in all_selected)
			all_selected.update(pkgmap[mykey]["selected"])
		# (else branch header elided)
			# unmerge_action == "clean"
			for mypkg in mymatch:
				if unmerge_action == "clean":
					myslot = localtree.getslot(mypkg)
				# (else branch header elided)
					# since we're pruning, we don't care about slots
					# and put all the pkgs in together
				if not slotmap.has_key(myslot):
					slotmap[myslot] = {}
				slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
			for myslot in slotmap:
				counterkeys = slotmap[myslot].keys()
				pkgmap[mykey]["protected"].add(
					slotmap[myslot][counterkeys[-1]])
				#be pretty and get them in order of merge:
				for ckey in counterkeys:
					mypkg = slotmap[myslot][ckey]
					if mypkg not in all_selected:
						pkgmap[mykey]["selected"].add(mypkg)
						all_selected.add(mypkg)
				# ok, now the last-merged package
				# is protected, and the rest are selected
	numselected = len(all_selected)
	if global_unmerge and not numselected:
		portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
	# (guard for the no-selection case elided)
		portage.writemsg_stdout(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		portage.locks.unlockdir(vdb_lock)

	from portage.sets.base import EditablePackageSet

	# generate a list of package sets that are directly or indirectly listed in "world",
	# as there is no persistent list of "installed" sets
	installed_sets = ["world"]
	# (fixed-point loop framing elided: follow set references until no
	# new sets are discovered)
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
			installed_sets += candidates
	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	for cp in xrange(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# unless we're unmerging a package set (as the package would be
				# removed from "world" later on)
				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				elif sets[s].containsCPV(cpv) \
					and isinstance(sets[s], EditablePackageSet):
			# (guard on collected parents elided)
				#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
				#print colorize("WARN", "but still listed in the following package sets:")
				#print " %s\n" % ", ".join(parents)
				print colorize("WARN", "Not unmerging package %s as it is" % cpv)
				print colorize("WARN", "still referenced by the following package sets:")
				print " %s\n" % ", ".join(parents)
				# adjust pkgmap so the display output is correct
				pkgmap[cp]["selected"].remove(cpv)
				pkgmap[cp]["protected"].add(cpv)

	# Unmerge order only matters in some cases
	# (loop framing elided: regroup pkgmap entries by category/package
	# when order is irrelevant)
		selected = d["selected"]
		cp = portage.cpv_getkey(iter(selected).next())
		cp_dict = unordered.get(cp)
			unordered[cp] = cp_dict
		for k, v in d.iteritems():
			cp_dict[k].update(v)
	pkgmap = [unordered[cp] for cp in sorted(unordered)]

	for x in xrange(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		for mytype, mylist in pkgmap[x].iteritems():
			if mytype == "selected":
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(iter(selected).next())
		# anything else installed for this cp is shown as "omitted"
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			print colorize("BAD","\a\n\n!!! '%s' is part of your system profile." % cp)
			print colorize("WARN","\a!!! Unmerging it may be damaging to your system.\n")
			if "--pretend" not in myopts and "--ask" not in myopts:
				countdown(int(settings["EMERGE_WARNING_DELAY"]),
					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
		if "--quiet" not in myopts:
			print "\n "+bold(cp)
			print bold(cp)+": ",
		for mytype in ["selected","protected","omitted"]:
			if "--quiet" not in myopts:
				portage.writemsg_stdout((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
				sorted_pkgs.sort(portage.pkgcmp)
				for pn, ver, rev in sorted_pkgs:
					# (rev == "r0" special case elided)
					myversion = ver + "-" + rev
					if mytype == "selected":
						portage.writemsg_stdout(
							colorize("UNMERGE_WARN", myversion + " "), noiselevel=-1)
					# (else branch header elided)
						portage.writemsg_stdout(
							colorize("GOOD", myversion + " "), noiselevel=-1)
				portage.writemsg_stdout("none ", noiselevel=-1)
			if "--quiet" not in myopts:
				portage.writemsg_stdout("\n", noiselevel=-1)
		if "--quiet" in myopts:
			portage.writemsg_stdout("\n", noiselevel=-1)

	portage.writemsg_stdout("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	portage.writemsg_stdout(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	if "--pretend" in myopts:
		#we're done... return
	if "--ask" in myopts:
		if userquery("Would you like to unmerge these packages?")=="No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True

	#the real unmerging begins, after a short delay....
	countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	for x in xrange(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			print ">>> Unmerging "+y+"..."
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			mysplit = y.split("/")
			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
				mysettings, unmerge_action not in ["clean","prune"],
				vartree=vartree, ldpath_mtimes=ldpath_mtimes)
			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
			# (success branch framing elided)
			sets["world"].cleanPackage(vartree.dbapi, y)
			emergelog(xterm_titles, " >>> unmerge success: "+y)
	# drop any package sets the user unmerged from the world file
	for s in root_config.setconfig.active:
		sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
    """Regenerate the GNU info directory index ("dir" files) for any info
    directory whose mtime changed since the previous merge session.

    @param root: filesystem root the info dirs live under
    @param infodirs: candidate info directory paths (from INFOPATH/INFODIR)
    @param prev_mtimes: mapping inforoot -> mtime from the previous run;
        updated in place so unchanged dirs can be skipped next time
    @param retval: emerge's pending exit status (not used by the visible code)

    NOTE(review): this copy of the file is missing a number of lines
    (loop headers, try/else branches); the code below is transcribed
    as found, with comments added only.
    """
    # install-info comes from sys-apps/texinfo; without it there is
    # nothing we can regenerate.
    if os.path.exists("/usr/bin/install-info"):
        inforoot=normpath(root+z)
        if os.path.isdir(inforoot):
            infomtime = long(os.stat(inforoot).st_mtime)
            # Queue only directories that are new or have changed.
            if inforoot not in prev_mtimes or \
                prev_mtimes[inforoot] != infomtime:
                regen_infodirs.append(inforoot)
        if not regen_infodirs:
            portage.writemsg_stdout("\n "+green("*")+" GNU info directory index is up-to-date.\n")
            portage.writemsg_stdout("\n "+green("*")+" Regenerating GNU info directory index...\n")
            # The "dir" index may exist plain or compressed.
            dir_extensions = ("", ".gz", ".bz2")
            for inforoot in regen_infodirs:
                if not os.path.isdir(inforoot):
                file_list = os.listdir(inforoot)
                dir_file = os.path.join(inforoot, "dir")
                moved_old_dir = False
                # Skip hidden entries and subdirectories.
                if x.startswith(".") or \
                    os.path.isdir(os.path.join(inforoot, x)):
                if x.startswith("dir"):
                    # Never feed the index files themselves to install-info.
                    for ext in dir_extensions:
                        if x == "dir" + ext or \
                            x == "dir" + ext + ".old":
                if processed_count == 0:
                    # Move any pre-existing "dir" files out of the way so
                    # install-info rebuilds the index from scratch.
                    for ext in dir_extensions:
                        os.rename(dir_file + ext, dir_file + ext + ".old")
                        moved_old_dir = True
                    except EnvironmentError, e:
                        # A missing variant is expected; anything else is real.
                        if e.errno != errno.ENOENT:
                processed_count += 1
                # LANG/LANGUAGE forced to C so the output parsing below is stable.
                myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
                existsstr="already exists, for file `"
                if re.search(existsstr,myso):
                    # Already exists... Don't increment the count for this.
                elif myso[:44]=="install-info: warning: no info dir entry in ":
                    # This info file doesn't contain a DIR-header: install-info produces this
                    # (harmless) warning (the --quiet switch doesn't seem to work).
                    # Don't increment the count for this.
                    errmsg += myso + "\n"
                if moved_old_dir and not os.path.exists(dir_file):
                    # We didn't generate a new dir file, so put the old file
                    # back where it was originally found.
                    for ext in dir_extensions:
                        os.rename(dir_file + ext + ".old", dir_file + ext)
                        except EnvironmentError, e:
                            if e.errno != errno.ENOENT:
                # Clean dir.old cruft so that they don't prevent
                # unmerge of otherwise empty directories.
                for ext in dir_extensions:
                    os.unlink(dir_file + ext + ".old")
                    except EnvironmentError, e:
                        if e.errno != errno.ENOENT:
                #update mtime so we can potentially avoid regenerating.
                prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
    print " "+yellow("*")+" Processed",icount,"info files;",badcount,"errors."
    print " "+green("*")+" Processed",icount,"info files."
6525 def display_news_notification(trees):
6526 for target_root in trees:
6527 if len(trees) > 1 and target_root != "/":
6529 settings = trees[target_root]["vartree"].settings
6530 portdb = trees[target_root]["porttree"].dbapi
6531 vardb = trees[target_root]["vartree"].dbapi
6532 NEWS_PATH = os.path.join("metadata", "news")
6533 UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
6534 newsReaderDisplay = False
6536 for repo in portdb.getRepositories():
6537 unreadItems = checkUpdatedNewsItems(
6538 portdb, vardb, NEWS_PATH, UNREAD_PATH, repo)
6540 if not newsReaderDisplay:
6541 newsReaderDisplay = True
6543 print colorize("WARN", " * IMPORTANT:"),
6544 print "%s news items need reading for repository '%s'." % (unreadItems, repo)
6547 if newsReaderDisplay:
6548 print colorize("WARN", " *"),
6549 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def post_emerge(trees, mtimedb, retval):
    """
    Misc. things to run at the end of a merge session.

    Regenerates the GNU info index, warns about config files and unread
    news items that need attention, and displays preserved-libs warnings.

    @param trees: A dictionary mapping each ROOT to it's package databases
    @type trees: dict
    @param mtimedb: The mtimeDB to store data needed across merge invocations
    @type mtimedb: MtimeDB class instance
    @param retval: Emerge's return value
    @type retval: int
    @rtype: None
    @returns:
    1. Calls sys.exit(retval)

    NOTE(review): this copy of the file is missing several lines
    (break, try/finally/else wrappers, sys.exit); transcribed as found,
    comments added only.
    """
    # Pick the target ROOT (prefer a non-"/" ROOT when several exist).
    for target_root in trees:
        if len(trees) > 1 and target_root != "/":
    vardbapi = trees[target_root]["vartree"].dbapi
    settings = vardbapi.settings
    info_mtimes = mtimedb["info"]

    # Load the most current variables from ${ROOT}/etc/profile.env
    settings.regenerate()

    config_protect = settings.get("CONFIG_PROTECT","").split()
    infodirs = settings.get("INFOPATH","").split(":") + \
        settings.get("INFODIR","").split(":")

    if retval == os.EX_OK:
        exit_msg = " *** exiting successfully."
        exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
    emergelog("notitles" not in settings.features, exit_msg)

    # Dump the mod_echo output now so that our other notifications are shown
    from portage.elog import mod_echo
    pass # happens during downgrade to a version without the module

    # The vdb lock is held while touching the info index so concurrent
    # portage instances don't race on it.
    vdb_path = os.path.join(target_root, portage.VDB_PATH)
    portage.util.ensure_dirs(vdb_path)
    vdb_lock = portage.locks.lockdir(vdb_path)
    if "noinfo" not in settings.features:
        chk_updated_info_files(target_root, infodirs, info_mtimes, retval)
    portage.locks.unlockdir(vdb_lock)

    chk_updated_cfg_files(target_root, config_protect)

    display_news_notification(trees)

    # Warn about libraries preserved from unmerged packages.
    if vardbapi.plib_registry.hasEntries():
        print colorize("WARN", "!!!") + " existing preserved libs:"
        plibdata = vardbapi.plib_registry.getPreservedLibs()
        for cpv in plibdata:
            print colorize("WARN", ">>>") + " package: %s" % cpv
            for f in plibdata[cpv]:
                print colorize("WARN", " * ") + " - %s" % f
        print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def chk_updated_cfg_files(target_root, config_protect):
    """Scan CONFIG_PROTECT-ed paths for pending ._cfg????_* update files
    and tell the user how many config files need attention.

    @param target_root: ROOT the protected paths are joined onto
    @param config_protect: list of protected paths (CONFIG_PROTECT split)

    NOTE(review): this copy of the file is missing several lines
    (continue/else/try branches, counters); transcribed as found,
    comments added only.
    """
    #number of directories with some protect files in them
    for x in config_protect:
        x = os.path.join(target_root, x.lstrip(os.path.sep))
        if not os.access(x, os.W_OK):
            # Avoid Permission denied errors generated
        mymode = os.lstat(x).st_mode
        if stat.S_ISLNK(mymode):
            # We want to treat it like a directory if it
            # is a symlink to an existing directory.
            real_mode = os.stat(x).st_mode
            if stat.S_ISDIR(real_mode):
        if stat.S_ISDIR(mymode):
            # Protected directory: search it recursively.
            mycommand = "find '%s' -iname '._cfg????_*'" % x
            # Protected single file: search only its parent directory
            # for updates to that one file.
            mycommand = "find '%s' -maxdepth 1 -iname '._cfg????_%s'" % \
                os.path.split(x.rstrip(os.path.sep))
        mycommand += " ! -iname '.*~' ! -iname '.*.bak' -print0"
        a = commands.getstatusoutput(mycommand)
        sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
        # Show the error message alone, sending stdout to /dev/null.
        os.system(mycommand + " 1>/dev/null")
        # find used -print0, so results are NUL-separated.
        files = a[1].split('\0')
        # split always produces an empty string as the last element
        if files and not files[-1]:
        print "\n"+colorize("WARN", " * IMPORTANT:"),
        if stat.S_ISDIR(mymode):
            print "%d config files in '%s' need updating." % \
            print "config file '%s' needs updating." % x
    print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
        " section of the " + bold("emerge")
    print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id):
    """
    Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
    Returns the number of unread (yet relevent) items.

    @param portdb: a portage tree database
    @type portdb: portdbapi
    @param vardb: an installed package database
    @type vardb: vardbapi
    @param NEWS_PATH: path, relative to a repository, of its news directory
    @param UNREAD_PATH: path of the file tracking unread news items
    @param repo_id: the repository whose news items should be checked
    @rtype: int
    @returns:
    1. The number of unread but relevant news items.
    """
    from portage.news import NewsManager
    # update=True makes NewsManager refresh the unread-items file as a
    # side effect of the query.
    return NewsManager(portdb, vardb, NEWS_PATH,
        UNREAD_PATH).getUnreadItems(repo_id, update=True)
def insert_category_into_atom(atom, category):
    """Insert a category in front of the package name of an atom.

    The category is spliced in immediately before the first word
    character, so leading operators such as ">=" are preserved.

    @param atom: a category-less package atom, e.g. ">=foo-1.0"
    @param category: the category to insert, e.g. "sys-apps"
    @rtype: str or None
    @returns: the atom with "category/" inserted, or None when the atom
        contains no word character to anchor the insertion on.
    """
    alphanum = re.search(r'\w', atom)
    if alphanum:
        ret = atom[:alphanum.start()] + "%s/" % category + \
            atom[alphanum.start():]
    else:
        # No package name present at all; signal failure to the caller.
        ret = None
    return ret
def is_valid_package_atom(x):
    """Return whether x is a valid package atom, category optional.

    A category-less atom is validated by grafting a dummy "cat/"
    category in front of the package name (anchored at the first word
    character so operators like ">=" survive) before handing it to
    portage.isvalidatom().
    """
    if "/" not in x:
        alphanum = re.search(r'\w', x)
        if alphanum:
            x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
    return portage.isvalidatom(x)
6728 def show_blocker_docs_link():
6730 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
6731 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
6733 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
6736 def show_mask_docs():
6737 print "For more information, see the MASKED PACKAGES section in the emerge"
6738 print "man page or refer to the Gentoo Handbook."
def action_sync(settings, trees, mtimedb, myopts, myaction):
    """Synchronize the local portage tree.

    Dispatches on SYNC ("rsync://..." or "cvs://...") or on
    myaction == "metadata", then transfers the metadata cache, applies
    global updates, runs the user's post_sync hook, and finally suggests
    a portage upgrade when a newer version is visible.

    NOTE(review): this copy of the file is missing many lines
    (try/except/else branches, assignments, sys.exit calls); the code
    below is transcribed as found, with comments added only.
    """
    xterm_titles = "notitles" not in settings.features
    emergelog(xterm_titles, " === sync")
    myportdir = settings.get("PORTDIR", None)
    sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
    # Normalize PORTDIR: strip a trailing slash, create it if absent.
    if myportdir[-1]=="/":
        myportdir=myportdir[:-1]
    if not os.path.exists(myportdir):
        print ">>>",myportdir,"not found, creating it."
        os.makedirs(myportdir,0755)
    syncuri=settings["SYNC"].rstrip()
    updatecache_flg = False
    if myaction == "metadata":
        print "skipping sync"
        updatecache_flg = True
    elif syncuri[:8]=="rsync://":
        if not os.path.exists("/usr/bin/rsync"):
            print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
            print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
        import shlex, StringIO
        if settings["PORTAGE_RSYNC_OPTS"] == "":
            portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
            # Hardcoded default rsync options (the list literal's opening
            # line is missing from this copy).
            "--recursive",    # Recurse directories
            "--links",        # Consider symlinks
            "--safe-links",   # Ignore links outside of tree
            "--perms",        # Preserve permissions
            "--times",        # Preserive mod times
            "--compress",     # Compress the data transmitted
            "--force",        # Force deletion on non-empty dirs
            "--whole-file",   # Don't do block transfers, only entire files
            "--delete",       # Delete files that aren't in the master tree
            "--stats",        # Show final statistics about what was transfered
            "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
            "--exclude=/distfiles", # Exclude distfiles from consideration
            "--exclude=/local",     # Exclude local from consideration
            "--exclude=/packages",  # Exclude packages from consideration
        # The below validation is not needed when using the above hardcoded
        # defaults.
        portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
        lexer = shlex.shlex(StringIO.StringIO(
            settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
        lexer.whitespace_split = True
        rsync_opts.extend(lexer)
        # Re-add options a usable sync cannot do without, in case the
        # user's PORTAGE_RSYNC_OPTS dropped them.
        for opt in ("--recursive", "--times"):
            if opt not in rsync_opts:
                portage.writemsg(yellow("WARNING:") + " adding required option " + \
                    "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                rsync_opts.append(opt)
        for exclude in ("distfiles", "local", "packages"):
            opt = "--exclude=/%s" % exclude
            if opt not in rsync_opts:
                portage.writemsg(yellow("WARNING:") + \
                    " adding required option %s not included in " % opt + \
                    "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
                rsync_opts.append(opt)
        # Legacy RSYNC_* variables are honored but deprecated.
        if settings["RSYNC_TIMEOUT"] != "":
            portage.writemsg("WARNING: usage of RSYNC_TIMEOUT is deprecated, " + \
                "use PORTAGE_RSYNC_EXTRA_OPTS instead\n")
            mytimeout = int(settings["RSYNC_TIMEOUT"])
            rsync_opts.append("--timeout=%d" % mytimeout)
        except ValueError, e:
            portage.writemsg("!!! %s\n" % str(e))

        # TODO: determine options required for official servers
        if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
            def rsync_opt_startswith(opt_prefix):
                # True when any configured option starts with the prefix.
                for x in rsync_opts:
                    if x.startswith(opt_prefix):
            if not rsync_opt_startswith("--timeout="):
                rsync_opts.append("--timeout=%d" % mytimeout)
            for opt in ("--compress", "--whole-file"):
                if opt not in rsync_opts:
                    portage.writemsg(yellow("WARNING:") + " adding required option " + \
                        "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                    rsync_opts.append(opt)

        if "--quiet" in myopts:
            rsync_opts.append("--quiet")    # Shut up a lot
            rsync_opts.append("--verbose")  # Print filelist

        if "--verbose" in myopts:
            rsync_opts.append("--progress") # Progress meter for each file

        if "--debug" in myopts:
            rsync_opts.append("--checksum") # Force checksum on all files

        if settings["RSYNC_EXCLUDEFROM"] != "":
            portage.writemsg(yellow("WARNING:") + \
                " usage of RSYNC_EXCLUDEFROM is deprecated, use " + \
                "PORTAGE_RSYNC_EXTRA_OPTS instead\n")
            if os.path.exists(settings["RSYNC_EXCLUDEFROM"]):
                rsync_opts.append("--exclude-from=%s" % \
                    settings["RSYNC_EXCLUDEFROM"])
                portage.writemsg("!!! RSYNC_EXCLUDEFROM specified," + \
                    " but file does not exist.\n")

        if settings["RSYNC_RATELIMIT"] != "":
            portage.writemsg(yellow("WARNING:") + \
                " usage of RSYNC_RATELIMIT is deprecated, use " + \
                "PORTAGE_RSYNC_EXTRA_OPTS instead")
            rsync_opts.append("--bwlimit=%s" % \
                settings["RSYNC_RATELIMIT"])

        # Real local timestamp file.
        servertimestampfile = os.path.join(
            myportdir, "metadata", "timestamp.chk")

        content = portage.util.grabfile(servertimestampfile)
        mytimestamp = time.mktime(time.strptime(content[0],
            "%a, %d %b %Y %H:%M:%S +0000"))
        except (OverflowError, ValueError):

        # Initial-connection timeout (seconds); distinct from rsync's
        # own --timeout, which does not cover the connect phase.
        rsync_initial_timeout = \
            int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
        rsync_initial_timeout = 15

        if settings.has_key("RSYNC_RETRIES"):
            print yellow("WARNING:")+" usage of RSYNC_RETRIES is deprecated, use PORTAGE_RSYNC_RETRIES instead"
            maxretries=int(settings["RSYNC_RETRIES"])
            maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit, e:
            raise # Needed else can't exit
            maxretries=3 #default number of retries

        # Split the sync URI into credentials, host and optional port.
        user_name, hostname, port = re.split(
            "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
        if user_name is None:
        updatecache_flg=True
        all_rsync_opts = set(rsync_opts)
        lexer = shlex.shlex(StringIO.StringIO(
            settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
        lexer.whitespace_split = True
        extra_rsync_opts = list(lexer)
        all_rsync_opts.update(extra_rsync_opts)
        # Address family is forced only via -4/--ipv4 or -6/--ipv6.
        family = socket.AF_INET
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
            ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6
        # Sentinel exit codes used below; distinct from rsync's codes.
        SERVER_OUT_OF_DATE = -1
        EXCEEDED_MAX_RETRIES = -2
        for addrinfo in socket.getaddrinfo(
            hostname, None, family, socket.SOCK_STREAM):
            if addrinfo[0] == socket.AF_INET6:
                # IPv6 addresses need to be enclosed in square brackets
                ips.append("[%s]" % addrinfo[4][0])
                ips.append(addrinfo[4][0])
        from random import shuffle
        except SystemExit, e:
            raise # Needed else can't exit
        except Exception, e:
            print "Notice:",str(e)
        # Substitute one resolved IP for the hostname so load is spread
        # across the mirror rotation.
        dosyncuri = syncuri.replace(
            "//" + user_name + hostname + port + "/",
            "//" + user_name + ips[0] + port + "/", 1)
        except SystemExit, e:
            raise # Needed else can't exit
        except Exception, e:
            print "Notice:",str(e)

        if "--ask" in myopts:
            if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
        emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
        if "--quiet" not in myopts:
            print ">>> Starting rsync with "+dosyncuri+"..."
        emergelog(xterm_titles,
            ">>> Starting retry %d of %d with %s" % \
            (retries,maxretries,dosyncuri))
        print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)

        if mytimestamp != 0 and "--quiet" not in myopts:
            print ">>> Checking server timestamp ..."

        rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts

        if "--debug" in myopts:

        # Even if there's no timestamp available locally, fetch the
        # timestamp anyway as an initial probe to verify that the server is
        # responsive. This protects us from hanging indefinitely on a
        # connection attempt to an unresponsive server which rsync's
        # --timeout option does not prevent.

        # Temporary file for remote server timestamp comparison.
        from tempfile import mkstemp
        fd, tmpservertimestampfile = mkstemp()
        mycommand = rsynccommand[:]
        mycommand.append(dosyncuri.rstrip("/") + \
            "/metadata/timestamp.chk")
        mycommand.append(tmpservertimestampfile)
        def timeout_handler(signum, frame):
            raise portage.exception.PortageException("timed out")
        signal.signal(signal.SIGALRM, timeout_handler)
        # Timeout here in case the server is unresponsive. The
        # --timeout rsync option doesn't apply to the initial
        # connection attempt.
        if rsync_initial_timeout:
            signal.alarm(rsync_initial_timeout)
        mypids.extend(portage.process.spawn(
            mycommand, env=settings.environ(), returnpid=True))
        exitcode = os.waitpid(mypids[0], 0)[1]
        content = portage.grabfile(tmpservertimestampfile)
        if rsync_initial_timeout:
        os.unlink(tmpservertimestampfile)
        except portage.exception.PortageException, e:
            # The initial probe timed out; reap the child if still alive.
            if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
                os.kill(mypids[0], signal.SIGTERM)
                os.waitpid(mypids[0], 0)
            # This is the same code rsync uses for timeout.
        if exitcode != os.EX_OK:
            # Translate the waitpid() status into a shell-style exit code.
            exitcode = (exitcode & 0xff) << 8
            exitcode = exitcode >> 8
        portage.process.spawned_pids.remove(mypids[0])
        servertimestamp = time.mktime(time.strptime(
            content[0], "%a, %d %b %Y %H:%M:%S +0000"))
        except (OverflowError, ValueError):
        del mycommand, mypids, content
        if exitcode == os.EX_OK:
            if (servertimestamp != 0) and (servertimestamp == mytimestamp):
                emergelog(xterm_titles,
                    ">>> Cancelling sync -- Already current.")
                print ">>> Timestamps on the server and in the local repository are the same."
                print ">>> Cancelling all further sync action. You are already up to date."
                print ">>> In order to force sync, remove '%s'." % servertimestampfile
            elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
                emergelog(xterm_titles,
                    ">>> Server out of date: %s" % dosyncuri)
                print ">>> SERVER OUT OF DATE: %s" % dosyncuri
                print ">>> In order to force sync, remove '%s'." % servertimestampfile
                exitcode = SERVER_OUT_OF_DATE
            elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
                # Actual tree transfer happens here.
                mycommand = rsynccommand + [dosyncuri+"/", myportdir]
                exitcode = portage.process.spawn(mycommand,
                    env=settings.environ())
        if exitcode in [0,1,3,4,11,14,20,21]:
        elif exitcode in [1,3,4,11,14,20,21]:
        # Code 2 indicates protocol incompatibility, which is expected
        # for servers with protocol < 29 that don't support
        # --prune-empty-directories. Retry for a server that supports
        # at least rsync protocol version 29 (>=rsync-2.6.4).
        if retries<=maxretries:
            print ">>> Retrying..."
            updatecache_flg=False
            exitcode = EXCEEDED_MAX_RETRIES
        emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
        elif exitcode == SERVER_OUT_OF_DATE:
        elif exitcode == EXCEEDED_MAX_RETRIES:
            ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
        # Human-readable hints for the most common rsync failure codes.
        print darkred("!!!")+green(" Rsync has reported that there is a syntax error. Please ensure")
        print darkred("!!!")+green(" that your SYNC statement is proper.")
        print darkred("!!!")+green(" SYNC="+settings["SYNC"])
        print darkred("!!!")+green(" Rsync has reported that there is a File IO error. Normally")
        print darkred("!!!")+green(" this means your disk is full, but can be caused by corruption")
        print darkred("!!!")+green(" on the filesystem that contains PORTDIR. Please investigate")
        print darkred("!!!")+green(" and try again after the problem has been fixed.")
        print darkred("!!!")+green(" PORTDIR="+settings["PORTDIR"])
        print darkred("!!!")+green(" Rsync was killed before it finished.")
        print darkred("!!!")+green(" Rsync has not successfully finished. It is recommended that you keep")
        print darkred("!!!")+green(" trying or that you use the 'emerge-webrsync' option if you are unable")
        print darkred("!!!")+green(" to use rsync due to firewall or other restrictions. This should be a")
        print darkred("!!!")+green(" temporary problem unless complications exist with your network")
        print darkred("!!!")+green(" (and possibly your system's filesystem) configuration.")
    elif syncuri[:6]=="cvs://":
        if not os.path.exists("/usr/bin/cvs"):
            print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
            print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
        cvsdir=os.path.dirname(myportdir)
        if not os.path.exists(myportdir+"/CVS"):
            # Initial checkout into cvsdir, then renamed to PORTDIR.
            print ">>> Starting initial cvs checkout with "+syncuri+"..."
            if os.path.exists(cvsdir+"/gentoo-x86"):
                print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
            if e.errno != errno.ENOENT:
                "!!! existing '%s' directory; exiting.\n" % myportdir)
            if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
                print "!!! cvs checkout error; exiting."
            os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
            # Incremental update of an existing checkout.
            print ">>> Starting cvs update with "+syncuri+"..."
            retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
                myportdir, settings, free=1)
            if retval != os.EX_OK:
        print "!!! rsync setting: ",syncuri,"not recognized; exiting."

    # Cache transfer is skipped when the metadata-transfer feature is off.
    if updatecache_flg and \
        myaction != "metadata" and \
        "metadata-transfer" not in settings.features:
        updatecache_flg = False

    # Reload the whole config from scratch.
    settings, trees, mtimedb = load_emerge_config(trees=trees)
    portdb = trees[settings["ROOT"]]["porttree"].dbapi

    if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
        action_metadata(settings, portdb, myopts)

    if portage._global_updates(trees, mtimedb["updates"]):
        # Reload the whole config from scratch.
        settings, trees, mtimedb = load_emerge_config(trees=trees)
        portdb = trees[settings["ROOT"]]["porttree"].dbapi

    # Compare the best visible portage version against the installed one.
    mybestpv = portdb.xmatch("bestmatch-visible", "sys-apps/portage")
    mypvs = portage.best(
        trees[settings["ROOT"]]["vartree"].dbapi.match("sys-apps/portage"))

    chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())

    if myaction != "metadata":
        # Run the user's post-sync hook, if present and executable.
        if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
            retval = portage.process.spawn(
                [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
                dosyncuri], env=settings.environ())
            if retval != os.EX_OK:
                print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")

    # Suggest upgrading portage itself when a newer version is visible.
    if(mybestpv != mypvs) and not "--quiet" in myopts:
        print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
        print red(" * ")+"that you update portage now, before any other packages are updated."
        print red(" * ")+"To update portage, run 'emerge portage' now."

    display_news_notification(trees)
def action_metadata(settings, portdb, myopts):
    """Transfer the metadata cache from $PORTDIR/metadata/cache into the
    local depcachedir, showing a percentage progress display unless
    --quiet was given.

    NOTE(review): this copy of the file is missing several lines
    (sys.exit calls, else branches, method headers); transcribed as
    found, comments added only.
    """
    portage.writemsg_stdout("\n>>> Updating Portage cache: ")
    old_umask = os.umask(0002)
    cachedir = os.path.normpath(settings.depcachedir)
    # Refuse to operate when depcachedir is misconfigured to point at a
    # primary system directory.
    if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
        "/lib", "/opt", "/proc", "/root", "/sbin",
        "/sys", "/tmp", "/usr", "/var"]:
        print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
            "ROOT DIRECTORY ON YOUR SYSTEM."
        print >> sys.stderr, \
            "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
    if not os.path.exists(cachedir):
    ec = portage.eclass_cache.cache(portdb.porttree_root)
    myportdir = os.path.realpath(settings["PORTDIR"])
    # Source cache module: read-only view of $PORTDIR/metadata/cache.
    cm = settings.load_best_module("portdbapi.metadbmodule")(
        myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)

    from portage.cache import util

    class percentage_noise_maker(util.quiet_mirroring):
        # Progress reporter that prints a percentage while mirroring.
        def __init__(self, dbapi):
            self.cp_all = dbapi.cp_all()
            l = len(self.cp_all)
            self.call_update_min = 100000000
            self.min_cp_all = l/100.0
        for x in self.cp_all:
            if self.count > self.min_cp_all:
                self.call_update_min = 0
            for y in self.dbapi.cp_list(x):
        self.call_update_mine = 0

        def update(self, *arg):
            # Advance and redraw the percentage in place via backspaces.
            try: self.pstr = int(self.pstr) + 1
            except ValueError: self.pstr = 1
            sys.stdout.write("%s%i%%" % \
                ("\b" * (len(str(self.pstr))+1), self.pstr))
            self.call_update_min = 10000000

        def finish(self, *arg):
            sys.stdout.write("\b\b\b\b100%\n")

    if "--quiet" in myopts:
        # Quiet mode: no progress display, just stream all cpvs.
        def quicky_cpv_generator(cp_all_list):
            for x in cp_all_list:
                for y in portdb.cp_list(x):
        source = quicky_cpv_generator(portdb.cp_all())
        noise_maker = portage.cache.util.quiet_mirroring()
        noise_maker = source = percentage_noise_maker(portdb)
    portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
        eclass_cache=ec, verbose_instance=noise_maker)
def action_regen(settings, portdb):
    """Regenerate metadata cache entries for every ebuild in the tree
    (via aux_get) and prune entries whose ebuilds no longer exist.

    NOTE(review): this copy of the file is missing several lines
    (try/for wrappers, dict initialization); transcribed as found,
    comments added only.
    """
    xterm_titles = "notitles" not in settings.features
    emergelog(xterm_titles, " === regen")
    #regenerate cache entries
    portage.writemsg_stdout("Regenerating cache entries...\n")
    os.close(sys.stdin.fileno())
    except SystemExit, e:
        raise # Needed else can't exit
    mynodes = portdb.cp_all()
    from portage.cache.cache_errors import CacheError
    # dead_nodes starts as all cached keys per tree; entries still
    # backed by an ebuild are discarded below, the remainder is pruned.
    for mytree in portdb.porttrees:
        dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
        except CacheError, e:
            portage.writemsg("Error listing cache entries for " + \
                "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
    mymatches = portdb.cp_list(x)
    portage.writemsg_stdout("Processing %s\n" % x)
    # aux_get populates the cache entry as a side effect.
    foo = portdb.aux_get(y,["DEPEND"])
    except (KeyError, portage.exception.PortageException), e:
        portage.writemsg(
            "Error processing %(cpv)s, continuing... (%(e)s)\n" % \
            {"cpv":y,"e":str(e)}, noiselevel=-1)
    for mytree in portdb.porttrees:
        if portdb.findname2(y, mytree=mytree)[0]:
            dead_nodes[mytree].discard(y)
    # Prune cache entries for vanished ebuilds.
    for mytree, nodes in dead_nodes.iteritems():
        auxdb = portdb.auxdb[mytree]
        except (KeyError, CacheError):
    portage.writemsg_stdout("done!\n")
def action_config(settings, trees, myopts, myfiles):
    """Run the pkg_config() ebuild phase for one installed package
    selected by atom (interactively via --ask when several match).

    NOTE(review): this copy of the file is missing several lines
    (sys.exit calls, else branches, loop headers); transcribed as
    found, comments added only.
    """
    if len(myfiles) != 1:
        print red("!!! config can only take a single package atom at this time\n")
    if not is_valid_package_atom(myfiles[0]):
        portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
        portage.writemsg("!!! Please check ebuild(5) for full details.\n")
        portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
    pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
    except ValueError, e:
        # Multiple matches thrown from cpv_expand
    print "No packages found.\n"
    # Multiple matches: let the user pick one with --ask, otherwise
    # just list them.
    if "--ask" in myopts:
        print "Please select a package to configure:"
        options.append(str(idx))
        print options[-1]+") "+pkg
        idx = userquery("Selection?", options)
        pkg = pkgs[int(idx)-1]
        print "The following packages available:"
        print "\nPlease use a specific atom or the --ask option."
    if "--ask" in myopts:
        if userquery("Ready to configure "+pkg+"?") == "No":
    print "Configuring pkg..."
    ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
    mysettings = portage.config(clone=settings)
    vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
    debug = mysettings.get("PORTAGE_DEBUG") == "1"
    retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
        # NOTE(review): `== 1` compares a string with an int and is
        # always False; the `debug` local computed just above (== "1")
        # looks like the intended value here.
        debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
        mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
    if retval == os.EX_OK:
        # Clean up the temporary build dir after a successful config phase.
        portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
            mysettings, debug=debug, mydbapi=vardb, tree="vartree")
def action_info(settings, trees, myopts, myfiles):
    """Implement `emerge --info`: print portage/system versions, tool
    versions, interesting variables, and — for any atoms given — the
    per-package build settings that differ from the current config.

    NOTE(review): this copy of the file is missing many lines
    (try/except wrappers, else branches, loop headers); transcribed as
    found, comments added only.
    """
    unameout=commands.getstatusoutput("uname -mrp")[1]
    print getportageversion(settings["PORTDIR"], settings["ROOT"],
        settings.profile_path, settings["CHOST"],
        trees[settings["ROOT"]]["vartree"].dbapi)
    header_title = "System Settings"
    print header_width * "="
    print header_title.rjust(int(header_width/2 + len(header_title)/2))
    print header_width * "="
    print "System uname: "+unameout
    # Age of the tree, read from the rsync timestamp file.
    lastSync = portage.grabfile(os.path.join(
        settings["PORTDIR"], "metadata", "timestamp.chk"))
    print "Timestamp of tree:",
    # Report distcc/ccache versions and whether each feature is active.
    output=commands.getstatusoutput("distcc --version")
    print str(output[1].split("\n",1)[0]),
    if "distcc" in settings.features:
    output=commands.getstatusoutput("ccache -V")
    print str(output[1].split("\n",1)[0]),
    if "ccache" in settings.features:
    # Versions of toolchain-critical packages plus profile additions.
    myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
        "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
    myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
    myvars = portage.util.unique_array(myvars)
    if portage.isvalidatom(x):
        pkg_matches = trees["/"]["vartree"].dbapi.match(x)
        pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
        pkg_matches.sort(portage.pkgcmp)
        for pn, ver, rev in pkg_matches:
            pkgs.append(ver + "-" + rev)
        pkgs = ", ".join(pkgs)
        print "%-20s %s" % (x+":", pkgs)
        print "%-20s %s" % (x+":", "[NOT VALID]")
    libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
    # --verbose dumps every variable; otherwise only a curated list.
    if "--verbose" in myopts:
        myvars=settings.keys()
        myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
            'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
            'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
            'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
        myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
    myvars = portage.util.unique_array(myvars)
    print '%s="%s"' % (x, settings[x])
    # USE is reported as plain flags plus separate USE_EXPAND groups.
    use = set(settings["USE"].split())
    use_expand = settings["USE_EXPAND"].split()
    for varname in use_expand:
        flag_prefix = varname.lower() + "_"
        if f.startswith(flag_prefix):
    print 'USE="%s"' % " ".join(use),
    for varname in use_expand:
        myval = settings.get(varname)
        print '%s="%s"' % (varname, myval),
    unset_vars.append(x)
    print "Unset: "+", ".join(unset_vars)
    if "--debug" in myopts:
        for x in dir(portage):
            module = getattr(portage, x)
            if "cvs_id_string" in dir(module):
                print "%s: %s" % (str(x), str(module.cvs_id_string))

    # See if we can find any packages installed matching the strings
    # passed on the command line
    vardb = trees[settings["ROOT"]]["vartree"].dbapi
    portdb = trees[settings["ROOT"]]["porttree"].dbapi
    mypkgs.extend(vardb.match(x))

    # If some packages were found...
    # Get our global settings (we only print stuff if it varies from
    # the current config)
    mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS' ]
    auxkeys = mydesiredvars + [ "USE", "IUSE"]
    pkgsettings = portage.config(clone=settings)
    for myvar in mydesiredvars:
        global_vals[myvar] = set(settings.get(myvar, "").split())

    # Loop through each package
    # Only print settings if they differ from global settings
    header_title = "Package Settings"
    print header_width * "="
    print header_title.rjust(int(header_width/2 + len(header_title)/2))
    print header_width * "="
    from portage.output import EOutput
    # Get all package specific variables
    auxvalues = vardb.aux_get(pkg, auxkeys)
    for i in xrange(len(auxkeys)):
        valuesmap[auxkeys[i]] = set(auxvalues[i].split())
    for myvar in mydesiredvars:
        # If the package variable doesn't match the
        # current global variable, something has changed
        # so set diff_found so we know to print
        if valuesmap[myvar] != global_vals[myvar]:
            diff_values[myvar] = valuesmap[myvar]
    valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
    valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])

    # If a matching ebuild is no longer available in the tree, maybe it
    # would make sense to compare against the flags for the best
    # available version with the same slot?
    if portdb.cpv_exists(pkg):
        pkgsettings.setcpv(pkg, mydb=mydb)
        if valuesmap["IUSE"].intersection(
            pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
            diff_values["USE"] = valuesmap["USE"]
    # If a difference was found, print the info for
    # Print package info
    print "%s was built with the following:" % pkg
    for myvar in mydesiredvars + ["USE"]:
        if myvar in diff_values:
            mylist = list(diff_values[myvar])
            print "%s=\"%s\"" % (myvar, " ".join(mylist))
    print ">>> Attempting to run pkg_info() for '%s'" % pkg
    ebuildpath = vardb.findname(pkg)
    if not ebuildpath or not os.path.exists(ebuildpath):
        out.ewarn("No ebuild found for '%s'" % pkg)
    # NOTE(review): `== 1` below compares a string with an int and is
    # always False; "1" was probably intended (cf. the debug pattern in
    # action_config).
    portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
        pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
        mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Handle the --search / --searchdesc action: build a `search` helper and
# run each command-line term through it, then print the combined output.
# NOTE(review): this chunk is elided -- the guard for empty myfiles and
# some control-flow lines are not visible here.
7564 def action_search(root_config, myopts, myfiles, spinner):
7566 print "emerge: no search terms provided."
7568 searchinstance = search(root_config,
7569 spinner, "--searchdesc" in myopts,
7570 "--quiet" not in myopts, "--usepkg" in myopts,
7571 "--usepkgonly" in myopts)
7572 for mysearch in myfiles:
# A malformed search term raises re.error; report it instead of crashing.
7574 searchinstance.execute(mysearch)
7575 except re.error, comment:
7576 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
7578 searchinstance.output()
# Handle the --depclean and --prune actions: build a reverse-dependency
# graph of installed packages rooted at the system and world sets, then
# unmerge everything not reachable from those roots.
# NOTE(review): many original lines are elided from this view; comments
# below describe only the code that is actually visible.
7580 def action_depclean(settings, trees, ldpath_mtimes,
7581 myopts, action, myfiles, spinner):
7582 # Kill packages that aren't explicitly merged or are required as a
7583 # dependency of another package. World file is explicit.
7585 # Global depclean or prune operations are not very safe when there are
7586 # missing dependencies since it's unknown how badly incomplete
7587 # the dependency graph is, and we might accidentally remove packages
7588 # that should have been pulled into the graph. On the other hand, it's
7589 # relatively safe to ignore missing deps when only asked to remove
7590 # specific packages.
7591 allow_missing_deps = len(myfiles) > 0
# Assemble the standard safety warning shown before a full depclean.
7594 msg.append("Depclean may break link level dependencies. Thus, it is\n")
7595 msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
7596 msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
7598 msg.append("Also study the list of packages to be cleaned for any obvious\n")
7599 msg.append("mistakes. Packages that are part of the world set will always\n")
7600 msg.append("be kept. They can be manually added to this set with\n")
7601 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
7602 msg.append("package.provided (see portage(5)) will be removed by\n")
7603 msg.append("depclean, even if they are part of the world set.\n")
7605 msg.append("As a safety measure, depclean will not remove any packages\n")
7606 msg.append("unless *all* required dependencies have been resolved. As a\n")
7607 msg.append("consequence, it is often necessary to run\n")
7608 msg.append(good("`emerge --update --newuse --deep world`") + " prior to depclean.\n")
# The warning is only shown for a full (argument-less) non-quiet depclean.
7610 if action == "depclean" and "--quiet" not in myopts and not myfiles:
7611 portage.writemsg_stdout("\n")
7613 portage.writemsg_stdout(colorize("BAD", "*** WARNING *** ") + x)
7615 xterm_titles = "notitles" not in settings.features
7616 myroot = settings["ROOT"]
7617 portdb = trees[myroot]["porttree"].dbapi
# dep_check runs against a fake vartree so dependency selection is
# constrained to packages that are already installed.
7619 dep_check_trees = {}
7620 dep_check_trees[myroot] = {}
7621 dep_check_trees[myroot]["vartree"] = \
7622 FakeVartree(trees[myroot]["vartree"],
7623 trees[myroot]["porttree"].dbapi,
7624 depgraph._mydbapi_keys, pkg_cache)
7625 vardb = dep_check_trees[myroot]["vartree"].dbapi
7626 # Constrain dependency selection to the installed packages.
7627 dep_check_trees[myroot]["porttree"] = dep_check_trees[myroot]["vartree"]
7628 root_config = trees[myroot]["root_config"]
7629 setconfig = root_config.setconfig
7630 syslist = setconfig.getSetAtoms("system")
7631 worldlist = setconfig.getSetAtoms("world")
7632 args_set = InternalPackageSet()
7633 fakedb = portage.fakedbapi(settings=settings)
7634 myvarlist = vardb.cpv_all()
# Warn loudly when system/world/vdb information is missing, since cleaning
# without those roots could remove essential packages.
7637 print "\n!!! You have no system list.",
7639 print "\n!!! You have no world file.",
7641 print "\n!!! You have no installed package database (%s)." % portage.VDB_PATH,
7643 if not (syslist and worldlist and myvarlist):
7644 print "\n!!! Proceeding "+(syslist and myvarlist and "may" or "will")
7645 print " break your installation.\n"
7646 if "--pretend" not in myopts:
7647 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
7649 if action == "depclean":
7650 emergelog(xterm_titles, " >>> depclean")
# Validate and expand any package atoms given on the command line.
7653 if not is_valid_package_atom(x):
7654 portage.writemsg("!!! '%s' is not a valid package atom.\n" % x,
7656 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
7659 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
7660 except ValueError, e:
7661 print "!!! The short ebuild name \"" + x + "\" is ambiguous. Please specify"
7662 print "!!! one of the following fully-qualified ebuild names instead:\n"
7664 print " " + colorize("INFORM", i)
7668 matched_packages = False
7671 matched_packages = True
7673 if not matched_packages:
7674 portage.writemsg_stdout(
7675 ">>> No packages selected for removal by %s\n" % action)
7678 if "--quiet" not in myopts:
7679 print "\nCalculating dependencies ",
# Unmerge-order priorities: RDEPEND is strongest, then PDEPEND, then
# build-time DEPEND.
7681 runtime = UnmergeDepPriority(runtime=True)
7682 runtime_post = UnmergeDepPriority(runtime_post=True)
7683 buildtime = UnmergeDepPriority(buildtime=True)
7687 "PDEPEND": runtime_post,
7688 "DEPEND": buildtime,
# Seed the work list with the root atoms that must be kept.
7691 remaining_atoms = []
7692 if action == "depclean":
7693 for atom in syslist:
7694 if vardb.match(atom):
7695 remaining_atoms.append((atom, 'system', runtime))
7697 # Pull in everything that's installed since we don't want
7698 # to clean any package if something depends on it.
7699 remaining_atoms.extend(
7700 ("="+cpv, 'world', runtime) for cpv in vardb.cpv_all())
7702 for atom in worldlist:
7703 if vardb.match(atom):
7704 remaining_atoms.append((atom, 'world', runtime))
7705 elif action == "prune":
7706 for atom in syslist:
7707 if vardb.match(atom):
7708 remaining_atoms.append((atom, 'system', runtime))
7709 # Pull in everything that's installed since we don't want to prune a
7710 # package if something depends on it.
7711 remaining_atoms.extend(
7712 (atom, 'world', runtime) for atom in vardb.cp_all())
7714 # Try to prune everything that's slotted.
7715 for cp in vardb.cp_all():
7716 if len(vardb.cp_list(cp)) > 1:
7720 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
7721 metadata_keys = depgraph._mydbapi_keys
7723 with_bdeps = myopts.get("--with-bdeps", "y") == "y"
# Graph traversal: pop an atom, resolve it against the installed
# packages, record the edge, and queue that package's own dependencies.
7725 while remaining_atoms:
7726 atom, parent, priority = remaining_atoms.pop()
7727 pkgs = vardb.match(atom)
7729 if priority > UnmergeDepPriority.SOFT:
7730 unresolveable.setdefault(atom, []).append(parent)
7732 if action == "depclean" and parent == "world" and myfiles:
7733 # Filter out packages given as arguments since the user wants
7737 metadata = dict(izip(metadata_keys,
7738 vardb.aux_get(pkg, metadata_keys)))
7741 arg_atom = args_set.findAtomForPackage(pkg, metadata)
7742 except portage.exception.InvalidDependString, e:
7743 file_path = os.path.join(
7744 myroot, portage.VDB_PATH, pkg, "PROVIDE")
7745 portage.writemsg("\n\nInvalid PROVIDE: %s\n" % str(e),
7747 portage.writemsg("See '%s'\n" % file_path,
7751 filtered_pkgs.append(pkg)
7752 pkgs = filtered_pkgs
7754 # For consistency with the update algorithm, keep the highest
7755 # visible version and prune any versions that are old or masked.
7756 for cpv in reversed(pkgs):
7757 if visible(settings,
7758 pkg_cache[("installed", myroot, cpv, "nomerge")]):
7762 # They're all masked, so just keep the highest version.
7765 graph.add(pkg, parent, priority=priority)
7766 if fakedb.cpv_exists(pkg):
7769 fakedb.cpv_inject(pkg)
7770 myaux = dict(izip(aux_keys, vardb.aux_get(pkg, aux_keys)))
7773 usedef = vardb.aux_get(pkg, ["USE"])[0].split()
7774 for dep_type, depstr in myaux.iteritems():
# --with-bdeps=n: ignore pure build-time dependencies.
7779 if not with_bdeps and dep_type == "DEPEND":
7782 priority = priority_map[dep_type]
7783 if "--debug" in myopts:
7785 print "Parent: ", pkg
7786 print "Depstring:", depstr
7787 print "Priority:", priority
# Temporarily relax strict dep-string checking for installed
# packages, then restore the global flag.
7790 portage.dep._dep_check_strict = False
7791 success, atoms = portage.dep_check(depstr, None, settings,
7792 myuse=usedef, trees=dep_check_trees, myroot=myroot)
7794 portage.dep._dep_check_strict = True
7796 show_invalid_depstring_notice(
7797 ("installed", myroot, pkg, "nomerge"),
7801 if "--debug" in myopts:
7802 print "Candidates:", atoms
# Blocker atoms (leading "!") are not dependencies to follow.
7805 if atom.startswith("!"):
7807 remaining_atoms.append((atom, pkg, priority))
7809 if "--quiet" not in myopts:
7810 print "\b\b... done!\n"
# Abort (unless removing specific packages) when hard dependencies could
# not be resolved, since the graph may be badly incomplete.
7812 if unresolveable and not allow_missing_deps:
7813 print "Dependencies could not be completely resolved due to"
7814 print "the following required packages not being installed:"
7816 for atom in unresolveable:
7817 print atom, "required by", " ".join(unresolveable[atom])
7818 if unresolveable and not allow_missing_deps:
7820 print "Have you forgotten to run " + good("`emerge --update --newuse --deep world`") + " prior to"
7821 print "%s? It may be necessary to manually uninstall packages that no longer" % action
7822 print "exist in the portage tree since it may not be possible to satisfy their"
7823 print "dependencies. Also, be aware of the --with-bdeps option that is documented"
7824 print "in " + good("`man emerge`") + "."
7826 if action == "prune":
7827 print "If you would like to ignore dependencies then use %s." % \
# Helper for --verbose output: show which parents keep a package alive.
7831 def show_parents(child_node):
7832 parent_nodes = graph.parent_nodes(child_node)
7833 if not parent_nodes:
7834 # With --prune, the highest version can be pulled in without any
7835 # real parent since all installed packages are pulled in. In that
7836 # case there's nothing to show here.
7840 msg.append(" %s pulled in by:\n" % str(child_node))
7841 for parent_node in parent_nodes:
7842 msg.append(" %s\n" % str(parent_node))
7844 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Anything installed but never injected into fakedb (i.e. unreachable
# from the roots) becomes a removal candidate.
7847 if action == "depclean":
7849 for pkg in vardb.cpv_all():
7850 metadata = dict(izip(metadata_keys,
7851 vardb.aux_get(pkg, metadata_keys)))
7854 arg_atom = args_set.findAtomForPackage(pkg, metadata)
7855 except portage.exception.InvalidDependString:
7856 # this error has already been displayed by now
7859 if not fakedb.cpv_exists(pkg):
7860 cleanlist.append(pkg)
7861 elif "--verbose" in myopts:
7864 for pkg in vardb.cpv_all():
7865 if not fakedb.cpv_exists(pkg):
7866 cleanlist.append(pkg)
7867 elif "--verbose" in myopts:
7869 elif action == "prune":
7870 # Prune really uses all installed instead of world. It's not a real
7871 # reverse dependency so don't display it as such.
7872 if graph.contains("world"):
7873 graph.remove("world")
7874 for atom in args_set:
7875 for pkg in vardb.match(atom):
7876 if not fakedb.cpv_exists(pkg):
7877 cleanlist.append(pkg)
7878 elif "--verbose" in myopts:
7882 portage.writemsg_stdout(
7883 ">>> No packages selected for removal by %s\n" % action)
7884 if "--verbose" not in myopts:
7885 portage.writemsg_stdout(
7886 ">>> To see reverse dependencies, use %s\n" % \
7888 if action == "prune":
7889 portage.writemsg_stdout(
7890 ">>> To ignore dependencies, use %s\n" % \
7894 # Use a topological sort to create an unmerge order such that
7895 # each package is unmerged before it's dependencies. This is
7896 # necessary to avoid breaking things that may need to run
7897 # during pkg_prerm or pkg_postrm phases.
7899 # Create a new graph to account for dependencies between the
7900 # packages being unmerged.
7902 clean_set = set(cleanlist)
7904 for node in clean_set:
7905 graph.add(node, None)
7906 myaux = dict(izip(aux_keys, vardb.aux_get(node, aux_keys)))
7908 usedef = vardb.aux_get(node, ["USE"])[0].split()
7909 for dep_type, depstr in myaux.iteritems():
7913 portage.dep._dep_check_strict = False
7914 success, atoms = portage.dep_check(depstr, None, settings,
7915 myuse=usedef, trees=dep_check_trees, myroot=myroot)
7917 portage.dep._dep_check_strict = True
7919 show_invalid_depstring_notice(
7920 ("installed", myroot, node, "nomerge"),
7924 priority = priority_map[dep_type]
7926 if atom.startswith("!"):
7928 matches = vardb.match(atom)
7932 if cpv in clean_set:
7933 graph.add(cpv, node, priority=priority)
# If nothing in the clean set depends on anything else in it, keep the
# simple ordering; otherwise topologically sort the removals.
7936 if len(graph.order) == len(graph.root_nodes()):
7937 # If there are no dependencies between packages
7938 # let unmerge() group them by cat/pn.
7940 cleanlist = graph.all_nodes()
7942 # Order nodes from lowest to highest overall reference count for
7943 # optimal root node selection.
7945 for node in graph.order:
7946 node_refcounts[node] = len(graph.parent_nodes(node))
7947 def cmp_reference_count(node1, node2):
7948 return node_refcounts[node1] - node_refcounts[node2]
7949 graph.order.sort(cmp_reference_count)
# Repeatedly peel off root nodes, ignoring progressively stronger dep
# priorities so circular dependencies can be broken when necessary.
7951 ignore_priority_range = [None]
7952 ignore_priority_range.extend(
7953 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
7954 while not graph.empty():
7955 for ignore_priority in ignore_priority_range:
7956 nodes = graph.root_nodes(ignore_priority=ignore_priority)
7960 raise AssertionError("no root nodes")
7961 if ignore_priority is not None:
7962 # Some deps have been dropped due to circular dependencies,
7963 # so only pop one node in order do minimize the number that
7968 cleanlist.append(node)
7970 unmerge(root_config, myopts, "unmerge", cleanlist,
7971 ldpath_mtimes, ordered=ordered)
7973 if action == "prune":
# Final summary of package counts for this run.
7976 if not cleanlist and "--quiet" in myopts:
7979 print "Packages installed: "+str(len(myvarlist))
7980 print "Packages in world: "+str(len(worldlist))
7981 print "Packages in system: "+str(len(syslist))
7982 print "Unique package names: "+str(len(myvarlist))
7983 print "Required packages: "+str(len(fakedb.cpv_all()))
7984 if "--pretend" in myopts:
7985 print "Number to remove: "+str(len(cleanlist))
7987 print "Number removed: "+str(len(cleanlist))
# Handle the main merge/build actions (including --resume): compute a
# dependency graph, optionally display it and prompt the user, then run
# the merges and post-merge housekeeping.
# NOTE(review): many original lines are elided from this view; comments
# below describe only the code that is actually visible.
7989 def action_build(settings, trees, mtimedb,
7990 myopts, myaction, myfiles, spinner):
7992 # validate the state of the resume data
7993 # so that we can make assumptions later.
7994 for k in ("resume", "resume_backup"):
7995 if k not in mtimedb:
7997 resume_data = mtimedb[k]
7998 if not isinstance(resume_data, dict):
8001 mergelist = resume_data.get("mergelist")
8002 if not isinstance(mergelist, list):
8005 resume_opts = resume_data.get("myopts")
8006 if not isinstance(resume_opts, (dict, list)):
8009 favorites = resume_data.get("favorites")
8010 if not isinstance(favorites, list):
# When resuming, merge the saved options back into myopts (dropping
# options that must not survive a resume) and re-adjust the config.
8015 if "--resume" in myopts and \
8016 ("resume" in mtimedb or
8017 "resume_backup" in mtimedb):
8019 if "resume" not in mtimedb:
8020 mtimedb["resume"] = mtimedb["resume_backup"]
8021 del mtimedb["resume_backup"]
8023 # "myopts" is a list for backward compatibility.
8024 resume_opts = mtimedb["resume"].get("myopts", [])
8025 if isinstance(resume_opts, list):
8026 resume_opts = dict((k,True) for k in resume_opts)
8027 for opt in ("--skipfirst", "--ask", "--tree"):
8028 resume_opts.pop(opt, None)
8029 myopts.update(resume_opts)
8030 # Adjust config according to options of the command being resumed.
8031 for myroot in trees:
8032 mysettings = trees[myroot]["vartree"].settings
8034 adjust_config(myopts, mysettings)
8036 del myroot, mysettings
8038 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags as booleans.
8041 buildpkgonly = "--buildpkgonly" in myopts
8042 pretend = "--pretend" in myopts
8043 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
8044 ask = "--ask" in myopts
8045 nodeps = "--nodeps" in myopts
8046 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
8047 tree = "--tree" in myopts
8050 del myopts["--tree"]
8051 portage.writemsg(colorize("WARN", " * ") + \
8052 "--tree is broken with --nodeps. Disabling...\n")
8053 debug = "--debug" in myopts
8054 verbose = "--verbose" in myopts
8055 quiet = "--quiet" in myopts
8056 if pretend or fetchonly:
8057 # make the mtimedb readonly
8058 mtimedb.filename = None
# --digest is discouraged; warn that `repoman manifest` is preferred.
8059 if "--digest" in myopts:
8060 msg = "The --digest option can prevent corruption from being" + \
8061 " noticed. The `repoman manifest` command is the preferred" + \
8062 " way to generate manifests and it is capable of doing an" + \
8063 " entire repository or category at once."
8065 writemsg(prefix + "\n")
8066 from textwrap import wrap
8067 for line in wrap(msg, 72):
8068 writemsg("%s%s\n" % (prefix, line))
8069 writemsg(prefix + "\n")
# Describe the upcoming package list when a display is requested.
8071 if "--quiet" not in myopts and \
8072 ("--pretend" in myopts or "--ask" in myopts or \
8073 "--tree" in myopts or "--verbose" in myopts):
8075 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
8077 elif "--buildpkgonly" in myopts:
8081 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
8083 print darkgreen("These are the packages that would be %s, in reverse order:") % action
8087 print darkgreen("These are the packages that would be %s, in order:") % action
8090 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
8091 if not show_spinner:
8092 spinner.update = spinner.update_quiet
# Resume path: rebuild the depgraph from the saved resume command and
# report any stale or unsatisfiable resume list in detail.
8095 favorites = mtimedb["resume"].get("favorites")
8096 if not isinstance(favorites, list):
8100 print "Calculating dependencies ",
8101 myparams = create_depgraph_params(myopts, myaction)
8102 mydepgraph = depgraph(settings, trees,
8103 myopts, myparams, spinner)
8106 success = mydepgraph.loadResumeCommand(mtimedb["resume"])
8107 except (portage.exception.PackageNotFound,
8108 mydepgraph.UnsatisfiedResumeDep), e:
8111 from textwrap import wrap
8112 from portage.output import EOutput
8115 resume_data = mtimedb["resume"]
8116 mergelist = resume_data.get("mergelist")
8117 if not isinstance(mergelist, list):
8119 if mergelist and debug or (verbose and not quiet):
8120 out.eerror("Invalid resume list:")
8123 for task in mergelist:
8124 if isinstance(task, list):
8125 out.eerror(indent + str(tuple(task)))
8128 if isinstance(e, mydepgraph.UnsatisfiedResumeDep):
8129 out.eerror("One or more expected dependencies " + \
8130 "are not installed:")
8134 out.eerror(indent + str(dep.atom) + " pulled in by:")
8135 out.eerror(2 * indent + str(dep.parent))
8137 msg = "The resume list contains packages " + \
8138 "with dependencies that have not been " + \
8139 "installed yet. Please restart/continue " + \
8140 "the operation manually."
8141 for line in wrap(msg, 72):
8143 elif isinstance(e, portage.exception.PackageNotFound):
8144 out.eerror("An expected package is " + \
8145 "not available: %s" % str(e))
8147 msg = "The resume list contains one or more " + \
8148 "packages that are no longer " + \
8149 "available. Please restart/continue " + \
8150 "the operation manually."
8151 for line in wrap(msg, 72):
8155 print "\b\b... done!"
8158 mydepgraph.display_problems()
8159 if not (ask or pretend):
8160 # delete the current list and also the backup
8161 # since it's probably stale too.
8162 for k in ("resume", "resume_backup"):
8163 mtimedb.pop(k, None)
8168 if ("--resume" in myopts):
8169 print darkgreen("emerge: It seems we have nothing to resume...")
# Normal (non-resume) path: build the depgraph from the command-line
# files/atoms.
8172 myparams = create_depgraph_params(myopts, myaction)
8173 if "--quiet" not in myopts and "--nodeps" not in myopts:
8174 print "Calculating dependencies ",
8176 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
8178 retval, favorites = mydepgraph.select_files(myfiles)
8179 except portage.exception.PackageNotFound, e:
8180 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
8183 print "\b\b... done!"
8185 mydepgraph.display_problems()
# Interactive display/prompt path (--ask / --tree / --verbose without
# --pretend): show the merge list and ask for confirmation.
8188 if "--pretend" not in myopts and \
8189 ("--ask" in myopts or "--tree" in myopts or \
8190 "--verbose" in myopts) and \
8191 not ("--quiet" in myopts and "--ask" not in myopts):
8192 if "--resume" in myopts:
8193 mymergelist = mydepgraph.altlist()
8194 if len(mymergelist) == 0:
8195 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
8197 favorites = mtimedb["resume"]["favorites"]
8198 retval = mydepgraph.display(
8199 mydepgraph.altlist(reversed=tree),
8200 favorites=favorites)
8201 mydepgraph.display_problems()
8202 if retval != os.EX_OK:
8204 prompt="Would you like to resume merging these packages?"
8206 retval = mydepgraph.display(
8207 mydepgraph.altlist(reversed=("--tree" in myopts)),
8208 favorites=favorites)
8209 mydepgraph.display_problems()
8210 if retval != os.EX_OK:
8213 for x in mydepgraph.altlist():
8214 if isinstance(x, Package) and x.operation == "merge":
# Choose the confirmation prompt based on what will actually happen.
8218 if "--noreplace" in myopts and not oneshot and favorites:
8221 print " %s %s" % (good("*"), x)
8222 prompt="Would you like to add these packages to your world favorites?"
8223 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
8224 prompt="Nothing to merge; would you like to auto-clean packages?"
8227 print "Nothing to merge; quitting."
8230 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
8231 prompt="Would you like to fetch the source files for these packages?"
8233 prompt="Would you like to merge these packages?"
8235 if "--ask" in myopts and userquery(prompt) == "No":
8240 # Don't ask again (e.g. when auto-cleaning packages after merge)
8241 myopts.pop("--ask", None)
# --pretend display path (no fetching): show what would be done.
8243 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
8244 if ("--resume" in myopts):
8245 mymergelist = mydepgraph.altlist()
8246 if len(mymergelist) == 0:
8247 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
8249 favorites = mtimedb["resume"]["favorites"]
8250 retval = mydepgraph.display(
8251 mydepgraph.altlist(reversed=tree),
8252 favorites=favorites)
8253 mydepgraph.display_problems()
8254 if retval != os.EX_OK:
8257 retval = mydepgraph.display(
8258 mydepgraph.altlist(reversed=("--tree" in myopts)),
8259 favorites=favorites)
8260 mydepgraph.display_problems()
8261 if retval != os.EX_OK:
# --buildpkgonly requires a graph with no non-zero-priority edges, i.e.
# all dependencies already merged.
8263 if "--buildpkgonly" in myopts:
8264 graph_copy = mydepgraph.digraph.clone()
8265 for node in list(graph_copy.order):
8266 if not isinstance(node, Package):
8267 graph_copy.remove(node)
8268 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
8269 print "\n!!! --buildpkgonly requires all dependencies to be merged."
8270 print "!!! You have to merge the dependencies before you can build this package.\n"
# Same --buildpkgonly check on the non-pretend execution path.
8273 if "--buildpkgonly" in myopts:
8274 graph_copy = mydepgraph.digraph.clone()
8275 for node in list(graph_copy.order):
8276 if not isinstance(node, Package):
8277 graph_copy.remove(node)
8278 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
8279 print "\n!!! --buildpkgonly requires all dependencies to be merged."
8280 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Execute the merge (resume and normal variants).
8283 if ("--resume" in myopts):
8284 favorites=mtimedb["resume"]["favorites"]
8285 mergetask = MergeTask(settings, trees, myopts)
8286 if "PORTAGE_PARALLEL_FETCHONLY" in settings:
8287 """ parallel-fetch uses --resume --fetchonly and we don't want
8288 it to write the mtimedb"""
8289 mtimedb.filename = None
8290 time.sleep(3) # allow the parent to have first fetch
8291 mymergelist = mydepgraph.altlist()
8293 retval = mergetask.merge(mymergelist, favorites, mtimedb)
8294 merge_count = mergetask.curval
# Maintain the resume/resume_backup entries in the mtimedb.
8296 if "resume" in mtimedb and \
8297 "mergelist" in mtimedb["resume"] and \
8298 len(mtimedb["resume"]["mergelist"]) > 1:
8299 mtimedb["resume_backup"] = mtimedb["resume"]
8300 del mtimedb["resume"]
8302 mtimedb["resume"]={}
8303 # XXX: Stored as a list for backward compatibility.
8304 mtimedb["resume"]["myopts"] = \
8305 [k for k in myopts if myopts[k] is True]
8306 mtimedb["resume"]["favorites"]=favorites
# --digest (without fetch-only): regenerate digests for every ebuild
# about to be merged from the tree.
8307 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
8308 for pkgline in mydepgraph.altlist():
8309 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
8310 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
8311 tmpsettings = portage.config(clone=settings)
8313 if settings.get("PORTAGE_DEBUG", "") == "1":
8315 retval = portage.doebuild(
8316 y, "digest", settings["ROOT"], tmpsettings, edebug,
8317 ("--pretend" in myopts),
8318 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
8321 pkglist = mydepgraph.altlist()
8322 mydepgraph.saveNomergeFavorites()
8324 mergetask = MergeTask(settings, trees, myopts)
8325 retval = mergetask.merge(pkglist, favorites, mtimedb)
8326 merge_count = mergetask.curval
# Post-merge housekeeping: auto-clean (or warn that it is disabled) and
# prune stale preserved-libs registry entries.
8328 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
8329 if "yes" == settings.get("AUTOCLEAN"):
8330 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
8331 unmerge(trees[settings["ROOT"]]["root_config"],
8332 myopts, "clean", [],
8333 ldpath_mtimes, autoclean=1)
8335 portage.writemsg_stdout(colorize("WARN", "WARNING:")
8336 + " AUTOCLEAN is disabled. This can cause serious"
8337 + " problems due to overlapping packages.\n")
8338 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
8340 if merge_count and not (buildpkgonly or fetchonly or pretend):
8341 post_emerge(trees, mtimedb, retval)
# Report that two mutually-exclusive actions were requested on the same
# command line. NOTE(review): the terminating exit call is elided from
# this view -- confirm against the full source.
8344 def multiple_actions(action1, action2):
8345 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
8346 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# Parse an emerge command line with optparse, returning the tuple
# (myaction, myopts, myfiles). Boolean options become True entries in
# myopts; argument-taking options keep their string values; exactly one
# action is allowed. NOTE(review): several lines are elided from this
# view (e.g. the myfiles collection and the "silent" handling).
8349 def parse_opts(tmpcmdline, silent=False):
8354 global actions, options, shortmapping
8356 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument, with their optparse keyword arguments.
8357 argument_options = {
8359 "help":"specify the location for portage configuration files",
8363 "help":"enable or disable color output",
8365 "choices":("y", "n")
8368 "help":"include unnecessary build time dependencies",
8370 "choices":("y", "n")
8373 "help":"specify conditions to trigger package reinstallation",
8375 "choices":["changed-use"]
# Build the OptionParser; --help is removed so emerge can handle help
# output itself.
8379 from optparse import OptionParser
8380 parser = OptionParser()
8381 if parser.has_option("--help"):
8382 parser.remove_option("--help")
8384 for action_opt in actions:
8385 parser.add_option("--" + action_opt, action="store_true",
8386 dest=action_opt.replace("-", "_"), default=False)
8387 for myopt in options:
8388 parser.add_option(myopt, action="store_true",
8389 dest=myopt.lstrip("--").replace("-", "_"), default=False)
8390 for shortopt, longopt in shortmapping.iteritems():
8391 parser.add_option("-" + shortopt, action="store_true",
8392 dest=longopt.lstrip("--").replace("-", "_"), default=False)
8393 for myalias, myopt in longopt_aliases.iteritems():
8394 parser.add_option(myalias, action="store_true",
8395 dest=myopt.lstrip("--").replace("-", "_"), default=False)
8397 for myopt, kwargs in argument_options.iteritems():
8398 parser.add_option(myopt,
8399 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
8401 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Copy parsed values back into the myopts dict keyed by the original
# "--option" spellings.
8403 for myopt in options:
8404 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
8406 myopts[myopt] = True
8408 for myopt in argument_options:
8409 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# Determine the single requested action; more than one is an error.
8413 for action_opt in actions:
8414 v = getattr(myoptions, action_opt.replace("-", "_"))
8417 multiple_actions(myaction, action_opt)
8419 myaction = action_opt
# Legacy bare action words (e.g. `emerge sync`) are still accepted but
# deprecated; "search" is special-cased so terms can be searched for.
8422 if x in actions and myaction != "search":
8424 print red("*** Deprecated use of action '%s', use '--%s' instead" % (x,x))
8425 # special case "search" so people can search for action terms, e.g. emerge -s sync
8427 multiple_actions(myaction, x)
# Translate deprecated --nocolor into the equivalent --color=n.
8433 if "--nocolor" in myopts:
8435 sys.stderr.write("*** Deprecated use of '--nocolor', " + \
8436 "use '--color=n' instead.\n")
8437 del myopts["--nocolor"]
8438 myopts["--color"] = "n"
8440 return myaction, myopts, myfiles
# Iterate over every configured root and fetch its vartree settings.
# NOTE(review): the actual validation call on each settings object is
# elided from this view -- confirm against the full source.
8442 def validate_ebuild_environment(trees):
8443 for myroot in trees:
8444 settings = trees[myroot]["vartree"].settings
# Build (or rebuild) the portage trees from the environment and return
# the (settings, trees, mtimedb) triple used throughout emerge. Each
# root gets a RootConfig with its default set configuration attached.
8447 def load_emerge_config(trees=None):
# PORTAGE_CONFIGROOT / ROOT env vars select the config and target roots.
8449 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8450 kwargs[k] = os.environ.get(envvar, None)
8451 trees = portage.create_trees(trees=trees, **kwargs)
8453 for root, root_trees in trees.iteritems():
8454 settings = root_trees["vartree"].settings
8455 setconfig = load_default_config(settings, root_trees)
8456 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
8458 settings = trees["/"]["vartree"].settings
8460 for myroot in trees:
8462 settings = trees[myroot]["vartree"].settings
# The mtimedb always lives under the "/" root's cache path.
8465 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
8466 mtimedb = portage.MtimeDB(mtimedbfile)
8468 return settings, trees, mtimedb
8470 def adjust_config(myopts, settings):
8471 """Make emerge specific adjustments to the config."""
8473 # To enhance usability, make some vars case insensitive by forcing them to
8475 for myvar in ("AUTOCLEAN", "NOCOLOR"):
8476 if myvar in settings:
8477 settings[myvar] = settings[myvar].lower()
8478 settings.backup_changes(myvar)
8481 # Kill noauto as it will break merges otherwise.
8482 if "noauto" in settings.features:
8483 while "noauto" in settings.features:
8484 settings.features.remove("noauto")
8485 settings["FEATURES"] = " ".join(settings.features)
8486 settings.backup_changes("FEATURES")
# Normalize CLEAN_DELAY, falling back to the default on a parse error.
8490 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
8491 except ValueError, e:
8492 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8493 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
8494 settings["CLEAN_DELAY"], noiselevel=-1)
8495 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
8496 settings.backup_changes("CLEAN_DELAY")
# Normalize EMERGE_WARNING_DELAY the same way (default: 10 seconds).
8498 EMERGE_WARNING_DELAY = 10
8500 EMERGE_WARNING_DELAY = int(settings.get(
8501 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
8502 except ValueError, e:
8503 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8504 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
8505 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
8506 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
8507 settings.backup_changes("EMERGE_WARNING_DELAY")
8509 if "--quiet" in myopts:
8510 settings["PORTAGE_QUIET"]="1"
8511 settings.backup_changes("PORTAGE_QUIET")
8513 # Set so that configs will be merged regardless of remembered status
8514 if ("--noconfmem" in myopts):
8515 settings["NOCONFMEM"]="1"
8516 settings.backup_changes("NOCONFMEM")
8518 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be 0 or 1; anything else is rejected with a message.
8521 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
8522 if PORTAGE_DEBUG not in (0, 1):
8523 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
8524 PORTAGE_DEBUG, noiselevel=-1)
8525 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
8528 except ValueError, e:
8529 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8530 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
8531 settings["PORTAGE_DEBUG"], noiselevel=-1)
8533 if "--debug" in myopts:
8535 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
8536 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR disables color; an explicit --color=y/n
# overrides both NOCOLOR and the stdout tty auto-detection below.
8538 if settings.get("NOCOLOR") not in ("yes","true"):
8539 portage.output.havecolor = 1
8541 """The explicit --color < y | n > option overrides the NOCOLOR environment
8542 variable and stdout auto-detection."""
8543 if "--color" in myopts:
8544 if "y" == myopts["--color"]:
8545 portage.output.havecolor = 1
8546 settings["NOCOLOR"] = "false"
8548 portage.output.havecolor = 0
8549 settings["NOCOLOR"] = "true"
8550 settings.backup_changes("NOCOLOR")
8551 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
8552 portage.output.havecolor = 0
8553 settings["NOCOLOR"] = "true"
8554 settings.backup_changes("NOCOLOR")
8557 global portage # NFC why this is necessary now - genone
8558 # Disable color until we're sure that it should be enabled (after
8559 # EMERGE_DEFAULT_OPTS has been parsed).
8560 portage.output.havecolor = 0
8561 # This first pass is just for options that need to be known as early as
8562 # possible, such as --config-root. They will be parsed again later,
8563 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
8564 # value of --config-root).
8565 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
8566 if "--debug" in myopts:
8567 os.environ["PORTAGE_DEBUG"] = "1"
8568 if "--config-root" in myopts:
8569 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
8571 # Portage needs to ensure a sane umask for the files it creates.
8573 settings, trees, mtimedb = load_emerge_config()
8574 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8577 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
8578 except (OSError, ValueError), e:
8579 portage.writemsg("!!! Failed to change nice value to '%s'\n" % \
8580 settings["PORTAGE_NICENESS"])
8581 portage.writemsg("!!! %s\n" % str(e))
8584 if portage._global_updates(trees, mtimedb["updates"]):
8586 # Reload the whole config from scratch.
8587 settings, trees, mtimedb = load_emerge_config(trees=trees)
8588 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8590 xterm_titles = "notitles" not in settings.features
8593 if "--ignore-default-opts" not in myopts:
8594 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
8595 tmpcmdline.extend(sys.argv[1:])
8596 myaction, myopts, myfiles = parse_opts(tmpcmdline)
8598 if "--digest" in myopts:
8599 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
8600 # Reload the whole config from scratch so that the portdbapi internal
8601 # config is updated with new FEATURES.
8602 settings, trees, mtimedb = load_emerge_config(trees=trees)
8603 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8605 for myroot in trees:
8606 mysettings = trees[myroot]["vartree"].settings
8608 adjust_config(myopts, mysettings)
8610 del myroot, mysettings
8612 spinner = stdout_spinner()
8613 if "candy" in settings.features:
8614 spinner.update = spinner.update_scroll
8616 if "--quiet" not in myopts:
8617 portage.deprecated_profile_check()
8619 eclasses_overridden = {}
8620 for mytrees in trees.itervalues():
8621 mydb = mytrees["porttree"].dbapi
8622 # Freeze the portdbapi for performance (memoize all xmatch results).
8624 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
8627 if eclasses_overridden and \
8628 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
8630 if len(eclasses_overridden) == 1:
8631 writemsg(prefix + "Overlay eclass overrides " + \
8632 "eclass from PORTDIR:\n", noiselevel=-1)
8634 writemsg(prefix + "Overlay eclasses override " + \
8635 "eclasses from PORTDIR:\n", noiselevel=-1)
8636 writemsg(prefix + "\n", noiselevel=-1)
8637 for eclass_name in sorted(eclasses_overridden):
8638 writemsg(prefix + " '%s/%s.eclass'\n" % \
8639 (eclasses_overridden[eclass_name], eclass_name),
8641 writemsg(prefix + "\n", noiselevel=-1)
8642 msg = "It is best to avoid overridding eclasses from PORTDIR " + \
8643 "because it will trigger invalidation of cached ebuild metadata " + \
8644 "that is distributed with the portage tree. If you must " + \
8645 "override eclasses from PORTDIR then you are advised to run " + \
8646 "`emerge --regen` after each time that you run `emerge --sync`. " + \
8647 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
8648 "you would like to disable this warning."
8649 from textwrap import wrap
8650 for line in wrap(msg, 72):
8651 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
8653 if "moo" in myfiles:
8656 Larry loves Gentoo (""" + os.uname()[0] + """)
8658 _______________________
8659 < Have you mooed today? >
8660 -----------------------
8670 ext = os.path.splitext(x)[1]
8671 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
8672 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
8675 # only expand sets for actions taking package arguments
8676 oldargs = myfiles[:]
8677 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
8678 root_config = trees[settings["ROOT"]]["root_config"]
8679 setconfig = root_config.setconfig
8680 # display errors that occurred while loading the SetConfig instance
8681 for e in setconfig.errors:
8682 print colorize("BAD", "Error during set creation: %s" % e)
8684 sets = setconfig.getSets()
8685 # emerge relies on the existence of sets with names "world" and "system"
8686 required_sets = ("world", "system")
8687 for s in required_sets:
8689 msg = ["emerge: incomplete set configuration, " + \
8690 "no \"%s\" set defined" % s]
8691 msg.append(" sets defined: %s" % ", ".join(sets))
8693 sys.stderr.write(line + "\n")
8695 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
8697 # In order to know exactly which atoms/sets should be added to the
8698 # world file, the depgraph performs set expansion later. It will get
8699 # confused about where the atoms came from if it's not allowed to
8700 # expand them itself.
8701 do_not_expand = (None, )
8704 if a in ("system", "world"):
8705 newargs.append(SETPREFIX+a)
8712 if a.startswith(SETPREFIX):
8713 s = a[len(SETPREFIX):]
8715 print "emerge: there are no sets to satisfy %s." % \
8716 colorize("INFORM", s)
8718 setconfig.active.append(s)
8719 if myaction in unmerge_actions and \
8720 not sets[s].supportsOperation("unmerge"):
8721 sys.stderr.write("emerge: the given set %s does " + \
8722 "not support unmerge operations\n" % s)
8724 if not setconfig.getSetAtoms(s):
8725 print "emerge: '%s' is an empty set" % s
8726 elif myaction not in do_not_expand:
8727 newargs.extend(setconfig.getSetAtoms(s))
8729 newargs.append(SETPREFIX+s)
8730 for e in sets[s].errors:
8736 # Need to handle empty sets specially, otherwise emerge will react
8737 # with the help message for empty argument lists
8738 if oldargs and not myfiles:
8739 print "emerge: no targets left after set expansion"
8742 if ("--tree" in myopts) and ("--columns" in myopts):
8743 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
8746 if ("--quiet" in myopts):
8747 spinner.update = spinner.update_quiet
8748 portage.util.noiselimit = -1
8750 # Always create packages if FEATURES=buildpkg
8751 # Imply --buildpkg if --buildpkgonly
8752 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
8753 if "--buildpkg" not in myopts:
8754 myopts["--buildpkg"] = True
8756 # Also allow -S to invoke search action (-sS)
8757 if ("--searchdesc" in myopts):
8758 if myaction and myaction != "search":
8759 myfiles.append(myaction)
8760 if "--search" not in myopts:
8761 myopts["--search"] = True
8764 # Always try and fetch binary packages if FEATURES=getbinpkg
8765 if ("getbinpkg" in settings.features):
8766 myopts["--getbinpkg"] = True
8768 if "--buildpkgonly" in myopts:
8769 # --buildpkgonly will not merge anything, so
8770 # it cancels all binary package options.
8771 for opt in ("--getbinpkg", "--getbinpkgonly",
8772 "--usepkg", "--usepkgonly"):
8773 myopts.pop(opt, None)
8775 if "--skipfirst" in myopts and "--resume" not in myopts:
8776 myopts["--resume"] = True
8778 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
8779 myopts["--usepkgonly"] = True
8781 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
8782 myopts["--getbinpkg"] = True
8784 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
8785 myopts["--usepkg"] = True
8787 # Also allow -K to apply --usepkg/-k
8788 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
8789 myopts["--usepkg"] = True
8791 # Allow -p to remove --ask
8792 if ("--pretend" in myopts) and ("--ask" in myopts):
8793 print ">>> --pretend disables --ask... removing --ask from options."
8796 # forbid --ask when not in a terminal
8797 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
8798 if ("--ask" in myopts) and (not sys.stdin.isatty()):
8799 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
8803 if settings.get("PORTAGE_DEBUG", "") == "1":
8804 spinner.update = spinner.update_quiet
8806 if "python-trace" in settings.features:
8807 import portage.debug
8808 portage.debug.set_trace(True)
8810 if not ("--quiet" in myopts):
8811 if not sys.stdout.isatty() or ("--nospinner" in myopts):
8812 spinner.update = spinner.update_basic
8814 if "--version" in myopts:
8815 print getportageversion(settings["PORTDIR"], settings["ROOT"],
8816 settings.profile_path, settings["CHOST"],
8817 trees[settings["ROOT"]]["vartree"].dbapi)
8819 elif "--help" in myopts:
8820 _emerge.help.help(myaction, myopts, portage.output.havecolor)
8823 if "--debug" in myopts:
8824 print "myaction", myaction
8825 print "myopts", myopts
8827 if not myaction and not myfiles and "--resume" not in myopts:
8828 _emerge.help.help(myaction, myopts, portage.output.havecolor)
8831 pretend = "--pretend" in myopts
8832 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
8833 buildpkgonly = "--buildpkgonly" in myopts
8835 # check if root user is the current user for the actions where emerge needs this
8836 if portage.secpass < 2:
8837 # We've already allowed "--version" and "--help" above.
8838 if "--pretend" not in myopts and myaction not in ("search","info"):
8839 need_superuser = not \
8841 (buildpkgonly and secpass >= 1) or \
8842 myaction in ("metadata", "regen") or \
8843 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
8844 if portage.secpass < 1 or \
8847 access_desc = "superuser"
8849 access_desc = "portage group"
8850 # Always show portage_group_warning() when only portage group
8851 # access is required but the user is not in the portage group.
8852 from portage.data import portage_group_warning
8853 if "--ask" in myopts:
8854 myopts["--pretend"] = True
8856 print ("%s access is required... " + \
8857 "adding --pretend to options.\n") % access_desc
8858 if portage.secpass < 1 and not need_superuser:
8859 portage_group_warning()
8861 sys.stderr.write(("emerge: %s access is " + \
8862 "required.\n\n") % access_desc)
8863 if portage.secpass < 1 and not need_superuser:
8864 portage_group_warning()
8867 disable_emergelog = False
8868 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
8870 disable_emergelog = True
8872 if myaction in ("search", "info"):
8873 disable_emergelog = True
8874 if disable_emergelog:
8875 """ Disable emergelog for everything except build or unmerge
8876 operations. This helps minimize parallel emerge.log entries that can
8877 confuse log parsers. We especially want it disabled during
8878 parallel-fetch, which uses --resume --fetchonly."""
8880 def emergelog(*pargs, **kargs):
8883 if not "--pretend" in myopts:
8884 emergelog(xterm_titles, "Started emerge on: "+\
8885 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
8888 myelogstr=" ".join(myopts)
8890 myelogstr+=" "+myaction
8892 myelogstr += " " + " ".join(oldargs)
8893 emergelog(xterm_titles, " *** emerge " + myelogstr)
def emergeexitsig(signum, frame):
	"""Signal handler for SIGINT/SIGTERM: suppress any further interrupt
	signals, report which signal triggered the shutdown, and exit with
	status 100 + the signal number."""
	for _sig in (signal.SIGINT, signal.SIGTERM):
		signal.signal(_sig, signal.SIG_IGN)
	portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
	sys.exit(100+signum)
8901 signal.signal(signal.SIGINT, emergeexitsig)
8902 signal.signal(signal.SIGTERM, emergeexitsig)
8905 """This gets our final log message in before we quit."""
8906 if "--pretend" not in myopts:
8907 emergelog(xterm_titles, " *** terminating.")
8908 if "notitles" not in settings.features:
8910 portage.atexit_register(emergeexit)
8912 if myaction in ("config", "metadata", "regen", "sync"):
8913 if "--pretend" in myopts:
8914 sys.stderr.write(("emerge: The '%s' action does " + \
8915 "not support '--pretend'.\n") % myaction)
8917 if "sync" == myaction:
8918 action_sync(settings, trees, mtimedb, myopts, myaction)
8919 elif "metadata" == myaction:
8920 action_metadata(settings, portdb, myopts)
8921 elif myaction=="regen":
8922 validate_ebuild_environment(trees)
8923 action_regen(settings, portdb)
8925 elif "config"==myaction:
8926 validate_ebuild_environment(trees)
8927 action_config(settings, trees, myopts, myfiles)
8930 elif "info"==myaction:
8931 action_info(settings, trees, myopts, myfiles)
8934 elif "search"==myaction:
8935 validate_ebuild_environment(trees)
8936 action_search(trees[settings["ROOT"]]["root_config"],
8937 myopts, myfiles, spinner)
8938 elif myaction in ("clean", "unmerge") or \
8939 (myaction == "prune" and "--nodeps" in myopts):
8940 validate_ebuild_environment(trees)
8941 root_config = trees[settings["ROOT"]]["root_config"]
8942 # When given a list of atoms, unmerge
8943 # them in the order given.
8944 ordered = myaction == "unmerge"
8945 if 1 == unmerge(root_config, myopts, myaction, myfiles,
8946 mtimedb["ldpath"], ordered=ordered):
8947 if not (buildpkgonly or fetchonly or pretend):
8948 post_emerge(trees, mtimedb, os.EX_OK)
8950 elif myaction in ("depclean", "prune"):
8951 validate_ebuild_environment(trees)
8952 action_depclean(settings, trees, mtimedb["ldpath"],
8953 myopts, myaction, myfiles, spinner)
8954 if not (buildpkgonly or fetchonly or pretend):
8955 post_emerge(trees, mtimedb, os.EX_OK)
8956 # "update", "system", or just process files:
8958 validate_ebuild_environment(trees)
8959 if "--pretend" not in myopts:
8960 display_news_notification(trees)
8961 retval = action_build(settings, trees, mtimedb,
8962 myopts, myaction, myfiles, spinner)
8963 # if --pretend was not enabled then display_news_notification
8964 # was already called by post_emerge
8965 if "--pretend" in myopts:
8966 display_news_notification(trees)