2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
# Top-level ^C/SIGTERM handling, installed before the heavy portage imports.
# NOTE(review): this chunk is a line-numbered, decimated paste; the enclosing
# try: and the handler's exit call are on lines missing from this view.
7 # This block ensures that ^C interrupts are handled quietly.
11 def exithandler(signum,frame):
# Ignore further SIGINT/SIGTERM while the handler runs so shutdown is
# not re-entered.
12 signal.signal(signal.SIGINT, signal.SIG_IGN)
13 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16 signal.signal(signal.SIGINT, exithandler)
17 signal.signal(signal.SIGTERM, exithandler)
# Restore default SIGPIPE disposition so writes to an exited pager die quietly.
18 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
20 except KeyboardInterrupt:
# Tell portage not to initialize its legacy global configuration on import;
# the flag is deleted again right after the import (missing line 32 region).
25 os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
29 from os import path as osp
30 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
32 del os.environ["PORTAGE_LEGACY_GLOBALS"]
33 from portage import digraph
35 import emergehelp, xpak, commands, errno, re, socket, time, types
37 from output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38 havecolor, nc_len, nocolor, red, teal, turquoise, white, xtermTitle, \
39 xtermTitleReset, yellow
40 from output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from output import bold as white
48 portage_dep._dep_check_strict = True
51 import portage_exception
52 from portage_data import secpass
53 from portage_util import normalize_path as normpath
54 from portage_util import writemsg
56 if not hasattr(__builtins__, "set"):
57 from sets import Set as set
58 from itertools import chain, izip
59 from UserDict import DictMixin
64 import pickle as cPickle
# Progress spinner written to stdout. Three visual modes (basic dots,
# scrolling message, twirl) plus a quiet no-op mode; self.update is bound
# to one of the update_* methods.
# NOTE(review): decimated paste -- the scroll_msgs list opener, the
# __init__ def line, and several return/else/flush lines are missing here.
66 class stdout_spinner(object):
68 "Gentoo Rocks ("+os.uname()[0]+")",
69 "Thank you for using Gentoo. :)",
70 "Are you actually trying to read this?",
71 "How many times have you stared at this?",
72 "We are generating the cache right now",
73 "You are paying too much attention.",
74 "A theory is better than its explanation.",
75 "Phasers locked on target, Captain.",
76 "Thrashing is just virtual crashing.",
77 "To be is to program.",
78 "Real Users hate Real Programmers.",
79 "When all else fails, read the instructions.",
80 "Functionality breeds Contempt.",
81 "The future lies ahead.",
82 "3.1415926535897932384626433832795028841971694",
83 "Sometimes insanity is the only alternative.",
84 "Inaccuracy saves a world of explanation.",
87 twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
# Default mode is the twirl; pick a scroll message pseudo-randomly from
# the current time.
91 self.update = self.update_twirl
92 self.scroll_sequence = self.scroll_msgs[
93 int(time.time() * 100) % len(self.scroll_msgs)]
# Minimum seconds between visible updates (rate limit, see _return_early).
95 self.min_display_latency = 0.05
97 def _return_early(self):
99 Flushing output to the tty too frequently wastes cpu time. Therefore,
100 each update* method should return without doing any output when this
# Rate limiter: True while within min_display_latency of the last update.
103 cur_time = time.time()
104 if cur_time - self.last_update < self.min_display_latency:
106 self.last_update = cur_time
109 def update_basic(self):
# Emit a dot roughly every 100 calls; ". " on wrap-around at 0.
110 self.spinpos = (self.spinpos + 1) % 500
111 if self._return_early():
113 if (self.spinpos % 100) == 0:
114 if self.spinpos == 0:
115 sys.stdout.write(". ")
117 sys.stdout.write(".")
120 def update_scroll(self):
# Scroll the chosen message right-to-left, then left-to-right; position
# wraps at twice the message length.
121 if self._return_early():
123 if(self.spinpos >= len(self.scroll_sequence)):
124 sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
125 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
127 sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
129 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
131 def update_twirl(self):
132 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
133 if self._return_early():
135 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
# No-op spinner used with --quiet / --nospinner (body on missing lines).
138 def update_quiet(self):
# NOTE(review): decimated paste -- the default colours assignment, the
# return statements and the sys.exit on interrupt are on missing lines.
141 def userquery(prompt, responses=None, colours=None):
142 """Displays a prompt and a set of responses, then waits for a response
143 which is checked against the responses and the first to match is
144 returned. An empty response will match the first value in responses. The
145 input buffer is *not* cleared prior to the prompt!
148 responses: a List of Strings.
149 colours: a List of Functions taking and returning a String, used to
150 process the responses for display. Typically these will be functions
151 like red() but could be e.g. lambda x: "DisplayString".
152 If responses is omitted, defaults to ["Yes", "No"], [green, red].
153 If only colours is omitted, defaults to [bold, ...].
155 Returns a member of the List responses. (If called without optional
156 arguments, returns "Yes" or "No".)
157 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
159 if responses is None:
160 responses = ["Yes", "No"]
# Colour scheme for the default Yes/No choices.
162 create_color_func("PROMPT_CHOICE_DEFAULT"),
163 create_color_func("PROMPT_CHOICE_OTHER")
165 elif colours is None:
# Cycle the colour list so it is exactly as long as responses.
167 colours=(colours*len(responses))[:len(responses)]
171 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
172 for key in responses:
173 # An empty response will match the first value in responses.
174 if response.upper()==key[:len(response)].upper():
176 print "Sorry, response '%s' not understood." % response,
177 except (EOFError, KeyboardInterrupt):
# Module-level tables: the set of emerge actions, the long options, and
# the short-option -> long-option mapping.
# NOTE(review): decimated paste -- the opening "actions = frozenset([",
# "options = [" and "shortmapping = {" lines are missing from this view.
182 "clean", "config", "depclean",
184 "prune", "regen", "search",
185 "sync", "system", "unmerge", "world",
188 "--ask", "--alphabetical",
189 "--buildpkg", "--buildpkgonly",
190 "--changelog", "--columns",
195 "--fetchonly", "--fetch-all-uri",
196 "--getbinpkg", "--getbinpkgonly",
197 "--help", "--ignore-default-opts",
199 "--newuse", "--nocolor",
200 "--nodeps", "--noreplace",
201 "--nospinner", "--oneshot",
202 "--onlydeps", "--pretend",
203 "--quiet", "--resume",
204 "--searchdesc", "--selective",
208 "--usepkg", "--usepkgonly",
209 "--verbose", "--version"
# Single-letter aliases; note case distinguishes different options.
215 "b":"--buildpkg", "B":"--buildpkgonly",
216 "c":"--clean", "C":"--unmerge",
217 "d":"--debug", "D":"--deep",
219 "f":"--fetchonly", "F":"--fetch-all-uri",
220 "g":"--getbinpkg", "G":"--getbinpkgonly",
222 "k":"--usepkg", "K":"--usepkgonly",
224 "n":"--noreplace", "N":"--newuse",
225 "o":"--onlydeps", "O":"--nodeps",
226 "p":"--pretend", "P":"--prune",
228 "s":"--search", "S":"--searchdesc",
231 "v":"--verbose", "V":"--version"
# Append a timestamped message to /var/log/emerge.log (and optionally set
# the xterm title). Best-effort: I/O and portage errors are reported to
# stderr, not raised.
# NOTE(review): decimated paste -- the "--quiet" early return, the seek,
# the close and the try: lines are missing from this view.
234 def emergelog(xterm_titles, mystr, short_msg=None):
236 if short_msg == None:
238 if "HOSTNAME" in os.environ:
239 short_msg = os.environ["HOSTNAME"]+": "+short_msg
240 xtermTitle(short_msg)
242 file_path = "/var/log/emerge.log"
243 mylogfile = open(file_path, "a")
# Keep the log owned/readable by the portage user and group.
244 portage_util.apply_secpass_permissions(file_path,
245 uid=portage.portage_uid, gid=portage.portage_gid,
249 mylock = portage_locks.lockfile(mylogfile)
250 # seek because we may have gotten held up by the lock.
251 # if so, we may not be positioned at the end of the file.
# Timestamp truncated to whole seconds (first 10 chars of time.time()).
253 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
257 portage_locks.unlockfile(mylock)
259 except (IOError,OSError,portage_exception.PortageException), e:
261 print >> sys.stderr, "emergelog():",e
# Print a visible countdown before a destructive action; ^C aborts.
# NOTE(review): the loop header and sleep are on lines missing here.
263 def countdown(secs=5, doing="Starting"):
265 print ">>> Waiting",secs,"seconds before starting..."
266 print ">>> (Control-C to abort)...\n"+doing+" in: ",
270 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275 # formats a size given in bytes nicely
# NOTE(review): decimated paste -- the non-integer early return and the
# thousands-separator loop around line 285+ are missing from this view.
276 def format_size(mysize):
# Python 2 type check: only int/long sizes are formatted.
277 if type(mysize) not in [types.IntType,types.LongType]:
279 if 0 != mysize % 1024:
280 # Always round up to the next kB so that it doesn't show 0 kB when
281 # some small file still needs to be fetched.
282 mysize += 1024 - mysize % 1024
283 mystr=str(mysize/1024)
# Insert a comma as thousands separator (loop context on missing lines).
287 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version for CHOST, trying progressively less
# reliable sources: eselect compiler, gcc-config, ${CHOST}-gcc, plain gcc.
# Returns e.g. "gcc-4.1.1" or "[unavailable]".
291 def getgccversion(chost):
294 return: the current in-use gcc version
297 gcc_ver_command = 'gcc -dumpversion'
298 gcc_ver_prefix = 'gcc-'
300 gcc_not_found_error = red(
301 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
302 "!!! to update the environment of this terminal and possibly\n" +
303 "!!! other terminals also.\n"
# First choice: eselect compiler, which reports "<profile>/<spec>".
306 mystatus, myoutput = commands.getstatusoutput("eselect compiler show")
307 if mystatus == os.EX_OK and len(myoutput.split("/")) == 2:
308 part1, part2 = myoutput.split("/")
309 if part1.startswith(chost + "-"):
310 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Second choice: gcc-config's current profile, e.g. "<chost>-4.1.1".
312 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
313 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
314 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Third choice: ask the CHOST-prefixed compiler directly.
316 mystatus, myoutput = commands.getstatusoutput(
317 chost + "-" + gcc_ver_command)
318 if mystatus == os.EX_OK:
319 return gcc_ver_prefix + myoutput
# Last resort: whatever plain "gcc" is on PATH.
321 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
322 if mystatus == os.EX_OK:
323 return gcc_ver_prefix + myoutput
325 portage.writemsg(gcc_not_found_error, noiselevel=-1)
326 return "[unavailable]"
# Build the version banner shown by emerge --version:
# "Portage <ver> (<profile>, <gcc>, <libc>, <kernel> <arch>)".
# NOTE(review): decimated paste -- the try/except around readlink and the
# libclist iteration header are on missing lines.
328 def getportageversion(portdir, target_root, profile, chost, vardb):
329 profilever = "unavailable"
# Report the profile path relative to ${PORTDIR}/profiles when possible,
# otherwise the raw symlink target prefixed with "!".
331 realpath = os.path.realpath(profile)
332 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
333 if realpath.startswith(basepath):
334 profilever = realpath[1 + len(basepath):]
337 profilever = "!" + os.readlink(profile)
340 del realpath, basepath
# Collect installed libc versions from both virtual/libc and the older
# virtual/glibc, de-duplicated.
343 libclist = vardb.match("virtual/libc")
344 libclist += vardb.match("virtual/glibc")
345 libclist = portage_util.unique_array(libclist)
347 xs=portage.catpkgsplit(x)
349 libcver+=","+"-".join(xs[1:])
351 libcver="-".join(xs[1:])
353 libcver="unavailable"
355 gccver = getgccversion(chost)
356 unameout=os.uname()[2]+" "+os.uname()[4]
358 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate command-line options/action into the depgraph parameter set.
# NOTE(review): decimated paste -- the 'myparams.add("deep")' line under
# --deep and the final return are on missing lines.
360 def create_depgraph_params(myopts, myaction):
361 #configure emerge engine parameters
363 # self: include _this_ package regardless of if it is merged.
364 # selective: exclude the package if it is merged
365 # recurse: go into the dependencies
366 # deep: go into the dependencies of already merged packages
367 # empty: pretend nothing is merged
368 # complete: completely account for all known dependencies
369 myparams = set(["recurse"])
# These options (and the system/world actions) only act on packages that
# are not already satisfied.
370 if "--update" in myopts or \
371 "--newuse" in myopts or \
372 "--reinstall" in myopts or \
373 "--noreplace" in myopts or \
374 myaction in ("system", "world"):
375 myparams.add("selective")
# --emptytree overrides selective: treat everything as unmerged.
376 if "--emptytree" in myopts:
377 myparams.add("empty")
378 myparams.discard("selective")
379 if "--nodeps" in myopts:
380 myparams.discard("recurse")
381 if "--deep" in myopts:
383 if "--complete-graph" in myopts:
384 myparams.add("complete")
387 # search functionality
# Constructor of the search class (class statement is on a missing line).
# Builds the list of package databases to search and, when more than one
# db is active, a fake portdb facade whose methods fan out over all dbs.
399 def __init__(self, settings, trees, spinner, searchdesc,
400 verbose, usepkg, usepkgonly):
401 """Searches the available and installed packages for the supplied search key.
402 The list of available and installed packages is created at object instantiation.
403 This makes successive searches faster."""
404 self.settings = settings
405 self.vartree = trees["vartree"]
406 self.spinner = spinner
407 self.verbose = verbose
408 self.searchdesc = searchdesc
# Route portdb-style calls through this object's _-prefixed wrappers.
412 self.portdb = fake_portdb
413 for attrib in ("aux_get", "cp_all",
414 "xmatch", "findname", "getfetchlist"):
415 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
419 portdb = trees["porttree"].dbapi
420 bindb = trees["bintree"].dbapi
421 vardb = trees["vartree"].dbapi
# Choose which databases participate, mirroring --usepkg/--usepkgonly.
423 if not usepkgonly and portdb._have_root_eclass_dir:
424 self._dbs.append(portdb)
426 if (usepkg or usepkgonly) and bindb.cp_all():
427 self._dbs.append(bindb)
429 self._dbs.append(vardb)
430 self._portdb = portdb
# _cp_all fragment (def line missing): union of cp_all over all dbs,
# returned sorted.
435 cp_all.update(db.cp_all())
436 return list(sorted(cp_all))
# Fake-portdb wrappers: try each configured db in order and return the
# first usable answer. Loop headers/returns are on missing lines.
438 def _aux_get(self, *args, **kwargs):
441 return db.aux_get(*args, **kwargs)
446 def _findname(self, *args, **kwargs):
448 if db is not self._portdb:
449 # We don't want findname to return anything
450 # unless it's an ebuild in a portage tree.
451 # Otherwise, it's already built and we don't
454 func = getattr(db, "findname", None)
456 value = func(*args, **kwargs)
# getfetchlist is likewise only meaningful on dbs that provide it.
461 def _getfetchlist(self, *args, **kwargs):
463 func = getattr(db, "getfetchlist", None)
465 value = func(*args, **kwargs)
# Visibility check for a cpv from a given db, delegating to the
# module-level visible() with a Package wrapper. Installed packages come
# from the vartree; anything not from the real portdb counts as built.
470 def _visible(self, db, cpv, metadata):
471 installed = db is self.vartree.dbapi
472 built = installed or db is not self._portdb
475 pkg_type = "installed"
478 return visible(self.settings,
479 Package(type_name=pkg_type, root=self.settings["ROOT"],
480 cpv=cpv, built=built, installed=installed, metadata=metadata))
# xmatch over all configured dbs, merging results. Supported levels:
# match-all, match-visible, bestmatch-visible.
482 def _xmatch(self, level, atom):
484 This method does not expand old-style virtuals because it
485 is restricted to returning matches for a single ${CATEGORY}/${PN}
486 and old-style virtual matches are unreliable for that when querying
487 multiple package databases. If necessary, old-style virtuals
488 can be performed on atoms prior to calling this method.
490 cp = portage.dep_getkey(atom)
491 if level == "match-all":
# Union of matches from every db; dbs without xmatch fall back to match().
494 if hasattr(db, "xmatch"):
495 matches.update(db.xmatch(level, atom))
497 matches.update(db.match(atom))
# Keep only cpvs belonging to this exact ${CATEGORY}/${PN}.
498 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
499 db._cpv_sort_ascending(result)
500 elif level == "match-visible":
503 if hasattr(db, "xmatch"):
504 matches.update(db.xmatch(level, atom))
# dbs without native visibility filtering: fetch metadata and apply
# _visible() per cpv.
506 db_keys = list(db._aux_cache_keys)
507 for cpv in db.match(atom):
508 metadata = dict(izip(db_keys,
509 db.aux_get(cpv, db_keys)))
510 if not self._visible(db, cpv, metadata):
513 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
514 db._cpv_sort_ascending(result)
515 elif level == "bestmatch-visible":
# Track the single highest visible match across all dbs via portage.best.
518 if hasattr(db, "xmatch"):
519 cpv = db.xmatch("bestmatch-visible", atom)
520 if not cpv or portage.cpv_getkey(cpv) != cp:
522 if not result or cpv == portage.best([cpv, result]):
525 db_keys = list(db._aux_cache_keys)
526 # break out of this loop with highest visible
527 # match, checked in descending order
528 for cpv in reversed(db.match(atom)):
529 if portage.cpv_getkey(cpv) != cp:
531 metadata = dict(izip(db_keys,
532 db.aux_get(cpv, db_keys)))
533 if not self._visible(db, cpv, metadata):
535 if not result or cpv == portage.best([cpv, result]):
# Unknown level values are a programming error.
539 raise NotImplementedError(level)
# NOTE(review): decimated paste -- several continue/else/try lines are
# missing from this view.
542 def execute(self,searchkey):
543 """Performs the search for the supplied search key"""
545 self.searchkey=searchkey
546 self.packagematches = []
# "desc" matches are only collected when description search is enabled.
549 self.matches = {"pkg":[], "desc":[]}
552 self.matches = {"pkg":[]}
553 print "Searching... ",
# Leading '%' switches to regex mode; leading '@' anchors the match to
# the full category/package name.
556 if self.searchkey.startswith('%'):
558 self.searchkey = self.searchkey[1:]
559 if self.searchkey.startswith('@'):
561 self.searchkey = self.searchkey[1:]
563 self.searchre=re.compile(self.searchkey,re.I)
565 self.searchre=re.compile(re.escape(self.searchkey), re.I)
566 for package in self.portdb.cp_all():
567 self.spinner.update()
# Match against the full cat/pkg or just the package name depending on
# the '@' flag above.
570 match_string = package[:]
572 match_string = package.split("/")[-1]
575 if self.searchre.search(match_string):
# No visible version => record the package as masked.
576 if not self.portdb.xmatch("match-visible", package):
578 self.matches["pkg"].append([package,masked])
579 elif self.searchdesc: # DESCRIPTION searching
580 full_package = self.portdb.xmatch("bestmatch-visible", package)
582 #no match found; we don't want to query description
583 full_package = portage.best(
584 self.portdb.xmatch("match-all", package))
590 full_desc = self.portdb.aux_get(
591 full_package, ["DESCRIPTION"])[0]
593 print "emerge: search: aux_get() failed, skipping"
595 if self.searchre.search(full_desc):
596 self.matches["desc"].append([full_package,masked])
# Sort each result bucket and accumulate the total match count.
598 for mtype in self.matches:
599 self.matches[mtype].sort()
600 self.mlen += len(self.matches[mtype])
# output() -- def line is missing from this view; prints the collected
# search results, including version, size, homepage, description, license.
603 """Outputs the results of the search."""
604 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
605 print "[ Applications found : "+white(str(self.mlen))+" ]"
607 vardb = self.vartree.dbapi
608 for mtype in self.matches:
609 for match,masked in self.matches[mtype]:
# Description matches store a full cpv; resolve the best visible one.
612 full_package = self.portdb.xmatch(
613 "bestmatch-visible", match)
615 #no match found; we don't want to query description
617 full_package = portage.best(
618 self.portdb.xmatch("match-all",match))
621 match = portage.cpv_getkey(match)
625 desc, homepage, license = self.portdb.aux_get(
626 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
628 print "emerge: search: aux_get() failed, skipping"
631 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
633 print green("*")+"  "+white(match)
634 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
638 mycat = match.split("/")[0]
639 mypkg = match.split("/")[1]
640 mycpv = match + "-" + myversion
641 myebuild = self.portdb.findname(mycpv)
# Ebuild found: compute the download size from the Manifest DISTDIR data.
643 pkgdir = os.path.dirname(myebuild)
644 import portage_manifest as manifest
645 mf = manifest.Manifest(
646 pkgdir, self.settings["DISTDIR"])
647 fetchlist = self.portdb.getfetchlist(mycpv,
648 mysettings=self.settings, all=True)[1]
650 mysum[0] = mf.getDistfilesSize(fetchlist)
652 file_size_str = "Unknown (missing digest for %s)" % \
# No ebuild: fall back to the binary package file size, if any.
657 if db is not vardb and \
658 db.cpv_exists(mycpv):
660 if not myebuild and hasattr(db, "bintree"):
661 myebuild = db.bintree.getname(mycpv)
663 mysum[0] = os.stat(myebuild).st_size
# Format the size in kB with a thousands separator.
668 if myebuild and file_size_str is None:
669 mystr = str(mysum[0] / 1024)
673 mystr = mystr[:mycount] + "," + mystr[mycount:]
674 file_size_str = mystr + " kB"
678 print "      ", darkgreen("Latest version available:"),myversion
679 print "      ", self.getInstallationStatus(mycat+'/'+mypkg)
682 (darkgreen("Size of files:"), file_size_str)
683 print "      ", darkgreen("Homepage:")+"      ",homepage
684 print "      ", darkgreen("Description:")+"   ",desc
685 print "      ", darkgreen("License:")+"       ",license
# Render the installed-version line for a cat/pkg, or "[ Not Installed ]".
691 def getInstallationStatus(self,package):
692 installed_package = self.vartree.dep_bestmatch(package)
694 version = self.getVersion(installed_package,search.VERSION_RELEASE)
696 result = darkgreen("Latest version installed:")+" "+version
698 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract the version (optionally with -rN revision) from a full cpv.
# The else branch / return are on missing lines.
701 def getVersion(self,full_package,detail):
702 if len(full_package) > 1:
703 package_parts = portage.catpkgsplit(full_package)
# Only append the revision when it is not the implicit r0.
704 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
705 result = package_parts[2]+ "-" + package_parts[3]
707 result = package_parts[2]
713 #build our package digraph
# Return the atom list for mode "system" (from the profile) or "world"
# (from ${ROOT}/var/lib/portage/world). Missing-lines note: the mode
# dispatch, file close and final return are not in this view.
714 def getlist(settings, mode):
716 mylines = settings.packages
719 file_path = os.path.join(settings["ROOT"], portage.WORLD_FILE)
720 myfile = open(file_path, "r")
721 mylines = myfile.readlines()
723 except (OSError, IOError), e:
# A missing world file is normal on fresh systems; just warn.
724 if e.errno == errno.ENOENT:
725 portage.writemsg("\n!!! World file does not exist: '%s'\n" % file_path)
# Normalize whitespace and strip each line before returning.
731 myline=" ".join(x.split())
740 mynewlines.append(myline.strip())
744 def world_clean_package(vardb, cpv):
745 """Remove a package from the world file when unmerged."""
746 world_set = WorldSet(vardb.settings)
748 worldlist = list(world_set)
750 mykey = portage.cpv_getkey(cpv)
# Keep every world atom except ones whose sole remaining match is the
# package being unmerged. Loop header and the drop branch are on
# missing lines.
753 if portage.dep_getkey(x) == mykey:
754 matches = vardb.match(x, use_cache=0)
758 elif len(matches) == 1 and matches[0] == cpv:
762 #others are around; keep it.
763 newworldlist.append(x)
765 #this doesn't match the package we're unmerging; keep it.
766 newworldlist.append(x)
769 world_set.update(newworldlist)
# Registry of the built-in package sets ("world", "system").
# NOTE(review): the self.sets dict initialization and getSets() are on
# missing lines.
775 class SetConfig(object):
776 def __init__(self, settings, trees):
778 self.sets["world"] = WorldSet(settings)
779 self.sets["world"].load()
780 self.sets["system"] = SystemSet(settings)
# Return a plain set of the atoms in the named package set.
785 def getSetAtoms(self, name):
786 return set(self.sets[name])
# In-memory set of dependency atoms, indexed by ${CATEGORY}/${PN} for
# fast per-package lookup. Supports matching against PROVIDE virtuals.
788 class InternalPackageSet(object):
789 def __init__(self, initial_atoms=None):
792 self.update(initial_atoms)
# add(): atoms are bucketed by their cp key; duplicates are ignored.
796 cp = portage.dep_getkey(atom)
797 cp_list = self._atoms.get(cp)
800 self._atoms[cp] = cp_list
801 if atom not in cp_list:
803 def update(self, atoms):
806 def __contains__(self, atom):
807 cp = portage.dep_getkey(atom)
808 if cp in self._atoms and atom in self._atoms[cp]:
811 def findAtomForPackage(self, cpv, metadata):
812 """Return the best match for a given package from the arguments, or
813 None if there are no matches.  This matches virtual arguments against
814 the PROVIDE metadata.  This can raise an InvalidDependString exception
815 if an error occurs while parsing PROVIDE."""
# SLOT-qualified cpv so slot atoms can match precisely.
816 cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
817 cp = portage.dep_getkey(cpv)
818 atoms = self._atoms.get(cp)
820 best_match = portage.best_match_to_list(cpv_slot, atoms)
823 if not metadata["PROVIDE"]:
# Expand PROVIDE under the package's USE flags and retry the lookup
# with each provided virtual rewritten to the real cp.
825 provides = portage.flatten(portage_dep.use_reduce(
826 portage_dep.paren_reduce(metadata["PROVIDE"]),
827 uselist=metadata["USE"].split()))
828 for provide in provides:
829 provided_cp = portage.dep_getkey(provide)
830 atoms = self._atoms.get(provided_cp)
832 transformed_atoms = [atom.replace(provided_cp, cp) for atom in atoms]
833 best_match = portage.best_match_to_list(cpv_slot, transformed_atoms)
# Map the transformed winner back to the original stored atom.
835 return atoms[transformed_atoms.index(best_match)]
838 def iterAtomsForPackage(self, pkg):
840 Find all matching atoms for a given package. This matches virtual
841 arguments against the PROVIDE metadata. This will raise an
842 InvalidDependString exception if PROVIDE is invalid.
844 cpv_slot_list = ["%s:%s" % (pkg.cpv, pkg.metadata["SLOT"])]
845 cp = portage.cpv_getkey(pkg.cpv)
846 atoms = self._atoms.get(cp)
849 if portage.match_from_list(atom, cpv_slot_list):
851 if not pkg.metadata["PROVIDE"]:
853 provides = portage.flatten(portage_dep.use_reduce(
854 portage_dep.paren_reduce(pkg.metadata["PROVIDE"]),
855 uselist=pkg.metadata["USE"].split()))
856 for provide in provides:
857 provided_cp = portage.dep_getkey(provide)
858 atoms = self._atoms.get(provided_cp)
861 if portage.match_from_list(atom.replace(provided_cp, cp),
# __iter__ fragment: chains all per-cp buckets.
866 for atoms in self._atoms.itervalues():
class SystemSet(InternalPackageSet):
	"""Package set holding the profile's "system" atoms.

	The set is filled once, at construction time, from
	getlist(settings, "system"); any extra keyword arguments are
	forwarded unchanged to InternalPackageSet.
	"""
	def __init__(self, settings, **kwargs):
		InternalPackageSet.__init__(self, **kwargs)
		system_atoms = getlist(settings, "system")
		self.update(system_atoms)
# World-file-backed package set: load/save/lock helpers around
# ${ROOT}/var/lib/portage/world. The def lines for load/save/lock/unlock
# are on missing lines in this view.
875 class WorldSet(InternalPackageSet):
876 def __init__(self, settings, **kwargs):
877 InternalPackageSet.__init__(self, **kwargs)
878 self.world_file = os.path.join(settings["ROOT"], portage.WORLD_FILE)
880 def _ensure_dirs(self):
# Directory must be group-writable for the portage group (setgid 2750).
881 portage_util.ensure_dirs(os.path.dirname(self.world_file),
882 gid=portage.portage_gid, mode=02750, mask=02)
885 self.update(portage_util.grabfile_package(self.world_file))
# Atomic rewrite keeps the world file consistent if interrupted.
888 portage.write_atomic(self.world_file,
889 "\n".join(sorted(self)) + "\n")
892 self._lock = portage_locks.lockfile(self.world_file, wantnewlockfile=1)
894 portage_locks.unlockfile(self._lock)
897 class RootConfig(object):
898 """This is used internally by depgraph to track information about a
# (docstring continues on a missing line)
900 def __init__(self, settings, trees, setconfig):
902 self.settings = settings
903 self.root = self.settings["ROOT"]
904 self.setconfig = setconfig
905 self.sets = self.setconfig.getSets()
# Per-root cache of visible packages, kept as a virtual dbapi.
906 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# NOTE(review): decimated paste -- several assignments (cpv, new_world_atom,
# mydb selection) and return statements are on missing lines.
908 def create_world_atom(pkg_key, metadata, args_set, root_config):
909 """Create a new atom for the world file if one does not exist.  If the
910 argument atom is precise enough to identify a specific slot then a slot
911 atom will be returned. Atoms that are in the system set may also be stored
912 in world since system atoms can only match one slot while world atoms can
913 be greedy with respect to slots.  Unslotted system packages will not be
# (docstring continues on a missing line)
915 arg_atom = args_set.findAtomForPackage(pkg_key, metadata)
916 cp = portage.dep_getkey(arg_atom)
918 sets = root_config.sets
919 portdb = root_config.trees["porttree"].dbapi
920 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists, or the single
# SLOT is not the default "0".
921 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
922 for cpv in portdb.match(cp))
923 slotted = len(available_slots) > 1 or \
924 (len(available_slots) == 1 and "0" not in available_slots)
926 # check the vdb in case this is multislot
927 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
928 for cpv in vardb.match(cp))
929 slotted = len(available_slots) > 1 or \
930 (len(available_slots) == 1 and "0" not in available_slots)
931 if slotted and arg_atom != cp:
932 # If the user gave a specific atom, store it as a
933 # slot atom in the world file.
934 slot_atom = "%s:%s" % (cp, metadata["SLOT"])
936 # For USE=multislot, there are a couple of cases to
939 # 1) SLOT="0", but the real SLOT spontaneously changed to some
940 # unknown value, so just record an unslotted atom.
942 # 2) SLOT comes from an installed package and there is no
943 # matching SLOT in the portage tree.
945 # Make sure that the slot atom is available in either the
946 # portdb or the vardb, since otherwise the user certainly
947 # doesn't want the SLOT atom recorded in the world file
948 # (case 1 above).  If it's only available in the vardb,
949 # the user may be trying to prevent a USE=multislot
950 # package from being removed by --depclean (case 2 above).
953 if not portdb.match(slot_atom):
954 # SLOT seems to come from an installed multislot package
956 # If there is no installed package matching the SLOT atom,
957 # it probably changed SLOT spontaneously due to USE=multislot,
958 # so just record an unslotted atom.
959 if vardb.match(slot_atom):
960 # Now verify that the argument is precise
961 # enough to identify a specific slot.
962 matches = mydb.match(arg_atom)
963 matched_slots = set()
965 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
966 if len(matched_slots) == 1:
967 new_world_atom = slot_atom
969 if new_world_atom == sets["world"].findAtomForPackage(pkg_key, metadata):
970 # Both atoms would be identical, so there's nothing to add.
973 # Unlike world atoms, system atoms are not greedy for slots, so they
974 # can't be safely excluded from world if they are slotted.
975 system_atom = sets["system"].findAtomForPackage(pkg_key, metadata)
977 if not portage.dep_getkey(system_atom).startswith("virtual/"):
979 # System virtuals aren't safe to exclude from world since they can
980 # match multiple old-style virtuals but only one of them will be
981 # pulled in by update or depclean.
982 providers = portdb.mysettings.getvirtuals().get(
983 portage.dep_getkey(system_atom))
984 if providers and len(providers) == 1 and providers[0] == cp:
986 return new_world_atom
# Generator that strips the leading +/- default markers from IUSE flags;
# the yield statements are on missing lines in this view.
988 def filter_iuse_defaults(iuse):
990 if flag.startswith("+") or flag.startswith("-"):
# Base class whose __init__ assigns keyword arguments onto __slots__
# declared anywhere in the class hierarchy (walked via __bases__).
995 class SlotObject(object):
996 __slots__ = ("__weakref__",)
998 def __init__(self, **kwargs):
999 classes = [self.__class__]
# Breadth-first walk of the MRO collecting each class's __slots__.
1004 classes.extend(c.__bases__)
1005 slots = getattr(c, "__slots__", None)
1008 for myattr in slots:
# Missing kwargs default to None.
1009 myvalue = kwargs.get(myattr, None)
1010 setattr(self, myattr, myvalue)
# Priority base class: rich comparisons delegate to __int__() (defined by
# subclasses), so priorities compare directly against ints and each other.
1012 class AbstractDepPriority(SlotObject):
1013 __slots__ = ("buildtime", "runtime", "runtime_post")
1015 def __lt__(self, other):
1016 return self.__int__() < other
1018 def __le__(self, other):
1019 return self.__int__() <= other
1021 def __eq__(self, other):
1022 return self.__int__() == other
1024 def __ne__(self, other):
1025 return self.__int__() != other
1027 def __gt__(self, other):
1028 return self.__int__() > other
1030 def __ge__(self, other):
1031 return self.__int__() >= other
# copy() -- def line is on a missing line; shallow copy is sufficient
# since all slot values are immutable flags.
1035 return copy.copy(self)
# NOTE(review): decimated paste -- the MEDIUM/SOFT constants, __int__ def
# line and most of its return statements are on missing lines.
1037 class DepPriority(AbstractDepPriority):
1039 This class generates an integer priority level based of various
1040 attributes of the dependency relationship. Attributes can be assigned
1041 at any time and the new integer value will be generated on calls to the
1042 __int__() method. Rich comparison operators are supported.
1044 The boolean attributes that affect the integer value are "satisfied",
1045 "buildtime", "runtime", and "system". Various combinations of
1046 attributes lead to the following priority levels:
1048 Combination of properties           Priority  Category
1050 not satisfied and buildtime            0       HARD
1051 not satisfied and runtime             -1       MEDIUM
1052 not satisfied and runtime_post        -2       MEDIUM_SOFT
1053 satisfied and buildtime and rebuild   -3       SOFT
1054 satisfied and buildtime               -4       SOFT
1055 satisfied and runtime                 -5       SOFT
1056 satisfied and runtime_post            -6       SOFT
1057 (none of the above)                   -6       SOFT
1059 Several integer constants are defined for categorization of priority
1062 MEDIUM   The upper boundary for medium dependencies.
1063 MEDIUM_SOFT   The upper boundary for medium-soft dependencies.
1064 SOFT     The upper boundary for soft dependencies.
1065 MIN      The lower boundary for soft dependencies.
1067 __slots__ = ("satisfied", "rebuild")
# __int__ body fragments: unsatisfied deps rank highest (HARD/MEDIUM),
# satisfied ones fall into the SOFT band (see the table above).
1074 if not self.satisfied:
1079 if self.runtime_post:
1087 if self.runtime_post:
# __str__ fragment: map the integer onto its category name.
1092 myvalue = self.__int__()
1093 if myvalue > self.MEDIUM:
1095 if myvalue > self.MEDIUM_SOFT:
1097 if myvalue > self.SOFT:
1098 return "medium-soft"
# Priority subclass for blockers; a shared singleton instance is attached
# to the class (interior body is on missing lines).
1101 class BlockerDepPriority(DepPriority):
1106 BlockerDepPriority.instance = BlockerDepPriority()
# Priority used when ordering unmerges; most of __int__/__str__ is on
# missing lines.
1108 class UnmergeDepPriority(AbstractDepPriority):
1111 Combination of properties           Priority  Category
1114 runtime_post                          -1       HARD
1116 (none of the above)                   -2       SOFT
1126 if self.runtime_post:
1133 myvalue = self.__int__()
1134 if myvalue > self.SOFT:
# NOTE(review): decimated paste -- the aux-key setup, try/finally around
# the vdb lock, and several else branches are on missing lines.
1138 class FakeVartree(portage.vartree):
1139 """This is implements an in-memory copy of a vartree instance that provides
1140 all the interfaces required for use by the depgraph.  The vardb is locked
1141 during the constructor call just long enough to read a copy of the
1142 installed package information.  This allows the depgraph to do it's
1143 dependency calculations without holding a lock on the vardb.  It also
1144 allows things like vardb global updates to be done in memory so that the
1145 user doesn't necessarily need write access to the vardb in cases where
1146 global updates are necessary (updates are performed when necessary if there
1147 is not a matching ebuild in the tree)."""
1148 def __init__(self, real_vartree, portdb, db_keys, pkg_cache):
1149 self.root = real_vartree.root
1150 self.settings = real_vartree.settings
# COUNTER and SLOT are always needed for duplicate-slot resolution below.
1152 for required_key in ("COUNTER", "SLOT"):
1153 if required_key not in mykeys:
1154 mykeys.append(required_key)
1155 self._pkg_cache = pkg_cache
1156 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1157 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1159 # At least the parent needs to exist for the lock file.
1160 portage_util.ensure_dirs(vdb_path)
1161 except portage_exception.PortageException:
# Only lock the vdb when we can write to it; read-only users proceed
# without a lock.
1165 if os.access(vdb_path, os.W_OK):
1166 vdb_lock = portage_locks.lockdir(vdb_path)
1167 real_dbapi = real_vartree.dbapi
# Snapshot every installed package into the in-memory dbapi.
1169 for cpv in real_dbapi.cpv_all():
1170 cache_key = ("installed", self.root, cpv, "nomerge")
1171 pkg = self._pkg_cache.get(cache_key)
1173 metadata = pkg.metadata
1175 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1176 myslot = metadata["SLOT"]
1177 mycp = portage.dep_getkey(cpv)
1178 myslot_atom = "%s:%s" % (mycp, myslot)
1180 mycounter = long(metadata["COUNTER"])
1183 metadata["COUNTER"] = str(mycounter)
# When two installed packages claim the same slot, keep the one with
# the higher install COUNTER.
1184 other_counter = slot_counters.get(myslot_atom, None)
1185 if other_counter is not None:
1186 if other_counter > mycounter:
1188 slot_counters[myslot_atom] = mycounter
1190 pkg = Package(built=True, cpv=cpv,
1191 installed=True, metadata=metadata,
1192 root=self.root, type_name="installed")
1193 self._pkg_cache[pkg] = pkg
1194 self.dbapi.cpv_inject(pkg)
1195 real_dbapi.flush_cache()
1198 portage_locks.unlockdir(vdb_lock)
1199 # Populate the old-style virtuals using the cached values.
1200 if not self.settings.treeVirtuals:
1201 self.settings.treeVirtuals = portage_util.map_dictlist_vals(
1202 portage.getCPFromCPV, self.get_all_provides())
1204 # Intialize variables needed for lazy cache pulls of the live ebuild
1205 # metadata.  This ensures that the vardb lock is released ASAP, without
1206 # being delayed in case cache generation is triggered.
# Monkey-patch the dbapi so aux_get/match go through the lazy wrappers.
1207 self._aux_get = self.dbapi.aux_get
1208 self.dbapi.aux_get = self._aux_get_wrapper
1209 self._match = self.dbapi.match
1210 self.dbapi.match = self._match_wrapper
1211 self._aux_get_history = set()
1212 self._portdb_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1213 self._portdb = portdb
1214 self._global_updates = None
1216 def _match_wrapper(self, cpv, use_cache=1):
1218 Make sure the metadata in Package instances gets updated for any
1219 cpv that is returned from a match() call, since the metadata can
1220 be accessed directly from the Package instance instead of via
1223 matches = self._match(cpv, use_cache=use_cache)
# Refresh metadata once per cpv; history-tracked in _aux_get_wrapper.
1225 if cpv in self._aux_get_history:
1227 self._aux_get_wrapper(cpv, [])
1230 def _aux_get_wrapper(self, pkg, wants):
# First request for this pkg: try to overlay live ebuild metadata.
1231 if pkg in self._aux_get_history:
1232 return self._aux_get(pkg, wants)
1233 self._aux_get_history.add(pkg)
1235 # Use the live ebuild metadata if possible.
1236 live_metadata = dict(izip(self._portdb_keys,
1237 self._portdb.aux_get(pkg, self._portdb_keys)))
1238 self.dbapi.aux_update(pkg, live_metadata)
1239 except (KeyError, portage_exception.PortageException):
# No matching ebuild: fall back to applying profiles/updates entries
# to the cached installed metadata.
1240 if self._global_updates is None:
1241 self._global_updates = \
1242 grab_global_updates(self._portdb.porttree_root)
1243 perform_global_updates(
1244 pkg, self.dbapi, self._global_updates)
1245 return self._aux_get(pkg, wants)
# Collect all package move/slotmove commands from the tree's
# profiles/updates directory into a flat list.
# NOTE(review): sampled listing -- the "try:" before grab_updates (1250),
# the DirectoryNotFound handler body (1253-1254), the initialization of
# upd_commands, and the final return (1258-1259) are not visible here.
# Also note: parse_updates errors are discarded (only commands are kept).
1247 def grab_global_updates(portdir):
1248 from portage_update import grab_updates, parse_updates
1249 updpath = os.path.join(portdir, "profiles", "updates")
1251 rawupdates = grab_updates(updpath)
1252 except portage_exception.DirectoryNotFound:
1255 for mykey, mystat, mycontent in rawupdates:
1256 commands, errors = parse_updates(mycontent)
1257 upd_commands.extend(commands)
# Apply recorded move/slotmove commands to the *DEPEND metadata of a
# single cpv in the given dbapi, writing back only the changed entries.
# NOTE(review): original line 1265 is not visible here -- presumably an
# "if updates:" guard around the aux_update call; confirm against the
# upstream source before editing.
1260 def perform_global_updates(mycpv, mydb, mycommands):
1261 from portage_update import update_dbentries
1262 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1263 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1264 updates = update_dbentries(mycommands, aux_dict)
1266 mydb.aux_update(mycpv, updates)
# Visibility check: a sequence of mask guards (invalid SLOT, CHOST
# mismatch for built-but-not-installed packages, unsupported EAPI,
# missing keywords, package.mask, profile mask).
# NOTE(review): sampled listing -- the body of each guard (presumably
# "return False") and the final "return True" are not visible here.
1268 def visible(pkgsettings, pkg):
1270 Check if a package is visible. This can raise an InvalidDependString
1271 exception if LICENSE is invalid.
1272 TODO: optionally generate a list of masking reasons
1274 @returns: True if the package is visible, False otherwise.
1276 if not pkg.metadata["SLOT"]:
# CHOST is only enforced for binary packages that are not yet installed.
1278 if pkg.built and not pkg.installed:
1279 pkg_chost = pkg.metadata.get("CHOST")
1280 if pkg_chost and pkg_chost != pkgsettings["CHOST"]:
1282 if not portage.eapi_is_supported(pkg.metadata["EAPI"]):
# Keyword masking is ignored for already-installed packages.
1284 if not pkg.installed and \
1285 pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1287 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1289 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
# Build the list of human-readable masking reasons for a package,
# starting from portage.getmaskingstatus() and appending extra checks
# (CHOST mismatch for uninstalled binaries, undefined SLOT).
# NOTE(review): sampled listing -- the trailing "return mreasons"
# (original 1307-1308) is not visible here.
1293 def get_masking_status(pkg, pkgsettings, root_config):
1295 mreasons = portage.getmaskingstatus(
1296 pkg, settings=pkgsettings,
1297 portdb=root_config.trees["porttree"].dbapi)
1299 if pkg.built and not pkg.installed:
1300 pkg_chost = pkg.metadata.get("CHOST")
1301 if pkg_chost and pkg_chost != pkgsettings["CHOST"]:
1302 mreasons.append("CHOST: %s" % \
1303 pkg.metadata["CHOST"])
1305 if not pkg.metadata["SLOT"]:
1306 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for cpv from the given db and compute its masking
# reasons; returns (metadata, mreasons). metadata is None (and mreasons
# is ["corruption"]) when the aux_get fails.
# NOTE(review): sampled listing -- the try/except around the aux_get
# (originals 1313, 1316-1317) and the "else:" before line 1324 are not
# visible here.
1310 def get_mask_info(root_config, cpv, pkgsettings,
1311 db, pkg_type, built, installed, db_keys):
1314 metadata = dict(izip(db_keys,
1315 db.aux_get(cpv, db_keys)))
# For ebuilds (not built), compute the effective USE for this cpv so
# masking checks see the same flags a merge would use.
1318 if metadata and not built:
1319 pkgsettings.setcpv(cpv, mydb=metadata)
1320 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1321 if metadata is None:
1322 mreasons = ["corruption"]
1324 pkg = Package(type_name=pkg_type, root=root_config.root,
1325 cpv=cpv, built=built, installed=installed, metadata=metadata)
1326 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1327 return metadata, mreasons
# Print a "- cpv (masked by: ...)" line for each masked package,
# de-duplicating cpvs and mask-file comments; returns True if any
# package was masked due to an unsupported EAPI.
# NOTE(review): sampled listing -- the shown_cpvs initialization (1334),
# the "continue" after the duplicate check (1339-1340), the license
# handling around 1350-1353, and the comment-printing lines (1356-1357)
# are not visible here.
1329 def show_masked_packages(masked_packages):
1330 shown_licenses = set()
1331 shown_comments = set()
1332 # Maybe there is both an ebuild and a binary. Only
1333 # show one of them to avoid redundant appearance.
1335 have_eapi_mask = False
1336 for (root_config, pkgsettings, cpv,
1337 metadata, mreasons) in masked_packages:
1338 if cpv in shown_cpvs:
1341 comment, filename = None, None
# Only package.mask entries carry a comment/location worth showing.
1342 if "package.mask" in mreasons:
1343 comment, filename = \
1344 portage.getmaskingreason(
1345 cpv, metadata=metadata,
1346 settings=pkgsettings,
1347 portdb=root_config.trees["porttree"].dbapi,
1348 return_location=True)
1349 missing_licenses = []
1351 if not portage.eapi_is_supported(metadata["EAPI"]):
1352 have_eapi_mask = True
1354 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1355 if comment and comment not in shown_comments:
1358 shown_comments.add(comment)
1359 return have_eapi_mask
# Base class for schedulable items: identity, equality, hashing,
# length, indexing, iteration and string form all delegate to a
# subclass-provided _hash_key tuple (see Blocker and Package below).
# NOTE(review): sampled listing -- the "def" lines for __hash__,
# __len__, __iter__ and __str__ (originals around 1376, 1379, 1385,
# 1391) are not visible here; only their bodies remain.
1361 class Task(SlotObject):
1362 __slots__ = ("_hash_key",)
# Subclasses must lazily build and cache self._hash_key; the base
# implementation only validates that it exists.
1364 def _get_hash_key(self):
1365 hash_key = getattr(self, "_hash_key", None)
1366 if hash_key is None:
1367 raise NotImplementedError(self)
# Compare against the raw key so a Task equals its own key tuple.
1370 def __eq__(self, other):
1371 return self._get_hash_key() == other
1373 def __ne__(self, other):
1374 return self._get_hash_key() != other
1377 return hash(self._get_hash_key())
1380 return len(self._get_hash_key())
1382 def __getitem__(self, key):
1383 return self._get_hash_key()[key]
1386 return iter(self._get_hash_key())
1388 def __contains__(self, key):
1389 return key in self._get_hash_key()
1392 return str(self._get_hash_key())
# A blocking dependency ("!cat/pkg" atom) in a given root. Hash key is
# the tuple ("blocks", root, atom).
# NOTE(review): sampled listing -- the assignment line
# "self._hash_key = \" (original 1404) is not visible here.
1394 class Blocker(Task):
1395 __slots__ = ("root", "atom", "cp", "satisfied")
1397 def __init__(self, **kwargs):
1398 Task.__init__(self, **kwargs)
# cp is the category/package portion of the blocker atom.
1399 self.cp = portage.dep_getkey(self.atom)
1401 def _get_hash_key(self):
1402 hash_key = getattr(self, "_hash_key", None)
1403 if hash_key is None:
1405 ("blocks", self.root, self.atom)
1406 return self._hash_key
# A concrete package (ebuild, binary, or installed) plus derived keys:
# cp ("cat/pkg"), slot_atom ("cat/pkg:SLOT"), cpv_slot and pv_split
# (for version comparison). Hash key is
# (type_name, root, cpv, operation).
# NOTE(review): sampled listing -- the "self._hash_key = \" assignment
# (original 1427) and the True/False return lines of the four rich
# comparison methods (1433, 1435-1437, etc.) are not visible here.
1408 class Package(Task):
1409 __slots__ = ("built", "cpv", "depth",
1410 "installed", "metadata", "onlydeps", "operation",
1411 "root", "type_name",
1412 "cp", "cpv_slot", "pv_split", "slot_atom")
1413 def __init__(self, **kwargs):
1414 Task.__init__(self, **kwargs)
1415 self.cp = portage.cpv_getkey(self.cpv)
1416 self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
1417 self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
# Drop the category from catpkgsplit's result; pv_split feeds pkgcmp.
1418 self.pv_split = portage.catpkgsplit(self.cpv)[1:]
1420 def _get_hash_key(self):
1421 hash_key = getattr(self, "_hash_key", None)
1422 if hash_key is None:
# Default operation is "merge", downgraded to "nomerge" for
# --onlydeps nodes and already-installed packages.
1423 if self.operation is None:
1424 self.operation = "merge"
1425 if self.onlydeps or self.installed:
1426 self.operation = "nomerge"
1428 (self.type_name, self.root, self.cpv, self.operation)
1429 return self._hash_key
# Ordering compares versions via pkgcmp, only meaningful within the
# same cp (different cp appears to short-circuit; returns not visible).
1431 def __lt__(self, other):
1432 if other.cp != self.cp:
1434 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1438 def __le__(self, other):
1439 if other.cp != self.cp:
1441 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1445 def __gt__(self, other):
1446 if other.cp != self.cp:
1448 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1452 def __ge__(self, other):
1453 if other.cp != self.cp:
1455 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Base class for command-line dependency arguments (atoms, packages,
# sets). Stores the raw argument string and the owning RootConfig.
# NOTE(review): original line 1461 (presumably "self.arg = arg") is not
# visible in this sampled listing; subclasses below read self.arg.
1459 class DependencyArg(object):
1460 def __init__(self, arg=None, root_config=None):
1462 self.root_config = root_config
# A dependency argument that is a single dependency atom; exposed as a
# one-element set-like tuple.
# NOTE(review): original line 1470 (presumably "self.atom = atom") is
# not visible in this sampled listing.
1467 class AtomArg(DependencyArg):
1468 def __init__(self, atom=None, **kwargs):
1469 DependencyArg.__init__(self, **kwargs)
1471 self.set = (self.atom, )
class PackageArg(DependencyArg):
	"""A command-line argument naming one specific Package instance.

	The package is pinned with an exact-version "=" atom, exposed as a
	one-element tuple so the argument can be consumed like a package set.
	"""
	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.atom = "=" + package.cpv
		self.set = (self.atom, )
		self.package = package
# A dependency argument that names a package set ("@world" style); the
# set name is the argument with its prefix stripped.
# NOTE(review): original line 1483 (presumably "self.set = set") is not
# visible in this sampled listing. The "set" parameter shadows the
# builtin; do not rename -- callers may pass it by keyword.
1480 class SetArg(DependencyArg):
1481 def __init__(self, set=None, **kwargs):
1482 DependencyArg.__init__(self, **kwargs)
1484 self.name = self.arg[len(SETPREFIX):]
# One edge in the dependency graph: an atom required by `parent` in a
# given root, with a DepPriority and recursion depth. priority defaults
# to a fresh DepPriority when not supplied.
# NOTE(review): the body of the depth default (originals 1494-1495,
# presumably "self.depth = 0") is not visible in this sampled listing.
1486 class Dependency(SlotObject):
1487 __slots__ = ("atom", "blocker", "depth",
1488 "parent", "onlydeps", "priority", "root")
1489 def __init__(self, **kwargs):
1490 SlotObject.__init__(self, **kwargs)
1491 if self.priority is None:
1492 self.priority = DepPriority()
1493 if self.depth is None:
# NOTE(review): sampled listing -- throughout this class, try:/else:
# lines, docstring delimiters, blank lines and several statements
# (e.g. BlockerData's "self.atoms = atoms", the cache-validation
# "if not cache_valid:" branch header, flush()'s lock handling, the
# "return result"/keys() definitions near the end) are not visible.
1496 class BlockerCache(DictMixin):
1497 """This caches blockers of installed packages so that dep_check does not
1498 have to be done for every single installed package on every invocation of
1499 emerge. The cache is invalidated whenever it is detected that something
1500 has changed that might alter the results of dep_check() calls:
1501 1) the set of installed packages (including COUNTER) has changed
1502 2) the old-style virtuals have changed
# Lightweight record: (counter, atoms) pair for one installed cpv.
1504 class BlockerData(object):
1505 def __init__(self, counter, atoms):
1506 self.counter = counter
1509 def __init__(self, myroot, vardb):
1511 self._installed_pkgs = set(vardb.cpv_all())
1512 self._virtuals = vardb.settings.getvirtuals()
1513 self._cache_filename = os.path.join(myroot,
1514 portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
1515 self._cache_version = "1"
1516 self._cache_data = None
1517 self._modified = False
# Load the pickled cache; find_global = None forbids unpickling
# arbitrary classes from the (world-readable) cache file.
1522 f = open(self._cache_filename)
1523 mypickle = cPickle.Unpickler(f)
1524 mypickle.find_global = None
1525 self._cache_data = mypickle.load()
1528 except (IOError, OSError, EOFError, cPickle.UnpicklingError):
# A valid cache is a dict with matching "version" and a "blockers" dict.
1530 cache_valid = self._cache_data and \
1531 isinstance(self._cache_data, dict) and \
1532 self._cache_data.get("version") == self._cache_version and \
1533 isinstance(self._cache_data.get("blockers"), dict)
# (Re)initialize an empty cache when validation fails.
1535 self._cache_data = {"version":self._cache_version}
1536 self._cache_data["blockers"] = {}
1537 self._cache_data["virtuals"] = self._virtuals
1538 self._modified = False
1541 """If the current user has permission and the internal blocker cache
1542 been updated, save it to disk and mark it unmodified. This is called
1543 by emerge after it has processed blockers for all installed packages.
1544 Currently, the cache is only written if the user has superuser
1545 privileges (since that's required to obtain a lock), but all users
1546 have read access and benefit from faster blocker lookups (as long as
1547 the entire cache is still valid). The cache is stored as a pickled
1548 dict object with the following format:
1552 "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
1553 "virtuals" : vardb.settings.getvirtuals()
1556 if self._modified and \
1559 f = portage_util.atomic_ofstream(self._cache_filename)
1560 cPickle.dump(self._cache_data, f, -1)
# Cache must stay group-readable so non-root emerge can benefit.
1562 portage_util.apply_secpass_permissions(
1563 self._cache_filename, gid=portage.portage_gid, mode=0644)
1564 except (IOError, OSError), e:
1566 self._modified = False
1568 def __setitem__(self, cpv, blocker_data):
1570 Update the cache and mark it as modified for a future call to
1573 @param cpv: Package for which to cache blockers.
1575 @param blocker_data: An object with counter and atoms attributes.
1576 @type blocker_data: BlockerData
1578 self._cache_data["blockers"][cpv] = \
1579 (blocker_data.counter, blocker_data.atoms)
1580 self._modified = True
1583 return iter(self._cache_data["blockers"])
1585 def __delitem__(self, cpv):
1586 del self._cache_data["blockers"][cpv]
1587 self._modified = True
1589 def __getitem__(self, cpv):
1592 @returns: An object with counter and atoms attributes.
# Rehydrate the stored (counter, atoms) tuple into a BlockerData.
1594 return self.BlockerData(*self._cache_data["blockers"][cpv])
1597 """This needs to be implemented so that self.__repr__() doesn't raise
1598 an AttributeError."""
# Print a word-wrapped error notice for a corrupt/invalid dependency
# string, with remediation advice that differs for installed ("nomerge")
# packages versus packages being installed.
# NOTE(review): sampled listing -- originals 1602, 1605, 1607-1613,
# 1615, 1627 ("else:"), 1631-1632 and 1634-1635 are not visible here,
# including the msg list initialization and the loop producing x.
1601 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
# The stdlib formatter module provides simple word wrapping to 72 cols.
1603 from formatter import AbstractFormatter, DumbWriter
1604 f = AbstractFormatter(DumbWriter(maxcol=72))
1606 print "\n\n!!! Invalid or corrupt dependency specification: "
# parent_node is a (type, root, key, status) tuple.
1614 p_type, p_root, p_key, p_status = parent_node
1616 if p_status == "nomerge":
1617 category, pf = portage.catsplit(p_key)
1618 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
1619 msg.append("Portage is unable to process the dependencies of the ")
1620 msg.append("'%s' package. " % p_key)
1621 msg.append("In order to correct this problem, the package ")
1622 msg.append("should be uninstalled, reinstalled, or upgraded. ")
1623 msg.append("As a temporary workaround, the --nodeps option can ")
1624 msg.append("be used to ignore all dependencies. For reference, ")
1625 msg.append("the problematic dependencies can be found in the ")
1626 msg.append("*DEPEND files located in '%s/'." % pkg_location)
1628 msg.append("This package can not be installed. ")
1629 msg.append("Please notify the '%s' package maintainer " % p_key)
1630 msg.append("about this problem.")
1633 f.add_flowing_data(x)
# NOTE(review): sampled listing -- docstring delimiters, blank lines,
# the copy() def line (around 1650-1651) and the tail of __contains__
# (1663-1665) are not visible. __init__ as shown does not initialize
# _cp_map/_cpv_map although copy() and later methods use them --
# presumably originals 1648-1649 did; confirm against upstream.
1636 class PackageVirtualDbapi(portage.dbapi):
1638 A dbapi-like interface class that represents the state of the installed
1639 package database as new packages are installed, replacing any packages
1640 that previously existed in the same slot. The main difference between
1641 this class and fakedbapi is that this one uses Package instances
1642 internally (passed in via cpv_inject() and cpv_remove() calls).
1644 def __init__(self, settings):
1645 portage.dbapi.__init__(self)
1646 self.settings = settings
1647 self._match_cache = {}
# Deep-ish copy: match cache and cp lists are copied so mutations of
# the clone do not leak back; Package values themselves are shared.
1652 obj = PackageVirtualDbapi(self.settings)
1653 obj._match_cache = self._match_cache.copy()
1654 obj._cp_map = self._cp_map.copy()
1655 for k, v in obj._cp_map.iteritems():
1656 obj._cp_map[k] = v[:]
1657 obj._cpv_map = self._cpv_map.copy()
# Membership means the exact same Package is registered under its cpv.
1660 def __contains__(self, item):
1661 existing = self._cpv_map.get(item.cpv)
1662 if existing is not None and \
def match_pkgs(self, atom):
	"""Return the Package instances matched by atom.

	Same matching semantics as match(), but yields the Package
	objects stored in self._cpv_map rather than cpv strings.
	"""
	pkgs = []
	for cpv in self.match(atom):
		pkgs.append(self._cpv_map[cpv])
	return pkgs
def _clear_cache(self):
	"""Drop memoized state (category list and match results).

	Used whenever the set of injected packages changes, so stale
	lookups are never served. Each attribute is reset only when it
	actually holds data.
	"""
	if self._match_cache:
		self._match_cache = {}
	if self._categories is not None:
		self._categories = None
# Memoizing wrapper around portage.dbapi.match(); results are cached
# per original dependency string.
# NOTE(review): sampled listing -- the "return result" lines (originals
# 1679 and 1682-1683) are not visible here.
1676 def match(self, origdep, use_cache=1):
1677 result = self._match_cache.get(origdep)
1678 if result is not None:
1680 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
1681 self._match_cache[origdep] = result
def cpv_exists(self, cpv):
	"""Return True if a package with exactly this cpv has been injected."""
	known = self._cpv_map
	return cpv in known
# cp_list(): sorted cpv list for one category/package, reusing
# match() results cached under the same key when they are plain cpvs.
# NOTE(review): sampled listing -- the "return cachelist[:]" after the
# cache hit (1691), the "return []"/empty-list branch after 1693
# (1694-1695), the final return (1700-1701), and the "def" lines of
# cp_all()/cpv_all() (around 1702, 1705) are not visible here.
1687 def cp_list(self, mycp, use_cache=1):
1688 cachelist = self._match_cache.get(mycp)
1689 # cp_list() doesn't expand old-style virtuals
1690 if cachelist and cachelist[0].startswith(mycp):
1692 cpv_list = self._cp_map.get(mycp)
1693 if cpv_list is None:
1696 cpv_list = [pkg.cpv for pkg in cpv_list]
1697 self._cpv_sort_ascending(cpv_list)
# Don't cache empty results for virtuals, which match() may expand.
1698 if not (not cpv_list and mycp.startswith("virtual/")):
1699 self._match_cache[mycp] = cpv_list
1703 return list(self._cp_map)
1706 return list(self._cpv_map)
# Register a Package, evicting any existing package with the same cpv
# or occupying the same slot (slot_atom), then record it in both maps.
# NOTE(review): sampled listing -- the new-cp-list initialization
# (1710-1711), the identity check/early return before the removals
# (1715-1716, 1720-1721), the cp_list append (1723-1724), and the
# trailing cache clear (1726-1727) are not visible here.
1708 def cpv_inject(self, pkg):
1709 cp_list = self._cp_map.get(pkg.cp)
1712 self._cp_map[pkg.cp] = cp_list
1713 e_pkg = self._cpv_map.get(pkg.cpv)
1714 if e_pkg is not None:
1717 self.cpv_remove(e_pkg)
# One package per slot: remove any same-slot occupant.
1718 for e_pkg in cp_list:
1719 if e_pkg.slot_atom == pkg.slot_atom:
1722 self.cpv_remove(e_pkg)
1725 self._cpv_map[pkg.cpv] = pkg
# Unregister a Package from both the per-cp list and the cpv map.
# NOTE(review): sampled listing -- originals 1730-1731 (presumably the
# "old_pkg is not pkg" sanity check raising KeyError) and the trailing
# cache clear (1734) are not visible here.
1728 def cpv_remove(self, pkg):
1729 old_pkg = self._cpv_map.get(pkg.cpv)
1732 self._cp_map[pkg.cp].remove(pkg)
1733 del self._cpv_map[pkg.cpv]
def aux_get(self, cpv, wants):
	"""Fetch metadata values for an injected package.

	@param wants: metadata key names to look up
	@returns: one entry per requested key, in order, with "" for
		keys absent from the package's metadata
	Raises KeyError if cpv has not been injected.
	"""
	metadata = self._cpv_map[cpv].metadata
	result = []
	for key in wants:
		result.append(metadata.get(key, ""))
	return result
def aux_update(self, cpv, values):
	"""Merge the given key/value pairs into cpv's metadata in place.

	Raises KeyError if cpv has not been injected.
	"""
	pkg = self._cpv_map[cpv]
	pkg.metadata.update(values)
1744 class depgraph(object):
1747 "ebuild":"porttree",
1749 "installed":"vartree"}
1752 "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
1753 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1754 "repository", "RESTRICT", "SLOT", "USE"]
1756 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1758 def __init__(self, settings, trees, myopts, myparams, spinner):
1759 self.settings = settings
1760 self.target_root = settings["ROOT"]
1761 self.myopts = myopts
1762 self.myparams = myparams
1764 if settings.get("PORTAGE_DEBUG", "") == "1":
1766 self.spinner = spinner
1767 self.pkgsettings = {}
1768 # Maps slot atom to package for each Package added to the graph.
1769 self._slot_pkg_map = {}
1770 # Maps nodes to the reasons they were selected for reinstallation.
1771 self._reinstall_nodes = {}
1774 self._trees_orig = trees
1776 # Contains a filtered view of preferred packages that are selected
1777 # from available repositories.
1778 self._filtered_trees = {}
1779 # Contains installed packages and new packages that have been added
1781 self._graph_trees = {}
1782 # All Package instances
1783 self._pkg_cache = self._package_cache(self)
1784 for myroot in trees:
1785 self.trees[myroot] = {}
1786 # Create a RootConfig instance that references
1787 # the FakeVartree instead of the real one.
1788 self.roots[myroot] = RootConfig(
1789 trees[myroot]["vartree"].settings,
1791 trees[myroot]["root_config"].setconfig)
1792 for tree in ("porttree", "bintree"):
1793 self.trees[myroot][tree] = trees[myroot][tree]
1794 self.trees[myroot]["vartree"] = \
1795 FakeVartree(trees[myroot]["vartree"],
1796 trees[myroot]["porttree"].dbapi,
1797 self._mydbapi_keys, self._pkg_cache)
1798 self.pkgsettings[myroot] = portage.config(
1799 clone=self.trees[myroot]["vartree"].settings)
1800 self._slot_pkg_map[myroot] = {}
1801 vardb = self.trees[myroot]["vartree"].dbapi
1802 preload_installed_pkgs = "--nodeps" not in self.myopts and \
1803 "--buildpkgonly" not in self.myopts
1804 # This fakedbapi instance will model the state that the vdb will
1805 # have after new packages have been installed.
1806 fakedb = PackageVirtualDbapi(vardb.settings)
1807 if preload_installed_pkgs:
1808 for cpv in vardb.cpv_all():
1809 self.spinner.update()
1810 metadata = dict(izip(self._mydbapi_keys,
1811 vardb.aux_get(cpv, self._mydbapi_keys)))
1812 pkg = Package(built=True, cpv=cpv,
1813 installed=True, metadata=metadata,
1814 root=myroot, type_name="installed")
1815 self._pkg_cache[pkg] = pkg
1816 fakedb.cpv_inject(pkg)
1817 self.mydbapi[myroot] = fakedb
1820 graph_tree.dbapi = fakedb
1821 self._graph_trees[myroot] = {}
1822 self._filtered_trees[myroot] = {}
1823 # Substitute the graph tree for the vartree in dep_check() since we
1824 # want atom selections to be consistent with package selections
1825 # have already been made.
1826 self._graph_trees[myroot]["porttree"] = graph_tree
1827 self._graph_trees[myroot]["vartree"] = graph_tree
1828 def filtered_tree():
1830 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
1831 self._filtered_trees[myroot]["porttree"] = filtered_tree
1833 # Passing in graph_tree as the vartree here could lead to better
1834 # atom selections in some cases by causing atoms for packages that
1835 # have been added to the graph to be preferred over other choices.
1836 # However, it can trigger atom selections that result in
1837 # unresolvable direct circular dependencies. For example, this
1838 # happens with gwydion-dylan which depends on either itself or
1839 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
1840 # gwydion-dylan-bin needs to be selected in order to avoid a
1841 # an unresolvable direct circular dependency.
1843 # To solve the problem described above, pass in "graph_db" so that
1844 # packages that have been added to the graph are distinguishable
1845 # from other available packages and installed packages. Also, pass
1846 # the parent package into self._select_atoms() calls so that
1847 # unresolvable direct circular dependencies can be detected and
1848 # avoided when possible.
1849 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
1850 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
1853 portdb = self.trees[myroot]["porttree"].dbapi
1854 bindb = self.trees[myroot]["bintree"].dbapi
1855 vardb = self.trees[myroot]["vartree"].dbapi
1856 # (db, pkg_type, built, installed, db_keys)
1857 if "--usepkgonly" not in self.myopts:
1858 db_keys = list(portdb._aux_cache_keys)
1859 dbs.append((portdb, "ebuild", False, False, db_keys))
1860 if "--usepkg" in self.myopts:
1861 db_keys = list(bindb._aux_cache_keys)
1862 dbs.append((bindb, "binary", True, False, db_keys))
1863 db_keys = self._mydbapi_keys
1864 dbs.append((vardb, "installed", True, True, db_keys))
1865 self._filtered_trees[myroot]["dbs"] = dbs
1866 if "--usepkg" in self.myopts:
1867 self.trees[myroot]["bintree"].populate(
1868 "--getbinpkg" in self.myopts,
1869 "--getbinpkgonly" in self.myopts)
1872 self.digraph=portage.digraph()
1873 # contains all sets added to the graph
1875 # contains atoms given as arguments
1876 self._sets["args"] = InternalPackageSet()
1877 # contains all atoms from all sets added to the graph, including
1878 # atoms given as arguments
1879 self._set_atoms = InternalPackageSet()
1880 self._atom_arg_map = {}
1881 # contains all nodes pulled in by self._set_atoms
1882 self._set_nodes = set()
1883 # Contains only Blocker -> Uninstall edges
1884 self._blocker_uninstalls = digraph()
1885 # Contains only Package -> Blocker edges
1886 self._blocker_parents = digraph()
1887 # Contains only unsolvable Package -> Blocker edges
1888 self._unsolvable_blockers = digraph()
1889 self._slot_collision_info = set()
1890 # Slot collision nodes are not allowed to block other packages since
1891 # blocker validation is only able to account for one package per slot.
1892 self._slot_collision_nodes = set()
1893 self._serialized_tasks_cache = None
1894 self._pprovided_args = []
1895 self._missing_args = []
1896 self._masked_installed = []
1897 self._unsatisfied_deps_for_display = []
1898 self._dep_stack = []
1899 self._unsatisfied_deps = []
1900 self._ignored_deps = []
1901 self._required_set_names = set(["system", "world"])
1902 self._select_atoms = self._select_atoms_highest_available
1903 self._select_package = self._select_pkg_highest_available
1904 self._highest_pkg_cache = {}
1906 def _show_slot_collision_notice(self):
1907 """Show an informational message advising the user to mask one of the
1908 the packages. In some cases it may be possible to resolve this
1909 automatically, but support for backtracking (removal nodes that have
1910 already been selected) will be required in order to handle all possible
1913 if not self._slot_collision_info:
1917 msg.append("\n!!! Multiple versions within a single " + \
1918 "package slot have been pulled\n")
1919 msg.append("!!! into the dependency graph, resulting" + \
1920 " in a slot conflict:\n\n")
1922 # Max number of parents shown, to avoid flooding the display.
1924 for slot_atom, root in self._slot_collision_info:
1925 msg.append(slot_atom)
1928 for node in self._slot_collision_nodes:
1929 if node.slot_atom == slot_atom:
1930 slot_nodes.append(node)
1931 slot_nodes.append(self._slot_pkg_map[root][slot_atom])
1932 for node in slot_nodes:
1934 msg.append(str(node))
1935 parents = self.digraph.parent_nodes(node)
1938 if len(parents) > max_parents:
1940 # When generating the pruned list, prefer instances
1941 # of DependencyArg over instances of Package.
1942 for parent in parents:
1943 if isinstance(parent, DependencyArg):
1944 pruned_list.append(parent)
1945 # Prefer Packages instances that themselves have been
1946 # pulled into collision slots.
1947 for parent in parents:
1948 if isinstance(parent, Package) and \
1949 (parent.slot_atom, parent.root) \
1950 in self._slot_collision_info:
1951 pruned_list.append(parent)
1952 for parent in parents:
1953 if len(pruned_list) >= max_parents:
1955 if not isinstance(parent, DependencyArg) and \
1956 parent not in pruned_list:
1957 pruned_list.append(parent)
1958 omitted_parents = len(parents) - len(pruned_list)
1959 parents = pruned_list
1960 msg.append(" pulled in by\n")
1961 for parent in parents:
1962 msg.append(2*indent)
1963 msg.append(str(parent))
1966 msg.append(2*indent)
1967 msg.append("(and %d more)\n" % omitted_parents)
1969 msg.append(" (no parents)\n")
1972 sys.stderr.write("".join(msg))
1975 if "--quiet" in self.myopts:
1979 msg.append("It may be possible to solve this problem ")
1980 msg.append("by using package.mask to prevent one of ")
1981 msg.append("those packages from being selected. ")
1982 msg.append("However, it is also possible that conflicting ")
1983 msg.append("dependencies exist such that they are impossible to ")
1984 msg.append("satisfy simultaneously. If such a conflict exists in ")
1985 msg.append("the dependencies of two different packages, then those ")
1986 msg.append("packages can not be installed simultaneously.")
1988 from formatter import AbstractFormatter, DumbWriter
1989 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
1991 f.add_flowing_data(x)
1995 msg.append("For more information, see MASKED PACKAGES ")
1996 msg.append("section in the emerge man page or refer ")
1997 msg.append("to the Gentoo Handbook.")
1999 f.add_flowing_data(x)
2003 def _reinstall_for_flags(self, forced_flags,
2004 orig_use, orig_iuse, cur_use, cur_iuse):
2005 """Return a set of flags that trigger reinstallation, or None if there
2006 are no such flags."""
2007 if "--newuse" in self.myopts:
2008 flags = orig_iuse.symmetric_difference(
2009 cur_iuse).difference(forced_flags)
2010 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2011 cur_iuse.intersection(cur_use)))
2014 elif "changed-use" == self.myopts.get("--reinstall"):
2015 flags = orig_iuse.intersection(orig_use).symmetric_difference(
2016 cur_iuse.intersection(cur_use))
2021 def _create_graph(self, allow_unsatisfied=False):
2022 dep_stack = self._dep_stack
2024 self.spinner.update()
2025 dep = dep_stack.pop()
2026 if isinstance(dep, Package):
2027 if not self._add_pkg_deps(dep,
2028 allow_unsatisfied=allow_unsatisfied):
2031 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
2035 def _add_dep(self, dep, allow_unsatisfied=False):
2036 debug = "--debug" in self.myopts
2037 buildpkgonly = "--buildpkgonly" in self.myopts
2038 nodeps = "--nodeps" in self.myopts
2039 empty = "empty" in self.myparams
2040 deep = "deep" in self.myparams
2041 update = "--update" in self.myopts and dep.depth <= 1
2043 if not buildpkgonly and \
2045 dep.parent not in self._slot_collision_nodes:
2046 if dep.parent.onlydeps:
2047 # It's safe to ignore blockers if the
2048 # parent is an --onlydeps node.
2050 # The blocker applies to the root where
2051 # the parent is or will be installed.
2052 blocker = Blocker(atom=dep.atom, root=dep.parent.root)
2053 self._blocker_parents.add(blocker, dep.parent)
2055 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2056 onlydeps=dep.onlydeps)
2058 if allow_unsatisfied:
2059 self._unsatisfied_deps.append(dep)
2061 self._unsatisfied_deps_for_display.append(
2062 ((dep.root, dep.atom), {"myparent":dep.parent}))
2064 # In some cases, dep_check will return deps that shouldn't
2065 # be proccessed any further, so they are identified and
2066 # discarded here. Try to discard as few as possible since
2067 # discarded dependencies reduce the amount of information
2068 # available for optimization of merge order.
2069 if dep.priority.satisfied and \
2070 not (existing_node or empty or deep or update):
2072 if dep.root == self.target_root:
2074 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2075 except StopIteration:
2077 except portage_exception.InvalidDependString:
2078 if not dep_pkg.installed:
2079 # This shouldn't happen since the package
2080 # should have been masked.
2083 self._ignored_deps.append(dep)
2086 if not self._add_pkg(dep_pkg, dep.parent,
2087 priority=dep.priority, depth=dep.depth):
2091 def _add_pkg(self, pkg, myparent, priority=None, depth=0):
2092 if priority is None:
2093 priority = DepPriority()
2095 Fills the digraph with nodes comprised of packages to merge.
2096 mybigkey is the package spec of the package to merge.
2097 myparent is the package depending on mybigkey ( or None )
2098 addme = Should we add this package to the digraph or are we just looking at it's deps?
2099 Think --onlydeps, we need to ignore packages in that case.
2102 #IUSE-aware emerge -> USE DEP aware depgraph
2103 #"no downgrade" emerge
2106 # select the correct /var database that we'll be checking against
2107 vardbapi = self.trees[pkg.root]["vartree"].dbapi
2108 pkgsettings = self.pkgsettings[pkg.root]
2114 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
2115 except portage_exception.InvalidDependString, e:
2116 if not pkg.installed:
2117 show_invalid_depstring_notice(
2118 pkg, pkg.metadata["PROVIDE"], str(e))
2122 args = [arg for arg, atom in arg_atoms]
2124 if not pkg.onlydeps:
2125 if not pkg.installed and \
2126 "empty" not in self.myparams and \
2127 vardbapi.match(pkg.slot_atom):
2128 # Increase the priority of dependencies on packages that
2129 # are being rebuilt. This optimizes merge order so that
2130 # dependencies are rebuilt/updated as soon as possible,
2131 # which is needed especially when emerge is called by
2132 # revdep-rebuild since dependencies may be affected by ABI
2133 # breakage that has rendered them useless. Don't adjust
2134 # priority here when in "empty" mode since all packages
2135 # are being merged in that case.
2136 priority.rebuild = True
2138 existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
2139 slot_collision = False
2141 if pkg.cpv == existing_node.cpv:
2142 # The existing node can be reused.
2145 self.digraph.add(existing_node, arg,
2147 # If a direct circular dependency is not an unsatisfied
2148 # buildtime dependency then drop it here since otherwise
2149 # it can skew the merge order calculation in an unwanted
2151 if existing_node != myparent or \
2152 (priority.buildtime and not priority.satisfied):
2153 self.digraph.addnode(existing_node, myparent,
2157 if pkg in self._slot_collision_nodes:
2159 # A slot collision has occurred. Sometimes this coincides
2160 # with unresolvable blockers, so the slot collision will be
2161 # shown later if there are no unresolvable blockers.
2162 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
2163 self._slot_collision_nodes.add(pkg)
2164 slot_collision = True
2167 # Now add this node to the graph so that self.display()
2168 # can show use flags and --tree portage.output. This node is
2169 # only being partially added to the graph. It must not be
2170 # allowed to interfere with the other nodes that have been
2171 # added. Do not overwrite data for existing nodes in
2172 # self.mydbapi since that data will be used for blocker
2174 # Even though the graph is now invalid, continue to process
2175 # dependencies so that things like --fetchonly can still
2176 # function despite collisions.
2179 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
2180 self.mydbapi[pkg.root].cpv_inject(pkg)
2182 self.digraph.addnode(pkg, myparent, priority=priority)
2184 if not pkg.installed:
2185 # Allow this package to satisfy old-style virtuals in case it
2186 # doesn't already. Any pre-existing providers will be preferred
2189 pkgsettings.setinst(pkg.cpv, pkg.metadata)
2190 # For consistency, also update the global virtuals.
2191 settings = self.roots[pkg.root].settings
2193 settings.setinst(pkg.cpv, pkg.metadata)
2195 except portage_exception.InvalidDependString, e:
2196 show_invalid_depstring_notice(
2197 pkg, pkg.metadata["PROVIDE"], str(e))
2202 # Warn if an installed package is masked and it
2203 # is pulled into the graph.
2204 if not visible(pkgsettings, pkg):
2205 self._masked_installed.append((pkg, pkgsettings))
2208 self._set_nodes.add(pkg)
2210 # Do this even when addme is False (--onlydeps) so that the
2211 # parent/child relationship is always known in case
2212 # self._show_slot_collision_notice() needs to be called later.
2214 self.digraph.add(pkg, myparent, priority=priority)
2217 self.digraph.add(pkg, arg, priority=priority)
2219 """ This section determines whether we go deeper into dependencies or not.
2220 We want to go deeper on a few occasions:
2221 Installing package A, we need to make sure package A's deps are met.
2222 emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
2223 If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
2225 dep_stack = self._dep_stack
2226 if "recurse" not in self.myparams:
2228 elif pkg.installed and \
2229 "deep" not in self.myparams:
2230 dep_stack = self._ignored_deps
2232 self.spinner.update()
2237 dep_stack.append(pkg)
# NOTE(review): this chunk is a line-sampled extract -- the fused leading
# numbers are original file line numbers, and gaps in them mean source
# lines are missing here (indentation was also stripped). Code tokens are
# left byte-identical; only comments were added.
#
# _add_pkg_deps: queue the DEPEND/RDEPEND/PDEPEND atoms of `pkg` as
# Dependency objects via self._add_dep(). When allow_unsatisfied is True,
# unsatisfiable deps are tolerated rather than treated as failures
# (presumably collected for later analysis -- _add_dep() is not visible
# here; confirm against the full file).
2240 def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
2242 mytype = pkg.type_name
2245 metadata = pkg.metadata
2246 myuse = metadata["USE"].split()
2248 depth = pkg.depth + 1
2251 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
# edepend maps each dep key to its raw depstring from the metadata
# (the loop header for `k` falls in a missing line).
2253 edepend[k] = metadata[k]
# --buildpkgonly without deep/empty: only build-time deps matter,
# so runtime and post deps are blanked out.
2255 if not pkg.built and \
2256 "--buildpkgonly" in self.myopts and \
2257 "deep" not in self.myparams and \
2258 "empty" not in self.myparams:
2259 edepend["RDEPEND"] = ""
2260 edepend["PDEPEND"] = ""
2261 bdeps_satisfied = False
2262 if mytype in ("installed", "binary"):
2263 if self.myopts.get("--with-bdeps", "n") == "y":
2264 # Pull in build time deps as requested, but marked them as
2265 # "satisfied" since they are not strictly required. This allows
2266 # more freedom in the merge order calculation for solving
2267 # circular dependencies. Don't convert to PDEPEND since that
2268 # could make --with-bdeps=y less effective if it is used to
2269 # adjust merge order to prevent built_with_use() calls from
2271 bdeps_satisfied = True
2273 # built packages do not have build time dependencies.
2274 edepend["DEPEND"] = ""
# (dep root, depstring, priority) triples; DEPEND is resolved against
# "/" (build host root) while RDEPEND/PDEPEND use myroot.
2277 ("/", edepend["DEPEND"],
2278 DepPriority(buildtime=True, satisfied=bdeps_satisfied)),
2279 (myroot, edepend["RDEPEND"], DepPriority(runtime=True)),
2280 (myroot, edepend["PDEPEND"], DepPriority(runtime_post=True))
2283 debug = "--debug" in self.myopts
# strict dep-string parsing only for non-installed packages.
2284 strict = mytype != "installed"
2286 for dep_root, dep_string, dep_priority in deps:
2288 # Decrease priority so that --buildpkgonly
2289 # hasallzeros() works correctly.
2290 dep_priority = DepPriority()
2295 print "Parent: ", jbigkey
2296 print "Depstring:", dep_string
2297 print "Priority:", dep_priority
2298 vardb = self.roots[dep_root].trees["vartree"].dbapi
# _select_atoms flattens the depstring into concrete atoms; the
# enclosing try: for this except clause is in a missing line.
2300 selected_atoms = self._select_atoms(dep_root,
2301 dep_string, myuse=myuse, parent=pkg, strict=strict)
2302 except portage_exception.InvalidDependString, e:
2303 show_invalid_depstring_notice(jbigkey, dep_string, str(e))
2306 print "Candidates:", selected_atoms
2307 for atom in selected_atoms:
# "!" prefix marks a blocker atom.
2308 blocker = atom.startswith("!")
2311 mypriority = dep_priority.copy()
# A non-blocker atom already matched in vardb is satisfied by an
# installed package.
2312 if not blocker and vardb.match(atom):
2313 mypriority.satisfied = True
2314 if not self._add_dep(Dependency(atom=atom,
2315 blocker=blocker, depth=depth, parent=pkg,
2316 priority=mypriority, root=dep_root),
2317 allow_unsatisfied=allow_unsatisfied):
2320 print "Exiting...", jbigkey
# A ValueError carrying a list argument indicates an ambiguous
# (not fully-qualified) atom with multiple category matches.
2321 except ValueError, e:
2322 if not e.args or not isinstance(e.args[0], list) or \
2326 portage.writemsg("\n\n!!! An atom in the dependencies " + \
2327 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
2329 portage.writemsg(" %s\n" % cpv, noiselevel=-1)
2330 portage.writemsg("\n", noiselevel=-1)
2331 if mytype == "binary":
2333 "!!! This binary package cannot be installed: '%s'\n" % \
2334 mykey, noiselevel=-1)
2335 elif mytype == "ebuild":
2336 portdb = self.roots[myroot].trees["porttree"].dbapi
2337 myebuild, mylocation = portdb.findname2(mykey)
2338 portage.writemsg("!!! This ebuild cannot be installed: " + \
2339 "'%s'\n" % myebuild, noiselevel=-1)
2340 portage.writemsg("!!! Please notify the package maintainer " + \
2341 "that atoms must be fully-qualified.\n", noiselevel=-1)
# NOTE(review): line-sampled extract; gaps in the fused line numbers mean
# source lines are missing. Tokens left byte-identical; comments added.
#
# _dep_expand: expand a category-less atom (e.g. "foo") into the list of
# fully-qualified atoms ("cat/foo") whose category/package exists in any
# of the filtered dbs for this root.
2345 def _dep_expand(self, root_config, atom_without_category):
2347 @param root_config: a root config instance
2348 @type root_config: RootConfig
2349 @param atom_without_category: an atom without a category component
2350 @type atom_without_category: String
2352 @returns: a list of atoms containing categories (possibly empty)
# Insert a dummy "null" category just so dep_getkey/catsplit can
# extract the bare package name from the atom.
2354 null_cp = portage.dep_getkey(insert_category_into_atom(
2355 atom_without_category, "null"))
2356 cat, atom_pn = portage.catsplit(null_cp)
# Collect every known category/package pair across all configured dbs.
2359 for db, pkg_type, built, installed, db_keys in \
2360 self._filtered_trees[root_config.root]["dbs"]:
2361 cp_set.update(db.cp_all())
2362 for cp in list(cp_set):
2363 cat, pn = portage.catsplit(cp)
# (the filter that prunes non-matching package names sits in a
# missing line between 2363 and 2368 -- confirm in the full file)
2368 cat, pn = portage.catsplit(cp)
2369 deps.append(insert_category_into_atom(
2370 atom_without_category, cat))
# NOTE(review): line-sampled extract; the return statements of this
# method fall in missing lines (2378-2381). Tokens left byte-identical.
#
# _have_new_virt: report whether any configured db for `root` carries a
# real (new-style) package under category/package `atom_cp` -- used to
# distinguish new-style virtuals from old-style PROVIDE virtuals.
2373 def _have_new_virt(self, root, atom_cp):
2375 for db, pkg_type, built, installed, db_keys in \
2376 self._filtered_trees[root]["dbs"]:
2377 if db.cp_list(atom_cp):
# NOTE(review): line-sampled extract; gaps in the fused line numbers mean
# source lines (including the yields) are missing. Tokens byte-identical.
#
# _iter_atoms_for_pkg: iterate over the command-line/set atoms that match
# `pkg`, skipping atoms that have been taken over by a new-style virtual
# or that are better satisfied by a higher version in a different slot.
2382 def _iter_atoms_for_pkg(self, pkg):
2383 # TODO: add multiple $ROOT support
2384 if pkg.root != self.target_root:
2386 atom_arg_map = self._atom_arg_map
2387 root_config = self.roots[pkg.root]
2388 for atom in self._set_atoms.iterAtomsForPackage(pkg):
2389 atom_cp = portage.dep_getkey(atom)
# Old-style virtual match superseded by a new-style virtual package:
# skip this atom (the continue presumably sits in a missing line).
2390 if atom_cp != pkg.cp and \
2391 self._have_new_virt(pkg.root, atom_cp):
2393 visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
2394 visible_pkgs.reverse() # descending order
2396 for visible_pkg in visible_pkgs:
2397 if visible_pkg.cp != atom_cp:
2399 if pkg >= visible_pkg:
2400 # This is descending order, and we're not
2401 # interested in any versions <= pkg given.
# A strictly higher visible version living in a different slot
# means this atom is better served elsewhere.
2403 if pkg.slot_atom != visible_pkg.slot_atom:
2404 higher_slot = visible_pkg
2406 if higher_slot is not None:
2408 for arg in atom_arg_map[(atom, pkg.root)]:
2409 if isinstance(arg, PackageArg) and \
# NOTE(review): line-sampled extract of a very long method; the fused
# leading numbers are original file line numbers and their gaps mark
# missing source lines (loop headers, try statements, continues, etc.).
# Code tokens are left byte-identical; only comments were added.
2414 def select_files(self, myfiles):
2415 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
2416 appropriate depgraph and return a favorite list."""
2417 root_config = self.roots[self.target_root]
2418 sets = root_config.sets
2419 getSetAtoms = root_config.setconfig.getSetAtoms
2420 oneshot = "--oneshot" in self.myopts or \
2421 "--onlydeps" in self.myopts
2423 myroot = self.target_root
2424 dbs = self._filtered_trees[myroot]["dbs"]
2425 vardb = self.trees[myroot]["vartree"].dbapi
2426 portdb = self.trees[myroot]["porttree"].dbapi
2427 bindb = self.trees[myroot]["bintree"].dbapi
2428 pkgsettings = self.pkgsettings[myroot]
2430 onlydeps = "--onlydeps" in self.myopts
# --- Per-argument dispatch (the `for x in myfiles:` header is in a
# --- missing line). Branch 1: .tbz2 binary package files.
2432 ext = os.path.splitext(x)[1]
# Resolve bare tbz2 names against $PKGDIR/All and $PKGDIR.
2434 if not os.path.exists(x):
2436 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2437 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2438 elif os.path.exists(
2439 os.path.join(pkgsettings["PKGDIR"], x)):
2440 x = os.path.join(pkgsettings["PKGDIR"], x)
2442 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
2443 print "!!! Please ensure the tbz2 exists as specified.\n"
2444 return 0, myfavorites
# Build "CATEGORY/pkgname" from the tbz2's xpak metadata.
2446 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
# The file must live at the path bintree would use for this key.
2447 if os.path.realpath(x) != \
2448 os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
2449 print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
2450 return 0, myfavorites
2451 metadata = dict(izip(self._mydbapi_keys,
2452 bindb.aux_get(mykey, self._mydbapi_keys)))
2453 pkg = Package(type_name="binary", root=myroot,
2454 cpv=mykey, built=True, metadata=metadata,
2456 self._pkg_cache[pkg] = pkg
2457 args.append(PackageArg(arg=x, package=pkg,
2458 root_config=root_config))
# --- Branch 2: explicit .ebuild files.
2459 elif ext==".ebuild":
2460 ebuild_path = portage_util.normalize_path(os.path.abspath(x))
2461 pkgdir = os.path.dirname(ebuild_path)
2462 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2463 cp = pkgdir[len(tree_root)+1:]
# Pre-built exception, raised from the (missing) validation lines
# below when the path is not inside a valid tree layout.
2464 e = portage_exception.PackageNotFound(
2465 ("%s is not in a valid portage tree " + \
2466 "hierarchy or does not exist") % x)
2467 if not portage.isvalidatom(cp):
2469 cat = portage.catsplit(cp)[0]
# strip the ".ebuild" suffix (7 chars) to get the version-qualified key
2470 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2471 if not portage.isvalidatom("="+mykey):
2473 ebuild_path = portdb.findname(mykey)
2475 if ebuild_path != os.path.join(os.path.realpath(tree_root),
2476 cp, os.path.basename(ebuild_path)):
2477 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
2478 return 0, myfavorites
# Warn (with countdown) when directly emerging a masked ebuild.
2479 if mykey not in portdb.xmatch(
2480 "match-visible", portage.dep_getkey(mykey)):
2481 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
2482 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
2483 print colorize("BAD", "*** page for details.")
2484 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
2487 raise portage_exception.PackageNotFound(
2488 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2489 metadata = dict(izip(self._mydbapi_keys,
2490 portdb.aux_get(mykey, self._mydbapi_keys)))
2491 pkgsettings.setcpv(mykey, mydb=metadata)
2492 metadata["USE"] = pkgsettings["PORTAGE_USE"]
2493 pkg = Package(type_name="ebuild", root=myroot,
2494 cpv=mykey, metadata=metadata, onlydeps=onlydeps)
2495 self._pkg_cache[pkg] = pkg
2496 args.append(PackageArg(arg=x, package=pkg,
2497 root_config=root_config))
# --- Branch 3: absolute filesystem path -> find the owning package.
2498 elif x.startswith(os.path.sep):
2499 if not x.startswith(myroot):
2500 portage.writemsg(("\n\n!!! '%s' does not start with" + \
2501 " $ROOT.\n") % x, noiselevel=-1)
2503 relative_path = x[len(myroot):]
2504 vartree = self._trees_orig[myroot]["vartree"]
# Linear scan of all installed packages via dblink.isowner();
# this is why the spinner is updated per cpv.
2506 for cpv in vardb.cpv_all():
2507 self.spinner.update()
2508 cat, pf = portage.catsplit(cpv)
2509 if portage.dblink(cat, pf, myroot,
2510 pkgsettings, vartree=vartree).isowner(
2511 relative_path, myroot):
2514 if owner_cpv is None:
2515 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2516 "by any package.\n") % x, noiselevel=-1)
2518 slot = vardb.aux_get(owner_cpv, ["SLOT"])[0]
2520 # portage now masks packages with missing slot, but it's
2521 # possible that one was installed by an older version
2522 atom = portage.cpv_getkey(owner_cpv)
2524 atom = "%s:%s" % (portage.cpv_getkey(owner_cpv), slot)
2525 args.append(AtomArg(arg=atom, atom=atom,
2526 root_config=root_config))
# --- Branch 4: package sets ("system"/"world" and @set names).
2528 if x in ("system", "world"):
2530 if x.startswith(SETPREFIX):
2531 s = x[len(SETPREFIX):]
2533 raise portage_exception.PackageNotFound(
2534 "emerge: there are no sets to satisfy '%s'." % s)
2537 # Recursively expand sets so that containment tests in
2538 # self._get_parent_sets() properly match atoms in nested
2539 # sets (like if world contains system).
2540 expanded_set = InternalPackageSet(
2541 initial_atoms=getSetAtoms(s))
2542 self._sets[s] = expanded_set
2543 args.append(SetArg(arg=x, set=expanded_set,
2544 root_config=root_config))
2546 # pull in the system set too
2548 expanded_set = InternalPackageSet(
2549 initial_atoms=getSetAtoms(s))
2550 self._sets[s] = expanded_set
2551 args.append(SetArg(arg=SETPREFIX+s, set=expanded_set,
2552 root_config=root_config))
2554 # myfavorites.append(x)
# --- Branch 5: plain package atoms (possibly category-less).
2556 if not is_valid_package_atom(x):
2557 portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2559 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2560 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2562 # Don't expand categories or old-style virtuals here unless
2563 # necessary. Expansion of old-style virtuals here causes at
2564 # least the following problems:
2565 # 1) It's more difficult to determine which set(s) an atom
2566 # came from, if any.
2567 # 2) It takes away freedom from the resolver to choose other
2568 # possible expansions when necessary.
2570 args.append(AtomArg(arg=x, atom=x,
2571 root_config=root_config))
# Category-less atom: expand, then prefer the single installed cp
# when the expansion is otherwise ambiguous.
2573 expanded_atoms = self._dep_expand(root_config, x)
2574 installed_cp_set = set()
2575 for atom in expanded_atoms:
2576 atom_cp = portage.dep_getkey(atom)
2577 if vardb.cp_list(atom_cp):
2578 installed_cp_set.add(atom_cp)
2579 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2580 installed_cp = iter(installed_cp_set).next()
2581 expanded_atoms = [atom for atom in expanded_atoms \
2582 if portage.dep_getkey(atom) == installed_cp]
2584 if len(expanded_atoms) > 1:
2585 print "\n\n!!! The short ebuild name \"" + x + "\" is ambiguous. Please specify"
2586 print "!!! one of the following fully-qualified ebuild names instead:\n"
2587 expanded_atoms = set(portage.dep_getkey(atom) \
2588 for atom in expanded_atoms)
2589 for i in sorted(expanded_atoms):
2590 print " " + green(i)
2592 return False, myfavorites
2594 atom = expanded_atoms[0]
# No expansion matched: fall back to virtual/ when a PROVIDE-style
# virtual of this name exists, else keep the "null" placeholder.
2596 null_atom = insert_category_into_atom(x, "null")
2597 null_cp = portage.dep_getkey(null_atom)
2598 cat, atom_pn = portage.catsplit(null_cp)
2599 virts_p = root_config.settings.get_virts_p().get(atom_pn)
2601 # Allow the depgraph to choose which virtual.
2602 atom = insert_category_into_atom(x, "virtual")
2604 atom = insert_category_into_atom(x, "null")
2606 args.append(AtomArg(arg=x, atom=atom,
2607 root_config=root_config))
# --- --update: expand argument atoms into one greedy atom per
# --- installed SLOT (sets are deliberately excluded).
2609 if "--update" in self.myopts:
2610 # Enable greedy SLOT atoms for atoms given as arguments.
2611 # This is currently disabled for sets since greedy SLOT
2612 # atoms could be a property of the set itself.
2615 # In addition to any installed slots, also try to pull
2616 # in the latest new slot that may be available.
2617 greedy_atoms.append(arg)
2618 if not isinstance(arg, (AtomArg, PackageArg)):
2620 atom_cp = portage.dep_getkey(arg.atom)
2622 for cpv in vardb.match(arg.atom):
2623 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
2625 greedy_atoms.append(
2626 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
2627 root_config=root_config))
2631 # Create the "args" package set from atoms and
2632 # packages given as arguments.
2633 args_set = self._sets["args"]
2635 if not isinstance(arg, (AtomArg, PackageArg)):
2638 if myatom in args_set:
2640 args_set.add(myatom)
2642 myfavorites.append(myatom)
2643 self._set_atoms.update(chain(*self._sets.itervalues()))
# Map (atom, root) -> list of args that contributed the atom.
2644 atom_arg_map = self._atom_arg_map
2646 for atom in arg.set:
2647 atom_key = (atom, myroot)
2648 refs = atom_arg_map.get(atom_key)
2651 atom_arg_map[atom_key] = refs
2654 pprovideddict = pkgsettings.pprovideddict
2655 # Order needs to be preserved since a feature of --nodeps
2656 # is to allow the user to force a specific merge order.
# --- Main resolution loop: select a package per atom and seed the
# --- dependency graph (the enclosing try:/for headers fall in
# --- missing lines).
2660 for atom in arg.set:
2661 self.spinner.update()
2662 atom_cp = portage.dep_getkey(atom)
# package.provided entries satisfy the atom without merging.
2664 pprovided = pprovideddict.get(portage.dep_getkey(atom))
2665 if pprovided and portage.match_from_list(atom, pprovided):
2666 # A provided package has been specified on the command line.
2667 self._pprovided_args.append((arg, atom))
2669 if isinstance(arg, PackageArg):
2670 if not self._add_pkg(arg.package, arg) or \
2671 not self._create_graph():
2672 sys.stderr.write(("\n\n!!! Problem resolving " + \
2673 "dependencies for %s\n") % arg.arg)
2674 return 0, myfavorites
2676 pkg, existing_node = self._select_package(
2677 myroot, atom, onlydeps=onlydeps)
# No selectable package: hard failure unless the atom comes
# from the system or world set (then it is only recorded).
2679 if not (isinstance(arg, SetArg) and \
2680 arg.name in ("system", "world")):
2681 self._unsatisfied_deps_for_display.append(
2682 ((myroot, atom), {}))
2683 return 0, myfavorites
2684 self._missing_args.append((arg, atom))
2686 if atom_cp != pkg.cp:
2687 # For old-style virtuals, we need to repeat the
2688 # package.provided check against the selected package.
2689 expanded_atom = atom.replace(atom_cp, pkg.cp)
2690 pprovided = pprovideddict.get(pkg.cp)
2692 portage.match_from_list(expanded_atom, pprovided):
2693 # A provided package has been
2694 # specified on the command line.
2695 self._pprovided_args.append((arg, atom))
2697 if pkg.installed and "selective" not in self.myparams:
2698 self._unsatisfied_deps_for_display.append(
2699 ((myroot, atom), {}))
2700 # Previous behavior was to bail out in this case, but
2701 # since the dep is satisfied by the installed package,
2702 # it's more friendly to continue building the graph
2703 # and just show a warning message. Therefore, only bail
2704 # out here if the atom is not from either the system or
2706 if not (isinstance(arg, SetArg) and \
2707 arg.name in ("system", "world")):
2708 return 0, myfavorites
2710 dep = Dependency(atom=atom, onlydeps=onlydeps,
2711 root=myroot, parent=arg)
2713 # Add the selected package to the graph as soon as possible
2714 # so that later dep_check() calls can use it as feedback
2715 # for making more consistent atom selections.
2716 if not self._add_pkg(pkg, dep.parent,
2717 priority=dep.priority, depth=dep.depth):
2718 if isinstance(arg, SetArg):
2719 sys.stderr.write(("\n\n!!! Problem resolving " + \
2720 "dependencies for %s from %s\n") % \
2723 sys.stderr.write(("\n\n!!! Problem resolving " + \
2724 "dependencies for %s\n") % atom)
2725 return 0, myfavorites
# --- Error handling for the resolution loop. Signature failures
# --- abort with a security warning; SystemExit must propagate.
2727 except portage_exception.MissingSignature, e:
2728 portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
2729 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
2730 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
2731 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
2732 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
2733 return 0, myfavorites
2734 except portage_exception.InvalidSignature, e:
2735 portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
2736 portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
2737 portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
2738 portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
2739 portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
2740 return 0, myfavorites
2741 except SystemExit, e:
2742 raise # Needed else can't exit
2743 except Exception, e:
2744 print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
2745 print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
2748 # Now that the root packages have been added to the graph,
2749 # process the dependencies.
2750 if not self._create_graph():
2751 return 0, myfavorites
# --- --usepkgonly: flag any merge node not backed by a binary pkg.
2754 if "--usepkgonly" in self.myopts:
2755 for xs in self.digraph.all_nodes():
2756 if not isinstance(xs, Package):
2758 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
2762 print "Missing binary for:",xs[2]
2766 except self._unknown_internal_error:
2767 return False, myfavorites
2769 # We're true here unless we are missing binaries.
2770 return (not missing,myfavorites)
# NOTE(review): tokens left byte-identical to this line-sampled extract.
#
# _select_atoms_from_graph: same contract as
# _select_atoms_highest_available, but resolves against self._graph_trees
# so that atoms prefer packages already in the graph (or installed and
# not scheduled for replacement).
2772 def _select_atoms_from_graph(self, *pargs, **kwargs):
2774 Prefer atoms matching packages that have already been
2775 added to the graph or those that are installed and have
2776 not been scheduled for replacement.
2778 kwargs["trees"] = self._graph_trees
2779 return self._select_atoms_highest_available(*pargs, **kwargs)
# NOTE(review): line-sampled extract -- the try/finally structure around
# the dep_check() call is only partially visible (missing lines between
# 2796 and 2798). Tokens left byte-identical; comments added.
#
# _select_atoms_highest_available: flatten `depstring` into concrete
# atoms via portage.dep_check() for the given root.
2781 def _select_atoms_highest_available(self, root, depstring,
2782 myuse=None, parent=None, strict=True, trees=None):
2783 """This will raise InvalidDependString if necessary. If trees is
2784 None then self._filtered_trees is used."""
2785 pkgsettings = self.pkgsettings[root]
2787 trees = self._filtered_trees
# The parent package is temporarily stashed in the trees dict so the
# dep_check machinery can see it; it is popped again below.
2790 if parent is not None:
2791 trees[root]["parent"] = parent
# Non-strict mode toggles the module-global flag around the call --
# note this mutates portage_dep state, so it is restored afterwards.
2793 portage_dep._dep_check_strict = False
2794 mycheck = portage.dep_check(depstring, None,
2795 pkgsettings, myuse=myuse,
2796 myroot=root, trees=trees)
2798 if parent is not None:
2799 trees[root].pop("parent")
2800 portage_dep._dep_check_strict = True
# dep_check failure (first element falsy, presumably) is surfaced as
# an InvalidDependString carrying the message in mycheck[1].
2802 raise portage_exception.InvalidDependString(mycheck[1])
2803 selected_atoms = mycheck[1]
2804 return selected_atoms
# NOTE(review): line-sampled extract; the tail of this method (after line
# 2855) is missing here. Tokens left byte-identical; comments added.
#
# _show_unsatisfied_dep: print a diagnostic for an atom that could not be
# satisfied -- either listing the masked candidates that would match, or
# reporting that no ebuild satisfies it at all.
2806 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
2807 xinfo = '"%s"' % atom
2810 # Discard null/ from failed cpv_expand category expansion.
2811 xinfo = xinfo.replace("null/", "")
2813 xfrom = '(dependency required by '+ \
2814 green('"%s"' % myparent[2]) + \
2815 red(' [%s]' % myparent[0]) + ')'
2816 masked_packages = []
2817 missing_licenses = []
2818 have_eapi_mask = False
2819 pkgsettings = self.pkgsettings[root]
2820 root_config = self.roots[root]
2821 portdb = self.roots[root].trees["porttree"].dbapi
2822 dbs = self._filtered_trees[root]["dbs"]
# Collect every match (including masked ones) across all dbs; xmatch
# "match-all" is used where available since it ignores visibility.
2823 for db, pkg_type, built, installed, db_keys in dbs:
2827 if hasattr(db, "xmatch"):
2828 cpv_list = db.xmatch("match-all", atom)
2830 cpv_list = db.match(atom)
2833 for cpv in cpv_list:
2834 metadata, mreasons = get_mask_info(root_config, cpv,
2835 pkgsettings, db, pkg_type, built, installed, db_keys)
2836 masked_packages.append(
2837 (root_config, pkgsettings, cpv, metadata, mreasons))
2840 print "\n!!! "+red("All ebuilds that could satisfy ")+green(xinfo)+red(" have been masked.")
2841 print "!!! One of the following masked packages is required to complete your request:"
2842 have_eapi_mask = show_masked_packages(masked_packages)
# EAPI masks get an extra explanation: portage itself must be
# upgraded before such packages can be installed.
2845 msg = ("The current version of portage supports " + \
2846 "EAPI '%s'. You must upgrade to a newer version" + \
2847 " of portage before EAPI masked packages can" + \
2848 " be installed.") % portage_const.EAPI
2849 from textwrap import wrap
2850 for line in wrap(msg, 75):
2855 print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
# NOTE(review): line-sampled extract; several lines are missing (e.g. the
# early-return on cache hit around 2863-2864). Tokens byte-identical.
#
# _select_pkg_highest_available: memoizing wrapper around
# _select_pkg_highest_available_imp, keyed by (root, atom, onlydeps).
2860 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
2861 cache_key = (root, atom, onlydeps)
2862 ret = self._highest_pkg_cache.get(cache_key)
# A cached selection whose package has since been added to the graph
# is refreshed so `existing` reflects the graph's slot occupant.
2865 if pkg and not existing:
2866 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
2867 if existing and existing == pkg:
2868 # Update the cache to reflect that the
2869 # package has been added to the graph.
2871 self._highest_pkg_cache[cache_key] = ret
# Cache miss: compute and store.
2873 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
2874 self._highest_pkg_cache[cache_key] = ret
# NOTE(review): line-sampled extract of a very long method; the fused
# leading numbers are original file line numbers and their gaps mark
# missing source lines (try headers, continues, appends, etc.). Code
# tokens are left byte-identical; only comments were added.
#
# _select_pkg_highest_available_imp: pick the best package matching
# `atom` for `root`, honoring --usepkgonly/--noreplace/--newuse/
# --reinstall and the "selective"/"empty" depgraph parameters. Returns a
# (package, existing_graph_node) pair via matched_packages[-1] at the end.
2877 def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
2878 pkgsettings = self.pkgsettings[root]
2879 dbs = self._filtered_trees[root]["dbs"]
2880 vardb = self.roots[root].trees["vartree"].dbapi
2881 portdb = self.roots[root].trees["porttree"].dbapi
2882 # List of acceptable packages, ordered by type preference.
2883 matched_packages = []
2884 highest_version = None
2885 atom_cp = portage.dep_getkey(atom)
2886 existing_node = None
2888 usepkgonly = "--usepkgonly" in self.myopts
2889 empty = "empty" in self.myparams
2890 selective = "selective" in self.myparams
2892 noreplace = "--noreplace" in self.myopts
2893 # Behavior of the "selective" parameter depends on
2894 # whether or not a package matches an argument atom.
2895 # If an installed package provides an old-style
2896 # virtual that is no longer provided by an available
2897 # package, the installed package may match an argument
2898 # atom even though none of the available packages do.
2899 # Therefore, "selective" logic does not consider
2900 # whether or not an installed package matches an
2901 # argument atom. It only considers whether or not
2902 # available packages match argument atoms, which is
2903 # represented by the found_available_arg flag.
2904 found_available_arg = False
# Two passes: first look for a node already in the graph, then fall
# back to a fresh selection.
2905 for find_existing_node in True, False:
2908 for db, pkg_type, built, installed, db_keys in dbs:
2911 if installed and not find_existing_node:
2912 want_reinstall = reinstall or empty or \
2913 (found_available_arg and not selective)
2914 if want_reinstall and matched_packages:
2916 if hasattr(db, "xmatch"):
2917 cpv_list = db.xmatch("match-all", atom)
2919 cpv_list = db.match(atom)
2922 pkg_status = "merge"
2923 if installed or onlydeps:
2924 pkg_status = "nomerge"
# --- Per-candidate loop (iterating cpv_list; presumably in
# --- descending version order given the break-on-highest
# --- comments below -- confirm in the full file).
2927 for cpv in cpv_list:
2928 # Make --noreplace take precedence over --newuse.
2929 if not installed and noreplace and \
2930 cpv in vardb.match(atom):
2931 # If the installed version is masked, it may
2932 # be necessary to look at lower versions,
2933 # in case there is a visible downgrade.
2935 reinstall_for_flags = None
2936 cache_key = (pkg_type, root, cpv, pkg_status)
2937 calculated_use = True
2938 pkg = self._pkg_cache.get(cache_key)
2940 calculated_use = False
2942 metadata = dict(izip(self._mydbapi_keys,
2943 db.aux_get(cpv, self._mydbapi_keys)))
# USE-conditional LICENSE/PROVIDE need USE computed now
# because it affects visibility filtering below.
2946 if not built and ("?" in metadata["LICENSE"] or \
2947 "?" in metadata["PROVIDE"]):
2948 # This is avoided whenever possible because
2949 # it's expensive. It only needs to be done here
2950 # if it has an effect on visibility.
2951 pkgsettings.setcpv(cpv, mydb=metadata)
2952 metadata["USE"] = pkgsettings["PORTAGE_USE"]
2953 calculated_use = True
2954 pkg = Package(built=built, cpv=cpv,
2955 installed=installed, metadata=metadata,
2956 onlydeps=onlydeps, root=root, type_name=pkg_type)
2957 self._pkg_cache[pkg] = pkg
# Does this candidate match a command-line argument atom?
2959 if root == self.target_root:
2961 myarg = self._iter_atoms_for_pkg(pkg).next()
2962 except StopIteration:
2964 except portage_exception.InvalidDependString:
2966 # masked by corruption
2968 if not installed and myarg:
2969 found_available_arg = True
2970 if not installed or (installed and matched_packages):
2971 # Only enforce visibility on installed packages
2972 # if there is at least one other visible package
2973 # available. By filtering installed masked packages
2974 # here, packages that have been masked since they
2975 # were installed can be automatically downgraded
2976 # to an unmasked version.
2978 if not visible(pkgsettings, pkg):
2980 except portage_exception.InvalidDependString:
2983 # Enable upgrade or downgrade to a version
2984 # with visible KEYWORDS when the installed
2985 # version is masked by KEYWORDS, but never
2986 # reinstall the same exact version only due
2987 # to a KEYWORDS mask.
2988 if installed and matched_packages and \
2989 pkgsettings._getMissingKeywords(
2990 pkg.cpv, pkg.metadata):
2991 different_version = None
2992 for avail_pkg in matched_packages:
2993 if not portage_dep.cpvequal(
2994 pkg.cpv, avail_pkg.cpv):
2995 different_version = avail_pkg
2997 if different_version is not None:
2998 # Only reinstall for KEYWORDS if
2999 # it's not the same version.
3002 if not built and not calculated_use:
3003 # This is avoided whenever possible because
3005 pkgsettings.setcpv(cpv, mydb=pkg.metadata)
3006 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
# Track the overall highest version seen for atom_cp; used
# below to reject existing nodes shadowed by a higher slot.
3007 if pkg.cp == atom_cp:
3008 if highest_version is None:
3009 highest_version = pkg
3010 elif pkg > highest_version:
3011 highest_version = pkg
3012 # At this point, we've found the highest visible
3013 # match from the current repo. Any lower versions
3014 # from this repo are ignored, so this so the loop
3015 # will always end with a break statement below
3017 if find_existing_node:
3018 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
3021 cpv_slot = "%s:%s" % \
3022 (e_pkg.cpv, e_pkg.metadata["SLOT"])
3023 if portage_dep.match_from_list(atom, [cpv_slot]):
3024 if highest_version and \
3025 e_pkg.cp == atom_cp and \
3026 e_pkg < highest_version and \
3027 e_pkg.slot_atom != highest_version.slot_atom:
3028 # There is a higher version available in a
3029 # different slot, so this existing node is
3033 matched_packages.append(e_pkg)
3034 existing_node = e_pkg
3036 # Compare built package to current config and
3037 # reject the built package if necessary.
3038 if built and not installed and \
3039 ("--newuse" in self.myopts or \
3040 "--reinstall" in self.myopts):
3041 iuses = set(filter_iuse_defaults(
3042 pkg.metadata["IUSE"].split()))
3043 old_use = pkg.metadata["USE"].split()
# Prefer the matching ebuild's settings when one exists
# and --usepkgonly isn't forcing the binary.
3045 if myeb and not usepkgonly:
3048 pkgsettings.setcpv(myeb, mydb=mydb)
3050 pkgsettings.setcpv(cpv, mydb=mydb)
3051 now_use = pkgsettings["PORTAGE_USE"].split()
3052 forced_flags = set()
3053 forced_flags.update(pkgsettings.useforce)
3054 forced_flags.update(pkgsettings.usemask)
3056 if myeb and not usepkgonly:
3057 cur_iuse = set(filter_iuse_defaults(
3058 portdb.aux_get(myeb,
3059 ["IUSE"])[0].split()))
3060 if self._reinstall_for_flags(forced_flags,
3064 # Compare current config to installed package
3065 # and do not reinstall if possible.
3066 if not installed and \
3067 ("--newuse" in self.myopts or \
3068 "--reinstall" in self.myopts) and \
3069 cpv in vardb.match(atom):
3070 pkgsettings.setcpv(cpv, mydb=pkg.metadata)
3071 forced_flags = set()
3072 forced_flags.update(pkgsettings.useforce)
3073 forced_flags.update(pkgsettings.usemask)
3074 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3075 old_iuse = set(filter_iuse_defaults(
3076 vardb.aux_get(cpv, ["IUSE"])[0].split()))
3077 cur_use = pkgsettings["PORTAGE_USE"].split()
3078 cur_iuse = set(filter_iuse_defaults(
3079 pkg.metadata["IUSE"].split()))
3080 reinstall_for_flags = \
3081 self._reinstall_for_flags(
3082 forced_flags, old_use, old_iuse,
3084 if reinstall_for_flags:
3087 must_reinstall = empty or \
3088 (myarg and not selective)
3089 if not reinstall_for_flags and \
3090 not must_reinstall and \
3091 cpv in vardb.match(atom):
3092 # If the installed version is masked, it may
3093 # be necessary to look at lower versions,
3094 # in case there is a visible downgrade.
3098 matched_packages.append(pkg)
3099 if reinstall_for_flags:
3100 self._reinstall_nodes[pkg] = \
# --- Post-processing of the candidate list.
3104 if not matched_packages:
3107 if "--debug" in self.myopts:
3108 for pkg in matched_packages:
3109 print (pkg.type_name + ":").rjust(10), pkg.cpv
3111 # Filter out any old-style virtual matches if they are
3112 # mixed with new-style virtual matches.
3113 cp = portage.dep_getkey(atom)
3114 if len(matched_packages) > 1 and \
3115 "virtual" == portage.catsplit(cp)[0]:
3116 for pkg in matched_packages:
3119 # Got a new-style virtual, so filter
3120 # out any old-style virtuals.
3121 matched_packages = [pkg for pkg in matched_packages \
# Among remaining candidates, keep only the single best version.
3125 if len(matched_packages) > 1:
3126 bestmatch = portage.best(
3127 [pkg.cpv for pkg in matched_packages])
3128 matched_packages = [pkg for pkg in matched_packages \
3129 if portage_dep.cpvequal(pkg.cpv, bestmatch)]
3131 # ordered by type preference ("ebuild" type is the last resort)
3132 return matched_packages[-1], existing_node
# NOTE(review): line-sampled extract; a few lines are missing (e.g. the
# empty-matches early return around 3142-3143 and the e_pkg return near
# 3148-3149). Tokens left byte-identical; comments added.
#
# _select_pkg_from_graph: resolve `atom` against the graph-backed trees,
# preferring a node already in the graph for the matched slot, otherwise
# falling back to the cached installed ("nomerge") Package instance.
3134 def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3136 Select packages that have already been added to the graph or
3137 those that are installed and have not been scheduled for
3140 graph_db = self._graph_trees[root]["porttree"].dbapi
3141 matches = graph_db.match(atom)
3144 cpv = matches[-1] # highest match
3145 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
3146 graph_db.aux_get(cpv, ["SLOT"])[0])
3147 e_pkg = self._slot_pkg_map[root].get(slot_atom)
3150 # Since this cpv exists in the graph_db,
3151 # we must have a cached Package instance.
3152 cache_key = ("installed", root, cpv, "nomerge")
3153 return (self._pkg_cache[cache_key], None)
# NOTE(review): line-sampled extract; gaps in the fused line numbers mean
# source lines are missing (early returns, loop headers). Tokens are left
# byte-identical; only comments were added.
3155 def _complete_graph(self):
3157 Add any deep dependencies of required sets (args, system, world) that
3158 have not been pulled into the graph yet. This ensures that the graph
3159 is consistent such that initially satisfied deep dependencies are not
3160 broken in the new graph. Initially unsatisfied dependencies are
3161 irrelevant since we only want to avoid breaking dependencies that are
3164 Since this method can consume enough time to disturb users, it is
3165 currently only enabled by the --complete-graph option.
3167 if "complete" not in self.myparams:
3168 # Skip this to avoid consuming enough time to disturb users.
3171 if "--buildpkgonly" in self.myopts or \
3172 "recurse" not in self.myparams:
3175 # Put the depgraph into a mode that causes it to only
3176 # select packages that have already been added to the
3177 # graph or those that are installed and have not been
3178 # scheduled for replacement. Also, toggle the "deep"
3179 # parameter so that all dependencies are traversed and
3181 self._select_atoms = self._select_atoms_from_graph
3182 self._select_package = self._select_pkg_from_graph
3183 already_deep = "deep" in self.myparams
3184 if not already_deep:
3185 self.myparams.add("deep")
3187 for root in self.roots:
3188 required_set_names = self._required_set_names.copy()
3189 if root == self.target_root and \
3190 (already_deep or "empty" in self.myparams):
3191 required_set_names.difference_update(self._sets)
3192 if not required_set_names and not self._ignored_deps:
3194 root_config = self.roots[root]
3195 setconfig = root_config.setconfig
3197 # Reuse existing SetArg instances when available.
3198 for arg in self.digraph.root_nodes():
3199 if not isinstance(arg, SetArg):
3201 if arg.root_config != root_config:
3203 if arg.name in required_set_names:
3205 required_set_names.remove(arg.name)
3206 # Create new SetArg instances only when necessary.
3207 for s in required_set_names:
3208 expanded_set = InternalPackageSet(
3209 initial_atoms=setconfig.getSetAtoms(s))
3210 atom = SETPREFIX + s
3211 args.append(SetArg(arg=atom, set=expanded_set,
3212 root_config=root_config))
3213 vardb = root_config.trees["vartree"].dbapi
# Re-queue each set's atoms (and any previously ignored deps) for
# resolution under the graph-restricted selection mode set above.
3215 for atom in arg.set:
3216 self._dep_stack.append(
3217 Dependency(atom=atom, root=root, parent=arg))
3218 if self._ignored_deps:
3219 self._dep_stack.extend(self._ignored_deps)
3220 self._ignored_deps = []
3221 if not self._create_graph(allow_unsatisfied=True):
3223 # Check the unsatisfied deps to see if any initially satisfied deps
3224 # will become unsatisfied due to an upgrade. Initially unsatisfied
3225 # deps are irrelevant since we only want to avoid breaking deps
3226 # that are initially satisfied.
3227 while self._unsatisfied_deps:
3228 dep = self._unsatisfied_deps.pop()
3229 matches = vardb.match_pkgs(dep.atom)
3231 # Initially unsatisfied.
3233 # An scheduled installation broke a deep dependency.
3234 # Add the installed package to the graph so that it
3235 # will be appropriately reported as a slot collision
3236 # (possibly solvable via backtracking).
3237 pkg = matches[-1] # highest match
3238 if not self._add_pkg(pkg, dep.parent,
3239 priority=dep.priority, depth=dep.depth):
3241 if not self._create_graph(allow_unsatisfied=True):
3245 def validate_blockers(self):
3246 """Remove any blockers from the digraph that do not match any of the
3247 packages within the graph. If necessary, create hard deps to ensure
3248 correct merge order such that mutually blocking packages are never
3249 installed simultaneously."""
# NOTE(review): this listing is elided (the embedded original line numbers
# skip), so several statements -- early returns, try/else clauses and some
# loop bodies -- are not visible here.  Presumably the method returns True
# on success and False when an unsolvable situation aborts it; confirm
# against the complete file.
3251 if "--buildpkgonly" in self.myopts or \
3252 "--nodeps" in self.myopts:
# Phase 1: make sure blocker atoms are known for every installed package.
# dep_check is expensive, so previously-computed atoms are reused from
# BlockerCache whenever the package's COUNTER still matches.
3255 #if "deep" in self.myparams:
3257 # Pull in blockers from all installed packages that haven't already
3258 # been pulled into the depgraph. This is not enabled by default
3259 # due to the performance penalty that is incurred by all the
3260 # additional dep_check calls that are required.
3262 dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
3263 for myroot in self.trees:
3264 vardb = self.trees[myroot]["vartree"].dbapi
3265 portdb = self.trees[myroot]["porttree"].dbapi
3266 pkgsettings = self.pkgsettings[myroot]
3267 final_db = self.mydbapi[myroot]
3268 cpv_all_installed = self.trees[myroot]["vartree"].dbapi.cpv_all()
3269 blocker_cache = BlockerCache(myroot, vardb)
# stale_cache collects cached cpvs that are no longer installed; the
# leftovers are purged from the cache after the loop.
3270 stale_cache = set(blocker_cache)
3271 for cpv in cpv_all_installed:
3272 stale_cache.discard(cpv)
3273 blocker_atoms = None
3274 pkg = self._pkg_cache[
3275 ("installed", myroot, cpv, "nomerge")]
3277 if self.digraph.contains(pkg):
3279 blockers = self._blocker_parents.child_nodes(pkg)
3282 if blockers is not None:
3283 blockers = set("!" + blocker.atom \
3284 for blocker in blockers)
3286 # If this node has any blockers, create a "nomerge"
3287 # node for it so that they can be enforced.
3288 self.spinner.update()
3289 blocker_data = blocker_cache.get(cpv)
# A mismatched COUNTER means the installed instance changed since the
# cache entry was written, so the entry cannot be trusted.
3290 if blocker_data is not None and \
3291 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3294 # If blocker data from the graph is available, use
3295 # it to validate the cache and update the cache if
3297 if blocker_data is not None and \
3298 blockers is not None:
3299 if not blockers.symmetric_difference(
3300 blocker_data.atoms):
3304 if blocker_data is None and \
3305 blockers is not None:
3306 # Re-use the blockers from the graph.
3307 blocker_atoms = sorted(blockers)
3308 counter = long(pkg.metadata["COUNTER"])
3310 blocker_cache.BlockerData(counter, blocker_atoms)
3311 blocker_cache[pkg.cpv] = blocker_data
3315 blocker_atoms = blocker_data.atoms
# Cache miss: run a full dep_check and extract the "!" atoms from the
# combined *DEPEND string.
3317 myuse = pkg.metadata["USE"].split()
3318 # Use aux_get() to trigger FakeVartree global
3319 # updates on *DEPEND when appropriate.
3320 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
3321 # It is crucial to pass in final_db here in order to
3322 # optimize dep_check calls by eliminating atoms via
3323 # dep_wordreduce and dep_eval calls.
# Strict checking is disabled here (restored a few lines below) since
# installed packages may have depstrings predating current syntax rules.
3325 portage_dep._dep_check_strict = False
3327 success, atoms = portage.dep_check(depstr,
3328 final_db, pkgsettings, myuse=myuse,
3329 trees=self._graph_trees, myroot=myroot)
3330 except Exception, e:
3331 if isinstance(e, SystemExit):
3333 # This is helpful, for example, if a ValueError
3334 # is thrown from cpv_expand due to multiple
3335 # matches (this can happen if an atom lacks a
3337 show_invalid_depstring_notice(
3338 pkg, depstr, str(e))
3342 portage_dep._dep_check_strict = True
# An invalid depstring is tolerated when the package is about to be
# replaced anyway; otherwise the user gets a notice.
3344 replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
3345 if replacement_pkg and \
3346 replacement_pkg[0].operation == "merge":
3347 # This package is being replaced anyway, so
3348 # ignore invalid dependencies so as not to
3349 # annoy the user too much (otherwise they'd be
3350 # forced to manually unmerge it first).
3352 show_invalid_depstring_notice(pkg, depstr, atoms)
3354 blocker_atoms = [myatom for myatom in atoms \
3355 if myatom.startswith("!")]
3356 blocker_atoms.sort()
3357 counter = long(pkg.metadata["COUNTER"])
3358 blocker_cache[cpv] = \
3359 blocker_cache.BlockerData(counter, blocker_atoms)
3361 for myatom in blocker_atoms:
3362 blocker = Blocker(atom=myatom[1:], root=myroot)
3363 self._blocker_parents.add(blocker, pkg)
# Purge cache entries for packages that are no longer installed.
3364 for cpv in stale_cache:
3365 del blocker_cache[cpv]
3366 blocker_cache.flush()
3369 # Discard any "uninstall" tasks scheduled by previous calls
3370 # to this method, since those tasks may not make sense given
3371 # the current graph state.
3372 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
3373 if previous_uninstall_tasks:
3374 self._blocker_uninstalls = digraph()
3375 self.digraph.difference_update(previous_uninstall_tasks)
# Phase 2: match every blocker against the initial (installed) and final
# (post-merge) package databases and decide whether it can be resolved
# by replacement/unmerge or must be recorded as unsolvable.
3377 for blocker in self._blocker_parents.leaf_nodes():
3378 self.spinner.update()
3379 root_config = self.roots[blocker.root]
3380 virtuals = root_config.settings.getvirtuals()
3381 mytype, myroot, mydep = blocker
3382 initial_db = self.trees[myroot]["vartree"].dbapi
3383 final_db = self.mydbapi[myroot]
3385 provider_virtual = False
3386 if blocker.cp in virtuals and \
3387 not self._have_new_virt(blocker.root, blocker.cp):
3388 provider_virtual = True
# An old-style virtual blocker is expanded to one atom per provider.
3390 if provider_virtual:
3392 for provider_entry in virtuals[blocker.cp]:
3394 portage.dep_getkey(provider_entry)
3395 atoms.append(blocker.atom.replace(
3396 blocker.cp, provider_cp))
3398 atoms = [blocker.atom]
3400 blocked_initial = []
3402 blocked_initial.extend(initial_db.match_pkgs(atom))
3406 blocked_final.extend(final_db.match_pkgs(atom))
3408 if not blocked_initial and not blocked_final:
3409 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
3410 self._blocker_parents.remove(blocker)
3411 # Discard any parents that don't have any more blockers.
3412 for pkg in parent_pkgs:
3413 if not self._blocker_parents.child_nodes(pkg):
3414 self._blocker_parents.remove(pkg)
3416 for parent in self._blocker_parents.parent_nodes(blocker):
3417 unresolved_blocks = False
3418 depends_on_order = set()
3419 for pkg in blocked_initial:
3420 if pkg.slot_atom == parent.slot_atom:
3421 # TODO: Support blocks within slots in cases where it
3422 # might make sense. For example, a new version might
3423 # require that the old version be uninstalled at build
3426 if parent.installed:
3427 # Two currently installed packages conflict with
3428 # each other. Ignore this case since the damage
3429 # is already done and this would be likely to
3430 # confuse users if displayed like a normal blocker.
3432 if parent.operation == "merge":
3433 # Maybe the blocked package can be replaced or simply
3434 # unmerged to resolve this block.
3435 depends_on_order.add((pkg, parent))
3437 # None of the above blocker resolutions techniques apply,
3438 # so apparently this one is unresolvable.
3439 unresolved_blocks = True
3440 for pkg in blocked_final:
3441 if pkg.slot_atom == parent.slot_atom:
3442 # TODO: Support blocks within slots.
3444 if parent.operation == "nomerge" and \
3445 pkg.operation == "nomerge":
3446 # This blocker will be handled the next time that a
3447 # merge of either package is triggered.
3450 # Maybe the blocking package can be
3451 # unmerged to resolve this block.
3452 if parent.operation == "merge" and pkg.installed:
3453 depends_on_order.add((pkg, parent))
3455 elif parent.operation == "nomerge":
3456 depends_on_order.add((parent, pkg))
3458 # None of the above blocker resolutions techniques apply,
3459 # so apparently this one is unresolvable.
3460 unresolved_blocks = True
3462 # Make sure we don't unmerge any package that have been pulled
3464 if not unresolved_blocks and depends_on_order:
3465 for inst_pkg, inst_task in depends_on_order:
3466 if self.digraph.contains(inst_pkg) and \
3467 self.digraph.parent_nodes(inst_pkg):
3468 unresolved_blocks = True
# Resolvable via unmerge: schedule an "uninstall" Package task and use a
# hard dep to force it to run before the conflicting merge.
3471 if not unresolved_blocks and depends_on_order:
3472 for inst_pkg, inst_task in depends_on_order:
3473 uninst_task = Package(built=inst_pkg.built,
3474 cpv=inst_pkg.cpv, installed=inst_pkg.installed,
3475 metadata=inst_pkg.metadata,
3476 operation="uninstall", root=inst_pkg.root,
3477 type_name=inst_pkg.type_name)
3478 self._pkg_cache[uninst_task] = uninst_task
3479 # Enforce correct merge order with a hard dep.
3480 self.digraph.addnode(uninst_task, inst_task,
3481 priority=BlockerDepPriority.instance)
3482 # Count references to this blocker so that it can be
3483 # invalidated after nodes referencing it have been
3485 self._blocker_uninstalls.addnode(uninst_task, blocker)
3486 if not unresolved_blocks and not depends_on_order:
3487 self._blocker_parents.remove_edge(blocker, parent)
3488 if not self._blocker_parents.parent_nodes(blocker):
3489 self._blocker_parents.remove(blocker)
3490 if not self._blocker_parents.child_nodes(parent):
3491 self._blocker_parents.remove(parent)
3492 if unresolved_blocks:
3493 self._unsolvable_blockers.add(blocker, parent)
3497 def _accept_collisions(self):
3499 for x in ("--buildpkgonly", "--fetchonly",
3500 "--fetch-all-uri", "--nodeps", "--pretend"):
3501 if x in self.myopts:
3506 def _merge_order_bias(self, mygraph):
3507 """Order nodes from highest to lowest overall reference count for
3508 optimal leaf node selection."""
3510 for node in mygraph.order:
3511 node_info[node] = len(mygraph.parent_nodes(node))
3512 def cmp_merge_preference(node1, node2):
3513 return node_info[node2] - node_info[node1]
3514 mygraph.order.sort(cmp_merge_preference)
3516 def altlist(self, reversed=False):
# Return a copy of the serialized merge/uninstall task list, computing
# it on first use and caching it in self._serialized_tasks_cache.
# Serialization is retried whenever _serialize_tasks signals (via the
# _serialize_tasks_retry exception) that the graph had to be completed
# first -- see _serialize_tasks, which sets the "complete" param before
# raising.  NOTE(review): `reversed` shadows the builtin but is part of
# the public interface; the `try:` line, the retry handler body, and
# the reversed/return handling are elided from this listing -- confirm
# against the complete file.
3518 while self._serialized_tasks_cache is None:
3519 self._resolve_conflicts()
3521 self._serialized_tasks_cache = self._serialize_tasks()
3522 except self._serialize_tasks_retry:
3525 retlist = self._serialized_tasks_cache[:]
3530 def _resolve_conflicts(self):
3531 if not self._complete_graph():
3532 raise self._unknown_internal_error()
3534 if not self.validate_blockers():
3535 raise self._unknown_internal_error()
3537 def _serialize_tasks(self):
# Convert self.digraph into a linear merge list (retlist): repeatedly
# select graph leaf nodes, preferring orders that satisfy RDEPENDs as
# early as possible, scheduling Uninstall tasks to break blocker
# conflicts, and aborting with a diagnostic display when only circular
# dependencies remain.
# NOTE(review): this listing is elided (the embedded original line
# numbers skip); initializations such as retlist/asap_nodes/prefer_asap
# and several loop-control statements (continue/break/return, try/else)
# are not visible here -- confirm details against the complete file.
3538 mygraph=self.digraph.copy()
3539 # Prune "nomerge" root nodes if nothing depends on them, since
3540 # otherwise they slow down merge order calculation. Don't remove
3541 # non-root nodes since they help optimize merge order in some cases
3542 # such as revdep-rebuild.
3543 removed_nodes = set()
3545 for node in mygraph.root_nodes():
3546 if not isinstance(node, Package) or \
3547 node.installed or node.onlydeps:
3548 removed_nodes.add(node)
3550 self.spinner.update()
3551 mygraph.difference_update(removed_nodes)
3552 if not removed_nodes:
3554 removed_nodes.clear()
3555 self._merge_order_bias(mygraph)
3556 def cmp_circular_bias(n1, n2):
3558 RDEPEND is stronger than PDEPEND and this function
3559 measures such a strength bias within a circular
3560 dependency relationship.
3562 n1_n2_medium = n2 in mygraph.child_nodes(n1,
3563 ignore_priority=DepPriority.MEDIUM_SOFT)
3564 n2_n1_medium = n1 in mygraph.child_nodes(n2,
3565 ignore_priority=DepPriority.MEDIUM_SOFT)
3566 if n1_n2_medium == n2_n1_medium:
# Working copies -- the real blocker graphs must survive a retry.
3571 myblocker_uninstalls = self._blocker_uninstalls.copy()
3573 # Contains any Uninstall tasks that have been ignored
3574 # in order to avoid the circular deps code path. These
3575 # correspond to blocker conflicts that could not be
3577 ignored_uninstall_tasks = set()
3578 have_uninstall_task = False
3579 complete = "complete" in self.myparams
3580 myblocker_parents = self._blocker_parents.copy()
3583 def get_nodes(**kwargs):
3585 Returns leaf nodes excluding Uninstall instances
3586 since those should be executed as late as possible.
3588 return [node for node in mygraph.leaf_nodes(**kwargs) \
3589 if isinstance(node, Package) and \
3590 node.operation != "uninstall"]
3592 # sys-apps/portage needs special treatment if ROOT="/"
3593 portage_node = self.mydbapi["/"].match_pkgs("sys-apps/portage")
3595 portage_node = portage_node[0]
3598 if portage_node is not None and \
3599 (not mygraph.contains(portage_node) or \
3600 portage_node.operation == "nomerge"):
# Priorities from None (ignore nothing) up to MEDIUM_SOFT are tried in
# increasing order of leniency when searching for selectable leaves.
3603 ignore_priority_soft_range = [None]
3604 ignore_priority_soft_range.extend(
3605 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
3606 tree_mode = "--tree" in self.myopts
3607 # Tracks whether or not the current iteration should prefer asap_nodes
3608 # if available. This is set to False when the previous iteration
3609 # failed to select any nodes. It is reset whenever nodes are
3610 # successfully selected.
3613 # By default, try to avoid selecting root nodes whenever possible. This
3614 # helps ensure that the maximum possible number of soft dependencies
3615 # have been removed from the graph before their parent nodes have
3616 # selected. This is especially important when those dependencies are
3617 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
3618 # CHOST has been changed (like when building a stage3 from a stage2).
3619 accept_root_node = False
3621 # State of prefer_asap and accept_root_node flags for successive
3622 # iterations that loosen the criteria for node selection.
3624 # iteration prefer_asap accept_root_node
3629 # If no nodes are selected on the 3rd iteration, it is due to
3630 # unresolved blockers or circular dependencies.
# Main selection loop: each pass removes one or more nodes from mygraph
# and appends them to the result, loosening criteria when stuck.
3632 while not mygraph.empty():
3633 self.spinner.update()
3634 selected_nodes = None
3635 ignore_priority = None
3636 if prefer_asap and asap_nodes:
3637 """ASAP nodes are merged before their soft deps."""
3638 asap_nodes = [node for node in asap_nodes \
3639 if mygraph.contains(node)]
3640 for node in asap_nodes:
3641 if not mygraph.child_nodes(node,
3642 ignore_priority=DepPriority.SOFT):
3643 selected_nodes = [node]
3644 asap_nodes.remove(node)
3646 if not selected_nodes and \
3647 not (prefer_asap and asap_nodes):
3648 for ignore_priority in ignore_priority_soft_range:
3649 nodes = get_nodes(ignore_priority=ignore_priority)
3653 if ignore_priority is None and not tree_mode:
3654 # Greedily pop all of these nodes since no relationship
3655 # has been ignored. This optimization destroys --tree
3656 # output, so it's disabled in reversed mode.
3657 selected_nodes = nodes
3659 # For optimal merge order:
3660 # * Only pop one node.
3661 # * Removing a root node (node without a parent)
3662 # will not produce a leaf node, so avoid it.
3664 if mygraph.parent_nodes(node):
3665 # found a non-root node
3666 selected_nodes = [node]
3668 if not selected_nodes and \
3669 (accept_root_node or ignore_priority is None):
3670 # settle for a root node
3671 selected_nodes = [nodes[0]]
3673 if not selected_nodes:
3674 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
3676 """Recursively gather a group of nodes that RDEPEND on
3677 each other. This ensures that they are merged as a group
3678 and get their RDEPENDs satisfied as soon as possible."""
3679 def gather_deps(ignore_priority,
3680 mergeable_nodes, selected_nodes, node):
3681 if node in selected_nodes:
3683 if node not in mergeable_nodes:
3685 if node == portage_node and mygraph.child_nodes(node,
3686 ignore_priority=DepPriority.MEDIUM_SOFT):
3687 # Make sure that portage always has all of it's
3688 # RDEPENDs installed first.
3690 selected_nodes.add(node)
3691 for child in mygraph.child_nodes(node,
3692 ignore_priority=ignore_priority):
3693 if not gather_deps(ignore_priority,
3694 mergeable_nodes, selected_nodes, child):
3697 mergeable_nodes = set(nodes)
3698 if prefer_asap and asap_nodes:
3700 for ignore_priority in xrange(DepPriority.SOFT,
3701 DepPriority.MEDIUM_SOFT + 1):
3703 if nodes is not asap_nodes and \
3704 not accept_root_node and \
3705 not mygraph.parent_nodes(node):
3707 selected_nodes = set()
3708 if gather_deps(ignore_priority,
3709 mergeable_nodes, selected_nodes, node):
3712 selected_nodes = None
3716 # If any nodes have been selected here, it's always
3717 # possible that anything up to a MEDIUM_SOFT priority
3718 # relationship has been ignored. This state is recorded
3719 # in ignore_priority so that relevant nodes will be
3720 # added to asap_nodes when appropriate.
3722 ignore_priority = DepPriority.MEDIUM_SOFT
3724 if prefer_asap and asap_nodes and not selected_nodes:
3725 # We failed to find any asap nodes to merge, so ignore
3726 # them for the next iteration.
3730 if not selected_nodes and not accept_root_node:
3731 # Maybe there are only root nodes left, so accept them
3732 # for the next iteration.
3733 accept_root_node = True
3736 if selected_nodes and ignore_priority > DepPriority.SOFT:
3737 # Try to merge ignored medium deps as soon as possible.
3738 for node in selected_nodes:
3739 children = set(mygraph.child_nodes(node))
3740 soft = children.difference(
3741 mygraph.child_nodes(node,
3742 ignore_priority=DepPriority.SOFT))
3743 medium_soft = children.difference(
3744 mygraph.child_nodes(node,
3745 ignore_priority=DepPriority.MEDIUM_SOFT))
3746 medium_soft.difference_update(soft)
3747 for child in medium_soft:
3748 if child in selected_nodes:
3750 if child in asap_nodes:
3752 asap_nodes.append(child)
3754 if selected_nodes and len(selected_nodes) > 1:
3755 if not isinstance(selected_nodes, list):
3756 selected_nodes = list(selected_nodes)
3757 selected_nodes.sort(cmp_circular_bias)
3759 if not selected_nodes and not myblocker_uninstalls.is_empty():
3760 # An Uninstall task needs to be executed in order to
3761 # avoid conflict if possible.
3763 min_parent_deps = None
# Among the safe candidates, pick the Uninstall task whose parents have
# the fewest other remaining deps (most likely to produce a leaf next).
3765 for task in myblocker_uninstalls.leaf_nodes():
3766 # Do some sanity checks so that system or world packages
3767 # don't get uninstalled inappropriately here (only really
3768 # necessary when --complete-graph has not been enabled).
3770 if task in ignored_uninstall_tasks:
3773 root_config = self.roots[task.root]
3774 inst_pkg = self._pkg_cache[
3775 ("installed", task.root, task.cpv, "nomerge")]
3777 if self.digraph.contains(inst_pkg):
3780 if "/" == task.root:
3781 # Never uninstall sys-apps/portage
3782 # except through replacement.
3783 if "sys-apps/portage" == task.cp:
3785 # For packages in the system set, don't take
3786 # any chances. If the conflict can't be resolved
3787 # by a normal replacement operation then abort.
3790 for atom in root_config.sets[
3791 "system"].iterAtomsForPackage(task):
3794 except portage_exception.InvalidDependString:
3799 # Note that the world check isn't always
3800 # necessary since self._complete_graph() will
3801 # add all packages from the system and world sets to the
3802 # graph. This just allows unresolved conflicts to be
3803 # detected as early as possible, which makes it possible
3804 # to avoid calling self._complete_graph() when it is
3805 # unnecessary due to blockers triggering an abortion.
3807 # For packages in the world set, go ahead and uninstall
3808 # when necessary, as long as the atom will be satisfied
3809 # in the final state.
3810 graph_db = self.mydbapi[task.root]
3812 for atom in root_config.sets[
3813 "world"].iterAtomsForPackage(task):
3815 for pkg in graph_db.match_pkgs(atom):
3823 except portage_exception.InvalidDependString:
3828 # Check the deps of parent nodes to ensure that
3829 # the chosen task produces a leaf node. Maybe
3830 # this can be optimized some more to make the
3831 # best possible choice, but the current algorithm
3832 # is simple and should be near optimal for most
3835 for parent in mygraph.parent_nodes(task):
3836 parent_deps.update(mygraph.child_nodes(parent,
3837 ignore_priority=DepPriority.MEDIUM_SOFT))
3838 parent_deps.remove(task)
3839 if min_parent_deps is None or \
3840 len(parent_deps) < min_parent_deps:
3841 min_parent_deps = len(parent_deps)
3844 if uninst_task is not None:
3845 selected_nodes = [uninst_task]
3847 # None of the Uninstall tasks are acceptable, so
3848 # the corresponding blockers are unresolvable.
3849 # We need to drop an Uninstall task here in order
3850 # to avoid the circular deps code path, but the
3851 # blocker will still be counted as an unresolved
3853 for node in myblocker_uninstalls.leaf_nodes():
3855 mygraph.remove(node)
3859 ignored_uninstall_tasks.add(node)
3862 # After dropping an Uninstall task, reset
3863 # the state variables for leaf node selection and
3864 # continue trying to select leaf nodes.
3866 accept_root_node = False
3869 if not selected_nodes:
3870 # No leaf nodes are available, so we have a circular
3871 # dependency panic situation. Reduce the noise level to a
3872 # minimum via repeated elimination of root nodes since they
3873 # have no parents and thus can not be part of a cycle.
3875 root_nodes = mygraph.root_nodes(
3876 ignore_priority=DepPriority.MEDIUM_SOFT)
3879 for node in root_nodes:
3880 mygraph.remove(node)
3881 # Display the USE flags that are enabled on nodes that are part
3882 # of dependency cycles in case that helps the user decide to
3883 # disable some of them.
3885 tempgraph = mygraph.copy()
3886 while not tempgraph.empty():
3887 nodes = tempgraph.leaf_nodes()
3889 node = tempgraph.order[0]
3892 display_order.append(node)
3893 tempgraph.remove(node)
3894 display_order.reverse()
# Force a verbose tree display so the cycle is readable, then abort.
3895 self.myopts.pop("--quiet", None)
3896 self.myopts.pop("--verbose", None)
3897 self.myopts["--tree"] = True
3900 self.display(display_order)
3901 print "!!! Error: circular dependencies:"
3903 mygraph.debug_print()
3905 print "!!! Note that circular dependencies can often be avoided by temporarily"
3906 print "!!! disabling USE flags that trigger optional dependencies."
3907 raise self._unknown_internal_error()
3909 # At this point, we've succeeded in selecting one or more nodes, so
3910 # it's now safe to reset the prefer_asap and accept_root_node flags
3911 # to their default states.
3913 accept_root_node = False
3915 mygraph.difference_update(selected_nodes)
# Commit the selected nodes to the result list, handling interactions
# with scheduled Uninstall tasks and their blockers along the way.
3917 for node in selected_nodes:
3918 if isinstance(node, Package) and \
3919 node.operation == "nomerge":
3922 # Handle interactions between blockers
3923 # and uninstallation tasks.
3924 solved_blockers = set()
3926 if isinstance(node, Package) and \
3927 "uninstall" == node.operation:
3928 have_uninstall_task = True
3931 vardb = self.trees[node.root]["vartree"].dbapi
3932 previous_cpv = vardb.match(node.slot_atom)
3934 # The package will be replaced by this one, so remove
3935 # the corresponding Uninstall task if necessary.
3936 previous_cpv = previous_cpv[0]
3938 ("installed", node.root, previous_cpv, "uninstall")
3940 mygraph.remove(uninst_task)
3943 if uninst_task is not None and \
3944 uninst_task not in ignored_uninstall_tasks and \
3945 myblocker_uninstalls.contains(uninst_task):
3946 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
3947 myblocker_uninstalls.remove(uninst_task)
3948 # Discard any blockers that this Uninstall solves.
3949 for blocker in blocker_nodes:
3950 if not myblocker_uninstalls.child_nodes(blocker):
3951 myblocker_uninstalls.remove(blocker)
3952 solved_blockers.add(blocker)
3954 retlist.append(node)
3956 if isinstance(node, Package) and \
3957 "uninstall" == node.operation:
3958 # Include satisfied blockers in the merge list so
3959 # that the user can see why the package had to be
3960 # uninstalled in advance rather than through
3962 for blocker in solved_blockers:
3963 retlist.append(Blocker(atom=blocker.atom,
3964 root=blocker.root, satisfied=True))
# Any blockers still unsolved are appended so the caller can report them.
3966 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
3967 for node in myblocker_uninstalls.root_nodes():
3968 unsolvable_blockers.add(node)
3970 for blocker in unsolvable_blockers:
3971 retlist.append(blocker)
3973 # If any Uninstall tasks need to be executed in order
3974 # to avoid a conflict, complete the graph with any
3975 # dependencies that may have been initially
3976 # neglected (to ensure that unsafe Uninstall tasks
3977 # are properly identified and blocked from execution).
3978 if have_uninstall_task and \
3980 not unsolvable_blockers:
3981 self.myparams.add("complete")
3982 raise self._serialize_tasks_retry("")
3986 def display(self, mylist, favorites=[], verbosity=None):
3987 if verbosity is None:
3988 verbosity = ("--quiet" in self.myopts and 1 or \
3989 "--verbose" in self.myopts and 3 or 2)
3990 favorites_set = InternalPackageSet(favorites)
3995 counters = PackageCounters()
3997 if verbosity == 1 and "--verbose" not in self.myopts:
3998 def create_use_string(*args):
4001 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
4003 is_new, reinst_flags,
4004 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
4005 alphabetical=("--alphabetical" in self.myopts)):
4013 cur_iuse = set(cur_iuse)
4014 enabled_flags = cur_iuse.intersection(cur_use)
4015 removed_iuse = set(old_iuse).difference(cur_iuse)
4016 any_iuse = cur_iuse.union(old_iuse)
4017 any_iuse = list(any_iuse)
4019 for flag in any_iuse:
4022 reinst_flag = reinst_flags and flag in reinst_flags
4023 if flag in enabled_flags:
4025 if is_new or flag in old_use and \
4026 (all_flags or reinst_flag):
4027 flag_str = red(flag)
4028 elif flag not in old_iuse:
4029 flag_str = yellow(flag) + "%*"
4030 elif flag not in old_use:
4031 flag_str = green(flag) + "*"
4032 elif flag in removed_iuse:
4033 if all_flags or reinst_flag:
4034 flag_str = yellow("-" + flag) + "%"
4037 flag_str = "(" + flag_str + ")"
4038 removed.append(flag_str)
4041 if is_new or flag in old_iuse and \
4042 flag not in old_use and \
4043 (all_flags or reinst_flag):
4044 flag_str = blue("-" + flag)
4045 elif flag not in old_iuse:
4046 flag_str = yellow("-" + flag)
4047 if flag not in iuse_forced:
4049 elif flag in old_use:
4050 flag_str = green("-" + flag) + "*"
4052 if flag in iuse_forced:
4053 flag_str = "(" + flag_str + ")"
4055 enabled.append(flag_str)
4057 disabled.append(flag_str)
4060 ret = " ".join(enabled)
4062 ret = " ".join(enabled + disabled + removed)
4064 ret = '%s="%s" ' % (name, ret)
4067 repo_display = RepoDisplay(self.roots)
4072 mygraph = self.digraph.copy()
4074 # If there are any Uninstall instances, add the corresponding
4075 # blockers to the digraph (useful for --tree display).
4076 for uninstall in self._blocker_uninstalls.leaf_nodes():
4077 uninstall_parents = \
4078 self._blocker_uninstalls.parent_nodes(uninstall)
4079 if not uninstall_parents:
4082 # Remove the corresponding "nomerge" node and substitute
4083 # the Uninstall node.
4084 inst_pkg = self._pkg_cache[
4085 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
4087 mygraph.remove(inst_pkg)
4092 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
4094 inst_pkg_blockers = []
4096 # Break the Package -> Uninstall edges.
4097 mygraph.remove(uninstall)
4099 # Resolution of a package's blockers
4100 # depend on it's own uninstallation.
4101 for blocker in inst_pkg_blockers:
4102 mygraph.add(uninstall, blocker)
4104 # Expand Package -> Uninstall edges into
4105 # Package -> Blocker -> Uninstall edges.
4106 for blocker in uninstall_parents:
4107 mygraph.add(uninstall, blocker)
4108 for parent in self._blocker_parents.parent_nodes(blocker):
4109 if parent != inst_pkg:
4110 mygraph.add(blocker, parent)
4117 if isinstance(graph_key, list):
4118 graph_key = tuple(graph_key)
4119 if "--tree" in self.myopts:
4120 depth = len(tree_nodes)
4121 while depth and graph_key not in \
4122 mygraph.child_nodes(tree_nodes[depth-1]):
4125 tree_nodes = tree_nodes[:depth]
4126 tree_nodes.append(graph_key)
4127 display_list.append((x, depth, True))
4128 shown_edges.add((graph_key, tree_nodes[depth-1]))
4130 traversed_nodes = set() # prevent endless circles
4131 traversed_nodes.add(graph_key)
4132 def add_parents(current_node, ordered):
4134 # Do not traverse to parents if this node is an
4135 # an argument or a direct member of a set that has
4136 # been specified as an argument (system or world).
4137 if current_node not in self._set_nodes:
4138 parent_nodes = mygraph.parent_nodes(current_node)
4140 child_nodes = set(mygraph.child_nodes(current_node))
4141 selected_parent = None
4142 # First, try to avoid a direct cycle.
4143 for node in parent_nodes:
4144 if not isinstance(node, (Blocker, Package)):
4146 if node not in traversed_nodes and \
4147 node not in child_nodes:
4148 edge = (current_node, node)
4149 if edge in shown_edges:
4151 selected_parent = node
4153 if not selected_parent:
4154 # A direct cycle is unavoidable.
4155 for node in parent_nodes:
4156 if not isinstance(node, (Blocker, Package)):
4158 if node not in traversed_nodes:
4159 edge = (current_node, node)
4160 if edge in shown_edges:
4162 selected_parent = node
4165 shown_edges.add((current_node, selected_parent))
4166 traversed_nodes.add(selected_parent)
4167 add_parents(selected_parent, False)
4168 display_list.append((current_node,
4169 len(tree_nodes), ordered))
4170 tree_nodes.append(current_node)
4172 add_parents(graph_key, True)
4174 display_list.append((x, depth, True))
4175 mylist = display_list
4177 last_merge_depth = 0
4178 for i in xrange(len(mylist)-1,-1,-1):
4179 graph_key, depth, ordered = mylist[i]
4180 if not ordered and depth == 0 and i > 0 \
4181 and graph_key == mylist[i-1][0] and \
4182 mylist[i-1][1] == 0:
4183 # An ordered node got a consecutive duplicate when the tree was
4187 if ordered and graph_key[-1] != "nomerge":
4188 last_merge_depth = depth
4190 if depth >= last_merge_depth or \
4191 i < len(mylist) - 1 and \
4192 depth >= mylist[i+1][1]:
4195 from portage import flatten
4196 from portage_dep import use_reduce, paren_reduce
4197 # files to fetch list - avoids counting a same file twice
4198 # in size display (verbose mode)
4201 for mylist_index in xrange(len(mylist)):
4202 x, depth, ordered = mylist[mylist_index]
4206 portdb = self.trees[myroot]["porttree"].dbapi
4207 bindb = self.trees[myroot]["bintree"].dbapi
4208 vardb = self.trees[myroot]["vartree"].dbapi
4209 vartree = self.trees[myroot]["vartree"]
4210 pkgsettings = self.pkgsettings[myroot]
4213 indent = " " * depth
4216 addl=""+red("B")+" "+fetch+" "
4218 counters.blocks += 1
4219 resolved = portage.key_expand(
4220 pkg_key, mydb=vardb, settings=pkgsettings)
4221 if "--columns" in self.myopts and "--quiet" in self.myopts:
4222 addl = addl + " " + red(resolved)
4224 addl = "[blocks " + addl + "] " + indent + red(resolved)
4225 block_parents = self._blocker_parents.parent_nodes(tuple(x))
4226 block_parents = set([pnode[2] for pnode in block_parents])
4227 block_parents = ", ".join(block_parents)
4229 addl += bad(" (\"%s\" is blocking %s)") % \
4230 (pkg_key, block_parents)
4232 addl += bad(" (is blocking %s)") % block_parents
4233 if isinstance(x, Blocker) and x.satisfied:
4236 blockers.append(addl)
4239 pkg_merge = ordered and pkg_status == "merge"
4240 if not pkg_merge and pkg_status == "merge":
4241 pkg_status = "nomerge"
4242 built = pkg_type != "ebuild"
4243 installed = pkg_type == "installed"
4245 metadata = pkg.metadata
4247 repo_name = metadata["repository"]
4248 if pkg_type == "ebuild":
4249 ebuild_path = portdb.findname(pkg_key)
4250 if not ebuild_path: # shouldn't happen
4251 raise portage_exception.PackageNotFound(pkg_key)
4252 repo_path_real = os.path.dirname(os.path.dirname(
4253 os.path.dirname(ebuild_path)))
4255 repo_path_real = portdb.getRepositoryPath(repo_name)
4256 pkg_use = metadata["USE"].split()
4258 restrict = flatten(use_reduce(paren_reduce(
4259 pkg.metadata["RESTRICT"]), uselist=pkg_use))
4260 except portage_exception.InvalidDependString, e:
4261 if not pkg.installed:
4262 show_invalid_depstring_notice(x,
4263 pkg.metadata["RESTRICT"], str(e))
4267 if "ebuild" == pkg_type and x[3] != "nomerge" and \
4268 "fetch" in restrict:
4271 counters.restrict_fetch += 1
4272 if portdb.fetch_check(pkg_key, pkg_use):
4275 counters.restrict_fetch_satisfied += 1
4277 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
4278 #param is used for -u, where you still *do* want to see when something is being upgraded.
4281 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4282 if vardb.cpv_exists(pkg_key):
4283 addl=" "+yellow("R")+fetch+" "
4285 counters.reinst += 1
4286 elif pkg_status == "uninstall":
4287 counters.uninst += 1
4288 # filter out old-style virtual matches
4289 elif installed_versions and \
4290 portage.cpv_getkey(installed_versions[0]) == \
4291 portage.cpv_getkey(pkg_key):
4292 myinslotlist = vardb.match(pkg.slot_atom)
4293 # If this is the first install of a new-style virtual, we
4294 # need to filter out old-style virtual matches.
4295 if myinslotlist and \
4296 portage.cpv_getkey(myinslotlist[0]) != \
4297 portage.cpv_getkey(pkg_key):
4300 myoldbest = myinslotlist[:]
4302 if not portage_dep.cpvequal(pkg_key,
4303 portage.best([pkg_key] + myoldbest)):
4305 addl += turquoise("U")+blue("D")
4307 counters.downgrades += 1
4310 addl += turquoise("U") + " "
4312 counters.upgrades += 1
4314 # New slot, mark it new.
4315 addl = " " + green("NS") + fetch + " "
4316 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4318 counters.newslot += 1
4320 if "--changelog" in self.myopts:
4321 inst_matches = vardb.match(pkg.slot_atom)
4323 changelogs.extend(self.calc_changelog(
4324 portdb.findname(pkg_key),
4325 inst_matches[0], pkg_key))
4327 addl = " " + green("N") + " " + fetch + " "
4335 cur_iuse = list(filter_iuse_defaults(
4336 pkg.metadata["IUSE"].split()))
4338 forced_flags = set()
4339 pkgsettings.setcpv(pkg.cpv, mydb=pkg.metadata) # for package.use.{mask,force}
4340 forced_flags.update(pkgsettings.useforce)
4341 forced_flags.update(pkgsettings.usemask)
4343 cur_iuse = portage.unique_array(cur_iuse)
4346 cur_use = [flag for flag in cur_use if flag in cur_iuse]
4348 if myoldbest and myinslotlist:
4349 previous_cpv = myoldbest[0]
4351 previous_cpv = pkg.cpv
4352 if vardb.cpv_exists(previous_cpv):
4353 old_iuse, old_use = vardb.aux_get(
4354 previous_cpv, ["IUSE", "USE"])
4355 old_iuse = list(set(
4356 filter_iuse_defaults(old_iuse.split())))
4358 old_use = old_use.split()
4365 old_use = [flag for flag in old_use if flag in old_iuse]
4367 use_expand = pkgsettings["USE_EXPAND"].lower().split()
4369 use_expand.reverse()
4370 use_expand_hidden = \
4371 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
4373 def map_to_use_expand(myvals, forcedFlags=False,
4377 for exp in use_expand:
4380 for val in myvals[:]:
4381 if val.startswith(exp.lower()+"_"):
4382 if val in forced_flags:
4383 forced[exp].add(val[len(exp)+1:])
4384 ret[exp].append(val[len(exp)+1:])
4387 forced["USE"] = [val for val in myvals \
4388 if val in forced_flags]
4390 for exp in use_expand_hidden:
4396 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4397 # are the only thing that triggered reinstallation.
4398 reinst_flags_map = {}
4399 reinstall_for_flags = self._reinstall_nodes.get(pkg)
4400 reinst_expand_map = None
4401 if reinstall_for_flags:
4402 reinst_flags_map = map_to_use_expand(
4403 list(reinstall_for_flags), removeHidden=False)
4404 for k in list(reinst_flags_map):
4405 if not reinst_flags_map[k]:
4406 del reinst_flags_map[k]
4407 if not reinst_flags_map.get("USE"):
4408 reinst_expand_map = reinst_flags_map.copy()
4409 reinst_expand_map.pop("USE", None)
4410 if reinst_expand_map and \
4411 not set(reinst_expand_map).difference(
4413 use_expand_hidden = \
4414 set(use_expand_hidden).difference(
4417 cur_iuse_map, iuse_forced = \
4418 map_to_use_expand(cur_iuse, forcedFlags=True)
4419 cur_use_map = map_to_use_expand(cur_use)
4420 old_iuse_map = map_to_use_expand(old_iuse)
4421 old_use_map = map_to_use_expand(old_use)
4424 use_expand.insert(0, "USE")
4426 for key in use_expand:
4427 if key in use_expand_hidden:
4429 verboseadd += create_use_string(key.upper(),
4430 cur_iuse_map[key], iuse_forced[key],
4431 cur_use_map[key], old_iuse_map[key],
4432 old_use_map[key], is_new,
4433 reinst_flags_map.get(key))
4438 if pkg_type == "ebuild" and pkg_merge:
4440 myfilesdict = portdb.getfetchsizes(pkg_key,
4441 useflags=pkg_use, debug=self.edebug)
4442 except portage_exception.InvalidDependString, e:
4443 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4444 show_invalid_depstring_notice(x, src_uri, str(e))
4447 if myfilesdict is None:
4448 myfilesdict="[empty/missing/bad digest]"
4450 for myfetchfile in myfilesdict:
4451 if myfetchfile not in myfetchlist:
4452 mysize+=myfilesdict[myfetchfile]
4453 myfetchlist.append(myfetchfile)
4454 counters.totalsize += mysize
4455 verboseadd+=format_size(mysize)+" "
4458 # assign index for a previous version in the same slot
4459 has_previous = False
4460 repo_name_prev = None
4461 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4463 slot_matches = vardb.match(slot_atom)
4466 repo_name_prev = vardb.aux_get(slot_matches[0],
4469 # now use the data to generate output
4471 if pkg.installed or not has_previous:
4472 repoadd = repo_display.repoStr(repo_path_real)
4474 repo_path_prev = None
4476 repo_path_prev = portdb.getRepositoryPath(
4478 # To avoid false positives during the transition
4479 # period, don't show ? if the installed package
4480 # is missing a repository label. Stages starting
4481 # with 2008.0 will come with repository labels.
4482 ignore_missing_labels = True
4483 if (ignore_missing_labels and not repo_path_prev) or \
4484 repo_path_prev == repo_path_real:
4485 repoadd = repo_display.repoStr(repo_path_real)
4487 repoadd = "%s=>%s" % (
4488 repo_display.repoStr(repo_path_prev),
4489 repo_display.repoStr(repo_path_real))
4490 if repoadd and repoadd != "0":
4492 verboseadd += teal("[%s]" % repoadd)
4494 xs = [portage.cpv_getkey(pkg_key)] + \
4495 list(portage.catpkgsplit(pkg_key)[2:])
4502 if "COLUMNWIDTH" in self.settings:
4504 mywidth = int(self.settings["COLUMNWIDTH"])
4505 except ValueError, e:
4506 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4508 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4509 self.settings["COLUMNWIDTH"], noiselevel=-1)
4511 oldlp = mywidth - 30
4514 # Convert myoldbest from a list to a string.
4518 for pos, key in enumerate(myoldbest):
4519 key = portage.catpkgsplit(key)[2] + \
4520 "-" + portage.catpkgsplit(key)[3]
4521 if key[-3:] == "-r0":
4523 myoldbest[pos] = key
4524 myoldbest = blue("["+", ".join(myoldbest)+"]")
4527 root_config = self.roots[myroot]
4528 system_set = root_config.sets["system"]
4529 world_set = root_config.sets["world"]
4534 pkg_system = system_set.findAtomForPackage(pkg_key, metadata)
4535 pkg_world = world_set.findAtomForPackage(pkg_key, metadata)
4536 if not pkg_world and myroot == self.target_root and \
4537 favorites_set.findAtomForPackage(pkg_key, metadata):
4538 # Maybe it will be added to world now.
4539 if create_world_atom(pkg_key, metadata,
4540 favorites_set, root_config):
4542 except portage_exception.InvalidDependString:
4543 # This is reported elsewhere if relevant.
4546 def pkgprint(pkg_str):
4549 return colorize("PKG_MERGE_SYSTEM", pkg_str)
4551 return colorize("PKG_MERGE_WORLD", pkg_str)
4553 return colorize("PKG_MERGE", pkg_str)
4554 elif pkg_status == "uninstall":
4555 return colorize("PKG_UNINSTALL", pkg_str)
4558 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
4560 return colorize("PKG_NOMERGE_WORLD", pkg_str)
4562 return colorize("PKG_NOMERGE", pkg_str)
4567 if "--columns" in self.myopts:
4568 if "--quiet" in self.myopts:
4569 myprint=addl+" "+indent+pkgprint(pkg_cp)
4570 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4571 myprint=myprint+myoldbest
4572 myprint=myprint+darkgreen("to "+x[1])
4575 myprint = "[%s] %s%s" % \
4576 (pkgprint(pkg_status.ljust(13)),
4577 indent, pkgprint(pkg.cp))
4579 myprint = "[%s %s] %s%s" % \
4580 (pkgprint(pkg.type_name), addl,
4581 indent, pkgprint(pkg.cp))
4582 if (newlp-nc_len(myprint)) > 0:
4583 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4584 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4585 if (oldlp-nc_len(myprint)) > 0:
4586 myprint=myprint+" "*(oldlp-nc_len(myprint))
4587 myprint=myprint+myoldbest
4588 myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd
4591 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4593 myprint = "[" + pkg_type + " " + addl + "] "
4594 myprint += indent + pkgprint(pkg_key) + " " + \
4595 myoldbest + darkgreen("to " + myroot) + " " + \
4598 if "--columns" in self.myopts:
4599 if "--quiet" in self.myopts:
4600 myprint=addl+" "+indent+pkgprint(pkg_cp)
4601 myprint=myprint+" "+green(xs[1]+xs[2])+" "
4602 myprint=myprint+myoldbest
4605 myprint = "[%s] %s%s" % \
4606 (pkgprint(pkg_status.ljust(13)),
4607 indent, pkgprint(pkg.cp))
4609 myprint = "[%s %s] %s%s" % \
4610 (pkgprint(pkg.type_name), addl,
4611 indent, pkgprint(pkg.cp))
4612 if (newlp-nc_len(myprint)) > 0:
4613 myprint=myprint+(" "*(newlp-nc_len(myprint)))
4614 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4615 if (oldlp-nc_len(myprint)) > 0:
4616 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4617 myprint=myprint+myoldbest+" "+verboseadd
4620 myprint = "[%s] %s%s %s %s" % \
4621 (pkgprint(pkg_status.ljust(13)),
4622 indent, pkgprint(pkg.cpv),
4623 myoldbest, verboseadd)
4625 myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
4628 mysplit = [portage.cpv_getkey(pkg_key)] + \
4629 list(portage.catpkgsplit(pkg_key)[2:])
4630 if "--tree" not in self.myopts and mysplit and \
4631 len(mysplit) == 3 and mysplit[0] == "sys-apps/portage" and \
4634 if mysplit[2] == "r0":
4635 myversion = mysplit[1]
4637 myversion = "%s-%s" % (mysplit[1], mysplit[2])
4639 if myversion != portage.VERSION and "--quiet" not in self.myopts:
4640 if mylist_index < len(mylist) - 1:
4641 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4642 p.append(colorize("WARN", " then resume the merge."))
4655 sys.stdout.write(str(repo_display))
4657 if "--changelog" in self.myopts:
4659 for revision,text in changelogs:
4660 print bold('*'+revision)
4661 sys.stdout.write(text)
4664 self.display_problems()
	def display_problems(self):
		"""
		Display problems with the dependency graph such as slot collisions.
		This is called internally by display() to show the problems _after_
		the merge list where it is most likely to be seen, but if display()
		is not going to be called then this method should be called explicitly
		to ensure that the user is notified of problems with the graph.
		"""
		task_list = self._serialized_tasks_cache
		# Any blockers must be appended to the tail of the list,
		# so we only need to check the last item.
		have_blocker_conflict = bool(task_list and \
			(isinstance(task_list[-1], Blocker) and \
			not task_list[-1].satisfied))
		# The user is only notified of a slot conflict if
		# there are no unresolvable blocker conflicts.
		if not have_blocker_conflict:
			self._show_slot_collision_notice()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._missing_args:
			world_problems = False
			if "world" in self._sets:
				# Did any of the unmatched arguments come from the world set?
				for arg, atom in self._missing_args:
					if arg.name == "world":
						world_problems = True
			# NOTE(review): the guard around the two writes below (presumably
			# "if world_problems:") is not visible in this view -- confirm.
			sys.stderr.write("\n!!! Problems have been " + \
				"detected with your world file\n")
			sys.stderr.write("!!! Please run " + \
				green("emaint --check world")+"\n\n")

		if self._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(atom for arg, atom in \
				self._missing_args) + "\n")

		if self._pprovided_args:
			# Group package.provided conflicts by (argument, atom) and
			# remember which sets (refs) pulled each one in.
			# NOTE(review): the else-branches and the arg_refs/msg
			# initializations appear elided from this view -- confirm
			# against the full source.
			for arg, atom in self._pprovided_args:
				if isinstance(arg, SetArg):
					arg_atom = (atom, atom)
				arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:
			msg.append(bad("\nWARNING: "))
			if len(self._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.iteritems():
				problems_sets.update(refs)
				ref_string = ", ".join(["'%s'" % name for name in refs])
				ref_string = " pulled in by " + ref_string
				msg.append(" %s%s\n" % (colorize("INFORM", arg), ref_string))
			if "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append(" B) Uninstall offending packages (cleans them from world).\n")
				msg.append(" C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		# Warn about installed packages that are masked by the current
		# profile/configuration.
		masked_packages = []
		for pkg, pkgsettings in self._masked_installed:
			root_config = self.roots[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.metadata, mreasons))
		sys.stderr.write("\n" + colorize("BAD", "!!!") + \
			" The following installed packages are masked:\n")
		show_masked_packages(masked_packages)

		# Finally, replay any unsatisfied-dependency reports queued earlier.
		for pargs, kwargs in self._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
	def calc_changelog(self,ebuildpath,current,next):
		"""Return the list of ChangeLog (revision, text) entries that lie
		between the installed version *current* and the candidate version
		*next* of the package whose ebuild is at *ebuildpath*."""
		if ebuildpath == None or not os.path.exists(ebuildpath):
		# Strip the category prefix and any "-r0" suffix so the versions
		# compare cleanly against the ChangeLog's "*version" tags.
		current = '-'.join(portage.catpkgsplit(current)[1:])
		if current.endswith('-r0'):
			current = current[:-3]
		next = '-'.join(portage.catpkgsplit(next)[1:])
		if next.endswith('-r0'):
		changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
		changelog = open(changelogpath).read()
		except SystemExit, e:
			raise # Needed else can't exit
		divisions = self.find_changelog_tags(changelog)
		#print 'XX from',current,'to',next
		#for div,text in divisions: print 'XX',div
		# skip entries for all revisions above the one we are about to emerge
		for i in range(len(divisions)):
			if divisions[i][0]==next:
				divisions = divisions[i:]
		# find out how many entries we are going to display
		for i in range(len(divisions)):
			if divisions[i][0]==current:
				divisions = divisions[:i]
		# couldn't find the current revision in the list. display nothing
	def find_changelog_tags(self,changelog):
		"""Split a ChangeLog body into (release, text) chunks, one per
		"*version" header line, newest first."""
		# Each ChangeLog section starts with a line like "*pkg-1.2.3 (date)".
		match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
		if release is not None:
			divs.append((release,changelog))
		if release is not None:
			divs.append((release,changelog[:match.start()]))
		changelog = changelog[match.end():]
		release = match.group(1)
		# Normalize the tag: drop ".ebuild" and "-r0" suffixes so tags
		# compare cleanly against version strings.
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary."""
		# Options that imply "do not modify the world file" short-circuit.
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self.myopts:
		root_config = self.roots[self.target_root]
		world_set = root_config.sets["world"]
		world_set.load() # maybe it's changed on disk
		args_set = self._sets["args"]
		portdb = self.trees[self.target_root]["porttree"].dbapi
		added_favorites = set()
		for x in self._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			# Only packages that are staying put ("nomerge") are candidates.
			if pkg_status != "nomerge":
			metadata = dict(izip(self._mydbapi_keys,
				self.mydbapi[root].aux_get(pkg_key, self._mydbapi_keys)))
			myfavkey = create_world_atom(pkg_key, metadata,
				args_set, root_config)
			if myfavkey in added_favorites:
			added_favorites.add(myfavkey)
			except portage_exception.InvalidDependString, e:
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
		# Also record requested package sets (other than args/world).
		for k in self._sets:
			if k in ("args", "world"):
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		print ">>> Recording %s in \"world\" favorites file..." % \
			colorize("INFORM", a)
		world_set.update(all_added)
	def loadResumeCommand(self, resume_data):
		"""
		Add a resume command to the graph and validate it in the process. This
		will raise a PackageNotFound exception if a package is not available.
		"""
		self._sets["args"].update(resume_data.get("favorites", []))
		mergelist = resume_data.get("mergelist", [])
		# --skipfirst: drop the first pending merge task from the list.
		if mergelist and "--skipfirst" in self.myopts:
			for i, task in enumerate(mergelist):
				if isinstance(task, list) and \
					task and task[-1] == "merge":
		fakedb = self.mydbapi
		serialized_tasks = []
		# NOTE(review): the loop header iterating over the mergelist entries
		# appears elided from this view; each x below is one entry.
			# Entries must be 4-tuples [type, root, cpv, action]; skip junk.
			if not (isinstance(x, list) and len(x) == 4):
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
			if action not in ("merge", "uninstall"):
			mydb = trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
			metadata = dict(izip(self._mydbapi_keys,
				mydb.aux_get(pkg_key, self._mydbapi_keys)))
			# It does not exist or it is corrupt.
			if action == "uninstall":
			raise portage_exception.PackageNotFound(pkg_key)
			if pkg_type == "ebuild":
				# Recompute USE under the current configuration.
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg_key, mydb=metadata)
				metadata["USE"] = pkgsettings["PORTAGE_USE"]
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root=myroot,
			self._pkg_cache[pkg] = pkg
			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()
		self._serialized_tasks_cache = serialized_tasks
	class _internal_exception(portage_exception.PortageException):
		"""Base class for exceptions used internally by the depgraph."""
		def __init__(self, value=""):
			portage_exception.PortageException.__init__(self, value)
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		may not be known.
		"""
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
	class _dep_check_composite_db(portage.dbapi):
		"""
		A dbapi-like interface that is optimized for use in dep_check() calls.
		This is built on top of the existing depgraph package selection logic.
		Some packages that have been added to the graph may be masked from this
		view in order to influence the atom preference selection that occurs
		via dep_check().
		"""
		def __init__(self, depgraph, root):
			portage.dbapi.__init__(self)
			self._depgraph = depgraph
			# atom -> match results; cpv -> Package (filled by match()).
			self._match_cache = {}
			self._cpv_pkg_map = {}
	def match(self, atom):
		"""Return the cpvs matching atom, as chosen by the depgraph's
		package selection logic, one per relevant SLOT."""
		ret = self._match_cache.get(atom)
		# NOTE(review): the cache-hit return, the slots-set initialization
		# and the per-slot loop header appear elided from this view.
		atom = self._dep_expand(atom)
		pkg, existing = self._depgraph._select_package(self._root, atom)
		# Return the highest available from select_package() as well as
		# any matching slots in the graph db.
		slots.add(pkg.metadata["SLOT"])
		atom_cp = portage.dep_getkey(atom)
		if pkg.cp.startswith("virtual/"):
			# For new-style virtual lookahead that occurs inside
			# dep_check(), examine all slots. This is needed
			# so that newer slots will not unnecessarily be pulled in
			# when a satisfying lower slot is already installed. For
			# example, if virtual/jdk-1.4 is satisfied via kaffe then
			# there's no need to pull in a newer slot to satisfy a
			# virtual/jdk dependency.
			for db, pkg_type, built, installed, db_keys in \
				self._depgraph._filtered_trees[self._root]["dbs"]:
				for cpv in db.match(atom):
					if portage.cpv_getkey(cpv) != pkg.cp:
					slots.add(db.aux_get(cpv, ["SLOT"])[0])
		if self._visible(pkg):
			self._cpv_pkg_map[pkg.cpv] = pkg
		slots.remove(pkg.metadata["SLOT"])
		# For each remaining slot, select that slot's preferred package.
		slot_atom = "%s:%s" % (atom_cp, slots.pop())
		pkg, existing = self._depgraph._select_package(
			self._root, slot_atom)
		if not self._visible(pkg):
		self._cpv_pkg_map[pkg.cpv] = pkg
		self._cpv_sort_ascending(ret)
		self._match_cache[orig_atom] = ret
	def _visible(self, pkg):
		"""Decide whether pkg should be exposed through this composite view."""
		# Installed packages are hidden unless "selective" behavior applies.
		if pkg.installed and "selective" not in self._depgraph.myparams:
			arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
			except (StopIteration, portage_exception.InvalidDependString):
		# NOTE(review): the try/except framing and the surrounding
		# visibility check are partially elided from this view -- the line
		# below looks like the tail of a visible(pkgsettings, pkg) call.
			self._depgraph.pkgsettings[pkg.root], pkg):
		except portage_exception.InvalidDependString:
	def _dep_expand(self, atom):
		"""
		This is only needed for old installed packages that may
		contain atoms that are not fully qualified with a specific
		category. Emulate the cpv_expand() function that's used by
		dbapi.match() in cases like this. If there are multiple
		matches, it's often due to a new-style virtual that has
		been added, so try to filter those out to avoid raising
		an exception.
		"""
		root_config = self._depgraph.roots[self._root]
		expanded_atoms = self._depgraph._dep_expand(root_config, atom)
		if len(expanded_atoms) > 1:
			# Prefer the single non-virtual expansion, if there is one.
			non_virtual_atoms = []
			for x in expanded_atoms:
				if not portage.dep_getkey(x).startswith("virtual/"):
					non_virtual_atoms.append(x)
			if len(non_virtual_atoms) == 1:
				expanded_atoms = non_virtual_atoms
		if len(expanded_atoms) > 1:
			# compatible with portage.cpv_expand()
			raise ValueError([portage.dep_getkey(x) \
				for x in expanded_atoms])
		atom = expanded_atoms[0]
		# No expansion found: try a "null" category, then check whether the
		# bare package name names a virtual.
		null_atom = insert_category_into_atom(atom, "null")
		null_cp = portage.dep_getkey(null_atom)
		cat, atom_pn = portage.catsplit(null_cp)
		virts_p = root_config.settings.get_virts_p().get(atom_pn)
		# Allow the resolver to choose which virtual.
		atom = insert_category_into_atom(atom, "virtual")
		atom = insert_category_into_atom(atom, "null")
5064 def aux_get(self, cpv, wants):
5065 metadata = self._cpv_pkg_map[cpv].metadata
5066 return [metadata.get(x, "") for x in wants]
	class _package_cache(dict):
		# dict subclass caching Package instances; __setitem__ additionally
		# injects visible packages into the owning root's visible_pkgs db.
		def __init__(self, depgraph):
			self._depgraph = depgraph
5073 def __setitem__(self, k, v):
5074 dict.__setitem__(self, k, v)
5075 root_config = self._depgraph.roots[v.root]
5076 if visible(root_config.settings, v):
5077 root_config.visible_pkgs.cpv_inject(v)
class RepoDisplay(object):
	"""Collects the repository (PORTDIR/overlay) paths referenced during
	merge-list display and renders them as a numbered legend."""

	def __init__(self, roots):
		# repo_path -> legend index, in order of first use.
		self._shown_repos = {}
		self._unknown_repo = False
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Resolve symlinks so real ebuild paths can be matched against them.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		# Map a realpath'd repository location to its legend index string.
		real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			self._unknown_repo = True
		shown_repos = self._shown_repos
		repo_paths = self._repo_paths
		repo_path = repo_paths[real_index]
		index = shown_repos.get(repo_path)
		# First time this repo is shown: assign it the next free index.
		index = len(shown_repos)
		shown_repos[repo_path] = index

	# NOTE(review): the def line of the method below (apparently __str__,
	# which renders the legend) is missing from this view.
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the repo_path -> index mapping for ordered printing.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		for index, repo_path in enumerate(show_repo_paths):
			output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		output.append(" "+teal("[?]") + \
			" indicates that the source repository could not be determined\n")
		return "".join(output)
class PackageCounters(object):
	"""Tallies of merge-list categories, rendered by __str__ as the
	"Total: N packages (...)" summary line shown after the merge list."""
	# NOTE(review): most of __init__ (which presumably zeroes the counters
	# referenced below: upgrades, downgrades, new, newslot, reinst, uninst,
	# blocks, totalsize) is elided from this view.
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
			details.append("%s reinstall" % self.reinst)
			details.append("%s uninstall" % self.uninst)
			details.append("%s block" % self.blocks)
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			# Some fetch-restricted distfiles are not yet present locally.
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		return "".join(myoutput)
class MergeTask(object):
	"""Drives the actual merge of a computed package list: verifies
	manifests, spawns the parallel-fetch helper and merges/unmerges
	each package in order."""

	def __init__(self, settings, trees, myopts):
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		if settings.get("PORTAGE_DEBUG", "") == "1":
		# Per-root config clones so per-package changes don't leak.
		self.pkgsettings = {}
		self.pkgsettings[self.target_root] = portage.config(clone=settings)
		if self.target_root != "/":
			self.pkgsettings["/"] = \
				portage.config(clone=trees["/"]["vartree"].settings)
		# Pids of helper children (e.g. parallel fetch) to reap later.
		self._spawned_pids = []
		self._uninstall_queue = []
	def merge(self, mylist, favorites, mtimedb):
		# Delegate to _merge(); on the way out, hand any still-running
		# child pids to portage_exec so they are reaped at exit.
		# NOTE(review): this looks like the body of a try/finally around
		# _merge() whose framing lines are missing from this view -- confirm.
		return self._merge(mylist, favorites, mtimedb)
		if self._spawned_pids:
			portage.portage_exec.spawned_pids.extend(self._spawned_pids)
			self._spawned_pids = []
	def _poll_child_processes(self):
		"""
		After each merge, collect status from child processes
		in order to clean up zombies (such as the parallel-fetch
		process).
		"""
		spawned_pids = self._spawned_pids
		if not spawned_pids:
		for pid in list(spawned_pids):
			# waitpid(..., WNOHANG) == (0, 0) means the child is running.
			if os.waitpid(pid, os.WNOHANG) == (0, 0):
			# This pid has been cleaned up elsewhere,
			# so remove it from our list.
			spawned_pids.remove(pid)
	def _dequeue_uninstall_tasks(self, mtimedb):
		"""Unmerge all queued uninstall tasks, dropping each completed task
		from the head of the resume mergelist."""
		if not self._uninstall_queue:
		for uninst_task in self._uninstall_queue:
			root_config = self.trees[uninst_task.root]["root_config"]
			unmerge(root_config.settings, self.myopts,
				root_config.trees["vartree"], "unmerge",
				[uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
			# The head of the resume list is the uninstall just performed.
			del mtimedb["resume"]["mergelist"][0]
		del self._uninstall_queue[:]
5259 def _merge(self, mylist, favorites, mtimedb):
5261 buildpkgonly = "--buildpkgonly" in self.myopts
5262 fetchonly = "--fetchonly" in self.myopts or \
5263 "--fetch-all-uri" in self.myopts
5264 pretend = "--pretend" in self.myopts
5265 ldpath_mtimes = mtimedb["ldpath"]
5266 xterm_titles = "notitles" not in self.settings.features
5268 if "--resume" in self.myopts:
5270 print colorize("GOOD", "*** Resuming merge...")
5271 emergelog(xterm_titles, " *** Resuming merge...")
5273 # Verify all the manifests now so that the user is notified of failure
5274 # as soon as possible.
5275 if "--fetchonly" not in self.myopts and \
5276 "--fetch-all-uri" not in self.myopts and \
5277 "strict" in self.settings.features:
5278 shown_verifying_msg = False
5280 for myroot, pkgsettings in self.pkgsettings.iteritems():
5281 quiet_config = portage.config(clone=pkgsettings)
5282 quiet_config["PORTAGE_QUIET"] = "1"
5283 quiet_config.backup_changes("PORTAGE_QUIET")
5284 quiet_settings[myroot] = quiet_config
5287 if x[0] != "ebuild" or x[-1] == "nomerge":
5289 if not shown_verifying_msg:
5290 shown_verifying_msg = True
5291 print ">>> Verifying ebuild Manifests..."
5292 mytype, myroot, mycpv, mystatus = x
5293 portdb = self.trees[myroot]["porttree"].dbapi
5294 quiet_config = quiet_settings[myroot]
5295 quiet_config["O"] = os.path.dirname(portdb.findname(mycpv))
5296 if not portage.digestcheck([], quiet_config, strict=True):
5298 del x, mytype, myroot, mycpv, mystatus, quiet_config
5299 del shown_verifying_msg, quiet_settings
5301 root_config = self.trees[self.target_root]["root_config"]
5302 system_set = root_config.sets["system"]
5303 args_set = InternalPackageSet(favorites)
5304 world_set = root_config.sets["world"]
5306 mtimedb["resume"]["mergelist"] = [list(x) for x in mylist \
5307 if isinstance(x, Package)]
5310 mymergelist = mylist
5311 myfeat = self.settings.features[:]
5312 bad_resume_opts = set(["--ask", "--tree", "--changelog", "--skipfirst",
5314 if "parallel-fetch" in myfeat and \
5315 not ("--pretend" in self.myopts or \
5316 "--fetch-all-uri" in self.myopts or \
5317 "--fetchonly" in self.myopts):
5318 if "distlocks" not in myfeat:
5320 print red("!!!")+" parallel-fetching requires the distlocks feature enabled"
5321 print red("!!!")+" you have it disabled, thus parallel-fetching is being disabled"
5323 elif len(mymergelist) > 1:
5324 fetch_log = "/var/log/emerge-fetch.log"
5325 logfile = open(fetch_log, "w")
5326 fd_pipes = {1:logfile.fileno(), 2:logfile.fileno()}
5327 portage_util.apply_secpass_permissions(fetch_log,
5328 uid=portage.portage_uid, gid=portage.portage_gid,
5330 fetch_env = os.environ.copy()
5331 fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs"
5332 fetch_env["PORTAGE_NICENESS"] = "0"
5333 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
5334 fetch_args = [sys.argv[0], "--resume", "--fetchonly"]
5335 resume_opts = self.myopts.copy()
5336 # For automatic resume, we need to prevent
5337 # any of bad_resume_opts from leaking in
5338 # via EMERGE_DEFAULT_OPTS.
5339 resume_opts["--ignore-default-opts"] = True
5340 for myopt, myarg in resume_opts.iteritems():
5341 if myopt not in bad_resume_opts:
5343 fetch_args.append(myopt)
5345 fetch_args.append(myopt +"="+ myarg)
5346 self._spawned_pids.extend(
5347 portage.portage_exec.spawn(
5348 fetch_args, env=fetch_env,
5349 fd_pipes=fd_pipes, returnpid=True))
5350 logfile.close() # belongs to the spawned process
5351 del fetch_log, logfile, fd_pipes, fetch_env, fetch_args, \
5353 print ">>> starting parallel fetching pid %d" % \
5354 self._spawned_pids[-1]
5356 metadata_keys = [k for k in portage.auxdbkeys \
5357 if not k.startswith("UNUSED_")] + ["USE"]
5359 task_list = mymergelist
5360 # Filter mymergelist so that all the len(mymergelist) calls
5361 # below (for display) do not count Uninstall instances.
5362 mymergelist = [x for x in mymergelist if x[-1] == "merge"]
5365 if x[0] == "blocks":
5367 pkg_type, myroot, pkg_key, operation = x
5369 built = pkg_type != "ebuild"
5370 installed = pkg_type == "installed"
5371 portdb = self.trees[myroot]["porttree"].dbapi
5372 bindb = self.trees[myroot]["bintree"].dbapi
5373 vartree = self.trees[myroot]["vartree"]
5374 vardb = vartree.dbapi
5375 root_config = self.trees[myroot]["root_config"]
5376 pkgsettings = self.pkgsettings[myroot]
5377 if pkg_type == "blocks":
5379 elif pkg_type == "ebuild":
5382 if pkg_type == "binary":
5384 elif pkg_type == "installed":
5387 raise AssertionError("Package type: '%s'" % pkg_type)
5391 metadata = pkg.metadata
5393 if not (buildpkgonly or fetchonly or pretend):
5394 self._uninstall_queue.append(pkg)
5399 y = portdb.findname(pkg_key)
5400 if "--pretend" not in self.myopts:
5401 print "\n>>> Emerging (" + \
5402 colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \
5403 colorize("MERGE_LIST_PROGRESS", str(len(mymergelist))) + ") " + \
5404 colorize("GOOD", x[pkgindex]) + " to " + x[1]
5405 emergelog(xterm_titles, " >>> emerge ("+\
5406 str(mergecount)+" of "+str(len(mymergelist))+\
5407 ") "+x[pkgindex]+" to "+x[1])
5409 pkgsettings["EMERGE_FROM"] = x[0]
5410 pkgsettings.backup_changes("EMERGE_FROM")
5413 #buildsyspkg: Check if we need to _force_ binary package creation
5414 issyspkg = ("buildsyspkg" in myfeat) \
5415 and x[0] != "blocks" \
5416 and system_set.findAtomForPackage(pkg_key, metadata) \
5417 and "--buildpkg" not in self.myopts
5418 if x[0] in ["ebuild","blocks"]:
5419 if x[0] == "blocks" and "--fetchonly" not in self.myopts:
5420 raise Exception, "Merging a blocker"
5421 elif "--fetchonly" in self.myopts or \
5422 "--fetch-all-uri" in self.myopts:
5423 if "--fetch-all-uri" in self.myopts:
5424 retval = portage.doebuild(y, "fetch", myroot,
5425 pkgsettings, self.edebug,
5426 "--pretend" in self.myopts, fetchonly=1,
5427 fetchall=1, mydbapi=portdb, tree="porttree")
5429 retval = portage.doebuild(y, "fetch", myroot,
5430 pkgsettings, self.edebug,
5431 "--pretend" in self.myopts, fetchonly=1,
5432 mydbapi=portdb, tree="porttree")
5433 if (retval is None) or retval:
5435 print "!!! Fetch for",y,"failed, continuing..."
5437 failed_fetches.append(pkg_key)
5441 portage.doebuild_environment(y, "setup", myroot,
5442 pkgsettings, self.edebug, 1, portdb)
5443 catdir = os.path.dirname(pkgsettings["PORTAGE_BUILDDIR"])
5444 portage_util.ensure_dirs(os.path.dirname(catdir),
5445 uid=portage.portage_uid, gid=portage.portage_gid,
5447 builddir_lock = None
5450 catdir_lock = portage_locks.lockdir(catdir)
5451 portage_util.ensure_dirs(catdir,
5452 gid=portage.portage_gid,
5454 builddir_lock = portage_locks.lockdir(
5455 pkgsettings["PORTAGE_BUILDDIR"])
5457 portage_locks.unlockdir(catdir_lock)
5460 msg = " === (%s of %s) Cleaning (%s::%s)" % \
5461 (mergecount, len(mymergelist), pkg_key, y)
5462 short_msg = "emerge: (%s of %s) %s Clean" % \
5463 (mergecount, len(mymergelist), pkg_key)
5464 emergelog(xterm_titles, msg, short_msg=short_msg)
5465 retval = portage.doebuild(y, "clean", myroot,
5466 pkgsettings, self.edebug, cleanup=1,
5467 mydbapi=portdb, tree="porttree")
5468 if retval != os.EX_OK:
5470 if "--buildpkg" in self.myopts or issyspkg:
5472 print ">>> This is a system package, " + \
5473 "let's pack a rescue tarball."
5474 msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
5475 (mergecount, len(mymergelist), pkg_key, y)
5476 short_msg = "emerge: (%s of %s) %s Compile" % \
5477 (mergecount, len(mymergelist), pkg_key)
5478 emergelog(xterm_titles, msg, short_msg=short_msg)
5479 self.trees[myroot]["bintree"].prevent_collision(pkg_key)
5480 retval = portage.doebuild(y, "package", myroot,
5481 pkgsettings, self.edebug, mydbapi=portdb,
5483 if retval != os.EX_OK or \
5484 "--buildpkgonly" in self.myopts:
5485 portage.elog_process(pkg_key, pkgsettings)
5486 if retval != os.EX_OK:
5488 bintree = self.trees[myroot]["bintree"]
5489 if bintree.populated:
5490 bintree.inject(pkg_key)
5491 self._dequeue_uninstall_tasks(mtimedb)
5492 if "--buildpkgonly" not in self.myopts:
5493 msg = " === (%s of %s) Merging (%s::%s)" % \
5494 (mergecount, len(mymergelist), pkg_key, y)
5495 short_msg = "emerge: (%s of %s) %s Merge" % \
5496 (mergecount, len(mymergelist), pkg_key)
5497 emergelog(xterm_titles, msg, short_msg=short_msg)
5498 retval = portage.merge(pkgsettings["CATEGORY"],
5499 pkgsettings["PF"], pkgsettings["D"],
5500 os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
5501 "build-info"), myroot, pkgsettings,
5502 myebuild=pkgsettings["EBUILD"],
5503 mytree="porttree", mydbapi=portdb,
5504 vartree=vartree, prev_mtimes=ldpath_mtimes)
5505 if retval != os.EX_OK:
5507 elif "noclean" not in pkgsettings.features:
5508 portage.doebuild(y, "clean", myroot,
5509 pkgsettings, self.edebug, mydbapi=portdb,
5512 msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
5513 (mergecount, len(mymergelist), pkg_key, y)
5514 short_msg = "emerge: (%s of %s) %s Compile" % \
5515 (mergecount, len(mymergelist), pkg_key)
5516 emergelog(xterm_titles, msg, short_msg=short_msg)
5517 retval = portage.doebuild(y, "install", myroot,
5518 pkgsettings, self.edebug, vartree=vartree,
5519 mydbapi=portdb, tree="porttree",
5520 prev_mtimes=ldpath_mtimes)
5521 if retval != os.EX_OK:
5523 self._dequeue_uninstall_tasks(mtimedb)
5524 retval = portage.merge(pkgsettings["CATEGORY"],
5525 pkgsettings["PF"], pkgsettings["D"],
5526 os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
5527 "build-info"), myroot, pkgsettings,
5528 myebuild=pkgsettings["EBUILD"],
5529 mytree="porttree", mydbapi=portdb,
5530 vartree=vartree, prev_mtimes=ldpath_mtimes)
5531 if retval != os.EX_OK:
5535 portage_locks.unlockdir(builddir_lock)
5538 # Lock catdir for removal if empty.
5539 catdir_lock = portage_locks.lockdir(catdir)
5545 if e.errno not in (errno.ENOENT,
5546 errno.ENOTEMPTY, errno.EEXIST):
5549 portage_locks.unlockdir(catdir_lock)
5551 elif x[0]=="binary":
5552 self._dequeue_uninstall_tasks(mtimedb)
5554 mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
5555 if "--getbinpkg" in self.myopts:
5558 if "distlocks" in pkgsettings.features and \
5559 os.access(pkgsettings["PKGDIR"], os.W_OK):
5560 portage_util.ensure_dirs(os.path.dirname(mytbz2))
5561 tbz2_lock = portage_locks.lockfile(mytbz2,
5563 if self.trees[myroot]["bintree"].isremote(pkg_key):
5564 msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
5565 (mergecount, len(mymergelist), pkg_key, mytbz2)
5566 short_msg = "emerge: (%s of %s) %s Fetch" % \
5567 (mergecount, len(mymergelist), pkg_key)
5568 emergelog(xterm_titles, msg, short_msg=short_msg)
5570 self.trees[myroot]["bintree"].gettbz2(pkg_key)
5571 except portage_exception.FileNotFound:
5572 writemsg("!!! Fetching Binary failed " + \
5573 "for '%s'\n" % pkg_key, noiselevel=-1)
5576 failed_fetches.append(pkg_key)
5577 except portage_exception.DigestException, e:
5578 writemsg("\n!!! Digest verification failed:\n",
5580 writemsg("!!! %s\n" % e.value[0],
5582 writemsg("!!! Reason: %s\n" % e.value[1],
5584 writemsg("!!! Got: %s\n" % e.value[2],
5586 writemsg("!!! Expected: %s\n" % e.value[3],
5591 failed_fetches.append(pkg_key)
5594 portage_locks.unlockfile(tbz2_lock)
5596 if "--fetchonly" in self.myopts or \
5597 "--fetch-all-uri" in self.myopts:
5601 short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Merge Binary"
5602 emergelog(xterm_titles, " === ("+str(mergecount)+\
5603 " of "+str(len(mymergelist))+") Merging Binary ("+\
5604 x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg)
5605 retval = portage.pkgmerge(mytbz2, x[1], pkgsettings,
5607 vartree=self.trees[myroot]["vartree"],
5608 prev_mtimes=ldpath_mtimes)
5609 if retval != os.EX_OK:
5611 #need to check for errors
5612 if "--buildpkgonly" not in self.myopts:
5613 self.trees[x[1]]["vartree"].inject(x[2])
5614 myfavkey = portage.cpv_getkey(x[2])
5615 if not fetchonly and not pretend and \
5616 args_set.findAtomForPackage(pkg_key, metadata):
5619 myfavkey = create_world_atom(pkg_key, metadata,
5620 args_set, root_config)
5622 world_set.add(myfavkey)
5623 print ">>> Recording",myfavkey,"in \"world\" favorites file..."
5624 emergelog(xterm_titles, " === ("+\
5625 str(mergecount)+" of "+\
5626 str(len(mymergelist))+\
5627 ") Updating world file ("+x[pkgindex]+")")
5631 if "--pretend" not in self.myopts and \
5632 "--fetchonly" not in self.myopts and \
5633 "--fetch-all-uri" not in self.myopts:
5635 # Figure out if we need a restart.
5636 if myroot == "/" and pkg.cp == "sys-apps/portage":
5637 if "livecvsportage" not in self.settings.features:
5638 if len(mymergelist) > mergecount:
5639 emergelog(xterm_titles,
5640 " ::: completed emerge ("+ \
5641 str(mergecount)+" of "+ \
5642 str(len(mymergelist))+") "+ \
5644 emergelog(xterm_titles, " *** RESTARTING " + \
5645 "emerge via exec() after change of " + \
5647 del mtimedb["resume"]["mergelist"][0]
5649 portage.run_exitfuncs()
5650 mynewargv=[sys.argv[0],"--resume"]
5651 resume_opts = self.myopts.copy()
5652 # For automatic resume, we need to prevent
5653 # any of bad_resume_opts from leaking in
5654 # via EMERGE_DEFAULT_OPTS.
5655 resume_opts["--ignore-default-opts"] = True
5656 for myopt, myarg in resume_opts.iteritems():
5657 if myopt not in bad_resume_opts:
5659 mynewargv.append(myopt)
5661 mynewargv.append(myopt +"="+ myarg)
5662 # priority only needs to be adjusted on the first run
5663 os.environ["PORTAGE_NICENESS"] = "0"
5664 os.execv(mynewargv[0], mynewargv)
5666 if "--pretend" not in self.myopts and \
5667 "--fetchonly" not in self.myopts and \
5668 "--fetch-all-uri" not in self.myopts:
5669 if "noclean" not in self.settings.features:
5670 short_msg = "emerge: (%s of %s) %s Clean Post" % \
5671 (mergecount, len(mymergelist), x[pkgindex])
5672 emergelog(xterm_titles, (" === (%s of %s) " + \
5673 "Post-Build Cleaning (%s::%s)") % \
5674 (mergecount, len(mymergelist), x[pkgindex], y),
5675 short_msg=short_msg)
5676 emergelog(xterm_titles, " ::: completed emerge ("+\
5677 str(mergecount)+" of "+str(len(mymergelist))+") "+\
5680 # Unsafe for parallel merges
5681 del mtimedb["resume"]["mergelist"][0]
5682 # Commit after each merge so that --resume may still work in
5683 # in the event that portage is not allowed to exit normally
5684 # due to power failure, SIGKILL, etc...
5687 self._poll_child_processes()
5689 if "--pretend" not in self.myopts:
5690 emergelog(xterm_titles, " *** Finished. Cleaning up...")
5692 # We're out of the loop... We're done. Delete the resume data.
5693 if mtimedb.has_key("resume"):
5694 del mtimedb["resume"]
5697 #by doing an exit this way, --fetchonly can continue to try to
5698 #fetch everything even if a particular download fails.
5699 if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts:
5701 sys.stderr.write("\n\n!!! Some fetch errors were " + \
5702 "encountered. Please see above for details.\n\n")
5703 for cpv in failed_fetches:
5704 sys.stderr.write(" ")
5705 sys.stderr.write(cpv)
5706 sys.stderr.write("\n")
5707 sys.stderr.write("\n")
# Compute and (unless pretending) perform removal of installed packages for
# the "unmerge", "prune" and "clean" actions.  Builds candidate_catpkgs from
# the command-line arguments, partitions the matching installed packages into
# "selected" / "protected" / "omitted" sets per atom, prints a preview, and
# finally calls portage.unmerge() for each selected package.
# NOTE(review): this is a line-numbered listing with elided lines (gaps in the
# embedded numbering); several branches, try/except bodies and returns are not
# visible here.  Comments below describe only the visible logic.
5713 def unmerge(settings, myopts, vartree, unmerge_action, unmerge_files,
5714 ldpath_mtimes, autoclean=0, clean_world=1, ordered=0):
5715 candidate_catpkgs=[]
5717 xterm_titles = "notitles" not in settings.features
5719 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
5721 # At least the parent needs to exist for the lock file.
5722 portage_util.ensure_dirs(vdb_path)
5723 except portage_exception.PortageException:
# Lock the installed-package database (vardb) while the removal plan is
# computed, so concurrent portage processes cannot modify it underneath us.
5727 if os.access(vdb_path, os.W_OK):
5728 vdb_lock = portage_locks.lockdir(vdb_path)
# Expand the "system" set: for virtuals, a provider is only added to syslist
# when it is the sole installed provider of that virtual.
5729 realsyslist = getlist(settings, "system")
5731 for x in realsyslist:
5732 mycp = portage.dep_getkey(x)
5733 if mycp in settings.getvirtuals():
5735 for provider in settings.getvirtuals()[mycp]:
5736 if vartree.dbapi.match(provider):
5737 providers.append(provider)
5738 if len(providers) == 1:
5739 syslist.extend(providers)
5741 syslist.append(mycp)
5743 mysettings = portage.config(clone=settings)
# "world"/"system" (or no arguments) are only meaningful for prune/clean;
# plain "emerge unmerge" requires explicit package names.
5745 if not unmerge_files or "world" in unmerge_files or \
5746 "system" in unmerge_files:
5747 if "unmerge"==unmerge_action:
5749 print bold("emerge unmerge") + " can only be used with " + \
5750 "specific package names, not with "+bold("world")+" or"
5751 print bold("system")+" targets."
5758 # process all arguments and add all
5759 # valid db entries to candidate_catpkgs
5761 if not unmerge_files or "world" in unmerge_files:
5762 candidate_catpkgs.extend(vartree.dbapi.cp_all())
5763 elif "system" in unmerge_files:
5764 candidate_catpkgs.extend(getlist(settings, "system"))
5766 #we've got command-line arguments
5767 if not unmerge_files:
5768 print "\nNo packages to unmerge have been provided.\n"
# Classify each argument: a dependency atom / cat/pkg name, or a filesystem
# path that must point inside the vardb.
5770 for x in unmerge_files:
5771 arg_parts = x.split('/')
5772 if x[0] not in [".","/"] and \
5773 arg_parts[-1][-7:] != ".ebuild":
5774 #possible cat/pkg or dep; treat as such
5775 candidate_catpkgs.append(x)
5776 elif unmerge_action in ["prune","clean"]:
5777 print "\n!!! Prune and clean do not accept individual" + \
5778 " ebuilds as arguments;\n skipping.\n"
5781 # it appears that the user is specifying an installed
5782 # ebuild and we're in "unmerge" mode, so it's ok.
5783 if not os.path.exists(x):
5784 print "\n!!! The path '"+x+"' doesn't exist.\n"
5787 absx = os.path.abspath(x)
5788 sp_absx = absx.split("/")
5789 if sp_absx[-1][-7:] == ".ebuild":
5791 absx = "/".join(sp_absx)
5793 sp_absx_len = len(sp_absx)
5795 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
5796 vdb_len = len(vdb_path)
5798 sp_vdb = vdb_path.split("/")
5799 sp_vdb_len = len(sp_vdb)
# A valid vardb entry directory must contain a CONTENTS file.
5801 if not os.path.exists(absx+"/CONTENTS"):
5802 print "!!! Not a valid db dir: "+str(absx)
5805 if sp_absx_len <= sp_vdb_len:
5806 # The Path is shorter... so it can't be inside the vdb.
5809 print "\n!!!",x,"cannot be inside "+ \
5810 vdb_path+"; aborting.\n"
# Component-wise prefix check: the given path must live under vdb_path.
5813 for idx in range(0,sp_vdb_len):
5814 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
5817 print "\n!!!", x, "is not inside "+\
5818 vdb_path+"; aborting.\n"
# The path relative to the vdb is "category/package-version"; turn it
# into an exact "=" atom.
5821 print "="+"/".join(sp_absx[sp_vdb_len:])
5822 candidate_catpkgs.append(
5823 "="+"/".join(sp_absx[sp_vdb_len:]))
5826 if (not "--quiet" in myopts):
5828 if settings["ROOT"] != "/":
5829 print darkgreen(newline+ \
5830 ">>> Using system located in ROOT tree "+settings["ROOT"])
5831 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
5832 not ("--quiet" in myopts):
5833 print darkgreen(newline+\
5834 ">>> These are the packages that would be unmerged:")
5836 # Preservation of order is required for --depclean and --prune so
5837 # that dependencies are respected. Use all_selected to eliminate
5838 # duplicate packages since the same package may be selected by
5841 all_selected = set()
5842 for x in candidate_catpkgs:
5843 # cycle through all our candidate deps and determine
5844 # what will and will not get unmerged
5846 mymatch=localtree.dep_match(x)
5849 except ValueError, errpkgs:
5850 print "\n\n!!! The short ebuild name \"" + \
5851 x + "\" is ambiguous. Please specify"
5852 print "!!! one of the following fully-qualified " + \
5853 "ebuild names instead:\n"
5854 for i in errpkgs[0]:
5855 print " " + green(i)
5859 if not mymatch and x[0] not in "<>=~":
5860 #add a "=" if missing
5861 mymatch=localtree.dep_match("="+x)
5863 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
5864 (x, unmerge_action), noiselevel=-1)
# One entry per candidate atom; mykey indexes the entry just appended.
5867 {"protected": set(), "selected": set(), "omitted": set()})
5868 mykey = len(pkgmap) - 1
5869 if unmerge_action=="unmerge":
5871 if y not in all_selected:
5872 pkgmap[mykey]["selected"].add(y)
# prune: keep (protect) the single best installed version; on a slot
# collision prefer the higher vardb counter (most recently installed).
5874 elif unmerge_action == "prune":
5875 if len(mymatch) == 1:
5877 best_version = mymatch[0]
5878 best_slot = vartree.getslot(best_version)
5879 best_counter = vartree.dbapi.cpv_counter(best_version)
5880 for mypkg in mymatch[1:]:
5881 myslot = vartree.getslot(mypkg)
5882 mycounter = vartree.dbapi.cpv_counter(mypkg)
5883 if (myslot == best_slot and mycounter > best_counter) or \
5884 mypkg == portage.best([mypkg, best_version]):
5885 if myslot == best_slot:
5886 if mycounter < best_counter:
5887 # On slot collision, keep the one with the
5888 # highest counter since it is the most
5889 # recently installed.
5891 best_version = mypkg
5893 best_counter = mycounter
5894 pkgmap[mykey]["protected"].add(best_version)
5895 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
5896 if mypkg != best_version and mypkg not in all_selected)
5897 all_selected.update(pkgmap[mykey]["selected"])
5899 # unmerge_action == "clean"
# clean: group matches by slot (keyed by vardb counter) and protect the
# last-merged package in each slot; everything else is selected.
5901 for mypkg in mymatch:
5902 if unmerge_action=="clean":
5903 myslot=localtree.getslot(mypkg)
5905 # since we're pruning, we don't care about slots
5906 # and put all the pkgs in together
5908 if not slotmap.has_key(myslot):
5910 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)]=mypkg
5911 for myslot in slotmap:
5912 counterkeys=slotmap[myslot].keys()
5917 pkgmap[mykey]["protected"].add(
5918 slotmap[myslot][counterkeys[-1]])
5920 #be pretty and get them in order of merge:
5921 for ckey in counterkeys:
5922 mypkg = slotmap[myslot][ckey]
5923 if mypkg not in all_selected:
5924 pkgmap[mykey]["selected"].add(mypkg)
5925 all_selected.add(mypkg)
5926 # ok, now the last-merged package
5927 # is protected, and the rest are selected
5928 numselected = len(all_selected)
5929 if global_unmerge and not numselected:
5930 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
5934 portage.writemsg_stdout(
5935 "\n>>> No packages selected for removal by " + \
5936 unmerge_action + "\n")
5940 portage_locks.unlockdir(vdb_lock)
5942 # Unmerge order only matters in some cases
# When order is irrelevant, merge per-atom entries into one entry per
# category/package and sort alphabetically for display.
5946 selected = d["selected"]
5949 cp = portage.cpv_getkey(iter(selected).next())
5950 cp_dict = unordered.get(cp)
5953 unordered[cp] = cp_dict
5956 for k, v in d.iteritems():
5957 cp_dict[k].update(v)
5958 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Second pass: compute "omitted" (installed versions of the same cp that are
# neither selected nor protected) and print the preview table.
5960 for x in xrange(len(pkgmap)):
5961 selected = pkgmap[x]["selected"]
5964 for mytype, mylist in pkgmap[x].iteritems():
5965 if mytype == "selected":
5967 mylist.difference_update(all_selected)
5968 cp = portage.cpv_getkey(iter(selected).next())
5969 for y in localtree.dep_match(cp):
5970 if y not in pkgmap[x]["omitted"] and \
5971 y not in pkgmap[x]["selected"] and \
5972 y not in pkgmap[x]["protected"] and \
5973 y not in all_selected:
5974 pkgmap[x]["omitted"].add(y)
5975 if global_unmerge and not pkgmap[x]["selected"]:
5976 #avoid cluttering the preview printout with stuff that isn't getting unmerged
# Warn loudly (with a delay unless pretending/asking) before removing a
# package that belongs to the system profile.
5978 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
5979 print colorize("BAD","\a\n\n!!! '%s' is part of your system profile." % cp)
5980 print colorize("WARN","\a!!! Unmerging it may be damaging to your system.\n")
5981 if "--pretend" not in myopts and "--ask" not in myopts:
5982 countdown(int(settings["EMERGE_WARNING_DELAY"]),
5983 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
5984 if "--quiet" not in myopts:
5985 print "\n "+bold(cp)
5987 print bold(cp)+": ",
5988 for mytype in ["selected","protected","omitted"]:
5989 if "--quiet" not in myopts:
5990 portage.writemsg_stdout((mytype + ": ").rjust(14), noiselevel=-1)
5991 if pkgmap[x][mytype]:
5992 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] \
5993 for mypkg in pkgmap[x][mytype]]
5994 sorted_pkgs.sort(portage.pkgcmp)
5995 for pn, ver, rev in sorted_pkgs:
5999 myversion = ver + "-" + rev
6000 if mytype=="selected":
6001 portage.writemsg_stdout(
6002 colorize("UNMERGE_WARN", myversion + " "), noiselevel=-1)
6004 portage.writemsg_stdout(
6005 colorize("GOOD", myversion + " "), noiselevel=-1)
6007 portage.writemsg_stdout("none ", noiselevel=-1)
6008 if "--quiet" not in myopts:
6009 portage.writemsg_stdout("\n", noiselevel=-1)
6010 if "--quiet" in myopts:
6011 portage.writemsg_stdout("\n", noiselevel=-1)
6013 portage.writemsg_stdout("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
6014 " packages are slated for removal.\n")
6015 portage.writemsg_stdout(">>> " + colorize("GOOD", "'Protected'") + \
6016 " and " + colorize("GOOD", "'omitted'") + \
6017 " packages will not be removed.\n\n")
6019 if "--pretend" in myopts:
6020 #we're done... return
6022 if "--ask" in myopts:
6023 if userquery("Would you like to unmerge these packages?")=="No":
6024 # enter pretend mode for correct formatting of results
6025 myopts["--pretend"] = True
6030 #the real unmerging begins, after a short delay....
6032 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
# Perform the actual unmerges; failures are logged, successes may also
# remove the package from the world file (elided branch around 6048 —
# presumably gated on clean_world; verify against full source).
6034 for x in xrange(len(pkgmap)):
6035 for y in pkgmap[x]["selected"]:
6036 print ">>> Unmerging "+y+"..."
6037 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
6038 mysplit=y.split("/")
6040 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
6041 mysettings, unmerge_action not in ["clean","prune"],
6042 vartree=vartree, ldpath_mtimes=ldpath_mtimes)
6043 if retval != os.EX_OK:
6044 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
6048 world_clean_package(vartree.dbapi, y)
6049 emergelog(xterm_titles, " >>> unmerge success: "+y)
# Regenerate the GNU info "dir" index for any info directory whose mtime
# changed since the last run (tracked in prev_mtimes), by invoking
# /usr/bin/install-info once per info file.  Prints a summary of processed
# files and errors.
# NOTE(review): line-numbered listing with elided lines; loop/try structure is
# only partially visible.  Comments describe the visible logic only.
6052 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
6054 if os.path.exists("/usr/bin/install-info"):
# Collect info dirs whose mtime differs from the recorded one.
6059 inforoot=normpath(root+z)
6060 if os.path.isdir(inforoot):
6061 infomtime = long(os.stat(inforoot).st_mtime)
6062 if inforoot not in prev_mtimes or \
6063 prev_mtimes[inforoot] != infomtime:
6064 regen_infodirs.append(inforoot)
6066 if not regen_infodirs:
6067 portage.writemsg_stdout("\n "+green("*")+" GNU info directory index is up-to-date.\n")
6069 portage.writemsg_stdout("\n "+green("*")+" Regenerating GNU info directory index...\n")
# The "dir" index may exist plain or compressed.
6071 dir_extensions = ("", ".gz", ".bz2")
6074 for inforoot in regen_infodirs:
6078 if not os.path.isdir(inforoot):
6081 file_list = os.listdir(inforoot)
6083 dir_file = os.path.join(inforoot, "dir")
6084 moved_old_dir = False
# Skip hidden entries, subdirectories, and the dir index files themselves.
6087 if x.startswith(".") or \
6088 os.path.isdir(os.path.join(inforoot, x)):
6090 if x.startswith("dir"):
6092 for ext in dir_extensions:
6093 if x == "dir" + ext or \
6094 x == "dir" + ext + ".old":
# Before the first file is processed, move any existing dir index aside so
# install-info regenerates it from scratch.
6099 if processed_count == 0:
6100 for ext in dir_extensions:
6102 os.rename(dir_file + ext, dir_file + ext + ".old")
6103 moved_old_dir = True
6104 except EnvironmentError, e:
6105 if e.errno != errno.ENOENT:
6108 processed_count += 1
6109 myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
6110 existsstr="already exists, for file `"
# Classify install-info's output: duplicates and missing-dir-entry warnings
# are harmless; anything else is accumulated as an error message.
6112 if re.search(existsstr,myso):
6113 # Already exists... Don't increment the count for this.
6115 elif myso[:44]=="install-info: warning: no info dir entry in ":
6116 # This info file doesn't contain a DIR-header: install-info produces this
6117 # (harmless) warning (the --quiet switch doesn't seem to work).
6118 # Don't increment the count for this.
6122 errmsg += myso + "\n"
6125 if moved_old_dir and not os.path.exists(dir_file):
6126 # We didn't generate a new dir file, so put the old file
6127 # back where it was originally found.
6128 for ext in dir_extensions:
6130 os.rename(dir_file + ext + ".old", dir_file + ext)
6131 except EnvironmentError, e:
6132 if e.errno != errno.ENOENT:
6136 # Clean dir.old cruft so that they don't prevent
6137 # unmerge of otherwise empty directories.
6138 for ext in dir_extensions:
6140 os.unlink(dir_file + ext + ".old")
6141 except EnvironmentError, e:
6142 if e.errno != errno.ENOENT:
6146 #update mtime so we can potentially avoid regenerating.
6147 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
6150 print " "+yellow("*")+" Processed",icount,"info files;",badcount,"errors."
6154 print " "+green("*")+" Processed",icount,"info files."
# Post-merge housekeeping: log the exit status, flush queued elog "echo"
# output, refresh the GNU info index (unless FEATURES=noinfo), and report
# pending CONFIG_PROTECT updates.
# NOTE(review): line-numbered listing with elided lines; comments describe the
# visible logic only.
6157 def post_emerge(settings, mtimedb, retval):
6158 target_root = settings["ROOT"]
6159 info_mtimes = mtimedb["info"]
6161 # Load the most current variables from ${ROOT}/etc/profile.env
6163 settings.regenerate()
6166 config_protect = settings.get("CONFIG_PROTECT","").split()
6167 infodirs = settings.get("INFOPATH","").split(":") + \
6168 settings.get("INFODIR","").split(":")
6172 if retval == os.EX_OK:
6173 exit_msg = " *** exiting successfully."
6175 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
6176 emergelog("notitles" not in settings.features, exit_msg)
6178 # Dump the mod_echo output now so that our other notifications are shown
6181 from elog_modules import mod_echo
6183 pass # happens during downgrade to a version without the module
# Lock the vardb while the info-index check runs; unlocked afterwards.
6187 vdb_path = os.path.join(target_root, portage.VDB_PATH)
6188 portage_util.ensure_dirs(vdb_path)
6189 vdb_lock = portage_locks.lockdir(vdb_path)
6191 if "noinfo" not in settings.features:
6192 chk_updated_info_files(target_root, infodirs, info_mtimes, retval)
6195 portage_locks.unlockdir(vdb_lock)
6197 chk_updated_cfg_files(target_root, config_protect)
# Scan each CONFIG_PROTECT path under target_root for pending "._cfg????_*"
# update files (via an external `find`) and print a notice telling the user to
# update their config files.
# NOTE(review): line-numbered listing with elided lines; comments describe the
# visible logic only.
6202 def chk_updated_cfg_files(target_root, config_protect):
6204 #number of directories with some protect files in them
6206 for x in config_protect:
6207 x = os.path.join(target_root, x.lstrip(os.path.sep))
6208 if not os.access(x, os.W_OK):
6209 # Avoid Permission denied errors generated
6213 mymode = os.lstat(x).st_mode
6216 if stat.S_ISLNK(mymode):
6217 # We want to treat it like a directory if it
6218 # is a symlink to an existing directory.
6220 real_mode = os.stat(x).st_mode
6221 if stat.S_ISDIR(real_mode):
# Directories are searched recursively; single protected files only in
# their own directory (-maxdepth 1), matching their basename.
6225 if stat.S_ISDIR(mymode):
6226 mycommand = "find '%s' -iname '._cfg????_*'" % x
6228 mycommand = "find '%s' -maxdepth 1 -iname '._cfg????_%s'" % \
6229 os.path.split(x.rstrip(os.path.sep))
6230 mycommand += " ! -iname '.*~' ! -iname '.*.bak' -print0"
6231 a = commands.getstatusoutput(mycommand)
6233 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
6235 # Show the error message alone, sending stdout to /dev/null.
6236 os.system(mycommand + " 1>/dev/null")
# -print0 output: NUL-separated paths.
6238 files = a[1].split('\0')
6239 # split always produces an empty string as the last element
6240 if files and not files[-1]:
6244 print "\n"+colorize("WARN", " * IMPORTANT:"),
6245 if stat.S_ISDIR(mymode):
6246 print "%d config files in '%s' need updating." % \
6249 print "config file '%s' needs updating." % x
6252 print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
6253 " section of the " + bold("emerge")
6254 print " "+yellow("*")+" man page to learn how to update config files."
# Insert "category/" into a category-less atom, just before its first word
# character (i.e. after any leading operators like ">=").
# NOTE(review): the no-match branch and the return statement are elided in
# this listing (lines 6258, 6261+); presumably returns `ret` on a match and
# some sentinel otherwise — verify against full source.
6256 def insert_category_into_atom(atom, category):
6257 alphanum = re.search(r'\w', atom)
6259 ret = atom[:alphanum.start()] + "%s/" % category + \
6260 atom[alphanum.start():]
# Return True if `x` is a valid package atom, allowing the category part to be
# omitted: a dummy "cat/" prefix is inserted before validation.
# NOTE(review): the guard between 6267 and 6269 (presumably `if alphanum:`) is
# elided in this listing — verify against full source.
6265 def is_valid_package_atom(x):
6267 alphanum = re.search(r'\w', x)
6269 x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
6270 return portage.isvalidatom(x)
# Print a pointer to the Gentoo Handbook section that explains blocked
# packages.  (Lines 6273/6276 are elided in this listing — likely blank-line
# prints.)
6272 def show_blocker_docs_link():
6274 print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
6275 print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
6277 print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
def show_mask_docs():
    """Point the user at the documentation covering masked packages."""
    print("For more information, see the MASKED PACKAGES section in the emerge")
    print("man page or refer to the Gentoo Handbook.")
6284 def action_sync(settings, trees, mtimedb, myopts, myaction):
6285 xterm_titles = "notitles" not in settings.features
6286 emergelog(xterm_titles, " === sync")
6287 myportdir = settings.get("PORTDIR", None)
6289 sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
6291 if myportdir[-1]=="/":
6292 myportdir=myportdir[:-1]
6293 if not os.path.exists(myportdir):
6294 print ">>>",myportdir,"not found, creating it."
6295 os.makedirs(myportdir,0755)
6296 syncuri=settings["SYNC"].rstrip()
6298 updatecache_flg = False
6299 if myaction == "metadata":
6300 print "skipping sync"
6301 updatecache_flg = True
6302 elif syncuri[:8]=="rsync://":
6303 if not os.path.exists("/usr/bin/rsync"):
6304 print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
6305 print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
6310 import shlex, StringIO
6311 if settings["PORTAGE_RSYNC_OPTS"] == "":
6312 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
6314 "--recursive", # Recurse directories
6315 "--links", # Consider symlinks
6316 "--safe-links", # Ignore links outside of tree
6317 "--perms", # Preserve permissions
6318 "--times", # Preserive mod times
6319 "--compress", # Compress the data transmitted
6320 "--force", # Force deletion on non-empty dirs
6321 "--whole-file", # Don't do block transfers, only entire files
6322 "--delete", # Delete files that aren't in the master tree
6323 "--stats", # Show final statistics about what was transfered
6324 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
6325 "--exclude=/distfiles", # Exclude distfiles from consideration
6326 "--exclude=/local", # Exclude local from consideration
6327 "--exclude=/packages", # Exclude packages from consideration
6331 # The below validation is not needed when using the above hardcoded
6334 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
6335 lexer = shlex.shlex(StringIO.StringIO(
6336 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
6337 lexer.whitespace_split = True
6338 rsync_opts.extend(lexer)
6341 for opt in ("--recursive", "--times"):
6342 if opt not in rsync_opts:
6343 portage.writemsg(yellow("WARNING:") + " adding required option " + \
6344 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
6345 rsync_opts.append(opt)
6347 for exclude in ("distfiles", "local", "packages"):
6348 opt = "--exclude=/%s" % exclude
6349 if opt not in rsync_opts:
6350 portage.writemsg(yellow("WARNING:") + \
6351 " adding required option %s not included in " % opt + \
6352 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
6353 rsync_opts.append(opt)
6355 if settings["RSYNC_TIMEOUT"] != "":
6356 portage.writemsg("WARNING: usage of RSYNC_TIMEOUT is deprecated, " + \
6357 "use PORTAGE_RSYNC_EXTRA_OPTS instead\n")
6359 mytimeout = int(settings["RSYNC_TIMEOUT"])
6360 rsync_opts.append("--timeout=%d" % mytimeout)
6361 except ValueError, e:
6362 portage.writemsg("!!! %s\n" % str(e))
6364 # TODO: determine options required for official servers
6365 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
6367 def rsync_opt_startswith(opt_prefix):
6368 for x in rsync_opts:
6369 if x.startswith(opt_prefix):
6373 if not rsync_opt_startswith("--timeout="):
6374 rsync_opts.append("--timeout=%d" % mytimeout)
6376 for opt in ("--compress", "--whole-file"):
6377 if opt not in rsync_opts:
6378 portage.writemsg(yellow("WARNING:") + " adding required option " + \
6379 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
6380 rsync_opts.append(opt)
6382 if "--quiet" in myopts:
6383 rsync_opts.append("--quiet") # Shut up a lot
6385 rsync_opts.append("--verbose") # Print filelist
6387 if "--verbose" in myopts:
6388 rsync_opts.append("--progress") # Progress meter for each file
6390 if "--debug" in myopts:
6391 rsync_opts.append("--checksum") # Force checksum on all files
6393 if settings["RSYNC_EXCLUDEFROM"] != "":
6394 portage.writemsg(yellow("WARNING:") + \
6395 " usage of RSYNC_EXCLUDEFROM is deprecated, use " + \
6396 "PORTAGE_RSYNC_EXTRA_OPTS instead\n")
6397 if os.path.exists(settings["RSYNC_EXCLUDEFROM"]):
6398 rsync_opts.append("--exclude-from=%s" % \
6399 settings["RSYNC_EXCLUDEFROM"])
6401 portage.writemsg("!!! RSYNC_EXCLUDEFROM specified," + \
6402 " but file does not exist.\n")
6404 if settings["RSYNC_RATELIMIT"] != "":
6405 portage.writemsg(yellow("WARNING:") + \
6406 " usage of RSYNC_RATELIMIT is deprecated, use " + \
6407 "PORTAGE_RSYNC_EXTRA_OPTS instead")
6408 rsync_opts.append("--bwlimit=%s" % \
6409 settings["RSYNC_RATELIMIT"])
6411 # Real local timestamp file.
6412 servertimestampfile = os.path.join(
6413 myportdir, "metadata", "timestamp.chk")
6415 content = portage_util.grabfile(servertimestampfile)
6419 mytimestamp = time.mktime(time.strptime(content[0],
6420 "%a, %d %b %Y %H:%M:%S +0000"))
6421 except (OverflowError, ValueError):
6426 rsync_initial_timeout = \
6427 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
6429 rsync_initial_timeout = 15
6432 if settings.has_key("RSYNC_RETRIES"):
6433 print yellow("WARNING:")+" usage of RSYNC_RETRIES is deprecated, use PORTAGE_RSYNC_RETRIES instead"
6434 maxretries=int(settings["RSYNC_RETRIES"])
6436 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
6437 except SystemExit, e:
6438 raise # Needed else can't exit
6440 maxretries=3 #default number of retries
6443 user_name, hostname, port = re.split(
6444 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
6447 if user_name is None:
6449 updatecache_flg=True
6450 all_rsync_opts = set(rsync_opts)
6451 lexer = shlex.shlex(StringIO.StringIO(
6452 settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
6453 lexer.whitespace_split = True
6454 extra_rsync_opts = list(lexer)
6456 all_rsync_opts.update(extra_rsync_opts)
6457 family = socket.AF_INET
6458 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
6459 family = socket.AF_INET
6460 elif socket.has_ipv6 and \
6461 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
6462 family = socket.AF_INET6
6464 SERVER_OUT_OF_DATE = -1
6465 EXCEEDED_MAX_RETRIES = -2
6471 for addrinfo in socket.getaddrinfo(
6472 hostname, None, family, socket.SOCK_STREAM):
6473 if addrinfo[0] == socket.AF_INET6:
6474 # IPv6 addresses need to be enclosed in square brackets
6475 ips.append("[%s]" % addrinfo[4][0])
6477 ips.append(addrinfo[4][0])
6478 from random import shuffle
6480 except SystemExit, e:
6481 raise # Needed else can't exit
6482 except Exception, e:
6483 print "Notice:",str(e)
6488 dosyncuri = syncuri.replace(
6489 "//" + user_name + hostname + port + "/",
6490 "//" + user_name + ips[0] + port + "/", 1)
6491 except SystemExit, e:
6492 raise # Needed else can't exit
6493 except Exception, e:
6494 print "Notice:",str(e)
6498 if "--ask" in myopts:
6499 if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
6504 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
6505 if "--quiet" not in myopts:
6506 print ">>> Starting rsync with "+dosyncuri+"..."
6508 emergelog(xterm_titles,
6509 ">>> Starting retry %d of %d with %s" % \
6510 (retries,maxretries,dosyncuri))
6511 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
6513 if mytimestamp != 0 and "--quiet" not in myopts:
6514 print ">>> Checking server timestamp ..."
6516 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
6518 if "--debug" in myopts:
6523 # Even if there's no timestamp available locally, fetch the
6524 # timestamp anyway as an initial probe to verify that the server is
6525 # responsive. This protects us from hanging indefinitely on a
6526 # connection attempt to an unresponsive server which rsync's
6527 # --timeout option does not prevent.
6529 # Temporary file for remote server timestamp comparison.
6530 from tempfile import mkstemp
6531 fd, tmpservertimestampfile = mkstemp()
6533 mycommand = rsynccommand[:]
6534 mycommand.append(dosyncuri.rstrip("/") + \
6535 "/metadata/timestamp.chk")
6536 mycommand.append(tmpservertimestampfile)
6541 def timeout_handler(signum, frame):
6542 raise portage_exception.PortageException("timed out")
6543 signal.signal(signal.SIGALRM, timeout_handler)
6544 # Timeout here in case the server is unresponsive. The
6545 # --timeout rsync option doesn't apply to the initial
6546 # connection attempt.
6547 if rsync_initial_timeout:
6548 signal.alarm(rsync_initial_timeout)
6550 mypids.extend(portage_exec.spawn(
6551 mycommand, env=settings.environ(), returnpid=True))
6552 exitcode = os.waitpid(mypids[0], 0)[1]
6553 content = portage.grabfile(tmpservertimestampfile)
6555 if rsync_initial_timeout:
6558 os.unlink(tmpservertimestampfile)
6561 except portage_exception.PortageException, e:
6565 if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
6566 os.kill(mypids[0], signal.SIGTERM)
6567 os.waitpid(mypids[0], 0)
6568 # This is the same code rsync uses for timeout.
6571 if exitcode != os.EX_OK:
6573 exitcode = (exitcode & 0xff) << 8
6575 exitcode = exitcode >> 8
6577 portage_exec.spawned_pids.remove(mypids[0])
6580 servertimestamp = time.mktime(time.strptime(
6581 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
6582 except (OverflowError, ValueError):
6584 del mycommand, mypids, content
6585 if exitcode == os.EX_OK:
6586 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
6587 emergelog(xterm_titles,
6588 ">>> Cancelling sync -- Already current.")
6591 print ">>> Timestamps on the server and in the local repository are the same."
6592 print ">>> Cancelling all further sync action. You are already up to date."
6594 print ">>> In order to force sync, remove '%s'." % servertimestampfile
6598 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
6599 emergelog(xterm_titles,
6600 ">>> Server out of date: %s" % dosyncuri)
6603 print ">>> SERVER OUT OF DATE: %s" % dosyncuri
6605 print ">>> In order to force sync, remove '%s'." % servertimestampfile
6608 exitcode = SERVER_OUT_OF_DATE
6609 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
6611 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
6612 exitcode = portage.portage_exec.spawn(mycommand,
6613 env=settings.environ())
6614 if exitcode in [0,1,3,4,11,14,20,21]:
6616 elif exitcode in [1,3,4,11,14,20,21]:
6619 # Code 2 indicates protocol incompatibility, which is expected
6620 # for servers with protocol < 29 that don't support
6621 # --prune-empty-directories. Retry for a server that supports
6622 # at least rsync protocol version 29 (>=rsync-2.6.4).
6627 if retries<=maxretries:
6628 print ">>> Retrying..."
6633 updatecache_flg=False
6634 exitcode = EXCEEDED_MAX_RETRIES
6638 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
6639 elif exitcode == SERVER_OUT_OF_DATE:
6641 elif exitcode == EXCEEDED_MAX_RETRIES:
6643 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
6648 print darkred("!!!")+green(" Rsync has reported that there is a syntax error. Please ensure")
6649 print darkred("!!!")+green(" that your SYNC statement is proper.")
6650 print darkred("!!!")+green(" SYNC="+settings["SYNC"])
6652 print darkred("!!!")+green(" Rsync has reported that there is a File IO error. Normally")
6653 print darkred("!!!")+green(" this means your disk is full, but can be caused by corruption")
6654 print darkred("!!!")+green(" on the filesystem that contains PORTDIR. Please investigate")
6655 print darkred("!!!")+green(" and try again after the problem has been fixed.")
6656 print darkred("!!!")+green(" PORTDIR="+settings["PORTDIR"])
6658 print darkred("!!!")+green(" Rsync was killed before it finished.")
6660 print darkred("!!!")+green(" Rsync has not successfully finished. It is recommended that you keep")
6661 print darkred("!!!")+green(" trying or that you use the 'emerge-webrsync' option if you are unable")
6662 print darkred("!!!")+green(" to use rsync due to firewall or other restrictions. This should be a")
6663 print darkred("!!!")+green(" temporary problem unless complications exist with your network")
6664 print darkred("!!!")+green(" (and possibly your system's filesystem) configuration.")
6667 elif syncuri[:6]=="cvs://":
6668 if not os.path.exists("/usr/bin/cvs"):
6669 print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
6670 print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
6673 cvsdir=os.path.dirname(myportdir)
6674 if not os.path.exists(myportdir+"/CVS"):
6676 print ">>> Starting initial cvs checkout with "+syncuri+"..."
6677 if os.path.exists(cvsdir+"/gentoo-x86"):
6678 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
6683 if e.errno != errno.ENOENT:
6685 "!!! existing '%s' directory; exiting.\n" % myportdir)
6688 if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
6689 print "!!! cvs checkout error; exiting."
6691 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
6694 print ">>> Starting cvs update with "+syncuri+"..."
6695 retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
6696 myportdir, settings, free=1)
6697 if retval != os.EX_OK:
6701 print "!!! rsync setting: ",syncuri,"not recognized; exiting."
6704 if updatecache_flg and \
6705 myaction != "metadata" and \
6706 "metadata-transfer" not in settings.features:
6707 updatecache_flg = False
6709 # Reload the whole config from scratch.
6710 settings, trees, mtimedb = load_emerge_config(trees=trees)
6711 portdb = trees[settings["ROOT"]]["porttree"].dbapi
6713 if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
6714 action_metadata(settings, portdb, myopts)
6716 if portage._global_updates(trees, mtimedb["updates"]):
6718 # Reload the whole config from scratch.
6719 settings, trees, mtimedb = load_emerge_config(trees=trees)
6720 portdb = trees[settings["ROOT"]]["porttree"].dbapi
6722 mybestpv = portdb.xmatch("bestmatch-visible", "sys-apps/portage")
6723 mypvs = portage.best(
6724 trees[settings["ROOT"]]["vartree"].dbapi.match("sys-apps/portage"))
6726 chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
6728 if myaction != "metadata":
6729 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
6730 retval = portage.portage_exec.spawn(
6731 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
6732 dosyncuri], env=settings.environ())
6733 if retval != os.EX_OK:
6734 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
6736 if(mybestpv != mypvs) and not "--quiet" in myopts:
6738 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
6739 print red(" * ")+"that you update portage now, before any other packages are updated."
6741 print red(" * ")+"To update portage, run 'emerge portage' now."
6744 def action_metadata(settings, portdb, myopts):
# Transfer/update the local dependency cache from the pregenerated
# $PORTDIR/metadata/cache, showing a percentage counter unless --quiet.
# NOTE(review): this listing is non-contiguous (original line numbers
# skip), so some statements between the visible lines are elided; the
# comments below describe only the code shown here.
6745 portage.writemsg_stdout("\n>>> Updating Portage cache: ")
# Make cache entries group-writable while updating (umask 002); the
# matching os.umask(old_umask) restore is not visible in this listing.
6746 old_umask = os.umask(0002)
6747 cachedir = os.path.normpath(settings.depcachedir)
# Safety check: refuse to operate when PORTAGE_DEPCACHEDIR points at a
# primary system directory, since the update would write into it.
6748 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
6749 "/lib", "/opt", "/proc", "/root", "/sbin",
6750 "/sys", "/tmp", "/usr", "/var"]:
6751 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
6752 "ROOT DIRECTORY ON YOUR SYSTEM."
6753 print >> sys.stderr, \
6754 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
6756 if not os.path.exists(cachedir):
# Metadata sources: the eclass cache plus the metadata/cache database
# module configured via "portdbapi.metadbmodule", opened read-only.
6759 ec = portage.eclass_cache.cache(portdb.porttree_root)
6760 myportdir = os.path.realpath(settings["PORTDIR"])
6761 cm = settings.load_best_module("portdbapi.metadbmodule")(
6762 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
# Progress reporter: subclasses cache.util.quiet_mirroring and writes a
# backspace-overwritten "NN%" counter to stdout as entries are mirrored.
6766 class percentage_noise_maker(cache.util.quiet_mirroring):
6767 def __init__(self, dbapi):
6769 self.cp_all = dbapi.cp_all()
6770 l = len(self.cp_all)
6771 self.call_update_min = 100000000
# One percent of the total category/package count, used as the
# update-frequency threshold below.
6772 self.min_cp_all = l/100.0
6777 for x in self.cp_all:
6779 if self.count > self.min_cp_all:
6780 self.call_update_min = 0
6782 for y in self.dbapi.cp_list(x):
# NOTE(review): "call_update_mine" on the next line looks like a typo
# for "call_update_min" -- confirm against the complete source.
6784 self.call_update_mine = 0
6786 def update(self, *arg):
# Advance the displayed percentage, backspacing over the old value.
6787 try: self.pstr = int(self.pstr) + 1
6788 except ValueError: self.pstr = 1
6789 sys.stdout.write("%s%i%%" % \
6790 ("\b" * (len(str(self.pstr))+1), self.pstr))
6792 self.call_update_min = 10000000
6794 def finish(self, *arg):
6795 sys.stdout.write("\b\b\b\b100%\n")
# --quiet: plain cpv generator with a silent mirroring instance;
# otherwise the percentage_noise_maker doubles as source and reporter.
6798 if "--quiet" in myopts:
6799 def quicky_cpv_generator(cp_all_list):
6800 for x in cp_all_list:
6801 for y in portdb.cp_list(x):
6803 source = quicky_cpv_generator(portdb.cp_all())
6804 noise_maker = cache.util.quiet_mirroring()
6806 noise_maker = source = percentage_noise_maker(portdb)
# Perform the actual cache transfer into portdb.auxdb for this PORTDIR.
6807 cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
6808 eclass_cache=ec, verbose_instance=noise_maker)
6813 def action_regen(settings, portdb):
# Regenerate metadata cache entries by walking every category/package
# in the tree and forcing an aux_get() (which populates the cache as a
# side effect), then pruning entries whose ebuilds no longer exist.
# NOTE(review): this listing is non-contiguous; several control-flow
# lines (try:, loop headers, bodies) between the visible lines are
# elided.
6814 xterm_titles = "notitles" not in settings.features
6815 emergelog(xterm_titles, " === regen")
6816 #regenerate cache entries
6817 portage.writemsg_stdout("Regenerating cache entries...\n")
# Detach stdin; regen is non-interactive.
6819 os.close(sys.stdin.fileno())
6820 except SystemExit, e:
6821 raise # Needed else can't exit
6825 mynodes = portdb.cp_all()
6826 from cache.cache_errors import CacheError
# Seed dead_nodes with every currently-cached key per tree; keys still
# present after the walk are stale and get pruned at the end.
6828 for mytree in portdb.porttrees:
6830 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
6831 except CacheError, e:
6832 portage.writemsg("Error listing cache entries for " + \
6833 "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
6838 mymatches = portdb.cp_list(x)
6839 portage.writemsg_stdout("Processing %s\n" % x)
# The aux_get() call (re)generates the cache entry for y; the returned
# DEPEND value itself is unused.
6842 foo = portdb.aux_get(y,["DEPEND"])
6843 except (KeyError, portage_exception.PortageException), e:
6845 "Error processing %(cpv)s, continuing... (%(e)s)\n" % \
6846 {"cpv":y,"e":str(e)}, noiselevel=-1)
6848 for mytree in portdb.porttrees:
6849 if portdb.findname2(y, mytree=mytree)[0]:
# y still has an ebuild in this tree, so its cache entry is live.
6850 dead_nodes[mytree].discard(y)
# Prune the stale entries, tolerating concurrent removal/cache errors.
6852 for mytree, nodes in dead_nodes.iteritems():
6853 auxdb = portdb.auxdb[mytree]
6857 except (KeyError, CacheError):
6859 portage.writemsg_stdout("done!\n")
6861 def action_config(settings, trees, myopts, myfiles):
# Run the ebuild "config" phase for a single installed package selected
# by the given atom, with interactive disambiguation under --ask.
# NOTE(review): this listing is non-contiguous; sys.exit()/else branches
# between the visible lines are elided.
6862 if len(myfiles) != 1 or "system" in myfiles or "world" in myfiles:
6863 print red("!!! config can only take a single package atom at this time\n")
6865 if not is_valid_package_atom(myfiles[0]):
6866 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
6868 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
6869 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match against installed packages only (the vartree dbapi).
6873 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
6874 except ValueError, e:
6875 # Multiple matches thrown from cpv_expand
6878 print "No packages found.\n"
# Multiple installed versions matched: prompt with --ask, otherwise
# list the candidates and tell the user to be more specific.
6881 if "--ask" in myopts:
6883 print "Please select a package to configure:"
6887 options.append(str(idx))
6888 print options[-1]+") "+pkg
6891 idx = userquery("Selection?", options)
# userquery choices are 1-based; convert to a list index.
6894 pkg = pkgs[int(idx)-1]
6896 print "The following packages available:"
6899 print "\nPlease use a specific atom or the --ask option."
6905 if "--ask" in myopts:
6906 if userquery("Ready to configure "+pkg+"?") == "No":
6909 print "Configuring pkg..."
6911 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
6912 mysettings = portage.config(clone=settings)
6913 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
6914 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): line 6917 compares a string setting to the int 1, so the
# debug flag passed there is always False, while line 6914 correctly
# compares against "1" -- these should probably agree; confirm against
# upstream before changing.
6915 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
6917 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
6918 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# On success, run the "clean" phase to remove temporary build dirs.
6919 if retval == os.EX_OK:
6920 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
6921 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
6924 def action_info(settings, trees, myopts, myfiles):
# Print system information (portage version, uname, tree timestamp,
# distcc/ccache versions, key configuration variables) and, when
# package atoms are given in myfiles, the build-time settings of the
# matching installed packages where they differ from the current config.
# NOTE(review): this listing is non-contiguous; some statements between
# the visible lines are elided.
6925 unameout=commands.getstatusoutput("uname -mrp")[1]
6926 print getportageversion(settings["PORTDIR"], settings["ROOT"],
6927 settings.profile_path, settings["CHOST"],
6928 trees[settings["ROOT"]]["vartree"].dbapi)
# Centered "System Settings" banner between two '=' rules.
6930 header_title = "System Settings"
6932 print header_width * "="
6933 print header_title.rjust(int(header_width/2 + len(header_title)/2))
6934 print header_width * "="
6935 print "System uname: "+unameout
# Tree timestamp comes from $PORTDIR/metadata/timestamp.chk (written by
# the sync machinery).
6936 lastSync = portage.grabfile(os.path.join(
6937 settings["PORTDIR"], "metadata", "timestamp.chk"))
6938 print "Timestamp of tree:",
# Report distcc/ccache versions and whether each FEATURE is enabled.
6944 output=commands.getstatusoutput("distcc --version")
6946 print str(output[1].split("\n",1)[0]),
6947 if "distcc" in settings.features:
6952 output=commands.getstatusoutput("ccache -V")
6954 print str(output[1].split("\n",1)[0]),
6955 if "ccache" in settings.features:
# Versions of key toolchain packages, extended by the profile's
# info_pkgs list and de-duplicated.
6960 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
6961 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
6962 myvars += portage_util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
6963 myvars = portage_util.unique_array(myvars)
6967 if portage.isvalidatom(x):
# Resolve each atom against the installed-package db on "/", strip the
# category, and sort versions with pkgcmp.
6968 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
6969 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
6970 pkg_matches.sort(portage.pkgcmp)
6972 for pn, ver, rev in pkg_matches:
6974 pkgs.append(ver + "-" + rev)
6978 pkgs = ", ".join(pkgs)
6979 print "%-20s %s" % (x+":", pkgs)
6981 print "%-20s %s" % (x+":", "[NOT VALID]")
6983 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# --verbose dumps every settings key; otherwise only the standard set
# plus the profile's info_vars list.
6985 if "--verbose" in myopts:
6986 myvars=settings.keys()
6988 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
6989 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
6990 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
6991 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
6993 myvars.extend(portage_util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
6995 myvars = portage_util.unique_array(myvars)
7001 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND-derived flags (e.g. linguas_*) split
# out into their own VARIABLE="..." lines.
7003 use = set(settings["USE"].split())
7004 use_expand = settings["USE_EXPAND"].split()
7006 for varname in use_expand:
7007 flag_prefix = varname.lower() + "_"
7009 if f.startswith(flag_prefix):
7013 print 'USE="%s"' % " ".join(use),
7014 for varname in use_expand:
7015 myval = settings.get(varname)
7017 print '%s="%s"' % (varname, myval),
7020 unset_vars.append(x)
7022 print "Unset: "+", ".join(unset_vars)
# --debug: print the CVS id string of every portage submodule.
7025 if "--debug" in myopts:
7026 for x in dir(portage):
7027 module = getattr(portage, x)
7028 if "cvs_id_string" in dir(module):
7029 print "%s: %s" % (str(x), str(module.cvs_id_string))
7031 # See if we can find any packages installed matching the strings
7032 # passed on the command line
7034 vardb = trees[settings["ROOT"]]["vartree"].dbapi
7035 portdb = trees[settings["ROOT"]]["porttree"].dbapi
7037 mypkgs.extend(vardb.match(x))
7039 # If some packages were found...
7041 # Get our global settings (we only print stuff if it varies from
7042 # the current config)
7043 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS' ]
7044 auxkeys = mydesiredvars + [ "USE", "IUSE"]
7046 pkgsettings = portage.config(clone=settings)
7048 for myvar in mydesiredvars:
7049 global_vals[myvar] = set(settings.get(myvar, "").split())
7051 # Loop through each package
7052 # Only print settings if they differ from global settings
7053 header_title = "Package Settings"
7054 print header_width * "="
7055 print header_title.rjust(int(header_width/2 + len(header_title)/2))
7056 print header_width * "="
7057 from output import EOutput
7060 # Get all package specific variables
7061 auxvalues = vardb.aux_get(pkg, auxkeys)
7063 for i in xrange(len(auxkeys)):
7064 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
7066 for myvar in mydesiredvars:
7067 # If the package variable doesn't match the
7068 # current global variable, something has changed
7069 # so set diff_found so we know to print
7070 if valuesmap[myvar] != global_vals[myvar]:
7071 diff_values[myvar] = valuesmap[myvar]
# Only flags the package actually declares in IUSE are compared.
7072 valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
7073 valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
7075 # If a matching ebuild is no longer available in the tree, maybe it
7076 # would make sense to compare against the flags for the best
7077 # available version with the same slot?
7079 if portdb.cpv_exists(pkg):
7081 pkgsettings.setcpv(pkg, mydb=mydb)
7082 if valuesmap["IUSE"].intersection(
7083 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
7084 diff_values["USE"] = valuesmap["USE"]
7085 # If a difference was found, print the info for
7088 # Print package info
7089 print "%s was built with the following:" % pkg
7090 for myvar in mydesiredvars + ["USE"]:
7091 if myvar in diff_values:
7092 mylist = list(diff_values[myvar])
7094 print "%s=\"%s\"" % (myvar, " ".join(mylist))
# Finally run the optional pkg_info() phase of the installed ebuild.
7096 print ">>> Attempting to run pkg_info() for '%s'" % pkg
7097 ebuildpath = vardb.findname(pkg)
7098 if not ebuildpath or not os.path.exists(ebuildpath):
7099 out.ewarn("No ebuild found for '%s'" % pkg)
# NOTE(review): as in action_config, the string setting is compared to
# the int 1 here, which is always False -- confirm against upstream.
7101 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
7102 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
7103 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
7106 def action_search(settings, trees, myopts, myfiles, spinner):
# Search the portage tree for each term in myfiles using the `search`
# class defined elsewhere in this file; honors --searchdesc, --quiet,
# --usepkg and --usepkgonly.
# NOTE(review): this listing is non-contiguous; the guard/else/try lines
# between the visible lines are elided.
7108 print "emerge: no search terms provided."
7110 searchinstance = search(settings, trees,
7111 spinner, "--searchdesc" in myopts,
7112 "--quiet" not in myopts, "--usepkg" in myopts,
7113 "--usepkgonly" in myopts)
7114 for mysearch in myfiles:
7116 searchinstance.execute(mysearch)
# Search terms are treated as regular expressions; report bad patterns.
7117 except re.error, comment:
7118 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
7120 searchinstance.output()
7122 def action_depclean(settings, trees, ldpath_mtimes,
7123 myopts, action, myfiles, spinner):
7124 # Kill packages that aren't explicitly merged or are required as a
7125 # dependency of another package. World file is explicit.
7127 # Global depclean or prune operations are not very safe when there are
7128 # missing dependencies since it's unknown how badly incomplete
7129 # the dependency graph is, and we might accidentally remove packages
7130 # that should have been pulled into the graph. On the other hand, it's
7131 # relatively safe to ignore missing deps when only asked to remove
7132 # specific packages.
# NOTE(review): this listing is non-contiguous (original line numbers
# skip), so some statements between the visible lines are elided; the
# comments below describe only the code shown here.
7133 allow_missing_deps = len(myfiles) > 0
# Build and display the standard depclean warning banner (unless
# --quiet or specific atoms were given).
7136 msg.append("Depclean may break link level dependencies. Thus, it is\n")
7137 msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
7138 msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
7140 msg.append("Also study the list of packages to be cleaned for any obvious\n")
7141 msg.append("mistakes. Packages that are part of the world set will always\n")
7142 msg.append("be kept. They can be manually added to this set with\n")
7143 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
7144 msg.append("package.provided (see portage(5)) will be removed by\n")
7145 msg.append("depclean, even if they are part of the world set.\n")
7147 msg.append("As a safety measure, depclean will not remove any packages\n")
7148 msg.append("unless *all* required dependencies have been resolved. As a\n")
7149 msg.append("consequence, it is often necessary to run\n")
7150 msg.append(good("`emerge --update --newuse --deep world`") + " prior to depclean.\n")
7152 if action == "depclean" and "--quiet" not in myopts and not myfiles:
7153 portage.writemsg_stdout("\n")
7155 portage.writemsg_stdout(colorize("BAD", "*** WARNING *** ") + x)
7157 xterm_titles = "notitles" not in settings.features
7158 myroot = settings["ROOT"]
7159 portdb = trees[myroot]["porttree"].dbapi
# Dependency checks run against a fake vartree so that only installed
# packages can satisfy atoms (porttree is aliased to the same db).
7161 dep_check_trees = {}
7162 dep_check_trees[myroot] = {}
7163 dep_check_trees[myroot]["vartree"] = \
7164 FakeVartree(trees[myroot]["vartree"],
7165 trees[myroot]["porttree"].dbapi,
7166 depgraph._mydbapi_keys, pkg_cache)
7167 vardb = dep_check_trees[myroot]["vartree"].dbapi
7168 # Constrain dependency selection to the installed packages.
7169 dep_check_trees[myroot]["porttree"] = dep_check_trees[myroot]["vartree"]
7170 root_config = trees[myroot]["root_config"]
7171 setconfig = root_config.setconfig
7172 syslist = setconfig.getSetAtoms("system")
7173 worldlist = setconfig.getSetAtoms("world")
7174 args_set = InternalPackageSet()
# fakedb accumulates every package proven to be required; anything
# installed but absent from fakedb at the end is a removal candidate.
7175 fakedb = portage.fakedbapi(settings=settings)
7176 myvarlist = vardb.cpv_all()
# Sanity checks: warn (with a countdown unless --pretend) if the
# system set, world set, or installed-package db is empty.
7179 print "\n!!! You have no system list.",
7181 print "\n!!! You have no world file.",
7183 print "\n!!! You have no installed package database (%s)." % portage.VDB_PATH,
7185 if not (syslist and worldlist and myvarlist):
7186 print "\n!!! Proceeding "+(syslist and myvarlist and "may" or "will")
7187 print " break your installation.\n"
7188 if "--pretend" not in myopts:
7189 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
7191 if action == "depclean":
7192 emergelog(xterm_titles, " >>> depclean")
# Validate any user-supplied atoms before using them as removal args.
7195 if not is_valid_package_atom(x):
7196 portage.writemsg("!!! '%s' is not a valid package atom.\n" % x,
7198 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
7201 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
7202 except ValueError, e:
7203 print "!!! The short ebuild name \"" + x + "\" is ambiguous. Please specify"
7204 print "!!! one of the following fully-qualified ebuild names instead:\n"
7206 print " " + colorize("INFORM", i)
7210 matched_packages = False
7213 matched_packages = True
7215 if not matched_packages:
7216 portage.writemsg_stdout(
7217 ">>> No packages selected for removal by %s\n" % action)
7220 if "--quiet" not in myopts:
7221 print "\nCalculating dependencies ",
# Unmerge-ordering priorities, mapped from the dep variable each atom
# came from (RDEPEND/PDEPEND/DEPEND -- the RDEPEND entry is elided here).
7223 runtime = UnmergeDepPriority(runtime=True)
7224 runtime_post = UnmergeDepPriority(runtime_post=True)
7225 buildtime = UnmergeDepPriority(buildtime=True)
7229 "PDEPEND": runtime_post,
7230 "DEPEND": buildtime,
# Seed the work queue: for depclean, system + world atoms (or every
# installed cpv when specific files were given); for prune, everything
# installed so nothing depended-upon is pruned.
7233 remaining_atoms = []
7234 if action == "depclean":
7235 for atom in syslist:
7236 if vardb.match(atom):
7237 remaining_atoms.append((atom, 'system', runtime))
7239 # Pull in everything that's installed since we don't want
7240 # to clean any package if something depends on it.
7241 remaining_atoms.extend(
7242 ("="+cpv, 'world', runtime) for cpv in vardb.cpv_all())
7244 for atom in worldlist:
7245 if vardb.match(atom):
7246 remaining_atoms.append((atom, 'world', runtime))
7247 elif action == "prune":
7248 for atom in syslist:
7249 if vardb.match(atom):
7250 remaining_atoms.append((atom, 'system', runtime))
7251 # Pull in everything that's installed since we don't want to prune a
7252 # package if something depends on it.
7253 remaining_atoms.extend(
7254 (atom, 'world', runtime) for atom in vardb.cp_all())
7256 # Try to prune everything that's slotted.
7257 for cp in vardb.cp_all():
7258 if len(vardb.cp_list(cp)) > 1:
7262 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
7263 metadata_keys = depgraph._mydbapi_keys
7265 with_bdeps = myopts.get("--with-bdeps", "y") == "y"
# Main traversal: pop atoms, resolve them against installed packages,
# record the requirement in graph/fakedb, and queue each package's own
# dependency atoms for further traversal.
7267 while remaining_atoms:
7268 atom, parent, priority = remaining_atoms.pop()
7269 pkgs = vardb.match(atom)
# Unmatched hard (non-SOFT) deps are collected for the error report.
7271 if priority > UnmergeDepPriority.SOFT:
7272 unresolveable.setdefault(atom, []).append(parent)
7274 if action == "depclean" and parent == "world" and myfiles:
7275 # Filter out packages given as arguments since the user wants
7279 metadata = dict(izip(metadata_keys,
7280 vardb.aux_get(pkg, metadata_keys)))
7283 arg_atom = args_set.findAtomForPackage(pkg, metadata)
7284 except portage_exception.InvalidDependString, e:
7285 file_path = os.path.join(myroot, portage.VDB_PATH, pkg, "PROVIDE")
7286 portage.writemsg("\n\nInvalid PROVIDE: %s\n" % str(e),
7288 portage.writemsg("See '%s'\n" % file_path,
7292 filtered_pkgs.append(pkg)
7293 pkgs = filtered_pkgs
7295 # For consistency with the update algorithm, keep the highest
7296 # visible version and prune any versions that are old or masked.
7297 for cpv in reversed(pkgs):
7298 if visible(settings,
7299 pkg_cache[("installed", myroot, cpv, "nomerge")]):
7303 # They're all masked, so just keep the highest version.
7306 graph.add(pkg, parent, priority=priority)
# Already visited: don't re-expand this package's dependencies.
7307 if fakedb.cpv_exists(pkg):
7310 fakedb.cpv_inject(pkg)
7311 myaux = dict(izip(aux_keys, vardb.aux_get(pkg, aux_keys)))
7314 usedef = vardb.aux_get(pkg, ["USE"])[0].split()
7315 for dep_type, depstr in myaux.iteritems():
# --with-bdeps=n: build-time deps don't keep packages installed.
7320 if not with_bdeps and dep_type == "DEPEND":
7323 priority = priority_map[dep_type]
7324 if "--debug" in myopts:
7326 print "Parent: ", pkg
7327 print "Depstring:", depstr
7328 print "Priority:", priority
# Temporarily relax strict dep-string checking; this global is
# restored on line 7335 (not exception-safe as written).
7331 portage_dep._dep_check_strict = False
7332 success, atoms = portage.dep_check(depstr, None, settings,
7333 myuse=usedef, trees=dep_check_trees, myroot=myroot)
7335 portage_dep._dep_check_strict = True
7337 show_invalid_depstring_notice(
7338 ("installed", myroot, pkg, "nomerge"),
7342 if "--debug" in myopts:
7343 print "Candidates:", atoms
# Blocker atoms ("!...") are not requirements; skip them.
7346 if atom.startswith("!"):
7348 remaining_atoms.append((atom, pkg, priority))
7350 if "--quiet" not in myopts:
7351 print "\b\b... done!\n"
# Unless removing specific packages, unresolved hard deps abort the
# operation with guidance for the user.
7353 if unresolveable and not allow_missing_deps:
7354 print "Dependencies could not be completely resolved due to"
7355 print "the following required packages not being installed:"
7357 for atom in unresolveable:
7358 print atom, "required by", " ".join(unresolveable[atom])
7359 if unresolveable and not allow_missing_deps:
7361 print "Have you forgotten to run " + good("`emerge --update --newuse --deep world`") + " prior to"
7362 print "%s? It may be necessary to manually uninstall packages that no longer" % action
7363 print "exist in the portage tree since it may not be possible to satisfy their"
7364 print "dependencies. Also, be aware of the --with-bdeps option that is documented"
7365 print "in " + good("`man emerge`") + "."
7367 if action == "prune":
7368 print "If you would like to ignore dependencies then use %s." % \
# Helper for --verbose output: show which parents pull a kept package
# into the requirement graph.
7372 def show_parents(child_node):
7373 parent_nodes = graph.parent_nodes(child_node)
7374 if not parent_nodes:
7375 # With --prune, the highest version can be pulled in without any
7376 # real parent since all installed packages are pulled in. In that
7377 # case there's nothing to show here.
7381 msg.append(" %s pulled in by:\n" % str(child_node))
7382 for parent_node in parent_nodes:
7383 msg.append(" %s\n" % str(parent_node))
7385 portage.writemsg_stdout("".join(msg), noiselevel=-1)
# Select the clean list: installed packages not proven required
# (absent from fakedb), restricted to the user's atoms when given.
7388 if action == "depclean":
7390 for pkg in vardb.cpv_all():
7391 metadata = dict(izip(metadata_keys,
7392 vardb.aux_get(pkg, metadata_keys)))
7395 arg_atom = args_set.findAtomForPackage(pkg, metadata)
7396 except portage_exception.InvalidDependString:
7397 # this error has already been displayed by now
7400 if not fakedb.cpv_exists(pkg):
7401 cleanlist.append(pkg)
7402 elif "--verbose" in myopts:
7405 for pkg in vardb.cpv_all():
7406 if not fakedb.cpv_exists(pkg):
7407 cleanlist.append(pkg)
7408 elif "--verbose" in myopts:
7410 elif action == "prune":
7411 # Prune really uses all installed instead of world. It's not a real
7412 # reverse dependency so don't display it as such.
7413 if graph.contains("world"):
7414 graph.remove("world")
7415 for atom in args_set:
7416 for pkg in vardb.match(atom):
7417 if not fakedb.cpv_exists(pkg):
7418 cleanlist.append(pkg)
7419 elif "--verbose" in myopts:
7423 portage.writemsg_stdout(
7424 ">>> No packages selected for removal by %s\n" % action)
7425 if "--verbose" not in myopts:
7426 portage.writemsg_stdout(
7427 ">>> To see reverse dependencies, use %s\n" % \
7429 if action == "prune":
7430 portage.writemsg_stdout(
7431 ">>> To ignore dependencies, use %s\n" % \
7435 # Use a topological sort to create an unmerge order such that
7436 # each package is unmerged before it's dependencies. This is
7437 # necessary to avoid breaking things that may need to run
7438 # during pkg_prerm or pkg_postrm phases.
7440 # Create a new graph to account for dependencies between the
7441 # packages being unmerged.
7443 clean_set = set(cleanlist)
7445 for node in clean_set:
7446 graph.add(node, None)
7447 myaux = dict(izip(aux_keys, vardb.aux_get(node, aux_keys)))
7449 usedef = vardb.aux_get(node, ["USE"])[0].split()
7450 for dep_type, depstr in myaux.iteritems():
7454 portage_dep._dep_check_strict = False
7455 success, atoms = portage.dep_check(depstr, None, settings,
7456 myuse=usedef, trees=dep_check_trees, myroot=myroot)
7458 portage_dep._dep_check_strict = True
7460 show_invalid_depstring_notice(
7461 ("installed", myroot, node, "nomerge"),
7465 priority = priority_map[dep_type]
7467 if atom.startswith("!"):
7469 matches = vardb.match(atom)
# Only edges between two packages that are both being removed matter
# for the unmerge ordering.
7473 if cpv in clean_set:
7474 graph.add(cpv, node, priority=priority)
7477 if len(graph.order) == len(graph.root_nodes()):
7478 # If there are no dependencies between packages
7479 # let unmerge() group them by cat/pn.
7481 cleanlist = graph.all_nodes()
7483 # Order nodes from lowest to highest overall reference count for
7484 # optimal root node selection.
7486 for node in graph.order:
7487 node_refcounts[node] = len(graph.parent_nodes(node))
7488 def cmp_reference_count(node1, node2):
7489 return node_refcounts[node1] - node_refcounts[node2]
7490 graph.order.sort(cmp_reference_count)
# Drain the graph root-first, progressively ignoring lower dep
# priorities to break circular dependencies.
7492 ignore_priority_range = [None]
7493 ignore_priority_range.extend(
7494 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
7495 while not graph.empty():
7496 for ignore_priority in ignore_priority_range:
7497 nodes = graph.root_nodes(ignore_priority=ignore_priority)
7501 raise AssertionError("no root nodes")
7502 if ignore_priority is not None:
7503 # Some deps have been dropped due to circular dependencies,
7504 # so only pop one node in order do minimize the number that
7509 cleanlist.append(node)
7511 unmerge(settings, myopts, trees[settings["ROOT"]]["vartree"],
7512 "unmerge", cleanlist, ldpath_mtimes, ordered=ordered)
7514 if action == "prune":
# Summary statistics (suppressed when nothing was cleaned and --quiet).
7517 if not cleanlist and "--quiet" in myopts:
7520 print "Packages installed: "+str(len(myvarlist))
7521 print "Packages in world: "+str(len(worldlist))
7522 print "Packages in system: "+str(len(syslist))
# NOTE(review): "Unique package names" prints the same count as
# "Packages installed" (len(myvarlist)); one would expect a
# de-duplicated cat/pn count here -- confirm against upstream.
7523 print "Unique package names: "+str(len(myvarlist))
7524 print "Required packages: "+str(len(fakedb.cpv_all()))
7525 if "--pretend" in myopts:
7526 print "Number to remove: "+str(len(cleanlist))
7528 print "Number removed: "+str(len(cleanlist))
# ---------------------------------------------------------------------------
# NOTE(review): this region is an extracted listing -- each code line keeps
# its original source line number as a leading token and many intermediate
# lines are elided, so several statements below are visibly truncated.
# Comments describe only what the visible lines show.
#
# action_build: the main "emerge <targets>" driver.  Builds a dependency
# graph for the requested packages (or reloads a saved resume list from
# mtimedb), optionally displays the merge list and prompts the user, then
# hands the ordered package list to MergeTask for fetching/building/merging.
# ---------------------------------------------------------------------------
7530 def action_build(settings, trees, mtimedb,
7531 myopts, myaction, myfiles, spinner):
# ldpath_mtimes caches library-path mtimes for post-merge env updates.
7532 ldpath_mtimes = mtimedb["ldpath"]
# Cache the frequently tested option flags once.
7535 buildpkgonly = "--buildpkgonly" in myopts
7536 pretend = "--pretend" in myopts
7537 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
7538 ask = "--ask" in myopts
7539 tree = "--tree" in myopts
7540 verbose = "--verbose" in myopts
7541 quiet = "--quiet" in myopts
7542 if pretend or fetchonly:
7543 # make the mtimedb readonly
7544 mtimedb.filename = None
# Warn that --digest can hide corruption; `repoman manifest` is preferred.
7545 if "--digest" in myopts:
7546 msg = "The --digest option can prevent corruption from being" + \
7547 " noticed. The `repoman manifest` command is the preferred" + \
7548 " way to generate manifests and it is capable of doing an" + \
7549 " entire repository or category at once."
# 'prefix' is assigned on an elided line -- presumably a warning-prefix
# string; TODO confirm against the full source.
7551 writemsg(prefix + "\n")
7552 from textwrap import wrap
7553 for line in wrap(msg, 72):
7554 writemsg("%s%s\n" % (prefix, line))
7555 writemsg(prefix + "\n")
# Announce what kind of preview follows; 'action' is set on elided lines.
7557 if "--quiet" not in myopts and \
7558 ("--pretend" in myopts or "--ask" in myopts or \
7559 "--tree" in myopts or "--verbose" in myopts):
7561 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
7563 elif "--buildpkgonly" in myopts:
7567 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
7569 print darkgreen("These are the packages that would be %s, in reverse order:") % action
7573 print darkgreen("These are the packages that would be %s, in order:") % action
7576 # validate the state of the resume data
7577 # so that we can make assumptions later.
7578 for k in ("resume", "resume_backup"):
7580 if "mergelist" in mtimedb[k]:
7581 if not mtimedb[k]["mergelist"]:
7586 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
7587 if not show_spinner:
7588 spinner.update = spinner.update_quiet
# --resume path: restore the saved merge list (falling back to the backup
# copy) and re-apply the options the interrupted command was started with.
7590 if "--resume" in myopts and \
7591 ("resume" in mtimedb or
7592 "resume_backup" in mtimedb):
7593 if "resume" not in mtimedb:
7594 mtimedb["resume"] = mtimedb["resume_backup"]
7595 del mtimedb["resume_backup"]
7598 # Adjust config according to options of the command being resumed.
7599 for myroot in trees:
7600 mysettings = trees[myroot]["vartree"].settings
7602 adjust_config(myopts, mysettings)
7604 del myroot, mysettings
7606 # "myopts" is a list for backward compatibility.
7607 resume_opts = mtimedb["resume"].get("myopts", [])
7608 if isinstance(resume_opts, list):
7609 resume_opts = dict((k,True) for k in resume_opts)
# These options are one-shot and must not be carried into the resumed run.
7610 for opt in ("--skipfirst", "--ask", "--tree"):
7611 resume_opts.pop(opt, None)
7612 myopts.update(resume_opts)
7614 print "Calculating dependencies ",
7615 myparams = create_depgraph_params(myopts, myaction)
7616 mydepgraph = depgraph(settings, trees,
7617 myopts, myparams, spinner)
# The try: opening this except-handler is elided from the listing.
7619 mydepgraph.loadResumeCommand(mtimedb["resume"])
7620 except portage_exception.PackageNotFound:
7623 from output import EOutput
# 'out' is constructed on an elided line (presumably EOutput()) -- confirm.
7625 out.eerror("Error: The resume list contains packages that are no longer")
7626 out.eerror(" available to be emerged. Please restart/continue")
7627 out.eerror(" the merge operation manually.")
7629 # delete the current list and also the backup
7630 # since it's probably stale too.
7631 for k in ("resume", "resume_backup"):
7632 mtimedb.pop(k, None)
7636 print "\b\b... done!"
# Non-resume path: build a fresh dependency graph from myfiles.
7638 if ("--resume" in myopts):
7639 print darkgreen("emerge: It seems we have nothing to resume...")
7642 myparams = create_depgraph_params(myopts, myaction)
7643 if "--quiet" not in myopts and "--nodeps" not in myopts:
7644 print "Calculating dependencies ",
7646 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
7648 retval, favorites = mydepgraph.select_files(myfiles)
7649 except portage_exception.PackageNotFound, e:
7650 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
7653 mydepgraph.display_problems()
7655 if "--quiet" not in myopts and "--nodeps" not in myopts:
7656 print "\b\b... done!"
# Show the merge list under --pretend, or when ask/tree/verbose is
# requested and not suppressed by --quiet (unless --ask forces it).
7657 display = pretend or \
7658 ((ask or tree or verbose) and not (quiet and not ask))
7660 mydepgraph.display_problems()
# Interactive/preview display followed by the --ask confirmation prompt.
7662 if "--pretend" not in myopts and \
7663 ("--ask" in myopts or "--tree" in myopts or \
7664 "--verbose" in myopts) and \
7665 not ("--quiet" in myopts and "--ask" not in myopts):
7666 if "--resume" in myopts:
7667 mymergelist = mydepgraph.altlist()
7668 if len(mymergelist) == 0:
7669 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
7671 favorites = mtimedb["resume"]["favorites"]
7672 retval = mydepgraph.display(mymergelist, favorites=favorites)
7673 if retval != os.EX_OK:
7675 prompt="Would you like to resume merging these packages?"
7677 retval = mydepgraph.display(
7678 mydepgraph.altlist(reversed=("--tree" in myopts)),
7679 favorites=favorites)
7680 if retval != os.EX_OK:
# Scan the merge list for blockers and real merges.
7683 for x in mydepgraph.altlist():
7684 if isinstance(x, Blocker) and x.satisfied:
7686 if x[0] != "blocks" and x[3] != "nomerge":
7688 #check for blocking dependencies
7689 if x[0]=="blocks" and "--fetchonly" not in myopts and "--fetch-all-uri" not in myopts:
7690 print "\n!!! Error: The above package list contains packages which cannot be installed"
7691 print "!!! at the same time on the same system."
7692 if "--quiet" not in myopts:
7693 show_blocker_docs_link()
# Choose the confirmation prompt for the various "nothing to merge" and
# fetch/merge cases.
7696 if "--noreplace" in myopts and favorites:
7699 print " %s %s" % (good("*"), x)
7700 prompt="Would you like to add these packages to your world favorites?"
7701 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
7702 prompt="Nothing to merge; would you like to auto-clean packages?"
7705 print "Nothing to merge; quitting."
7708 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
7709 prompt="Would you like to fetch the source files for these packages?"
7711 prompt="Would you like to merge these packages?"
7713 if "--ask" in myopts and userquery(prompt) == "No":
7718 # Don't ask again (e.g. when auto-cleaning packages after merge)
7719 myopts.pop("--ask", None)
# --pretend display (no prompting); mirrors the interactive branch above.
7721 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
7722 if ("--resume" in myopts):
7723 mymergelist = mydepgraph.altlist()
7724 if len(mymergelist) == 0:
7725 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
7727 favorites = mtimedb["resume"]["favorites"]
7728 retval = mydepgraph.display(mymergelist, favorites=favorites)
7729 if retval != os.EX_OK:
7732 retval = mydepgraph.display(
7733 mydepgraph.altlist(reversed=("--tree" in myopts)),
7734 favorites=favorites)
7735 if retval != os.EX_OK:
# --buildpkgonly refuses to proceed unless the graph (restricted to Package
# nodes) has no remaining dependency edges at MEDIUM priority or above.
# The check appears twice: once on the pretend path, once on the real run.
7737 if "--buildpkgonly" in myopts:
7738 graph_copy = mydepgraph.digraph.clone()
7739 for node in list(graph_copy.order):
7740 if not isinstance(node, Package):
7741 graph_copy.remove(node)
7742 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
7743 print "\n!!! --buildpkgonly requires all dependencies to be merged."
7744 print "!!! You have to merge the dependencies before you can build this package.\n"
7747 if "--buildpkgonly" in myopts:
7748 graph_copy = mydepgraph.digraph.clone()
7749 for node in list(graph_copy.order):
7750 if not isinstance(node, Package):
7751 graph_copy.remove(node)
7752 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
7753 print "\n!!! --buildpkgonly requires all dependencies to be merged."
7754 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Real merge, resume flavor: replay the saved list via MergeTask.
7757 if ("--resume" in myopts):
7758 favorites=mtimedb["resume"]["favorites"]
7759 mergetask = MergeTask(settings, trees, myopts)
7760 if "PORTAGE_PARALLEL_FETCHONLY" in settings:
7761 """ parallel-fetch uses --resume --fetchonly and we don't want
7762 it to write the mtimedb"""
7763 mtimedb.filename = None
7764 time.sleep(3) # allow the parent to have first fetch
7765 mymergelist = mydepgraph.altlist()
7767 retval = mergetask.merge(mymergelist, favorites, mtimedb)
7768 merge_count = mergetask.curval
# Fresh (non-resume) merge: snapshot state for a future --resume first.
7770 if "resume" in mtimedb and \
7771 "mergelist" in mtimedb["resume"] and \
7772 len(mtimedb["resume"]["mergelist"]) > 1:
7773 mtimedb["resume_backup"] = mtimedb["resume"]
7774 del mtimedb["resume"]
7776 mtimedb["resume"]={}
7777 # XXX: Stored as a list for backward compatibility.
7778 mtimedb["resume"]["myopts"] = \
7779 [k for k in myopts if myopts[k] is True]
7780 mtimedb["resume"]["favorites"]=favorites
# Regenerate digests for every ebuild about to be merged when --digest was
# given (skipped for fetch-only runs).
7781 if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
7782 for pkgline in mydepgraph.altlist():
7783 if pkgline[0]=="ebuild" and pkgline[3]=="merge":
7784 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
7785 tmpsettings = portage.config(clone=settings)
7787 if settings.get("PORTAGE_DEBUG", "") == "1":
7789 retval = portage.doebuild(
7790 y, "digest", settings["ROOT"], tmpsettings, edebug,
7791 ("--pretend" in myopts),
7792 mydbapi=trees[pkgline[1]]["porttree"].dbapi,
7795 pkglist = mydepgraph.altlist()
# Blockers need no action when only fetching or only building binpkgs.
7797 if fetchonly or "--buildpkgonly" in myopts:
7798 pkglist = [pkg for pkg in pkglist if pkg[0] != "blocks"]
7801 if isinstance(x, Blocker) and x.satisfied:
7803 if x[0] != "blocks":
7805 retval = mydepgraph.display(mydepgraph.altlist(
7806 reversed=("--tree" in myopts)),
7807 favorites=favorites)
7808 msg = "Error: The above package list contains " + \
7809 "packages which cannot be installed " + \
7810 "at the same time on the same system."
7812 from textwrap import wrap
7814 for line in wrap(msg, 70):
7816 if "--quiet" not in myopts:
7817 show_blocker_docs_link()
7821 mydepgraph.saveNomergeFavorites()
7823 mergetask = MergeTask(settings, trees, myopts)
7824 retval = mergetask.merge(pkglist, favorites, mtimedb)
7825 merge_count = mergetask.curval
# Post-merge housekeeping: drop the resume list and optionally auto-clean.
7827 if retval == os.EX_OK and not (pretend or fetchonly):
7828 mtimedb.pop("resume", None)
7829 if "yes" == settings.get("AUTOCLEAN"):
7830 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
7831 vartree = trees[settings["ROOT"]]["vartree"]
7832 unmerge(settings, myopts, vartree, "clean", ["world"],
7833 ldpath_mtimes, autoclean=1)
7835 portage.writemsg_stdout(colorize("WARN", "WARNING:")
7836 + " AUTOCLEAN is disabled. This can cause serious"
7837 + " problems due to overlapping packages.\n")
7839 if merge_count and not (buildpkgonly or fetchonly or pretend):
7840 post_emerge(settings, mtimedb, retval)
def multiple_actions(action1, action2):
	"""Complain on stderr that two mutually exclusive actions were requested.

	Emits the standard "choose one only" message followed by the names of
	the two conflicting actions.  Only writes the diagnostics; it does not
	itself return anything useful.
	"""
	complaints = (
		"\n!!! Multiple actions requested... Please choose one only.\n",
		"!!! '%s' or '%s'\n\n" % (action1, action2),
	)
	for complaint in complaints:
		sys.stderr.write(complaint)
# ---------------------------------------------------------------------------
# NOTE(review): extracted listing -- original line numbers are embedded and
# several lines are elided (e.g. the initialization of myaction/myopts/
# myfiles and some dict literals in argument_options are missing).
#
# parse_opts: translate a raw argv-style command line into the triple
# (myaction, myopts, myfiles) using optparse.  Global 'actions', 'options'
# and 'shortmapping' tables (defined elsewhere in this file) drive which
# flags are recognized.  'silent' suppresses deprecation warnings.
# ---------------------------------------------------------------------------
7848 def parse_opts(tmpcmdline, silent=False):
7853 global actions, options, shortmapping
7855 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument; most entries' keys/defaults are on elided
# lines, only the help strings and choices are visible here.
7856 argument_options = {
7858 "help":"specify the location for portage configuration files",
7862 "help":"enable or disable color output",
7864 "choices":("y", "n")
7867 "help":"include unnecessary build time dependencies",
7869 "choices":("y", "n")
7872 "help":"specify conditions to trigger package reinstallation",
7874 "choices":["changed-use"]
# Build the optparse parser; drop the automatic --help so emerge can render
# its own help output.
7878 from optparse import OptionParser
7879 parser = OptionParser()
7880 if parser.has_option("--help"):
7881 parser.remove_option("--help")
# Register every known action as a --<action> boolean flag.
7883 for action_opt in actions:
7884 parser.add_option("--" + action_opt, action="store_true",
7885 dest=action_opt.replace("-", "_"), default=False)
# Register plain boolean options, short-option aliases and long aliases.
# Note: lstrip("--") strips leading '-' characters (works here because
# option names only have leading dashes).
7886 for myopt in options:
7887 parser.add_option(myopt, action="store_true",
7888 dest=myopt.lstrip("--").replace("-", "_"), default=False)
7889 for shortopt, longopt in shortmapping.iteritems():
7890 parser.add_option("-" + shortopt, action="store_true",
7891 dest=longopt.lstrip("--").replace("-", "_"), default=False)
7892 for myalias, myopt in longopt_aliases.iteritems():
7893 parser.add_option(myalias, action="store_true",
7894 dest=myopt.lstrip("--").replace("-", "_"), default=False)
7896 for myopt, kwargs in argument_options.iteritems():
7897 parser.add_option(myopt,
7898 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
7900 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Collect set boolean options into the myopts dict (its initialization is
# on an elided line).
7902 for myopt in options:
7903 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
7905 myopts[myopt] = True
7907 for myopt in argument_options:
7908 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
# Determine the single requested action; multiple_actions() reports a
# conflict when more than one is given.
7912 for action_opt in actions:
7913 v = getattr(myoptions, action_opt.replace("-", "_"))
7916 multiple_actions(myaction, action_opt)
7918 myaction = action_opt
# Bare-word actions (e.g. `emerge sync`) are deprecated in favor of --<action>.
7921 if x in actions and myaction != "search":
7922 if not silent and x not in ["system", "world"]:
7923 print red("*** Deprecated use of action '%s', use '--%s' instead" % (x,x))
7924 # special case "search" so people can search for action terms, e.g. emerge -s sync
7926 multiple_actions(myaction, x)
# Map deprecated --nocolor onto the newer --color=n form.
7932 if "--nocolor" in myopts:
7934 sys.stderr.write("*** Deprecated use of '--nocolor', " + \
7935 "use '--color=n' instead.\n")
7936 del myopts["--nocolor"]
7937 myopts["--color"] = "n"
7939 return myaction, myopts, myfiles
# validate_ebuild_environment: walk every configured root and fetch its
# vartree settings.  NOTE(review): the actual validation call on 'settings'
# appears to be elided from this listing (a line between 7943 and 7946 is
# missing) -- confirm against the full source.
7941 def validate_ebuild_environment(trees):
7942 for myroot in trees:
7943 settings = trees[myroot]["vartree"].settings
# load_emerge_config: build (settings, trees, mtimedb) from the environment.
# PORTAGE_CONFIGROOT and ROOT env vars are forwarded to
# portage.create_trees(); each root gets a RootConfig with its SetConfig
# attached.  NOTE(review): the initialization of 'kwargs' (original line
# 7947, presumably kwargs = {}) is elided from this listing.
7946 def load_emerge_config(trees=None):
7948 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
7949 kwargs[k] = os.environ.get(envvar, None)
7950 trees = portage.create_trees(trees=trees, **kwargs)
# Attach a RootConfig (with package-set configuration) to every root.
7952 for root, root_trees in trees.iteritems():
7953 settings = root_trees["vartree"].settings
7954 setconfig = SetConfig(settings, root_trees)
7955 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
# Default to the "/" root's settings; the loop below re-reads per-root
# settings (its body is partially elided).
7957 settings = trees["/"]["vartree"].settings
7959 for myroot in trees:
7961 settings = trees[myroot]["vartree"].settings
# The mtimedb lives under the portage cache path on the "/" root.
7964 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
7965 mtimedb = portage.MtimeDB(mtimedbfile)
7966 return settings, trees, mtimedb
# NOTE(review): extracted listing -- several `try:` openers and `else:`
# lines in this function are elided, so except-clauses below appear without
# their try blocks.
7968 def adjust_config(myopts, settings):
7969 """Make emerge specific adjustments to the config."""
7971 # To enhance usability, make some vars case insensitive by forcing them to
7973 for myvar in ("AUTOCLEAN", "NOCOLOR"):
7974 if myvar in settings:
7975 settings[myvar] = settings[myvar].lower()
7976 settings.backup_changes(myvar)
7979 # Kill noauto as it will break merges otherwise.
7980 if "noauto" in settings.features:
7981 while "noauto" in settings.features:
7982 settings.features.remove("noauto")
7983 settings["FEATURES"] = " ".join(settings.features)
7984 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY as an integer, falling back to the default (and
# rewriting the setting) when the configured value is not parseable.
7988 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
7989 except ValueError, e:
7990 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7991 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
7992 settings["CLEAN_DELAY"], noiselevel=-1)
7993 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
7994 settings.backup_changes("CLEAN_DELAY")
# Same validate-or-reset dance for EMERGE_WARNING_DELAY (default 10s).
7996 EMERGE_WARNING_DELAY = 10
7998 EMERGE_WARNING_DELAY = int(settings.get(
7999 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
8000 except ValueError, e:
8001 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8002 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
8003 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
8004 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
8005 settings.backup_changes("EMERGE_WARNING_DELAY")
# Propagate --quiet and --noconfmem into the config environment.
8007 if "--quiet" in myopts:
8008 settings["PORTAGE_QUIET"]="1"
8009 settings.backup_changes("PORTAGE_QUIET")
8011 # Set so that configs will be merged regardless of remembered status
8012 if ("--noconfmem" in myopts):
8013 settings["NOCONFMEM"]="1"
8014 settings.backup_changes("NOCONFMEM")
8016 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must be exactly 0 or 1; anything else is reported.
8019 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
8020 if PORTAGE_DEBUG not in (0, 1):
8021 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
8022 PORTAGE_DEBUG, noiselevel=-1)
8023 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
8026 except ValueError, e:
8027 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8028 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
8029 settings["PORTAGE_DEBUG"], noiselevel=-1)
8031 if "--debug" in myopts:
8033 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
8034 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR setting first, then the explicit --color option,
# then a non-tty stdout disables color.
8036 if settings.get("NOCOLOR") not in ("yes","true"):
8037 output.havecolor = 1
8039 """The explicit --color < y | n > option overrides the NOCOLOR environment
8040 variable and stdout auto-detection."""
8041 if "--color" in myopts:
8042 if "y" == myopts["--color"]:
8043 output.havecolor = 1
8044 settings["NOCOLOR"] = "false"
8046 output.havecolor = 0
8047 settings["NOCOLOR"] = "true"
8048 settings.backup_changes("NOCOLOR")
8049 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
8050 output.havecolor = 0
8051 settings["NOCOLOR"] = "true"
8052 settings.backup_changes("NOCOLOR")
# ---------------------------------------------------------------------------
# NOTE(review): the lines below are the interior of the top-level emerge
# driver (its `def` line falls in an elided gap of this listing, between
# original lines 8052 and 8055 -- presumably `def emerge_main():`; confirm
# against the full source).  Flow: parse options, load config, apply global
# updates, re-parse with EMERGE_DEFAULT_OPTS, normalize option implications,
# install signal/atexit handlers, then dispatch to the action_* functions.
# ---------------------------------------------------------------------------
8055 # Disable color until we're sure that it should be enabled (after
8056 # EMERGE_DEFAULT_OPTS has been parsed).
8057 output.havecolor = 0
8058 # This first pass is just for options that need to be known as early as
8059 # possible, such as --config-root. They will be parsed again later,
8060 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
8061 # the value of --config-root).
8062 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
8063 if "--debug" in myopts:
8064 os.environ["PORTAGE_DEBUG"] = "1"
8065 if "--config-root" in myopts:
8066 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
8068 # Portage needs to ensure a sane umask for the files it creates.
8070 settings, trees, mtimedb = load_emerge_config()
8071 portdb = trees[settings["ROOT"]]["porttree"].dbapi
# Renice the process per PORTAGE_NICENESS; failures are non-fatal.
8074 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
8075 except (OSError, ValueError), e:
8076 portage.writemsg("!!! Failed to change nice value to '%s'\n" % \
8077 settings["PORTAGE_NICENESS"])
8078 portage.writemsg("!!! %s\n" % str(e))
# Apply pending package-move updates; a reload is needed if any were done.
8081 if portage._global_updates(trees, mtimedb["updates"]):
8083 # Reload the whole config from scratch.
8084 settings, trees, mtimedb = load_emerge_config(trees=trees)
8085 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8087 xterm_titles = "notitles" not in settings.features
# Second parse: EMERGE_DEFAULT_OPTS first, so explicit argv options win.
8090 if "--ignore-default-opts" not in myopts:
8091 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
8092 tmpcmdline.extend(sys.argv[1:])
8093 myaction, myopts, myfiles = parse_opts(tmpcmdline)
8095 if "--digest" in myopts:
8096 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
8097 # Reload the whole config from scratch so that the portdbapi internal
8098 # config is updated with new FEATURES.
8099 settings, trees, mtimedb = load_emerge_config(trees=trees)
8100 portdb = trees[settings["ROOT"]]["porttree"].dbapi
8102 for myroot in trees:
8103 mysettings = trees[myroot]["vartree"].settings
8105 adjust_config(myopts, mysettings)
8107 del myroot, mysettings
8109 spinner = stdout_spinner()
8110 if "candy" in settings.features:
8111 spinner.update = spinner.update_scroll
8113 portage.deprecated_profile_check()
# Warn when overlay eclasses shadow PORTDIR eclasses (invalidates the
# distributed metadata cache).
8115 eclasses_overridden = {}
8116 for mytrees in trees.itervalues():
8117 mydb = mytrees["porttree"].dbapi
8118 # Freeze the portdbapi for performance (memoize all xmatch results).
8120 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
8123 if eclasses_overridden and \
8124 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
8126 if len(eclasses_overridden) == 1:
8127 writemsg(prefix + "Overlay eclass overrides " + \
8128 "eclass from PORTDIR:\n", noiselevel=-1)
8130 writemsg(prefix + "Overlay eclasses override " + \
8131 "eclasses from PORTDIR:\n", noiselevel=-1)
8132 writemsg(prefix + "\n", noiselevel=-1)
8133 for eclass_name in sorted(eclasses_overridden):
8134 writemsg(prefix + " '%s/%s.eclass'\n" % \
8135 (eclasses_overridden[eclass_name], eclass_name),
8137 writemsg(prefix + "\n", noiselevel=-1)
8138 msg = "It is best to avoid overridding eclasses from PORTDIR " + \
8139 "because it will trigger invalidation of cached ebuild metadata " + \
8140 "that is distributed with the portage tree. If you must " + \
8141 "override eclasses from PORTDIR then you are advised to run " + \
8142 "`emerge --regen` after each time that you run `emerge --sync`. " + \
8143 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
8144 "you would like to disable this warning."
8145 from textwrap import wrap
8146 for line in wrap(msg, 72):
8147 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
# Easter egg: `emerge moo` (most of the cow is on elided lines).
8149 if "moo" in myfiles:
8152 Larry loves Gentoo (""" + os.uname()[0] + """)
8154 _______________________
8155 < Have you mooed today? >
8156 -----------------------
# "world"/"system" may be given as bare arguments, but not mixed with
# individual package atoms.
8165 if myaction in ("world", "system"):
8167 print "emerge: please specify a package class (\"world\" " + \
8168 "or \"system\") or individual packages, but not both."
8170 myfiles.append(myaction)
8173 ext = os.path.splitext(x)[1]
8174 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
8175 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
8178 if ("--tree" in myopts) and ("--columns" in myopts):
8179 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
8182 if ("--quiet" in myopts):
8183 spinner.update = spinner.update_quiet
8184 portage_util.noiselimit = -1
8186 # Always create packages if FEATURES=buildpkg
8187 # Imply --buildpkg if --buildpkgonly
8188 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
8189 if "--buildpkg" not in myopts:
8190 myopts["--buildpkg"] = True
8192 # Also allow -S to invoke search action (-sS)
8193 if ("--searchdesc" in myopts):
8194 if myaction and myaction != "search":
8195 myfiles.append(myaction)
8196 if "--search" not in myopts:
8197 myopts["--search"] = True
8200 # Always try and fetch binary packages if FEATURES=getbinpkg
8201 if ("getbinpkg" in settings.features):
8202 myopts["--getbinpkg"] = True
# Option-implication normalization: each block below makes a derived
# option explicit so later code only needs a single membership test.
8204 if "--buildpkgonly" in myopts:
8205 # --buildpkgonly will not merge anything, so
8206 # it cancels all binary package options.
8207 for opt in ("--getbinpkg", "--getbinpkgonly",
8208 "--usepkg", "--usepkgonly"):
8209 myopts.pop(opt, None)
8211 if "--skipfirst" in myopts and "--resume" not in myopts:
8212 myopts["--resume"] = True
8214 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
8215 myopts["--usepkgonly"] = True
8217 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
8218 myopts["--getbinpkg"] = True
8220 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
8221 myopts["--usepkg"] = True
8223 # Also allow -K to apply --usepkg/-k
8224 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
8225 myopts["--usepkg"] = True
8227 # Also allow -l to apply --pretend/-p, but if already in --ask mode
8228 if ("--changelog" in myopts) and not (("--pretend" in myopts) or ("--ask" in myopts)):
8229 print ">>> --changelog implies --pretend... adding --pretend to options."
8230 myopts["--pretend"] = True
8232 # Allow -p to remove --ask
8233 if ("--pretend" in myopts) and ("--ask" in myopts):
8234 print ">>> --pretend disables --ask... removing --ask from options."
8237 # forbid --ask when not in a terminal
8238 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
8239 if ("--ask" in myopts) and (not sys.stdin.isatty()):
8240 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
8244 if settings.get("PORTAGE_DEBUG", "") == "1":
8245 spinner.update = spinner.update_quiet
8247 if "python-trace" in settings.features:
8248 import portage_debug
8249 portage_debug.set_trace(True)
8251 if ("--resume" in myopts):
8252 if "--tree" in myopts:
8253 print "* --tree is currently broken with --resume. Disabling..."
8254 del myopts["--tree"]
8256 if not ("--quiet" in myopts):
8257 if not sys.stdout.isatty() or ("--nospinner" in myopts):
8258 spinner.update = spinner.update_basic
# --version / --help short-circuit before any real work.
8260 if "--version" in myopts:
8261 print getportageversion(settings["PORTDIR"], settings["ROOT"],
8262 settings.profile_path, settings["CHOST"],
8263 trees[settings["ROOT"]]["vartree"].dbapi)
8265 elif "--help" in myopts:
8266 emergehelp.help(myaction, myopts, output.havecolor)
8269 if portage.wheelgid == portage.portage_gid:
8270 print "emerge: wheel group use is being deprecated. Please update group and passwd to"
8271 print " include the portage user as noted above, and then use group portage."
8273 if "--debug" in myopts:
8274 print "myaction", myaction
8275 print "myopts", myopts
8277 if not myaction and not myfiles and "--resume" not in myopts:
8278 emergehelp.help(myaction, myopts, output.havecolor)
8281 pretend = "--pretend" in myopts
8282 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
8283 buildpkgonly = "--buildpkgonly" in myopts
8285 # check if root user is the current user for the actions where emerge needs this
# Privilege check: secpass < 2 means not root; decide whether superuser or
# only portage-group access is needed, and downgrade to --pretend under
# --ask instead of failing outright.
8286 if portage.secpass < 2:
8287 # We've already allowed "--version" and "--help" above.
8288 if "--pretend" not in myopts and myaction not in ("search","info"):
8289 need_superuser = not \
8291 (buildpkgonly and secpass >= 1) or \
8292 myaction in ("metadata", "regen") or \
8293 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
8294 if portage.secpass < 1 or \
8297 access_desc = "superuser"
8299 access_desc = "portage group"
8300 # Always show portage_group_warning() when only portage group
8301 # access is required but the user is not in the portage group.
8302 from portage_data import portage_group_warning
8303 if "--ask" in myopts:
8304 myopts["--pretend"] = True
8306 print ("%s access is required... " + \
8307 "adding --pretend to options.\n") % access_desc
8308 if portage.secpass < 1 and not need_superuser:
8309 portage_group_warning()
8311 sys.stderr.write(("emerge: %s access is " + \
8312 "required.\n\n") % access_desc)
8313 if portage.secpass < 1 and not need_superuser:
8314 portage_group_warning()
# Suppress emerge.log for read-only / preview operations.
8317 disable_emergelog = False
8318 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
8320 disable_emergelog = True
8322 if myaction in ("search", "info"):
8323 disable_emergelog = True
8324 if disable_emergelog:
8325 """ Disable emergelog for everything except build or unmerge
8326 operations. This helps minimize parallel emerge.log entries that can
8327 confuse log parsers. We especially want it disabled during
8328 parallel-fetch, which uses --resume --fetchonly."""
# No-op replacement for the real emergelog (its body is elided).
8330 def emergelog(*pargs, **kargs):
8333 if not "--pretend" in myopts:
8334 emergelog(xterm_titles, "Started emerge on: "+\
8335 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
8338 myelogstr=" ".join(myopts)
8339 if myaction and myaction not in ("system", "world"):
8340 # Exclude "world" and "system" here because myfiles
8341 # contains a duplicate.
8342 myelogstr+=" "+myaction
8344 myelogstr+=" "+" ".join(myfiles)
8345 emergelog(xterm_titles, " *** emerge " + myelogstr)
# SIGINT/SIGTERM: ignore further signals, log, and exit with 100+signum.
8347 def emergeexitsig(signum, frame):
8348 signal.signal(signal.SIGINT, signal.SIG_IGN)
8349 signal.signal(signal.SIGTERM, signal.SIG_IGN)
8350 portage_util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
8351 sys.exit(100+signum)
8352 signal.signal(signal.SIGINT, emergeexitsig)
8353 signal.signal(signal.SIGTERM, emergeexitsig)
# atexit hook (its `def emergeexit():` line is elided from this listing).
8356 """This gets out final log message in before we quit."""
8357 if "--pretend" not in myopts:
8358 emergelog(xterm_titles, " *** terminating.")
8359 if "notitles" not in settings.features:
8361 portage.atexit_register(emergeexit)
# Action dispatch.  config/metadata/regen/sync reject --pretend.
8363 if myaction in ("config", "metadata", "regen", "sync"):
8364 if "--pretend" in myopts:
8365 sys.stderr.write(("emerge: The '%s' action does " + \
8366 "not support '--pretend'.\n") % myaction)
8368 if "sync" == myaction:
8369 action_sync(settings, trees, mtimedb, myopts, myaction)
8370 elif "metadata" == myaction:
8371 action_metadata(settings, portdb, myopts)
8372 elif myaction=="regen":
8373 validate_ebuild_environment(trees)
8374 action_regen(settings, portdb)
8376 elif "config"==myaction:
8377 validate_ebuild_environment(trees)
8378 action_config(settings, trees, myopts, myfiles)
8381 elif "info"==myaction:
8382 action_info(settings, trees, myopts, myfiles)
8385 elif "search"==myaction:
8386 validate_ebuild_environment(trees)
8387 action_search(settings, trees[settings["ROOT"]],
8388 myopts, myfiles, spinner)
8389 elif myaction in ("clean", "unmerge") or \
8390 (myaction == "prune" and "--nodeps" in myopts):
8391 validate_ebuild_environment(trees)
8392 vartree = trees[settings["ROOT"]]["vartree"]
8393 # When given a list of atoms, unmerge
8394 # them in the order given.
8395 ordered = myaction == "unmerge"
8396 if 1 == unmerge(settings, myopts, vartree, myaction, myfiles,
8397 mtimedb["ldpath"], ordered=ordered):
8398 if not (buildpkgonly or fetchonly or pretend):
8399 post_emerge(settings, mtimedb, 0)
8401 elif myaction in ("depclean", "prune"):
8402 validate_ebuild_environment(trees)
8403 action_depclean(settings, trees, mtimedb["ldpath"],
8404 myopts, myaction, myfiles, spinner)
8405 if not (buildpkgonly or fetchonly or pretend):
8406 post_emerge(settings, mtimedb, 0)
8407 # "update", "system", or just process files:
8409 validate_ebuild_environment(trees)
8410 retval = action_build(settings, trees, mtimedb,
8411 myopts, myaction, myfiles, spinner)
# Script entry point: run the main driver and translate the two expected
# startup failures into short stderr messages.  NOTE(review): the `try:`
# opener (original line 8416) and the exit-code handling lines are elided
# from this listing.
8414 if __name__ == "__main__":
8415 from portage_exception import ParseError, PermissionDenied
8417 retval = emerge_main()
8418 except PermissionDenied, e:
8419 sys.stderr.write("Permission denied: '%s'\n" % str(e))
8421 except ParseError, e:
8422 sys.stderr.write("%s\n" % str(e))