2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
20 from os import path as osp
21 sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
24 from portage import digraph
25 from portage.const import NEWS_LIB_PATH
28 import portage.xpak, commands, errno, re, socket, time
29 from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
30 nc_len, red, teal, turquoise, xtermTitle, \
31 xtermTitleReset, yellow
32 from portage.output import create_color_func
33 good = create_color_func("GOOD")
34 bad = create_color_func("BAD")
35 # white looks bad on terminals with white background
36 from portage.output import bold as white
40 portage.dep._dep_check_strict = True
43 import portage.exception
44 from portage.cache.cache_errors import CacheError
45 from portage.data import secpass
46 from portage.elog.messages import eerror
47 from portage.util import normalize_path as normpath
48 from portage.util import cmp_sort_key, writemsg, writemsg_level
49 from portage.sets import load_default_config, SETPREFIX
50 from portage.sets.base import InternalPackageSet
52 from itertools import chain, izip
54 from _emerge.SlotObject import SlotObject
55 from _emerge.DepPriority import DepPriority
56 from _emerge.BlockerDepPriority import BlockerDepPriority
57 from _emerge.UnmergeDepPriority import UnmergeDepPriority
58 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
59 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
60 from _emerge.Task import Task
61 from _emerge.Blocker import Blocker
62 from _emerge.AsynchronousTask import AsynchronousTask
63 from _emerge.CompositeTask import CompositeTask
64 from _emerge.EbuildFetcher import EbuildFetcher
65 from _emerge.EbuildBuild import EbuildBuild
66 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
67 from _emerge.EbuildPhase import EbuildPhase
68 from _emerge.Binpkg import Binpkg
69 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
70 from _emerge.PackageMerge import PackageMerge
71 from _emerge.DependencyArg import DependencyArg
72 from _emerge.AtomArg import AtomArg
73 from _emerge.PackageArg import PackageArg
74 from _emerge.SetArg import SetArg
75 from _emerge.Dependency import Dependency
76 from _emerge.BlockerCache import BlockerCache
77 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
78 from _emerge.RepoDisplay import RepoDisplay
79 from _emerge.UseFlagDisplay import UseFlagDisplay
80 from _emerge.SequentialTaskQueue import SequentialTaskQueue
81 from _emerge.ProgressHandler import ProgressHandler
82 from _emerge.stdout_spinner import stdout_spinner
83 from _emerge.UninstallFailure import UninstallFailure
84 from _emerge.JobStatusDisplay import JobStatusDisplay
85 from _emerge.PollScheduler import PollScheduler
87 def userquery(prompt, responses=None, colours=None):
# NOTE(review): the embedded original numbering (87, 88, ...) jumps — lines
# 92-93, 100, 104, 107, 110, 112, 114-116, 121 and 124-125 are missing from
# this dump (likely the `colours = [...]` openings, the print/while/try
# scaffolding, the `return key`, and the interrupt handler). Code below is
# kept byte-identical to what is visible.
88 """Displays a prompt and a set of responses, then waits for a response
89 which is checked against the responses and the first to match is
90 returned. An empty response will match the first value in responses. The
91 input buffer is *not* cleared prior to the prompt!
94 responses: a List of Strings.
95 colours: a List of Functions taking and returning a String, used to
96 process the responses for display. Typically these will be functions
97 like red() but could be e.g. lambda x: "DisplayString".
98 If responses is omitted, defaults to ["Yes", "No"], [green, red].
99 If only colours is omitted, defaults to [bold, ...].
101 Returns a member of the List responses. (If called without optional
102 arguments, returns "Yes" or "No".)
103 KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
105 if responses is None:
106 responses = ["Yes", "No"]
# Default colour functions come from PROMPT_CHOICE_* colour classes.
108 create_color_func("PROMPT_CHOICE_DEFAULT"),
109 create_color_func("PROMPT_CHOICE_OTHER")
111 elif colours is None:
# Cycle/truncate the colour list so it is exactly as long as `responses`.
113 colours=(colours*len(responses))[:len(responses)]
# Python 2 raw_input(); the bracketed prompt shows each coloured choice.
117 response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
118 for key in responses:
119 # An empty response will match the first value in responses.
120 if response.upper()==key[:len(response)].upper():
122 print "Sorry, response '%s' not understood." % response,
123 except (EOFError, KeyboardInterrupt):
# Legal emerge action verbs. NOTE(review): the dump's embedded numbering
# (127-181) is non-contiguous — the closing bracket of this frozenset and the
# assignment lines that presumably open the long-option list and the
# short-option mapping below are missing; confirm against the full source.
127 actions = frozenset([
128 "clean", "config", "depclean",
129 "info", "list-sets", "metadata",
130 "prune", "regen", "search",
131 "sync", "unmerge", "version",
# Long command-line options (partial — numbering gaps hide several entries).
134 "--ask", "--alphabetical",
135 "--buildpkg", "--buildpkgonly",
136 "--changelog", "--columns",
141 "--fetchonly", "--fetch-all-uri",
142 "--getbinpkg", "--getbinpkgonly",
143 "--help", "--ignore-default-opts",
147 "--nodeps", "--noreplace",
148 "--nospinner", "--oneshot",
149 "--onlydeps", "--pretend",
150 "--quiet", "--resume",
151 "--searchdesc", "--selective",
155 "--usepkg", "--usepkgonly",
# Single-letter option -> long option mapping (partial for the same reason).
162 "b":"--buildpkg", "B":"--buildpkgonly",
163 "c":"--clean", "C":"--unmerge",
164 "d":"--debug", "D":"--deep",
166 "f":"--fetchonly", "F":"--fetch-all-uri",
167 "g":"--getbinpkg", "G":"--getbinpkgonly",
169 "k":"--usepkg", "K":"--usepkgonly",
171 "n":"--noreplace", "N":"--newuse",
172 "o":"--onlydeps", "O":"--nodeps",
173 "p":"--pretend", "P":"--prune",
175 "s":"--search", "S":"--searchdesc",
178 "v":"--verbose", "V":"--version"
# Directory that emergelog() writes emerge.log into.
181 _emerge_log_dir = '/var/log'
# Append a timestamped entry to $_emerge_log_dir/emerge.log, optionally
# updating the xterm title with a short message first. NOTE(review): the
# dump is missing interleaved lines (embedded numbering jumps at 188, 193-195,
# 199, 201-203, 205, 207) — presumably the enclosing try/finally and lock
# seek logic; code kept byte-identical to what is visible.
183 def emergelog(xterm_titles, mystr, short_msg=None):
184 if xterm_titles and short_msg:
185 if "HOSTNAME" in os.environ:
# Prefix the title message with this host's name when available.
186 short_msg = os.environ["HOSTNAME"]+": "+short_msg
187 xtermTitle(short_msg)
189 file_path = os.path.join(_emerge_log_dir, 'emerge.log')
190 mylogfile = open(file_path, "a")
# Ensure the log keeps portage-owned permissions even if we created it.
191 portage.util.apply_secpass_permissions(file_path,
192 uid=portage.portage_uid, gid=portage.portage_gid,
196 mylock = portage.locks.lockfile(mylogfile)
197 # seek because we may have gotten held up by the lock.
198 # if so, we may not be positioned at the end of the file.
# Timestamp is whole seconds only (first 10 chars of time.time()).
200 mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
204 portage.locks.unlockfile(mylock)
# Logging is best-effort: failures are reported to stderr, not raised.
206 except (IOError,OSError,portage.exception.PortageException), e:
208 print >> sys.stderr, "emergelog():",e
# Print a visible countdown of `secs` seconds before an action starts.
# NOTE(review): most of the body (original lines 211, 214-216, 218-221 —
# presumably the guard, the loop header, sleep and trailing newline) is
# missing from this dump; visible code kept byte-identical.
210 def countdown(secs=5, doing="Starting"):
212 print ">>> Waiting",secs,"seconds before starting..."
213 print ">>> (Control-C to abort)...\n"+doing+" in: ",
# Each remaining second is printed in the UNMERGE_WARN colour.
217 sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
222 # formats a size given in bytes nicely
# Returns a human-readable kB string. NOTE(review): lines 225, 231-233 and
# 235-237 of the original (the basestring early-return, the comma-grouping
# loop header, and the final return) are absent from this dump.
223 def format_size(mysize):
224 if isinstance(mysize, basestring):
226 if 0 != mysize % 1024:
227 # Always round up to the next kB so that it doesn't show 0 kB when
228 # some small file still needs to be fetched.
229 mysize += 1024 - mysize % 1024
# Python 2 integer division: whole kilobytes only.
230 mystr=str(mysize/1024)
# Insert a thousands separator at position `mycount` (defined in a
# missing line — presumably a right-to-left grouping loop; confirm).
234 mystr=mystr[:mycount]+","+mystr[mycount:]
# Determine the active gcc version for `chost`, trying progressively less
# specific probes: gcc-config, then ${CHOST}-gcc, then plain gcc.
# Returns a "gcc-X.Y.Z" style string, or "[unavailable]" after warning.
238 def getgccversion(chost):
241 return: the current in-use gcc version
244 gcc_ver_command = 'gcc -dumpversion'
245 gcc_ver_prefix = 'gcc-'
247 gcc_not_found_error = red(
248 "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
249 "!!! to update the environment of this terminal and possibly\n" +
250 "!!! other terminals also.\n"
# Probe 1: gcc-config reports the currently selected profile.
253 mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
254 if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
255 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
# Probe 2: ask the CHOST-prefixed compiler directly.
257 mystatus, myoutput = commands.getstatusoutput(
258 chost + "-" + gcc_ver_command)
259 if mystatus == os.EX_OK:
260 return gcc_ver_prefix + myoutput
# Probe 3: fall back to whatever `gcc` is on PATH.
262 mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
263 if mystatus == os.EX_OK:
264 return gcc_ver_prefix + myoutput
# All probes failed: warn the user and return a placeholder.
266 portage.writemsg(gcc_not_found_error, noiselevel=-1)
267 return "[unavailable]"
# Build the "Portage X.Y (profile, gcc, libc, kernel/arch)" banner string.
# NOTE(review): the dump omits interleaved lines (271, 276-277, 279-280,
# 282-283, 287, 289, 291, 293, 295, 298) — presumably the try/except around
# the profile resolution and the libc loop scaffolding; code kept verbatim.
269 def getportageversion(portdir, target_root, profile, chost, vardb):
270 profilever = "unavailable"
272 realpath = os.path.realpath(profile)
273 basepath = os.path.realpath(os.path.join(portdir, "profiles"))
274 if realpath.startswith(basepath):
# Profile is inside $PORTDIR/profiles: show the relative subpath.
275 profilever = realpath[1 + len(basepath):]
# Otherwise show the raw symlink target, flagged with a leading "!".
278 profilever = "!" + os.readlink(profile)
281 del realpath, basepath
# Collect installed libc providers (old virtual/glibc kept for compat).
284 libclist = vardb.match("virtual/libc")
285 libclist += vardb.match("virtual/glibc")
286 libclist = portage.util.unique_array(libclist)
288 xs=portage.catpkgsplit(x)
290 libcver+=","+"-".join(xs[1:])
292 libcver="-".join(xs[1:])
294 libcver="unavailable"
296 gccver = getgccversion(chost)
297 unameout=platform.release()+" "+platform.machine()
299 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
# Translate emerge command-line options into the depgraph parameter set.
# NOTE(review): original lines 329 (body of the --deep branch) and 332-333
# (presumably `return myparams`) are missing from this dump.
301 def create_depgraph_params(myopts, myaction):
302 #configure emerge engine parameters
304 # self: include _this_ package regardless of if it is merged.
305 # selective: exclude the package if it is merged
306 # recurse: go into the dependencies
307 # deep: go into the dependencies of already merged packages
308 # empty: pretend nothing is merged
309 # complete: completely account for all known dependencies
310 # remove: build graph for use in removing packages
311 myparams = set(["recurse"])
313 if myaction == "remove":
# Removal graphs must be complete so reverse deps are accounted for.
314 myparams.add("remove")
315 myparams.add("complete")
318 if "--update" in myopts or \
319 "--newuse" in myopts or \
320 "--reinstall" in myopts or \
321 "--noreplace" in myopts:
322 myparams.add("selective")
323 if "--emptytree" in myopts:
# --emptytree rebuilds everything, so "selective" is contradictory.
324 myparams.add("empty")
325 myparams.discard("selective")
326 if "--nodeps" in myopts:
327 myparams.discard("recurse")
328 if "--deep" in myopts:
330 if "--complete-graph" in myopts:
331 myparams.add("complete")
334 # search functionality
# Implements `emerge --search` / `--searchdesc`. NOTE(review): this dump's
# embedded numbering is non-contiguous throughout the class — many lines
# (fake_portdb setup, _dbs initialization, loop bodies, returns, the output()
# method header, etc.) are missing; visible code kept byte-identical.
335 class search(object):
# Aggregates one or more package databases (port/bin/var trees) behind a
# fake portdb object so searches can span ebuilds, binpkgs and installed
# packages with one interface.
346 def __init__(self, root_config, spinner, searchdesc,
347 verbose, usepkg, usepkgonly):
348 """Searches the available and installed packages for the supplied search key.
349 The list of available and installed packages is created at object instantiation.
350 This makes successive searches faster."""
351 self.settings = root_config.settings
352 self.vartree = root_config.trees["vartree"]
353 self.spinner = spinner
354 self.verbose = verbose
355 self.searchdesc = searchdesc
356 self.root_config = root_config
357 self.setconfig = root_config.setconfig
358 self.matches = {"pkg" : []}
# fake_portdb proxies the dbapi methods below to this object's _-prefixed
# multiplexing implementations.
363 self.portdb = fake_portdb
364 for attrib in ("aux_get", "cp_all",
365 "xmatch", "findname", "getFetchMap"):
366 setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
370 portdb = root_config.trees["porttree"].dbapi
371 bindb = root_config.trees["bintree"].dbapi
372 vardb = root_config.trees["vartree"].dbapi
374 if not usepkgonly and portdb._have_root_eclass_dir:
375 self._dbs.append(portdb)
377 if (usepkg or usepkgonly) and bindb.cp_all():
378 self._dbs.append(bindb)
380 self._dbs.append(vardb)
381 self._portdb = portdb
# Union of cat/pkg names across all configured databases, sorted.
386 cp_all.update(db.cp_all())
387 return list(sorted(cp_all))
# Return metadata from the first database that can answer (the loop
# header around this is in a missing line).
389 def _aux_get(self, *args, **kwargs):
392 return db.aux_get(*args, **kwargs)
397 def _findname(self, *args, **kwargs):
399 if db is not self._portdb:
400 # We don't want findname to return anything
401 # unless it's an ebuild in a portage tree.
402 # Otherwise, it's already built and we don't
405 func = getattr(db, "findname", None)
407 value = func(*args, **kwargs)
# Delegate getFetchMap to the first database that implements it.
412 def _getFetchMap(self, *args, **kwargs):
414 func = getattr(db, "getFetchMap", None)
416 value = func(*args, **kwargs)
# Visibility test for a cpv relative to the database it came from.
421 def _visible(self, db, cpv, metadata):
422 installed = db is self.vartree.dbapi
423 built = installed or db is not self._portdb
426 pkg_type = "installed"
429 return visible(self.settings,
430 Package(type_name=pkg_type, root_config=self.root_config,
431 cpv=cpv, built=built, installed=installed, metadata=metadata))
# xmatch() multiplexed across all databases, falling back to match()
# for databases without a native xmatch implementation.
433 def _xmatch(self, level, atom):
435 This method does not expand old-style virtuals because it
436 is restricted to returning matches for a single ${CATEGORY}/${PN}
437 and old-style virtual matches are unreliable for that when querying
438 multiple package databases. If necessary, old-style virtuals
439 can be performed on atoms prior to calling this method.
441 cp = portage.dep_getkey(atom)
442 if level == "match-all":
445 if hasattr(db, "xmatch"):
446 matches.update(db.xmatch(level, atom))
448 matches.update(db.match(atom))
# Discard matches that resolved to a different cat/pkg.
449 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
450 db._cpv_sort_ascending(result)
451 elif level == "match-visible":
454 if hasattr(db, "xmatch"):
455 matches.update(db.xmatch(level, atom))
457 db_keys = list(db._aux_cache_keys)
458 for cpv in db.match(atom):
459 metadata = izip(db_keys,
460 db.aux_get(cpv, db_keys))
461 if not self._visible(db, cpv, metadata):
464 result = list(x for x in matches if portage.cpv_getkey(x) == cp)
465 db._cpv_sort_ascending(result)
466 elif level == "bestmatch-visible":
469 if hasattr(db, "xmatch"):
470 cpv = db.xmatch("bestmatch-visible", atom)
471 if not cpv or portage.cpv_getkey(cpv) != cp:
# Keep the overall best candidate across databases.
473 if not result or cpv == portage.best([cpv, result]):
476 db_keys = Package.metadata_keys
477 # break out of this loop with highest visible
478 # match, checked in descending order
479 for cpv in reversed(db.match(atom)):
480 if portage.cpv_getkey(cpv) != cp:
482 metadata = izip(db_keys,
483 db.aux_get(cpv, db_keys))
484 if not self._visible(db, cpv, metadata):
486 if not result or cpv == portage.best([cpv, result]):
# Any other match level is unsupported here.
490 raise NotImplementedError(level)
493 def execute(self,searchkey):
494 """Performs the search for the supplied search key"""
496 self.searchkey=searchkey
497 self.packagematches = []
500 self.matches = {"pkg":[], "desc":[], "set":[]}
503 self.matches = {"pkg":[], "set":[]}
504 print "Searching... ",
# Leading '%' selects regex mode; '@' restricts to category/name form
# (the flag assignments live in missing lines — confirm).
507 if self.searchkey.startswith('%'):
509 self.searchkey = self.searchkey[1:]
510 if self.searchkey.startswith('@'):
512 self.searchkey = self.searchkey[1:]
514 self.searchre=re.compile(self.searchkey,re.I)
# Non-regex mode: escape the key so it matches literally.
516 self.searchre=re.compile(re.escape(self.searchkey), re.I)
517 for package in self.portdb.cp_all():
518 self.spinner.update()
521 match_string = package[:]
523 match_string = package.split("/")[-1]
526 if self.searchre.search(match_string):
# Name hit but nothing visible -> treated as a masked match.
527 if not self.portdb.xmatch("match-visible", package):
529 self.matches["pkg"].append([package,masked])
530 elif self.searchdesc: # DESCRIPTION searching
531 full_package = self.portdb.xmatch("bestmatch-visible", package)
533 #no match found; we don't want to query description
534 full_package = portage.best(
535 self.portdb.xmatch("match-all", package))
541 full_desc = self.portdb.aux_get(
542 full_package, ["DESCRIPTION"])[0]
544 print "emerge: search: aux_get() failed, skipping"
546 if self.searchre.search(full_desc):
547 self.matches["desc"].append([full_package,masked])
# Also search configured package sets (names and descriptions).
549 self.sdict = self.setconfig.getSets()
550 for setname in self.sdict:
551 self.spinner.update()
553 match_string = setname
555 match_string = setname.split("/")[-1]
557 if self.searchre.search(match_string):
558 self.matches["set"].append([setname, False])
559 elif self.searchdesc:
560 if self.searchre.search(
561 self.sdict[setname].getMetadata("DESCRIPTION")):
562 self.matches["set"].append([setname, False])
565 for mtype in self.matches:
566 self.matches[mtype].sort()
567 self.mlen += len(self.matches[mtype])
# Helper used for direct cat/pkg lookups (its def line is missing).
570 if not self.portdb.xmatch("match-all", cp):
573 if not self.portdb.xmatch("bestmatch-visible", cp):
575 self.matches["pkg"].append([cp, masked])
579 """Outputs the results of the search."""
580 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
581 print "[ Applications found : "+white(str(self.mlen))+" ]"
583 vardb = self.vartree.dbapi
584 for mtype in self.matches:
585 for match,masked in self.matches[mtype]:
589 full_package = self.portdb.xmatch(
590 "bestmatch-visible", match)
592 #no match found; we don't want to query description
594 full_package = portage.best(
595 self.portdb.xmatch("match-all",match))
596 elif mtype == "desc":
598 match = portage.cpv_getkey(match)
# Set matches print name + set DESCRIPTION metadata only.
600 print green("*")+"  "+white(match)
601 print "      ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
605 desc, homepage, license = self.portdb.aux_get(
606 full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
608 print "emerge: search: aux_get() failed, skipping"
611 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
613 print green("*")+"  "+white(match)
614 myversion = self.getVersion(full_package, search.VERSION_RELEASE)
618 mycat = match.split("/")[0]
619 mypkg = match.split("/")[1]
620 mycpv = match + "-" + myversion
621 myebuild = self.portdb.findname(mycpv)
623 pkgdir = os.path.dirname(myebuild)
624 from portage import manifest
625 mf = manifest.Manifest(
626 pkgdir, self.settings["DISTDIR"])
628 uri_map = self.portdb.getFetchMap(mycpv)
629 except portage.exception.InvalidDependString, e:
630 file_size_str = "Unknown (%s)" % (e,)
634 mysum[0] = mf.getDistfilesSize(uri_map)
636 file_size_str = "Unknown (missing " + \
637 "digest for %s)" % (e,)
# No ebuild: fall back to the binpkg file size if one exists.
642 if db is not vardb and \
643 db.cpv_exists(mycpv):
645 if not myebuild and hasattr(db, "bintree"):
646 myebuild = db.bintree.getname(mycpv)
648 mysum[0] = os.stat(myebuild).st_size
653 if myebuild and file_size_str is None:
# Integer kB with manual thousands separators (grouping loop
# is in missing lines).
654 mystr = str(mysum[0] / 1024)
658 mystr = mystr[:mycount] + "," + mystr[mycount:]
659 file_size_str = mystr + " kB"
663 print "      ", darkgreen("Latest version available:"),myversion
664 print "      ", self.getInstallationStatus(mycat+'/'+mypkg)
667 (darkgreen("Size of files:"), file_size_str)
668 print "      ", darkgreen("Homepage:")+"     ",homepage
669 print "      ", darkgreen("Description:")+"  ",desc
670 print "      ", darkgreen("License:")+"      ",license
# Render the installed-version line for a cat/pkg (or Not Installed).
675 def getInstallationStatus(self,package):
676 installed_package = self.vartree.dep_bestmatch(package)
678 version = self.getVersion(installed_package,search.VERSION_RELEASE)
680 result = darkgreen("Latest version installed:")+" "+version
682 result = darkgreen("Latest version installed:")+" [ Not Installed ]"
# Extract version (optionally with -rN revision) from a full cpv.
685 def getVersion(self,full_package,detail):
686 if len(full_package) > 1:
687 package_parts = portage.catpkgsplit(full_package)
688 if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
689 result = package_parts[2]+ "-" + package_parts[3]
691 result = package_parts[2]
# NOTE(review): dump is gapped (original lines 698-700, 704-706, 708-709,
# 711, 717-718, 721 missing) — the pkg_tree_map opening, the reverse-map
# construction and the setconfig fallback are not visible.
696 class RootConfig(object):
697 """This is used internally by depgraph to track information about a
# Maps Package type_name -> tree name in root_config.trees.
701 "ebuild" : "porttree",
702 "binary" : "bintree",
703 "installed" : "vartree"
# Builds the inverse (tree name -> type_name) mapping.
707 for k, v in pkg_tree_map.iteritems():
710 def __init__(self, settings, trees, setconfig):
712 self.settings = settings
713 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
714 self.root = self.settings["ROOT"]
715 self.setconfig = setconfig
716 if setconfig is None:
719 self.sets = self.setconfig.getSets()
# In-memory dbapi of packages already deemed visible for this root.
720 self.visible_pkgs = PackageVirtualDbapi(self.settings)
# NOTE(review): this dump omits interleaved lines (728-729, 731-732, 734,
# 742, 752, 754-755, 758, 761, 768-769, 772, 781, 785, 788-789, 793, 795,
# 802) — including the early-return guards and the vardb/portdb loop
# scaffolding; visible code kept byte-identical.
722 def create_world_atom(pkg, args_set, root_config):
723 """Create a new atom for the world file if one does not exist. If the
724 argument atom is precise enough to identify a specific slot then a slot
725 atom will be returned. Atoms that are in the system set may also be stored
726 in world since system atoms can only match one slot while world atoms can
727 be greedy with respect to slots. Unslotted system packages will not be
730 arg_atom = args_set.findAtomForPackage(pkg)
733 cp = portage.dep_getkey(arg_atom)
735 sets = root_config.sets
736 portdb = root_config.trees["porttree"].dbapi
737 vardb = root_config.trees["vartree"].dbapi
# A package is "slotted" when more than one SLOT exists, or its only
# SLOT is not the default "0".
738 available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
739 for cpv in portdb.match(cp))
740 slotted = len(available_slots) > 1 or \
741 (len(available_slots) == 1 and "0" not in available_slots)
743 # check the vdb in case this is multislot
744 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
745 for cpv in vardb.match(cp))
746 slotted = len(available_slots) > 1 or \
747 (len(available_slots) == 1 and "0" not in available_slots)
748 if slotted and arg_atom != cp:
749 # If the user gave a specific atom, store it as a
750 # slot atom in the world file.
751 slot_atom = pkg.slot_atom
753 # For USE=multislot, there are a couple of cases to
756 # 1) SLOT="0", but the real SLOT spontaneously changed to some
757 # unknown value, so just record an unslotted atom.
759 # 2) SLOT comes from an installed package and there is no
760 # matching SLOT in the portage tree.
762 # Make sure that the slot atom is available in either the
763 # portdb or the vardb, since otherwise the user certainly
764 # doesn't want the SLOT atom recorded in the world file
765 # (case 1 above). If it's only available in the vardb,
766 # the user may be trying to prevent a USE=multislot
767 # package from being removed by --depclean (case 2 above).
770 if not portdb.match(slot_atom):
771 # SLOT seems to come from an installed multislot package
773 # If there is no installed package matching the SLOT atom,
774 # it probably changed SLOT spontaneously due to USE=multislot,
775 # so just record an unslotted atom.
776 if vardb.match(slot_atom):
777 # Now verify that the argument is precise
778 # enough to identify a specific slot.
779 matches = mydb.match(arg_atom)
780 matched_slots = set()
782 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
783 if len(matched_slots) == 1:
784 new_world_atom = slot_atom
786 if new_world_atom == sets["world"].findAtomForPackage(pkg):
787 # Both atoms would be identical, so there's nothing to add.
790 # Unlike world atoms, system atoms are not greedy for slots, so they
791 # can't be safely excluded from world if they are slotted.
792 system_atom = sets["system"].findAtomForPackage(pkg)
794 if not portage.dep_getkey(system_atom).startswith("virtual/"):
796 # System virtuals aren't safe to exclude from world since they can
797 # match multiple old-style virtuals but only one of them will be
798 # pulled in by update or depclean.
799 providers = portdb.mysettings.getvirtuals().get(
800 portage.dep_getkey(system_atom))
801 if providers and len(providers) == 1 and providers[0] == cp:
803 return new_world_atom
# Generator that strips leading +/- default markers from IUSE flags.
# NOTE(review): the loop header and yield lines (original 806, 808-810)
# are missing from this dump.
805 def filter_iuse_defaults(iuse):
807 if flag.startswith("+") or flag.startswith("-"):
# Walk the depgraph from system-set packages, following only runtime
# dependency edges, and return the set of packages reached. NOTE(review):
# several lines (original 814-815, 818, 821, 823, 825, 828-831, 834, 839,
# 841) are missing — including the node_stack initialization, the seed
# loop header, the continue statements and ignore_priority's returns.
812 def _find_deep_system_runtime_deps(graph):
813 deep_system_deps = set()
# Seed the traversal with system-set packages that are not scheduled
# for uninstall.
816 if not isinstance(node, Package) or \
817 node.operation == 'uninstall':
819 if node.root_config.sets['system'].findAtomForPackage(node):
820 node_stack.append(node)
822 def ignore_priority(priority):
824 Ignore non-runtime priorities.
826 if isinstance(priority, DepPriority) and \
827 (priority.runtime or priority.runtime_post):
# Depth-first traversal over runtime edges only.
832 node = node_stack.pop()
833 if node in deep_system_deps:
835 deep_system_deps.add(node)
836 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
837 if not isinstance(child, Package) or \
838 child.operation == 'uninstall':
840 node_stack.append(child)
842 return deep_system_deps
# NOTE(review): the embedded numbering is non-contiguous throughout this
# class — try/except/finally scaffolding, slot_counters initialization,
# several continue/return lines and the sync() docstring tail are missing
# from this dump; visible code kept byte-identical.
844 class FakeVartree(portage.vartree):
845 """This implements an in-memory copy of a vartree instance that provides
846 all the interfaces required for use by the depgraph. The vardb is locked
847 during the constructor call just long enough to read a copy of the
848 installed package information. This allows the depgraph to do its
849 dependency calculations without holding a lock on the vardb. It also
850 allows things like vardb global updates to be done in memory so that the
851 user doesn't necessarily need write access to the vardb in cases where
852 global updates are necessary (updates are performed when necessary if there
853 is not a matching ebuild in the tree)."""
854 def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
855 self._root_config = root_config
856 if pkg_cache is None:
858 real_vartree = root_config.trees["vartree"]
859 portdb = root_config.trees["porttree"].dbapi
860 self.root = real_vartree.root
861 self.settings = real_vartree.settings
# Make sure _mtime_ is fetched with the aux cache keys so package
# timestamps can be validated later in sync().
862 mykeys = list(real_vartree.dbapi._aux_cache_keys)
863 if "_mtime_" not in mykeys:
864 mykeys.append("_mtime_")
865 self._db_keys = mykeys
866 self._pkg_cache = pkg_cache
867 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
868 vdb_path = os.path.join(self.root, portage.VDB_PATH)
870 # At least the parent needs to exist for the lock file.
871 portage.util.ensure_dirs(vdb_path)
872 except portage.exception.PortageException:
# Only lock when we actually have write access to the vdb.
876 if acquire_lock and os.access(vdb_path, os.W_OK):
877 vdb_lock = portage.locks.lockdir(vdb_path)
878 real_dbapi = real_vartree.dbapi
# Snapshot every installed package into the in-memory dbapi,
# reusing cached Package instances where possible.
880 for cpv in real_dbapi.cpv_all():
881 cache_key = ("installed", self.root, cpv, "nomerge")
882 pkg = self._pkg_cache.get(cache_key)
884 metadata = pkg.metadata
886 metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
887 myslot = metadata["SLOT"]
888 mycp = portage.dep_getkey(cpv)
889 myslot_atom = "%s:%s" % (mycp, myslot)
890 mycounter = long(metadata["COUNTER"])
891 mycounter = long(metadata["COUNTER"])
894 metadata["COUNTER"] = str(mycounter)
# Keep only the highest-COUNTER package per slot atom.
895 other_counter = slot_counters.get(myslot_atom, None)
896 if other_counter is not None:
897 if other_counter > mycounter:
899 slot_counters[myslot_atom] = mycounter
901 pkg = Package(built=True, cpv=cpv,
902 installed=True, metadata=metadata,
903 root_config=root_config, type_name="installed")
904 self._pkg_cache[pkg] = pkg
905 self.dbapi.cpv_inject(pkg)
906 real_dbapi.flush_cache()
909 portage.locks.unlockdir(vdb_lock)
910 # Populate the old-style virtuals using the cached values.
911 if not self.settings.treeVirtuals:
912 self.settings.treeVirtuals = portage.util.map_dictlist_vals(
913 portage.getCPFromCPV, self.get_all_provides())
915 # Initialize variables needed for lazy cache pulls of the live ebuild
916 # metadata. This ensures that the vardb lock is released ASAP, without
917 # being delayed in case cache generation is triggered.
918 self._aux_get = self.dbapi.aux_get
919 self.dbapi.aux_get = self._aux_get_wrapper
920 self._match = self.dbapi.match
921 self.dbapi.match = self._match_wrapper
922 self._aux_get_history = set()
923 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
924 self._portdb = portdb
925 self._global_updates = None
927 def _match_wrapper(self, cpv, use_cache=1):
929 Make sure the metadata in Package instances gets updated for any
930 cpv that is returned from a match() call, since the metadata can
931 be accessed directly from the Package instance instead of via
934 matches = self._match(cpv, use_cache=use_cache)
936 if cpv in self._aux_get_history:
# Empty wants-list: the wrapper is called only for its
# metadata-refresh side effect.
938 self._aux_get_wrapper(cpv, [])
941 def _aux_get_wrapper(self, pkg, wants):
942 if pkg in self._aux_get_history:
943 return self._aux_get(pkg, wants)
944 self._aux_get_history.add(pkg)
946 # Use the live ebuild metadata if possible.
947 live_metadata = dict(izip(self._portdb_keys,
948 self._portdb.aux_get(pkg, self._portdb_keys)))
949 if not portage.eapi_is_supported(live_metadata["EAPI"]):
951 self.dbapi.aux_update(pkg, live_metadata)
# No matching ebuild (or unreadable): fall back to applying
# cached global updates (pkgmoves etc.) to the stored metadata.
952 except (KeyError, portage.exception.PortageException):
953 if self._global_updates is None:
954 self._global_updates = \
955 grab_global_updates(self._portdb.porttree_root)
956 perform_global_updates(
957 pkg, self.dbapi, self._global_updates)
958 return self._aux_get(pkg, wants)
960 def sync(self, acquire_lock=1):
962 Call this method to synchronize state with the real vardb
963 after one or more packages may have been installed or
966 vdb_path = os.path.join(self.root, portage.VDB_PATH)
968 # At least the parent needs to exist for the lock file.
969 portage.util.ensure_dirs(vdb_path)
970 except portage.exception.PortageException:
974 if acquire_lock and os.access(vdb_path, os.W_OK):
975 vdb_lock = portage.locks.lockdir(vdb_path)
979 portage.locks.unlockdir(vdb_lock)
# Body of the actual sync (its def/header line is in a missing
# line range — presumably a private _sync helper; confirm).
983 real_vardb = self._root_config.trees["vartree"].dbapi
984 current_cpv_set = frozenset(real_vardb.cpv_all())
985 pkg_vardb = self.dbapi
986 aux_get_history = self._aux_get_history
988 # Remove any packages that have been uninstalled.
989 for pkg in list(pkg_vardb):
990 if pkg.cpv not in current_cpv_set:
991 pkg_vardb.cpv_remove(pkg)
992 aux_get_history.discard(pkg.cpv)
994 # Validate counters and timestamps.
997 validation_keys = ["COUNTER", "_mtime_"]
998 for cpv in current_cpv_set:
1000 pkg_hash_key = ("installed", root, cpv, "nomerge")
1001 pkg = pkg_vardb.get(pkg_hash_key)
1003 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1005 counter = long(counter)
# Stale entry: counter or mtime changed on disk -> re-read it.
1009 if counter != pkg.counter or \
1011 pkg_vardb.cpv_remove(pkg)
1012 aux_get_history.discard(pkg.cpv)
1016 pkg = self._pkg(cpv)
1018 other_counter = slot_counters.get(pkg.slot_atom)
1019 if other_counter is not None:
1020 if other_counter > pkg.counter:
1023 slot_counters[pkg.slot_atom] = pkg.counter
1024 pkg_vardb.cpv_inject(pkg)
1026 real_vardb.flush_cache()
# Construct a fresh installed-Package instance for cpv from the
# real vardb metadata.
1028 def _pkg(self, cpv):
1029 root_config = self._root_config
1030 real_vardb = root_config.trees["vartree"].dbapi
1031 pkg = Package(cpv=cpv, installed=True,
1032 metadata=izip(self._db_keys,
1033 real_vardb.aux_get(cpv, self._db_keys)),
1034 root_config=root_config,
1035 type_name="installed")
1038 mycounter = long(pkg.metadata["COUNTER"])
1041 pkg.metadata["COUNTER"] = str(mycounter)
# Read and parse all update commands from $portdir/profiles/updates.
# NOTE(review): original lines 1048, 1051-1052 and 1056-1057 are missing
# (presumably the try, the empty-result fallback, and the return).
1045 def grab_global_updates(portdir):
1046 from portage.update import grab_updates, parse_updates
1047 updpath = os.path.join(portdir, "profiles", "updates")
1049 rawupdates = grab_updates(updpath)
1050 except portage.exception.DirectoryNotFound:
# Accumulate parsed commands from each update file; parse errors
# from parse_updates() are captured in `errors` (handling of which
# is not visible here).
1053 for mykey, mystat, mycontent in rawupdates:
1054 commands, errors = parse_updates(mycontent)
1055 upd_commands.extend(commands)
# Apply global update commands (pkgmoves/slotmoves) to one package's
# dependency metadata inside `mydb`. NOTE(review): original line 1063
# (presumably the `if updates:` guard) is missing from this dump.
1058 def perform_global_updates(mycpv, mydb, mycommands):
1059 from portage.update import update_dbentries
1060 aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1061 aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1062 updates = update_dbentries(mycommands, aux_dict)
1064 mydb.aux_update(mycpv, updates)
# NOTE(review): every `return True/False` line of this function (original
# 1071, 1073, 1075, 1078, 1081, 1084, 1086, 1088, 1090-1091, 1093, 1095-1097)
# is missing from this dump — only the mask-check conditions are visible.
1066 def visible(pkgsettings, pkg):
1068 Check if a package is visible. This can raise an InvalidDependString
1069 exception if LICENSE is invalid.
1070 TODO: optionally generate a list of masking reasons
1072 @returns: True if the package is visible, False otherwise.
# Packages without SLOT metadata are treated as invalid.
1074 if not pkg.metadata["SLOT"]:
1076 if not pkg.installed:
1077 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1079 eapi = pkg.metadata["EAPI"]
1080 if not portage.eapi_is_supported(eapi):
1082 if not pkg.installed:
1083 if portage._eapi_is_deprecated(eapi):
# Keyword, package.mask, profile and license checks follow.
1085 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1087 if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1089 if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1092 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1094 except portage.exception.InvalidDependString:
# Collect human-readable masking reasons for a package, adding CHOST and
# missing-SLOT reasons that getmaskingstatus() does not cover. NOTE(review):
# the return line (original 1111-ish) is missing from this dump.
1098 def get_masking_status(pkg, pkgsettings, root_config):
1100 mreasons = portage.getmaskingstatus(
1101 pkg, settings=pkgsettings,
1102 portdb=root_config.trees["porttree"].dbapi)
1104 if not pkg.installed:
# Report a CHOST mismatch for uninstalled packages.
1105 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1106 mreasons.append("CHOST: %s" % \
1107 pkg.metadata["CHOST"])
1109 if not pkg.metadata["SLOT"]:
1110 mreasons.append("invalid: SLOT is undefined")
# Fetch metadata for cpv from `db` and compute its masking reasons.
# Returns (metadata, mreasons); metadata is None on aux_get failure.
# NOTE(review): original lines 1116-1117, 1120-1121, 1128, 1130-1131 and
# 1134 (the try/except around aux_get and the else branches) are missing.
1114 def get_mask_info(root_config, cpv, pkgsettings,
1115 db, pkg_type, built, installed, db_keys):
1118 metadata = dict(izip(db_keys,
1119 db.aux_get(cpv, db_keys)))
1122 if metadata and not built:
# For ebuilds, compute USE/CHOST from the config for this cpv.
1123 pkgsettings.setcpv(cpv, mydb=metadata)
1124 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1125 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1126 if metadata is None:
1127 mreasons = ["corruption"]
1129 eapi = metadata['EAPI']
1132 if not portage.eapi_is_supported(eapi):
1133 mreasons = ['EAPI %s' % eapi]
1135 pkg = Package(type_name=pkg_type, root_config=root_config,
1136 cpv=cpv, built=built, installed=installed, metadata=metadata)
1137 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1138 return metadata, mreasons
# Print each masked package with its masking reasons, the package.mask
# comment that masked it (once per distinct comment), and license file
# locations (once per license). Returns True if any package was masked
# for an unsupported EAPI. NOTE(review): interleaved lines (original 1145,
# 1150-1151, 1161, 1164, 1167, 1171-1172, 1175-1176, 1182, 1185-1186) are
# missing — including shown_cpvs initialization and several print/continue
# statements.
1140 def show_masked_packages(masked_packages):
1141 shown_licenses = set()
1142 shown_comments = set()
1143 # Maybe there is both an ebuild and a binary. Only
1144 # show one of them to avoid redundant appearance.
1146 have_eapi_mask = False
1147 for (root_config, pkgsettings, cpv,
1148 metadata, mreasons) in masked_packages:
1149 if cpv in shown_cpvs:
1152 comment, filename = None, None
1153 if "package.mask" in mreasons:
# Look up the comment block and file that masked this cpv.
1154 comment, filename = \
1155 portage.getmaskingreason(
1156 cpv, metadata=metadata,
1157 settings=pkgsettings,
1158 portdb=root_config.trees["porttree"].dbapi,
1159 return_location=True)
1160 missing_licenses = []
1162 if not portage.eapi_is_supported(metadata["EAPI"]):
1163 have_eapi_mask = True
1165 missing_licenses = \
1166 pkgsettings._getMissingLicenses(
1168 except portage.exception.InvalidDependString:
1169 # This will have already been reported
1170 # above via mreasons.
1173 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1174 if comment and comment not in shown_comments:
1177 shown_comments.add(comment)
1178 portdb = root_config.trees["porttree"].dbapi
1179 for l in missing_licenses:
1180 l_path = portdb.findLicensePath(l)
1181 if l in shown_licenses:
1183 msg = ("A copy of the '%s' license" + \
1184 " is located at '%s'.") % (l, l_path)
1187 shown_licenses.add(l)
1188 return have_eapi_mask
# A Task subclass representing one package instance (ebuild, binary, or
# installed). Wraps its metadata in _PackageMetadataWrapper so that
# attribute mirrors (iuse, use, slot, counter, ...) stay synchronized.
# NOTE(review): this chunk is missing lines -- e.g. the "metadata_keys ="
# list header before 1201, the "class _use(object):" line before 1223,
# the IUSE token classification loop around 1241, and the bodies of the
# rich-comparison methods. Confirm against upstream.
1190 class Package(Task):
1192 __hash__ = Task.__hash__
1193 __slots__ = ("built", "cpv", "depth",
1194 "installed", "metadata", "onlydeps", "operation",
1195 "root_config", "type_name",
1196 "category", "counter", "cp", "cpv_split",
1197 "inherited", "iuse", "mtime",
1198 "pf", "pv_split", "root", "slot", "slot_atom", "use")
# The metadata keys every Package carries (list header not visible here).
1201 "CHOST", "COUNTER", "DEPEND", "EAPI",
1202 "INHERITED", "IUSE", "KEYWORDS",
1203 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1204 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1206 def __init__(self, **kwargs):
1207 Task.__init__(self, **kwargs)
1208 self.root = self.root_config.root
# Wrapping metadata keeps derived attributes (iuse/use/slot/...) in sync
# whenever a metadata key is assigned.
1209 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1210 self.cp = portage.cpv_getkey(self.cpv)
1213 # Avoid an InvalidAtom exception when creating slot_atom.
1214 # This package instance will be masked due to empty SLOT.
1216 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1217 self.category, self.pf = portage.catsplit(self.cpv)
1218 self.cpv_split = portage.catpkgsplit(self.cpv)
1219 self.pv_split = self.cpv_split[1:]
# Inner helper holding the enabled USE flags as a frozenset (its
# "class _use(object):" header line is not visible in this chunk).
1223 __slots__ = ("__weakref__", "enabled")
1225 def __init__(self, use):
1226 self.enabled = frozenset(use)
# Inner helper modeling IUSE: tokens split into enabled (+x), disabled
# (-x) and unprefixed flags, plus a lazily-built validation regex.
1228 class _iuse(object):
1230 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1232 def __init__(self, tokens, iuse_implicit):
1233 self.tokens = tuple(tokens)
1234 self.iuse_implicit = iuse_implicit
# Token-classification loop (its for/if lines are not visible here).
1241 enabled.append(x[1:])
1243 disabled.append(x[1:])
1246 self.enabled = frozenset(enabled)
1247 self.disabled = frozenset(disabled)
1248 self.all = frozenset(chain(enabled, disabled, other))
# Lazily compile self.regex on first access; subsequent accesses hit the
# cached attribute via the early return.
1250 def __getattribute__(self, name):
1253 return object.__getattribute__(self, "regex")
1254 except AttributeError:
1255 all = object.__getattribute__(self, "all")
1256 iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1257 # Escape anything except ".*" which is supposed
1258 # to pass through from _get_implicit_iuse()
1259 regex = (re.escape(x) for x in chain(all, iuse_implicit))
1260 regex = "^(%s)$" % "|".join(regex)
1261 regex = regex.replace("\\.\\*", ".*")
1262 self.regex = re.compile(regex)
1263 return object.__getattribute__(self, name)
# Hash key is (type_name, root, cpv, operation); operation defaults to
# "merge" unless the package is onlydeps/installed ("nomerge").
1265 def _get_hash_key(self):
1266 hash_key = getattr(self, "_hash_key", None)
1267 if hash_key is None:
1268 if self.operation is None:
1269 self.operation = "merge"
1270 if self.onlydeps or self.installed:
1271 self.operation = "nomerge"
1273 (self.type_name, self.root, self.cpv, self.operation)
1274 return self._hash_key
# Rich comparisons order packages by version via portage.pkgcmp; packages
# with different cp values compare unequal/false (bodies partly missing).
1276 def __lt__(self, other):
1277 if other.cp != self.cp:
1279 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1283 def __le__(self, other):
1284 if other.cp != self.cp:
1286 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1290 def __gt__(self, other):
1291 if other.cp != self.cp:
1293 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1297 def __ge__(self, other):
1298 if other.cp != self.cp:
1300 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
# Build the full key set for the metadata slot-dict: all non-UNUSED
# auxdb keys plus Package.metadata_keys, minus the legacy CDEPEND key.
1304 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1305 if not x.startswith("UNUSED_"))
1306 _all_metadata_keys.discard("CDEPEND")
1307 _all_metadata_keys.update(Package.metadata_keys)
# slot_dict_class generates a dict-like class restricted to these keys.
1309 from portage.cache.mappings import slot_dict_class
1310 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
# Dict-like metadata container that mirrors writes of selected keys onto
# the owning Package's attributes (counter, inherited, iuse, slot, use,
# mtime) via the _set_<key>() hooks dispatched from __setitem__.
# NOTE(review): lines missing in this chunk -- e.g. "self._pkg = pkg" in
# __init__, the bodies of _set_slot and the int-conversion fallbacks in
# _set_counter/_set__mtime_. Confirm against upstream.
1312 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1314 Detect metadata updates and synchronize Package attributes.
1317 __slots__ = ("_pkg",)
# Only these keys trigger a synchronization hook on assignment.
1318 _wrapped_keys = frozenset(
1319 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1321 def __init__(self, pkg, metadata):
1322 _PackageMetadataWrapperBase.__init__(self)
# update() funnels every initial key through __setitem__, firing hooks.
1324 self.update(metadata)
1326 def __setitem__(self, k, v):
1327 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1328 if k in self._wrapped_keys:
# Dispatch to _set_counter / _set_inherited / etc. by key name.
1329 getattr(self, "_set_" + k.lower())(k, v)
1331 def _set_inherited(self, k, v):
1332 if isinstance(v, basestring):
1333 v = frozenset(v.split())
1334 self._pkg.inherited = v
1336 def _set_iuse(self, k, v):
1337 self._pkg.iuse = self._pkg._iuse(
1338 v.split(), self._pkg.root_config.iuse_implicit)
# Body of _set_slot is not visible here (presumably sets self._pkg.slot).
1340 def _set_slot(self, k, v):
1343 def _set_use(self, k, v):
1344 self._pkg.use = self._pkg._use(v.split())
# String COUNTER values are converted (conversion lines not visible).
1346 def _set_counter(self, k, v):
1347 if isinstance(v, basestring):
1352 self._pkg.counter = v
1354 def _set__mtime_(self, k, v):
1355 if isinstance(v, basestring):
# AsynchronousTask that uninstalls self.pkg by calling unmerge(); sets
# self.returncode to os.EX_OK on success or to the UninstallFailure
# status on failure.
# NOTE(review): lines missing in this chunk -- the "_start" def and its
# try:, the else: before 1375, the wait/exit plumbing, and the writes /
# close of the log file opened at 1392. Confirm against upstream.
1362 class PackageUninstall(AsynchronousTask):
1364 __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
# unmerge() is invoked with raise_on_error=1 so failures surface as
# UninstallFailure below rather than as a return code.
1368 unmerge(self.pkg.root_config, self.opts, "unmerge",
1369 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
1370 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
1371 writemsg_level=self._writemsg_level)
1372 except UninstallFailure, e:
1373 self.returncode = e.status
1375 self.returncode = os.EX_OK
# Route unmerge output either to the console (respecting background /
# log-level rules) or to PORTAGE_LOG_FILE when one is configured.
1378 def _writemsg_level(self, msg, level=0, noiselevel=0):
1380 log_path = self.settings.get("PORTAGE_LOG_FILE")
1381 background = self.background
1383 if log_path is None:
# In background mode, suppress messages below WARNING severity.
1384 if not (background and level < logging.WARNING):
1385 portage.util.writemsg_level(msg,
1386 level=level, noiselevel=noiselevel)
1389 portage.util.writemsg_level(msg,
1390 level=level, noiselevel=noiselevel)
# NOTE(review): the write/close for this append-mode handle is not
# visible in this chunk -- verify the file is closed upstream.
1392 f = open(log_path, 'a')
# CompositeTask wrapping one entry of the merge list: prints the
# "Emerging/Fetching (m of n) cpv" progress line, then delegates to an
# EbuildBuild or Binpkg sub-task; also drives the uninstall path and the
# final install step.
# NOTE(review): several method "def" lines are missing from this chunk
# (e.g. _start, _poll, _wait, merge), as are various else-branches and the
# "uninstall" type_name branch. Confirm against upstream.
1398 class MergeListItem(CompositeTask):
1401 TODO: For parallel scheduling, everything here needs asynchronous
1402 execution support (start, poll, and wait methods).
1405 __slots__ = ("args_set",
1406 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
1407 "find_blockers", "logger", "mtimedb", "pkg",
1408 "pkg_count", "pkg_to_replace", "prefetcher",
1409 "settings", "statusMessage", "world_atom") + \
# Start of the (unnamed here) start method; uninstalls are deferred to
# self.merge(), so this task finishes immediately for them.
1415 build_opts = self.build_opts
1418 # uninstall, executed by self.merge()
1419 self.returncode = os.EX_OK
# Bind frequently-used attributes to locals for the progress message.
1423 args_set = self.args_set
1424 find_blockers = self.find_blockers
1425 logger = self.logger
1426 mtimedb = self.mtimedb
1427 pkg_count = self.pkg_count
1428 scheduler = self.scheduler
1429 settings = self.settings
1430 world_atom = self.world_atom
1431 ldpath_mtimes = mtimedb["ldpath"]
1433 action_desc = "Emerging"
1435 if pkg.type_name == "binary":
1436 action_desc += " binary"
1438 if build_opts.fetchonly:
1439 action_desc = "Fetching"
1441 msg = "%s (%s of %s) %s" % \
1443 colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
1444 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
1445 colorize("GOOD", pkg.cpv))
# Annotate the message with the source repository when it differs from
# the primary PORTDIR repository.
1447 portdb = pkg.root_config.trees["porttree"].dbapi
1448 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
1449 if portdir_repo_name:
1450 pkg_repo_name = pkg.metadata.get("repository")
1451 if pkg_repo_name != portdir_repo_name:
1452 if not pkg_repo_name:
1453 pkg_repo_name = "unknown repo"
1454 msg += " from %s" % pkg_repo_name
# "preposition" is assigned on a line not visible in this chunk.
1457 msg += " %s %s" % (preposition, pkg.root)
1459 if not build_opts.pretend:
1460 self.statusMessage(msg)
1461 logger.log(" >>> emerge (%s of %s) %s to %s" % \
1462 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
# Ebuild packages are built via an EbuildBuild sub-task.
1464 if pkg.type_name == "ebuild":
1466 build = EbuildBuild(args_set=args_set,
1467 background=self.background,
1468 config_pool=self.config_pool,
1469 find_blockers=find_blockers,
1470 ldpath_mtimes=ldpath_mtimes, logger=logger,
1471 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
1472 prefetcher=self.prefetcher, scheduler=scheduler,
1473 settings=settings, world_atom=world_atom)
1475 self._install_task = build
1476 self._start_task(build, self._default_final_exit)
# Binary packages go through a Binpkg sub-task instead.
1479 elif pkg.type_name == "binary":
1481 binpkg = Binpkg(background=self.background,
1482 find_blockers=find_blockers,
1483 ldpath_mtimes=ldpath_mtimes, logger=logger,
1484 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
1485 prefetcher=self.prefetcher, settings=settings,
1486 scheduler=scheduler, world_atom=world_atom)
1488 self._install_task = binpkg
1489 self._start_task(binpkg, self._default_final_exit)
# poll/wait delegate to the active sub-task (def lines not visible).
1493 self._install_task.poll()
1494 return self.returncode
1497 self._install_task.wait()
1498 return self.returncode
# merge(): handles the uninstall case and the final install step
# (its def line and pkg binding are not visible in this chunk).
1503 build_opts = self.build_opts
1504 find_blockers = self.find_blockers
1505 logger = self.logger
1506 mtimedb = self.mtimedb
1507 pkg_count = self.pkg_count
1508 prefetcher = self.prefetcher
1509 scheduler = self.scheduler
1510 settings = self.settings
1511 world_atom = self.world_atom
1512 ldpath_mtimes = mtimedb["ldpath"]
# Real uninstalls only happen outside of fetch/buildpkg/pretend modes.
1515 if not (build_opts.buildpkgonly or \
1516 build_opts.fetchonly or build_opts.pretend):
1518 uninstall = PackageUninstall(background=self.background,
1519 ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
1520 pkg=pkg, scheduler=scheduler, settings=settings)
1523 retval = uninstall.wait()
1524 if retval != os.EX_OK:
1528 if build_opts.fetchonly or \
1529 build_opts.buildpkgonly:
1530 return self.returncode
1532 retval = self._install_task.install()
# Computes blocker relationships between a new package and the installed
# package set, caching per-package blocker atoms in a BlockerCache keyed
# by cpv and invalidated via the installed COUNTER value.
# NOTE(review): lines missing in this chunk -- try/except around
# dep_check, else-branches for the cache-hit path, the success check, and
# the "blocker_atoms = []" initialization before the loop at 1607.
1535 class BlockerDB(object):
1537 def __init__(self, root_config):
1538 self._root_config = root_config
1539 self._vartree = root_config.trees["vartree"]
1540 self._portdb = root_config.trees["porttree"].dbapi
1542 self._dep_check_trees = None
1543 self._fake_vartree = None
# Lazily build (and thereafter sync) a FakeVartree plus the dep_check
# trees mapping that routes both porttree and vartree lookups to it.
1545 def _get_fake_vartree(self, acquire_lock=0):
1546 fake_vartree = self._fake_vartree
1547 if fake_vartree is None:
1548 fake_vartree = FakeVartree(self._root_config,
1549 acquire_lock=acquire_lock)
1550 self._fake_vartree = fake_vartree
1551 self._dep_check_trees = { self._vartree.root : {
1552 "porttree" : fake_vartree,
1553 "vartree" : fake_vartree,
1556 fake_vartree.sync(acquire_lock=acquire_lock)
# Return the set of installed packages that block *new_pkg*, in either
# direction (installed blocks new, or new blocks installed).
1559 def findInstalledBlockers(self, new_pkg, acquire_lock=0):
1560 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
1561 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1562 settings = self._vartree.settings
# Track cache entries whose cpv no longer exists so they can be pruned.
1563 stale_cache = set(blocker_cache)
1564 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
1565 dep_check_trees = self._dep_check_trees
1566 vardb = fake_vartree.dbapi
1567 installed_pkgs = list(vardb)
1569 for inst_pkg in installed_pkgs:
1570 stale_cache.discard(inst_pkg.cpv)
1571 cached_blockers = blocker_cache.get(inst_pkg.cpv)
# A COUNTER mismatch means the installed instance changed since the
# cache entry was written; treat it as a miss.
1572 if cached_blockers is not None and \
1573 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
1574 cached_blockers = None
1575 if cached_blockers is not None:
1576 blocker_atoms = cached_blockers.atoms
1578 # Use aux_get() to trigger FakeVartree global
1579 # updates on *DEPEND when appropriate.
1580 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
# Installed *DEPEND may be stale/invalid, so strict parsing is
# temporarily disabled around dep_check().
1582 portage.dep._dep_check_strict = False
1583 success, atoms = portage.dep_check(depstr,
1584 vardb, settings, myuse=inst_pkg.use.enabled,
1585 trees=dep_check_trees, myroot=inst_pkg.root)
1587 portage.dep._dep_check_strict = True
# Failure path: report the offending vdb entry (guard not visible).
1589 pkg_location = os.path.join(inst_pkg.root,
1590 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
1591 portage.writemsg("!!! %s/*DEPEND: %s\n" % \
1592 (pkg_location, atoms), noiselevel=-1)
# Keep only blocker atoms ("!...") and refresh the cache entry.
1595 blocker_atoms = [atom for atom in atoms \
1596 if atom.startswith("!")]
1597 blocker_atoms.sort()
1598 counter = long(inst_pkg.metadata["COUNTER"])
1599 blocker_cache[inst_pkg.cpv] = \
1600 blocker_cache.BlockerData(counter, blocker_atoms)
1601 for cpv in stale_cache:
1602 del blocker_cache[cpv]
1603 blocker_cache.flush()
# Map each blocker atom to the installed package(s) declaring it.
1605 blocker_parents = digraph()
1607 for pkg in installed_pkgs:
1608 for blocker_atom in blocker_cache[pkg.cpv].atoms:
1609 blocker_atom = blocker_atom.lstrip("!")
1610 blocker_atoms.append(blocker_atom)
1611 blocker_parents.add(blocker_atom, pkg)
# Direction 1: installed packages whose blockers match new_pkg.
1613 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1614 blocking_pkgs = set()
1615 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
1616 blocking_pkgs.update(blocker_parents.parent_nodes(atom))
1618 # Check for blockers in the other direction.
1619 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
1621 portage.dep._dep_check_strict = False
1622 success, atoms = portage.dep_check(depstr,
1623 vardb, settings, myuse=new_pkg.use.enabled,
1624 trees=dep_check_trees, myroot=new_pkg.root)
1626 portage.dep._dep_check_strict = True
1628 # We should never get this far with invalid deps.
1629 show_invalid_depstring_notice(new_pkg, depstr, atoms)
# Direction 2: installed packages matched by new_pkg's blockers.
1632 blocker_atoms = [atom.lstrip("!") for atom in atoms \
1635 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1636 for inst_pkg in installed_pkgs:
# next() raising StopIteration means no blocker atom matches this
# installed package; InvalidDependString is treated the same way.
1638 blocker_atoms.iterAtomsForPackage(inst_pkg).next()
1639 except (portage.exception.InvalidDependString, StopIteration):
1641 blocking_pkgs.add(inst_pkg)
1643 return blocking_pkgs
# Report a corrupt/invalid dependency string for *parent_node* to the
# user at ERROR level, with remediation advice that differs for installed
# packages ("nomerge") versus packages to be installed.
# NOTE(review): the "msg = []" initialization (and the else: before 1663)
# are not visible in this chunk; confirm against upstream.
1645 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
1647 msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
1648 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
# parent_node is the 4-tuple hash key (type_name, root, cpv, operation).
1649 p_type, p_root, p_key, p_status = parent_node
1651 if p_status == "nomerge":
1652 category, pf = portage.catsplit(p_key)
1653 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
1654 msg.append("Portage is unable to process the dependencies of the ")
1655 msg.append("'%s' package. " % p_key)
1656 msg.append("In order to correct this problem, the package ")
1657 msg.append("should be uninstalled, reinstalled, or upgraded. ")
1658 msg.append("As a temporary workaround, the --nodeps option can ")
1659 msg.append("be used to ignore all dependencies. For reference, ")
1660 msg.append("the problematic dependencies can be found in the ")
1661 msg.append("*DEPEND files located in '%s/'." % pkg_location)
# Non-installed case: the depstring comes from the repository, so the
# maintainer must fix it.
1663 msg.append("This package can not be installed. ")
1664 msg.append("Please notify the '%s' package maintainer " % p_key)
1665 msg.append("about this problem.")
1667 msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
1668 writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
# The dependency graph builder for emerge. __init__ wires up, per root:
# a FakeVartree-backed view of installed packages, a fakedbapi modeling
# the post-merge vdb state, "graph" and "filtered" tree mappings used by
# dep_check(), the ordered db list used for package selection, and the
# many bookkeeping structures (blocker digraphs, conflict maps, caches).
# NOTE(review): numerous lines are missing from this chunk (e.g. the
# self.trees/self.roots/self.mydbapi initializations, the for-loop over
# vardb at 1727, the graph_tree construction at ~1741-1742, the dbs list
# creation, and the body of filtered_tree()). Confirm against upstream.
1670 class depgraph(object):
1672 pkg_tree_map = RootConfig.pkg_tree_map
1674 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1676 def __init__(self, settings, trees, myopts, myparams, spinner):
1677 self.settings = settings
1678 self.target_root = settings["ROOT"]
1679 self.myopts = myopts
1680 self.myparams = myparams
1682 if settings.get("PORTAGE_DEBUG", "") == "1":
1684 self.spinner = spinner
1685 self._running_root = trees["/"]["root_config"]
1686 self._opts_no_restart = Scheduler._opts_no_restart
1687 self.pkgsettings = {}
1688 # Maps slot atom to package for each Package added to the graph.
1689 self._slot_pkg_map = {}
1690 # Maps nodes to the reasons they were selected for reinstallation.
1691 self._reinstall_nodes = {}
1694 self._trees_orig = trees
1696 # Contains a filtered view of preferred packages that are selected
1697 # from available repositories.
1698 self._filtered_trees = {}
1699 # Contains installed packages and new packages that have been added
1701 self._graph_trees = {}
1702 # All Package instances
1703 self._pkg_cache = {}
1704 for myroot in trees:
1705 self.trees[myroot] = {}
1706 # Create a RootConfig instance that references
1707 # the FakeVartree instead of the real one.
1708 self.roots[myroot] = RootConfig(
1709 trees[myroot]["vartree"].settings,
1711 trees[myroot]["root_config"].setconfig)
1712 for tree in ("porttree", "bintree"):
1713 self.trees[myroot][tree] = trees[myroot][tree]
1714 self.trees[myroot]["vartree"] = \
1715 FakeVartree(trees[myroot]["root_config"],
1716 pkg_cache=self._pkg_cache)
1717 self.pkgsettings[myroot] = portage.config(
1718 clone=self.trees[myroot]["vartree"].settings)
1719 self._slot_pkg_map[myroot] = {}
1720 vardb = self.trees[myroot]["vartree"].dbapi
1721 preload_installed_pkgs = "--nodeps" not in self.myopts and \
1722 "--buildpkgonly" not in self.myopts
1723 # This fakedbapi instance will model the state that the vdb will
1724 # have after new packages have been installed.
1725 fakedb = PackageVirtualDbapi(vardb.settings)
1726 if preload_installed_pkgs:
1728 self.spinner.update()
1729 # This triggers metadata updates via FakeVartree.
1730 vardb.aux_get(pkg.cpv, [])
1731 fakedb.cpv_inject(pkg)
1733 # Now that the vardb state is cached in our FakeVartree,
1734 # we won't be needing the real vartree cache for awhile.
1735 # To make some room on the heap, clear the vardbapi
1737 trees[myroot]["vartree"].dbapi._clear_cache()
1740 self.mydbapi[myroot] = fakedb
1743 graph_tree.dbapi = fakedb
1744 self._graph_trees[myroot] = {}
1745 self._filtered_trees[myroot] = {}
1746 # Substitute the graph tree for the vartree in dep_check() since we
1747 # want atom selections to be consistent with package selections
1748 # that have already been made.
1749 self._graph_trees[myroot]["porttree"] = graph_tree
1750 self._graph_trees[myroot]["vartree"] = graph_tree
1751 def filtered_tree():
1753 filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
1754 self._filtered_trees[myroot]["porttree"] = filtered_tree
1756 # Passing in graph_tree as the vartree here could lead to better
1757 # atom selections in some cases by causing atoms for packages that
1758 # have been added to the graph to be preferred over other choices.
1759 # However, it can trigger atom selections that result in
1760 # unresolvable direct circular dependencies. For example, this
1761 # happens with gwydion-dylan which depends on either itself or
1762 # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
1763 # gwydion-dylan-bin needs to be selected in order to avoid a
1764 # an unresolvable direct circular dependency.
1766 # To solve the problem described above, pass in "graph_db" so that
1767 # packages that have been added to the graph are distinguishable
1768 # from other available packages and installed packages. Also, pass
1769 # the parent package into self._select_atoms() calls so that
1770 # unresolvable direct circular dependencies can be detected and
1771 # avoided when possible.
1772 self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
1773 self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
# Build the ordered candidate-db list: ebuilds first (unless
# --usepkgonly), then binaries (with --usepkg), then installed.
1776 portdb = self.trees[myroot]["porttree"].dbapi
1777 bindb = self.trees[myroot]["bintree"].dbapi
1778 vardb = self.trees[myroot]["vartree"].dbapi
1779 # (db, pkg_type, built, installed, db_keys)
1780 if "--usepkgonly" not in self.myopts:
1781 db_keys = list(portdb._aux_cache_keys)
1782 dbs.append((portdb, "ebuild", False, False, db_keys))
1783 if "--usepkg" in self.myopts:
1784 db_keys = list(bindb._aux_cache_keys)
1785 dbs.append((bindb, "binary", True, False, db_keys))
1786 db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
1787 dbs.append((vardb, "installed", True, True, db_keys))
1788 self._filtered_trees[myroot]["dbs"] = dbs
1789 if "--usepkg" in self.myopts:
1790 self.trees[myroot]["bintree"].populate(
1791 "--getbinpkg" in self.myopts,
1792 "--getbinpkgonly" in self.myopts)
# Graph-wide bookkeeping structures follow.
1795 self.digraph=portage.digraph()
1796 # contains all sets added to the graph
1798 # contains atoms given as arguments
1799 self._sets["args"] = InternalPackageSet()
1800 # contains all atoms from all sets added to the graph, including
1801 # atoms given as arguments
1802 self._set_atoms = InternalPackageSet()
1803 self._atom_arg_map = {}
1804 # contains all nodes pulled in by self._set_atoms
1805 self._set_nodes = set()
1806 # Contains only Blocker -> Uninstall edges
1807 self._blocker_uninstalls = digraph()
1808 # Contains only Package -> Blocker edges
1809 self._blocker_parents = digraph()
1810 # Contains only irrelevant Package -> Blocker edges
1811 self._irrelevant_blockers = digraph()
1812 # Contains only unsolvable Package -> Blocker edges
1813 self._unsolvable_blockers = digraph()
1814 # Contains all Blocker -> Blocked Package edges
1815 self._blocked_pkgs = digraph()
1816 # Contains world packages that have been protected from
1817 # uninstallation but may not have been added to the graph
1818 # if the graph is not complete yet.
1819 self._blocked_world_pkgs = {}
1820 self._slot_collision_info = {}
1821 # Slot collision nodes are not allowed to block other packages since
1822 # blocker validation is only able to account for one package per slot.
1823 self._slot_collision_nodes = set()
1824 self._parent_atoms = {}
1825 self._slot_conflict_parent_atoms = set()
1826 self._serialized_tasks_cache = None
1827 self._scheduler_graph = None
1828 self._displayed_list = None
1829 self._pprovided_args = []
1830 self._missing_args = []
1831 self._masked_installed = set()
1832 self._unsatisfied_deps_for_display = []
1833 self._unsatisfied_blockers_for_display = None
1834 self._circular_deps_for_display = None
1835 self._dep_stack = []
1836 self._dep_disjunctive_stack = []
1837 self._unsatisfied_deps = []
1838 self._initially_unsatisfied_deps = []
1839 self._ignored_deps = []
1840 self._required_set_names = set(["system", "world"])
1841 self._select_atoms = self._select_atoms_highest_available
1842 self._select_package = self._select_pkg_highest_available
1843 self._highest_pkg_cache = {}
1845 def _show_slot_collision_notice(self):
1846 """Show an informational message advising the user to mask one of the
1847 the packages. In some cases it may be possible to resolve this
1848 automatically, but support for backtracking (removal nodes that have
1849 already been selected) will be required in order to handle all possible
# NOTE(review): the docstring's closing lines, "return" guard body,
# msg/indent/max_parents initializations, and several loop-control lines
# are missing from this chunk. Confirm against upstream.
1853 if not self._slot_collision_info:
1856 self._show_merge_list()
1859 msg.append("\n!!! Multiple package instances within a single " + \
1860 "package slot have been pulled\n")
1861 msg.append("!!! into the dependency graph, resulting" + \
1862 " in a slot conflict:\n\n")
1864 # Max number of parents shown, to avoid flooding the display.
1866 explanation_columns = 70
# One section per conflicting (slot_atom, root): list each colliding
# node and a pruned selection of the parent atoms that pulled it in.
1868 for (slot_atom, root), slot_nodes \
1869 in self._slot_collision_info.iteritems():
1870 msg.append(str(slot_atom))
1873 for node in slot_nodes:
1875 msg.append(str(node))
1876 parent_atoms = self._parent_atoms.get(node)
1879 # Prefer conflict atoms over others.
1880 for parent_atom in parent_atoms:
1881 if len(pruned_list) >= max_parents:
1883 if parent_atom in self._slot_conflict_parent_atoms:
1884 pruned_list.add(parent_atom)
1886 # If this package was pulled in by conflict atoms then
1887 # show those alone since those are the most interesting.
1889 # When generating the pruned list, prefer instances
1890 # of DependencyArg over instances of Package.
1891 for parent_atom in parent_atoms:
1892 if len(pruned_list) >= max_parents:
1894 parent, atom = parent_atom
1895 if isinstance(parent, DependencyArg):
1896 pruned_list.add(parent_atom)
1897 # Prefer Packages instances that themselves have been
1898 # pulled into collision slots.
1899 for parent_atom in parent_atoms:
1900 if len(pruned_list) >= max_parents:
1902 parent, atom = parent_atom
1903 if isinstance(parent, Package) and \
1904 (parent.slot_atom, parent.root) \
1905 in self._slot_collision_info:
1906 pruned_list.add(parent_atom)
# Fallback: fill any remaining display slots with arbitrary parents.
1907 for parent_atom in parent_atoms:
1908 if len(pruned_list) >= max_parents:
1910 pruned_list.add(parent_atom)
1911 omitted_parents = len(parent_atoms) - len(pruned_list)
1912 parent_atoms = pruned_list
1913 msg.append(" pulled in by\n")
1914 for parent_atom in parent_atoms:
1915 parent, atom = parent_atom
1916 msg.append(2*indent)
1917 if isinstance(parent,
1918 (PackageArg, AtomArg)):
1919 # For PackageArg and AtomArg types, it's
1920 # redundant to display the atom attribute.
1921 msg.append(str(parent))
1923 # Display the specific atom from SetArg or
1925 msg.append("%s required by %s" % (atom, parent))
# Indicate how many parents were omitted by the pruning above.
1928 msg.append(2*indent)
1929 msg.append("(and %d more)\n" % omitted_parents)
1931 msg.append(" (no parents)\n")
# Append a per-conflict explanation when one can be generated.
1933 explanation = self._slot_conflict_explanation(slot_nodes)
1936 msg.append(indent + "Explanation:\n\n")
1937 for line in textwrap.wrap(explanation, explanation_columns):
1938 msg.append(2*indent + line + "\n")
1941 sys.stderr.write("".join(msg))
# If every conflict was explained (or --quiet), skip the generic advice.
1944 explanations_for_all = explanations == len(self._slot_collision_info)
1946 if explanations_for_all or "--quiet" in self.myopts:
1950 msg.append("It may be possible to solve this problem ")
1951 msg.append("by using package.mask to prevent one of ")
1952 msg.append("those packages from being selected. ")
1953 msg.append("However, it is also possible that conflicting ")
1954 msg.append("dependencies exist such that they are impossible to ")
1955 msg.append("satisfy simultaneously. If such a conflict exists in ")
1956 msg.append("the dependencies of two different packages, then those ")
1957 msg.append("packages can not be installed simultaneously.")
# The generic advice is word-wrapped to stderr via a DumbWriter.
1959 from formatter import AbstractFormatter, DumbWriter
1960 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
1962 f.add_flowing_data(x)
1966 msg.append("For more information, see MASKED PACKAGES ")
1967 msg.append("section in the emerge man page or refer ")
1968 msg.append("to the Gentoo Handbook.")
1970 f.add_flowing_data(x)
1974 def _slot_conflict_explanation(self, slot_nodes):
1976 When a slot conflict occurs due to USE deps, there are a few
1977 different cases to consider:
1979 1) New USE are correctly set but --newuse wasn't requested so an
1980 installed package with incorrect USE happened to get pulled
1981 into graph before the new one.
1983 2) New USE are incorrectly set but an installed package has correct
1984 USE so it got pulled into the graph, and a new instance also got
1985 pulled in due to --newuse or an upgrade.
1987 3) Multiple USE deps exist that can't be satisfied simultaneously,
1988 and multiple package instances got pulled into the same slot to
1989 satisfy the conflicting deps.
1991 Currently, explanations and suggested courses of action are generated
1992 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
# NOTE(review): several "return None" lines, the matched_node
# initialization, and the USE-dep check around 2015 are missing from
# this chunk. Confirm against upstream.
1995 if len(slot_nodes) != 2:
1996 # Suggestions are only implemented for
1997 # conflicts between two packages.
2000 all_conflict_atoms = self._slot_conflict_parent_atoms
2002 matched_atoms = None
2003 unmatched_node = None
# Classify the two nodes: the one matched by conflict atoms and the
# one that is not. Ambiguity (both or neither) aborts the suggestion.
2004 for node in slot_nodes:
2005 parent_atoms = self._parent_atoms.get(node)
2006 if not parent_atoms:
2007 # Normally, there are always parent atoms. If there are
2008 # none then something unexpected is happening and there's
2009 # currently no suggestion for this case.
2011 conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
2012 for parent_atom in conflict_atoms:
2013 parent, atom = parent_atom
2015 # Suggestions are currently only implemented for cases
2016 # in which all conflict atoms have USE deps.
2019 if matched_node is not None:
2020 # If conflict atoms match multiple nodes
2021 # then there's no suggestion.
2024 matched_atoms = conflict_atoms
2026 if unmatched_node is not None:
2027 # Neither node is matched by conflict atoms, and
2028 # there is no suggestion for this case.
2030 unmatched_node = node
2032 if matched_node is None or unmatched_node is None:
2033 # This shouldn't happen.
# Case 1: an installed instance (unmatched) shadows a new instance
# (matched) with the same version -> recommend --newuse.
2036 if unmatched_node.installed and not matched_node.installed and \
2037 unmatched_node.cpv == matched_node.cpv:
2038 # If the conflicting packages are the same version then
2039 # --newuse should be all that's needed. If they are different
2040 # versions then there's some other problem.
2041 return "New USE are correctly set, but --newuse wasn't" + \
2042 " requested, so an installed package with incorrect USE " + \
2043 "happened to get pulled into the dependency graph. " + \
2044 "In order to solve " + \
2045 "this, either specify the --newuse option or explicitly " + \
2046 " reinstall '%s'." % matched_node.slot_atom
# Case 2: the installed instance is the matched one -> recommend
# adjusting USE to satisfy the conflict atoms.
2048 if matched_node.installed and not unmatched_node.installed:
2049 atoms = sorted(set(atom for parent, atom in matched_atoms))
2050 explanation = ("New USE for '%s' are incorrectly set. " + \
2051 "In order to solve this, adjust USE to satisfy '%s'") % \
2052 (matched_node.slot_atom, atoms[0])
# Join any additional atoms grammatically: ", 'a'" ... " and 'z'".
2054 for atom in atoms[1:-1]:
2055 explanation += ", '%s'" % (atom,)
2058 explanation += " and '%s'" % (atoms[-1],)
2064 def _process_slot_conflicts(self):
2066 Process slot conflict data to identify specific atoms which
2067 lead to conflict. These atoms only match a subset of the
2068 packages that have been pulled into a given slot.
# For every conflicting slot, pool the parent atoms of all colliding
# packages; any pooled atom that matches one package but was not already
# a parent of it is recorded as a conflict atom.
2070 for (slot_atom, root), slot_nodes \
2071 in self._slot_collision_info.iteritems():
2073 all_parent_atoms = set()
2074 for pkg in slot_nodes:
2075 parent_atoms = self._parent_atoms.get(pkg)
2076 if not parent_atoms:
2078 all_parent_atoms.update(parent_atoms)
2080 for pkg in slot_nodes:
2081 parent_atoms = self._parent_atoms.get(pkg)
2082 if parent_atoms is None:
2083 parent_atoms = set()
2084 self._parent_atoms[pkg] = parent_atoms
2085 for parent_atom in all_parent_atoms:
# Atoms already attached to this package are not conflicts.
2086 if parent_atom in parent_atoms:
2088 # Use package set for matching since it will match via
2089 # PROVIDE when necessary, while match_from_list does not.
2090 parent, atom = parent_atom
2091 atom_set = InternalPackageSet(
2092 initial_atoms=(atom,))
2093 if atom_set.findAtomForPackage(pkg):
2094 parent_atoms.add(parent_atom)
# NOTE(review): the else-branch recording the atom into
# _slot_conflict_parent_atoms appears partially missing here.
2096 self._slot_conflict_parent_atoms.add(parent_atom)
2098 def _reinstall_for_flags(self, forced_flags,
2099 orig_use, orig_iuse, cur_use, cur_iuse):
2100 """Return a set of flags that trigger reinstallation, or None if there
2101 are no such flags."""
# --newuse: any IUSE change (minus forced flags) or any change in the
# effective enabled set counts.
2102 if "--newuse" in self.myopts:
2103 flags = set(orig_iuse.symmetric_difference(
2104 cur_iuse).difference(forced_flags))
2105 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2106 cur_iuse.intersection(cur_use)))
# --reinstall=changed-use: only effective enabled-set changes count.
# NOTE(review): the "if flags: return flags" / "return None" lines are
# not visible in this chunk.
2109 elif "changed-use" == self.myopts.get("--reinstall"):
2110 flags = orig_iuse.intersection(orig_use).symmetric_difference(
2111 cur_iuse.intersection(cur_use))
# Drain the dependency work stacks, adding packages/deps to the graph.
# Returns a false value as soon as any step fails (failure-return lines
# are not visible in this chunk).
2116 def _create_graph(self, allow_unsatisfied=False):
2117 dep_stack = self._dep_stack
2118 dep_disjunctive_stack = self._dep_disjunctive_stack
2119 while dep_stack or dep_disjunctive_stack:
2120 self.spinner.update()
# Packages on the stack get their deps expanded; plain Dependency
# objects are resolved via _add_dep.
2122 dep = dep_stack.pop()
2123 if isinstance(dep, Package):
2124 if not self._add_pkg_deps(dep,
2125 allow_unsatisfied=allow_unsatisfied):
2128 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
# Disjunctive (|| ...) deps are deferred and popped separately.
2130 if dep_disjunctive_stack:
2131 if not self._pop_disjunction(allow_unsatisfied):
# Resolve a single Dependency: register blockers, select a matching
# package via self._select_package, and hand it to _add_pkg. Returns a
# truthy value on success (the "return 0" failure lines are not visible
# in this chunk).
2135 def _add_dep(self, dep, allow_unsatisfied=False):
2136 debug = "--debug" in self.myopts
2137 buildpkgonly = "--buildpkgonly" in self.myopts
2138 nodeps = "--nodeps" in self.myopts
2139 empty = "empty" in self.myparams
2140 deep = "deep" in self.myparams
2141 update = "--update" in self.myopts and dep.depth <= 1
# Blocker handling (the dep.blocker condition line is not visible).
2143 if not buildpkgonly and \
2145 dep.parent not in self._slot_collision_nodes:
2146 if dep.parent.onlydeps:
2147 # It's safe to ignore blockers if the
2148 # parent is an --onlydeps node.
2150 # The blocker applies to the root where
2151 # the parent is or will be installed.
2152 blocker = Blocker(atom=dep.atom,
2153 eapi=dep.parent.metadata["EAPI"],
2154 root=dep.parent.root)
2155 self._blocker_parents.add(blocker, dep.parent)
2157 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2158 onlydeps=dep.onlydeps)
# No match found: optional deps are silently skipped; otherwise the
# failure is either queued (allow_unsatisfied) or recorded for display.
2160 if dep.priority.optional:
2161 # This could be an unnecessary build-time dep
2162 # pulled in by --with-bdeps=y.
2164 if allow_unsatisfied:
2165 self._unsatisfied_deps.append(dep)
2167 self._unsatisfied_deps_for_display.append(
2168 ((dep.root, dep.atom), {"myparent":dep.parent}))
2170 # In some cases, dep_check will return deps that shouldn't
2171 # be processed any further, so they are identified and
2172 # discarded here. Try to discard as few as possible since
2173 # discarded dependencies reduce the amount of information
2174 # available for optimization of merge order.
2175 if dep.priority.satisfied and \
2176 not dep_pkg.installed and \
2177 not (existing_node or empty or deep or update):
2179 if dep.root == self.target_root:
# Deps that match a command-line argument must not be ignored.
2181 myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2182 except StopIteration:
2184 except portage.exception.InvalidDependString:
2185 if not dep_pkg.installed:
2186 # This shouldn't happen since the package
2187 # should have been masked.
2190 self._ignored_deps.append(dep)
2193 if not self._add_pkg(dep_pkg, dep):
def _add_pkg(self, pkg, dep):
	# Add a resolved package node (and its reverse-dependency edges) to
	# the dependency graph, detecting slot collisions along the way and
	# queueing the package for dependency expansion.
	# NOTE(review): many original lines are elided from this excerpt
	# (docstring delimiters, try: headers, return statements and some
	# else:/if branches), so nesting below is a best-effort reconstruction.
	myparent = dep.parent
	priority = dep.priority
	if priority is None:
		priority = DepPriority()
	Fills the digraph with nodes comprised of packages to merge.
	mybigkey is the package spec of the package to merge.
	myparent is the package depending on mybigkey ( or None )
	addme = Should we add this package to the digraph or are we just looking at it's deps?
	Think --onlydeps, we need to ignore packages in that case.
	#IUSE-aware emerge -> USE DEP aware depgraph
	#"no downgrade" emerge
	# Ensure that the dependencies of the same package
	# are never processed more than once.
	previously_added = pkg in self.digraph
	# select the correct /var database that we'll be checking against
	vardbapi = self.trees[pkg.root]["vartree"].dbapi
	pkgsettings = self.pkgsettings[pkg.root]
	# (try: header elided)
	arg_atoms = list(self._iter_atoms_for_pkg(pkg))
	except portage.exception.InvalidDependString, e:
		if not pkg.installed:
			show_invalid_depstring_notice(
				pkg, pkg.metadata["PROVIDE"], str(e))
	if not pkg.onlydeps:
		if not pkg.installed and \
			"empty" not in self.myparams and \
			vardbapi.match(pkg.slot_atom):
			# Increase the priority of dependencies on packages that
			# are being rebuilt. This optimizes merge order so that
			# dependencies are rebuilt/updated as soon as possible,
			# which is needed especially when emerge is called by
			# revdep-rebuild since dependencies may be affected by ABI
			# breakage that has rendered them useless. Don't adjust
			# priority here when in "empty" mode since all packages
			# are being merged in that case.
			priority.rebuild = True
		existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
		slot_collision = False
		# (enclosing "if existing_node:" guard elided)
		existing_node_matches = pkg.cpv == existing_node.cpv
		if existing_node_matches and \
			pkg != existing_node and \
			dep.atom is not None:
			# Use package set for matching since it will match via
			# PROVIDE when necessary, while match_from_list does not.
			atom_set = InternalPackageSet(initial_atoms=[dep.atom])
			if not atom_set.findAtomForPackage(existing_node):
				existing_node_matches = False
		if existing_node_matches:
			# The existing node can be reused.
			for parent_atom in arg_atoms:
				parent, atom = parent_atom
				self.digraph.add(existing_node, parent,
				self._add_parent_atom(existing_node, parent_atom)
			# If a direct circular dependency is not an unsatisfied
			# buildtime dependency then drop it here since otherwise
			# it can skew the merge order calculation in an unwanted
			if existing_node != myparent or \
				(priority.buildtime and not priority.satisfied):
				self.digraph.addnode(existing_node, myparent,
				if dep.atom is not None and dep.parent is not None:
					self._add_parent_atom(existing_node,
						(dep.parent, dep.atom))
			# A slot collision has occurred.  Sometimes this coincides
			# with unresolvable blockers, so the slot collision will be
			# shown later if there are no unresolvable blockers.
			self._add_slot_conflict(pkg)
			slot_collision = True
	# (enclosing "if slot_collision:" guard elided)
	# Now add this node to the graph so that self.display()
	# can show use flags and --tree portage.output.  This node is
	# only being partially added to the graph.  It must not be
	# allowed to interfere with the other nodes that have been
	# added.  Do not overwrite data for existing nodes in
	# self.mydbapi since that data will be used for blocker
	# Even though the graph is now invalid, continue to process
	# dependencies so that things like --fetchonly can still
	# function despite collisions.
	elif not previously_added:
		self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
		self.mydbapi[pkg.root].cpv_inject(pkg)
		self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
	if not pkg.installed:
		# Allow this package to satisfy old-style virtuals in case it
		# doesn't already. Any pre-existing providers will be preferred
		pkgsettings.setinst(pkg.cpv, pkg.metadata)
		# For consistency, also update the global virtuals.
		settings = self.roots[pkg.root].settings
		settings.setinst(pkg.cpv, pkg.metadata)
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(
				pkg, pkg.metadata["PROVIDE"], str(e))
	self._set_nodes.add(pkg)
	# Do this even when addme is False (--onlydeps) so that the
	# parent/child relationship is always known in case
	# self._show_slot_collision_notice() needs to be called later.
	self.digraph.add(pkg, myparent, priority=priority)
	if dep.atom is not None and dep.parent is not None:
		self._add_parent_atom(pkg, (dep.parent, dep.atom))
	for parent_atom in arg_atoms:
		parent, atom = parent_atom
		self.digraph.add(pkg, parent, priority=priority)
		self._add_parent_atom(pkg, parent_atom)
	""" This section determines whether we go deeper into dependencies or not.
	We want to go deeper on a few occasions:
	Installing package A, we need to make sure package A's deps are met.
	emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
	If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
	dep_stack = self._dep_stack
	if "recurse" not in self.myparams:
	elif pkg.installed and \
		"deep" not in self.myparams:
		# Installed, non-deep: deps go to the ignored queue.
		dep_stack = self._ignored_deps
	self.spinner.update()
	if not previously_added:
		dep_stack.append(pkg)
2365 def _add_parent_atom(self, pkg, parent_atom):
2366 parent_atoms = self._parent_atoms.get(pkg)
2367 if parent_atoms is None:
2368 parent_atoms = set()
2369 self._parent_atoms[pkg] = parent_atoms
2370 parent_atoms.add(parent_atom)
def _add_slot_conflict(self, pkg):
	# Record pkg as a slot-conflict participant and group it with the
	# package already occupying the same (slot atom, root) key in
	# _slot_pkg_map.
	# NOTE(review): the original numbering jumps here — the line that
	# creates a fresh set when slot_nodes is None, and a final line
	# adding pkg itself, appear to be elided from this excerpt.
	self._slot_collision_nodes.add(pkg)
	slot_key = (pkg.slot_atom, pkg.root)
	slot_nodes = self._slot_collision_info.get(slot_key)
	if slot_nodes is None:
		slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
		self._slot_collision_info[slot_key] = slot_nodes
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
	# Expand pkg's DEPEND/RDEPEND/PDEPEND strings and queue the
	# resulting Dependency objects for resolution.
	# NOTE(review): this excerpt elides many lines (initializations of
	# myroot/mykey/jbigkey/edepend/bdeps_root/deps, for/try/finally
	# headers, else: branches and return statements), so the nesting
	# below is a best-effort reconstruction.
	mytype = pkg.type_name
	metadata = pkg.metadata
	myuse = pkg.use.enabled
	depth = pkg.depth + 1
	removal_action = "remove" in self.myparams
	depkeys = ["DEPEND","RDEPEND","PDEPEND"]
	# (enclosing "for k in depkeys:" loop header elided)
	edepend[k] = metadata[k]
	if not pkg.built and \
		"--buildpkgonly" in self.myopts and \
		"deep" not in self.myparams and \
		"empty" not in self.myparams:
		# --buildpkgonly with shallow deps: ignore runtime deps.
		edepend["RDEPEND"] = ""
		edepend["PDEPEND"] = ""
	bdeps_optional = False
	if pkg.built and not removal_action:
		if self.myopts.get("--with-bdeps", "n") == "y":
			# Pull in build time deps as requested, but mark them as
			# "optional" since they are not strictly required. This allows
			# more freedom in the merge order calculation for solving
			# circular dependencies. Don't convert to PDEPEND since that
			# could make --with-bdeps=y less effective if it is used to
			# adjust merge order to prevent built_with_use() calls from
			bdeps_optional = True
			# (else branch elided:)
			# built packages do not have build time dependencies.
			edepend["DEPEND"] = ""
	if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
		edepend["DEPEND"] = ""
	root_deps = self.myopts.get("--root-deps")
	if root_deps is not None:
		if root_deps is True:
		elif root_deps == "rdeps":
			edepend["DEPEND"] = ""
	# (opening "deps = (" of the tuple below is elided)
		(bdeps_root, edepend["DEPEND"],
			self._priority(buildtime=(not bdeps_optional),
			optional=bdeps_optional)),
		(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
		(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
	debug = "--debug" in self.myopts
	# Installed packages get lenient dep-string parsing.
	strict = mytype != "installed"
	portage.dep._dep_check_strict = False
	for dep_root, dep_string, dep_priority in deps:
		# (debug guard elided)
		print "Parent:   ", jbigkey
		print "Depstring:", dep_string
		print "Priority:", dep_priority
		dep_string = portage.dep.paren_normalize(
			portage.dep.use_reduce(
			portage.dep.paren_reduce(dep_string),
			uselist=pkg.use.enabled))
		dep_string = list(self._queue_disjunctive_deps(
			pkg, dep_root, dep_priority, dep_string))
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(pkg, dep_string, str(e))
		dep_string = portage.dep.paren_enclose(dep_string)
		if not self._add_pkg_dep_string(
			pkg, dep_root, dep_priority, dep_string,
	except portage.exception.AmbiguousPackageName, e:
		portage.writemsg("\n\n!!! An atom in the dependencies " + \
			"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
		portage.writemsg("    %s\n" % cpv, noiselevel=-1)
		portage.writemsg("\n", noiselevel=-1)
		if mytype == "binary":
			"!!! This binary package cannot be installed: '%s'\n" % \
			mykey, noiselevel=-1)
		elif mytype == "ebuild":
			portdb = self.roots[myroot].trees["porttree"].dbapi
			myebuild, mylocation = portdb.findname2(mykey)
			portage.writemsg("!!! This ebuild cannot be installed: " + \
				"'%s'\n" % myebuild, noiselevel=-1)
		portage.writemsg("!!! Please notify the package maintainer " + \
			"that atoms must be fully-qualified.\n", noiselevel=-1)
	# Always restore strict mode (in the original, via finally:).
	portage.dep._dep_check_strict = True
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
	# NOTE(review): the signature's closing "allow_unsatisfied):" line,
	# try: headers and return statements are elided from this excerpt.
	depth = pkg.depth + 1
	debug = "--debug" in self.myopts
	# Installed packages get lenient dep-string parsing.
	strict = pkg.type_name != "installed"
	# (debug guard elided)
	print "Parent:   ", pkg
	print "Depstring:", dep_string
	print "Priority:", dep_priority
	selected_atoms = self._select_atoms(dep_root,
		dep_string, myuse=pkg.use.enabled, parent=pkg,
		strict=strict, priority=dep_priority)
	except portage.exception.InvalidDependString, e:
		show_invalid_depstring_notice(pkg, dep_string, str(e))
	print "Candidates:", selected_atoms
	vardb = self.roots[dep_root].trees["vartree"].dbapi
	for atom in selected_atoms:
		atom = portage.dep.Atom(atom)
		mypriority = dep_priority.copy()
		# A non-blocker dep already satisfied by an installed package
		# gets a "satisfied" priority, relaxing merge ordering.
		if not atom.blocker and vardb.match(atom):
			mypriority.satisfied = True
		if not self._add_dep(Dependency(atom=atom,
			blocker=atom.blocker, depth=depth, parent=pkg,
			priority=mypriority, root=dep_root),
			allow_unsatisfied=allow_unsatisfied):
		except portage.exception.InvalidAtom, e:
			show_invalid_depstring_notice(
				pkg, dep_string, str(e))
			if not pkg.installed:
	print "Exiting...", pkg
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
	# NOTE(review): docstring delimiters, the "x = dep_struct[i]"
	# assignment, elif/else headers, yield statements and the index
	# increments are elided from this excerpt.
	Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
	Yields non-disjunctive deps. Raises InvalidDependString when
	while i < len(dep_struct):
		if isinstance(x, list):
			# Recurse into nested dep groups.
			for y in self._queue_disjunctive_deps(
				pkg, dep_root, dep_priority, x):
		# An "||" operator: queue it together with its choice list.
		self._queue_disjunction(pkg, dep_root, dep_priority,
			[ x, dep_struct[ i + 1 ] ] )
		x = portage.dep.Atom(x)
		except portage.exception.InvalidAtom:
			# Lenient for installed packages, strict otherwise.
			if not pkg.installed:
				raise portage.exception.InvalidDependString(
					"invalid atom: '%s'" % x)
		# Note: Eventually this will check for PROPERTIES=virtual
		# or whatever other metadata gets implemented for this
		if x.cp.startswith('virtual/'):
			# New-style virtuals are treated as disjunctive too.
			self._queue_disjunction( pkg, dep_root,
				dep_priority, [ str(x) ] )
2594 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2595 self._dep_disjunctive_stack.append(
2596 (pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
	# NOTE(review): the docstring delimiters and the return statements
	# are elided from this excerpt.
	Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
	populate self._dep_stack.
	pkg, dep_root, dep_priority, dep_struct = \
		self._dep_disjunctive_stack.pop()
	# Re-serialize the chosen group and process it like a normal
	# dependency string.
	dep_string = portage.dep.paren_enclose(dep_struct)
	if not self._add_pkg_dep_string(
		pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
def _priority(self, **kwargs):
	# Build the appropriate priority object: UnmergeDepPriority during
	# a removal action, DepPriority otherwise.
	# NOTE(review): the "else:" separating the two assignments is
	# elided from this excerpt (as shown, the second assignment would
	# unconditionally override the first).
	if "remove" in self.myparams:
		priority_constructor = UnmergeDepPriority
	priority_constructor = DepPriority
	return priority_constructor(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
	# Expand a category-less atom by probing every configured db for
	# categories that contain the package name.
	# NOTE(review): docstring delimiters and the initializations of
	# "categories" and "deps" (plus the categories.add() and return
	# lines) are elided from this excerpt.
	@param root_config: a root config instance
	@type root_config: RootConfig
	@param atom_without_category: an atom without a category component
	@type atom_without_category: String
	@returns: a list of atoms containing categories (possibly empty)
	null_cp = portage.dep_getkey(insert_category_into_atom(
		atom_without_category, "null"))
	cat, atom_pn = portage.catsplit(null_cp)
	dbs = self._filtered_trees[root_config.root]["dbs"]
	for db, pkg_type, built, installed, db_keys in dbs:
		for cat in db.categories:
			if db.cp_list("%s/%s" % (cat, atom_pn)):
	for cat in categories:
		deps.append(insert_category_into_atom(
			atom_without_category, cat))
def _have_new_virt(self, root, atom_cp):
	# True when a package with category/package == atom_cp exists in
	# any configured db for this root (used to detect new-style
	# virtual providers).
	# NOTE(review): the result-variable initialization and the lines
	# that set/return it are elided from this excerpt.
	for db, pkg_type, built, installed, db_keys in \
		self._filtered_trees[root]["dbs"]:
		if db.cp_list(atom_cp):
def _iter_atoms_for_pkg(self, pkg):
	# Yield (arg, atom) pairs from the current argument set that match
	# pkg, skipping atoms shadowed by new-style virtuals or by a
	# visible package in a higher slot.
	# NOTE(review): continue/break/yield lines and the
	# "higher_slot = None" initialization are elided from this excerpt.
	# TODO: add multiple $ROOT support
	if pkg.root != self.target_root:
	atom_arg_map = self._atom_arg_map
	root_config = self.roots[pkg.root]
	for atom in self._set_atoms.iterAtomsForPackage(pkg):
		atom_cp = portage.dep_getkey(atom)
		# Skip when a new-style virtual provides this cp.
		if atom_cp != pkg.cp and \
			self._have_new_virt(pkg.root, atom_cp):
		visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
		visible_pkgs.reverse() # descending order
		for visible_pkg in visible_pkgs:
			if visible_pkg.cp != atom_cp:
			if pkg >= visible_pkg:
				# This is descending order, and we're not
				# interested in any versions <= pkg given.
			if pkg.slot_atom != visible_pkg.slot_atom:
				higher_slot = visible_pkg
		if higher_slot is not None:
		for arg in atom_arg_map[(atom, pkg.root)]:
			if isinstance(arg, PackageArg) and \
def select_files(self, myfiles):
	"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
	appropriate depgraph and return a favorite list."""
	# NOTE(review): this excerpt elides many lines (initializations of
	# myfavorites/args/lookup_owners/owners/relative_paths, the
	# "for x in myfiles:" loop header, the ".tbz2" branch header,
	# try: headers, else: branches and various return/continue
	# statements), so the nesting below is a best-effort reconstruction.
	debug = "--debug" in self.myopts
	root_config = self.roots[self.target_root]
	sets = root_config.sets
	getSetAtoms = root_config.setconfig.getSetAtoms
	myroot = self.target_root
	dbs = self._filtered_trees[myroot]["dbs"]
	vardb = self.trees[myroot]["vartree"].dbapi
	real_vardb = self._trees_orig[myroot]["vartree"].dbapi
	portdb = self.trees[myroot]["porttree"].dbapi
	bindb = self.trees[myroot]["bintree"].dbapi
	pkgsettings = self.pkgsettings[myroot]
	onlydeps = "--onlydeps" in self.myopts
	# (per-argument parsing; ".tbz2" branch follows)
	ext = os.path.splitext(x)[1]
	if not os.path.exists(x):
		# Fall back to $PKGDIR/All and then $PKGDIR.
		os.path.join(pkgsettings["PKGDIR"], "All", x)):
		x = os.path.join(pkgsettings["PKGDIR"], "All", x)
	elif os.path.exists(
		os.path.join(pkgsettings["PKGDIR"], x)):
		x = os.path.join(pkgsettings["PKGDIR"], x)
	print "\n\n!!! Binary package '"+str(x)+"' does not exist."
	print "!!! Please ensure the tbz2 exists as specified.\n"
	return 0, myfavorites
	mytbz2=portage.xpak.tbz2(x)
	mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
	# The tbz2 must live at the canonical PKGDIR location.
	if os.path.realpath(x) != \
		os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
		print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
		return 0, myfavorites
	db_keys = list(bindb._aux_cache_keys)
	metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
	pkg = Package(type_name="binary", root_config=root_config,
		cpv=mykey, built=True, metadata=metadata,
	self._pkg_cache[pkg] = pkg
	args.append(PackageArg(arg=x, package=pkg,
		root_config=root_config))
	elif ext==".ebuild":
		ebuild_path = portage.util.normalize_path(os.path.abspath(x))
		pkgdir = os.path.dirname(ebuild_path)
		tree_root = os.path.dirname(os.path.dirname(pkgdir))
		cp = pkgdir[len(tree_root)+1:]
		# Raised below when the path doesn't follow tree layout.
		e = portage.exception.PackageNotFound(
			("%s is not in a valid portage tree " + \
			"hierarchy or does not exist") % x)
		if not portage.isvalidatom(cp):
		cat = portage.catsplit(cp)[0]
		mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
		if not portage.isvalidatom("="+mykey):
		ebuild_path = portdb.findname(mykey)
		if ebuild_path != os.path.join(os.path.realpath(tree_root),
			cp, os.path.basename(ebuild_path)):
			print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
			return 0, myfavorites
		if mykey not in portdb.xmatch(
			"match-visible", portage.dep_getkey(mykey)):
			print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
			print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
			print colorize("BAD", "*** page for details.")
			countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
		raise portage.exception.PackageNotFound(
			"%s is not in a valid portage tree hierarchy or does not exist" % x)
		db_keys = list(portdb._aux_cache_keys)
		metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
		pkg = Package(type_name="ebuild", root_config=root_config,
			cpv=mykey, metadata=metadata, onlydeps=onlydeps)
		pkgsettings.setcpv(pkg)
		pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
		pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
		self._pkg_cache[pkg] = pkg
		args.append(PackageArg(arg=x, package=pkg,
			root_config=root_config))
	elif x.startswith(os.path.sep):
		if not x.startswith(myroot):
			portage.writemsg(("\n\n!!! '%s' does not start with" + \
				" $ROOT.\n") % x, noiselevel=-1)
		# Queue these up since it's most efficient to handle
		# multiple files in a single iter_owners() call.
		lookup_owners.append(x)
	if x in ("system", "world"):
	if x.startswith(SETPREFIX):
		s = x[len(SETPREFIX):]
		raise portage.exception.PackageSetNotFound(s)
		# Recursively expand sets so that containment tests in
		# self._get_parent_sets() properly match atoms in nested
		# sets (like if world contains system).
		expanded_set = InternalPackageSet(
			initial_atoms=getSetAtoms(s))
		self._sets[s] = expanded_set
		args.append(SetArg(arg=x, set=expanded_set,
			root_config=root_config))
	if not is_valid_package_atom(x):
		portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
	# Don't expand categories or old-style virtuals here unless
	# necessary. Expansion of old-style virtuals here causes at
	# least the following problems:
	#   1) It's more difficult to determine which set(s) an atom
	#      came from, if any.
	#   2) It takes away freedom from the resolver to choose other
	#      possible expansions when necessary.
	args.append(AtomArg(arg=x, atom=x,
		root_config=root_config))
	expanded_atoms = self._dep_expand(root_config, x)
	installed_cp_set = set()
	for atom in expanded_atoms:
		atom_cp = portage.dep_getkey(atom)
		if vardb.cp_list(atom_cp):
			installed_cp_set.add(atom_cp)
	if len(installed_cp_set) > 1:
		# Prefer the non-virtual category when several installed
		# packages match.
		non_virtual_cps = set()
		for atom_cp in installed_cp_set:
			if not atom_cp.startswith("virtual/"):
				non_virtual_cps.add(atom_cp)
		if len(non_virtual_cps) == 1:
			installed_cp_set = non_virtual_cps
	if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
		installed_cp = iter(installed_cp_set).next()
		expanded_atoms = [atom for atom in expanded_atoms \
			if portage.dep_getkey(atom) == installed_cp]
	if len(expanded_atoms) > 1:
		# Still ambiguous: report and bail out.
		ambiguous_package_name(x, expanded_atoms, root_config,
			self.spinner, self.myopts)
		return False, myfavorites
	atom = expanded_atoms[0]
	null_atom = insert_category_into_atom(x, "null")
	null_cp = portage.dep_getkey(null_atom)
	cat, atom_pn = portage.catsplit(null_cp)
	virts_p = root_config.settings.get_virts_p().get(atom_pn)
	# Allow the depgraph to choose which virtual.
	atom = insert_category_into_atom(x, "virtual")
	atom = insert_category_into_atom(x, "null")
	args.append(AtomArg(arg=x, atom=atom,
		root_config=root_config))
	search_for_multiple = False
	if len(lookup_owners) > 1:
		search_for_multiple = True
	for x in lookup_owners:
		# A directory may be owned by multiple packages.
		if not search_for_multiple and os.path.isdir(x):
			search_for_multiple = True
		relative_paths.append(x[len(myroot):])
	for pkg, relative_path in \
		real_vardb._owners.iter_owners(relative_paths):
		owners.add(pkg.mycpv)
		if not search_for_multiple:
	portage.writemsg(("\n\n!!! '%s' is not claimed " + \
		"by any package.\n") % lookup_owners[0], noiselevel=-1)
	slot = vardb.aux_get(cpv, ["SLOT"])[0]
	# portage now masks packages with missing slot, but it's
	# possible that one was installed by an older version
	atom = portage.cpv_getkey(cpv)
	atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
	args.append(AtomArg(arg=atom, atom=atom,
		root_config=root_config))
	if "--update" in self.myopts:
		# In some cases, the greedy slots behavior can pull in a slot that
		# the user would want to uninstall due to it being blocked by a
		# newer version in a different slot. Therefore, it's necessary to
		# detect and discard any that should be uninstalled. Each time
		# that arguments are updated, package selections are repeated in
		# order to ensure consistency with the current arguments:
		#
		#  1) Initialize args
		#  2) Select packages and generate initial greedy atoms
		#  3) Update args with greedy atoms
		#  4) Select packages and generate greedy atoms again, while
		#     accounting for any blockers between selected packages
		#  5) Update args with revised greedy atoms
		self._set_args(args)
		greedy_args.append(arg)
		if not isinstance(arg, AtomArg):
		for atom in self._greedy_slots(arg.root_config, arg.atom):
			AtomArg(arg=arg.arg, atom=atom,
				root_config=arg.root_config))
		self._set_args(greedy_args)
		# Revise greedy atoms, accounting for any blockers
		# between selected packages.
		revised_greedy_args = []
		revised_greedy_args.append(arg)
		if not isinstance(arg, AtomArg):
		for atom in self._greedy_slots(arg.root_config, arg.atom,
			blocker_lookahead=True):
			revised_greedy_args.append(
				AtomArg(arg=arg.arg, atom=atom,
					root_config=arg.root_config))
		args = revised_greedy_args
		del revised_greedy_args
	self._set_args(args)
	# Deduplicate favorites while collecting them.
	myfavorites = set(myfavorites)
	if isinstance(arg, (AtomArg, PackageArg)):
		myfavorites.add(arg.atom)
	elif isinstance(arg, SetArg):
		myfavorites.add(arg.arg)
	myfavorites = list(myfavorites)
	pprovideddict = pkgsettings.pprovideddict
	portage.writemsg("\n", noiselevel=-1)
	# Order needs to be preserved since a feature of --nodeps
	# is to allow the user to force a specific merge order.
	for atom in arg.set:
		self.spinner.update()
		dep = Dependency(atom=atom, onlydeps=onlydeps,
			root=myroot, parent=arg)
		atom_cp = portage.dep_getkey(atom)
		pprovided = pprovideddict.get(portage.dep_getkey(atom))
		if pprovided and portage.match_from_list(atom, pprovided):
			# A provided package has been specified on the command line.
			self._pprovided_args.append((arg, atom))
		if isinstance(arg, PackageArg):
			if not self._add_pkg(arg.package, dep) or \
				not self._create_graph():
				sys.stderr.write(("\n\n!!! Problem resolving " + \
					"dependencies for %s\n") % arg.arg)
				return 0, myfavorites
		portage.writemsg("  Arg: %s\n     Atom: %s\n" % \
			(arg, atom), noiselevel=-1)
		pkg, existing_node = self._select_package(
			myroot, atom, onlydeps=onlydeps)
		if not (isinstance(arg, SetArg) and \
			arg.name in ("system", "world")):
			self._unsatisfied_deps_for_display.append(
				((myroot, atom), {}))
			return 0, myfavorites
		self._missing_args.append((arg, atom))
		if atom_cp != pkg.cp:
			# For old-style virtuals, we need to repeat the
			# package.provided check against the selected package.
			expanded_atom = atom.replace(atom_cp, pkg.cp)
			pprovided = pprovideddict.get(pkg.cp)
			portage.match_from_list(expanded_atom, pprovided):
				# A provided package has been
				# specified on the command line.
				self._pprovided_args.append((arg, atom))
		if pkg.installed and "selective" not in self.myparams:
			self._unsatisfied_deps_for_display.append(
				((myroot, atom), {}))
			# Previous behavior was to bail out in this case, but
			# since the dep is satisfied by the installed package,
			# it's more friendly to continue building the graph
			# and just show a warning message. Therefore, only bail
			# out here if the atom is not from either the system or
			if not (isinstance(arg, SetArg) and \
				arg.name in ("system", "world")):
				return 0, myfavorites
		# Add the selected package to the graph as soon as possible
		# so that later dep_check() calls can use it as feedback
		# for making more consistent atom selections.
		if not self._add_pkg(pkg, dep):
			if isinstance(arg, SetArg):
				sys.stderr.write(("\n\n!!! Problem resolving " + \
					"dependencies for %s from %s\n") % \
				sys.stderr.write(("\n\n!!! Problem resolving " + \
					"dependencies for %s\n") % atom)
			return 0, myfavorites
	except portage.exception.MissingSignature, e:
		portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
		portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
		portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
		portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
		portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
		return 0, myfavorites
	except portage.exception.InvalidSignature, e:
		portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
		portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
		portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
		portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
		portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
		return 0, myfavorites
	except SystemExit, e:
		raise # Needed else can't exit
	except Exception, e:
		print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
		print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
	# Now that the root packages have been added to the graph,
	# process the dependencies.
	if not self._create_graph():
		return 0, myfavorites
	if "--usepkgonly" in self.myopts:
		for xs in self.digraph.all_nodes():
			if not isinstance(xs, Package):
			if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
				print "Missing binary for:",xs[2]
	except self._unknown_internal_error:
		return False, myfavorites
	# We're true here unless we are missing binaries.
	return (not missing,myfavorites)
def _set_args(self, args):
	# NOTE(review): docstring delimiters, both "for arg in args:" loop
	# headers and several continue/initialization lines are elided
	# from this excerpt.
	Create the "args" package set from atoms and packages given as
	arguments. This method can be called multiple times if necessary.
	The package selection cache is automatically invalidated, since
	arguments influence package selections.
	args_set = self._sets["args"]
	if not isinstance(arg, (AtomArg, PackageArg)):
	if atom in args_set:
	# Rebuild the flattened union of atoms across all sets.
	self._set_atoms.clear()
	self._set_atoms.update(chain(*self._sets.itervalues()))
	atom_arg_map = self._atom_arg_map
	atom_arg_map.clear()
	for atom in arg.set:
		atom_key = (atom, arg.root_config.root)
		refs = atom_arg_map.get(atom_key)
		atom_arg_map[atom_key] = refs
	# Invalidate the package selection cache, since
	# arguments influence package selections.
	self._highest_pkg_cache.clear()
	for trees in self._filtered_trees.itervalues():
		trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
	# NOTE(review): docstring delimiters, the initializations of
	# slots/greedy_pkgs/blockers, the "while slots:" loop header, and
	# several return/continue lines are elided from this excerpt.
	Return a list of slot atoms corresponding to installed slots that
	differ from the slot of the highest visible match. When
	blocker_lookahead is True, slot atoms that would trigger a blocker
	conflict are automatically discarded, potentially allowing automatic
	uninstallation of older slots when appropriate.
	highest_pkg, in_graph = self._select_package(root_config.root, atom)
	if highest_pkg is None:
	vardb = root_config.trees["vartree"].dbapi
	for cpv in vardb.match(atom):
		# don't mix new virtuals with old virtuals
		if portage.cpv_getkey(cpv) == highest_pkg.cp:
			slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
	slots.add(highest_pkg.metadata["SLOT"])
	# The highest match's own slot is never a candidate.
	slots.remove(highest_pkg.metadata["SLOT"])
	slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
	pkg, in_graph = self._select_package(root_config.root, slot_atom)
	if pkg is not None and \
		pkg.cp == highest_pkg.cp and pkg < highest_pkg:
		greedy_pkgs.append(pkg)
	if not blocker_lookahead:
		return [pkg.slot_atom for pkg in greedy_pkgs]
	blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
	for pkg in greedy_pkgs + [highest_pkg]:
		dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
		atoms = self._select_atoms(
			pkg.root, dep_str, pkg.use.enabled,
			parent=pkg, strict=True)
		except portage.exception.InvalidDependString:
		blocker_atoms = (x for x in atoms if x.blocker)
		blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
	if highest_pkg not in blockers:
	# filter packages with invalid deps
	greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
	# filter packages that conflict with highest_pkg
	greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
		(blockers[highest_pkg].findAtomForPackage(pkg) or \
		blockers[pkg].findAtomForPackage(highest_pkg))]
	# If two packages conflict, discard the lower version.
	discard_pkgs = set()
	greedy_pkgs.sort(reverse=True)
	for i in xrange(len(greedy_pkgs) - 1):
		pkg1 = greedy_pkgs[i]
		if pkg1 in discard_pkgs:
		for j in xrange(i + 1, len(greedy_pkgs)):
			pkg2 = greedy_pkgs[j]
			if pkg2 in discard_pkgs:
			if blockers[pkg1].findAtomForPackage(pkg2) or \
				blockers[pkg2].findAtomForPackage(pkg1):
				# List is sorted descending, so pkg2 is the lower
				# version of the conflicting pair.
				discard_pkgs.add(pkg2)
	return [pkg.slot_atom for pkg in greedy_pkgs \
		if pkg not in discard_pkgs]
3183 def _select_atoms_from_graph(self, *pargs, **kwargs):
3185 Prefer atoms matching packages that have already been
3186 added to the graph or those that are installed and have
3187 not been scheduled for replacement.
3189 kwargs["trees"] = self._graph_trees
3190 return self._select_atoms_highest_available(*pargs, **kwargs)
def _select_atoms_highest_available(self, root, depstring,
	myuse=None, parent=None, strict=True, trees=None, priority=None):
	"""This will raise InvalidDependString if necessary. If trees is
	None then self._filtered_trees is used."""
	# NOTE(review): try:/finally: headers and the guards
	# "if trees is None:", "if not strict:" and "if not mycheck[0]:"
	# (plus a "parent = None" assignment) are elided from this excerpt.
	pkgsettings = self.pkgsettings[root]
	trees = self._filtered_trees
	if not getattr(priority, "buildtime", False):
		# The parent should only be passed to dep_check() for buildtime
		# dependencies since that's the only case when it's appropriate
		# to trigger the circular dependency avoidance code which uses it.
		# It's important not to trigger the same circular dependency
		# avoidance code for runtime dependencies since it's not needed
		# and it can promote an incorrect package choice.
	if parent is not None:
		# dep_check() reads the parent out of the trees mapping.
		trees[root]["parent"] = parent
	portage.dep._dep_check_strict = False
	mycheck = portage.dep_check(depstring, None,
		pkgsettings, myuse=myuse,
		myroot=root, trees=trees)
	# Cleanup (performed in a finally: block in the original).
	if parent is not None:
		trees[root].pop("parent")
	portage.dep._dep_check_strict = True
	raise portage.exception.InvalidDependString(mycheck[1])
	selected_atoms = mycheck[1]
	return selected_atoms
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
    """
    Print (Python 2 style) an explanation of why no package could
    satisfy the given atom — missing USE/IUSE flags, masks, or no
    ebuilds at all — followed by the dependency chain that pulled the
    atom in.

    NOTE(review): this excerpt is missing a number of lines (guard
    conditionals, list initializations such as missing_use/mreasons/
    changes/node/msg, and several print statements), so the block is
    not syntactically complete as shown.
    """
    atom = portage.dep.Atom(atom)
    atom_set = InternalPackageSet(initial_atoms=(atom,))
    atom_without_use = atom
    # Strip USE deps (re-appending the slot) so masked versions still
    # match below.
    atom_without_use = portage.dep.remove_slot(atom)
    atom_without_use += ":" + atom.slot
    atom_without_use = portage.dep.Atom(atom_without_use)
    xinfo = '"%s"' % atom
    # Discard null/ from failed cpv_expand category expansion.
    xinfo = xinfo.replace("null/", "")
    masked_packages = []
    masked_pkg_instances = set()
    missing_licenses = []
    have_eapi_mask = False
    pkgsettings = self.pkgsettings[root]
    implicit_iuse = pkgsettings._get_implicit_iuse()
    root_config = self.roots[root]
    portdb = self.roots[root].trees["porttree"].dbapi
    dbs = self._filtered_trees[root]["dbs"]
    for db, pkg_type, built, installed, db_keys in dbs:
        if hasattr(db, "xmatch"):
            cpv_list = db.xmatch("match-all", atom_without_use)
        cpv_list = db.match(atom_without_use)
        for cpv in cpv_list:
            metadata, mreasons = get_mask_info(root_config, cpv,
                pkgsettings, db, pkg_type, built, installed, db_keys)
            if metadata is not None:
                pkg = Package(built=built, cpv=cpv,
                    installed=installed, metadata=metadata,
                    root_config=root_config)
                if pkg.cp != atom.cp:
                    # A cpv can be returned from dbapi.match() as an
                    # old-style virtual match even in cases when the
                    # package does not actually PROVIDE the virtual.
                    # Filter out any such false matches here.
                    if not atom_set.findAtomForPackage(pkg):
                masked_pkg_instances.add(pkg)
                missing_use.append(pkg)
            masked_packages.append(
                (root_config, pkgsettings, cpv, metadata, mreasons))

    missing_use_reasons = []
    missing_iuse_reasons = []
    for pkg in missing_use:
        use = pkg.use.enabled
        # Build a regex matching any flag in the package's effective IUSE.
        iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
        iuse_re = re.compile("^(%s)$" % "|".join(iuse))
        for x in atom.use.required:
            if iuse_re.match(x) is None:
                missing_iuse.append(x)
        mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
        missing_iuse_reasons.append((pkg, mreasons))
        # Flags that would have to be toggled for this pkg to match.
        need_enable = sorted(atom.use.enabled.difference(use))
        need_disable = sorted(atom.use.disabled.intersection(use))
        if need_enable or need_disable:
            changes.extend(colorize("red", "+" + x) \
                for x in need_enable)
            changes.extend(colorize("blue", "-" + x) \
                for x in need_disable)
            mreasons.append("Change USE: %s" % " ".join(changes))
            missing_use_reasons.append((pkg, mreasons))

    unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
        in missing_use_reasons if pkg not in masked_pkg_instances]

    unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
        in missing_iuse_reasons if pkg not in masked_pkg_instances]

    show_missing_use = False
    if unmasked_use_reasons:
        # Only show the latest version.
        show_missing_use = unmasked_use_reasons[:1]
    elif unmasked_iuse_reasons:
        if missing_use_reasons:
            # All packages with required IUSE are masked,
            # so display a normal masking message.
        show_missing_use = unmasked_iuse_reasons

    if show_missing_use:
        print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
        print "!!! One of the following packages is required to complete your request:"
        for pkg, mreasons in show_missing_use:
            print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"

    elif masked_packages:
        colorize("BAD", "All ebuilds that could satisfy ") + \
        colorize("INFORM", xinfo) + \
        colorize("BAD", " have been masked.")
        print "!!! One of the following masked packages is required to complete your request:"
        have_eapi_mask = show_masked_packages(masked_packages)
        msg = ("The current version of portage supports " + \
            "EAPI '%s'. You must upgrade to a newer version" + \
            " of portage before EAPI masked packages can" + \
            " be installed.") % portage.const.EAPI
        from textwrap import wrap
        for line in wrap(msg, 75):
        print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."

    # Show parent nodes and the argument that pulled them in.
    traversed_nodes = set()
    while node is not None:
        traversed_nodes.add(node)
        msg.append('(dependency required by "%s" [%s])' % \
            (colorize('INFORM', str(node.cpv)), node.type_name))
        # When traversing to parents, prefer arguments over packages
        # since arguments are root nodes. Never traverse the same
        # package twice, in order to prevent an infinite loop.
        selected_parent = None
        for parent in self.digraph.parent_nodes(node):
            if isinstance(parent, DependencyArg):
                msg.append('(dependency required by "%s" [argument])' % \
                    (colorize('INFORM', str(parent))))
                selected_parent = None
            if parent not in traversed_nodes:
                selected_parent = parent
        node = selected_parent
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
    """
    Memoizing wrapper around _select_pkg_highest_available_imp().
    Results are cached under (root, atom, onlydeps); a cache entry is
    refreshed once its package has actually been added to the graph.

    NOTE(review): the cache-hit unpacking and the return statements are
    missing from this excerpt, so the flow below is incomplete as shown.
    """
    cache_key = (root, atom, onlydeps)
    ret = self._highest_pkg_cache.get(cache_key)
    if pkg and not existing:
        # See whether the cached choice has since been added to the
        # graph under its slot atom.
        existing = self._slot_pkg_map[root].get(pkg.slot_atom)
        if existing and existing == pkg:
            # Update the cache to reflect that the
            # package has been added to the graph.
            self._highest_pkg_cache[cache_key] = ret
    # Cache miss: compute the selection and memoize it.
    ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
    self._highest_pkg_cache[cache_key] = ret
    settings = pkg.root_config.settings
    # Register the chosen package as visible when it is visible and not
    # an installed package masked by KEYWORDS.
    if visible(settings, pkg) and not (pkg.installed and \
        settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
        pkg.root_config.visible_pkgs.cpv_inject(pkg)
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
    """
    Core package-selection routine: scan the filtered dbs for the given
    root and return (package, existing_node) for the highest acceptable
    match of the atom ("ebuild" type is the last resort).

    NOTE(review): this excerpt is missing many structural lines
    (guards, try/except headers, break/continue statements, and some
    initializations such as atom_cp/myeb/reinstall), so the indentation
    below is a best-effort reconstruction and the block is not
    syntactically complete as shown.
    """
    root_config = self.roots[root]
    pkgsettings = self.pkgsettings[root]
    dbs = self._filtered_trees[root]["dbs"]
    vardb = self.roots[root].trees["vartree"].dbapi
    portdb = self.roots[root].trees["porttree"].dbapi
    # List of acceptable packages, ordered by type preference.
    matched_packages = []
    highest_version = None
    if not isinstance(atom, portage.dep.Atom):
        atom = portage.dep.Atom(atom)
    atom_set = InternalPackageSet(initial_atoms=(atom,))
    existing_node = None
    usepkgonly = "--usepkgonly" in self.myopts
    empty = "empty" in self.myparams
    selective = "selective" in self.myparams
    noreplace = "--noreplace" in self.myopts
    # Behavior of the "selective" parameter depends on
    # whether or not a package matches an argument atom.
    # If an installed package provides an old-style
    # virtual that is no longer provided by an available
    # package, the installed package may match an argument
    # atom even though none of the available packages do.
    # Therefore, "selective" logic does not consider
    # whether or not an installed package matches an
    # argument atom. It only considers whether or not
    # available packages match argument atoms, which is
    # represented by the found_available_arg flag.
    found_available_arg = False
    for find_existing_node in True, False:
        for db, pkg_type, built, installed, db_keys in dbs:
            if installed and not find_existing_node:
                want_reinstall = reinstall or empty or \
                    (found_available_arg and not selective)
                if want_reinstall and matched_packages:
            if hasattr(db, "xmatch"):
                cpv_list = db.xmatch("match-all", atom)
            cpv_list = db.match(atom)

            # USE=multislot can make an installed package appear as if
            # it doesn't satisfy a slot dependency. Rebuilding the ebuild
            # won't do any good as long as USE=multislot is enabled since
            # the newly built package still won't have the expected slot.
            # Therefore, assume that such SLOT dependencies are already
            # satisfied rather than forcing a rebuild.
            if installed and not cpv_list and atom.slot:
                for cpv in db.match(atom.cp):
                    slot_available = False
                    for other_db, other_type, other_built, \
                        other_installed, other_keys in dbs:
                        other_db.aux_get(cpv, ["SLOT"])[0]:
                            slot_available = True
                    if not slot_available:
                    inst_pkg = self._pkg(cpv, "installed",
                        root_config, installed=installed)
                    # Remove the slot from the atom and verify that
                    # the package matches the resulting atom.
                    atom_without_slot = portage.dep.remove_slot(atom)
                    atom_without_slot += str(atom.use)
                    atom_without_slot = portage.dep.Atom(atom_without_slot)
                    if portage.match_from_list(
                        atom_without_slot, [inst_pkg]):
                        cpv_list = [inst_pkg.cpv]

            pkg_status = "merge"
            if installed or onlydeps:
                pkg_status = "nomerge"

            for cpv in cpv_list:
                # Make --noreplace take precedence over --newuse.
                if not installed and noreplace and \
                    cpv in vardb.match(atom):
                    # If the installed version is masked, it may
                    # be necessary to look at lower versions,
                    # in case there is a visible downgrade.
                reinstall_for_flags = None
                cache_key = (pkg_type, root, cpv, pkg_status)
                calculated_use = True
                pkg = self._pkg_cache.get(cache_key)
                calculated_use = False
                metadata = izip(db_keys, db.aux_get(cpv, db_keys))
                pkg = Package(built=built, cpv=cpv,
                    installed=installed, metadata=metadata,
                    onlydeps=onlydeps, root_config=root_config,
                metadata = pkg.metadata
                metadata['CHOST'] = pkgsettings.get('CHOST', '')
                if not built and ("?" in metadata["LICENSE"] or \
                    "?" in metadata["PROVIDE"]):
                    # This is avoided whenever possible because
                    # it's expensive. It only needs to be done here
                    # if it has an effect on visibility.
                    pkgsettings.setcpv(pkg)
                    metadata["USE"] = pkgsettings["PORTAGE_USE"]
                    calculated_use = True
                self._pkg_cache[pkg] = pkg

                if not installed or (built and matched_packages):
                    # Only enforce visibility on installed packages
                    # if there is at least one other visible package
                    # available. By filtering installed masked packages
                    # here, packages that have been masked since they
                    # were installed can be automatically downgraded
                    # to an unmasked version.
                    if not visible(pkgsettings, pkg):
                    except portage.exception.InvalidDependString:

                    # Enable upgrade or downgrade to a version
                    # with visible KEYWORDS when the installed
                    # version is masked by KEYWORDS, but never
                    # reinstall the same exact version only due
                    # to a KEYWORDS mask.
                    if built and matched_packages:
                        different_version = None
                        for avail_pkg in matched_packages:
                            if not portage.dep.cpvequal(
                                pkg.cpv, avail_pkg.cpv):
                                different_version = avail_pkg
                        if different_version is not None:
                            pkgsettings._getMissingKeywords(
                                pkg.cpv, pkg.metadata):
                            # If the ebuild no longer exists or it's
                            # keywords have been dropped, reject built
                            # instances (installed or binary).
                            # If --usepkgonly is enabled, assume that
                            # the ebuild status should be ignored.
                                pkg.cpv, "ebuild", root_config)
                            except portage.exception.PackageNotFound:
                                if not visible(pkgsettings, pkg_eb):

                if not pkg.built and not calculated_use:
                    # This is avoided whenever possible because
                    pkgsettings.setcpv(pkg)
                    pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]

                if pkg.cp != atom.cp:
                    # A cpv can be returned from dbapi.match() as an
                    # old-style virtual match even in cases when the
                    # package does not actually PROVIDE the virtual.
                    # Filter out any such false matches here.
                    if not atom_set.findAtomForPackage(pkg):

                if root == self.target_root:
                    # Ebuild USE must have been calculated prior
                    # to this point, in case atoms have USE deps.
                    myarg = self._iter_atoms_for_pkg(pkg).next()
                    except StopIteration:
                    except portage.exception.InvalidDependString:
                        # masked by corruption
                if not installed and myarg:
                    found_available_arg = True

                if atom.use and not pkg.built:
                    use = pkg.use.enabled
                    if atom.use.enabled.difference(use):
                    if atom.use.disabled.intersection(use):
                if pkg.cp == atom_cp:
                    if highest_version is None:
                        highest_version = pkg
                    elif pkg > highest_version:
                        highest_version = pkg
                # At this point, we've found the highest visible
                # match from the current repo. Any lower versions
                # from this repo are ignored, so this so the loop
                # will always end with a break statement below
                if find_existing_node:
                    e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
                    if portage.dep.match_from_list(atom, [e_pkg]):
                        if highest_version and \
                            e_pkg.cp == atom_cp and \
                            e_pkg < highest_version and \
                            e_pkg.slot_atom != highest_version.slot_atom:
                            # There is a higher version available in a
                            # different slot, so this existing node is
                        matched_packages.append(e_pkg)
                        existing_node = e_pkg
                # Compare built package to current config and
                # reject the built package if necessary.
                if built and not installed and \
                    ("--newuse" in self.myopts or \
                    "--reinstall" in self.myopts):
                    iuses = pkg.iuse.all
                    old_use = pkg.use.enabled
                    pkgsettings.setcpv(myeb)
                    pkgsettings.setcpv(pkg)
                    now_use = pkgsettings["PORTAGE_USE"].split()
                    forced_flags = set()
                    forced_flags.update(pkgsettings.useforce)
                    forced_flags.update(pkgsettings.usemask)
                    if myeb and not usepkgonly:
                        cur_iuse = myeb.iuse.all
                    if self._reinstall_for_flags(forced_flags,
                # Compare current config to installed package
                # and do not reinstall if possible.
                if not installed and \
                    ("--newuse" in self.myopts or \
                    "--reinstall" in self.myopts) and \
                    cpv in vardb.match(atom):
                    pkgsettings.setcpv(pkg)
                    forced_flags = set()
                    forced_flags.update(pkgsettings.useforce)
                    forced_flags.update(pkgsettings.usemask)
                    old_use = vardb.aux_get(cpv, ["USE"])[0].split()
                    old_iuse = set(filter_iuse_defaults(
                        vardb.aux_get(cpv, ["IUSE"])[0].split()))
                    cur_use = pkg.use.enabled
                    cur_iuse = pkg.iuse.all
                    reinstall_for_flags = \
                        self._reinstall_for_flags(
                            forced_flags, old_use, old_iuse,
                    if reinstall_for_flags:
                matched_packages.append(pkg)
                if reinstall_for_flags:
                    self._reinstall_nodes[pkg] = \

    if not matched_packages:

    if "--debug" in self.myopts:
        for pkg in matched_packages:
            portage.writemsg("%s %s\n" % \
                ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)

    # Filter out any old-style virtual matches if they are
    # mixed with new-style virtual matches.
    cp = portage.dep_getkey(atom)
    if len(matched_packages) > 1 and \
        "virtual" == portage.catsplit(cp)[0]:
        for pkg in matched_packages:
            # Got a new-style virtual, so filter
            # out any old-style virtuals.
            matched_packages = [pkg for pkg in matched_packages \

    if len(matched_packages) > 1:
        # Keep only the best version among mixed-type matches.
        bestmatch = portage.best(
            [pkg.cpv for pkg in matched_packages])
        matched_packages = [pkg for pkg in matched_packages \
            if portage.dep.cpvequal(pkg.cpv, bestmatch)]

    # ordered by type preference ("ebuild" type is the last resort)
    return matched_packages[-1], existing_node
def _select_pkg_from_graph(self, root, atom, onlydeps=False):
    """
    Select packages that have already been added to the graph or
    those that are installed and have not been scheduled for
    replacement.
    """
    # NOTE(review): the guard that returns (None, None) when there are
    # no matches is missing from this excerpt.
    graph_db = self._graph_trees[root]["porttree"].dbapi
    matches = graph_db.match_pkgs(atom)
    pkg = matches[-1] # highest match
    in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
    return pkg, in_graph
def _complete_graph(self):
    """
    Add any deep dependencies of required sets (args, system, world) that
    have not been pulled into the graph yet. This ensures that the graph
    is consistent such that initially satisfied deep dependencies are not
    broken in the new graph. Initially unsatisfied dependencies are
    irrelevant since we only want to avoid breaking dependencies that are
    initially satisfied.

    Since this method can consume enough time to disturb users, it is
    currently only enabled by the --complete-graph option.

    NOTE(review): several guard/return/continue lines, the args list
    initialization, and the "for arg in args:" loop header are missing
    from this excerpt; the block is not syntactically complete as shown.
    """
    if "--buildpkgonly" in self.myopts or \
        "recurse" not in self.myparams:

    if "complete" not in self.myparams:
        # Skip this to avoid consuming enough time to disturb users.

    # Put the depgraph into a mode that causes it to only
    # select packages that have already been added to the
    # graph or those that are installed and have not been
    # scheduled for replacement. Also, toggle the "deep"
    # parameter so that all dependencies are traversed and
    # accounted for.
    self._select_atoms = self._select_atoms_from_graph
    self._select_package = self._select_pkg_from_graph
    already_deep = "deep" in self.myparams
    if not already_deep:
        self.myparams.add("deep")

    for root in self.roots:
        required_set_names = self._required_set_names.copy()
        if root == self.target_root and \
            (already_deep or "empty" in self.myparams):
            required_set_names.difference_update(self._sets)
        if not required_set_names and not self._ignored_deps:
        root_config = self.roots[root]
        setconfig = root_config.setconfig

        # Reuse existing SetArg instances when available.
        for arg in self.digraph.root_nodes():
            if not isinstance(arg, SetArg):
            if arg.root_config != root_config:
            if arg.name in required_set_names:
                required_set_names.remove(arg.name)
        # Create new SetArg instances only when necessary.
        for s in required_set_names:
            expanded_set = InternalPackageSet(
                initial_atoms=setconfig.getSetAtoms(s))
            atom = SETPREFIX + s
            args.append(SetArg(arg=atom, set=expanded_set,
                root_config=root_config))
        vardb = root_config.trees["vartree"].dbapi
        for atom in arg.set:
            self._dep_stack.append(
                Dependency(atom=atom, root=root, parent=arg))
        if self._ignored_deps:
            self._dep_stack.extend(self._ignored_deps)
            self._ignored_deps = []
        if not self._create_graph(allow_unsatisfied=True):

        # Check the unsatisfied deps to see if any initially satisfied deps
        # will become unsatisfied due to an upgrade. Initially unsatisfied
        # deps are irrelevant since we only want to avoid breaking deps
        # that are initially satisfied.
        while self._unsatisfied_deps:
            dep = self._unsatisfied_deps.pop()
            matches = vardb.match_pkgs(dep.atom)
                self._initially_unsatisfied_deps.append(dep)

            # An scheduled installation broke a deep dependency.
            # Add the installed package to the graph so that it
            # will be appropriately reported as a slot collision
            # (possibly solvable via backtracking).
            pkg = matches[-1] # highest match
            if not self._add_pkg(pkg, dep):
            if not self._create_graph(allow_unsatisfied=True):
def _pkg(self, cpv, type_name, root_config, installed=False):
    """
    Get a package instance from the cache, or create a new
    one if necessary. Raises KeyError from aux_get if it
    failures for some reason (package does not exist or is
    corrupt).

    NOTE(review): the cache-hit guard, the try/except around aux_get(),
    and the final return are missing from this excerpt.
    """
    operation = "nomerge"
    pkg = self._pkg_cache.get(
        (type_name, root_config.root, cpv, operation))
    tree_type = self.pkg_tree_map[type_name]
    db = root_config.trees[tree_type].dbapi
    # Use the original (non-fake) trees' aux cache keys for metadata.
    db_keys = list(self._trees_orig[root_config.root][
        tree_type].dbapi._aux_cache_keys)
    metadata = izip(db_keys, db.aux_get(cpv, db_keys))
    # Normally raised from an "except KeyError:" handler (missing here).
    raise portage.exception.PackageNotFound(cpv)
    pkg = Package(cpv=cpv, metadata=metadata,
        root_config=root_config, installed=installed)
    if type_name == "ebuild":
        # Ebuilds need USE/CHOST computed from the current config.
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(pkg)
        pkg.metadata["USE"] = settings["PORTAGE_USE"]
        pkg.metadata['CHOST'] = settings.get('CHOST', '')
    self._pkg_cache[pkg] = pkg
def validate_blockers(self):
    """Remove any blockers from the digraph that do not match any of the
    packages within the graph. If necessary, create hard deps to ensure
    correct merge order such that mutually blocking packages are never
    installed simultaneously."""
    # NOTE(review): numerous guard/continue/return lines and try/finally
    # headers are missing from this excerpt; the indentation below is a
    # best-effort reconstruction and the block is not syntactically
    # complete as shown.

    if "--buildpkgonly" in self.myopts or \
        "--nodeps" in self.myopts:

    #if "deep" in self.myparams:
    # Pull in blockers from all installed packages that haven't already
    # been pulled into the depgraph. This is not enabled by default
    # due to the performance penalty that is incurred by all the
    # additional dep_check calls that are required.

    dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
    for myroot in self.trees:
        vardb = self.trees[myroot]["vartree"].dbapi
        portdb = self.trees[myroot]["porttree"].dbapi
        pkgsettings = self.pkgsettings[myroot]
        final_db = self.mydbapi[myroot]

        # Cache of blocker atoms keyed by installed cpv; entries not
        # touched below are considered stale and purged at the end.
        blocker_cache = BlockerCache(myroot, vardb)
        stale_cache = set(blocker_cache)
            stale_cache.discard(cpv)
            pkg_in_graph = self.digraph.contains(pkg)

            # Check for masked installed packages. Only warn about
            # packages that are in the graph in order to avoid warning
            # about those that will be automatically uninstalled during
            # the merge process or by --depclean.
            if pkg_in_graph and not visible(pkgsettings, pkg):
                self._masked_installed.add(pkg)

            blocker_atoms = None
                    self._blocker_parents.child_nodes(pkg))
                    self._irrelevant_blockers.child_nodes(pkg))
            if blockers is not None:
                blockers = set(str(blocker.atom) \
                    for blocker in blockers)

            # If this node has any blockers, create a "nomerge"
            # node for it so that they can be enforced.
            self.spinner.update()
            blocker_data = blocker_cache.get(cpv)
            # Invalidate the cache entry when the COUNTER changed.
            if blocker_data is not None and \
                blocker_data.counter != long(pkg.metadata["COUNTER"]):

            # If blocker data from the graph is available, use
            # it to validate the cache and update the cache if
            # it seems invalid.
            if blocker_data is not None and \
                blockers is not None:
                if not blockers.symmetric_difference(
                    blocker_data.atoms):

            if blocker_data is None and \
                blockers is not None:
                # Re-use the blockers from the graph.
                blocker_atoms = sorted(blockers)
                counter = long(pkg.metadata["COUNTER"])
                    blocker_cache.BlockerData(counter, blocker_atoms)
                blocker_cache[pkg.cpv] = blocker_data

                blocker_atoms = blocker_data.atoms
                # Use aux_get() to trigger FakeVartree global
                # updates on *DEPEND when appropriate.
                depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                # It is crucial to pass in final_db here in order to
                # optimize dep_check calls by eliminating atoms via
                # dep_wordreduce and dep_eval calls.
                    portage.dep._dep_check_strict = False
                        success, atoms = portage.dep_check(depstr,
                            final_db, pkgsettings, myuse=pkg.use.enabled,
                            trees=self._graph_trees, myroot=myroot)
                    except Exception, e:
                        if isinstance(e, SystemExit):
                        # This is helpful, for example, if a ValueError
                        # is thrown from cpv_expand due to multiple
                        # matches (this can happen if an atom lacks a
                        # category).
                        show_invalid_depstring_notice(
                            pkg, depstr, str(e))
                    # Restore strict checking (normally in a finally).
                    portage.dep._dep_check_strict = True
                    replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
                    if replacement_pkg and \
                        replacement_pkg[0].operation == "merge":
                        # This package is being replaced anyway, so
                        # ignore invalid dependencies so as not to
                        # annoy the user too much (otherwise they'd be
                        # forced to manually unmerge it first).
                    show_invalid_depstring_notice(pkg, depstr, atoms)
                # Blocker atoms are the "!"-prefixed ones.
                blocker_atoms = [myatom for myatom in atoms \
                    if myatom.startswith("!")]
                blocker_atoms.sort()
                counter = long(pkg.metadata["COUNTER"])
                blocker_cache[cpv] = \
                    blocker_cache.BlockerData(counter, blocker_atoms)
                for atom in blocker_atoms:
                    blocker = Blocker(atom=portage.dep.Atom(atom),
                        eapi=pkg.metadata["EAPI"], root=myroot)
                    self._blocker_parents.add(blocker, pkg)
            except portage.exception.InvalidAtom, e:
                depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                show_invalid_depstring_notice(
                    pkg, depstr, "Invalid Atom: %s" % (e,))
        # Purge cache entries for packages no longer installed.
        for cpv in stale_cache:
            del blocker_cache[cpv]
        blocker_cache.flush()

    # Discard any "uninstall" tasks scheduled by previous calls
    # to this method, since those tasks may not make sense given
    # the current graph state.
    previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
    if previous_uninstall_tasks:
        self._blocker_uninstalls = digraph()
        self.digraph.difference_update(previous_uninstall_tasks)

    for blocker in self._blocker_parents.leaf_nodes():
        self.spinner.update()
        root_config = self.roots[blocker.root]
        virtuals = root_config.settings.getvirtuals()
        myroot = blocker.root
        initial_db = self.trees[myroot]["vartree"].dbapi
        final_db = self.mydbapi[myroot]

        provider_virtual = False
        if blocker.cp in virtuals and \
            not self._have_new_virt(blocker.root, blocker.cp):
            provider_virtual = True

        # Use this to check PROVIDE for each matched package
        atom_set = InternalPackageSet(
            initial_atoms=[blocker.atom])

        if provider_virtual:
            # Expand the blocker atom to each virtual provider.
            for provider_entry in virtuals[blocker.cp]:
                    portage.dep_getkey(provider_entry)
                atoms.append(blocker.atom.replace(
                    blocker.cp, provider_cp))
            atoms = [blocker.atom]

        blocked_initial = set()
            for pkg in initial_db.match_pkgs(atom):
                if atom_set.findAtomForPackage(pkg):
                    blocked_initial.add(pkg)

        blocked_final = set()
            for pkg in final_db.match_pkgs(atom):
                if atom_set.findAtomForPackage(pkg):
                    blocked_final.add(pkg)

        if not blocked_initial and not blocked_final:
            parent_pkgs = self._blocker_parents.parent_nodes(blocker)
            self._blocker_parents.remove(blocker)
            # Discard any parents that don't have any more blockers.
            for pkg in parent_pkgs:
                self._irrelevant_blockers.add(blocker, pkg)
                if not self._blocker_parents.child_nodes(pkg):
                    self._blocker_parents.remove(pkg)
        for parent in self._blocker_parents.parent_nodes(blocker):
            unresolved_blocks = False
            depends_on_order = set()
            for pkg in blocked_initial:
                if pkg.slot_atom == parent.slot_atom:
                    # TODO: Support blocks within slots in cases where it
                    # might make sense. For example, a new version might
                    # require that the old version be uninstalled at build
                    # time.
                if parent.installed:
                    # Two currently installed packages conflict with
                    # eachother. Ignore this case since the damage
                    # is already done and this would be likely to
                    # confuse users if displayed like a normal blocker.

                self._blocked_pkgs.add(pkg, blocker)

                if parent.operation == "merge":
                    # Maybe the blocked package can be replaced or simply
                    # unmerged to resolve this block.
                    depends_on_order.add((pkg, parent))
                # None of the above blocker resolutions techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True
            for pkg in blocked_final:
                if pkg.slot_atom == parent.slot_atom:
                    # TODO: Support blocks within slots.
                if parent.operation == "nomerge" and \
                    pkg.operation == "nomerge":
                    # This blocker will be handled the next time that a
                    # merge of either package is triggered.

                self._blocked_pkgs.add(pkg, blocker)

                # Maybe the blocking package can be
                # unmerged to resolve this block.
                if parent.operation == "merge" and pkg.installed:
                    depends_on_order.add((pkg, parent))
                elif parent.operation == "nomerge":
                    depends_on_order.add((parent, pkg))
                # None of the above blocker resolutions techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True

            # Make sure we don't unmerge any package that have been pulled
            # into the graph.
            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    if self.digraph.contains(inst_pkg) and \
                        self.digraph.parent_nodes(inst_pkg):
                        unresolved_blocks = True

            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    uninst_task = Package(built=inst_pkg.built,
                        cpv=inst_pkg.cpv, installed=inst_pkg.installed,
                        metadata=inst_pkg.metadata,
                        operation="uninstall",
                        root_config=inst_pkg.root_config,
                        type_name=inst_pkg.type_name)
                    self._pkg_cache[uninst_task] = uninst_task
                    # Enforce correct merge order with a hard dep.
                    self.digraph.addnode(uninst_task, inst_task,
                        priority=BlockerDepPriority.instance)
                    # Count references to this blocker so that it can be
                    # invalidated after nodes referencing it have been
                    # merged.
                    self._blocker_uninstalls.addnode(uninst_task, blocker)
            if not unresolved_blocks and not depends_on_order:
                self._irrelevant_blockers.add(blocker, parent)
                self._blocker_parents.remove_edge(blocker, parent)
                if not self._blocker_parents.parent_nodes(blocker):
                    self._blocker_parents.remove(blocker)
                if not self._blocker_parents.child_nodes(parent):
                    self._blocker_parents.remove(parent)
            if unresolved_blocks:
                self._unsolvable_blockers.add(blocker, parent)
def _accept_blocker_conflicts(self):
    """
    Check the options under which unresolved blocker conflicts are
    apparently tolerated (fetch/build-only or --nodeps modes).

    NOTE(review): the accumulator assignment, break, and return lines
    are missing from this excerpt.
    """
    for x in ("--buildpkgonly", "--fetchonly",
        "--fetch-all-uri", "--nodeps"):
        if x in self.myopts:
def _merge_order_bias(self, mygraph):
    """
    For optimal leaf node selection, promote deep system runtime deps and
    order nodes from highest to lowest overall reference count.

    NOTE(review): the node_info initialization and the return statements
    inside the comparator are missing from this excerpt.
    """
    for node in mygraph.order:
        # Reference count: number of parents in the graph.
        node_info[node] = len(mygraph.parent_nodes(node))
    deep_system_deps = _find_deep_system_runtime_deps(mygraph)

    def cmp_merge_preference(node1, node2):
        # Uninstall operations are ordered relative to everything else.
        if node1.operation == 'uninstall':
            if node2.operation == 'uninstall':
        if node2.operation == 'uninstall':
            if node1.operation == 'uninstall':
        # Deep system runtime deps are promoted.
        node1_sys = node1 in deep_system_deps
        node2_sys = node2 in deep_system_deps
        if node1_sys != node2_sys:
        # Higher reference count sorts earlier.
        return node_info[node2] - node_info[node1]

    mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=False):
    """
    Produce the serialized task list, computing and caching it on
    demand via _resolve_conflicts() and _serialize_tasks().

    NOTE(review): the try header around _serialize_tasks(), the retry
    handler body, the optional reversal, and the final return are
    missing from this excerpt.
    """
    while self._serialized_tasks_cache is None:
        self._resolve_conflicts()
            self._serialized_tasks_cache, self._scheduler_graph = \
                self._serialize_tasks()
        except self._serialize_tasks_retry:

    # Work on a copy so callers cannot mutate the cache.
    retlist = self._serialized_tasks_cache[:]
def schedulerGraph(self):
    """
    The scheduler graph is identical to the normal one except that
    uninstall edges are reversed in specific cases that require
    conflicting packages to be temporarily installed simultaneously.
    This is intended for use by the Scheduler in it's parallelization
    logic. It ensures that temporary simultaneous installation of
    conflicting packages is avoided when appropriate (especially for
    !!atom blockers), but allowed in specific cases that require it.

    Note that this method calls break_refs() which alters the state of
    internal Package instances such that this depgraph instance should
    not be used to perform any more calculations.
    """
    if self._scheduler_graph is None:
        # NOTE(review): the statement that builds the graph on demand
        # is missing from this excerpt.
    self.break_refs(self._scheduler_graph.order)
    return self._scheduler_graph
# For each node in `nodes`, replace the FakeVartree-backed RootConfig
# with the original RootConfig so the nodes no longer keep this depgraph
# alive on the heap.
4220 def break_refs(self, nodes):
4222 Take a mergelist like that returned from self.altlist() and
4223 break any references that lead back to the depgraph. This is
4224 useful if you want to hold references to packages without
4225 also holding the depgraph on the heap.
# NOTE(review): the "for node in nodes:" loop header falls in a line
# missing from this excerpt; the body below operates per node.
4228 if hasattr(node, "root_config"):
4229 # The FakeVartree references the _package_cache which
4230 # references the depgraph. So that Package instances don't
4231 # hold the depgraph and FakeVartree on the heap, replace
4232 # the RootConfig that references the FakeVartree with the
4233 # original RootConfig instance which references the actual
4235 node.root_config = \
4236 self._trees_orig[node.root_config.root]["root_config"]
# Complete the graph and validate blockers before serialization; raises
# _unknown_internal_error on failure since both steps are expected to
# succeed at this point. Slot conflicts, if recorded, are processed last.
4238 def _resolve_conflicts(self):
4239 if not self._complete_graph():
4240 raise self._unknown_internal_error()
4242 if not self.validate_blockers():
4243 raise self._unknown_internal_error()
4245 if self._slot_collision_info:
4246 self._process_slot_conflicts()
# Core merge-order algorithm: flatten self.digraph into an ordered task
# list (retlist) plus a scheduler graph. Repeatedly selects leaf nodes,
# progressively loosening the dependency-priority criteria and finally
# dropping "satisfied" edges to break circular dependencies. Uninstall
# tasks required to solve blockers are scheduled after the packages that
# block them. Raises _serialize_tasks_retry to request a re-run with a
# completed graph, or _unknown_internal_error on unresolvable conflicts
# or circular dependencies.
# NOTE(review): this excerpt omits many original lines; comments below
# are hedged where the surrounding control flow is not visible.
4248 def _serialize_tasks(self):
4250 if "--debug" in self.myopts:
4251 writemsg("\ndigraph:\n\n", noiselevel=-1)
4252 self.digraph.debug_print()
4253 writemsg("\n", noiselevel=-1)
4255 scheduler_graph = self.digraph.copy()
4257 if '--nodeps' in self.myopts:
4258 # Preserve the package order given on the command line.
4259 return ([node for node in scheduler_graph \
4260 if isinstance(node, Package) \
4261 and node.operation == 'merge'], scheduler_graph)
# Work on a private copy; the loop below destructively removes nodes.
4263 mygraph=self.digraph.copy()
4264 # Prune "nomerge" root nodes if nothing depends on them, since
4265 # otherwise they slow down merge order calculation. Don't remove
4266 # non-root nodes since they help optimize merge order in some cases
4267 # such as revdep-rebuild.
4268 removed_nodes = set()
# Repeat until a pass removes nothing (pruning can expose new roots).
4270 for node in mygraph.root_nodes():
4271 if not isinstance(node, Package) or \
4272 node.installed or node.onlydeps:
4273 removed_nodes.add(node)
4275 self.spinner.update()
4276 mygraph.difference_update(removed_nodes)
4277 if not removed_nodes:
4279 removed_nodes.clear()
4280 self._merge_order_bias(mygraph)
4281 def cmp_circular_bias(n1, n2):
4283 RDEPEND is stronger than PDEPEND and this function
4284 measures such a strength bias within a circular
4285 dependency relationship.
4287 n1_n2_medium = n2 in mygraph.child_nodes(n1,
4288 ignore_priority=priority_range.ignore_medium_soft)
4289 n2_n1_medium = n1 in mygraph.child_nodes(n2,
4290 ignore_priority=priority_range.ignore_medium_soft)
4291 if n1_n2_medium == n2_n1_medium:
# NOTE(review): the unequal-case return values are in lines missing
# from this excerpt.
4296 myblocker_uninstalls = self._blocker_uninstalls.copy()
4298 # Contains uninstall tasks that have been scheduled to
4299 # occur after overlapping blockers have been installed.
4300 scheduled_uninstalls = set()
4301 # Contains any Uninstall tasks that have been ignored
4302 # in order to avoid the circular deps code path. These
4303 # correspond to blocker conflicts that could not be
4305 ignored_uninstall_tasks = set()
4306 have_uninstall_task = False
4307 complete = "complete" in self.myparams
4310 def get_nodes(**kwargs):
4312 Returns leaf nodes excluding Uninstall instances
4313 since those should be executed as late as possible.
4315 return [node for node in mygraph.leaf_nodes(**kwargs) \
4316 if isinstance(node, Package) and \
4317 (node.operation != "uninstall" or \
4318 node in scheduled_uninstalls)]
4320 # sys-apps/portage needs special treatment if ROOT="/"
4321 running_root = self._running_root.root
4322 from portage.const import PORTAGE_PACKAGE_ATOM
4323 runtime_deps = InternalPackageSet(
4324 initial_atoms=[PORTAGE_PACKAGE_ATOM])
4325 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
4326 PORTAGE_PACKAGE_ATOM)
4327 replacement_portage = self.mydbapi[running_root].match_pkgs(
4328 PORTAGE_PACKAGE_ATOM)
# Collapse the match lists to a single Package or None.
4331 running_portage = running_portage[0]
4333 running_portage = None
4335 if replacement_portage:
4336 replacement_portage = replacement_portage[0]
4338 replacement_portage = None
4340 if replacement_portage == running_portage:
4341 replacement_portage = None
4343 if replacement_portage is not None:
4344 # update from running_portage to replacement_portage asap
4345 asap_nodes.append(replacement_portage)
4347 if running_portage is not None:
4349 portage_rdepend = self._select_atoms_highest_available(
4350 running_root, running_portage.metadata["RDEPEND"],
4351 myuse=running_portage.use.enabled,
4352 parent=running_portage, strict=False)
4353 except portage.exception.InvalidDependString, e:
4354 portage.writemsg("!!! Invalid RDEPEND in " + \
4355 "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4356 (running_root, running_portage.cpv, e), noiselevel=-1)
4358 portage_rdepend = []
# Track portage's runtime deps so they are never uninstalled below.
4359 runtime_deps.update(atom for atom in portage_rdepend \
4360 if not atom.startswith("!"))
4362 def gather_deps(ignore_priority, mergeable_nodes,
4363 selected_nodes, node):
4365 Recursively gather a group of nodes that RDEPEND on
4366 eachother. This ensures that they are merged as a group
4367 and get their RDEPENDs satisfied as soon as possible.
4369 if node in selected_nodes:
4371 if node not in mergeable_nodes:
4373 if node == replacement_portage and \
4374 mygraph.child_nodes(node,
4375 ignore_priority=priority_range.ignore_medium_soft):
4376 # Make sure that portage always has all of it's
4377 # RDEPENDs installed first.
4379 selected_nodes.add(node)
4380 for child in mygraph.child_nodes(node,
4381 ignore_priority=ignore_priority):
4382 if not gather_deps(ignore_priority,
4383 mergeable_nodes, selected_nodes, child):
# Priority filters that additionally let blocker-dep edges through,
# used when evaluating uninstall-task parents.
4387 def ignore_uninst_or_med(priority):
4388 if priority is BlockerDepPriority.instance:
4390 return priority_range.ignore_medium(priority)
4392 def ignore_uninst_or_med_soft(priority):
4393 if priority is BlockerDepPriority.instance:
4395 return priority_range.ignore_medium_soft(priority)
4397 tree_mode = "--tree" in self.myopts
4398 # Tracks whether or not the current iteration should prefer asap_nodes
4399 # if available. This is set to False when the previous iteration
4400 # failed to select any nodes. It is reset whenever nodes are
4401 # successfully selected.
4404 # Controls whether or not the current iteration should drop edges that
4405 # are "satisfied" by installed packages, in order to solve circular
4406 # dependencies. The deep runtime dependencies of installed packages are
4407 # not checked in this case (bug #199856), so it must be avoided
4408 # whenever possible.
4409 drop_satisfied = False
4411 # State of variables for successive iterations that loosen the
4412 # criteria for node selection.
4414 # iteration prefer_asap drop_satisfied
4419 # If no nodes are selected on the last iteration, it is due to
4420 # unresolved blockers or circular dependencies.
# --- main selection loop: drain mygraph one batch of nodes at a time ---
4422 while not mygraph.empty():
4423 self.spinner.update()
4424 selected_nodes = None
4425 ignore_priority = None
4426 if drop_satisfied or (prefer_asap and asap_nodes):
4427 priority_range = DepPrioritySatisfiedRange
4429 priority_range = DepPriorityNormalRange
4430 if prefer_asap and asap_nodes:
4431 # ASAP nodes are merged before their soft deps. Go ahead and
4432 # select root nodes here if necessary, since it's typical for
4433 # the parent to have been removed from the graph already.
4434 asap_nodes = [node for node in asap_nodes \
4435 if mygraph.contains(node)]
4436 for node in asap_nodes:
4437 if not mygraph.child_nodes(node,
4438 ignore_priority=priority_range.ignore_soft):
4439 selected_nodes = [node]
4440 asap_nodes.remove(node)
4442 if not selected_nodes and \
4443 not (prefer_asap and asap_nodes):
# Try each priority level from strictest to loosest until some
# leaf nodes become available.
4444 for i in xrange(priority_range.NONE,
4445 priority_range.MEDIUM_SOFT + 1):
4446 ignore_priority = priority_range.ignore_priority[i]
4447 nodes = get_nodes(ignore_priority=ignore_priority)
4449 # If there is a mix of uninstall nodes with other
4450 # types, save the uninstall nodes for later since
4451 # sometimes a merge node will render an uninstall
4452 # node unnecessary (due to occupying the same slot),
4453 # and we want to avoid executing a separate uninstall
4454 # task in that case.
4456 good_uninstalls = []
4457 with_some_uninstalls_excluded = []
4459 if node.operation == "uninstall":
4460 slot_node = self.mydbapi[node.root
4461 ].match_pkgs(node.slot_atom)
4463 slot_node[0].operation == "merge":
4465 good_uninstalls.append(node)
4466 with_some_uninstalls_excluded.append(node)
4468 nodes = good_uninstalls
4469 elif with_some_uninstalls_excluded:
4470 nodes = with_some_uninstalls_excluded
4474 if ignore_priority is None and not tree_mode:
4475 # Greedily pop all of these nodes since no
4476 # relationship has been ignored. This optimization
4477 # destroys --tree output, so it's disabled in tree
4479 selected_nodes = nodes
4481 # For optimal merge order:
4482 # * Only pop one node.
4483 # * Removing a root node (node without a parent)
4484 # will not produce a leaf node, so avoid it.
4485 # * It's normal for a selected uninstall to be a
4486 # root node, so don't check them for parents.
4488 if node.operation == "uninstall" or \
4489 mygraph.parent_nodes(node):
4490 selected_nodes = [node]
# No plain leaves: try to pop a whole mutually-RDEPENDing group.
4496 if not selected_nodes:
4497 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
4499 mergeable_nodes = set(nodes)
4500 if prefer_asap and asap_nodes:
4502 for i in xrange(priority_range.SOFT,
4503 priority_range.MEDIUM_SOFT + 1):
4504 ignore_priority = priority_range.ignore_priority[i]
4506 if not mygraph.parent_nodes(node):
4508 selected_nodes = set()
4509 if gather_deps(ignore_priority,
4510 mergeable_nodes, selected_nodes, node):
4513 selected_nodes = None
4517 if prefer_asap and asap_nodes and not selected_nodes:
4518 # We failed to find any asap nodes to merge, so ignore
4519 # them for the next iteration.
4523 if selected_nodes and ignore_priority is not None:
4524 # Try to merge ignored medium_soft deps as soon as possible
4525 # if they're not satisfied by installed packages.
4526 for node in selected_nodes:
4527 children = set(mygraph.child_nodes(node))
4528 soft = children.difference(
4529 mygraph.child_nodes(node,
4530 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
4531 medium_soft = children.difference(
4532 mygraph.child_nodes(node,
4534 DepPrioritySatisfiedRange.ignore_medium_soft))
4535 medium_soft.difference_update(soft)
4536 for child in medium_soft:
4537 if child in selected_nodes:
4539 if child in asap_nodes:
4541 asap_nodes.append(child)
4543 if selected_nodes and len(selected_nodes) > 1:
4544 if not isinstance(selected_nodes, list):
4545 selected_nodes = list(selected_nodes)
4546 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
# --- blocker resolution: pick an uninstall task that unblocks merges ---
4548 if not selected_nodes and not myblocker_uninstalls.is_empty():
4549 # An Uninstall task needs to be executed in order to
4550 # avoid conflict if possible.
4553 priority_range = DepPrioritySatisfiedRange
4555 priority_range = DepPriorityNormalRange
4557 mergeable_nodes = get_nodes(
4558 ignore_priority=ignore_uninst_or_med)
4560 min_parent_deps = None
4562 for task in myblocker_uninstalls.leaf_nodes():
4563 # Do some sanity checks so that system or world packages
4564 # don't get uninstalled inappropriately here (only really
4565 # necessary when --complete-graph has not been enabled).
4567 if task in ignored_uninstall_tasks:
4570 if task in scheduled_uninstalls:
4571 # It's been scheduled but it hasn't
4572 # been executed yet due to dependence
4573 # on installation of blocking packages.
4576 root_config = self.roots[task.root]
4577 inst_pkg = self._pkg_cache[
4578 ("installed", task.root, task.cpv, "nomerge")]
4580 if self.digraph.contains(inst_pkg):
# Classify the blockers that pull this uninstall in: EAPI 0/1
# blockers have no overlap semantics (heuristic), newer EAPIs
# may explicitly forbid temporary overlap (!!atom).
4583 forbid_overlap = False
4584 heuristic_overlap = False
4585 for blocker in myblocker_uninstalls.parent_nodes(task):
4586 if blocker.eapi in ("0", "1"):
4587 heuristic_overlap = True
4588 elif blocker.atom.blocker.overlap.forbid:
4589 forbid_overlap = True
4591 if forbid_overlap and running_root == task.root:
4594 if heuristic_overlap and running_root == task.root:
4595 # Never uninstall sys-apps/portage or it's essential
4596 # dependencies, except through replacement.
4598 runtime_dep_atoms = \
4599 list(runtime_deps.iterAtomsForPackage(task))
4600 except portage.exception.InvalidDependString, e:
4601 portage.writemsg("!!! Invalid PROVIDE in " + \
4602 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4603 (task.root, task.cpv, e), noiselevel=-1)
4607 # Don't uninstall a runtime dep if it appears
4608 # to be the only suitable one installed.
4610 vardb = root_config.trees["vartree"].dbapi
4611 for atom in runtime_dep_atoms:
4612 other_version = None
4613 for pkg in vardb.match_pkgs(atom):
4614 if pkg.cpv == task.cpv and \
4615 pkg.metadata["COUNTER"] == \
4616 task.metadata["COUNTER"]:
4620 if other_version is None:
4626 # For packages in the system set, don't take
4627 # any chances. If the conflict can't be resolved
4628 # by a normal replacement operation then abort.
4631 for atom in root_config.sets[
4632 "system"].iterAtomsForPackage(task):
4635 except portage.exception.InvalidDependString, e:
4636 portage.writemsg("!!! Invalid PROVIDE in " + \
4637 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4638 (task.root, task.cpv, e), noiselevel=-1)
4644 # Note that the world check isn't always
4645 # necessary since self._complete_graph() will
4646 # add all packages from the system and world sets to the
4647 # graph. This just allows unresolved conflicts to be
4648 # detected as early as possible, which makes it possible
4649 # to avoid calling self._complete_graph() when it is
4650 # unnecessary due to blockers triggering an abortion.
4652 # For packages in the world set, go ahead an uninstall
4653 # when necessary, as long as the atom will be satisfied
4654 # in the final state.
4655 graph_db = self.mydbapi[task.root]
4658 for atom in root_config.sets[
4659 "world"].iterAtomsForPackage(task):
4661 for pkg in graph_db.match_pkgs(atom):
4668 self._blocked_world_pkgs[inst_pkg] = atom
4670 except portage.exception.InvalidDependString, e:
4671 portage.writemsg("!!! Invalid PROVIDE in " + \
4672 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4673 (task.root, task.cpv, e), noiselevel=-1)
4679 # Check the deps of parent nodes to ensure that
4680 # the chosen task produces a leaf node. Maybe
4681 # this can be optimized some more to make the
4682 # best possible choice, but the current algorithm
4683 # is simple and should be near optimal for most
4685 mergeable_parent = False
4687 for parent in mygraph.parent_nodes(task):
4688 parent_deps.update(mygraph.child_nodes(parent,
4689 ignore_priority=priority_range.ignore_medium_soft))
4690 if parent in mergeable_nodes and \
4691 gather_deps(ignore_uninst_or_med_soft,
4692 mergeable_nodes, set(), parent):
4693 mergeable_parent = True
4695 if not mergeable_parent:
# Prefer the task whose parents have the fewest remaining deps,
# i.e. the one closest to producing a leaf node.
4698 parent_deps.remove(task)
4699 if min_parent_deps is None or \
4700 len(parent_deps) < min_parent_deps:
4701 min_parent_deps = len(parent_deps)
4704 if uninst_task is not None:
4705 # The uninstall is performed only after blocking
4706 # packages have been merged on top of it. File
4707 # collisions between blocking packages are detected
4708 # and removed from the list of files to be uninstalled.
4709 scheduled_uninstalls.add(uninst_task)
4710 parent_nodes = mygraph.parent_nodes(uninst_task)
4712 # Reverse the parent -> uninstall edges since we want
4713 # to do the uninstall after blocking packages have
4714 # been merged on top of it.
4715 mygraph.remove(uninst_task)
4716 for blocked_pkg in parent_nodes:
4717 mygraph.add(blocked_pkg, uninst_task,
4718 priority=BlockerDepPriority.instance)
4719 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
4720 scheduler_graph.add(blocked_pkg, uninst_task,
4721 priority=BlockerDepPriority.instance)
4723 # Reset the state variables for leaf node selection and
4724 # continue trying to select leaf nodes.
4726 drop_satisfied = False
4729 if not selected_nodes:
4730 # Only select root nodes as a last resort. This case should
4731 # only trigger when the graph is nearly empty and the only
4732 # remaining nodes are isolated (no parents or children). Since
4733 # the nodes must be isolated, ignore_priority is not needed.
4734 selected_nodes = get_nodes()
4736 if not selected_nodes and not drop_satisfied:
4737 drop_satisfied = True
4740 if not selected_nodes and not myblocker_uninstalls.is_empty():
4741 # If possible, drop an uninstall task here in order to avoid
4742 # the circular deps code path. The corresponding blocker will
4743 # still be counted as an unresolved conflict.
4745 for node in myblocker_uninstalls.leaf_nodes():
4747 mygraph.remove(node)
4752 ignored_uninstall_tasks.add(node)
4755 if uninst_task is not None:
4756 # Reset the state variables for leaf node selection and
4757 # continue trying to select leaf nodes.
4759 drop_satisfied = False
# All strategies exhausted: the remaining graph is circular.
4762 if not selected_nodes:
4763 self._circular_deps_for_display = mygraph
4764 raise self._unknown_internal_error()
4766 # At this point, we've succeeded in selecting one or more nodes, so
4767 # reset state variables for leaf node selection.
4769 drop_satisfied = False
4771 mygraph.difference_update(selected_nodes)
# --- append the selected nodes to retlist, tracking blocker state ---
4773 for node in selected_nodes:
4774 if isinstance(node, Package) and \
4775 node.operation == "nomerge":
4778 # Handle interactions between blockers
4779 # and uninstallation tasks.
4780 solved_blockers = set()
4782 if isinstance(node, Package) and \
4783 "uninstall" == node.operation:
4784 have_uninstall_task = True
4787 vardb = self.trees[node.root]["vartree"].dbapi
4788 previous_cpv = vardb.match(node.slot_atom)
4790 # The package will be replaced by this one, so remove
4791 # the corresponding Uninstall task if necessary.
4792 previous_cpv = previous_cpv[0]
4794 ("installed", node.root, previous_cpv, "uninstall")
4796 mygraph.remove(uninst_task)
4800 if uninst_task is not None and \
4801 uninst_task not in ignored_uninstall_tasks and \
4802 myblocker_uninstalls.contains(uninst_task):
4803 blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
4804 myblocker_uninstalls.remove(uninst_task)
4805 # Discard any blockers that this Uninstall solves.
4806 for blocker in blocker_nodes:
4807 if not myblocker_uninstalls.child_nodes(blocker):
4808 myblocker_uninstalls.remove(blocker)
4809 solved_blockers.add(blocker)
4811 retlist.append(node)
4813 if (isinstance(node, Package) and \
4814 "uninstall" == node.operation) or \
4815 (uninst_task is not None and \
4816 uninst_task in scheduled_uninstalls):
4817 # Include satisfied blockers in the merge list
4818 # since the user might be interested and also
4819 # it serves as an indicator that blocking packages
4820 # will be temporarily installed simultaneously.
4821 for blocker in solved_blockers:
4822 retlist.append(Blocker(atom=blocker.atom,
4823 root=blocker.root, eapi=blocker.eapi,
# --- epilogue: surface unsolved blockers and decide retry/abort ---
4826 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
4827 for node in myblocker_uninstalls.root_nodes():
4828 unsolvable_blockers.add(node)
4830 for blocker in unsolvable_blockers:
4831 retlist.append(blocker)
4833 # If any Uninstall tasks need to be executed in order
4834 # to avoid a conflict, complete the graph with any
4835 # dependencies that may have been initially
4836 # neglected (to ensure that unsafe Uninstall tasks
4837 # are properly identified and blocked from execution).
4838 if have_uninstall_task and \
4840 not unsolvable_blockers:
4841 self.myparams.add("complete")
4842 raise self._serialize_tasks_retry("")
4844 if unsolvable_blockers and \
4845 not self._accept_blocker_conflicts():
# Cache results before raising so display code can still show them.
4846 self._unsatisfied_blockers_for_display = unsolvable_blockers
4847 self._serialized_tasks_cache = retlist[:]
4848 self._scheduler_graph = scheduler_graph
4849 raise self._unknown_internal_error()
4851 if self._slot_collision_info and \
4852 not self._accept_blocker_conflicts():
4853 self._serialized_tasks_cache = retlist[:]
4854 self._scheduler_graph = scheduler_graph
4855 raise self._unknown_internal_error()
4857 return retlist, scheduler_graph
# Display a circular-dependency error: prune non-cycle root nodes,
# show the cycle members (with --tree formatting forced on so USE
# context is visible), then print the raw graph and advice.
4859 def _show_circular_deps(self, mygraph):
4860 # No leaf nodes are available, so we have a circular
4861 # dependency panic situation. Reduce the noise level to a
4862 # minimum via repeated elimination of root nodes since they
4863 # have no parents and thus can not be part of a cycle.
4865 root_nodes = mygraph.root_nodes(
4866 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
4869 mygraph.difference_update(root_nodes)
4870 # Display the USE flags that are enabled on nodes that are part
4871 # of dependency cycles in case that helps the user decide to
4872 # disable some of them.
4874 tempgraph = mygraph.copy()
# Drain tempgraph to build a display order; when no leaf exists
# (pure cycle), fall back to the first node in insertion order.
4875 while not tempgraph.empty():
4876 nodes = tempgraph.leaf_nodes()
4878 node = tempgraph.order[0]
4881 display_order.append(node)
4882 tempgraph.remove(node)
4883 display_order.reverse()
# Force tree display regardless of user options so the cycle
# structure is readable.
4884 self.myopts.pop("--quiet", None)
4885 self.myopts.pop("--verbose", None)
4886 self.myopts["--tree"] = True
4887 portage.writemsg("\n\n", noiselevel=-1)
4888 self.display(display_order)
4889 prefix = colorize("BAD", " * ")
4890 portage.writemsg("\n", noiselevel=-1)
4891 portage.writemsg(prefix + "Error: circular dependencies:\n",
4893 portage.writemsg("\n", noiselevel=-1)
4894 mygraph.debug_print()
4895 portage.writemsg("\n", noiselevel=-1)
4896 portage.writemsg(prefix + "Note that circular dependencies " + \
4897 "can often be avoided by temporarily\n", noiselevel=-1)
4898 portage.writemsg(prefix + "disabling USE flags that trigger " + \
4899 "optional dependencies.\n", noiselevel=-1)
# Re-display the cached merge list unless the identical list (or its
# reversal, for --tree mode) has already been shown -- prevents
# display_problems() from printing the same list twice.
4901 def _show_merge_list(self):
4902 if self._serialized_tasks_cache is not None and \
4903 not (self._displayed_list and \
4904 (self._displayed_list == self._serialized_tasks_cache or \
4905 self._displayed_list == \
4906 list(reversed(self._serialized_tasks_cache)))):
4907 display_list = self._serialized_tasks_cache[:]
4908 if "--tree" in self.myopts:
4909 display_list.reverse()
4910 self.display(display_list)
# Report unsatisfied blocker conflicts: print the merge list, a wrapped
# error banner, then each conflicting package together with the parents
# (and atoms) that pulled it in, pruned to reduce noise.
4912 def _show_unsatisfied_blockers(self, blockers):
4913 self._show_merge_list()
4914 msg = "Error: The above package list contains " + \
4915 "packages which cannot be installed " + \
4916 "at the same time on the same system."
4917 prefix = colorize("BAD", " * ")
4918 from textwrap import wrap
4919 portage.writemsg("\n", noiselevel=-1)
4920 for line in wrap(msg, 70):
4921 portage.writemsg(prefix + line + "\n", noiselevel=-1)
4923 # Display the conflicting packages along with the packages
4924 # that pulled them in. This is helpful for troubleshooting
4925 # cases in which blockers don't solve automatically and
4926 # the reasons are not apparent from the normal merge list
# Map each conflicting package to the set of (parent, atom) pairs
# that pulled it in; fall back to a synthetic "@world" parent.
4930 for blocker in blockers:
4931 for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
4932 self._blocker_parents.parent_nodes(blocker)):
4933 parent_atoms = self._parent_atoms.get(pkg)
4934 if not parent_atoms:
4935 atom = self._blocked_world_pkgs.get(pkg)
4936 if atom is not None:
4937 parent_atoms = set([("@world", atom)])
4939 conflict_pkgs[pkg] = parent_atoms
4942 # Reduce noise by pruning packages that are only
4943 # pulled in by other conflict packages.
4945 for pkg, parent_atoms in conflict_pkgs.iteritems():
4946 relevant_parent = False
4947 for parent, atom in parent_atoms:
4948 if parent not in conflict_pkgs:
4949 relevant_parent = True
4951 if not relevant_parent:
4952 pruned_pkgs.add(pkg)
4953 for pkg in pruned_pkgs:
4954 del conflict_pkgs[pkg]
4960 # Max number of parents shown, to avoid flooding the display.
4962 for pkg, parent_atoms in conflict_pkgs.iteritems():
4966 # Prefer packages that are not directly involved in a conflict.
# Two passes: first non-conflict parents, then fill remaining
# slots (up to max_parents) with any parents.
4967 for parent_atom in parent_atoms:
4968 if len(pruned_list) >= max_parents:
4970 parent, atom = parent_atom
4971 if parent not in conflict_pkgs:
4972 pruned_list.add(parent_atom)
4974 for parent_atom in parent_atoms:
4975 if len(pruned_list) >= max_parents:
4977 pruned_list.add(parent_atom)
4979 omitted_parents = len(parent_atoms) - len(pruned_list)
4980 msg.append(indent + "%s pulled in by\n" % pkg)
4982 for parent_atom in pruned_list:
4983 parent, atom = parent_atom
4984 msg.append(2*indent)
4985 if isinstance(parent,
4986 (PackageArg, AtomArg)):
4987 # For PackageArg and AtomArg types, it's
4988 # redundant to display the atom attribute.
4989 msg.append(str(parent))
4991 # Display the specific atom from SetArg or
4993 msg.append("%s required by %s" % (atom, parent))
4997 msg.append(2*indent)
4998 msg.append("(and %d more)\n" % omitted_parents)
5002 sys.stderr.write("".join(msg))
5005 if "--quiet" not in self.myopts:
5006 show_blocker_docs_link()
5008 def display(self, mylist, favorites=[], verbosity=None):
5010 # This is used to prevent display_problems() from
5011 # redundantly displaying this exact same merge list
5012 # again via _show_merge_list().
5013 self._displayed_list = mylist
5015 if verbosity is None:
5016 verbosity = ("--quiet" in self.myopts and 1 or \
5017 "--verbose" in self.myopts and 3 or 2)
5018 favorites_set = InternalPackageSet(favorites)
5019 oneshot = "--oneshot" in self.myopts or \
5020 "--onlydeps" in self.myopts
5021 columns = "--columns" in self.myopts
5026 counters = PackageCounters()
5028 if verbosity == 1 and "--verbose" not in self.myopts:
5029 def create_use_string(*args):
5032 def create_use_string(name, cur_iuse, iuse_forced, cur_use,
5034 is_new, reinst_flags,
5035 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
5036 alphabetical=("--alphabetical" in self.myopts)):
5044 cur_iuse = set(cur_iuse)
5045 enabled_flags = cur_iuse.intersection(cur_use)
5046 removed_iuse = set(old_iuse).difference(cur_iuse)
5047 any_iuse = cur_iuse.union(old_iuse)
5048 any_iuse = list(any_iuse)
5050 for flag in any_iuse:
5053 reinst_flag = reinst_flags and flag in reinst_flags
5054 if flag in enabled_flags:
5056 if is_new or flag in old_use and \
5057 (all_flags or reinst_flag):
5058 flag_str = red(flag)
5059 elif flag not in old_iuse:
5060 flag_str = yellow(flag) + "%*"
5061 elif flag not in old_use:
5062 flag_str = green(flag) + "*"
5063 elif flag in removed_iuse:
5064 if all_flags or reinst_flag:
5065 flag_str = yellow("-" + flag) + "%"
5068 flag_str = "(" + flag_str + ")"
5069 removed.append(flag_str)
5072 if is_new or flag in old_iuse and \
5073 flag not in old_use and \
5074 (all_flags or reinst_flag):
5075 flag_str = blue("-" + flag)
5076 elif flag not in old_iuse:
5077 flag_str = yellow("-" + flag)
5078 if flag not in iuse_forced:
5080 elif flag in old_use:
5081 flag_str = green("-" + flag) + "*"
5083 if flag in iuse_forced:
5084 flag_str = "(" + flag_str + ")"
5086 enabled.append(flag_str)
5088 disabled.append(flag_str)
5091 ret = " ".join(enabled)
5093 ret = " ".join(enabled + disabled + removed)
5095 ret = '%s="%s" ' % (name, ret)
5098 repo_display = RepoDisplay(self.roots)
5102 mygraph = self.digraph.copy()
5104 # If there are any Uninstall instances, add the corresponding
5105 # blockers to the digraph (useful for --tree display).
5107 executed_uninstalls = set(node for node in mylist \
5108 if isinstance(node, Package) and node.operation == "unmerge")
5110 for uninstall in self._blocker_uninstalls.leaf_nodes():
5111 uninstall_parents = \
5112 self._blocker_uninstalls.parent_nodes(uninstall)
5113 if not uninstall_parents:
5116 # Remove the corresponding "nomerge" node and substitute
5117 # the Uninstall node.
5118 inst_pkg = self._pkg_cache[
5119 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
5121 mygraph.remove(inst_pkg)
5126 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
5128 inst_pkg_blockers = []
5130 # Break the Package -> Uninstall edges.
5131 mygraph.remove(uninstall)
5133 # Resolution of a package's blockers
5134 # depend on it's own uninstallation.
5135 for blocker in inst_pkg_blockers:
5136 mygraph.add(uninstall, blocker)
5138 # Expand Package -> Uninstall edges into
5139 # Package -> Blocker -> Uninstall edges.
5140 for blocker in uninstall_parents:
5141 mygraph.add(uninstall, blocker)
5142 for parent in self._blocker_parents.parent_nodes(blocker):
5143 if parent != inst_pkg:
5144 mygraph.add(blocker, parent)
5146 # If the uninstall task did not need to be executed because
5147 # of an upgrade, display Blocker -> Upgrade edges since the
5148 # corresponding Blocker -> Uninstall edges will not be shown.
5150 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
5151 if upgrade_node is not None and \
5152 uninstall not in executed_uninstalls:
5153 for blocker in uninstall_parents:
5154 mygraph.add(upgrade_node, blocker)
5156 unsatisfied_blockers = []
5161 if isinstance(x, Blocker) and not x.satisfied:
5162 unsatisfied_blockers.append(x)
5165 if "--tree" in self.myopts:
5166 depth = len(tree_nodes)
5167 while depth and graph_key not in \
5168 mygraph.child_nodes(tree_nodes[depth-1]):
5171 tree_nodes = tree_nodes[:depth]
5172 tree_nodes.append(graph_key)
5173 display_list.append((x, depth, True))
5174 shown_edges.add((graph_key, tree_nodes[depth-1]))
5176 traversed_nodes = set() # prevent endless circles
5177 traversed_nodes.add(graph_key)
5178 def add_parents(current_node, ordered):
5180 # Do not traverse to parents if this node is an
5181 # an argument or a direct member of a set that has
5182 # been specified as an argument (system or world).
5183 if current_node not in self._set_nodes:
5184 parent_nodes = mygraph.parent_nodes(current_node)
5186 child_nodes = set(mygraph.child_nodes(current_node))
5187 selected_parent = None
5188 # First, try to avoid a direct cycle.
5189 for node in parent_nodes:
5190 if not isinstance(node, (Blocker, Package)):
5192 if node not in traversed_nodes and \
5193 node not in child_nodes:
5194 edge = (current_node, node)
5195 if edge in shown_edges:
5197 selected_parent = node
5199 if not selected_parent:
5200 # A direct cycle is unavoidable.
5201 for node in parent_nodes:
5202 if not isinstance(node, (Blocker, Package)):
5204 if node not in traversed_nodes:
5205 edge = (current_node, node)
5206 if edge in shown_edges:
5208 selected_parent = node
5211 shown_edges.add((current_node, selected_parent))
5212 traversed_nodes.add(selected_parent)
5213 add_parents(selected_parent, False)
5214 display_list.append((current_node,
5215 len(tree_nodes), ordered))
5216 tree_nodes.append(current_node)
5218 add_parents(graph_key, True)
5220 display_list.append((x, depth, True))
5221 mylist = display_list
5222 for x in unsatisfied_blockers:
5223 mylist.append((x, 0, True))
5225 last_merge_depth = 0
5226 for i in xrange(len(mylist)-1,-1,-1):
5227 graph_key, depth, ordered = mylist[i]
5228 if not ordered and depth == 0 and i > 0 \
5229 and graph_key == mylist[i-1][0] and \
5230 mylist[i-1][1] == 0:
5231 # An ordered node got a consecutive duplicate when the tree was
5235 if ordered and graph_key[-1] != "nomerge":
5236 last_merge_depth = depth
5238 if depth >= last_merge_depth or \
5239 i < len(mylist) - 1 and \
5240 depth >= mylist[i+1][1]:
5243 from portage import flatten
5244 from portage.dep import use_reduce, paren_reduce
5245 # files to fetch list - avoids counting a same file twice
5246 # in size display (verbose mode)
5249 # Use this set to detect when all the "repoadd" strings are "[0]"
5250 # and disable the entire repo display in this case.
5253 for mylist_index in xrange(len(mylist)):
5254 x, depth, ordered = mylist[mylist_index]
5258 portdb = self.trees[myroot]["porttree"].dbapi
5259 bindb = self.trees[myroot]["bintree"].dbapi
5260 vardb = self.trees[myroot]["vartree"].dbapi
5261 vartree = self.trees[myroot]["vartree"]
5262 pkgsettings = self.pkgsettings[myroot]
5265 indent = " " * depth
5267 if isinstance(x, Blocker):
5269 blocker_style = "PKG_BLOCKER_SATISFIED"
5270 addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
5272 blocker_style = "PKG_BLOCKER"
5273 addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
5275 counters.blocks += 1
5277 counters.blocks_satisfied += 1
5278 resolved = portage.key_expand(
5279 str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
5280 if "--columns" in self.myopts and "--quiet" in self.myopts:
5281 addl += " " + colorize(blocker_style, resolved)
5283 addl = "[%s %s] %s%s" % \
5284 (colorize(blocker_style, "blocks"),
5285 addl, indent, colorize(blocker_style, resolved))
5286 block_parents = self._blocker_parents.parent_nodes(x)
5287 block_parents = set([pnode[2] for pnode in block_parents])
5288 block_parents = ", ".join(block_parents)
5290 addl += colorize(blocker_style,
5291 " (\"%s\" is blocking %s)") % \
5292 (str(x.atom).lstrip("!"), block_parents)
5294 addl += colorize(blocker_style,
5295 " (is blocking %s)") % block_parents
5296 if isinstance(x, Blocker) and x.satisfied:
5301 blockers.append(addl)
5304 pkg_merge = ordered and pkg_status == "merge"
5305 if not pkg_merge and pkg_status == "merge":
5306 pkg_status = "nomerge"
5307 built = pkg_type != "ebuild"
5308 installed = pkg_type == "installed"
5310 metadata = pkg.metadata
5312 repo_name = metadata["repository"]
5313 if pkg_type == "ebuild":
5314 ebuild_path = portdb.findname(pkg_key)
5315 if not ebuild_path: # shouldn't happen
5316 raise portage.exception.PackageNotFound(pkg_key)
5317 repo_path_real = os.path.dirname(os.path.dirname(
5318 os.path.dirname(ebuild_path)))
5320 repo_path_real = portdb.getRepositoryPath(repo_name)
5321 pkg_use = list(pkg.use.enabled)
5323 restrict = flatten(use_reduce(paren_reduce(
5324 pkg.metadata["RESTRICT"]), uselist=pkg_use))
5325 except portage.exception.InvalidDependString, e:
5326 if not pkg.installed:
5327 show_invalid_depstring_notice(x,
5328 pkg.metadata["RESTRICT"], str(e))
5332 if "ebuild" == pkg_type and x[3] != "nomerge" and \
5333 "fetch" in restrict:
5336 counters.restrict_fetch += 1
5337 if portdb.fetch_check(pkg_key, pkg_use):
5340 counters.restrict_fetch_satisfied += 1
5342 # We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
5343 # param is used for -u, where you still *do* want to see when something is being upgraded.
5346 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
5347 if vardb.cpv_exists(pkg_key):
5348 addl=" "+yellow("R")+fetch+" "
5351 counters.reinst += 1
5352 elif pkg_status == "uninstall":
5353 counters.uninst += 1
5354 # filter out old-style virtual matches
5355 elif installed_versions and \
5356 portage.cpv_getkey(installed_versions[0]) == \
5357 portage.cpv_getkey(pkg_key):
5358 myinslotlist = vardb.match(pkg.slot_atom)
5359 # If this is the first install of a new-style virtual, we
5360 # need to filter out old-style virtual matches.
5361 if myinslotlist and \
5362 portage.cpv_getkey(myinslotlist[0]) != \
5363 portage.cpv_getkey(pkg_key):
5366 myoldbest = myinslotlist[:]
5368 if not portage.dep.cpvequal(pkg_key,
5369 portage.best([pkg_key] + myoldbest)):
5371 addl += turquoise("U")+blue("D")
5373 counters.downgrades += 1
5376 addl += turquoise("U") + " "
5378 counters.upgrades += 1
5380 # New slot, mark it new.
5381 addl = " " + green("NS") + fetch + " "
5382 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
5384 counters.newslot += 1
5386 if "--changelog" in self.myopts:
5387 inst_matches = vardb.match(pkg.slot_atom)
5389 changelogs.extend(self.calc_changelog(
5390 portdb.findname(pkg_key),
5391 inst_matches[0], pkg_key))
5393 addl = " " + green("N") + " " + fetch + " "
5402 forced_flags = set()
5403 pkgsettings.setcpv(pkg) # for package.use.{mask,force}
5404 forced_flags.update(pkgsettings.useforce)
5405 forced_flags.update(pkgsettings.usemask)
5407 cur_use = [flag for flag in pkg.use.enabled \
5408 if flag in pkg.iuse.all]
5409 cur_iuse = sorted(pkg.iuse.all)
5411 if myoldbest and myinslotlist:
5412 previous_cpv = myoldbest[0]
5414 previous_cpv = pkg.cpv
5415 if vardb.cpv_exists(previous_cpv):
5416 old_iuse, old_use = vardb.aux_get(
5417 previous_cpv, ["IUSE", "USE"])
5418 old_iuse = list(set(
5419 filter_iuse_defaults(old_iuse.split())))
5421 old_use = old_use.split()
5428 old_use = [flag for flag in old_use if flag in old_iuse]
5430 use_expand = pkgsettings["USE_EXPAND"].lower().split()
5432 use_expand.reverse()
5433 use_expand_hidden = \
5434 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
5436 def map_to_use_expand(myvals, forcedFlags=False,
5440 for exp in use_expand:
5443 for val in myvals[:]:
5444 if val.startswith(exp.lower()+"_"):
5445 if val in forced_flags:
5446 forced[exp].add(val[len(exp)+1:])
5447 ret[exp].append(val[len(exp)+1:])
5450 forced["USE"] = [val for val in myvals \
5451 if val in forced_flags]
5453 for exp in use_expand_hidden:
5459 # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
5460 # are the only thing that triggered reinstallation.
5461 reinst_flags_map = {}
5462 reinstall_for_flags = self._reinstall_nodes.get(pkg)
5463 reinst_expand_map = None
5464 if reinstall_for_flags:
5465 reinst_flags_map = map_to_use_expand(
5466 list(reinstall_for_flags), removeHidden=False)
5467 for k in list(reinst_flags_map):
5468 if not reinst_flags_map[k]:
5469 del reinst_flags_map[k]
5470 if not reinst_flags_map.get("USE"):
5471 reinst_expand_map = reinst_flags_map.copy()
5472 reinst_expand_map.pop("USE", None)
5473 if reinst_expand_map and \
5474 not set(reinst_expand_map).difference(
5476 use_expand_hidden = \
5477 set(use_expand_hidden).difference(
5480 cur_iuse_map, iuse_forced = \
5481 map_to_use_expand(cur_iuse, forcedFlags=True)
5482 cur_use_map = map_to_use_expand(cur_use)
5483 old_iuse_map = map_to_use_expand(old_iuse)
5484 old_use_map = map_to_use_expand(old_use)
5487 use_expand.insert(0, "USE")
5489 for key in use_expand:
5490 if key in use_expand_hidden:
5492 verboseadd += create_use_string(key.upper(),
5493 cur_iuse_map[key], iuse_forced[key],
5494 cur_use_map[key], old_iuse_map[key],
5495 old_use_map[key], is_new,
5496 reinst_flags_map.get(key))
5501 if pkg_type == "ebuild" and pkg_merge:
5503 myfilesdict = portdb.getfetchsizes(pkg_key,
5504 useflags=pkg_use, debug=self.edebug)
5505 except portage.exception.InvalidDependString, e:
5506 src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
5507 show_invalid_depstring_notice(x, src_uri, str(e))
5510 if myfilesdict is None:
5511 myfilesdict="[empty/missing/bad digest]"
5513 for myfetchfile in myfilesdict:
5514 if myfetchfile not in myfetchlist:
5515 mysize+=myfilesdict[myfetchfile]
5516 myfetchlist.append(myfetchfile)
5518 counters.totalsize += mysize
5519 verboseadd += format_size(mysize)
5522 # assign index for a previous version in the same slot
5523 has_previous = False
5524 repo_name_prev = None
5525 slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
5527 slot_matches = vardb.match(slot_atom)
5530 repo_name_prev = vardb.aux_get(slot_matches[0],
5533 # now use the data to generate output
5534 if pkg.installed or not has_previous:
5535 repoadd = repo_display.repoStr(repo_path_real)
5537 repo_path_prev = None
5539 repo_path_prev = portdb.getRepositoryPath(
5541 if repo_path_prev == repo_path_real:
5542 repoadd = repo_display.repoStr(repo_path_real)
5544 repoadd = "%s=>%s" % (
5545 repo_display.repoStr(repo_path_prev),
5546 repo_display.repoStr(repo_path_real))
5548 repoadd_set.add(repoadd)
5550 xs = [portage.cpv_getkey(pkg_key)] + \
5551 list(portage.catpkgsplit(pkg_key)[2:])
5558 if "COLUMNWIDTH" in self.settings:
5560 mywidth = int(self.settings["COLUMNWIDTH"])
5561 except ValueError, e:
5562 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
5564 "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
5565 self.settings["COLUMNWIDTH"], noiselevel=-1)
5567 oldlp = mywidth - 30
5570 # Convert myoldbest from a list to a string.
5574 for pos, key in enumerate(myoldbest):
5575 key = portage.catpkgsplit(key)[2] + \
5576 "-" + portage.catpkgsplit(key)[3]
5577 if key[-3:] == "-r0":
5579 myoldbest[pos] = key
5580 myoldbest = blue("["+", ".join(myoldbest)+"]")
5583 root_config = self.roots[myroot]
5584 system_set = root_config.sets["system"]
5585 world_set = root_config.sets["world"]
5590 pkg_system = system_set.findAtomForPackage(pkg)
5591 pkg_world = world_set.findAtomForPackage(pkg)
5592 if not (oneshot or pkg_world) and \
5593 myroot == self.target_root and \
5594 favorites_set.findAtomForPackage(pkg):
5595 # Maybe it will be added to world now.
5596 if create_world_atom(pkg, favorites_set, root_config):
5598 except portage.exception.InvalidDependString:
5599 # This is reported elsewhere if relevant.
5602 def pkgprint(pkg_str):
5605 return colorize("PKG_MERGE_SYSTEM", pkg_str)
5607 return colorize("PKG_MERGE_WORLD", pkg_str)
5609 return colorize("PKG_MERGE", pkg_str)
5610 elif pkg_status == "uninstall":
5611 return colorize("PKG_UNINSTALL", pkg_str)
5614 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
5616 return colorize("PKG_NOMERGE_WORLD", pkg_str)
5618 return colorize("PKG_NOMERGE", pkg_str)
5621 properties = flatten(use_reduce(paren_reduce(
5622 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
5623 except portage.exception.InvalidDependString, e:
5624 if not pkg.installed:
5625 show_invalid_depstring_notice(pkg,
5626 pkg.metadata["PROPERTIES"], str(e))
5630 interactive = "interactive" in properties
5631 if interactive and pkg.operation == "merge":
5632 addl = colorize("WARN", "I") + addl[1:]
5634 counters.interactive += 1
5639 if "--columns" in self.myopts:
5640 if "--quiet" in self.myopts:
5641 myprint=addl+" "+indent+pkgprint(pkg_cp)
5642 myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
5643 myprint=myprint+myoldbest
5644 myprint=myprint+darkgreen("to "+x[1])
5648 myprint = "[%s] %s%s" % \
5649 (pkgprint(pkg_status.ljust(13)),
5650 indent, pkgprint(pkg.cp))
5652 myprint = "[%s %s] %s%s" % \
5653 (pkgprint(pkg.type_name), addl,
5654 indent, pkgprint(pkg.cp))
5655 if (newlp-nc_len(myprint)) > 0:
5656 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5657 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
5658 if (oldlp-nc_len(myprint)) > 0:
5659 myprint=myprint+" "*(oldlp-nc_len(myprint))
5660 myprint=myprint+myoldbest
5661 myprint += darkgreen("to " + pkg.root)
5664 myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
5666 myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
5667 myprint += indent + pkgprint(pkg_key) + " " + \
5668 myoldbest + darkgreen("to " + myroot)
5670 if "--columns" in self.myopts:
5671 if "--quiet" in self.myopts:
5672 myprint=addl+" "+indent+pkgprint(pkg_cp)
5673 myprint=myprint+" "+green(xs[1]+xs[2])+" "
5674 myprint=myprint+myoldbest
5678 myprint = "[%s] %s%s" % \
5679 (pkgprint(pkg_status.ljust(13)),
5680 indent, pkgprint(pkg.cp))
5682 myprint = "[%s %s] %s%s" % \
5683 (pkgprint(pkg.type_name), addl,
5684 indent, pkgprint(pkg.cp))
5685 if (newlp-nc_len(myprint)) > 0:
5686 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5687 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
5688 if (oldlp-nc_len(myprint)) > 0:
5689 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
5690 myprint += myoldbest
5693 myprint = "[%s] %s%s %s" % \
5694 (pkgprint(pkg_status.ljust(13)),
5695 indent, pkgprint(pkg.cpv),
5698 myprint = "[%s %s] %s%s %s" % \
5699 (pkgprint(pkg_type), addl, indent,
5700 pkgprint(pkg.cpv), myoldbest)
5702 if columns and pkg.operation == "uninstall":
5704 p.append((myprint, verboseadd, repoadd))
5706 if "--tree" not in self.myopts and \
5707 "--quiet" not in self.myopts and \
5708 not self._opts_no_restart.intersection(self.myopts) and \
5709 pkg.root == self._running_root.root and \
5710 portage.match_from_list(
5711 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
5712 not vardb.cpv_exists(pkg.cpv) and \
5713 "--quiet" not in self.myopts:
5714 if mylist_index < len(mylist) - 1:
5715 p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
5716 p.append(colorize("WARN", " then resume the merge."))
5719 show_repos = repoadd_set and repoadd_set != set(["0"])
5722 if isinstance(x, basestring):
5723 out.write("%s\n" % (x,))
5726 myprint, verboseadd, repoadd = x
5729 myprint += " " + verboseadd
5731 if show_repos and repoadd:
5732 myprint += " " + teal("[%s]" % repoadd)
5734 out.write("%s\n" % (myprint,))
5743 sys.stdout.write(str(repo_display))
5745 if "--changelog" in self.myopts:
5747 for revision,text in changelogs:
5748 print bold('*'+revision)
5749 sys.stdout.write(text)
# Public entry point for reporting dependency-graph problems. Redirects
# stdout to stderr around _display_problems() (see the note below), then
# prints unsatisfied deps to stdout for tools such as autounmask to parse.
# NOTE(review): this extract is missing some source lines (gaps in the
# embedded line numbers); only the visible lines are documented here.
5754 def display_problems(self):
5756 Display problems with the dependency graph such as slot collisions.
5757 This is called internally by display() to show the problems _after_
5758 the merge list where it is most likely to be seen, but if display()
5759 is not going to be called then this method should be called explicitly
5760 to ensure that the user is notified of problems with the graph.
5762 All output goes to stderr, except for unsatisfied dependencies which
5763 go to stdout for parsing by programs such as autounmask.
5766 # Note that show_masked_packages() sends its output to
5767 # stdout, and some programs such as autounmask parse the
5768 # output in cases when emerge bails out. However, when
5769 # show_masked_packages() is called for installed packages
5770 # here, the message is a warning that is more appropriate
5771 # to send to stderr, so temporarily redirect stdout to
5772 # stderr. TODO: Fix output code so there's a cleaner way
5773 # to redirect everything to stderr.
5778 sys.stdout = sys.stderr
5779 self._display_problems()
# (stdout is presumably restored in lines omitted from this extract —
# TODO confirm against the full source.)
5785 # This goes to stdout for parsing by programs like autounmask.
5786 for pargs, kwargs in self._unsatisfied_deps_for_display:
5787 self._show_unsatisfied_dep(*pargs, **kwargs)
# Internal worker for display_problems(): emits (to stderr) circular deps,
# unsatisfied blockers or slot collisions, world-file problems, missing
# ebuild arguments, package.provided conflicts, and masked installed
# packages, in that order.
# NOTE(review): this extract is missing some source lines (gaps in the
# embedded line numbers); only the visible lines are documented here.
5789 def _display_problems(self):
5790 if self._circular_deps_for_display is not None:
5791 self._show_circular_deps(
5792 self._circular_deps_for_display)
5794 # The user is only notified of a slot conflict if
5795 # there are no unresolvable blocker conflicts.
5796 if self._unsatisfied_blockers_for_display is not None:
5797 self._show_unsatisfied_blockers(
5798 self._unsatisfied_blockers_for_display)
5800 self._show_slot_collision_notice()
5802 # TODO: Add generic support for "set problem" handlers so that
5803 # the below warnings aren't special cases for world only.
5805 if self._missing_args:
5806 world_problems = False
5807 if "world" in self._sets:
5808 # Filter out indirect members of world (from nested sets)
5809 # since only direct members of world are desired here.
5810 world_set = self.roots[self.target_root].sets["world"]
5811 for arg, atom in self._missing_args:
5812 if arg.name == "world" and atom in world_set:
5813 world_problems = True
5817 sys.stderr.write("\n!!! Problems have been " + \
5818 "detected with your world file\n")
5819 sys.stderr.write("!!! Please run " + \
5820 green("emaint --check world")+"\n\n")
5822 if self._missing_args:
5823 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5824 " Ebuilds for the following packages are either all\n")
5825 sys.stderr.write(colorize("BAD", "!!!") + \
5826 " masked or don't exist:\n")
5827 sys.stderr.write(" ".join(str(atom) for arg, atom in \
5828 self._missing_args) + "\n")
5830 if self._pprovided_args:
# Group package.provided conflicts by (arg, atom) so each offending
# entry is reported once, with the set(s) that pulled it in.
5832 for arg, atom in self._pprovided_args:
5833 if isinstance(arg, SetArg):
5835 arg_atom = (atom, atom)
5838 arg_atom = (arg.arg, atom)
5839 refs = arg_refs.setdefault(arg_atom, [])
5840 if parent not in refs:
5843 msg.append(bad("\nWARNING: "))
5844 if len(self._pprovided_args) > 1:
5845 msg.append("Requested packages will not be " + \
5846 "merged because they are listed in\n")
5848 msg.append("A requested package will not be " + \
5849 "merged because it is listed in\n")
5850 msg.append("package.provided:\n\n")
5851 problems_sets = set()
5852 for (arg, atom), refs in arg_refs.iteritems():
5855 problems_sets.update(refs)
5857 ref_string = ", ".join(["'%s'" % name for name in refs])
5858 ref_string = " pulled in by " + ref_string
5859 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
5861 if "world" in problems_sets:
5862 msg.append("This problem can be solved in one of the following ways:\n\n")
5863 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
5864 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
5865 msg.append("  C) Remove offending entries from package.provided.\n\n")
5866 msg.append("The best course of action depends on the reason that an offending\n")
5867 msg.append("package.provided entry exists.\n\n")
5868 sys.stderr.write("".join(msg))
# Warn about installed packages that are now masked.
5870 masked_packages = []
5871 for pkg in self._masked_installed:
5872 root_config = pkg.root_config
5873 pkgsettings = self.pkgsettings[pkg.root]
5874 mreasons = get_masking_status(pkg, pkgsettings, root_config)
5875 masked_packages.append((root_config, pkgsettings,
5876 pkg.cpv, pkg.metadata, mreasons))
5878 sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5879 " The following installed packages are masked:\n")
5880 show_masked_packages(masked_packages)
# Return the ChangeLog entries between the installed version (`current`)
# and the version about to be merged (`next`), for --changelog display.
# Versions are normalized by stripping any "-r0" suffix before matching
# against the tags parsed by find_changelog_tags().
# NOTE(review): this extract is missing some source lines (gaps in the
# embedded line numbers), including the early-return and final return
# paths; only the visible lines are documented here.
5884 def calc_changelog(self,ebuildpath,current,next):
5885 if ebuildpath == None or not os.path.exists(ebuildpath):
5887 current = '-'.join(portage.catpkgsplit(current)[1:])
5888 if current.endswith('-r0'):
5889 current = current[:-3]
5890 next = '-'.join(portage.catpkgsplit(next)[1:])
5891 if next.endswith('-r0'):
# ChangeLog lives alongside the ebuild in the package directory.
5893 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
5895 changelog = open(changelogpath).read()
5896 except SystemExit, e:
5897 raise # Needed else can't exit
5900 divisions = self.find_changelog_tags(changelog)
5901 #print 'XX from',current,'to',next
5902 #for div,text in divisions: print 'XX',div
5903 # skip entries for all revisions above the one we are about to emerge
5904 for i in range(len(divisions)):
5905 if divisions[i][0]==next:
5906 divisions = divisions[i:]
5908 # find out how many entries we are going to display
5909 for i in range(len(divisions)):
5910 if divisions[i][0]==current:
5911 divisions = divisions[:i]
5914 # couldn't find the current revision in the list. display nothing
# Split a ChangeLog text into (release, text) sections by scanning for
# "*<version>" header lines. Trailing ".ebuild" and "-r0" suffixes are
# stripped from the parsed release tag so it matches normalized versions.
# NOTE(review): this extract is missing some source lines (the loop
# header, accumulator init, and return are not visible); only the
# visible lines are documented here.
5918 def find_changelog_tags(self,changelog):
# Regex matches a header line of the form "* <release> ..." (re.M).
5922 match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
5924 if release is not None:
# No further header: the rest of the text belongs to the last release.
5925 divs.append((release,changelog))
5927 if release is not None:
5928 divs.append((release,changelog[:match.start()]))
5929 changelog = changelog[match.end():]
5930 release = match.group(1)
5931 if release.endswith('.ebuild'):
5932 release = release[:-7]
5933 if release.endswith('-r0'):
5934 release = release[:-3]
# NOTE(review): this extract is missing some source lines (gaps in the
# embedded line numbers), e.g. the lock acquisition/release around the
# world file; only the visible lines are documented here.
5936 def saveNomergeFavorites(self):
5937 """Find atoms in favorites that are not in the mergelist and add them
5938 to the world file if necessary."""
# Bail out for option modes that must not modify the world file.
5939 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
5940 "--oneshot", "--onlydeps", "--pretend"):
5941 if x in self.myopts:
5943 root_config = self.roots[self.target_root]
5944 world_set = root_config.sets["world"]
5946 world_locked = False
5947 if hasattr(world_set, "lock"):
5951 if hasattr(world_set, "load"):
5952 world_set.load() # maybe it's changed on disk
5954 args_set = self._sets["args"]
5955 portdb = self.trees[self.target_root]["porttree"].dbapi
5956 added_favorites = set()
# Collect world atoms for argument packages that ended up as "nomerge".
5957 for x in self._set_nodes:
5958 pkg_type, root, pkg_key, pkg_status = x
5959 if pkg_status != "nomerge":
5963 myfavkey = create_world_atom(x, args_set, root_config)
5965 if myfavkey in added_favorites:
5967 added_favorites.add(myfavkey)
5968 except portage.exception.InvalidDependString, e:
5969 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
5970 (pkg_key, str(e)), noiselevel=-1)
5971 writemsg("!!! see '%s'\n\n" % os.path.join(
5972 root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
# Also record requested sets (other than args/world) that are
# flagged as world candidates.
5975 for k in self._sets:
5976 if k in ("args", "world") or not root_config.sets[k].world_candidate:
5981 all_added.append(SETPREFIX + k)
5982 all_added.extend(added_favorites)
5985 print ">>> Recording %s in \"world\" favorites file..." % \
5986 colorize("INFORM", str(a))
5988 world_set.update(all_added)
# Rebuild the dependency graph from saved resume data (--resume /
# --keep-going). Validates each mergelist entry, re-creates Package
# instances, injects them into the fake db, then re-runs graph creation
# and converts dropped/unsatisfiable entries into UnsatisfiedResumeDep.
# NOTE(review): this extract is missing some source lines (gaps in the
# embedded line numbers); only the visible lines are documented here.
5993 def loadResumeCommand(self, resume_data, skip_masked=True,
5996 Add a resume command to the graph and validate it in the process.  This
5997 will raise a PackageNotFound exception if a package is not available.
6000 if not isinstance(resume_data, dict):
6003 mergelist = resume_data.get("mergelist")
6004 if not isinstance(mergelist, list):
6007 fakedb = self.mydbapi
6009 serialized_tasks = []
# Each mergelist entry must be a 4-item [type, root, cpv, action] list.
6012 if not (isinstance(x, list) and len(x) == 4):
6014 pkg_type, myroot, pkg_key, action = x
6015 if pkg_type not in self.pkg_tree_map:
6017 if action != "merge":
6019 tree_type = self.pkg_tree_map[pkg_type]
6020 mydb = trees[myroot][tree_type].dbapi
6021 db_keys = list(self._trees_orig[myroot][
6022 tree_type].dbapi._aux_cache_keys)
6024 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
6026 # It does not exist or it is corrupt.
6027 if action == "uninstall":
6030 # TODO: log these somewhere
6032 raise portage.exception.PackageNotFound(pkg_key)
6033 installed = action == "uninstall"
6034 built = pkg_type != "ebuild"
6035 root_config = self.roots[myroot]
6036 pkg = Package(built=built, cpv=pkg_key,
6037 installed=installed, metadata=metadata,
6038 operation=action, root_config=root_config,
6040 if pkg_type == "ebuild":
# Recompute USE/CHOST from current settings rather than trusting
# the values saved in the resume data.
6041 pkgsettings = self.pkgsettings[myroot]
6042 pkgsettings.setcpv(pkg)
6043 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6044 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
6045 self._pkg_cache[pkg] = pkg
6047 root_config = self.roots[pkg.root]
6048 if "merge" == pkg.operation and \
6049 not visible(root_config.settings, pkg):
6051 masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
6053 self._unsatisfied_deps_for_display.append(
6054 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
6056 fakedb[myroot].cpv_inject(pkg)
6057 serialized_tasks.append(pkg)
6058 self.spinner.update()
6060 if self._unsatisfied_deps_for_display:
6063 if not serialized_tasks or "--nodeps" in self.myopts:
6064 self._serialized_tasks_cache = serialized_tasks
6065 self._scheduler_graph = self.digraph
6067 self._select_package = self._select_pkg_from_graph
6068 self.myparams.add("selective")
6069 # Always traverse deep dependencies in order to account for
6070 # potentially unsatisfied dependencies of installed packages.
6071 # This is necessary for correct --keep-going or --resume operation
6072 # in case a package from a group of circularly dependent packages
6073 # fails. In this case, a package which has recently been installed
6074 # may have an unsatisfied circular dependency (pulled in by
6075 # PDEPEND, for example). So, even though a package is already
6076 # installed, it may not have all of its dependencies satisfied, so
6077 # it may not be usable. If such a package is in the subgraph of
6078 # deep dependencies of a scheduled build, that build needs to
6079 # be cancelled. In order for this type of situation to be
6080 # recognized, deep traversal of dependencies is required.
6081 self.myparams.add("deep")
6083 favorites = resume_data.get("favorites")
6084 args_set = self._sets["args"]
6085 if isinstance(favorites, list):
6086 args = self._load_favorites(favorites)
6090 for task in serialized_tasks:
6091 if isinstance(task, Package) and \
6092 task.operation == "merge":
6093 if not self._add_pkg(task, None):
6096 # Packages for argument atoms need to be explicitly
6097 # added via _add_pkg() so that they are included in the
6098 # digraph (needed at least for --tree display).
6100 for atom in arg.set:
6101 pkg, existing_node = self._select_package(
6102 arg.root_config.root, atom)
6103 if existing_node is None and \
6105 if not self._add_pkg(pkg, Dependency(atom=atom,
6106 root=pkg.root, parent=arg)):
6109 # Allow unsatisfied deps here to avoid showing a masking
6110 # message for an unsatisfied dep that isn't necessarily
6112 if not self._create_graph(allow_unsatisfied=True):
6115 unsatisfied_deps = []
6116 for dep in self._unsatisfied_deps:
6117 if not isinstance(dep.parent, Package):
6119 if dep.parent.operation == "merge":
6120 unsatisfied_deps.append(dep)
6123 # For unsatisfied deps of installed packages, only account for
6124 # them if they are in the subgraph of dependencies of a package
6125 # which is scheduled to be installed.
6126 unsatisfied_install = False
# Walk ancestors of the dep's parent looking for a scheduled merge.
6128 dep_stack = self.digraph.parent_nodes(dep.parent)
6130 node = dep_stack.pop()
6131 if not isinstance(node, Package):
6133 if node.operation == "merge":
6134 unsatisfied_install = True
6136 if node in traversed:
6139 dep_stack.extend(self.digraph.parent_nodes(node))
6141 if unsatisfied_install:
6142 unsatisfied_deps.append(dep)
6144 if masked_tasks or unsatisfied_deps:
6145 # This probably means that a required package
6146 # was dropped via --skipfirst. It makes the
6147 # resume list invalid, so convert it to a
6148 # UnsatisfiedResumeDep exception.
6149 raise self.UnsatisfiedResumeDep(self,
6150 masked_tasks + unsatisfied_deps)
6151 self._serialized_tasks_cache = None
6154 except self._unknown_internal_error:
# NOTE(review): this extract is missing some source lines (gaps in the
# embedded line numbers), e.g. the loop header over `favorites` and the
# args accumulator init; only the visible lines are documented here.
6159 def _load_favorites(self, favorites):
6161 Use a list of favorites to resume state from a
6162 previous select_files() call. This creates similar
6163 DependencyArg instances to those that would have
6164 been created by the original select_files() call.
6165 This allows Package instances to be matched with
6166 DependencyArg instances during graph creation.
6168 root_config = self.roots[self.target_root]
6169 getSetAtoms = root_config.setconfig.getSetAtoms
6170 sets = root_config.sets
# Skip non-string entries and the implicit system/world names.
6173 if not isinstance(x, basestring):
6175 if x in ("system", "world"):
6177 if x.startswith(SETPREFIX):
6178 s = x[len(SETPREFIX):]
6183 # Recursively expand sets so that containment tests in
6184 # self._get_parent_sets() properly match atoms in nested
6185 # sets (like if world contains system).
6186 expanded_set = InternalPackageSet(
6187 initial_atoms=getSetAtoms(s))
6188 self._sets[s] = expanded_set
6189 args.append(SetArg(arg=x, set=expanded_set,
6190 root_config=root_config))
# Plain entries become AtomArg instances, if valid atoms.
6192 if not portage.isvalidatom(x):
6194 args.append(AtomArg(arg=x, atom=x,
6195 root_config=root_config))
6197 self._set_args(args)
# Raised by loadResumeCommand() when the resume list has become invalid;
# carries a back-reference to the depgraph for the caller's error report.
6200 class UnsatisfiedResumeDep(portage.exception.PortageException):
6202 A dependency of a resume list is not installed. This
6203 can occur when a required package is dropped from the
6204 merge list via --skipfirst.
6206 def __init__(self, depgraph, value):
6207 portage.exception.PortageException.__init__(self, value)
# Keep the depgraph so callers can inspect the failed graph state.
6208 self.depgraph = depgraph
# Base class for the depgraph's private control-flow exceptions below;
# the value message defaults to empty since subclasses explain themselves.
6210 class _internal_exception(portage.exception.PortageException):
6211 def __init__(self, value=""):
6212 portage.exception.PortageException.__init__(self, value)
# NOTE(review): the tail of this docstring is missing from this extract.
6214 class _unknown_internal_error(_internal_exception):
6216 Used by the depgraph internally to terminate graph creation.
6217 The specific reason for the failure should have been dumped
6218 to stderr, unfortunately, the exact reason for the failure
# Control-flow exception: signals that _serialize_tasks() must re-run.
6222 class _serialize_tasks_retry(_internal_exception):
6224 This is raised by the _serialize_tasks() method when it needs to
6225 be called again for some reason. The only case that it's currently
6226 used for is when neglected dependencies need to be added to the
6227 graph in order to avoid making a potentially unsafe decision.
# NOTE(review): this extract is missing some source lines (gaps in the
# embedded line numbers); only the visible lines are documented here.
6230 class _dep_check_composite_db(portage.dbapi):
6232 A dbapi-like interface that is optimized for use in dep_check() calls.
6233 This is built on top of the existing depgraph package selection logic.
6234 Some packages that have been added to the graph may be masked from this
6235 view in order to influence the atom preference selection that occurs
6238 def __init__(self, depgraph, root):
6239 portage.dbapi.__init__(self)
6240 self._depgraph = depgraph
# Per-atom match results and cpv -> Package lookups are memoized;
# _clear_cache() must be called when graph state changes.
6242 self._match_cache = {}
6243 self._cpv_pkg_map = {}
6245 def _clear_cache(self):
6246 self._match_cache.clear()
6247 self._cpv_pkg_map.clear()
# match(): resolve an atom via the depgraph's package selection,
# considering additional slots for new-style virtuals, filter by
# _visible(), and cache the sorted cpv list.
6249 def match(self, atom):
6250 ret = self._match_cache.get(atom)
6255 atom = self._dep_expand(atom)
6256 pkg, existing = self._depgraph._select_package(self._root, atom)
6260 # Return the highest available from select_package() as well as
6261 # any matching slots in the graph db.
6263 slots.add(pkg.metadata["SLOT"])
6264 atom_cp = portage.dep_getkey(atom)
6265 if pkg.cp.startswith("virtual/"):
6266 # For new-style virtual lookahead that occurs inside
6267 # dep_check(), examine all slots. This is needed
6268 # so that newer slots will not unnecessarily be pulled in
6269 # when a satisfying lower slot is already installed. For
6270 # example, if virtual/jdk-1.4 is satisfied via kaffe then
6271 # there's no need to pull in a newer slot to satisfy a
6272 # virtual/jdk dependency.
6273 for db, pkg_type, built, installed, db_keys in \
6274 self._depgraph._filtered_trees[self._root]["dbs"]:
6275 for cpv in db.match(atom):
6276 if portage.cpv_getkey(cpv) != pkg.cp:
6278 slots.add(db.aux_get(cpv, ["SLOT"])[0])
6280 if self._visible(pkg):
6281 self._cpv_pkg_map[pkg.cpv] = pkg
6283 slots.remove(pkg.metadata["SLOT"])
# Select a package for each remaining slot as well.
6285 slot_atom = "%s:%s" % (atom_cp, slots.pop())
6286 pkg, existing = self._depgraph._select_package(
6287 self._root, slot_atom)
6290 if not self._visible(pkg):
6292 self._cpv_pkg_map[pkg.cpv] = pkg
6295 self._cpv_sort_ascending(ret)
6296 self._match_cache[orig_atom] = ret
# _visible(): decide whether a package may appear in match() results;
# masks choices that would shadow a higher visible version in the same
# slot or conflict with a package already selected into the graph.
6299 def _visible(self, pkg):
6300 if pkg.installed and "selective" not in self._depgraph.myparams:
6302 arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
6303 except (StopIteration, portage.exception.InvalidDependString):
6310 self._depgraph.pkgsettings[pkg.root], pkg):
6312 except portage.exception.InvalidDependString:
6314 in_graph = self._depgraph._slot_pkg_map[
6315 self._root].get(pkg.slot_atom)
6316 if in_graph is None:
6317 # Mask choices for packages which are not the highest visible
6318 # version within their slot (since they usually trigger slot
6320 highest_visible, in_graph = self._depgraph._select_package(
6321 self._root, pkg.slot_atom)
6322 if pkg != highest_visible:
6324 elif in_graph != pkg:
6325 # Mask choices for packages that would trigger a slot
6326 # conflict with a previously selected package.
6330 def _dep_expand(self, atom):
6332 This is only needed for old installed packages that may
6333 contain atoms that are not fully qualified with a specific
6334 category. Emulate the cpv_expand() function that's used by
6335 dbapi.match() in cases like this. If there are multiple
6336 matches, it's often due to a new-style virtual that has
6337 been added, so try to filter those out to avoid raising
6340 root_config = self._depgraph.roots[self._root]
6342 expanded_atoms = self._depgraph._dep_expand(root_config, atom)
6343 if len(expanded_atoms) > 1:
# Prefer non-virtual expansions when the name is ambiguous.
6344 non_virtual_atoms = []
6345 for x in expanded_atoms:
6346 if not portage.dep_getkey(x).startswith("virtual/"):
6347 non_virtual_atoms.append(x)
6348 if len(non_virtual_atoms) == 1:
6349 expanded_atoms = non_virtual_atoms
6350 if len(expanded_atoms) > 1:
6351 # compatible with portage.cpv_expand()
6352 raise portage.exception.AmbiguousPackageName(
6353 [portage.dep_getkey(x) for x in expanded_atoms])
6355 atom = expanded_atoms[0]
# No expansion found: fall back to PROVIDE-based virtual lookup.
6357 null_atom = insert_category_into_atom(atom, "null")
6358 null_cp = portage.dep_getkey(null_atom)
6359 cat, atom_pn = portage.catsplit(null_cp)
6360 virts_p = root_config.settings.get_virts_p().get(atom_pn)
6362 # Allow the resolver to choose which virtual.
6363 atom = insert_category_into_atom(atom, "virtual")
6365 atom = insert_category_into_atom(atom, "null")
# aux_get(): serve metadata from Packages previously cached by match().
6368 def aux_get(self, cpv, wants):
6369 metadata = self._cpv_pkg_map[cpv].metadata
6370 return [metadata.get(x, "") for x in wants]
# Tallies merge-list statistics (upgrades, downgrades, new, reinstalls,
# uninstalls, blockers, fetch restrictions, download size) and formats
# the "Total: N packages (...)" summary line shown after the merge list.
# NOTE(review): this extract is missing some source lines — notably the
# `def __init__` and `def __str__` headers and several counter inits —
# only the visible lines are documented here.
6372 class PackageCounters(object):
6382 self.blocks_satisfied = 0
6384 self.restrict_fetch = 0
6385 self.restrict_fetch_satisfied = 0
6386 self.interactive = 0
# Summary formatting: build the pieces in myoutput/details, then join.
6389 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
6392 myoutput.append("Total: %s package" % total_installs)
6393 if total_installs != 1:
6394 myoutput.append("s")
6395 if total_installs != 0:
6396 myoutput.append(" (")
6397 if self.upgrades > 0:
6398 details.append("%s upgrade" % self.upgrades)
6399 if self.upgrades > 1:
6401 if self.downgrades > 0:
6402 details.append("%s downgrade" % self.downgrades)
6403 if self.downgrades > 1:
6406 details.append("%s new" % self.new)
6407 if self.newslot > 0:
6408 details.append("%s in new slot" % self.newslot)
6409 if self.newslot > 1:
6412 details.append("%s reinstall" % self.reinst)
6416 details.append("%s uninstall" % self.uninst)
6419 if self.interactive > 0:
6420 details.append("%s %s" % (self.interactive,
6421 colorize("WARN", "interactive")))
6422 myoutput.append(", ".join(details))
6423 if total_installs != 0:
6424 myoutput.append(")")
6425 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
6426 if self.restrict_fetch:
6427 myoutput.append("\nFetch Restriction: %s package" % \
6428 self.restrict_fetch)
6429 if self.restrict_fetch > 1:
6430 myoutput.append("s")
6431 if self.restrict_fetch_satisfied < self.restrict_fetch:
6432 myoutput.append(bad(" (%s unsatisfied)") % \
6433 (self.restrict_fetch - self.restrict_fetch_satisfied))
6435 myoutput.append("\nConflict: %s block" % \
6438 myoutput.append("s")
6439 if self.blocks_satisfied < self.blocks:
6440 myoutput.append(bad(" (%s unsatisfied)") % \
6441 (self.blocks - self.blocks_satisfied))
6442 return "".join(myoutput)
class Scheduler(PollScheduler):
    # Options under which blocker checks are pointless and skipped.
    _opts_ignore_blockers = \
        frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri",
        "--nodeps", "--pretend"])

    # Options that force all output to the foreground (no background mode).
    _opts_no_background = \
        frozenset(["--pretend",
        "--fetchonly", "--fetch-all-uri"])

    # Options under which emerge never re-execs itself after merging
    # a new portage version.
    _opts_no_restart = frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri", "--pretend"])

    # Options that must not leak into an automatic --resume invocation
    # (see _restart_if_necessary).
    _bad_resume_opts = set(["--ask", "--changelog",
        "--resume", "--skipfirst"])

    # Shared log file that serializes all parallel-fetch output.
    _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
# --- Scheduler helper value classes (simple SlotObject attribute bags) ---

class _iface_class(SlotObject):
    # Callback bundle handed to merge/dblink tasks so they can reach back
    # into the scheduler for logging, scheduling and (un)registration.
    __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
        "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
        "scheduleSetup", "scheduleUnpack", "scheduleYield",
        # NOTE(review): the slot tuple continues ("unregister", ...) on a
        # line elided from this listing.

class _fetch_iface_class(SlotObject):
    # Minimal interface given to fetchers: shared log file + scheduler hook.
    __slots__ = ("log_file", "schedule")

# One SequentialTaskQueue per phase, reachable as attributes
# (self._task_queues.merge, .jobs, .fetch, .unpack).
_task_queues_class = slot_dict_class(
    ("merge", "jobs", "fetch", "unpack"), prefix="")

class _build_opts_class(SlotObject):
    # Boolean mirrors of the corresponding --long-options (set in __init__).
    __slots__ = ("buildpkg", "buildpkgonly",
        "fetch_all_uri", "fetchonly", "pretend")

class _binpkg_opts_class(SlotObject):
    # Boolean mirrors of binary-package related --long-options.
    __slots__ = ("fetchonly", "getbinpkg", "pretend")

class _pkg_count_class(SlotObject):
    # Merge progress: curval packages done out of maxval.
    __slots__ = ("curval", "maxval")

class _emerge_log_class(SlotObject):
    __slots__ = ("xterm_titles",)

    def log(self, *pargs, **kwargs):
        # Delegate to emergelog(); short_msg would normally go to the
        # xterm title bar.
        if not self.xterm_titles:
            # Avoid interference with the scheduler's status display.
            kwargs.pop("short_msg", None)
        emergelog(self.xterm_titles, *pargs, **kwargs)

class _failed_pkg(SlotObject):
    # Record of one failed build/merge, kept for the final failure summary.
    __slots__ = ("build_dir", "build_log", "pkg", "returncode")
class _ConfigPool(object):
    """Interface for a task to temporarily allocate a config
    instance from a pool. This allows a task to be constructed
    long before the config instance actually becomes needed, like
    when prefetchers are constructed for the whole merge list."""
    __slots__ = ("_root", "_allocate", "_deallocate")
    def __init__(self, root, allocate, deallocate):
        # allocate/deallocate are bound scheduler methods
        # (_allocate_config / _deallocate_config for this root).
        self._allocate = allocate
        self._deallocate = deallocate
        # NOTE(review): the "self._root = root" assignment and the
        # "def allocate(self):" header are elided from this listing;
        # the return below is the body of allocate().
        return self._allocate(self._root)
    def deallocate(self, settings):
        # Return a settings instance to the pool for reuse.
        self._deallocate(settings)
class _unknown_internal_error(portage.exception.PortageException):
    """
    Used internally to terminate scheduling. The specific reason for
    the failure should have been dumped to stderr.
    """
    def __init__(self, value=""):
        portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
    spinner, mergelist, favorites, digraph):
    """Set up all scheduler state for one emerge run.

    @param settings: global portage config
    @param trees: per-root tree/dbapi mapping
    @param mtimedb: persistent state db (holds the "resume" list)
    @param myopts: parsed emerge command-line options
    @param spinner: progress spinner for non-background output
    @param mergelist: ordered list of Package/Blocker tasks to process
    @param favorites: atoms the user asked for (world-file candidates)
    @param digraph: dependency graph of the merge list
    NOTE(review): several lines of this constructor are elided in this
    listing; only the visible statements are annotated.
    """
    PollScheduler.__init__(self)
    self.settings = settings
    self.target_root = settings["ROOT"]
    self.myopts = myopts
    self._spinner = spinner
    self._mtimedb = mtimedb
    self._mergelist = mergelist
    self._favorites = favorites
    self._args_set = InternalPackageSet(favorites)
    # Mirror command-line flags into attribute bags:
    # "--fetch-all-uri" in myopts -> self._build_opts.fetch_all_uri, etc.
    self._build_opts = self._build_opts_class()
    for k in self._build_opts.__slots__:
        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
    self._binpkg_opts = self._binpkg_opts_class()
    for k in self._binpkg_opts.__slots__:
        setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
    self._logger = self._emerge_log_class()
    # One SequentialTaskQueue per phase (merge, jobs, fetch, unpack).
    self._task_queues = self._task_queues_class()
    for k in self._task_queues.allowed_keys:
        setattr(self._task_queues, k,
            SequentialTaskQueue())
    # Holds merges that will wait to be executed when no builds are
    # executing. This is useful for system packages since dependencies
    # on system packages are frequently unspecified.
    self._merge_wait_queue = []
    # Holds merges that have been transfered from the merge_wait_queue to
    # the actual merge queue. They are removed from this list upon
    # completion. Other packages can start building only when this list is
    self._merge_wait_scheduled = []
    # Holds system packages and their deep runtime dependencies. Before
    # being merged, these packages go to merge_wait_queue, to be merged
    # when no other packages are building.
    self._deep_system_deps = set()
    # Holds packages to merge which will satisfy currently unsatisfied
    # deep runtime dependencies of system packages. If this is not empty
    # then no parallel builds will be spawned until it is empty. This
    # minimizes the possibility that a build will fail due to the system
    # being in a fragile state. For example, see bug #259954.
    self._unsatisfied_system_deps = set()
    self._status_display = JobStatusDisplay(
        xterm_titles=('notitles' not in settings.features))
    self._max_load = myopts.get("--load-average")
    max_jobs = myopts.get("--jobs")
    if max_jobs is None:
    self._set_max_jobs(max_jobs)
    # The root where the currently running
    # portage instance is installed.
    self._running_root = trees["/"]["root_config"]
    if settings.get("PORTAGE_DEBUG", "") == "1":
    # Per-root state: cloned settings, pooled configs, blocker dbs.
    self.pkgsettings = {}
    self._config_pool = {}
    self._blocker_db = {}
        self._config_pool[root] = []
        self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
    # Wire up the callback interface handed to child tasks.
    fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
        schedule=self._schedule_fetch)
    self._sched_iface = self._iface_class(
        dblinkEbuildPhase=self._dblink_ebuild_phase,
        dblinkDisplayMerge=self._dblink_display_merge,
        dblinkElog=self._dblink_elog,
        dblinkEmergeLog=self._dblink_emerge_log,
        fetch=fetch_iface, register=self._register,
        schedule=self._schedule_wait,
        scheduleSetup=self._schedule_setup,
        scheduleUnpack=self._schedule_unpack,
        scheduleYield=self._schedule_yield,
        unregister=self._unregister)
    # Prefetchers are weakly referenced so finished ones can be collected.
    self._prefetchers = weakref.WeakValueDictionary()
    self._pkg_queue = []
    self._completed_tasks = set()
    self._failed_pkgs = []
    self._failed_pkgs_all = []
    self._failed_pkgs_die_msgs = []
    self._post_mod_echo_msgs = []
    self._parallel_fetch = False
    # Progress counters track only real merges, not blockers/nomerges.
    merge_count = len([x for x in mergelist \
        if isinstance(x, Package) and x.operation == "merge"])
    self._pkg_count = self._pkg_count_class(
        curval=0, maxval=merge_count)
    self._status_display.maxval = self._pkg_count.maxval
    # The load average takes some time to respond when new
    # jobs are added, so we need to limit the rate of adding
    self._job_delay_max = 10
    self._job_delay_factor = 1.0
    self._job_delay_exp = 1.5
    self._previous_job_start_time = None
    self._set_digraph(digraph)
    # This is used to memoize the _choose_pkg() result when
    # no packages can be chosen until one of the existing
    self._choose_pkg_return_early = False
    features = self.settings.features
    # parallel-fetch requires distlocks and makes no sense for
    # pretend/fetch-only modes or single-package lists.
    if "parallel-fetch" in features and \
        not ("--pretend" in self.myopts or \
        "--fetch-all-uri" in self.myopts or \
        "--fetchonly" in self.myopts):
        if "distlocks" not in features:
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            portage.writemsg(red("!!!")+" parallel-fetching " + \
                "requires the distlocks feature enabled"+"\n",
            portage.writemsg(red("!!!")+" you have it disabled, " + \
                "thus parallel-fetching is being disabled"+"\n",
            portage.writemsg(red("!!!")+"\n", noiselevel=-1)
        elif len(mergelist) > 1:
            self._parallel_fetch = True
    if self._parallel_fetch:
        # clear out existing fetch log if it exists
            open(self._fetch_log, 'w')
        except EnvironmentError:
    # Identify the installed portage instance so we know when a
    # merge of portage itself requires a restart.
    self._running_portage = None
    portage_match = self._running_root.trees["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM)
        cpv = portage_match.pop()
        self._running_portage = self._pkg(cpv, "installed",
            self._running_root, installed=True)
def _poll(self, timeout=None):
    # Thin wrapper that defers to the base class poll loop.
    PollScheduler._poll(self, timeout=timeout)
6669 def _set_max_jobs(self, max_jobs):
6670 self._max_jobs = max_jobs
6671 self._task_queues.jobs.max_jobs = max_jobs
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @returns: True if background mode is enabled, False otherwise.
    NOTE(review): several lines (message assembly, return statement) are
    elided in this listing.
    """
    # Background output only when parallelism or --quiet is requested and
    # no foreground-forcing option (_opts_no_background) is present.
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))
    # Interactive ebuilds need the terminal, so background mode is
    # abandoned and parallelism is dropped to one job.
    interactive_tasks = self._get_interactive_tasks()
    if interactive_tasks:
        writemsg_level(">>> Sending package output to stdio due " + \
            "to interactive package(s):\n",
            level=logging.INFO, noiselevel=-1)
        for pkg in interactive_tasks:
            pkg_str = " " + colorize("INFORM", str(pkg.cpv))
            pkg_str += " for " + pkg.root
        writemsg_level("".join("%s\n" % (l,) for l in msg),
            level=logging.INFO, noiselevel=-1)
        if self._max_jobs is True or self._max_jobs > 1:
            self._set_max_jobs(1)
            writemsg_level(">>> Setting --jobs=1 due " + \
                "to the above interactive package(s)\n",
                level=logging.INFO, noiselevel=-1)
    # --quiet without --verbose silences the periodic status display.
    self._status_display.quiet = \
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)
    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet
def _get_interactive_tasks(self):
    """Return merge-list packages whose PROPERTIES contain "interactive".

    An invalid PROPERTIES dependency string aborts scheduling via
    _unknown_internal_error after showing a notice.
    """
    from portage import flatten
    from portage.dep import use_reduce, paren_reduce
    interactive_tasks = []
    for task in self._mergelist:
        # Only actual merges matter; blockers and nomerges are skipped.
        if not (isinstance(task, Package) and \
            task.operation == "merge"):
        # NOTE(review): the "try:" header for this block is elided in
        # this listing.
            properties = flatten(use_reduce(paren_reduce(
                task.metadata["PROPERTIES"]), uselist=task.use.enabled))
        except portage.exception.InvalidDependString, e:
            show_invalid_depstring_notice(task,
                task.metadata["PROPERTIES"], str(e))
            raise self._unknown_internal_error()
        if "interactive" in properties:
            interactive_tasks.append(task)
    return interactive_tasks
def _set_digraph(self, digraph):
    """Install the dependency graph, or discard it when it is not needed.

    With --nodeps or a single job the graph adds no value, so it is
    dropped to keep scheduling cheap.
    """
    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
        # Discard the graph; scheduling happens in plain list order.
        self._digraph = None
    self._digraph = digraph
    self._find_system_deps()
    self._prune_digraph()
    self._prevent_builddir_collisions()
def _find_system_deps(self):
    """
    Collect system packages and their deep runtime dependencies.

    Before being merged, these packages go to merge_wait_queue, to be
    merged when no other packages are building.
    """
    found = _find_deep_system_runtime_deps(self._digraph)
    # Only packages actually scheduled for merge are relevant here.
    self._deep_system_deps.clear()
    self._deep_system_deps.update(
        pkg for pkg in found if pkg.operation == "merge")
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    NOTE(review): the loop scaffolding around this body (apparently a
    repeat-until-stable loop) is partly elided in this listing.
    """
    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    for node in graph.root_nodes():
        # Roots that are not packages, are installed nomerges, or are
        # already completed contribute nothing to scheduling decisions.
        if not isinstance(node, Package) or \
            (node.installed and node.operation == "nomerge") or \
            node in completed_tasks:
            removed_nodes.add(node)
    graph.difference_update(removed_nodes)
    # Removing roots can expose new irrelevant roots, hence the
    # stop-when-nothing-removed check.
    if not removed_nodes:
    removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of it's build dir locks from
    NOTE(review): the rest of this docstring, the cpv_map initialization
    and an else-branch are elided in this listing.
    """
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
        if pkg.cpv not in cpv_map:
            # First occurrence of this cpv: just remember it.
            cpv_map[pkg.cpv] = [pkg]
        # Later occurrences are serialized behind every earlier one by
        # adding a buildtime dependency edge.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        # First positional argument, when given, is the exit status of
        # the failed uninstall.
        self.status = pargs[0]
6816 def _schedule_fetch(self, fetcher):
6818 Schedule a fetcher on the fetch queue, in order to
6819 serialize access to the fetch log.
6821 self._task_queues.fetch.addFront(fetcher)
6823 def _schedule_setup(self, setup_phase):
6825 Schedule a setup phase on the merge queue, in order to
6826 serialize unsandboxed access to the live filesystem.
6828 self._task_queues.merge.addFront(setup_phase)
6831 def _schedule_unpack(self, unpack_phase):
6833 Schedule an unpack phase on the unpack queue, in order
6834 to serialize $DISTDIR access for live ebuilds.
6836 self._task_queues.unpack.add(unpack_phase)
def _find_blockers(self, new_pkg):
    """
    Returns a callable which should be called only when
    the vdb lock has been acquired.
    NOTE(review): intermediate lines are elided; presumably the return
    below is wrapped in a closure that this method returns — confirm
    against the full source.
    """
        return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
    """Return dblink instances for installed packages blocked by *new_pkg*.

    Packages in the same slot or with the identical cpv are excluded,
    since merging *new_pkg* replaces them anyway.
    """
    # With --nodeps/--pretend/etc. blocker resolution is skipped entirely.
    if self._opts_ignore_blockers.intersection(self.myopts):
    # Call gc.collect() here to avoid heap overflow that
    # triggers 'Cannot allocate memory' errors (reported
    blocker_db = self._blocker_db[new_pkg.root]
    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(
        new_pkg, acquire_lock=acquire_lock):
        # Same slot: the blocker is replaced by the merge itself.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
        if new_pkg.cpv == blocking_pkg.cpv:
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))
    return blocker_dblinks
def _dblink_pkg(self, pkg_dblink):
    """Map a dblink instance back to the scheduler's Package for it."""
    tree_type = RootConfig.tree_pkg_map[pkg_dblink.treetype]
    return self._pkg(
        pkg_dblink.mycpv,
        tree_type,
        self.trees[pkg_dblink.myroot]["root_config"],
        installed=(tree_type == "installed"))
def _append_to_log_path(self, log_path, msg):
    # Append msg to the given log file.
    # NOTE(review): the write/close (likely try/finally) lines are elided
    # in this listing — confirm the handle is closed in the full source.
    f = open(log_path, 'a')
def _dblink_elog(self, pkg_dblink, phase, func, msgs):
    """Route elog messages from a dblink to the log file or terminal.

    In background mode the messages go to the package's log file instead
    of the scheduler's status display.
    NOTE(review): the message loop and the file-close lines are elided in
    this listing.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    if background and log_path is not None:
        log_file = open(log_path, 'a')
        func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
    if log_file is not None:
6907 def _dblink_emerge_log(self, msg):
6908 self._logger.log(msg)
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
    """Display a merge-time message, honoring background mode and log file.

    Without a log file the message goes to the terminal (suppressed in
    background mode below WARN level); with one, it is appended there.
    """
    log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
    background = self._background
    if log_path is None:
        if not (background and level < logging.WARN):
            portage.util.writemsg_level(msg,
                level=level, noiselevel=noiselevel)
    # NOTE(review): the branch condition above this line (foreground with
    # a log file, presumably) is elided in this listing.
        portage.util.writemsg_level(msg,
            level=level, noiselevel=noiselevel)
    self._append_to_log_path(log_path, msg)
def _dblink_ebuild_phase(self,
    pkg_dblink, pkg_dbapi, ebuild_path, phase):
    """
    Using this callback for merge phases allows the scheduler
    to run while these phases execute asynchronously, and allows
    the scheduler control output handling.
    NOTE(review): several lines (including the wait on the phase task)
    are elided in this listing.
    """
    scheduler = self._sched_iface
    settings = pkg_dblink.settings
    pkg = self._dblink_pkg(pkg_dblink)
    background = self._background
    log_path = settings.get("PORTAGE_LOG_FILE")
    ebuild_phase = EbuildPhase(background=background,
        pkg=pkg, phase=phase, scheduler=scheduler,
        settings=settings, tree=pkg_dblink.treetype)
    ebuild_phase.start()
    # Returncode is only meaningful once the phase task has completed.
    return ebuild_phase.returncode
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must done before parallel
    NOTE(review): the rest of the docstring and several control-flow
    lines (early returns, continue statements) are elided in this
    listing.
    """
    if '--fetchonly' in self.myopts:
    digest = '--digest' in self.myopts
    # FEATURES=digest in any root's config also forces digest generation.
    for pkgsettings in self.pkgsettings.itervalues():
        if 'digest' in pkgsettings.features:
    for x in self._mergelist:
        # Only ebuilds that will actually be merged need digests.
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
        pkgsettings = self.pkgsettings[x.root]
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv)
            "!!! Could not locate ebuild for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
        # digestgen() reads the ebuild dir from the 'O' variable.
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not portage.digestgen([], pkgsettings, myportdb=portdb):
            "!!! Unable to generate manifest for '%s'.\n" \
            % x.cpv, level=logging.ERROR, noiselevel=-1)
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.
    # Manifest checking only applies with FEATURES=strict and when files
    # will actually be used (not fetch-only modes).
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
    shown_verifying_msg = False
    # Clone each root's settings with PORTAGE_QUIET so digestcheck()
    # does not spam per-file output.
    for myroot, pkgsettings in self.pkgsettings.iteritems():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
    for x in self._mergelist:
        # Only ebuild-type entries have manifests to verify.
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")
        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        # digestcheck() reads the ebuild dir from the 'O' variable.
        quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
        if not portage.digestcheck([], quiet_config, strict=True):
def _add_prefetchers(self):
    """Queue background fetch tasks for every package after the first."""
    if not self._parallel_fetch:
    if self._parallel_fetch:
        self._status_msg("Starting parallel fetch")
        prefetchers = self._prefetchers
        getbinpkg = "--getbinpkg" in self.myopts
        # In order to avoid "waiting for lock" messages
        # at the beginning, which annoy users, never
        # spawn a prefetcher for the first package.
        for pkg in self._mergelist[1:]:
            prefetcher = self._create_prefetcher(pkg)
            if prefetcher is not None:
                self._task_queues.fetch.add(prefetcher)
                # Keep a (weak) reference so the real fetch can reuse it.
                prefetchers[pkg] = prefetcher
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    # Blockers and other non-package entries have nothing to fetch.
    if not isinstance(pkg, Package):
    elif pkg.type_name == "ebuild":
        # Source fetch via the shared parallel-fetch log.
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
                self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)
    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):
        # Remote binary package: download it ahead of time.
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)
def _is_restart_scheduled(self):
    """
    Check if the merge list contains a replacement
    for the current running instance, that will result
    in restart after merge.
    @returns: True if a restart is scheduled, False otherwise.
    """
    if self._opts_no_restart.intersection(self.myopts):
    mergelist = self._mergelist
    for i, pkg in enumerate(mergelist):
        # A restart is only scheduled when portage itself is merged
        # somewhere before the end of the list.
        if self._is_restart_necessary(pkg) and \
            i != len(mergelist) - 1:
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """
    # Figure out if we need a restart.
    # A restart is needed only when a *different* version of portage is
    # merged into the root the running instance lives in.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage:
            return pkg.cpv != self._running_portage.cpv
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    NOTE(review): several early-return bodies are elided in this listing.
    """
    if self._opts_no_restart.intersection(self.myopts):
    if not self._is_restart_necessary(pkg):
    # Nothing left to merge after this package, so no restart is needed.
    if pkg == self._mergelist[-1]:
    self._main_loop_cleanup()
    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts
    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
    # Drop the just-merged portage entry from the resume list so the
    # re-exec'd emerge does not repeat it.
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    portage.run_exitfuncs()
    mynewargv = [sys.argv[0], "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.iteritems():
        if myopt not in bad_resume_opts:
            # Flag options are passed bare; valued options as opt=value.
            mynewargv.append(myopt)
            mynewargv.append(myopt +"="+ str(myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    # Replace the current process with the new emerge invocation.
    os.execv(mynewargv[0], mynewargv)
# NOTE(review): fragment of Scheduler.merge(); its def line and several
# branches (try headers, return statements, loop bodies) are elided in
# this listing. Only the visible statements are annotated.
if "--resume" in self.myopts:
    # We're in --resume mode, so we need to tell the user as much.
    portage.writemsg_stdout(
        colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
    self._logger.log(" *** Resuming merge...")
self._save_resume_list()
    self._background = self._background_mode()
except self._unknown_internal_error:
for root in self.trees:
    root_config = self.trees[root]["root_config"]
    # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
    # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
    # for ensuring sane $PWD (bug #239560) and storing elog messages.
    tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
    if not tmpdir or not os.path.isdir(tmpdir):
        msg = "The directory specified in your " + \
            "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
            "does not exist. Please create this " + \
            "directory or correct your PORTAGE_TMPDIR setting."
        msg = textwrap.wrap(msg, 70)
        out = portage.output.EOutput()
    if self._background:
        # Propagate background mode to child configs so their output
        # goes to log files instead of the terminal.
        root_config.settings.unlock()
        root_config.settings["PORTAGE_BACKGROUND"] = "1"
        root_config.settings.backup_changes("PORTAGE_BACKGROUND")
        root_config.settings.lock()
    self.pkgsettings[root] = portage.config(
        clone=root_config.settings)
rval = self._generate_digests()
if rval != os.EX_OK:
rval = self._check_manifests()
if rval != os.EX_OK:
keep_going = "--keep-going" in self.myopts
fetchonly = self._build_opts.fetchonly
mtimedb = self._mtimedb
failed_pkgs = self._failed_pkgs
    # Main loop: retry after failures when --keep-going is active.
    rval = self._merge()
    if rval == os.EX_OK or fetchonly or not keep_going:
    if "resume" not in mtimedb:
    mergelist = self._mtimedb["resume"].get("mergelist")
    # Drop failed packages from the resume list before recalculating.
    for failed_pkg in failed_pkgs:
        mergelist.remove(list(failed_pkg.pkg))
    self._failed_pkgs_all.extend(failed_pkgs)
    if not self._calc_resume_list():
    clear_caches(self.trees)
    if not self._mergelist:
    self._save_resume_list()
    # Reset progress counters for the recalculated merge list.
    self._pkg_count.curval = 0
    self._pkg_count.maxval = len([x for x in self._mergelist \
        if isinstance(x, Package) and x.operation == "merge"])
    self._status_display.maxval = self._pkg_count.maxval
self._logger.log(" *** Finished. Cleaning up...")
self._failed_pkgs_all.extend(failed_pkgs)
background = self._background
failure_log_shown = False
if background and len(self._failed_pkgs_all) == 1:
    # If only one package failed then just show it's
    # whole log for easy viewing.
    failed_pkg = self._failed_pkgs_all[-1]
    build_dir = failed_pkg.build_dir
    log_paths = [failed_pkg.build_log]
    log_path = self._locate_failure_log(failed_pkg)
    if log_path is not None:
        log_file = open(log_path)
    if log_file is not None:
        for line in log_file:
            writemsg_level(line, noiselevel=-1)
        failure_log_shown = True
# Dump mod_echo output now since it tends to flood the terminal.
# This allows us to avoid having more important output, generated
# later, from being swept away by the mod_echo output.
mod_echo_output = _flush_elog_mod_echo()
if background and not failure_log_shown and \
    self._failed_pkgs_all and \
    self._failed_pkgs_die_msgs and \
    not mod_echo_output:
    # Replay captured die messages per package and phase.
    printer = portage.output.EOutput()
    for mysettings, key, logentries in self._failed_pkgs_die_msgs:
        if mysettings["ROOT"] != "/":
            root_msg = " merged to %s" % mysettings["ROOT"]
        printer.einfo("Error messages for package %s%s:" % \
            (colorize("INFORM", key), root_msg))
        for phase in portage.const.EBUILD_PHASES:
            if phase not in logentries:
            for msgtype, msgcontent in logentries[phase]:
                if isinstance(msgcontent, basestring):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    printer.eerror(line.strip("\n"))
if self._post_mod_echo_msgs:
    for msg in self._post_mod_echo_msgs:
# Final summary of everything that failed during this run.
if len(self._failed_pkgs_all) > 1 or \
    (self._failed_pkgs_all and "--keep-going" in self.myopts):
    if len(self._failed_pkgs_all) > 1:
        msg = "The following %d packages have " % \
            len(self._failed_pkgs_all) + \
            "failed to build or install:"
        msg = "The following package has " + \
            "failed to build or install:"
    writemsg(prefix + "\n", noiselevel=-1)
    from textwrap import wrap
    for line in wrap(msg, 72):
        writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
    writemsg(prefix + "\n", noiselevel=-1)
    for failed_pkg in self._failed_pkgs_all:
        writemsg("%s\t%s\n" % (prefix,
            colorize("INFORM", str(failed_pkg.pkg))),
    writemsg(prefix + "\n", noiselevel=-1)
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """Capture ERROR-level elog entries for the end-of-run failure summary."""
    errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
    # NOTE(review): the guard (presumably "if errors:") is elided in this
    # listing.
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, errors))
def _locate_failure_log(self, failed_pkg):
    """Return the path of a non-empty build log for *failed_pkg*, if any.

    NOTE(review): most of this method's body (candidate filtering and the
    return statements) is elided in this listing.
    """
    build_dir = failed_pkg.build_dir
    log_paths = [failed_pkg.build_log]
    for log_path in log_paths:
        # Empty logs are useless for display purposes.
        log_size = os.stat(log_path).st_size
def _add_packages(self):
    """Seed the scheduling queue with Package entries from the merge list.

    NOTE(review): the Blocker branch body is elided in this listing.
    """
    pkg_queue = self._pkg_queue
    for pkg in self._mergelist:
        if isinstance(pkg, Package):
            pkg_queue.append(pkg)
        elif isinstance(pkg, Blocker):
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    NOTE(review): a few guard lines (graph None check, $ROOT check,
    return statements) are elided in this listing.
    """
    graph = self._digraph
    pkg = merge.merge.pkg
    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps
    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
        # A merge-pending child that hasn't completed yet is an
        # unsatisfied system runtime dep: block parallel builds on it.
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
7411 def _merge_wait_exit_handler(self, task):
7412 self._merge_wait_scheduled.remove(task)
7413 self._merge_exit(task)
def _merge_exit(self, merge):
    """Bookkeeping after a merge task finishes.

    NOTE(review): trailing lines of this method may be elided in this
    listing (the next visible line is the _do_merge_exit def).
    """
    self._do_merge_exit(merge)
    # Return the task's settings instance to the per-root pool.
    self._deallocate_config(merge.merge.settings)
    # Only count successful merges of new (not already-installed) pkgs.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
def _do_merge_exit(self, merge):
    """Handle success/failure bookkeeping for one finished merge.

    NOTE(review): a few lines (failure early-return, mtimedb guards) are
    elided in this listing.
    """
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        # Record the failure for the end-of-run summary.
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")
        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
        self._status_display.failed = len(self._failed_pkgs)
    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)
    self._restart_if_necessary(pkg)
    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
def _build_exit(self, build):
    """After a build finishes: queue its merge on success, record failure
    otherwise.

    NOTE(review): a few lines (else branches, schedule call) are elided
    in this listing.
    """
    if build.returncode == os.EX_OK:
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        # Normal case: hand the merge to the serialized merge queue.
        merge.addExitListener(self._merge_exit)
        self._task_queues.merge.add(merge)
        self._status_display.merges = len(self._task_queues.merge)
    # Failure path: record details for the end-of-run summary.
    settings = build.settings
    build_dir = settings.get("PORTAGE_BUILDDIR")
    build_log = settings.get("PORTAGE_LOG_FILE")
    self._failed_pkgs.append(self._failed_pkg(
        build_dir=build_dir, build_log=build_log,
        returncode=build.returncode))
    self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
    self._status_display.failed = len(self._failed_pkgs)
    self._deallocate_config(build.settings)
    self._status_display.running = self._jobs
7494 def _extract_exit(self, build):
7495 self._build_exit(build)
7497 def _task_complete(self, pkg):
7498 self._completed_tasks.add(pkg)
7499 self._unsatisfied_system_deps.discard(pkg)
7500 self._choose_pkg_return_early = False
# NOTE(review): fragment of Scheduler._merge(); its def line, the main
# loop invocation and the return path are elided in this listing.
self._add_prefetchers()
self._add_packages()
pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
# Silence lock messages and capture elog errors while the loop runs.
portage.locks._quiet = self._background
portage.elog._emerge_elog_listener = self._elog_listener
self._main_loop_cleanup()
# Restore the global hooks installed above.
portage.locks._quiet = False
portage.elog._emerge_elog_listener = None
# Report the last failure's returncode as the overall result.
rval = failed_pkgs[-1].returncode
7523 def _main_loop_cleanup(self):
7524 del self._pkg_queue[:]
7525 self._completed_tasks.clear()
7526 self._deep_system_deps.clear()
7527 self._unsatisfied_system_deps.clear()
7528 self._choose_pkg_return_early = False
7529 self._status_display.reset()
7530 self._digraph = None
7531 self._task_queues.fetch.clear()
def _choose_pkg(self):
    """
    Choose a task that has all it's dependencies satisfied.
    NOTE(review): a few lines (early returns, chosen_pkg init/break) are
    elided in this listing.
    """
    # Memoized: nothing became schedulable since the last failed attempt.
    if self._choose_pkg_return_early:
    if self._digraph is None:
        # Without a graph, serialize unless --nodeps with parallel jobs.
        if (self._jobs or self._task_queues.merge) and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
        return self._pkg_queue.pop(0)
    # Nothing is running, so the queue head is always safe to start.
    if not (self._jobs or self._task_queues.merge):
        return self._pkg_queue.pop(0)
    self._prune_digraph()
    # Pick the first queued package that doesn't depend on any
    # still-scheduled merge (packages later in the queue don't count).
    later = set(self._pkg_queue)
    for pkg in self._pkg_queue:
        if not self._dependent_on_scheduled_merges(pkg, later):
    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)
    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        self._choose_pkg_return_early = True
7573 def _dependent_on_scheduled_merges(self, pkg, later):
# NOTE(review): listing has gaps (the traversal's loop header and the
# return/continue lines are not visible) -- verify against upstream.
7575 Traverse the subgraph of the given packages deep dependencies
7576 to see if it contains any scheduled merges.
7577 @param pkg: a package to check dependencies for
7579 @param later: packages for which dependence should be ignored
7580 since they will be merged later than pkg anyway and therefore
7581 delaying the merge of pkg will not result in a more optimal
7585 @returns: True if the package is dependent, False otherwise.
7588 graph = self._digraph
7589 completed_tasks = self._completed_tasks
# Depth-first traversal seeded with pkg's direct dependencies.
7592 traversed_nodes = set([pkg])
7593 direct_deps = graph.child_nodes(pkg)
7594 node_stack = direct_deps
7595 direct_deps = frozenset(direct_deps)
7597 node = node_stack.pop()
7598 if node in traversed_nodes:
7600 traversed_nodes.add(node)
# A node does not count as a pending merge when it is an installed
# nomerge node, an uninstall that is not a direct dep, or already
# completed (the remaining condition is outside this listing --
# presumably membership in 'later'); otherwise keep descending.
7601 if not ((node.installed and node.operation == "nomerge") or \
7602 (node.operation == "uninstall" and \
7603 node not in direct_deps) or \
7604 node in completed_tasks or \
7608 node_stack.extend(graph.child_nodes(node))
7612 def _allocate_config(self, root):
7614 Allocate a unique config instance for a task in order
7615 to prevent interference between parallel tasks.
# Reuse a pooled config for this root when one is available; otherwise
# clone a fresh one from the canonical per-root settings.
7617 if self._config_pool[root]:
7618 temp_settings = self._config_pool[root].pop()
# NOTE(review): an "else:" line appears to be missing from this listing
# between the pop above and the clone below -- verify against upstream.
7620 temp_settings = portage.config(clone=self.pkgsettings[root])
7621 # Since config.setcpv() isn't guaranteed to call config.reset() due to
7622 # performance reasons, call it here to make sure all settings from the
7623 # previous package get flushed out (such as PORTAGE_LOG_FILE).
7624 temp_settings.reload()
7625 temp_settings.reset()
7626 return temp_settings
7628 def _deallocate_config(self, settings):
7629 self._config_pool[settings["ROOT"]].append(settings)
7631 def _main_loop(self):
# Drive the scheduler: repeatedly call _schedule() and service poll
# events until no jobs or merges remain.
# NOTE(review): listing has gaps (the poll/event-handling calls inside
# both loops are not visible) -- verify against upstream before editing.
7633 # Only allow 1 job max if a restart is scheduled
7634 # due to portage update.
7635 if self._is_restart_scheduled() or \
7636 self._opts_no_background.intersection(self.myopts):
7637 self._set_max_jobs(1)
7639 merge_queue = self._task_queues.merge
7641 while self._schedule():
7642 if self._poll_event_handlers:
# Scheduling loop has exited; drain any remaining jobs and merges.
7647 if not (self._jobs or merge_queue):
7649 if self._poll_event_handlers:
7652 def _keep_scheduling(self):
7653 return bool(self._pkg_queue and \
7654 not (self._failed_pkgs and not self._build_opts.fetchonly))
7656 def _schedule_tasks(self):
# NOTE(review): listing has gaps (loop headers around the queue-schedule
# section are not visible) -- verify against upstream before editing.
7658 # When the number of jobs drops to zero, process all waiting merges.
7659 if not self._jobs and self._merge_wait_queue:
7660 for task in self._merge_wait_queue:
7661 task.addExitListener(self._merge_wait_exit_handler)
7662 self._task_queues.merge.add(task)
7663 self._status_display.merges = len(self._task_queues.merge)
7664 self._merge_wait_scheduled.extend(self._merge_wait_queue)
7665 del self._merge_wait_queue[:]
7667 self._schedule_tasks_imp()
7668 self._status_display.display()
# Let each task queue schedule whatever it can run.
7671 for q in self._task_queues.values():
7675 # Cancel prefetchers if they're the only reason
7676 # the main poll loop is still running.
7677 if self._failed_pkgs and not self._build_opts.fetchonly and \
7678 not (self._jobs or self._task_queues.merge) and \
7679 self._task_queues.fetch:
7680 self._task_queues.fetch.clear()
# Second scheduling pass after queue maintenance above.
7684 self._schedule_tasks_imp()
7685 self._status_display.display()
7687 return self._keep_scheduling()
7689 def _job_delay(self):
# Throttle new job starts: compute a backoff window that grows with the
# number of running jobs and report whether we are still inside it.
7692 @returns: True if job scheduling should be delayed, False otherwise.
# NOTE(review): the return statements (and apparently a load-average
# check tied to _max_load) are not visible in this listing -- verify.
7695 if self._jobs and self._max_load is not None:
7697 current_time = time.time()
# delay = factor * jobs**exp, capped at _job_delay_max seconds.
7699 delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
7700 if delay > self._job_delay_max:
7701 delay = self._job_delay_max
# Still within the backoff window since the last job start?
7702 if (current_time - self._previous_job_start_time) < delay:
7707 def _schedule_tasks_imp(self):
# NOTE(review): listing has gaps (state_change initialization, the
# scheduling while-loop header, and several branch headers are not
# visible) -- verify against upstream before editing.
7710 @returns: True if state changed, False otherwise.
7717 if not self._keep_scheduling():
7718 return bool(state_change)
# Bail out when a new job must not start yet: early-return flag set,
# merges waiting, system deps unsatisfied while jobs run, or job/load
# limits reached.
7720 if self._choose_pkg_return_early or \
7721 self._merge_wait_scheduled or \
7722 (self._jobs and self._unsatisfied_system_deps) or \
7723 not self._can_add_job() or \
7725 return bool(state_change)
7727 pkg = self._choose_pkg()
7729 return bool(state_change)
7733 if not pkg.installed:
7734 self._pkg_count.curval += 1
7736 task = self._task(pkg)
# Installed packages only need a merge wrapper (uninstall/nomerge).
7739 merge = PackageMerge(merge=task)
7740 merge.addExitListener(self._merge_exit)
7741 self._task_queues.merge.add(merge)
# Binary-package path: extraction job routed to _extract_exit.
7745 self._previous_job_start_time = time.time()
7746 self._status_display.running = self._jobs
7747 task.addExitListener(self._extract_exit)
7748 self._task_queues.jobs.add(task)
# Ebuild path: build job routed to _build_exit.
7752 self._previous_job_start_time = time.time()
7753 self._status_display.running = self._jobs
7754 task.addExitListener(self._build_exit)
7755 self._task_queues.jobs.add(task)
7757 return bool(state_change)
7759 def _task(self, pkg):
# Build a MergeListItem task for pkg.
# NOTE(review): listing has gaps (the emptiness check on previous_cpv
# and the final "return task" are not visible) -- verify upstream.
7761 pkg_to_replace = None
# For anything other than an uninstall, look up the currently installed
# package in the same slot so the task knows what it replaces.
7762 if pkg.operation != "uninstall":
7763 vardb = pkg.root_config.trees["vartree"].dbapi
7764 previous_cpv = vardb.match(pkg.slot_atom)
7766 previous_cpv = previous_cpv.pop()
7767 pkg_to_replace = self._pkg(previous_cpv,
7768 "installed", pkg.root_config, installed=True)
# Assemble the task with per-task config allocation (via _ConfigPool),
# blocker detection, prefetcher handoff and status/world callbacks.
7770 task = MergeListItem(args_set=self._args_set,
7771 background=self._background, binpkg_opts=self._binpkg_opts,
7772 build_opts=self._build_opts,
7773 config_pool=self._ConfigPool(pkg.root,
7774 self._allocate_config, self._deallocate_config),
7775 emerge_opts=self.myopts,
7776 find_blockers=self._find_blockers(pkg), logger=self._logger,
7777 mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
7778 pkg_to_replace=pkg_to_replace,
7779 prefetcher=self._prefetchers.get(pkg),
7780 scheduler=self._sched_iface,
7781 settings=self._allocate_config(pkg.root),
7782 statusMessage=self._status_msg,
7783 world_atom=self._world_atom)
7787 def _failed_pkg_msg(self, failed_pkg, action, preposition):
# Emit a one-line failure notice ("Failed to <action> <cpv> ...") plus
# the log-file path on a second line when one can be located.
# NOTE(review): the condition guarding the root suffix (presumably
# pkg.root != "/") is not visible in this listing.
7788 pkg = failed_pkg.pkg
7789 msg = "%s to %s %s" % \
7790 (bad("Failed"), action, colorize("INFORM", pkg.cpv))
7792 msg += " %s %s" % (preposition, pkg.root)
7794 log_path = self._locate_failure_log(failed_pkg)
7795 if log_path is not None:
7796 msg += ", Log file:"
7797 self._status_msg(msg)
# The path goes on its own line so it stays copy-pasteable.
7799 if log_path is not None:
7800 self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
7802 def _status_msg(self, msg):
7804 Display a brief status message (no newlines) in the status display.
7805 This is called by tasks to provide feedback to the user. This
7806 delegates the responsibility of generating \r and \n control characters,
7807 to guarantee that lines are created or erased when necessary and
7811 @param msg: a brief status message (no newlines allowed)
# In the foreground, emit a blank line first so the message is visually
# separated from whatever the terminal currently shows.
7813 if not self._background:
7814 writemsg_level("\n")
7815 self._status_display.displayMessage(msg)
7817 def _save_resume_list(self):
7819 Do this before verifying the ebuild Manifests since it might
7820 be possible for the user to use --resume --skipfirst get past
7821 a non-essential package with a broken digest.
7823 mtimedb = self._mtimedb
# Persist only actual merge operations from the merge list; each entry
# is stored as a plain list so mtimedb can serialize it.
7824 mtimedb["resume"]["mergelist"] = [list(x) \
7825 for x in self._mergelist \
7826 if isinstance(x, Package) and x.operation == "merge"]
7830 def _calc_resume_list(self):
# NOTE(review): listing has gaps (spinner setup, try/except headers, the
# e = exc rename, and the final return are not visible) -- verify
# against upstream before editing.
7832 Use the current resume list to calculate a new one,
7833 dropping any packages with unsatisfied deps.
7835 @returns: True if successful, False otherwise.
7837 print colorize("GOOD", "*** Resuming merge...")
# Optionally preview the pending merge list, honoring --tree ordering.
7839 if self._show_list():
7840 if "--tree" in self.myopts:
7841 portage.writemsg_stdout("\n" + \
7842 darkgreen("These are the packages that " + \
7843 "would be merged, in reverse order:\n\n"))
7846 portage.writemsg_stdout("\n" + \
7847 darkgreen("These are the packages that " + \
7848 "would be merged, in order:\n\n"))
7850 show_spinner = "--quiet" not in self.myopts and \
7851 "--nodeps" not in self.myopts
7854 print "Calculating dependencies ",
7856 myparams = create_depgraph_params(self.myopts, None)
# Rebuild the dependency graph from the saved resume list.
7860 success, mydepgraph, dropped_tasks = resume_depgraph(
7861 self.settings, self.trees, self._mtimedb, self.myopts,
7862 myparams, self._spinner)
7863 except depgraph.UnsatisfiedResumeDep, exc:
7864 # rename variable to avoid python-3.0 error:
7865 # SyntaxError: can not delete variable 'e' referenced in nested
7868 mydepgraph = e.depgraph
7869 dropped_tasks = set()
7872 print "\b\b... done!"
# Deferred error report (queued via _post_mod_echo_msgs) explaining
# which resume entries are masked or have unsatisfied dependencies.
7875 def unsatisfied_resume_dep_msg():
7876 mydepgraph.display_problems()
7877 out = portage.output.EOutput()
7878 out.eerror("One or more packages are either masked or " + \
7879 "have missing dependencies:")
7882 show_parents = set()
# Report each offending parent only once.
7884 if dep.parent in show_parents:
7886 show_parents.add(dep.parent)
7887 if dep.atom is None:
7888 out.eerror(indent + "Masked package:")
7889 out.eerror(2 * indent + str(dep.parent))
7892 out.eerror(indent + str(dep.atom) + " pulled in by:")
7893 out.eerror(2 * indent + str(dep.parent))
7895 msg = "The resume list contains packages " + \
7896 "that are either masked or have " + \
7897 "unsatisfied dependencies. " + \
7898 "Please restart/continue " + \
7899 "the operation manually, or use --skipfirst " + \
7900 "to skip the first package in the list and " + \
7901 "any other packages that may be " + \
7902 "masked or have missing dependencies."
7903 for line in textwrap.wrap(msg, 72):
7905 self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
# On success, display the recomputed merge list and queued problems.
7908 if success and self._show_list():
7909 mylist = mydepgraph.altlist()
7911 if "--tree" in self.myopts:
7913 mydepgraph.display(mylist, favorites=self._favorites)
7916 self._post_mod_echo_msgs.append(mydepgraph.display_problems)
7918 mydepgraph.display_problems()
# Break references so the depgraph can be garbage collected, then adopt
# the new merge list and scheduler graph.
7920 mylist = mydepgraph.altlist()
7921 mydepgraph.break_refs(mylist)
7922 mydepgraph.break_refs(dropped_tasks)
7923 self._mergelist = mylist
7924 self._set_digraph(mydepgraph.schedulerGraph())
# Record every dropped merge task as a failed package so that
# --keep-going reporting (and elog) includes it.
7927 for task in dropped_tasks:
7928 if not (isinstance(task, Package) and task.operation == "merge"):
7931 msg = "emerge --keep-going:" + \
7934 msg += " for %s" % (pkg.root,)
7935 msg += " dropped due to unsatisfied dependency."
7936 for line in textwrap.wrap(msg, msg_width):
7937 eerror(line, phase="other", key=pkg.cpv)
7938 settings = self.pkgsettings[pkg.root]
7939 # Ensure that log collection from $T is disabled inside
7940 # elog_process(), since any logs that might exist are
7942 settings.pop("T", None)
7943 portage.elog.elog_process(pkg.cpv, settings)
7944 self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
7948 def _show_list(self):
# Whether a merge list preview should be displayed: only when not
# --quiet and at least one of --ask/--tree/--verbose is set.
# NOTE(review): the return statements are not visible in this listing.
7949 myopts = self.myopts
7950 if "--quiet" not in myopts and \
7951 ("--ask" in myopts or "--tree" in myopts or \
7952 "--verbose" in myopts):
7956 def _world_atom(self, pkg):
# NOTE(review): listing has gaps (early returns, the lock/try/finally
# around the world-file update, and the unlock are not visible).
7958 Add the package to the world file, but only if
7959 it's supposed to be added. Otherwise, do nothing.
# Never touch the world file in dry-run/one-shot style modes.
7962 if set(("--buildpkgonly", "--fetchonly",
7964 "--oneshot", "--onlydeps",
7965 "--pretend")).intersection(self.myopts):
# Only record packages merged into the target root.
7968 if pkg.root != self.target_root:
# Only record packages that were explicitly requested as arguments.
7971 args_set = self._args_set
7972 if not args_set.findAtomForPackage(pkg):
7975 logger = self._logger
7976 pkg_count = self._pkg_count
7977 root_config = pkg.root_config
7978 world_set = root_config.sets["world"]
# Lock the world set if the implementation supports locking (hasattr
# duck-typing -- not all set backends do).
7979 world_locked = False
7980 if hasattr(world_set, "lock"):
7985 if hasattr(world_set, "load"):
7986 world_set.load() # maybe it's changed on disk
7988 atom = create_world_atom(pkg, args_set, root_config)
7990 if hasattr(world_set, "add"):
7991 self._status_msg(('Recording %s in "world" ' + \
7992 'favorites file...') % atom)
7993 logger.log(" === (%s of %s) Updating world file (%s)" % \
7994 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
# Backend without add(): warn that the atom could not be recorded.
7997 writemsg_level('\n!!! Unable to record %s in "world"\n' % \
7998 (atom,), level=logging.WARN, noiselevel=-1)
8003 def _pkg(self, cpv, type_name, root_config, installed=False):
# NOTE(review): listing has gaps (docstring close, operation selection
# for the non-installed case, the early return after the digraph hit,
# and the final "return pkg" are not visible) -- verify upstream.
8005 Get a package instance from the cache, or create a new
8006 one if necessary. Raises KeyError from aux_get if it
8007 fails for some reason (package does not exist or is
8012 operation = "nomerge"
8014 if self._digraph is not None:
8015 # Reuse existing instance when available.
8016 pkg = self._digraph.get(
8017 (type_name, root_config.root, cpv, operation))
# Cache miss: build a fresh Package from the appropriate tree's dbapi,
# pulling only the aux-cache metadata keys.
8021 tree_type = depgraph.pkg_tree_map[type_name]
8022 db = root_config.trees[tree_type].dbapi
8023 db_keys = list(self.trees[root_config.root][
8024 tree_type].dbapi._aux_cache_keys)
8025 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
8026 pkg = Package(cpv=cpv, metadata=metadata,
8027 root_config=root_config, installed=installed)
# For ebuilds, evaluate USE/CHOST against the current per-root config.
8028 if type_name == "ebuild":
8029 settings = self.pkgsettings[root_config.root]
8030 settings.setcpv(pkg)
8031 pkg.metadata["USE"] = settings["PORTAGE_USE"]
8032 pkg.metadata['CHOST'] = settings.get('CHOST', '')
# Poll-scheduler that regenerates the ebuild metadata cache by running
# EbuildMetadataPhase jobs in parallel, then cleanses stale cache nodes.
# NOTE(review): this listing has gaps throughout (several loop/branch
# headers and a run() def line are not visible) -- verify upstream.
8036 class MetadataRegen(PollScheduler):
8038 def __init__(self, portdb, cp_iter=None, consumer=None,
8039 max_jobs=None, max_load=None):
8040 PollScheduler.__init__(self)
8041 self._portdb = portdb
# Global cleanse of stale cache entries is only valid when we iterate
# over every single cp (i.e. no caller-supplied cp_iter).
8042 self._global_cleanse = False
8044 cp_iter = self._iter_every_cp()
8045 # We can globally cleanse stale cache only if we
8046 # iterate over every single cp.
8047 self._global_cleanse = True
8048 self._cp_iter = cp_iter
8049 self._consumer = consumer
8051 if max_jobs is None:
8054 self._max_jobs = max_jobs
8055 self._max_load = max_load
# Scheduler interface handed to each metadata subprocess.
8056 self._sched_iface = self._sched_iface_class(
8057 register=self._register,
8058 schedule=self._schedule_wait,
8059 unregister=self._unregister)
8061 self._valid_pkgs = set()
8062 self._cp_set = set()
8063 self._process_iter = self._iter_metadata_processes()
8064 self.returncode = os.EX_OK
8065 self._error_count = 0
# Yield every cp in sorted order; sort(reverse=True)+pop() consumes the
# list from the tail so iteration proceeds in ascending order.
8067 def _iter_every_cp(self):
8068 every_cp = self._portdb.cp_all()
8069 every_cp.sort(reverse=True)
8072 yield every_cp.pop()
# Generator of EbuildMetadataPhase tasks for every cpv whose cached
# metadata is stale; cpvs with valid cache go straight to the consumer.
8076 def _iter_metadata_processes(self):
8077 portdb = self._portdb
8078 valid_pkgs = self._valid_pkgs
8079 cp_set = self._cp_set
8080 consumer = self._consumer
8082 for cp in self._cp_iter:
8084 portage.writemsg_stdout("Processing %s\n" % cp)
8085 cpv_list = portdb.cp_list(cp)
8086 for cpv in cpv_list:
8088 ebuild_path, repo_path = portdb.findname2(cpv)
8089 metadata, st, emtime = portdb._pull_valid_cache(
8090 cpv, ebuild_path, repo_path)
# Valid cache hit: notify the consumer directly, no subprocess needed.
8091 if metadata is not None:
8092 if consumer is not None:
8093 consumer(cpv, ebuild_path,
8094 repo_path, metadata)
8097 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
8098 ebuild_mtime=emtime,
8099 metadata_callback=portdb._metadata_callback,
8100 portdb=portdb, repo_path=repo_path,
8101 settings=portdb.doebuild_settings)
# --- run body (its def line is not visible in this listing) ---
8105 portdb = self._portdb
8106 from portage.cache.cache_errors import CacheError
8109 while self._schedule():
# After scheduling completes, collect candidate stale ("dead") cache
# nodes per tree: all nodes for a global cleanse, otherwise only those
# belonging to the cps that were processed.
8115 if self._global_cleanse:
8116 for mytree in portdb.porttrees:
8118 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
8119 except CacheError, e:
8120 portage.writemsg("Error listing cache entries for " + \
8121 "'%s': %s, continuing...\n" % (mytree, e),
8127 cp_set = self._cp_set
8128 cpv_getkey = portage.cpv_getkey
8129 for mytree in portdb.porttrees:
8131 dead_nodes[mytree] = set(cpv for cpv in \
8132 portdb.auxdb[mytree].iterkeys() \
8133 if cpv_getkey(cpv) in cp_set)
8134 except CacheError, e:
8135 portage.writemsg("Error listing cache entries for " + \
8136 "'%s': %s, continuing...\n" % (mytree, e),
# Keep nodes that still have a live ebuild; delete the remainder.
8143 for y in self._valid_pkgs:
8144 for mytree in portdb.porttrees:
8145 if portdb.findname2(y, mytree=mytree)[0]:
8146 dead_nodes[mytree].discard(y)
8148 for mytree, nodes in dead_nodes.iteritems():
8149 auxdb = portdb.auxdb[mytree]
8153 except (KeyError, CacheError):
# Start as many metadata subprocesses as the job/load limits allow.
8156 def _schedule_tasks(self):
8159 @returns: True if there may be remaining tasks to schedule,
8162 while self._can_add_job():
8164 metadata_process = self._process_iter.next()
8165 except StopIteration:
8169 metadata_process.scheduler = self._sched_iface
8170 metadata_process.addExitListener(self._metadata_exit)
8171 metadata_process.start()
# Exit listener: count failures and still notify the consumer (with
# metadata=None) so callers observe every cpv exactly once.
8174 def _metadata_exit(self, metadata_process):
8176 if metadata_process.returncode != os.EX_OK:
8178 self._error_count += 1
8179 self._valid_pkgs.discard(metadata_process.cpv)
8180 portage.writemsg("Error processing %s, continuing...\n" % \
8181 (metadata_process.cpv,), noiselevel=-1)
8183 if self._consumer is not None:
8184 # On failure, still notify the consumer (in this case the metadata
8185 # argument is None).
8186 self._consumer(metadata_process.cpv,
8187 metadata_process.ebuild_path,
8188 metadata_process.repo_path,
8189 metadata_process.metadata)
# Select and remove installed packages for the "unmerge", "prune" and
# "clean" actions; builds pkgmap (per-argument selected/protected/
# omitted sets), previews it, then performs the actual unmerges.
# NOTE(review): this listing has many gaps (try/except/return headers,
# loop headers, and several branch lines are not visible) -- verify
# against upstream before editing anything here.
8193 def unmerge(root_config, myopts, unmerge_action,
8194 unmerge_files, ldpath_mtimes, autoclean=0,
8195 clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
8196 scheduler=None, writemsg_level=portage.util.writemsg_level):
8199 clean_world = myopts.get('--deselect') != 'n'
8200 quiet = "--quiet" in myopts
8201 settings = root_config.settings
8202 sets = root_config.sets
8203 vartree = root_config.trees["vartree"]
8204 candidate_catpkgs=[]
8206 xterm_titles = "notitles" not in settings.features
8207 out = portage.output.EOutput()
# Local Package factory with a small cache keyed by cpv.
8209 db_keys = list(vartree.dbapi._aux_cache_keys)
8212 pkg = pkg_cache.get(cpv)
8214 pkg = Package(cpv=cpv, installed=True,
8215 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
8216 root_config=root_config,
8217 type_name="installed")
8218 pkg_cache[cpv] = pkg
# Lock the vdb when writable so concurrent emerges don't interfere.
8221 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
8223 # At least the parent needs to exist for the lock file.
8224 portage.util.ensure_dirs(vdb_path)
8225 except portage.exception.PortageException:
8229 if os.access(vdb_path, os.W_OK):
8230 vdb_lock = portage.locks.lockdir(vdb_path)
# Expand the system set: resolve virtuals to their sole installed
# provider where unambiguous, otherwise keep the virtual's cp.
8231 realsyslist = sets["system"].getAtoms()
8233 for x in realsyslist:
8234 mycp = portage.dep_getkey(x)
8235 if mycp in settings.getvirtuals():
8237 for provider in settings.getvirtuals()[mycp]:
8238 if vartree.dbapi.match(provider):
8239 providers.append(provider)
8240 if len(providers) == 1:
8241 syslist.extend(providers)
8243 syslist.append(mycp)
8245 mysettings = portage.config(clone=settings)
8247 if not unmerge_files:
8248 if unmerge_action == "unmerge":
8250 print bold("emerge unmerge") + " can only be used with specific package names"
8257 # process all arguments and add all
8258 # valid db entries to candidate_catpkgs
8260 if not unmerge_files:
8261 candidate_catpkgs.extend(vartree.dbapi.cp_all())
8263 #we've got command-line arguments
8264 if not unmerge_files:
8265 print "\nNo packages to unmerge have been provided.\n"
8267 for x in unmerge_files:
8268 arg_parts = x.split('/')
8269 if x[0] not in [".","/"] and \
8270 arg_parts[-1][-7:] != ".ebuild":
8271 #possible cat/pkg or dep; treat as such
8272 candidate_catpkgs.append(x)
8273 elif unmerge_action in ["prune","clean"]:
8274 print "\n!!! Prune and clean do not accept individual" + \
8275 " ebuilds as arguments;\n skipping.\n"
8278 # it appears that the user is specifying an installed
8279 # ebuild and we're in "unmerge" mode, so it's ok.
8280 if not os.path.exists(x):
8281 print "\n!!! The path '"+x+"' doesn't exist.\n"
# Translate a filesystem path inside the vdb into an "=cat/pkg-ver"
# atom by stripping the vdb prefix from its path components.
8284 absx = os.path.abspath(x)
8285 sp_absx = absx.split("/")
8286 if sp_absx[-1][-7:] == ".ebuild":
8288 absx = "/".join(sp_absx)
8290 sp_absx_len = len(sp_absx)
8292 vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
8293 vdb_len = len(vdb_path)
8295 sp_vdb = vdb_path.split("/")
8296 sp_vdb_len = len(sp_vdb)
8298 if not os.path.exists(absx+"/CONTENTS"):
8299 print "!!! Not a valid db dir: "+str(absx)
8302 if sp_absx_len <= sp_vdb_len:
8303 # The Path is shorter... so it can't be inside the vdb.
8306 print "\n!!!",x,"cannot be inside "+ \
8307 vdb_path+"; aborting.\n"
8310 for idx in range(0,sp_vdb_len):
8311 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
8314 print "\n!!!", x, "is not inside "+\
8315 vdb_path+"; aborting.\n"
8318 print "="+"/".join(sp_absx[sp_vdb_len:])
8319 candidate_catpkgs.append(
8320 "="+"/".join(sp_absx[sp_vdb_len:]))
8323 if (not "--quiet" in myopts):
8325 if settings["ROOT"] != "/":
8326 writemsg_level(darkgreen(newline+ \
8327 ">>> Using system located in ROOT tree %s\n" % \
8330 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
8331 not ("--quiet" in myopts):
8332 writemsg_level(darkgreen(newline+\
8333 ">>> These are the packages that would be unmerged:\n"))
8335 # Preservation of order is required for --depclean and --prune so
8336 # that dependencies are respected. Use all_selected to eliminate
8337 # duplicate packages since the same package may be selected by
8340 all_selected = set()
8341 for x in candidate_catpkgs:
8342 # cycle through all our candidate deps and determine
8343 # what will and will not get unmerged
8345 mymatch = vartree.dbapi.match(x)
8346 except portage.exception.AmbiguousPackageName, errpkgs:
8347 print "\n\n!!! The short ebuild name \"" + \
8348 x + "\" is ambiguous. Please specify"
8349 print "!!! one of the following fully-qualified " + \
8350 "ebuild names instead:\n"
8351 for i in errpkgs[0]:
8352 print " " + green(i)
8356 if not mymatch and x[0] not in "<>=~":
8357 mymatch = localtree.dep_match(x)
8359 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
8360 (x, unmerge_action), noiselevel=-1)
# One pkgmap entry per argument: selected / protected / omitted cpvs.
8364 {"protected": set(), "selected": set(), "omitted": set()})
8365 mykey = len(pkgmap) - 1
8366 if unmerge_action=="unmerge":
8368 if y not in all_selected:
8369 pkgmap[mykey]["selected"].add(y)
# prune: keep only the best installed version per argument.
8371 elif unmerge_action == "prune":
8372 if len(mymatch) == 1:
8374 best_version = mymatch[0]
8375 best_slot = vartree.getslot(best_version)
8376 best_counter = vartree.dbapi.cpv_counter(best_version)
8377 for mypkg in mymatch[1:]:
8378 myslot = vartree.getslot(mypkg)
8379 mycounter = vartree.dbapi.cpv_counter(mypkg)
8380 if (myslot == best_slot and mycounter > best_counter) or \
8381 mypkg == portage.best([mypkg, best_version]):
8382 if myslot == best_slot:
8383 if mycounter < best_counter:
8384 # On slot collision, keep the one with the
8385 # highest counter since it is the most
8386 # recently installed.
8388 best_version = mypkg
8390 best_counter = mycounter
8391 pkgmap[mykey]["protected"].add(best_version)
8392 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
8393 if mypkg != best_version and mypkg not in all_selected)
8394 all_selected.update(pkgmap[mykey]["selected"])
# clean: group installed versions by slot, protect the newest (highest
# counter) per slot and select the rest.
8396 # unmerge_action == "clean"
8398 for mypkg in mymatch:
8399 if unmerge_action == "clean":
8400 myslot = localtree.getslot(mypkg)
8402 # since we're pruning, we don't care about slots
8403 # and put all the pkgs in together
8405 if myslot not in slotmap:
8406 slotmap[myslot] = {}
8407 slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
8409 for mypkg in vartree.dbapi.cp_list(
8410 portage.dep_getkey(mymatch[0])):
8411 myslot = vartree.getslot(mypkg)
8412 if myslot not in slotmap:
8413 slotmap[myslot] = {}
8414 slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
8416 for myslot in slotmap:
8417 counterkeys = slotmap[myslot].keys()
8421 pkgmap[mykey]["protected"].add(
8422 slotmap[myslot][counterkeys[-1]])
8425 for counter in counterkeys[:]:
8426 mypkg = slotmap[myslot][counter]
8427 if mypkg not in mymatch:
8428 counterkeys.remove(counter)
8429 pkgmap[mykey]["protected"].add(
8430 slotmap[myslot][counter])
8432 #be pretty and get them in order of merge:
8433 for ckey in counterkeys:
8434 mypkg = slotmap[myslot][ckey]
8435 if mypkg not in all_selected:
8436 pkgmap[mykey]["selected"].add(mypkg)
8437 all_selected.add(mypkg)
8438 # ok, now the last-merged package
8439 # is protected, and the rest are selected
8440 numselected = len(all_selected)
8441 if global_unmerge and not numselected:
8442 portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
8446 portage.writemsg_stdout(
8447 "\n>>> No packages selected for removal by " + \
8448 unmerge_action + "\n")
8452 vartree.dbapi.flush_cache()
8453 portage.locks.unlockdir(vdb_lock)
8455 from portage.sets.base import EditablePackageSet
8457 # generate a list of package sets that are directly or indirectly listed in "world",
8458 # as there is no persistent list of "installed" sets
8459 installed_sets = ["world"]
8464 pos = len(installed_sets)
8465 for s in installed_sets[pos - 1:]:
8468 candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
8471 installed_sets += candidates
8472 installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
8475 # we don't want to unmerge packages that are still listed in user-editable package sets
8476 # listed in "world" as they would be remerged on the next update of "world" or the
8477 # relevant package sets.
8478 unknown_sets = set()
8479 for cp in xrange(len(pkgmap)):
8480 for cpv in pkgmap[cp]["selected"].copy():
8484 # It could have been uninstalled
8485 # by a concurrent process.
# Safety net: refuse to let portage unmerge itself on the live root.
8488 if unmerge_action != "clean" and \
8489 root_config.root == "/" and \
8490 portage.match_from_list(
8491 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
8492 msg = ("Not unmerging package %s since there is no valid " + \
8493 "reason for portage to unmerge itself.") % (pkg.cpv,)
8494 for line in textwrap.wrap(msg, 75):
8496 # adjust pkgmap so the display output is correct
8497 pkgmap[cp]["selected"].remove(cpv)
8498 all_selected.remove(cpv)
8499 pkgmap[cp]["protected"].add(cpv)
8503 for s in installed_sets:
8504 # skip sets that the user requested to unmerge, and skip world
8505 # unless we're unmerging a package set (as the package would be
8506 # removed from "world" later on)
8507 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
8511 if s in unknown_sets:
8514 out = portage.output.EOutput()
8515 out.eerror(("Unknown set '@%s' in " + \
8516 "%svar/lib/portage/world_sets") % \
8517 (s, root_config.root))
8520 # only check instances of EditablePackageSet as other classes are generally used for
8521 # special purposes and can be ignored here (and are usually generated dynamically, so the
8522 # user can't do much about them anyway)
8523 if isinstance(sets[s], EditablePackageSet):
8525 # This is derived from a snippet of code in the
8526 # depgraph._iter_atoms_for_pkg() method.
8527 for atom in sets[s].iterAtomsForPackage(pkg):
8528 inst_matches = vartree.dbapi.match(atom)
8529 inst_matches.reverse() # descending order
8531 for inst_cpv in inst_matches:
8533 inst_pkg = _pkg(inst_cpv)
8535 # It could have been uninstalled
8536 # by a concurrent process.
8539 if inst_pkg.cp != atom.cp:
8542 # This is descending order, and we're not
8543 # interested in any versions <= pkg given.
8545 if pkg.slot_atom != inst_pkg.slot_atom:
8546 higher_slot = inst_pkg
8548 if higher_slot is None:
8552 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
8553 #print colorize("WARN", "but still listed in the following package sets:")
8554 #print " %s\n" % ", ".join(parents)
8555 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
8556 print colorize("WARN", "still referenced by the following package sets:")
8557 print " %s\n" % ", ".join(parents)
8558 # adjust pkgmap so the display output is correct
8559 pkgmap[cp]["selected"].remove(cpv)
8560 all_selected.remove(cpv)
8561 pkgmap[cp]["protected"].add(cpv)
8565 numselected = len(all_selected)
8568 "\n>>> No packages selected for removal by " + \
8569 unmerge_action + "\n")
# For unordered actions, regroup pkgmap entries by cp and sort.
8572 # Unmerge order only matters in some cases
8576 selected = d["selected"]
8579 cp = portage.cpv_getkey(iter(selected).next())
8580 cp_dict = unordered.get(cp)
8583 unordered[cp] = cp_dict
8586 for k, v in d.iteritems():
8587 cp_dict[k].update(v)
8588 pkgmap = [unordered[cp] for cp in sorted(unordered)]
# Preview pass: fill "omitted" with sibling versions, warn on system
# packages, and print each entry's selected/protected/omitted sets.
8590 for x in xrange(len(pkgmap)):
8591 selected = pkgmap[x]["selected"]
8594 for mytype, mylist in pkgmap[x].iteritems():
8595 if mytype == "selected":
8597 mylist.difference_update(all_selected)
8598 cp = portage.cpv_getkey(iter(selected).next())
8599 for y in localtree.dep_match(cp):
8600 if y not in pkgmap[x]["omitted"] and \
8601 y not in pkgmap[x]["selected"] and \
8602 y not in pkgmap[x]["protected"] and \
8603 y not in all_selected:
8604 pkgmap[x]["omitted"].add(y)
8605 if global_unmerge and not pkgmap[x]["selected"]:
8606 #avoid cluttering the preview printout with stuff that isn't getting unmerged
8608 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
8609 writemsg_level(colorize("BAD","\a\n\n!!! " + \
8610 "'%s' is part of your system profile.\n" % cp),
8611 level=logging.WARNING, noiselevel=-1)
8612 writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
8613 "be damaging to your system.\n\n"),
8614 level=logging.WARNING, noiselevel=-1)
8615 if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
8616 countdown(int(settings["EMERGE_WARNING_DELAY"]),
8617 colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
8619 writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
8621 writemsg_level(bold(cp) + ": ", noiselevel=-1)
8622 for mytype in ["selected","protected","omitted"]:
8624 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
8625 if pkgmap[x][mytype]:
8626 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
8627 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
8628 for pn, ver, rev in sorted_pkgs:
8632 myversion = ver + "-" + rev
8633 if mytype == "selected":
8635 colorize("UNMERGE_WARN", myversion + " "),
8639 colorize("GOOD", myversion + " "), noiselevel=-1)
8641 writemsg_level("none ", noiselevel=-1)
8643 writemsg_level("\n", noiselevel=-1)
8645 writemsg_level("\n", noiselevel=-1)
8647 writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
8648 " packages are slated for removal.\n")
8649 writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
8650 " and " + colorize("GOOD", "'omitted'") + \
8651 " packages will not be removed.\n\n")
8653 if "--pretend" in myopts:
8654 #we're done... return
8656 if "--ask" in myopts:
8657 if userquery("Would you like to unmerge these packages?")=="No":
8658 # enter pretend mode for correct formatting of results
8659 myopts["--pretend"] = True
8664 #the real unmerging begins, after a short delay....
8665 if clean_delay and not autoclean:
8666 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
# Execution pass: unmerge every selected cpv, logging success/failure
# and keeping the world file in sync.
8668 for x in xrange(len(pkgmap)):
8669 for y in pkgmap[x]["selected"]:
8670 writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
8671 emergelog(xterm_titles, "=== Unmerging... ("+y+")")
8672 mysplit = y.split("/")
8674 retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
8675 mysettings, unmerge_action not in ["clean","prune"],
8676 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
8677 scheduler=scheduler)
8679 if retval != os.EX_OK:
8680 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
8682 raise UninstallFailure(retval)
8685 if clean_world and hasattr(sets["world"], "cleanPackage"):
8686 sets["world"].cleanPackage(vartree.dbapi, y)
8687 emergelog(xterm_titles, " >>> unmerge success: "+y)
8688 if clean_world and hasattr(sets["world"], "remove"):
8689 for s in root_config.setconfig.active:
8690 sets["world"].remove(SETPREFIX+s)
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
    # Regenerate the GNU info directory index ("dir" files) for any info
    # directory whose mtime changed since the previous session, then print
    # a summary.  prev_mtimes is updated in place so later runs can skip
    # unchanged directories.
    # NOTE(review): this excerpt is truncated -- several connective lines
    # (loop headers, try/else statements, counter initialization) are not
    # visible, so the indentation below is a best-effort reconstruction.
    if os.path.exists("/usr/bin/install-info"):
        out = portage.output.EOutput()
        # (loop over the infodirs entries `z` is not visible in this excerpt)
        inforoot=normpath(root+z)
        if os.path.isdir(inforoot):
            infomtime = long(os.stat(inforoot).st_mtime)
            # Queue for regeneration only when new or changed since last run.
            if inforoot not in prev_mtimes or \
            prev_mtimes[inforoot] != infomtime:
                regen_infodirs.append(inforoot)

        if not regen_infodirs:
            portage.writemsg_stdout("\n")
            out.einfo("GNU info directory index is up-to-date.")
        # (else branch marker not visible in this excerpt)
        portage.writemsg_stdout("\n")
        out.einfo("Regenerating GNU info directory index...")

        # Index files may be plain or compressed.
        dir_extensions = ("", ".gz", ".bz2")
        for inforoot in regen_infodirs:
            # Skip directories that vanished or are not writable.
            if not os.path.isdir(inforoot) or \
            not os.access(inforoot, os.W_OK):
            file_list = os.listdir(inforoot)
            dir_file = os.path.join(inforoot, "dir")
            moved_old_dir = False
            # (per-file loop over `x` in file_list is not visible here)
            # Ignore hidden files and subdirectories.
            if x.startswith(".") or \
            os.path.isdir(os.path.join(inforoot, x)):
            # Skip the index files themselves (dir, dir.gz, dir.old, ...).
            if x.startswith("dir"):
                for ext in dir_extensions:
                    if x == "dir" + ext or \
                    x == "dir" + ext + ".old":
            # Before processing the first info file, move any existing index
            # files out of the way so install-info starts from scratch.
            if processed_count == 0:
                for ext in dir_extensions:
                    # (matching try statement is not visible in this excerpt)
                    os.rename(dir_file + ext, dir_file + ext + ".old")
                    moved_old_dir = True
                except EnvironmentError, e:
                    # A missing index file is expected; anything else is real.
                    if e.errno != errno.ENOENT:
            processed_count += 1
            # Force a C locale so install-info's output is parseable below.
            myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
            existsstr="already exists, for file `"
            if re.search(existsstr,myso):
                # Already exists... Don't increment the count for this.
            elif myso[:44]=="install-info: warning: no info dir entry in ":
                # This info file doesn't contain a DIR-header: install-info produces this
                # (harmless) warning (the --quiet switch doesn't seem to work).
                # Don't increment the count for this.
            # (error branch header not visible) accumulate diagnostic output
            errmsg += myso + "\n"

            if moved_old_dir and not os.path.exists(dir_file):
                # We didn't generate a new dir file, so put the old file
                # back where it was originally found.
                for ext in dir_extensions:
                    os.rename(dir_file + ext + ".old", dir_file + ext)
                except EnvironmentError, e:
                    if e.errno != errno.ENOENT:

            # Clean dir.old cruft so that they don't prevent
            # unmerge of otherwise empty directories.
            for ext in dir_extensions:
                os.unlink(dir_file + ext + ".old")
            except EnvironmentError, e:
                if e.errno != errno.ENOENT:

            #update mtime so we can potentially avoid regenerating.
            prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)

        # (summary branch headers and counters are not visible here)
        out.eerror("Processed %d info files; %d errors." % \
        writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
        out.einfo("Processed %d info files." % (icount,))
def display_news_notification(root_config, myopts):
    # Warn the user about unread news items, one line per repository that
    # has any, and point at `eselect news` when something was reported.
    # NOTE(review): excerpt is truncated -- the `if unreadItems:` guard and
    # blank-line prints are not visible here.
    target_root = root_config.root
    trees = root_config.trees
    settings = trees["vartree"].settings
    portdb = trees["porttree"].dbapi
    vardb = trees["vartree"].dbapi
    NEWS_PATH = os.path.join("metadata", "news")
    UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
    newsReaderDisplay = False
    # Only refresh the unread-item state when not in pretend mode.
    update = "--pretend" not in myopts

    for repo in portdb.getRepositories():
        unreadItems = checkUpdatedNewsItems(
            portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
        # Remember that at least one repo had news, so the trailing
        # `eselect news` hint is printed exactly once.
        if not newsReaderDisplay:
            newsReaderDisplay = True
        print colorize("WARN", " * IMPORTANT:"),
        print "%s news items need reading for repository '%s'." % (unreadItems, repo)

    if newsReaderDisplay:
        print colorize("WARN", " *"),
        print "Use " + colorize("GOOD", "eselect news") + " to read news items."
def display_preserved_libs(vardbapi):
    # Report preserved libraries that are still registered, listing for each
    # package the library paths and (up to a display limit) the files that
    # still link against them, then suggest `emerge @preserved-rebuild`.
    # NOTE(review): excerpt is truncated -- MAX_DISPLAY, consumer_map and
    # samefile_map initialization and several try/else lines are missing.

    # Ensure the registry is consistent with existing files.
    vardbapi.plib_registry.pruneNonExisting()

    if vardbapi.plib_registry.hasEntries():
        print colorize("WARN", "!!!") + " existing preserved libs:"
        plibdata = vardbapi.plib_registry.getPreservedLibs()
        linkmap = vardbapi.linkmap
        linkmap_broken = False

        # (try statement for the linkmap rebuild is not visible here)
        except portage.exception.CommandNotFound, e:
            writemsg_level("!!! Command Not Found: %s\n" % (e,),
                level=logging.ERROR, noiselevel=-1)
            # Without a working linkmap we cannot resolve consumers below.
            linkmap_broken = True

        search_for_owners = set()
        for cpv in plibdata:
            # Keys of this package's own preserved libs, used to filter out
            # self-references when collecting consumers.
            internal_plib_keys = set(linkmap._obj_key(f) \
                for f in plibdata[cpv])
            for f in plibdata[cpv]:
                if f in consumer_map:
                for c in linkmap.findConsumers(f):
                    # Filter out any consumers that are also preserved libs
                    # belonging to the same package as the provider.
                    if linkmap._obj_key(c) not in internal_plib_keys:
                consumer_map[f] = consumers
                # Only resolve owners for the consumers we will display.
                search_for_owners.update(consumers[:MAX_DISPLAY+1])

        owners = vardbapi._owners.getFileOwnerMap(search_for_owners)

        for cpv in plibdata:
            print colorize("WARN", ">>>") + " package: %s" % cpv
            for f in plibdata[cpv]:
                obj_key = linkmap._obj_key(f)
                # Group alternate paths that resolve to the same object.
                alt_paths = samefile_map.get(obj_key)
                if alt_paths is None:
                    samefile_map[obj_key] = alt_paths

            for alt_paths in samefile_map.itervalues():
                alt_paths = sorted(alt_paths)
                # (inner loop over paths `p` is not visible here)
                print colorize("WARN", " * ") + " - %s" % (p,)
                consumers = consumer_map.get(f, [])
                for c in consumers[:MAX_DISPLAY]:
                    print colorize("WARN", " * ") + " used by %s (%s)" % \
                        (c, ", ".join(x.mycpv for x in owners.get(c, [])))
                # Exactly one more consumer than the limit: name it rather
                # than printing "used by 1 other files".
                if len(consumers) == MAX_DISPLAY + 1:
                    print colorize("WARN", " * ") + " used by %s (%s)" % \
                        (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
                        for x in owners.get(consumers[MAX_DISPLAY], [])))
                elif len(consumers) > MAX_DISPLAY:
                    print colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY)
        print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
def _flush_elog_mod_echo():
    """
    Dump the mod_echo output now so that our other
    notifications are shown last.

    @returns: True if messages were shown, False otherwise.
    """
    # NOTE(review): the try/except (ImportError) around the import and the
    # display call are not visible in this truncated excerpt.
    messages_shown = False
    from portage.elog import mod_echo
    pass # happens during downgrade to a version without the module
    # _items is mod_echo's queue of pending messages; non-empty means
    # something was (or will be) shown.
    messages_shown = bool(mod_echo._items)
    return messages_shown
def post_emerge(root_config, myopts, mtimedb, retval):
    """
    Misc. things to run at the end of a merge session.

    Display preserved libs warnings

    @param trees: A dictionary mapping each ROOT to it's package databases
    @param mtimedb: The mtimeDB to store data needed across merge invocations
    @type mtimedb: MtimeDB class instance
    @param retval: Emerge's return value
    @rtype: None
    @returns:
    1. Calls sys.exit(retval)
    """
    # NOTE(review): excerpt is truncated -- else branches, emergelog setup
    # and the final sys.exit are not all visible here.
    target_root = root_config.root
    trees = { target_root : root_config.trees }
    vardbapi = trees[target_root]["vartree"].dbapi
    settings = vardbapi.settings
    info_mtimes = mtimedb["info"]

    # Load the most current variables from ${ROOT}/etc/profile.env
    settings.regenerate()

    config_protect = settings.get("CONFIG_PROTECT","").split()
    infodirs = settings.get("INFOPATH","").split(":") + \
        settings.get("INFODIR","").split(":")

    if retval == os.EX_OK:
        exit_msg = " *** exiting successfully."
    # (else branch marker not visible here)
    exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
    emergelog("notitles" not in settings.features, exit_msg)

    _flush_elog_mod_echo()

    # PORTAGE_COUNTER_HASH lets us detect whether the vdb actually changed
    # during this session; if not, skip the expensive post-merge checks.
    counter_hash = settings.get("PORTAGE_COUNTER_HASH")
    if "--pretend" in myopts or (counter_hash is not None and \
    counter_hash == vardbapi._counter_hash()):
        display_news_notification(root_config, myopts)
        # If vdb state has not changed then there's nothing else to do.

    vdb_path = os.path.join(target_root, portage.VDB_PATH)
    portage.util.ensure_dirs(vdb_path)
    # Take the vdb lock while checking info files, but never in pretend mode.
    if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
        vdb_lock = portage.locks.lockdir(vdb_path)

    if "noinfo" not in settings.features:
        chk_updated_info_files(target_root,
            infodirs, info_mtimes, retval)

    portage.locks.unlockdir(vdb_lock)

    chk_updated_cfg_files(target_root, config_protect)

    display_news_notification(root_config, myopts)
    if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
        display_preserved_libs(vardbapi)
def chk_updated_cfg_files(target_root, config_protect):
    # Scan CONFIG_PROTECT locations for pending ._cfg????_* update files and
    # tell the user how many config files need updating.
    # NOTE(review): excerpt is truncated -- the protect-file counter,
    # several else branches, and continue statements are not visible.
    #number of directories with some protect files in them
    for x in config_protect:
        x = os.path.join(target_root, x.lstrip(os.path.sep))
        if not os.access(x, os.W_OK):
            # Avoid Permission denied errors generated
            # (rest of this comment/branch is not visible in this excerpt)
        mymode = os.lstat(x).st_mode

        if stat.S_ISLNK(mymode):
            # We want to treat it like a directory if it
            # is a symlink to an existing directory.
            real_mode = os.stat(x).st_mode
            if stat.S_ISDIR(real_mode):

        if stat.S_ISDIR(mymode):
            # Whole protected directory: search it recursively, pruning
            # hidden subdirectories.
            mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
        # (else branch: a single protected file)
        mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
            os.path.split(x.rstrip(os.path.sep))
        # Exclude editor backups; NUL-terminate for safe splitting below.
        mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
        a = commands.getstatusoutput(mycommand)
        # (non-zero exit status branch header is not visible here)
        sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
        # Show the error message alone, sending stdout to /dev/null.
        os.system(mycommand + " 1>/dev/null")

        files = a[1].split('\0')
        # split always produces an empty string as the last element
        if files and not files[-1]:

        print "\n"+colorize("WARN", " * IMPORTANT:"),
        if stat.S_ISDIR(mymode):
            print "%d config files in '%s' need updating." % \
        # (else branch marker not visible here)
        print "config file '%s' needs updating." % x

    # (summary guard not visible here)
    print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
        " section of the " + bold("emerge")
    print " "+yellow("*")+" man page to learn how to update config files."
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
    # NOTE(review): the continuation of this signature (the `update`
    # keyword parameter with its default) is not visible in this excerpt.
    """
    Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
    Returns the number of unread (yet relevant) items.

    @param portdb: a portage tree database
    @type portdb: pordbapi
    @param vardb: an installed package database
    @type vardb: vardbapi
    @rtype: Integer
    @returns:
    1. The number of unread but relevant news items.
    """
    # Delegate entirely to NewsManager, which tracks per-repo unread state.
    from portage.news import NewsManager
    manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
    return manager.getUnreadItems( repo_id, update=update )
def insert_category_into_atom(atom, category):
    # Insert "category/" immediately before the package-name portion of
    # `atom`, i.e. before the first word character, preserving any leading
    # operator prefix such as ">=" or "=".
    alphanum = re.search(r'\w', atom)
    # NOTE(review): the branch handling a non-matching atom (alphanum is
    # None) and the return statement are not visible in this excerpt.
    ret = atom[:alphanum.start()] + "%s/" % category + \
        atom[alphanum.start():]
def is_valid_package_atom(x):
    # Validate a (possibly category-less) package atom: graft a dummy
    # "cat/" category before the package name so portage.isvalidatom()
    # can accept atoms given without a category.
    alphanum = re.search(r'\w', x)
    # NOTE(review): the guard checking that `alphanum` matched is not
    # visible in this truncated excerpt.
    x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
    return portage.isvalidatom(x)
def show_blocker_docs_link():
    """Point the user at the Gentoo Handbook section on blocked packages."""
    intro = "For more information about " + bad("Blocked Packages") + \
        ", please refer to the following"
    for doc_line in (
            intro,
            "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):",
            "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"):
        print(doc_line)
def show_mask_docs():
    """Tell the user where to read about masked packages."""
    doc_pointer = (
        "For more information, see the MASKED PACKAGES section in the emerge",
        "man page or refer to the Gentoo Handbook.",
    )
    for doc_line in doc_pointer:
        print(doc_line)
def action_sync(settings, trees, mtimedb, myopts, myaction):
    # Synchronize the portage tree using whichever method applies (existing
    # git checkout, rsync:// SYNC URI, or cvs:// SYNC URI), then refresh the
    # metadata cache, apply global updates, run the user's post_sync hook,
    # and print portage-upgrade and news notifications.
    # NOTE(review): this excerpt is heavily truncated -- many connective
    # lines (try statements, else branches, sys.exit calls, loop headers)
    # are not visible, so the indentation below is a best-effort
    # reconstruction.
    xterm_titles = "notitles" not in settings.features
    emergelog(xterm_titles, " === sync")
    portdb = trees[settings["ROOT"]]["porttree"].dbapi
    myportdir = portdb.porttree_root
    out = portage.output.EOutput()
    # (guard for an unset PORTDIR is not visible here)
    sys.stderr.write("!!! PORTDIR is undefined. Is /etc/make.globals missing?\n")
    # Normalize away a trailing slash.
    if myportdir[-1]=="/":
        myportdir=myportdir[:-1]
    st = os.stat(myportdir)
    # (OSError handler creating a missing PORTDIR is not fully visible)
    print ">>>",myportdir,"not found, creating it."
    os.makedirs(myportdir,0755)
    st = os.stat(myportdir)

    spawn_kwargs["env"] = settings.environ()
    # With FEATURES=usersync and sufficient privileges, run the sync as the
    # tree's owning uid/gid instead of root.
    if 'usersync' in settings.features and \
    portage.data.secpass >= 2 and \
    (st.st_uid != os.getuid() and st.st_mode & 0700 or \
    st.st_gid != os.getgid() and st.st_mode & 0070):
        homedir = pwd.getpwuid(st.st_uid).pw_dir
        # Drop privileges when syncing, in order to match
        # existing uid/gid settings.
        spawn_kwargs["uid"] = st.st_uid
        spawn_kwargs["gid"] = st.st_gid
        spawn_kwargs["groups"] = [st.st_gid]
        spawn_kwargs["env"]["HOME"] = homedir
        # Keep group write masked off if the tree is not group-writable.
        if not st.st_mode & 0020:
            umask = umask | 0020
        spawn_kwargs["umask"] = umask

    syncuri = settings.get("SYNC", "").strip()
    # (guard for an empty SYNC value is not visible here)
    writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
        noiselevel=-1, level=logging.ERROR)

    # Detect whether PORTDIR is already managed by a VCS.
    vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
    vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))

    updatecache_flg = False
    if myaction == "metadata":
        print "skipping sync"
        updatecache_flg = True
    elif ".git" in vcs_dirs:
        # Update existing git repository, and ignore the syncuri. We are
        # going to trust the user and assume that the user is in the branch
        # that he/she wants updated. We'll let the user manage branches with
        # git directly.
        if portage.process.find_binary("git") is None:
            msg = ["Command not found: git",
            "Type \"emerge dev-util/git\" to enable git support."]
            # (loop over msg lines `l` is not visible here)
            writemsg_level("!!! %s\n" % l,
                level=logging.ERROR, noiselevel=-1)
        msg = ">>> Starting git pull in %s..." % myportdir
        emergelog(xterm_titles, msg )
        writemsg_level(msg + "\n")
        exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
            (portage._shell_quote(myportdir),), **spawn_kwargs)
        if exitcode != os.EX_OK:
            msg = "!!! git pull error in %s." % myportdir
            emergelog(xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
        msg = ">>> Git pull in %s successful" % myportdir
        emergelog(xterm_titles, msg)
        writemsg_level(msg + "\n")
        # git does not preserve file mtimes; fix them up for the cache.
        exitcode = git_sync_timestamps(settings, myportdir)
        if exitcode == os.EX_OK:
            updatecache_flg = True
    elif syncuri[:8]=="rsync://":
        # Refuse to rsync over a checkout that a VCS manages.
        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
                "control (contains %s).\n!!! Aborting rsync sync.\n") % \
                (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
        if not os.path.exists("/usr/bin/rsync"):
            print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
            print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
        if settings["PORTAGE_RSYNC_OPTS"] == "":
            portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
            # (the rsync_opts.extend([...]) call header is not visible here)
            "--recursive", # Recurse directories
            "--links", # Consider symlinks
            "--safe-links", # Ignore links outside of tree
            "--perms", # Preserve permissions
            "--times", # Preserive mod times
            "--compress", # Compress the data transmitted
            "--force", # Force deletion on non-empty dirs
            "--whole-file", # Don't do block transfers, only entire files
            "--delete", # Delete files that aren't in the master tree
            "--stats", # Show final statistics about what was transfered
            "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
            "--exclude=/distfiles", # Exclude distfiles from consideration
            "--exclude=/local", # Exclude local from consideration
            "--exclude=/packages", # Exclude packages from consideration
        # (else branch: the user supplied PORTAGE_RSYNC_OPTS)
        # The below validation is not needed when using the above hardcoded
        portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
        # (the rsync_opts assignment header is not visible here)
        shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
        # Force the options we depend on, warning when they were missing.
        for opt in ("--recursive", "--times"):
            if opt not in rsync_opts:
                portage.writemsg(yellow("WARNING:") + " adding required option " + \
                    "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                rsync_opts.append(opt)

        for exclude in ("distfiles", "local", "packages"):
            opt = "--exclude=/%s" % exclude
            if opt not in rsync_opts:
                portage.writemsg(yellow("WARNING:") + \
                    " adding required option %s not included in " % opt + \
                    "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
                rsync_opts.append(opt)

        # Extra safeguards when syncing from the official mirror rotation.
        if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
            def rsync_opt_startswith(opt_prefix):
                # True when any configured option begins with opt_prefix.
                for x in rsync_opts:
                    if x.startswith(opt_prefix):

            if not rsync_opt_startswith("--timeout="):
                rsync_opts.append("--timeout=%d" % mytimeout)

            for opt in ("--compress", "--whole-file"):
                if opt not in rsync_opts:
                    portage.writemsg(yellow("WARNING:") + " adding required option " + \
                        "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
                    rsync_opts.append(opt)

        if "--quiet" in myopts:
            rsync_opts.append("--quiet") # Shut up a lot
        # (else branch marker not visible here)
        rsync_opts.append("--verbose") # Print filelist

        if "--verbose" in myopts:
            rsync_opts.append("--progress") # Progress meter for each file

        if "--debug" in myopts:
            rsync_opts.append("--checksum") # Force checksum on all files

        # Real local timestamp file.
        servertimestampfile = os.path.join(
            myportdir, "metadata", "timestamp.chk")

        content = portage.util.grabfile(servertimestampfile)
        # (mytimestamp default and the enclosing try are not visible here)
        mytimestamp = time.mktime(time.strptime(content[0],
            "%a, %d %b %Y %H:%M:%S +0000"))
        except (OverflowError, ValueError):

        rsync_initial_timeout = \
            int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
        # (ValueError fallback branch header is not visible here)
        rsync_initial_timeout = 15

        maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit, e:
            raise # Needed else can't exit
        maxretries=3 #default number of retries

        # Split the SYNC URI into optional user@, hostname and :port parts.
        user_name, hostname, port = re.split(
            "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
        if user_name is None:
        updatecache_flg=True
        all_rsync_opts = set(rsync_opts)
        extra_rsync_opts = shlex.split(
            settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
        all_rsync_opts.update(extra_rsync_opts)
        # Pick the address family from any -4/-6 style rsync options.
        family = socket.AF_INET
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
        ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6

        # Sentinel exit codes used by the retry loop below.
        SERVER_OUT_OF_DATE = -1
        EXCEEDED_MAX_RETRIES = -2
        # (retry loop header is not visible here)
        # Resolve the hostname and shuffle the addresses for crude
        # round-robin mirror selection.
        for addrinfo in socket.getaddrinfo(
            hostname, None, family, socket.SOCK_STREAM):
            if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
                # IPv6 addresses need to be enclosed in square brackets
                ips.append("[%s]" % addrinfo[4][0])
            ips.append(addrinfo[4][0])
        from random import shuffle
        except SystemExit, e:
            raise # Needed else can't exit
        except Exception, e:
            print "Notice:",str(e)

        # Substitute the chosen IP for the hostname in the sync URI.
        dosyncuri = syncuri.replace(
            "//" + user_name + hostname + port + "/",
            "//" + user_name + ips[0] + port + "/", 1)
        except SystemExit, e:
            raise # Needed else can't exit
        except Exception, e:
            print "Notice:",str(e)

        if "--ask" in myopts:
            if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":

        emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
        if "--quiet" not in myopts:
            print ">>> Starting rsync with "+dosyncuri+"..."
        # (retry > 0 branch marker not visible here)
        emergelog(xterm_titles,
            ">>> Starting retry %d of %d with %s" % \
            (retries,maxretries,dosyncuri))
        print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)

        if mytimestamp != 0 and "--quiet" not in myopts:
            print ">>> Checking server timestamp ..."

        rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts

        if "--debug" in myopts:

        # Even if there's no timestamp available locally, fetch the
        # timestamp anyway as an initial probe to verify that the server is
        # responsive. This protects us from hanging indefinitely on a
        # connection attempt to an unresponsive server which rsync's
        # --timeout option does not prevent.

        # Temporary file for remote server timestamp comparison.
        from tempfile import mkstemp
        fd, tmpservertimestampfile = mkstemp()
        mycommand = rsynccommand[:]
        mycommand.append(dosyncuri.rstrip("/") + \
            "/metadata/timestamp.chk")
        mycommand.append(tmpservertimestampfile)
        def timeout_handler(signum, frame):
            raise portage.exception.PortageException("timed out")
        signal.signal(signal.SIGALRM, timeout_handler)
        # Timeout here in case the server is unresponsive. The
        # --timeout rsync option doesn't apply to the initial
        # connection attempt.
        if rsync_initial_timeout:
            signal.alarm(rsync_initial_timeout)
        mypids.extend(portage.process.spawn(
            mycommand, env=settings.environ(), returnpid=True))
        exitcode = os.waitpid(mypids[0], 0)[1]
        content = portage.grabfile(tmpservertimestampfile)
        if rsync_initial_timeout:
            # (the signal.alarm(0) cancellation is not visible here)
        os.unlink(tmpservertimestampfile)
        except portage.exception.PortageException, e:
            # The probe timed out: reap or kill the rsync child.
            if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
                os.kill(mypids[0], signal.SIGTERM)
                os.waitpid(mypids[0], 0)
            # This is the same code rsync uses for timeout.
        if exitcode != os.EX_OK:
            # Translate the raw waitpid status into a plain exit code.
            exitcode = (exitcode & 0xff) << 8
        # (else branch marker not visible here)
        exitcode = exitcode >> 8
        portage.process.spawned_pids.remove(mypids[0])
        servertimestamp = time.mktime(time.strptime(
            content[0], "%a, %d %b %Y %H:%M:%S +0000"))
        except (OverflowError, ValueError):
        del mycommand, mypids, content
        if exitcode == os.EX_OK:
            if (servertimestamp != 0) and (servertimestamp == mytimestamp):
                # Nothing changed upstream since the last sync.
                emergelog(xterm_titles,
                    ">>> Cancelling sync -- Already current.")
                print ">>> Timestamps on the server and in the local repository are the same."
                print ">>> Cancelling all further sync action. You are already up to date."
                print ">>> In order to force sync, remove '%s'." % servertimestampfile
            elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
                emergelog(xterm_titles,
                    ">>> Server out of date: %s" % dosyncuri)
                print ">>> SERVER OUT OF DATE: %s" % dosyncuri
                print ">>> In order to force sync, remove '%s'." % servertimestampfile
                exitcode = SERVER_OUT_OF_DATE
            elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
                # Server is newer (or has no timestamp): do the full sync.
                mycommand = rsynccommand + [dosyncuri+"/", myportdir]
                exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
                if exitcode in [0,1,3,4,11,14,20,21]:
        elif exitcode in [1,3,4,11,14,20,21]:
        # Code 2 indicates protocol incompatibility, which is expected
        # for servers with protocol < 29 that don't support
        # --prune-empty-directories. Retry for a server that supports
        # at least rsync protocol version 29 (>=rsync-2.6.4).
        if retries<=maxretries:
            print ">>> Retrying..."
        # (else branch: retry budget exhausted)
        updatecache_flg=False
        exitcode = EXCEEDED_MAX_RETRIES

        # (final status reporting on the sentinel/real exit codes)
        emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
        elif exitcode == SERVER_OUT_OF_DATE:
        elif exitcode == EXCEEDED_MAX_RETRIES:
            ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
        # (diagnostics keyed on the rsync exit code; branch headers are
        # not visible in this excerpt)
        msg.append("Rsync has reported that there is a syntax error. Please ensure")
        msg.append("that your SYNC statement is proper.")
        msg.append("SYNC=" + settings["SYNC"])
        msg.append("Rsync has reported that there is a File IO error. Normally")
        msg.append("this means your disk is full, but can be caused by corruption")
        msg.append("on the filesystem that contains PORTDIR. Please investigate")
        msg.append("and try again after the problem has been fixed.")
        msg.append("PORTDIR=" + settings["PORTDIR"])
        msg.append("Rsync was killed before it finished.")
        msg.append("Rsync has not successfully finished. It is recommended that you keep")
        msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
        msg.append("to use rsync due to firewall or other restrictions. This should be a")
        msg.append("temporary problem unless complications exist with your network")
        msg.append("(and possibly your system's filesystem) configuration.")
    elif syncuri[:6]=="cvs://":
        if not os.path.exists("/usr/bin/cvs"):
            print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
            print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
        cvsdir=os.path.dirname(myportdir)
        if not os.path.exists(myportdir+"/CVS"):
            # Initial checkout: gentoo-x86 module is checked out next to
            # PORTDIR and then renamed into place.
            print ">>> Starting initial cvs checkout with "+syncuri+"..."
            if os.path.exists(cvsdir+"/gentoo-x86"):
                print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
            # (try/rmdir guard around the existing PORTDIR is not visible)
            if e.errno != errno.ENOENT:
                "!!! existing '%s' directory; exiting.\n" % myportdir)
            if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
                print "!!! cvs checkout error; exiting."
            os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
        # (else branch: routine update of an existing checkout)
        print ">>> Starting cvs update with "+syncuri+"..."
        retval = portage.process.spawn_bash(
            "cd %s; cvs -z0 -q update -dP" % \
            (portage._shell_quote(myportdir),), **spawn_kwargs)
        if retval != os.EX_OK:
    # (else branch: unrecognized SYNC protocol)
    writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
        noiselevel=-1, level=logging.ERROR)

    # Honor FEATURES=-metadata-transfer by skipping the cache update.
    if updatecache_flg and \
    myaction != "metadata" and \
    "metadata-transfer" not in settings.features:
        updatecache_flg = False

    # Reload the whole config from scratch.
    settings, trees, mtimedb = load_emerge_config(trees=trees)
    root_config = trees[settings["ROOT"]]["root_config"]
    portdb = trees[settings["ROOT"]]["porttree"].dbapi

    if updatecache_flg and \
    os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
        # Only update cache for myportdir since that's
        # the only one that's been synced here.
        action_metadata(settings, portdb, myopts, porttrees=[myportdir])

    if portage._global_updates(trees, mtimedb["updates"]):
        # Reload the whole config from scratch.
        settings, trees, mtimedb = load_emerge_config(trees=trees)
        portdb = trees[settings["ROOT"]]["porttree"].dbapi
        root_config = trees[settings["ROOT"]]["root_config"]

    # Compare the best visible portage version against the installed one
    # so we can recommend upgrading portage first.
    mybestpv = portdb.xmatch("bestmatch-visible",
        portage.const.PORTAGE_PACKAGE_ATOM)
    mypvs = portage.best(
        trees[settings["ROOT"]]["vartree"].dbapi.match(
        portage.const.PORTAGE_PACKAGE_ATOM))

    chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())

    if myaction != "metadata":
        # Run the user's post_sync hook when present and executable.
        if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
            retval = portage.process.spawn(
                [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
                dosyncuri], env=settings.environ())
            if retval != os.EX_OK:
                print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")

    if(mybestpv != mypvs) and not "--quiet" in myopts:
        print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
        print red(" * ")+"that you update portage now, before any other packages are updated."
        print red(" * ")+"To update portage, run 'emerge portage' now."

    display_news_notification(root_config, myopts)
def git_sync_timestamps(settings, portdir):
    """
    Since git doesn't preserve timestamps, synchronize timestamps between
    entries and ebuilds/eclasses. Assume the cache has the correct timestamp
    for a given file as long as the file in the working tree is not modified
    (according to `git diff-index`).
    """
    # NOTE(review): excerpt is truncated -- several try statements,
    # `continue` lines and return statements are not visible here.
    cache_dir = os.path.join(portdir, "metadata", "cache")
    if not os.path.isdir(cache_dir):
    writemsg_level(">>> Synchronizing timestamps...\n")

    from portage.cache.cache_errors import CacheError
    cache_db = settings.load_best_module("portdbapi.metadbmodule")(
        portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
    except CacheError, e:
        writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
            level=logging.ERROR, noiselevel=-1)

    ec_dir = os.path.join(portdir, "eclass")
    # Known eclass names, stripped of the ".eclass" suffix.
    ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
        if f.endswith(".eclass"))
    # (OSError handler header is not visible here)
    writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
        level=logging.ERROR, noiselevel=-1)

    # Ask git which tracked files are modified relative to HEAD; cache
    # timestamps for those files cannot be trusted.
    args = [portage.const.BASH_BINARY, "-c",
        "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
        portage._shell_quote(portdir)]
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    modified_files = set(l.rstrip("\n") for l in proc.stdout)
    # (proc.wait() assignment to rval is not visible here)
    if rval != os.EX_OK:

    modified_eclasses = set(ec for ec in ec_names \
        if os.path.join("eclass", ec + ".eclass") in modified_files)

    updated_ec_mtimes = {}

    for cpv in cache_db:
        cpv_split = portage.catpkgsplit(cpv)
        if cpv_split is None:
            writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)

        cat, pn, ver, rev = cpv_split
        cat, pf = portage.catsplit(cpv)
        relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
        # Locally modified ebuilds keep their working-tree timestamps.
        if relative_eb_path in modified_files:

        cache_entry = cache_db[cpv]
        eb_mtime = cache_entry.get("_mtime_")
        ec_mtimes = cache_entry.get("_eclasses_")
        # (KeyError handler header is not visible here)
        writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
            level=logging.ERROR, noiselevel=-1)
        except CacheError, e:
            writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
                (cpv, e), level=logging.ERROR, noiselevel=-1)

        if eb_mtime is None:
            writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)

        eb_mtime = long(eb_mtime)
        # (ValueError handler header is not visible here)
        writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
            (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)

        if ec_mtimes is None:
            writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)

        # Skip entries that depend on a locally modified eclass.
        if modified_eclasses.intersection(ec_mtimes):

        missing_eclasses = set(ec_mtimes).difference(ec_names)
        if missing_eclasses:
            writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
                (cpv, sorted(missing_eclasses)), level=logging.ERROR,

        eb_path = os.path.join(portdir, relative_eb_path)
        # NOTE(review): os.stat() returns a stat_result here, yet it is
        # compared to the long `eb_mtime` below -- presumably `.st_mtime`
        # (or `[stat.ST_MTIME]`) was intended; verify against upstream.
        current_eb_mtime = os.stat(eb_path)
        # (OSError handler header is not visible here)
        writemsg_level("!!! Missing ebuild: %s\n" % \
            (cpv,), level=logging.ERROR, noiselevel=-1)

        # Reject entries whose eclass mtimes disagree with ones we have
        # already applied for the same eclass.
        inconsistent = False
        for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
            updated_mtime = updated_ec_mtimes.get(ec)
            if updated_mtime is not None and updated_mtime != ec_mtime:
                writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
                    (cpv, ec), level=logging.ERROR, noiselevel=-1)

        if current_eb_mtime != eb_mtime:
            os.utime(eb_path, (eb_mtime, eb_mtime))

        # Restore each referenced eclass's mtime once per eclass.
        for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
            if ec in updated_ec_mtimes:
            ec_path = os.path.join(ec_dir, ec + ".eclass")
            current_mtime = long(os.stat(ec_path).st_mtime)
            if current_mtime != ec_mtime:
                os.utime(ec_path, (ec_mtime, ec_mtime))
            updated_ec_mtimes[ec] = ec_mtime
# Update/regenerate the ebuild metadata cache for each tree in porttrees:
# transfer entries from each tree's pregenerated cache (src_db) into the
# local destination cache (dest_db), validating EAPI and eclass data along
# the way, then prune destination entries for packages that no longer exist.
# A progress bar is shown when stdout is a tty and --quiet is not given.
# NOTE(review): this listing is a sampled excerpt of the original file --
# the embedded original line numbers skip (e.g. 9753 -> 9755), so several
# try:/else: openers and loop headers are not visible below.
9741 def action_metadata(settings, portdb, myopts, porttrees=None):
9742 if porttrees is None:
9743 porttrees = portdb.porttrees
9744 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
# Relax umask while writing cache entries; presumably restored later in a
# part of the function not visible in this excerpt -- verify.
9745 old_umask = os.umask(0002)
9746 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to use a primary system directory as the cache dir, since the
# dead-node pruning at the end of this function deletes entries.
9747 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
9748 "/lib", "/opt", "/proc", "/root", "/sbin",
9749 "/sys", "/tmp", "/usr", "/var"]:
9750 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
9751 "ROOT DIRECTORY ON YOUR SYSTEM."
9752 print >> sys.stderr, \
9753 "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
9755 if not os.path.exists(cachedir):
9756 os.makedirs(cachedir)
# Drop placeholder auxdb keys; the cache modules expect a fixed key tuple.
9758 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
9759 auxdbkeys = tuple(auxdbkeys)
# Per-tree bookkeeping: src_db (pregenerated metadata/cache), dest_db
# (local auxdb), the tree's eclass_db, and valid_nodes -- the set of cpvs
# seen during the transfer loop, used afterwards to prune dead entries.
9761 class TreeData(object):
9762 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
9763 def __init__(self, dest_db, eclass_db, path, src_db):
9764 self.dest_db = dest_db
9765 self.eclass_db = eclass_db
9767 self.src_db = src_db
9768 self.valid_nodes = set()
# Collect a TreeData for every tree that actually has a source cache.
9771 for path in porttrees:
9772 src_db = portdb._pregen_auxdb.get(path)
9773 if src_db is None and \
9774 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
9775 src_db = portdb.metadbmodule(
9776 path, 'metadata/cache', auxdbkeys, readonly=True)
# NOTE(review): the try: opener for this except clause is not visible
# in this excerpt (original line 9777 missing from the listing).
9778 src_db.ec = portdb._repo_info[path].eclass_db
9779 except AttributeError:
9782 if src_db is not None:
9783 porttrees_data.append(TreeData(portdb.auxdb[path],
9784 portdb._repo_info[path].eclass_db, path, src_db))
9786 porttrees = [tree_data.path for tree_data in porttrees_data]
9788 isatty = sys.stdout.isatty()
9789 quiet = not isatty or '--quiet' in myopts
9792 progressBar = portage.output.TermProgressBar()
9793 progressHandler = ProgressHandler()
9794 onProgress = progressHandler.onProgress
9796 progressBar.set(progressHandler.curval, progressHandler.maxval)
9797 progressHandler.display = display
# Keep the progress bar width correct when the terminal is resized.
9798 def sigwinch_handler(signum, frame):
9799 lines, progressBar.term_columns = \
9800 portage.output.get_term_size()
9801 signal.signal(signal.SIGWINCH, sigwinch_handler)
9803 # Temporarily override portdb.porttrees so portdb.cp_all()
9804 # will only return the relevant subset.
9805 portdb_porttrees = portdb.porttrees
9806 portdb.porttrees = porttrees
9808 cp_all = portdb.cp_all()
9810 portdb.porttrees = portdb_porttrees
9813 maxval = len(cp_all)
9814 if onProgress is not None:
9815 onProgress(maxval, curval)
9817 from portage.cache.util import quiet_mirroring
9818 from portage import eapi_is_supported, \
9819 _validate_cache_for_unsupported_eapis
9821 # TODO: Display error messages, but do not interfere with the progress bar.
9823 # 1) erase the progress bar
9824 # 2) show the error message
9825 # 3) redraw the progress bar on a new line
9826 noise = quiet_mirroring()
# Main transfer loop: for each package in each tree, validate the source
# cache entry and copy it into the destination cache when needed.
9829 for tree_data in porttrees_data:
9830 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
9831 tree_data.valid_nodes.add(cpv)
9833 src = tree_data.src_db[cpv]
9835 noise.missing_entry(cpv)
9838 except CacheError, ce:
9839 noise.exception(cpv, ce)
9843 eapi = src.get('EAPI')
# A leading '-' marks an entry previously cached as "unsupported EAPI";
# strip it before re-checking support.
9846 eapi = eapi.lstrip('-')
9847 eapi_supported = eapi_is_supported(eapi)
9848 if not eapi_supported:
9849 if not _validate_cache_for_unsupported_eapis:
9850 noise.misc(cpv, "unable to validate " + \
9851 "cache for EAPI='%s'" % eapi)
9856 dest = tree_data.dest_db[cpv]
9857 except (KeyError, CacheError):
9860 for d in (src, dest):
9861 if d is not None and d.get('EAPI') in ('', '0'):
# An existing dest entry is reusable only when its mtime and eclass
# data match the source exactly; otherwise it is rewritten.
9864 if dest is not None:
9865 if not (dest['_mtime_'] == src['_mtime_'] and \
9866 tree_data.eclass_db.is_eclass_data_valid(
9867 dest['_eclasses_']) and \
9868 set(dest['_eclasses_']) == set(src['_eclasses_'])):
9871 # We don't want to skip the write unless we're really
9872 # sure that the existing cache is identical, so don't
9873 # trust _mtime_ and _eclasses_ alone.
9874 for k in set(chain(src, dest)).difference(
9875 ('_mtime_', '_eclasses_')):
9876 if dest.get(k, '') != src.get(k, ''):
9880 if dest is not None:
9881 # The existing data is valid and identical,
9882 # so there's no need to overwrite it.
9886 inherited = src.get('INHERITED', '')
9887 eclasses = src.get('_eclasses_')
9888 except CacheError, ce:
9889 noise.exception(cpv, ce)
9893 if eclasses is not None:
9894 if not tree_data.eclass_db.is_eclass_data_valid(
9896 noise.eclass_stale(cpv)
9898 inherited = eclasses
9900 inherited = inherited.split()
9902 if tree_data.src_db.complete_eclass_entries and \
9904 noise.corruption(cpv, "missing _eclasses_ field")
9908 # Even if _eclasses_ already exists, replace it with data from
9909 # eclass_cache, in order to insert local eclass paths.
9911 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
9913 # INHERITED contains a non-existent eclass.
9914 noise.eclass_stale(cpv)
9917 if eclasses is None:
9918 noise.eclass_stale(cpv)
9920 src['_eclasses_'] = eclasses
9922 src['_eclasses_'] = {}
# For unsupported EAPIs only a minimal marker entry is written (EAPI
# prefixed with '-', plus mtime/eclass data for later revalidation).
9924 if not eapi_supported:
9926 'EAPI' : '-' + eapi,
9927 '_mtime_' : src['_mtime_'],
9928 '_eclasses_' : src['_eclasses_'],
9932 tree_data.dest_db[cpv] = src
9933 except CacheError, ce:
9934 noise.exception(cpv, ce)
9938 if onProgress is not None:
9939 onProgress(maxval, curval)
9941 if onProgress is not None:
9942 onProgress(maxval, curval)
# Prune destination-cache entries for cpvs not seen in the loop above.
9944 for tree_data in porttrees_data:
9946 dead_nodes = set(tree_data.dest_db.iterkeys())
9947 except CacheError, e:
9948 writemsg_level("Error listing cache entries for " + \
9949 "'%s': %s, continuing...\n" % (tree_data.path, e),
9950 level=logging.ERROR, noiselevel=-1)
9953 dead_nodes.difference_update(tree_data.valid_nodes)
9954 for cpv in dead_nodes:
9956 del tree_data.dest_db[cpv]
9957 except (KeyError, CacheError):
9961 # make sure the final progress is displayed
9962 progressHandler.display()
# Restore the default SIGWINCH handler installed earlier.
9964 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
# Regenerate metadata cache entries for the tree via MetadataRegen,
# logging the operation and returning the scheduler's return code.
# NOTE(review): sampled excerpt -- the try: opener for the except below
# (original line 9974) and original lines 9978-9984 are not visible here.
9969 def action_regen(settings, portdb, max_jobs, max_load):
9970 xterm_titles = "notitles" not in settings.features
9971 emergelog(xterm_titles, " === regen")
9972 #regenerate cache entries
9973 portage.writemsg_stdout("Regenerating cache entries...\n")
# stdin is closed here -- presumably so spawned ebuild processes cannot
# block waiting on it; confirm against the full source.
9975 os.close(sys.stdin.fileno())
9976 except SystemExit, e:
9977 raise # Needed else can't exit
9982 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
9985 portage.writemsg_stdout("done!\n")
9986 return regen.returncode
# Run the pkg_config phase ("config") for exactly one installed package
# atom, optionally prompting with --ask, then run "clean" on success.
# NOTE(review): sampled excerpt -- several original lines are missing
# (e.g. 9991, 9994, 9997-9999, parts of 10003-10037), so some branches
# below appear without their openers.
9988 def action_config(settings, trees, myopts, myfiles):
9989 if len(myfiles) != 1:
9990 print red("!!! config can only take a single package atom at this time\n")
9992 if not is_valid_package_atom(myfiles[0]):
9993 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
9995 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
9996 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
10000 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
10001 except portage.exception.AmbiguousPackageName, e:
10002 # Multiple matches thrown from cpv_expand
10005 print "No packages found.\n"
# With --ask, let the user pick among multiple installed matches.
10007 elif len(pkgs) > 1:
10008 if "--ask" in myopts:
10010 print "Please select a package to configure:"
10014 options.append(str(idx))
10015 print options[-1]+") "+pkg
10017 options.append("X")
10018 idx = userquery("Selection?", options)
10021 pkg = pkgs[int(idx)-1]
10023 print "The following packages available:"
10026 print "\nPlease use a specific atom or the --ask option."
10032 if "--ask" in myopts:
10033 if userquery("Ready to configure "+pkg+"?") == "No":
10036 print "Configuring pkg..."
10038 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
10039 mysettings = portage.config(clone=settings)
10040 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
10041 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): the debug= argument below compares a string setting to the
# int 1 ("" == 1), which is always False, while the 'debug' local above
# correctly compares against "1". Looks like a latent bug -- verify against
# the full source before changing.
10042 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
10044 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
10045 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# Run the "clean" phase only when the config phase succeeded.
10046 if retval == os.EX_OK:
10047 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
10048 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# Print "emerge --info" style diagnostics: portage/toolchain versions,
# system configuration variables, USE flags (split by USE_EXPAND), and --
# for any package atoms given on the command line -- the per-package
# build-time settings and an optional pkg_info() run.
# NOTE(review): sampled excerpt -- many original lines are missing, so
# several loop headers and else branches below are not visible.
10051 def action_info(settings, trees, myopts, myfiles):
10052 print getportageversion(settings["PORTDIR"], settings["ROOT"],
10053 settings.profile_path, settings["CHOST"],
10054 trees[settings["ROOT"]]["vartree"].dbapi)
10056 header_title = "System Settings"
10058 print header_width * "="
10059 print header_title.rjust(int(header_width/2 + len(header_title)/2))
10060 print header_width * "="
10061 print "System uname: "+platform.platform(aliased=1)
10063 lastSync = portage.grabfile(os.path.join(
10064 settings["PORTDIR"], "metadata", "timestamp.chk"))
10065 print "Timestamp of tree:",
# Report distcc/ccache versions; the feature-enabled flag is appended on
# the branches whose bodies are not visible in this excerpt.
10071 output=commands.getstatusoutput("distcc --version")
10073 print str(output[1].split("\n",1)[0]),
10074 if "distcc" in settings.features:
10079 output=commands.getstatusoutput("ccache -V")
10081 print str(output[1].split("\n",1)[0]),
10082 if "ccache" in settings.features:
# Versions of key toolchain packages, plus extras from the profile's
# info_pkgs file.
10087 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
10088 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
10089 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
10090 myvars = portage.util.unique_array(myvars)
10094 if portage.isvalidatom(x):
10095 pkg_matches = trees["/"]["vartree"].dbapi.match(x)
10096 pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
10097 pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
10099 for pn, ver, rev in pkg_matches:
10101 pkgs.append(ver + "-" + rev)
10105 pkgs = ", ".join(pkgs)
10106 print "%-20s %s" % (x+":", pkgs)
10108 print "%-20s %s" % (x+":", "[NOT VALID]")
10110 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# --verbose dumps every config variable; otherwise only the common set
# plus any listed in the profile's info_vars file.
10112 if "--verbose" in myopts:
10113 myvars=settings.keys()
10115 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
10116 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
10117 'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
10118 'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
10120 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
10122 myvars = portage.util.unique_array(myvars)
10123 use_expand = settings.get('USE_EXPAND', '').split()
10125 use_expand_hidden = set(
10126 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
10127 alphabetical_use = '--alphabetical' in myopts
10128 root_config = trees[settings["ROOT"]]['root_config']
10134 print '%s="%s"' % (x, settings[x])
# USE is printed with USE_EXPAND-prefixed flags filtered out; each
# USE_EXPAND variable is then printed separately.
10136 use = set(settings["USE"].split())
10137 for varname in use_expand:
10138 flag_prefix = varname.lower() + "_"
10139 for f in list(use):
10140 if f.startswith(flag_prefix):
10144 print 'USE="%s"' % " ".join(use),
10145 for varname in use_expand:
10146 myval = settings.get(varname)
10148 print '%s="%s"' % (varname, myval),
10151 unset_vars.append(x)
10153 print "Unset: "+", ".join(unset_vars)
10156 if "--debug" in myopts:
10157 for x in dir(portage):
10158 module = getattr(portage, x)
10159 if "cvs_id_string" in dir(module):
10160 print "%s: %s" % (str(x), str(module.cvs_id_string))
10162 # See if we can find any packages installed matching the strings
10163 # passed on the command line
10165 vardb = trees[settings["ROOT"]]["vartree"].dbapi
10166 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10168 mypkgs.extend(vardb.match(x))
10170 # If some packages were found...
10172 # Get our global settings (we only print stuff if it varies from
10173 # the current config)
10174 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
10175 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
10176 auxkeys.append('DEFINED_PHASES')
10178 pkgsettings = portage.config(clone=settings)
10180 # Loop through each package
10181 # Only print settings if they differ from global settings
10182 header_title = "Package Settings"
10183 print header_width * "="
10184 print header_title.rjust(int(header_width/2 + len(header_title)/2))
10185 print header_width * "="
10186 from portage.output import EOutput
10189 # Get all package specific variables
10190 metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
10191 pkg = Package(built=True, cpv=cpv,
10192 installed=True, metadata=izip(Package.metadata_keys,
10193 (metadata.get(x, '') for x in Package.metadata_keys)),
10194 root_config=root_config, type_name='installed')
10196 print "\n%s was built with the following:" % \
10197 colorize("INFORM", str(pkg.cpv))
10199 pkgsettings.setcpv(pkg)
10200 forced_flags = set(chain(pkgsettings.useforce,
10201 pkgsettings.usemask))
10202 use = set(pkg.use.enabled)
10203 use.discard(pkgsettings.get('ARCH'))
10204 use_expand_flags = set()
# Partition the package's flags into per-USE_EXPAND-variable enabled and
# disabled lists, keyed by the upper-cased variable name.
10207 for varname in use_expand:
10208 flag_prefix = varname.lower() + "_"
10210 if f.startswith(flag_prefix):
10211 use_expand_flags.add(f)
10212 use_enabled.setdefault(
10213 varname.upper(), []).append(f[len(flag_prefix):])
10215 for f in pkg.iuse.all:
10216 if f.startswith(flag_prefix):
10217 use_expand_flags.add(f)
10219 use_disabled.setdefault(
10220 varname.upper(), []).append(f[len(flag_prefix):])
10222 var_order = set(use_enabled)
10223 var_order.update(use_disabled)
10224 var_order = sorted(var_order)
10225 var_order.insert(0, 'USE')
10226 use.difference_update(use_expand_flags)
10227 use_enabled['USE'] = list(use)
10228 use_disabled['USE'] = []
10230 for f in pkg.iuse.all:
10231 if f not in use and \
10232 f not in use_expand_flags:
10233 use_disabled['USE'].append(f)
10235 for varname in var_order:
10236 if varname in use_expand_hidden:
10239 for f in use_enabled.get(varname, []):
10240 flags.append(UseFlagDisplay(f, True, f in forced_flags))
10241 for f in use_disabled.get(varname, []):
10242 flags.append(UseFlagDisplay(f, False, f in forced_flags))
# --alphabetical interleaves enabled/disabled flags; the default sort
# keeps them separated.
10243 if alphabetical_use:
10244 flags.sort(key=UseFlagDisplay.sort_combined)
10246 flags.sort(key=UseFlagDisplay.sort_separated)
10247 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
# Only show build-time variables that differ from the current config.
10250 for myvar in mydesiredvars:
10251 if metadata[myvar].split() != settings.get(myvar, '').split():
10252 print "%s=\"%s\"" % (myvar, metadata[myvar])
# Run pkg_info() only for packages whose ebuild defines that phase.
10255 if metadata['DEFINED_PHASES']:
10256 if 'info' not in metadata['DEFINED_PHASES'].split():
10259 print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
10260 ebuildpath = vardb.findname(pkg.cpv)
10261 if not ebuildpath or not os.path.exists(ebuildpath):
10262 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
# NOTE(review): as in action_config, the debug= argument compares a string
# to the int 1 and is therefore always False -- verify against full source.
10264 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
10265 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
10266 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
# Run a tree search for each term in myfiles via the search class and
# print the accumulated results.
# NOTE(review): sampled excerpt -- original lines 10270, 10272, 10278 and
# 10282 are missing from this listing (e.g. the guard branch for empty
# myfiles and the try: opener for the re.error handler).
10269 def action_search(root_config, myopts, myfiles, spinner):
10271 print "emerge: no search terms provided."
10273 searchinstance = search(root_config,
10274 spinner, "--searchdesc" in myopts,
10275 "--quiet" not in myopts, "--usepkg" in myopts,
10276 "--usepkgonly" in myopts)
10277 for mysearch in myfiles:
10279 searchinstance.execute(mysearch)
10280 except re.error, comment:
10281 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
10283 searchinstance.output()
# Validate command-line arguments (package atoms or absolute file paths)
# for unmerge-type actions, resolve file paths to their owning packages,
# then dispatch to unmerge(), action_deselect() or action_depclean().
# NOTE(review): sampled excerpt -- original line numbers skip throughout,
# so some try openers, loop headers and else branches are not visible.
10285 def action_uninstall(settings, trees, ldpath_mtimes,
10286 opts, action, files, spinner):
10288 # For backward compat, some actions do not require leading '='.
10289 ignore_missing_eq = action in ('clean', 'unmerge')
10290 root = settings['ROOT']
10291 vardb = trees[root]['vartree'].dbapi
10295 # Ensure atoms are valid before calling unmerge().
10296 # For backward compat, leading '=' is not required.
10298 if is_valid_package_atom(x) or \
10299 (ignore_missing_eq and is_valid_package_atom('=' + x)):
10302 valid_atoms.append(
10303 portage.dep_expand(x, mydb=vardb, settings=settings))
10304 except portage.exception.AmbiguousPackageName, e:
10305 msg = "The short ebuild name \"" + x + \
10306 "\" is ambiguous. Please specify " + \
10307 "one of the following " + \
10308 "fully-qualified ebuild names instead:"
10309 for line in textwrap.wrap(msg, 70):
10310 writemsg_level("!!! %s\n" % (line,),
10311 level=logging.ERROR, noiselevel=-1)
10313 writemsg_level(" %s\n" % colorize("INFORM", i),
10314 level=logging.ERROR, noiselevel=-1)
10315 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
# Absolute paths must live under $ROOT; they are resolved to owning
# packages in a single batched iter_owners() call below.
10318 elif x.startswith(os.sep):
10319 if not x.startswith(root):
10320 writemsg_level(("!!! '%s' does not start with" + \
10321 " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
10323 # Queue these up since it's most efficient to handle
10324 # multiple files in a single iter_owners() call.
10325 lookup_owners.append(x)
10329 msg.append("'%s' is not a valid package atom." % (x,))
10330 msg.append("Please check ebuild(5) for full details.")
10331 writemsg_level("".join("!!! %s\n" % line for line in msg),
10332 level=logging.ERROR, noiselevel=-1)
10336 relative_paths = []
10337 search_for_multiple = False
10338 if len(lookup_owners) > 1:
10339 search_for_multiple = True
10341 for x in lookup_owners:
10342 if not search_for_multiple and os.path.isdir(x):
10343 search_for_multiple = True
10344 relative_paths.append(x[len(root):])
10347 for pkg, relative_path in \
10348 vardb._owners.iter_owners(relative_paths):
10349 owners.add(pkg.mycpv)
10350 if not search_for_multiple:
# Build slot atoms for the owning packages; fall back to a plain cp
# atom when the installed package has no SLOT recorded.
10355 slot = vardb.aux_get(cpv, ['SLOT'])[0]
10357 # portage now masks packages with missing slot, but it's
10358 # possible that one was installed by an older version
10359 atom = portage.cpv_getkey(cpv)
10361 atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
10362 valid_atoms.append(portage.dep.Atom(atom))
10364 writemsg_level(("!!! '%s' is not claimed " + \
10365 "by any package.\n") % lookup_owners[0],
10366 level=logging.WARNING, noiselevel=-1)
10368 if files and not valid_atoms:
# Dispatch: clean/unmerge (and prune with --nodeps) go straight to
# unmerge(); deselect and the remaining cases use dedicated actions.
10371 if action in ('clean', 'unmerge') or \
10372 (action == 'prune' and "--nodeps" in opts):
10373 # When given a list of atoms, unmerge them in the order given.
10374 ordered = action == 'unmerge'
10375 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
10376 valid_atoms, ldpath_mtimes, ordered=ordered)
10378 elif action == 'deselect':
10379 rval = action_deselect(settings, trees, opts, valid_atoms)
10381 rval = action_depclean(settings, trees, ldpath_mtimes,
10382 opts, action, valid_atoms, spinner)
# Remove atoms matching the given arguments from the "world" favorites
# set, expanding plain atoms into slot atoms of installed matches, with
# --ask confirmation and --pretend dry-run support.
# NOTE(review): sampled excerpt -- missing original lines include the
# loop header that drives the vardb.match() expansion and the try/finally
# presumably guarding the world_set lock; verify against full source.
10386 def action_deselect(settings, trees, opts, atoms):
10387 root_config = trees[settings['ROOT']]['root_config']
10388 world_set = root_config.sets['world']
# Guard: some set backends are read-only; bail out early if so.
10389 if not hasattr(world_set, 'update'):
10390 writemsg_level("World set does not appear to be mutable.\n",
10391 level=logging.ERROR, noiselevel=-1)
10394 vardb = root_config.trees['vartree'].dbapi
10395 expanded_atoms = set(atoms)
10396 from portage.dep import Atom
10398 for cpv in vardb.match(atom):
10399 slot, = vardb.aux_get(cpv, ['SLOT'])
10402 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
10404 pretend = '--pretend' in opts
# Lock the world file for the real (non-pretend) run when supported.
10406 if not pretend and hasattr(world_set, 'lock'):
10410 discard_atoms = set()
10412 for atom in world_set:
10413 if not isinstance(atom, Atom):
# An argument atom matches a world atom unless the argument is
# slot-qualified and the world atom is not.
10416 for arg_atom in expanded_atoms:
10417 if arg_atom.intersects(atom) and \
10418 not (arg_atom.slot and not atom.slot):
10419 discard_atoms.add(atom)
10422 for atom in sorted(discard_atoms):
10423 print ">>> Removing %s from \"world\" favorites file..." % \
10424 colorize("INFORM", str(atom))
10426 if '--ask' in opts:
10427 prompt = "Would you like to remove these " + \
10428 "packages from your world favorites?"
10429 if userquery(prompt) == 'No':
# Rewrite the world set without the discarded atoms.
10432 remaining = set(world_set)
10433 remaining.difference_update(discard_atoms)
10435 world_set.replace(remaining)
10437 print ">>> No matching atoms found in \"world\" favorites file..."
10443 def action_depclean(settings, trees, ldpath_mtimes,
10444 myopts, action, myfiles, spinner):
10445 # Kill packages that aren't explicitly merged or are required as a
10446 # dependency of another package. World file is explicit.
10448 # Global depclean or prune operations are not very safe when there are
10449 # missing dependencies since it's unknown how badly incomplete
10450 # the dependency graph is, and we might accidentally remove packages
10451 # that should have been pulled into the graph. On the other hand, it's
10452 # relatively safe to ignore missing deps when only asked to remove
10453 # specific packages.
10454 allow_missing_deps = len(myfiles) > 0
10457 msg.append("Always study the list of packages to be cleaned for any obvious\n")
10458 msg.append("mistakes. Packages that are part of the world set will always\n")
10459 msg.append("be kept. They can be manually added to this set with\n")
10460 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
10461 msg.append("package.provided (see portage(5)) will be removed by\n")
10462 msg.append("depclean, even if they are part of the world set.\n")
10464 msg.append("As a safety measure, depclean will not remove any packages\n")
10465 msg.append("unless *all* required dependencies have been resolved. As a\n")
10466 msg.append("consequence, it is often necessary to run %s\n" % \
10467 good("`emerge --update"))
10468 msg.append(good("--newuse --deep @system @world`") + \
10469 " prior to depclean.\n")
10471 if action == "depclean" and "--quiet" not in myopts and not myfiles:
10472 portage.writemsg_stdout("\n")
10474 portage.writemsg_stdout(colorize("WARN", " * ") + x)
10476 xterm_titles = "notitles" not in settings.features
10477 myroot = settings["ROOT"]
10478 root_config = trees[myroot]["root_config"]
10479 getSetAtoms = root_config.setconfig.getSetAtoms
10480 vardb = trees[myroot]["vartree"].dbapi
10481 deselect = myopts.get('--deselect') != 'n'
10483 required_set_names = ("system", "world")
10487 for s in required_set_names:
10488 required_sets[s] = InternalPackageSet(
10489 initial_atoms=getSetAtoms(s))
10492 # When removing packages, use a temporary version of world
10493 # which excludes packages that are intended to be eligible for
10495 world_temp_set = required_sets["world"]
10496 system_set = required_sets["system"]
10498 if not system_set or not world_temp_set:
10501 writemsg_level("!!! You have no system list.\n",
10502 level=logging.ERROR, noiselevel=-1)
10504 if not world_temp_set:
10505 writemsg_level("!!! You have no world file.\n",
10506 level=logging.WARNING, noiselevel=-1)
10508 writemsg_level("!!! Proceeding is likely to " + \
10509 "break your installation.\n",
10510 level=logging.WARNING, noiselevel=-1)
10511 if "--pretend" not in myopts:
10512 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
10514 if action == "depclean":
10515 emergelog(xterm_titles, " >>> depclean")
10518 args_set = InternalPackageSet()
10520 args_set.update(myfiles)
10521 matched_packages = False
10524 matched_packages = True
10526 if not matched_packages:
10527 writemsg_level(">>> No packages selected for removal by %s\n" % \
10531 writemsg_level("\nCalculating dependencies ")
10532 resolver_params = create_depgraph_params(myopts, "remove")
10533 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
10534 vardb = resolver.trees[myroot]["vartree"].dbapi
10536 if action == "depclean":
10541 world_temp_set.clear()
10543 # Pull in everything that's installed but not matched
10544 # by an argument atom since we don't want to clean any
10545 # package if something depends on it.
10550 if args_set.findAtomForPackage(pkg) is None:
10551 world_temp_set.add("=" + pkg.cpv)
10553 except portage.exception.InvalidDependString, e:
10554 show_invalid_depstring_notice(pkg,
10555 pkg.metadata["PROVIDE"], str(e))
10557 world_temp_set.add("=" + pkg.cpv)
10560 elif action == "prune":
10563 world_temp_set.clear()
10565 # Pull in everything that's installed since we don't
10566 # to prune a package if something depends on it.
10567 world_temp_set.update(vardb.cp_all())
10571 # Try to prune everything that's slotted.
10572 for cp in vardb.cp_all():
10573 if len(vardb.cp_list(cp)) > 1:
10576 # Remove atoms from world that match installed packages
10577 # that are also matched by argument atoms, but do not remove
10578 # them if they match the highest installed version.
10581 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
10582 if not pkgs_for_cp or pkg not in pkgs_for_cp:
10583 raise AssertionError("package expected in matches: " + \
10584 "cp = %s, cpv = %s matches = %s" % \
10585 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
10587 highest_version = pkgs_for_cp[-1]
10588 if pkg == highest_version:
10589 # pkg is the highest version
10590 world_temp_set.add("=" + pkg.cpv)
10593 if len(pkgs_for_cp) <= 1:
10594 raise AssertionError("more packages expected: " + \
10595 "cp = %s, cpv = %s matches = %s" % \
10596 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
10599 if args_set.findAtomForPackage(pkg) is None:
10600 world_temp_set.add("=" + pkg.cpv)
10602 except portage.exception.InvalidDependString, e:
10603 show_invalid_depstring_notice(pkg,
10604 pkg.metadata["PROVIDE"], str(e))
10606 world_temp_set.add("=" + pkg.cpv)
10610 for s, package_set in required_sets.iteritems():
10611 set_atom = SETPREFIX + s
10612 set_arg = SetArg(arg=set_atom, set=package_set,
10613 root_config=resolver.roots[myroot])
10614 set_args[s] = set_arg
10615 for atom in set_arg.set:
10616 resolver._dep_stack.append(
10617 Dependency(atom=atom, root=myroot, parent=set_arg))
10618 resolver.digraph.add(set_arg, None)
10620 success = resolver._complete_graph()
10621 writemsg_level("\b\b... done!\n")
10623 resolver.display_problems()
10628 def unresolved_deps():
10630 unresolvable = set()
10631 for dep in resolver._initially_unsatisfied_deps:
10632 if isinstance(dep.parent, Package) and \
10633 (dep.priority > UnmergeDepPriority.SOFT):
10634 unresolvable.add((dep.atom, dep.parent.cpv))
10636 if not unresolvable:
10639 if unresolvable and not allow_missing_deps:
10640 prefix = bad(" * ")
10642 msg.append("Dependencies could not be completely resolved due to")
10643 msg.append("the following required packages not being installed:")
10645 for atom, parent in unresolvable:
10646 msg.append(" %s pulled in by:" % (atom,))
10647 msg.append(" %s" % (parent,))
10649 msg.append("Have you forgotten to run " + \
10650 good("`emerge --update --newuse --deep @system @world`") + " prior")
10651 msg.append(("to %s? It may be necessary to manually " + \
10652 "uninstall packages that no longer") % action)
10653 msg.append("exist in the portage tree since " + \
10654 "it may not be possible to satisfy their")
10655 msg.append("dependencies. Also, be aware of " + \
10656 "the --with-bdeps option that is documented")
10657 msg.append("in " + good("`man emerge`") + ".")
10658 if action == "prune":
10660 msg.append("If you would like to ignore " + \
10661 "dependencies then use %s." % good("--nodeps"))
10662 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
10663 level=logging.ERROR, noiselevel=-1)
10667 if unresolved_deps():
10670 graph = resolver.digraph.copy()
10671 required_pkgs_total = 0
10673 if isinstance(node, Package):
10674 required_pkgs_total += 1
10676 def show_parents(child_node):
10677 parent_nodes = graph.parent_nodes(child_node)
10678 if not parent_nodes:
10679 # With --prune, the highest version can be pulled in without any
10680 # real parent since all installed packages are pulled in. In that
10681 # case there's nothing to show here.
10684 for node in parent_nodes:
10685 parent_strs.append(str(getattr(node, "cpv", node)))
10688 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
10689 for parent_str in parent_strs:
10690 msg.append(" %s\n" % (parent_str,))
10692 portage.writemsg_stdout("".join(msg), noiselevel=-1)
10694 def cmp_pkg_cpv(pkg1, pkg2):
10695 """Sort Package instances by cpv."""
10696 if pkg1.cpv > pkg2.cpv:
10698 elif pkg1.cpv == pkg2.cpv:
10703 def create_cleanlist():
10704 pkgs_to_remove = []
10706 if action == "depclean":
10709 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
10712 arg_atom = args_set.findAtomForPackage(pkg)
10713 except portage.exception.InvalidDependString:
10714 # this error has already been displayed by now
10718 if pkg not in graph:
10719 pkgs_to_remove.append(pkg)
10720 elif "--verbose" in myopts:
10724 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
10725 if pkg not in graph:
10726 pkgs_to_remove.append(pkg)
10727 elif "--verbose" in myopts:
10730 elif action == "prune":
10731 # Prune really uses all installed instead of world. It's not
10732 # a real reverse dependency so don't display it as such.
10733 graph.remove(set_args["world"])
10735 for atom in args_set:
10736 for pkg in vardb.match_pkgs(atom):
10737 if pkg not in graph:
10738 pkgs_to_remove.append(pkg)
10739 elif "--verbose" in myopts:
10742 if not pkgs_to_remove:
10744 ">>> No packages selected for removal by %s\n" % action)
10745 if "--verbose" not in myopts:
10747 ">>> To see reverse dependencies, use %s\n" % \
10749 if action == "prune":
10751 ">>> To ignore dependencies, use %s\n" % \
10754 return pkgs_to_remove
10756 cleanlist = create_cleanlist()
10759 clean_set = set(cleanlist)
10761 # Check if any of these package are the sole providers of libraries
10762 # with consumers that have not been selected for removal. If so, these
10763 # packages and any dependencies need to be added to the graph.
10764 real_vardb = trees[myroot]["vartree"].dbapi
10765 linkmap = real_vardb.linkmap
10766 liblist = linkmap.listLibraryObjects()
10767 consumer_cache = {}
10768 provider_cache = {}
10772 writemsg_level(">>> Checking for lib consumers...\n")
10774 for pkg in cleanlist:
10775 pkg_dblink = real_vardb._dblink(pkg.cpv)
10776 provided_libs = set()
10778 for lib in liblist:
10779 if pkg_dblink.isowner(lib, myroot):
10780 provided_libs.add(lib)
10782 if not provided_libs:
10786 for lib in provided_libs:
10787 lib_consumers = consumer_cache.get(lib)
10788 if lib_consumers is None:
10789 lib_consumers = linkmap.findConsumers(lib)
10790 consumer_cache[lib] = lib_consumers
10792 consumers[lib] = lib_consumers
10797 for lib, lib_consumers in consumers.items():
10798 for consumer_file in list(lib_consumers):
10799 if pkg_dblink.isowner(consumer_file, myroot):
10800 lib_consumers.remove(consumer_file)
10801 if not lib_consumers:
10807 for lib, lib_consumers in consumers.iteritems():
10809 soname = soname_cache.get(lib)
10811 soname = linkmap.getSoname(lib)
10812 soname_cache[lib] = soname
10814 consumer_providers = []
10815 for lib_consumer in lib_consumers:
10816 providers = provider_cache.get(lib)
10817 if providers is None:
10818 providers = linkmap.findProviders(lib_consumer)
10819 provider_cache[lib_consumer] = providers
10820 if soname not in providers:
10821 # Why does this happen?
10823 consumer_providers.append(
10824 (lib_consumer, providers[soname]))
10826 consumers[lib] = consumer_providers
10828 consumer_map[pkg] = consumers
10832 search_files = set()
10833 for consumers in consumer_map.itervalues():
10834 for lib, consumer_providers in consumers.iteritems():
10835 for lib_consumer, providers in consumer_providers:
10836 search_files.add(lib_consumer)
10837 search_files.update(providers)
10839 writemsg_level(">>> Assigning files to packages...\n")
10840 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
10842 for pkg, consumers in consumer_map.items():
10843 for lib, consumer_providers in consumers.items():
10844 lib_consumers = set()
10846 for lib_consumer, providers in consumer_providers:
10847 owner_set = file_owners.get(lib_consumer)
10848 provider_dblinks = set()
10849 provider_pkgs = set()
10851 if len(providers) > 1:
10852 for provider in providers:
10853 provider_set = file_owners.get(provider)
10854 if provider_set is not None:
10855 provider_dblinks.update(provider_set)
10857 if len(provider_dblinks) > 1:
10858 for provider_dblink in provider_dblinks:
10859 pkg_key = ("installed", myroot,
10860 provider_dblink.mycpv, "nomerge")
10861 if pkg_key not in clean_set:
10862 provider_pkgs.add(vardb.get(pkg_key))
10867 if owner_set is not None:
10868 lib_consumers.update(owner_set)
10870 for consumer_dblink in list(lib_consumers):
10871 if ("installed", myroot, consumer_dblink.mycpv,
10872 "nomerge") in clean_set:
10873 lib_consumers.remove(consumer_dblink)
10877 consumers[lib] = lib_consumers
10881 del consumer_map[pkg]
10884 # TODO: Implement a package set for rebuilding consumer packages.
10886 msg = "In order to avoid breakage of link level " + \
10887 "dependencies, one or more packages will not be removed. " + \
10888 "This can be solved by rebuilding " + \
10889 "the packages that pulled them in."
10891 prefix = bad(" * ")
10892 from textwrap import wrap
10893 writemsg_level("".join(prefix + "%s\n" % line for \
10894 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
10897 for pkg, consumers in consumer_map.iteritems():
10898 unique_consumers = set(chain(*consumers.values()))
10899 unique_consumers = sorted(consumer.mycpv \
10900 for consumer in unique_consumers)
10902 msg.append(" %s pulled in by:" % (pkg.cpv,))
10903 for consumer in unique_consumers:
10904 msg.append(" %s" % (consumer,))
10906 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
10907 level=logging.WARNING, noiselevel=-1)
10909 # Add lib providers to the graph as children of lib consumers,
10910 # and also add any dependencies pulled in by the provider.
10911 writemsg_level(">>> Adding lib providers to graph...\n")
10913 for pkg, consumers in consumer_map.iteritems():
10914 for consumer_dblink in set(chain(*consumers.values())):
10915 consumer_pkg = vardb.get(("installed", myroot,
10916 consumer_dblink.mycpv, "nomerge"))
10917 if not resolver._add_pkg(pkg,
10918 Dependency(parent=consumer_pkg,
10919 priority=UnmergeDepPriority(runtime=True),
10921 resolver.display_problems()
10924 writemsg_level("\nCalculating dependencies ")
10925 success = resolver._complete_graph()
10926 writemsg_level("\b\b... done!\n")
10927 resolver.display_problems()
10930 if unresolved_deps():
10933 graph = resolver.digraph.copy()
10934 required_pkgs_total = 0
10936 if isinstance(node, Package):
10937 required_pkgs_total += 1
10938 cleanlist = create_cleanlist()
10941 clean_set = set(cleanlist)
10943 # Use a topological sort to create an unmerge order such that
10944 # each package is unmerged before it's dependencies. This is
10945 # necessary to avoid breaking things that may need to run
10946 # during pkg_prerm or pkg_postrm phases.
10948 # Create a new graph to account for dependencies between the
10949 # packages being unmerged.
10953 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
10954 runtime = UnmergeDepPriority(runtime=True)
10955 runtime_post = UnmergeDepPriority(runtime_post=True)
10956 buildtime = UnmergeDepPriority(buildtime=True)
10958 "RDEPEND": runtime,
10959 "PDEPEND": runtime_post,
10960 "DEPEND": buildtime,
10963 for node in clean_set:
10964 graph.add(node, None)
10966 node_use = node.metadata["USE"].split()
10967 for dep_type in dep_keys:
10968 depstr = node.metadata[dep_type]
10972 portage.dep._dep_check_strict = False
10973 success, atoms = portage.dep_check(depstr, None, settings,
10974 myuse=node_use, trees=resolver._graph_trees,
10977 portage.dep._dep_check_strict = True
10979 # Ignore invalid deps of packages that will
10980 # be uninstalled anyway.
10983 priority = priority_map[dep_type]
10985 if not isinstance(atom, portage.dep.Atom):
10986 # Ignore invalid atoms returned from dep_check().
10990 matches = vardb.match_pkgs(atom)
10993 for child_node in matches:
10994 if child_node in clean_set:
10995 graph.add(child_node, node, priority=priority)
10998 if len(graph.order) == len(graph.root_nodes()):
10999 # If there are no dependencies between packages
11000 # let unmerge() group them by cat/pn.
11002 cleanlist = [pkg.cpv for pkg in graph.order]
11004 # Order nodes from lowest to highest overall reference count for
11005 # optimal root node selection.
11006 node_refcounts = {}
11007 for node in graph.order:
11008 node_refcounts[node] = len(graph.parent_nodes(node))
11009 def cmp_reference_count(node1, node2):
11010 return node_refcounts[node1] - node_refcounts[node2]
11011 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
11013 ignore_priority_range = [None]
11014 ignore_priority_range.extend(
11015 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
11016 while not graph.empty():
11017 for ignore_priority in ignore_priority_range:
11018 nodes = graph.root_nodes(ignore_priority=ignore_priority)
11022 raise AssertionError("no root nodes")
11023 if ignore_priority is not None:
11024 # Some deps have been dropped due to circular dependencies,
11025 # so only pop one node in order do minimize the number that
11030 cleanlist.append(node.cpv)
11032 unmerge(root_config, myopts, "unmerge", cleanlist,
11033 ldpath_mtimes, ordered=ordered)
11035 if action == "prune":
11038 if not cleanlist and "--quiet" in myopts:
11041 print "Packages installed: "+str(len(vardb.cpv_all()))
11042 print "Packages in world: " + \
11043 str(len(root_config.sets["world"].getAtoms()))
11044 print "Packages in system: " + \
11045 str(len(root_config.sets["system"].getAtoms()))
11046 print "Required packages: "+str(required_pkgs_total)
11047 if "--pretend" in myopts:
11048 print "Number to remove: "+str(len(cleanlist))
11050 print "Number removed: "+str(len(cleanlist))
# NOTE(review): numbered listing with missing source lines (gaps in the
# embedded numbering, e.g. 11053/11056/11058-059); code kept verbatim,
# comments only added.  The docstring delimiters and the definition of
# skip_masked (used at 11068) are on missing lines -- confirm against the
# full source.
11052 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
11054 Construct a depgraph for the given resume list. This will raise
11055 PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
11057 @returns: (success, depgraph, dropped_tasks)
11060 skip_unsatisfied = True
11061 mergelist = mtimedb["resume"]["mergelist"]
11062 dropped_tasks = set()
# Build a fresh depgraph and try to load the saved resume command into it.
11064 mydepgraph = depgraph(settings, trees,
11065 myopts, myparams, spinner)
11067 success = mydepgraph.loadResumeCommand(mtimedb["resume"],
11068 skip_masked=skip_masked)
# When the resume list contains unsatisfied entries, drop the offending
# parents (and retry -- the enclosing retry loop header is on a missing
# line, presumably a while loop).
11069 except depgraph.UnsatisfiedResumeDep, e:
11070 if not skip_unsatisfied:
11073 graph = mydepgraph.digraph
# Seed the worklist with the direct parents of unsatisfied deps, then
# propagate upward: any merge/nomerge parent whose soft-ignored child set
# still contains a dropped pkg becomes unsatisfied too.
11074 unsatisfied_parents = dict((dep.parent, dep.parent) \
11075 for dep in e.value)
11076 traversed_nodes = set()
11077 unsatisfied_stack = list(unsatisfied_parents)
11078 while unsatisfied_stack:
11079 pkg = unsatisfied_stack.pop()
11080 if pkg in traversed_nodes:
11082 traversed_nodes.add(pkg)
11084 # If this package was pulled in by a parent
11085 # package scheduled for merge, removing this
11086 # package may cause the parent package's
11087 # dependency to become unsatisfied.
11088 for parent_node in graph.parent_nodes(pkg):
11089 if not isinstance(parent_node, Package) \
11090 or parent_node.operation not in ("merge", "nomerge"):
# NOTE(review): the assignment target for this child_nodes() call
# (presumably "unsatisfied =", line 11091-092) is missing from the listing.
11093 graph.child_nodes(parent_node,
11094 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
11095 if pkg in unsatisfied:
11096 unsatisfied_parents[parent_node] = parent_node
11097 unsatisfied_stack.append(parent_node)
# Remove every unsatisfied parent from the serialized mergelist
# (mergelist entries are 4-element lists; parents are compared as tuples).
11099 pruned_mergelist = []
11100 for x in mergelist:
11101 if isinstance(x, list) and \
11102 tuple(x) not in unsatisfied_parents:
11103 pruned_mergelist.append(x)
11105 # If the mergelist doesn't shrink then this loop is infinite.
11106 if len(pruned_mergelist) == len(mergelist):
11107 # This happens if a package can't be dropped because
11108 # it's already installed, but it has unsatisfied PDEPEND.
11110 mergelist[:] = pruned_mergelist
11112 # Exclude installed packages that have been removed from the graph due
11113 # to failure to build/install runtime dependencies after the dependent
11114 # package has already been installed.
11115 dropped_tasks.update(pkg for pkg in \
11116 unsatisfied_parents if pkg.operation != "nomerge")
11117 mydepgraph.break_refs(unsatisfied_parents)
# Release references so the retry starts from a clean slate.
11119 del e, graph, traversed_nodes, \
11120 unsatisfied_parents, unsatisfied_stack
11124 return (success, mydepgraph, dropped_tasks)
# NOTE(review): numbered listing with many missing source lines; code kept
# verbatim, comments only added.  Drives the main emerge build/merge flow:
# validate resume data, build a depgraph (fresh or resumed), display it,
# prompt the user, then hand the merge list to the Scheduler.
11126 def action_build(settings, trees, mtimedb,
11127 myopts, myaction, myfiles, spinner):
11129 # validate the state of the resume data
11130 # so that we can make assumptions later.
11131 for k in ("resume", "resume_backup"):
11132 if k not in mtimedb:
# NOTE(review): the branch bodies that discard invalid resume data
# (e.g. lines 11133/11136-137/11140-141) are missing from this listing.
11134 resume_data = mtimedb[k]
11135 if not isinstance(resume_data, dict):
11138 mergelist = resume_data.get("mergelist")
11139 if not isinstance(mergelist, list):
# Each mergelist entry must be a 4-tuple-shaped list:
# (pkg_type, pkg_root, pkg_key, pkg_action).
11142 for x in mergelist:
11143 if not (isinstance(x, list) and len(x) == 4):
11145 pkg_type, pkg_root, pkg_key, pkg_action = x
11146 if pkg_root not in trees:
11147 # Current $ROOT setting differs,
11148 # so the list must be stale.
11154 resume_opts = resume_data.get("myopts")
11155 if not isinstance(resume_opts, (dict, list)):
11158 favorites = resume_data.get("favorites")
11159 if not isinstance(favorites, list):
# --resume: promote resume_backup if needed and fold the saved options
# into myopts (current command-line options win).
11164 if "--resume" in myopts and \
11165 ("resume" in mtimedb or
11166 "resume_backup" in mtimedb):
11168 if "resume" not in mtimedb:
11169 mtimedb["resume"] = mtimedb["resume_backup"]
11170 del mtimedb["resume_backup"]
11172 # "myopts" is a list for backward compatibility.
11173 resume_opts = mtimedb["resume"].get("myopts", [])
11174 if isinstance(resume_opts, list):
11175 resume_opts = dict((k,True) for k in resume_opts)
# These options are one-shot/interactive and must not be replayed.
11176 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
11177 resume_opts.pop(opt, None)
11179 # Current options always override resume_opts.
11180 resume_opts.update(myopts)
11182 myopts.update(resume_opts)
11184 if "--debug" in myopts:
11185 writemsg_level("myopts %s\n" % (myopts,))
11187 # Adjust config according to options of the command being resumed.
11188 for myroot in trees:
11189 mysettings = trees[myroot]["vartree"].settings
11190 mysettings.unlock()
11191 adjust_config(myopts, mysettings)
11193 del myroot, mysettings
11195 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently tested option flags as locals.
11198 buildpkgonly = "--buildpkgonly" in myopts
11199 pretend = "--pretend" in myopts
11200 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
11201 ask = "--ask" in myopts
11202 nodeps = "--nodeps" in myopts
11203 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
11204 tree = "--tree" in myopts
11205 if nodeps and tree:
11207 del myopts["--tree"]
11208 portage.writemsg(colorize("WARN", " * ") + \
11209 "--tree is broken with --nodeps. Disabling...\n")
11210 debug = "--debug" in myopts
11211 verbose = "--verbose" in myopts
11212 quiet = "--quiet" in myopts
11213 if pretend or fetchonly:
11214 # make the mtimedb readonly
11215 mtimedb.filename = None
# Warn that auto-digest can hide Manifest corruption.
11216 if '--digest' in myopts or 'digest' in settings.features:
11217 if '--digest' in myopts:
11218 msg = "The --digest option"
11220 msg = "The FEATURES=digest setting"
11222 msg += " can prevent corruption from being" + \
11223 " noticed. The `repoman manifest` command is the preferred" + \
11224 " way to generate manifests and it is capable of doing an" + \
11225 " entire repository or category at once."
11226 prefix = bad(" * ")
11227 writemsg(prefix + "\n")
11228 from textwrap import wrap
11229 for line in wrap(msg, 72):
11230 writemsg("%s%s\n" % (prefix, line))
11231 writemsg(prefix + "\n")
# Banner describing what kind of list is about to be shown.  NOTE(review):
# the lines assigning the local "action" used below are missing here.
11233 if "--quiet" not in myopts and \
11234 ("--pretend" in myopts or "--ask" in myopts or \
11235 "--tree" in myopts or "--verbose" in myopts):
11237 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
11239 elif "--buildpkgonly" in myopts:
11243 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
11245 print darkgreen("These are the packages that would be %s, in reverse order:") % action
11249 print darkgreen("These are the packages that would be %s, in order:") % action
11252 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
11253 if not show_spinner:
11254 spinner.update = spinner.update_quiet
# Resume path: optionally honor --skipfirst, then rebuild the depgraph via
# resume_depgraph() and report failures verbosely.
11257 favorites = mtimedb["resume"].get("favorites")
11258 if not isinstance(favorites, list):
11262 print "Calculating dependencies ",
11263 myparams = create_depgraph_params(myopts, myaction)
11265 resume_data = mtimedb["resume"]
11266 mergelist = resume_data["mergelist"]
11267 if mergelist and "--skipfirst" in myopts:
11268 for i, task in enumerate(mergelist):
11269 if isinstance(task, list) and \
11270 task and task[-1] == "merge":
11277 success, mydepgraph, dropped_tasks = resume_depgraph(
11278 settings, trees, mtimedb, myopts, myparams, spinner)
11279 except (portage.exception.PackageNotFound,
11280 depgraph.UnsatisfiedResumeDep), e:
11281 if isinstance(e, depgraph.UnsatisfiedResumeDep):
11282 mydepgraph = e.depgraph
11285 from textwrap import wrap
11286 from portage.output import EOutput
# NOTE(review): the line constructing "out" (an EOutput) is missing here.
11289 resume_data = mtimedb["resume"]
11290 mergelist = resume_data.get("mergelist")
11291 if not isinstance(mergelist, list):
11293 if mergelist and debug or (verbose and not quiet):
11294 out.eerror("Invalid resume list:")
11297 for task in mergelist:
11298 if isinstance(task, list):
11299 out.eerror(indent + str(tuple(task)))
11302 if isinstance(e, depgraph.UnsatisfiedResumeDep):
11303 out.eerror("One or more packages are either masked or " + \
11304 "have missing dependencies:")
11307 for dep in e.value:
11308 if dep.atom is None:
11309 out.eerror(indent + "Masked package:")
11310 out.eerror(2 * indent + str(dep.parent))
11313 out.eerror(indent + str(dep.atom) + " pulled in by:")
11314 out.eerror(2 * indent + str(dep.parent))
11316 msg = "The resume list contains packages " + \
11317 "that are either masked or have " + \
11318 "unsatisfied dependencies. " + \
11319 "Please restart/continue " + \
11320 "the operation manually, or use --skipfirst " + \
11321 "to skip the first package in the list and " + \
11322 "any other packages that may be " + \
11323 "masked or have missing dependencies."
11324 for line in wrap(msg, 72):
11326 elif isinstance(e, portage.exception.PackageNotFound):
11327 out.eerror("An expected package is " + \
11328 "not available: %s" % str(e))
11330 msg = "The resume list contains one or more " + \
11331 "packages that are no longer " + \
11332 "available. Please restart/continue " + \
11333 "the operation manually."
11334 for line in wrap(msg, 72):
11338 print "\b\b... done!"
# Report entries silently dropped from the resume list.
11342 portage.writemsg("!!! One or more packages have been " + \
11343 "dropped due to\n" + \
11344 "!!! masking or unsatisfied dependencies:\n\n",
11346 for task in dropped_tasks:
11347 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
11348 portage.writemsg("\n", noiselevel=-1)
11351 if mydepgraph is not None:
11352 mydepgraph.display_problems()
11353 if not (ask or pretend):
11354 # delete the current list and also the backup
11355 # since it's probably stale too.
11356 for k in ("resume", "resume_backup"):
11357 mtimedb.pop(k, None)
11362 if ("--resume" in myopts):
11363 print darkgreen("emerge: It seems we have nothing to resume...")
# Non-resume path: build the depgraph from the command-line targets.
11366 myparams = create_depgraph_params(myopts, myaction)
11367 if "--quiet" not in myopts and "--nodeps" not in myopts:
11368 print "Calculating dependencies ",
11370 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
11372 retval, favorites = mydepgraph.select_files(myfiles)
11373 except portage.exception.PackageNotFound, e:
11374 portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
11376 except portage.exception.PackageSetNotFound, e:
11377 root_config = trees[settings["ROOT"]]["root_config"]
11378 display_missing_pkg_set(root_config, e.value)
11381 print "\b\b... done!"
11383 mydepgraph.display_problems()
# Interactive display (--ask / --tree / --verbose): show the merge list
# and build the confirmation prompt.
11386 if "--pretend" not in myopts and \
11387 ("--ask" in myopts or "--tree" in myopts or \
11388 "--verbose" in myopts) and \
11389 not ("--quiet" in myopts and "--ask" not in myopts):
11390 if "--resume" in myopts:
11391 mymergelist = mydepgraph.altlist()
11392 if len(mymergelist) == 0:
11393 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
11395 favorites = mtimedb["resume"]["favorites"]
11396 retval = mydepgraph.display(
11397 mydepgraph.altlist(reversed=tree),
11398 favorites=favorites)
11399 mydepgraph.display_problems()
11400 if retval != os.EX_OK:
11402 prompt="Would you like to resume merging these packages?"
11404 retval = mydepgraph.display(
11405 mydepgraph.altlist(reversed=("--tree" in myopts)),
11406 favorites=favorites)
11407 mydepgraph.display_problems()
11408 if retval != os.EX_OK:
# NOTE(review): the lines computing "mergecount" (incremented in this
# loop, tested below around 11433) are missing from this listing.
11411 for x in mydepgraph.altlist():
11412 if isinstance(x, Package) and x.operation == "merge":
11416 sets = trees[settings["ROOT"]]["root_config"].sets
11417 world_candidates = None
11418 if "--noreplace" in myopts and \
11419 not oneshot and favorites:
11420 # Sets that are not world candidates are filtered
11421 # out here since the favorites list needs to be
11422 # complete for depgraph.loadResumeCommand() to
11423 # operate correctly.
11424 world_candidates = [x for x in favorites \
11425 if not (x.startswith(SETPREFIX) and \
11426 not sets[x[1:]].world_candidate)]
11427 if "--noreplace" in myopts and \
11428 not oneshot and world_candidates:
11430 for x in world_candidates:
11431 print " %s %s" % (good("*"), x)
11432 prompt="Would you like to add these packages to your world favorites?"
11433 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
11434 prompt="Nothing to merge; would you like to auto-clean packages?"
11437 print "Nothing to merge; quitting."
11440 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
11441 prompt="Would you like to fetch the source files for these packages?"
11443 prompt="Would you like to merge these packages?"
# Abort on a negative answer; the early-return body is on a missing line.
11445 if "--ask" in myopts and userquery(prompt) == "No":
11450 # Don't ask again (e.g. when auto-cleaning packages after merge)
11451 myopts.pop("--ask", None)
# Pretend display path (no prompt, no merge).
11453 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
11454 if ("--resume" in myopts):
11455 mymergelist = mydepgraph.altlist()
11456 if len(mymergelist) == 0:
11457 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
11459 favorites = mtimedb["resume"]["favorites"]
11460 retval = mydepgraph.display(
11461 mydepgraph.altlist(reversed=tree),
11462 favorites=favorites)
11463 mydepgraph.display_problems()
11464 if retval != os.EX_OK:
11467 retval = mydepgraph.display(
11468 mydepgraph.altlist(reversed=("--tree" in myopts)),
11469 favorites=favorites)
11470 mydepgraph.display_problems()
11471 if retval != os.EX_OK:
# --buildpkgonly requires a graph with no unsatisfied (non-"nomerge")
# edges; this check appears twice -- once for the pretend path, once for
# the real merge path below.
11473 if "--buildpkgonly" in myopts:
11474 graph_copy = mydepgraph.digraph.clone()
11475 removed_nodes = set()
11476 for node in graph_copy:
11477 if not isinstance(node, Package) or \
11478 node.operation == "nomerge":
11479 removed_nodes.add(node)
11480 graph_copy.difference_update(removed_nodes)
11481 if not graph_copy.hasallzeros(ignore_priority = \
11482 DepPrioritySatisfiedRange.ignore_medium):
11483 print "\n!!! --buildpkgonly requires all dependencies to be merged."
11484 print "!!! You have to merge the dependencies before you can build this package.\n"
11487 if "--buildpkgonly" in myopts:
11488 graph_copy = mydepgraph.digraph.clone()
11489 removed_nodes = set()
11490 for node in graph_copy:
11491 if not isinstance(node, Package) or \
11492 node.operation == "nomerge":
11493 removed_nodes.add(node)
11494 graph_copy.difference_update(removed_nodes)
11495 if not graph_copy.hasallzeros(ignore_priority = \
11496 DepPrioritySatisfiedRange.ignore_medium):
11497 print "\n!!! --buildpkgonly requires all dependencies to be merged."
11498 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
# Actual merge: hand the list to the Scheduler.  The depgraph is deleted
# and caches cleared first to free memory for the merge phase.
11501 if ("--resume" in myopts):
11502 favorites=mtimedb["resume"]["favorites"]
11503 mymergelist = mydepgraph.altlist()
11504 mydepgraph.break_refs(mymergelist)
11505 mergetask = Scheduler(settings, trees, mtimedb, myopts,
11506 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
11507 del mydepgraph, mymergelist
11508 clear_caches(trees)
11510 retval = mergetask.merge()
11511 merge_count = mergetask.curval
# Non-resume merge: keep a resume_backup of any meaningful existing
# resume list, then record the new resume state before merging.
11513 if "resume" in mtimedb and \
11514 "mergelist" in mtimedb["resume"] and \
11515 len(mtimedb["resume"]["mergelist"]) > 1:
11516 mtimedb["resume_backup"] = mtimedb["resume"]
11517 del mtimedb["resume"]
11519 mtimedb["resume"]={}
11520 # Stored as a dict starting with portage-2.1.6_rc1, and supported
11521 # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
11522 # a list type for options.
11523 mtimedb["resume"]["myopts"] = myopts.copy()
11525 # Convert Atom instances to plain str.
11526 mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
11528 pkglist = mydepgraph.altlist()
11529 mydepgraph.saveNomergeFavorites()
11530 mydepgraph.break_refs(pkglist)
11531 mergetask = Scheduler(settings, trees, mtimedb, myopts,
11532 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
11533 del mydepgraph, pkglist
11534 clear_caches(trees)
11536 retval = mergetask.merge()
11537 merge_count = mergetask.curval
# Post-merge housekeeping: auto-clean (or warn) and prune the registry of
# preserved libraries that no longer exist.
11539 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
11540 if "yes" == settings.get("AUTOCLEAN"):
11541 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
11542 unmerge(trees[settings["ROOT"]]["root_config"],
11543 myopts, "clean", [],
11544 ldpath_mtimes, autoclean=1)
11546 portage.writemsg_stdout(colorize("WARN", "WARNING:")
11547 + " AUTOCLEAN is disabled. This can cause serious"
11548 + " problems due to overlapping packages.\n")
11549 trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
11553 def multiple_actions(action1, action2):
# Report that two mutually-exclusive action options were given on the
# command line.
# NOTE(review): the listing is missing the line(s) after 11555; presumably
# a sys.exit() follows so this function does not return normally -- confirm
# against the full source.
11554 sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
11555 sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
# NOTE(review): numbered listing with missing lines (docstring delimiters,
# the new_args initialization, the main while-loop header around 11574, and
# several branch bodies).  Code kept verbatim; comments only added.
11558 def insert_optional_args(args):
11560 Parse optional arguments and insert a value if one has
11561 not been provided. This is done before feeding the args
11562 to the optparse parser since that parser does not support
11563 this feature natively.
# Options that may appear with or without an argument, and the argument
# values they accept when one is present.
11567 jobs_opts = ("-j", "--jobs")
11568 default_arg_opts = {
11569 '--deselect' : ('n',),
11570 '--root-deps' : ('rdeps',),
# Process args as a stack (reversed so pop() yields them in order).
11572 arg_stack = args[:]
11573 arg_stack.reverse()
11575 arg = arg_stack.pop()
11577 default_arg_choices = default_arg_opts.get(arg)
11578 if default_arg_choices is not None:
11579 new_args.append(arg)
# Keep an explicit argument if the next token is one of the accepted
# choices; otherwise synthesize the default 'True'.
11580 if arg_stack and arg_stack[-1] in default_arg_choices:
11581 new_args.append(arg_stack.pop())
11583 # insert default argument
11584 new_args.append('True')
# -jN / -j N / --jobs handling: a short option bundle containing "j"
# counts too (e.g. "-vj3").
11587 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
11588 if not (short_job_opt or arg in jobs_opts):
11589 new_args.append(arg)
11592 # Insert an empty placeholder in order to
11593 # satisfy the requirements of optparse.
11595 new_args.append("--jobs")
# Extract a count embedded in the short option ("-j3"), or remember the
# other bundled short flags so they can be re-emitted afterwards.
11598 if short_job_opt and len(arg) > 2:
11599 if arg[:2] == "-j":
11601 job_count = int(arg[2:])
11603 saved_opts = arg[2:]
11606 saved_opts = arg[1:].replace("j", "")
# A separate numeric token following the option is the job count.
11608 if job_count is None and arg_stack:
11610 job_count = int(arg_stack[-1])
11614 # Discard the job count from the stack
11615 # since we're consuming it here.
11618 if job_count is None:
11619 # unlimited number of jobs
11620 new_args.append("True")
11622 new_args.append(str(job_count))
11624 if saved_opts is not None:
11625 new_args.append("-" + saved_opts)
# NOTE(review): numbered listing with many missing lines -- the docstring,
# the initialization of myaction/myopts/myfiles (returned at 11784), most
# option keys of the argument_options dict (only scattered "help"/"choices"
# values survive), and the try/except wrappers around the int()/float()
# conversions.  Code kept verbatim; comments only added.
11629 def parse_opts(tmpcmdline, silent=False):
11634 global actions, options, shortmapping
11636 longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
# Options that take an argument; entries feed parser.add_option(**kwargs).
11637 argument_options = {
11639 "help":"specify the location for portage configuration files",
11643 "help":"enable or disable color output",
11645 "choices":("y", "n")
11649 "help" : "remove atoms from the world file",
11651 "choices" : ("True", "n")
11656 "help" : "Specifies the number of packages to build " + \
11662 "--load-average": {
11664 "help" :"Specifies that no new builds should be started " + \
11665 "if there are other builds running and the load average " + \
11666 "is at least LOAD (a floating-point number).",
11672 "help":"include unnecessary build time dependencies",
11674 "choices":("y", "n")
11677 "help":"specify conditions to trigger package reinstallation",
11679 "choices":["changed-use"]
11682 "help" : "specify the target root filesystem for merging packages",
11687 "help" : "modify interpretation of depedencies",
11689 "choices" :("True", "rdeps")
# Build the optparse parser; the default --help is removed because emerge
# provides its own help action.
11693 from optparse import OptionParser
11694 parser = OptionParser()
11695 if parser.has_option("--help"):
11696 parser.remove_option("--help")
11698 for action_opt in actions:
11699 parser.add_option("--" + action_opt, action="store_true",
11700 dest=action_opt.replace("-", "_"), default=False)
11701 for myopt in options:
11702 parser.add_option(myopt, action="store_true",
11703 dest=myopt.lstrip("--").replace("-", "_"), default=False)
11704 for shortopt, longopt in shortmapping.iteritems():
11705 parser.add_option("-" + shortopt, action="store_true",
11706 dest=longopt.lstrip("--").replace("-", "_"), default=False)
11707 for myalias, myopt in longopt_aliases.iteritems():
11708 parser.add_option(myalias, action="store_true",
11709 dest=myopt.lstrip("--").replace("-", "_"), default=False)
11711 for myopt, kwargs in argument_options.iteritems():
11712 parser.add_option(myopt,
11713 dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
# insert_optional_args() supplies the placeholder values optparse needs.
11715 tmpcmdline = insert_optional_args(tmpcmdline)
11717 myoptions, myargs = parser.parse_args(args=tmpcmdline)
# Normalize the synthetic "True" placeholder strings back to real booleans.
11719 if myoptions.deselect == "True":
11720 myoptions.deselect = True
11722 if myoptions.root_deps == "True":
11723 myoptions.root_deps = True
11727 if myoptions.jobs == "True":
11731 jobs = int(myoptions.jobs)
11735 if jobs is not True and \
11739 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
11740 (myoptions.jobs,), noiselevel=-1)
11742 myoptions.jobs = jobs
# --load-average: parse as float; non-positive or unparsable disables it.
11744 if myoptions.load_average:
11746 load_average = float(myoptions.load_average)
11750 if load_average <= 0.0:
11751 load_average = None
11753 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
11754 (myoptions.load_average,), noiselevel=-1)
11756 myoptions.load_average = load_average
# Copy parsed values into the myopts dict keyed by the literal option name.
11758 for myopt in options:
11759 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
11761 myopts[myopt] = True
11763 for myopt in argument_options:
11764 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
11768 if myoptions.searchdesc:
11769 myoptions.search = True
# Exactly one action option is allowed; multiple_actions() reports clashes.
11771 for action_opt in actions:
11772 v = getattr(myoptions, action_opt.replace("-", "_"))
11775 multiple_actions(myaction, action_opt)
11777 myaction = action_opt
11779 if myaction is None and myoptions.deselect is True:
11780 myaction = 'deselect'
11784 return myaction, myopts, myfiles
def validate_ebuild_environment(trees):
	"""Invoke config validation on the vartree settings of every root."""
	for root in trees:
		trees[root]["vartree"].settings.validate()
def clear_caches(trees):
	"""Flush the in-memory caches held by each tree's dbapi objects.

	Called from action_build() after the depgraph has been discarded,
	just before handing control to the Scheduler.
	"""
	for root_trees in trees.itervalues():
		porttree_dbapi = root_trees["porttree"].dbapi
		porttree_dbapi.melt()
		porttree_dbapi._aux_cache.clear()
		bintree_dbapi = root_trees["bintree"].dbapi
		bintree_dbapi._aux_cache.clear()
		bintree_dbapi._clear_cache()
		root_trees["vartree"].dbapi.linkmap._clear_cache()
	# NOTE(review): the listing's indentation is stripped; dircache is a
	# module-global cache, so it is cleared once after the loop -- confirm
	# placement against the full source.
	portage.dircache.clear()
11801 def load_emerge_config(trees=None):
# Build and return (settings, trees, mtimedb) for emerge: honor
# PORTAGE_CONFIGROOT / ROOT from the environment, create the portage
# trees, attach a RootConfig (set configuration) to each root, and open
# the mtime database.
# NOTE(review): this listing is missing several lines (11802, 11806,
# 11808, 11813, 11815, 11817, 11819-820, 11823) -- e.g. the kwargs dict
# initialization used at 11807 -- confirm against the full source.
11803 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
11804 v = os.environ.get(envvar, None)
11805 if v and v.strip():
11807 trees = portage.create_trees(trees=trees, **kwargs)
11809 for root, root_trees in trees.iteritems():
11810 settings = root_trees["vartree"].settings
11811 setconfig = load_default_config(settings, root_trees)
11812 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
11814 settings = trees["/"]["vartree"].settings
11816 for myroot in trees:
11818 settings = trees[myroot]["vartree"].settings
# The mtime database lives under portage's global cache path.
11821 mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
11822 mtimedb = portage.MtimeDB(mtimedbfile)
11824 return settings, trees, mtimedb
# NOTE(review): numbered listing with missing lines -- notably the "try:"
# headers implied by the except clauses at 11846/11857/11887, and the
# default assignments before the CLEAN_DELAY and PORTAGE_DEBUG parsing.
# Code kept verbatim; comments only added.
11826 def adjust_config(myopts, settings):
11827 """Make emerge specific adjustments to the config."""
11829 # To enhance usability, make some vars case insensitive by forcing them to
11831 for myvar in ("AUTOCLEAN", "NOCOLOR"):
11832 if myvar in settings:
11833 settings[myvar] = settings[myvar].lower()
11834 settings.backup_changes(myvar)
11837 # Kill noauto as it will break merges otherwise.
11838 if "noauto" in settings.features:
11839 settings.features.remove('noauto')
11840 settings['FEATURES'] = ' '.join(sorted(settings.features))
11841 settings.backup_changes("FEATURES")
# Parse CLEAN_DELAY; on a malformed value report and restore the default.
11845 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
11846 except ValueError, e:
11847 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
11848 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
11849 settings["CLEAN_DELAY"], noiselevel=-1)
11850 settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
11851 settings.backup_changes("CLEAN_DELAY")
# Same pattern for EMERGE_WARNING_DELAY (default 10 seconds).
11853 EMERGE_WARNING_DELAY = 10
11855 EMERGE_WARNING_DELAY = int(settings.get(
11856 "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
11857 except ValueError, e:
11858 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
11859 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
11860 settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
11861 settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
11862 settings.backup_changes("EMERGE_WARNING_DELAY")
# Mirror selected command-line options into config variables.
11864 if "--quiet" in myopts:
11865 settings["PORTAGE_QUIET"]="1"
11866 settings.backup_changes("PORTAGE_QUIET")
11868 if "--verbose" in myopts:
11869 settings["PORTAGE_VERBOSE"] = "1"
11870 settings.backup_changes("PORTAGE_VERBOSE")
11872 # Set so that configs will be merged regardless of remembered status
11873 if ("--noconfmem" in myopts):
11874 settings["NOCONFMEM"]="1"
11875 settings.backup_changes("NOCONFMEM")
11877 # Set various debug markers... They should be merged somehow.
# PORTAGE_DEBUG must parse as 0 or 1; anything else is reported.
11880 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
11881 if PORTAGE_DEBUG not in (0, 1):
11882 portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
11883 PORTAGE_DEBUG, noiselevel=-1)
11884 portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
11887 except ValueError, e:
11888 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
11889 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
11890 settings["PORTAGE_DEBUG"], noiselevel=-1)
11892 if "--debug" in myopts:
11894 settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
11895 settings.backup_changes("PORTAGE_DEBUG")
# Color handling: NOCOLOR setting, then the explicit --color option, then
# a non-tty stdout each adjust portage.output.havecolor.
11897 if settings.get("NOCOLOR") not in ("yes","true"):
11898 portage.output.havecolor = 1
11900 """The explicit --color < y | n > option overrides the NOCOLOR environment
11901 variable and stdout auto-detection."""
11902 if "--color" in myopts:
11903 if "y" == myopts["--color"]:
11904 portage.output.havecolor = 1
11905 settings["NOCOLOR"] = "false"
11907 portage.output.havecolor = 0
11908 settings["NOCOLOR"] = "true"
11909 settings.backup_changes("NOCOLOR")
11910 elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
11911 portage.output.havecolor = 0
11912 settings["NOCOLOR"] = "true"
11913 settings.backup_changes("NOCOLOR")
11915 def apply_priorities(settings):
# NOTE(review): the body (original lines 11916-11918) is missing from this
# listing; judging by the helpers defined below it presumably calls
# nice(settings) and ionice(settings) -- confirm against the full source.
11919 def nice(settings):
# Renice the current process to PORTAGE_NICENESS (default "0").
# NOTE(review): line 11920 is missing from this listing; the except clause
# at 11922 implies it opened a try: block.
11921 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
# os.nice() can raise OSError and int() can raise ValueError for a
# malformed setting -- both are reported without aborting.
11922 except (OSError, ValueError), e:
11923 out = portage.output.EOutput()
11924 out.eerror("Failed to change nice value to '%s'" % \
11925 settings["PORTAGE_NICENESS"])
11926 out.eerror("%s\n" % str(e))
11928 def ionice(settings):
# Run the user-configured PORTAGE_IONICE_COMMAND (if set), with ${PID}
# expanded to the current process id, and report a non-zero exit status.
# NOTE(review): several lines are missing from this listing (11929, 11931,
# 11933-935, 11939-940, 11945-946) -- the early-return guards and the try:
# header implied by the except clause at 11942 are not visible here.
11930 ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
11932 ionice_cmd = shlex.split(ionice_cmd)
11936 from portage.util import varexpand
# Substitute the listed variables (currently just ${PID}) in each token.
11937 variables = {"PID" : str(os.getpid())}
11938 cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
11941 rval = portage.process.spawn(cmd, env=os.environ)
11942 except portage.exception.CommandNotFound:
11943 # The OS kernel probably doesn't support ionice,
11944 # so return silently.
11947 if rval != os.EX_OK:
11948 out = portage.output.EOutput()
11949 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
11950 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
11952 def display_missing_pkg_set(root_config, set_name):
# Log an ERROR-level message naming the unsatisfiable set and listing the
# sets that do exist on this root.
# NOTE(review): lines 11953-954, 11958-959 and 11962-963 are missing from
# this listing -- the initialization of "msg" used below (presumably
# msg = []) is among them; confirm against the full source.
11955 msg.append(("emerge: There are no sets to satisfy '%s'. " + \
11956 "The following sets exist:") % \
11957 colorize("INFORM", set_name))
11960 for s in sorted(root_config.sets):
11961 msg.append(" %s" % s)
11964 writemsg_level("".join("%s\n" % l for l in msg),
11965 level=logging.ERROR, noiselevel=-1)
11967 def expand_set_arguments(myfiles, myaction, root_config):
# Expand SETPREFIX ("@") package-set references in the emerge argument
# list.  Depending on `myaction`, each set is either replaced by its
# member atoms or kept as a SETPREFIX-tagged name so the depgraph can
# expand it itself.  Returns a (newargs, retval) tuple.
# NOTE(review): this chunk is a numbered partial dump; lines are elided
# wherever the embedded numbers are non-consecutive (loop headers,
# try statements, initializations such as `newargs`/`retval`/`options`)
# — do not treat the visible lines as a complete function body.
11969 setconfig = root_config.setconfig
11971 sets = setconfig.getSets()
11973 # In order to know exactly which atoms/sets should be added to the
11974 # world file, the depgraph performs set expansion later. It will get
11975 # confused about where the atoms came from if it's not allowed to
11976 # expand them itself.
11977 do_not_expand = (None, )
# Rewrite the bare legacy targets "system" and "world" into set syntax.
11980 if a in ("system", "world"):
11981 newargs.append(SETPREFIX+a)
11988 # separators for set arguments
# Set-expression operators; IS_OPERATOR (intersection) is presumably
# defined on an elided line — TODO confirm.  All operators must share
# the same length so slicing with len(IS_OPERATOR) below works for any
# of them.
11992 # WARNING: all operators must be of equal length
11994 DIFF_OPERATOR = "-@"
11995 UNION_OPERATOR = "+@"
# Parse optional per-set options of the form @name[key=val,flag] and
# feed them to setconfig.update() before expansion.
11997 for i in range(0, len(myfiles)):
11998 if myfiles[i].startswith(SETPREFIX):
12001 x = myfiles[i][len(SETPREFIX):]
12004 start = x.find(ARG_START)
12005 end = x.find(ARG_END)
12006 if start > 0 and start < end:
12007 namepart = x[:start]
12008 argpart = x[start+1:end]
12010 # TODO: implement proper quoting
12011 args = argpart.split(",")
# key=value option; a bare flag argument is stored as the string "True".
12015 k, v = a.split("=", 1)
12018 options[a] = "True"
12019 setconfig.update(namepart, options)
12020 newset += (x[:start-len(namepart)]+namepart)
12021 x = x[end+len(ARG_END):]
12025 myfiles[i] = SETPREFIX+newset
# Reload the sets: setconfig.update() above may have changed the config.
12027 sets = setconfig.getSets()
12029 # display errors that occurred while loading the SetConfig instance
12030 for e in setconfig.errors:
12031 print colorize("BAD", "Error during set creation: %s" % e)
12033 # emerge relies on the existence of sets with names "world" and "system"
12034 required_sets = ("world", "system")
12037 for s in required_sets:
12039 missing_sets.append(s)
# Build a human-readable, comma-separated list of the missing set names
# ("a", "b", and "c" / "a" and "b" / "a").
12041 if len(missing_sets) > 2:
12042 missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
12043 missing_sets_str += ', and "%s"' % missing_sets[-1]
12044 elif len(missing_sets) == 2:
12045 missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
12047 missing_sets_str = '"%s"' % missing_sets[-1]
12048 msg = ["emerge: incomplete set configuration, " + \
12049 "missing set(s): %s" % missing_sets_str]
12051 msg.append(" sets defined: %s" % ", ".join(sets))
12052 msg.append(" This usually means that '%s'" % \
12053 (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
12054 msg.append(" is missing or corrupt.")
12056 writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
# Actions for which each named set must support the "unmerge"
# operation (checked via supportsOperation() below).
12058 unmerge_actions = ("unmerge", "prune", "clean", "depclean")
12061 if a.startswith(SETPREFIX):
12062 # support simple set operations (intersection, difference and union)
12063 # on the commandline. Expressions are evaluated strictly left-to-right
12064 if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
12065 expression = a[len(SETPREFIX):]
# Peel operators off right-to-left (rfind) so the leftmost operand ends
# up first in expr_sets, yielding strict left-to-right evaluation below.
12068 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
12069 is_pos = expression.rfind(IS_OPERATOR)
12070 diff_pos = expression.rfind(DIFF_OPERATOR)
12071 union_pos = expression.rfind(UNION_OPERATOR)
12072 op_pos = max(is_pos, diff_pos, union_pos)
12073 s1 = expression[:op_pos]
12074 s2 = expression[op_pos+len(IS_OPERATOR):]
12075 op = expression[op_pos:op_pos+len(IS_OPERATOR)]
12077 display_missing_pkg_set(root_config, s2)
12079 expr_sets.insert(0, s2)
12080 expr_ops.insert(0, op)
12082 if not expression in sets:
12083 display_missing_pkg_set(root_config, expression)
12085 expr_sets.insert(0, expression)
# Start from the leftmost set's atoms and apply each operator in order.
12086 result = set(setconfig.getSetAtoms(expression))
12087 for i in range(0, len(expr_ops)):
12088 s2 = setconfig.getSetAtoms(expr_sets[i+1])
12089 if expr_ops[i] == IS_OPERATOR:
12090 result.intersection_update(s2)
12091 elif expr_ops[i] == DIFF_OPERATOR:
12092 result.difference_update(s2)
12093 elif expr_ops[i] == UNION_OPERATOR:
12096 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
12097 newargs.extend(result)
# Plain @set argument (no operators in the expression).
12099 s = a[len(SETPREFIX):]
12101 display_missing_pkg_set(root_config, s)
12103 setconfig.active.append(s)
12105 set_atoms = setconfig.getSetAtoms(s)
# getSetAtoms() raises PackageSetNotFound for sets that reference a
# non-existent nested set.
12106 except portage.exception.PackageSetNotFound, e:
12107 writemsg_level(("emerge: the given set '%s' " + \
12108 "contains a non-existent set named '%s'.\n") % \
12109 (s, e), level=logging.ERROR, noiselevel=-1)
12111 if myaction in unmerge_actions and \
12112 not sets[s].supportsOperation("unmerge"):
12113 sys.stderr.write("emerge: the given set '%s' does " % s + \
12114 "not support unmerge operations\n")
12116 elif not set_atoms:
12117 print "emerge: '%s' is an empty set" % s
12118 elif myaction not in do_not_expand:
# Expand the set into its member atoms for actions that permit it.
12119 newargs.extend(set_atoms)
12121 newargs.append(SETPREFIX+s)
12122 for e in sets[s].errors:
12126 return (newargs, retval)
12128 def repo_name_check(trees):
# Warn about portage tree directories that lack a profiles/repo_name
# entry.  Returns True if any repository is missing one.
# NOTE(review): numbered partial dump — some lines are elided, e.g. the
# loop over `repos` that precedes line 12136.
12129 missing_repo_names = set()
12130 for root, root_trees in trees.iteritems():
12131 if "porttree" in root_trees:
12132 portdb = root_trees["porttree"].dbapi
# Assume every configured tree is missing a name, then discard the
# paths that getRepositories() resolved to a known repo name.
12133 missing_repo_names.update(portdb.porttrees)
12134 repos = portdb.getRepositories()
12136 missing_repo_names.discard(portdb.getRepositoryPath(r))
12137 if portdb.porttree_root in missing_repo_names and \
12138 not os.path.exists(os.path.join(
12139 portdb.porttree_root, "profiles")):
12140 # This is normal if $PORTDIR happens to be empty,
12141 # so don't warn about it.
12142 missing_repo_names.remove(portdb.porttree_root)
12144 if missing_repo_names:
12146 msg.append("WARNING: One or more repositories " + \
12147 "have missing repo_name entries:")
12149 for p in missing_repo_names:
12150 msg.append("\t%s/profiles/repo_name" % (p,))
12152 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
12153 "should be a plain text file containing a unique " + \
12154 "name for the repository on the first line.", 70))
12155 writemsg_level("".join("%s\n" % l for l in msg),
12156 level=logging.WARNING, noiselevel=-1)
12158 return bool(missing_repo_names)
12160 def repo_name_duplicate_check(trees):
# Warn when multiple trees define the same profiles/repo_name and some
# were consequently ignored as duplicates; returns True if any were.
# The warning can be silenced with PORTAGE_REPO_DUPLICATE_WARN="0".
# NOTE(review): partial dump — the initialization of `ignored_repos`
# and `msg` falls on elided lines.
12162 for root, root_trees in trees.iteritems():
12163 if 'porttree' in root_trees:
12164 portdb = root_trees['porttree'].dbapi
12165 if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
12166 for repo_name, paths in portdb._ignored_repos:
# Key on (root, name, winning path) so the report can show which
# repository path shadowed the ignored ones.
12167 k = (root, repo_name, portdb.getRepositoryPath(repo_name))
12168 ignored_repos.setdefault(k, []).extend(paths)
12172 msg.append('WARNING: One or more repositories ' + \
12173 'have been ignored due to duplicate')
12174 msg.append(' profiles/repo_name entries:')
12176 for k in sorted(ignored_repos):
12177 msg.append(' %s overrides' % (k,))
12178 for path in ignored_repos[k]:
12179 msg.append(' %s' % (path,))
12181 msg.extend(' ' + x for x in textwrap.wrap(
12182 "All profiles/repo_name entries must be unique in order " + \
12183 "to avoid having duplicates ignored. " + \
12184 "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
12185 "/etc/make.conf if you would like to disable this warning."))
12186 writemsg_level(''.join('%s\n' % l for l in msg),
12187 level=logging.WARNING, noiselevel=-1)
12189 return bool(ignored_repos)
12191 def config_protect_check(trees):
# Emit a warning for any root whose CONFIG_PROTECT setting is empty,
# since that disables configuration-file protection for the root.
12192 for root, root_trees in trees.iteritems():
12193 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
12194 msg = "!!! CONFIG_PROTECT is empty"
# NOTE(review): elided line 12195 presumably guards the root suffix
# (e.g. only appended when root != "/") — confirm against full source.
12196 msg += " for '%s'" % root
12197 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
12199 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
# Report an ambiguous short package name `arg`.  In --quiet mode just
# list the matching fully-qualified category/package names; otherwise
# run the search machinery so the user sees details for each candidate.
# NOTE(review): numbered partial dump — lines are elided between
# non-consecutive numbers (e.g. the early return after the quiet path).
12201 if "--quiet" in myopts:
12202 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12203 print "!!! one of the following fully-qualified ebuild names instead:\n"
# De-duplicate and sort the cat/pkg keys extracted from the atoms.
12204 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
12205 print " " + colorize("INFORM", cp)
# Non-quiet path: reuse the `search` class to display each candidate.
12208 s = search(root_config, spinner, "--searchdesc" in myopts,
12209 "--quiet" not in myopts, "--usepkg" in myopts,
12210 "--usepkgonly" in myopts)
12211 null_cp = portage.dep_getkey(insert_category_into_atom(
12213 cat, atom_pn = portage.catsplit(null_cp)
12214 s.searchkey = atom_pn
12215 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
12218 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12219 print "!!! one of the above fully-qualified ebuild names instead.\n"
12221 def profile_check(trees, myaction, myopts):
# Verify that every root has a valid profile configured.  Actions that
# can work without a profile ("info", "sync", --version, --help) are
# allowed through; otherwise an error message is printed and a failure
# code returned.  NOTE(review): the return statements fall on elided
# lines of this numbered partial dump.
12222 if myaction in ("info", "sync"):
12224 elif "--version" in myopts or "--help" in myopts:
12226 for root, root_trees in trees.iteritems():
12227 if root_trees["root_config"].settings.profiles:
12229 # generate some profile related warning messages
12230 validate_ebuild_environment(trees)
12231 msg = "If you have just changed your profile configuration, you " + \
12232 "should revert back to the previous configuration. Due to " + \
12233 "your current profile being invalid, allowed actions are " + \
12234 "limited to --help, --info, --sync, and --version."
12235 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
12236 level=logging.ERROR, noiselevel=-1)
12241 global portage # NFC why this is necessary now - genone
12242 portage._disable_legacy_globals()
12243 # Disable color until we're sure that it should be enabled (after
12244 # EMERGE_DEFAULT_OPTS has been parsed).
12245 portage.output.havecolor = 0
12246 # This first pass is just for options that need to be known as early as
12247 # possible, such as --config-root. They will be parsed again later,
12248 # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
12249 # the value of --config-root).
12250 myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
12251 if "--debug" in myopts:
12252 os.environ["PORTAGE_DEBUG"] = "1"
12253 if "--config-root" in myopts:
12254 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
12255 if "--root" in myopts:
12256 os.environ["ROOT"] = myopts["--root"]
12258 # Portage needs to ensure a sane umask for the files it creates.
12260 settings, trees, mtimedb = load_emerge_config()
12261 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12262 rval = profile_check(trees, myaction, myopts)
12263 if rval != os.EX_OK:
12266 if portage._global_updates(trees, mtimedb["updates"]):
12268 # Reload the whole config from scratch.
12269 settings, trees, mtimedb = load_emerge_config(trees=trees)
12270 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12272 xterm_titles = "notitles" not in settings.features
12275 if "--ignore-default-opts" not in myopts:
12276 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
12277 tmpcmdline.extend(sys.argv[1:])
12278 myaction, myopts, myfiles = parse_opts(tmpcmdline)
12280 if "--digest" in myopts:
12281 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
12282 # Reload the whole config from scratch so that the portdbapi internal
12283 # config is updated with new FEATURES.
12284 settings, trees, mtimedb = load_emerge_config(trees=trees)
12285 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12287 for myroot in trees:
12288 mysettings = trees[myroot]["vartree"].settings
12289 mysettings.unlock()
12290 adjust_config(myopts, mysettings)
12291 if '--pretend' not in myopts and myaction in \
12292 (None, 'clean', 'depclean', 'prune', 'unmerge'):
12293 mysettings["PORTAGE_COUNTER_HASH"] = \
12294 trees[myroot]["vartree"].dbapi._counter_hash()
12295 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
12297 del myroot, mysettings
12299 apply_priorities(settings)
12301 spinner = stdout_spinner()
12302 if "candy" in settings.features:
12303 spinner.update = spinner.update_scroll
12305 if "--quiet" not in myopts:
12306 portage.deprecated_profile_check(settings=settings)
12307 repo_name_check(trees)
12308 repo_name_duplicate_check(trees)
12309 config_protect_check(trees)
12311 for mytrees in trees.itervalues():
12312 mydb = mytrees["porttree"].dbapi
12313 # Freeze the portdbapi for performance (memoize all xmatch results).
12317 if "moo" in myfiles:
12320 Larry loves Gentoo (""" + platform.system() + """)
12322 _______________________
12323 < Have you mooed today? >
12324 -----------------------
12334 ext = os.path.splitext(x)[1]
12335 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
12336 print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
12339 root_config = trees[settings["ROOT"]]["root_config"]
12340 if myaction == "list-sets":
12341 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
12345 # only expand sets for actions taking package arguments
12346 oldargs = myfiles[:]
12347 if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
12348 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
12349 if retval != os.EX_OK:
12352 # Need to handle empty sets specially, otherwise emerge will react
12353 # with the help message for empty argument lists
12354 if oldargs and not myfiles:
12355 print "emerge: no targets left after set expansion"
12358 if ("--tree" in myopts) and ("--columns" in myopts):
12359 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
12362 if ("--quiet" in myopts):
12363 spinner.update = spinner.update_quiet
12364 portage.util.noiselimit = -1
12366 # Always create packages if FEATURES=buildpkg
12367 # Imply --buildpkg if --buildpkgonly
12368 if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
12369 if "--buildpkg" not in myopts:
12370 myopts["--buildpkg"] = True
12372 # Always try and fetch binary packages if FEATURES=getbinpkg
12373 if ("getbinpkg" in settings.features):
12374 myopts["--getbinpkg"] = True
12376 if "--buildpkgonly" in myopts:
12377 # --buildpkgonly will not merge anything, so
12378 # it cancels all binary package options.
12379 for opt in ("--getbinpkg", "--getbinpkgonly",
12380 "--usepkg", "--usepkgonly"):
12381 myopts.pop(opt, None)
12383 if "--fetch-all-uri" in myopts:
12384 myopts["--fetchonly"] = True
12386 if "--skipfirst" in myopts and "--resume" not in myopts:
12387 myopts["--resume"] = True
12389 if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
12390 myopts["--usepkgonly"] = True
12392 if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
12393 myopts["--getbinpkg"] = True
12395 if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
12396 myopts["--usepkg"] = True
12398 # Also allow -K to apply --usepkg/-k
12399 if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
12400 myopts["--usepkg"] = True
12402 # Allow -p to remove --ask
12403 if "--pretend" in myopts:
12404 myopts.pop("--ask", None)
12406 # forbid --ask when not in a terminal
12407 # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
12408 if ("--ask" in myopts) and (not sys.stdin.isatty()):
12409 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
12413 if settings.get("PORTAGE_DEBUG", "") == "1":
12414 spinner.update = spinner.update_quiet
12416 if "python-trace" in settings.features:
12417 import portage.debug
12418 portage.debug.set_trace(True)
12420 if not ("--quiet" in myopts):
12421 if not sys.stdout.isatty() or ("--nospinner" in myopts):
12422 spinner.update = spinner.update_basic
12424 if myaction == 'version':
12425 print getportageversion(settings["PORTDIR"], settings["ROOT"],
12426 settings.profile_path, settings["CHOST"],
12427 trees[settings["ROOT"]]["vartree"].dbapi)
12429 elif "--help" in myopts:
12430 _emerge.help.help(myaction, myopts, portage.output.havecolor)
12433 if "--debug" in myopts:
12434 print "myaction", myaction
12435 print "myopts", myopts
12437 if not myaction and not myfiles and "--resume" not in myopts:
12438 _emerge.help.help(myaction, myopts, portage.output.havecolor)
12441 pretend = "--pretend" in myopts
12442 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
12443 buildpkgonly = "--buildpkgonly" in myopts
12445 # check if root user is the current user for the actions where emerge needs this
12446 if portage.secpass < 2:
12447 # We've already allowed "--version" and "--help" above.
12448 if "--pretend" not in myopts and myaction not in ("search","info"):
12449 need_superuser = myaction in ('clean', 'depclean', 'deselect',
12450 'prune', 'unmerge') or not \
12452 (buildpkgonly and secpass >= 1) or \
12453 myaction in ("metadata", "regen") or \
12454 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
12455 if portage.secpass < 1 or \
12458 access_desc = "superuser"
12460 access_desc = "portage group"
12461 # Always show portage_group_warning() when only portage group
12462 # access is required but the user is not in the portage group.
12463 from portage.data import portage_group_warning
12464 if "--ask" in myopts:
12465 myopts["--pretend"] = True
12466 del myopts["--ask"]
12467 print ("%s access is required... " + \
12468 "adding --pretend to options\n") % access_desc
12469 if portage.secpass < 1 and not need_superuser:
12470 portage_group_warning()
12472 sys.stderr.write(("emerge: %s access is required\n") \
12474 if portage.secpass < 1 and not need_superuser:
12475 portage_group_warning()
12478 disable_emergelog = False
12479 for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
12481 disable_emergelog = True
12483 if myaction in ("search", "info"):
12484 disable_emergelog = True
12485 if disable_emergelog:
12486 """ Disable emergelog for everything except build or unmerge
12487 operations. This helps minimize parallel emerge.log entries that can
12488 confuse log parsers. We especially want it disabled during
12489 parallel-fetch, which uses --resume --fetchonly."""
12491 def emergelog(*pargs, **kargs):
12495 if 'EMERGE_LOG_DIR' in settings:
12497 # At least the parent needs to exist for the lock file.
12498 portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
12499 except portage.exception.PortageException, e:
12500 writemsg_level("!!! Error creating directory for " + \
12501 "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
12502 (settings['EMERGE_LOG_DIR'], e),
12503 noiselevel=-1, level=logging.ERROR)
12505 global _emerge_log_dir
12506 _emerge_log_dir = settings['EMERGE_LOG_DIR']
12508 if not "--pretend" in myopts:
12509 emergelog(xterm_titles, "Started emerge on: "+\
12510 time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
12513 myelogstr=" ".join(myopts)
12515 myelogstr+=" "+myaction
12517 myelogstr += " " + " ".join(oldargs)
12518 emergelog(xterm_titles, " *** emerge " + myelogstr)
12521 def emergeexitsig(signum, frame):
12522 signal.signal(signal.SIGINT, signal.SIG_IGN)
12523 signal.signal(signal.SIGTERM, signal.SIG_IGN)
12524 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
12525 sys.exit(100+signum)
12526 signal.signal(signal.SIGINT, emergeexitsig)
12527 signal.signal(signal.SIGTERM, emergeexitsig)
12530 """This gets out final log message in before we quit."""
12531 if "--pretend" not in myopts:
12532 emergelog(xterm_titles, " *** terminating.")
12533 if "notitles" not in settings.features:
12535 portage.atexit_register(emergeexit)
12537 if myaction in ("config", "metadata", "regen", "sync"):
12538 if "--pretend" in myopts:
12539 sys.stderr.write(("emerge: The '%s' action does " + \
12540 "not support '--pretend'.\n") % myaction)
12543 if "sync" == myaction:
12544 return action_sync(settings, trees, mtimedb, myopts, myaction)
12545 elif "metadata" == myaction:
12546 action_metadata(settings, portdb, myopts)
12547 elif myaction=="regen":
12548 validate_ebuild_environment(trees)
12549 return action_regen(settings, portdb, myopts.get("--jobs"),
12550 myopts.get("--load-average"))
12552 elif "config"==myaction:
12553 validate_ebuild_environment(trees)
12554 action_config(settings, trees, myopts, myfiles)
12557 elif "search"==myaction:
12558 validate_ebuild_environment(trees)
12559 action_search(trees[settings["ROOT"]]["root_config"],
12560 myopts, myfiles, spinner)
12562 elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
12563 validate_ebuild_environment(trees)
12564 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
12565 myopts, myaction, myfiles, spinner)
12566 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
12567 post_emerge(root_config, myopts, mtimedb, rval)
12570 elif myaction == 'info':
12572 # Ensure atoms are valid before calling unmerge().
12573 vardb = trees[settings["ROOT"]]["vartree"].dbapi
12576 if is_valid_package_atom(x):
12578 valid_atoms.append(
12579 portage.dep_expand(x, mydb=vardb, settings=settings))
12580 except portage.exception.AmbiguousPackageName, e:
12581 msg = "The short ebuild name \"" + x + \
12582 "\" is ambiguous. Please specify " + \
12583 "one of the following " + \
12584 "fully-qualified ebuild names instead:"
12585 for line in textwrap.wrap(msg, 70):
12586 writemsg_level("!!! %s\n" % (line,),
12587 level=logging.ERROR, noiselevel=-1)
12589 writemsg_level(" %s\n" % colorize("INFORM", i),
12590 level=logging.ERROR, noiselevel=-1)
12591 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12595 msg.append("'%s' is not a valid package atom." % (x,))
12596 msg.append("Please check ebuild(5) for full details.")
12597 writemsg_level("".join("!!! %s\n" % line for line in msg),
12598 level=logging.ERROR, noiselevel=-1)
12601 return action_info(settings, trees, myopts, valid_atoms)
12603 # "update", "system", or just process files:
12605 validate_ebuild_environment(trees)
12608 if x.startswith(SETPREFIX) or \
12609 is_valid_package_atom(x):
12611 if x[:1] == os.sep:
12619 msg.append("'%s' is not a valid package atom." % (x,))
12620 msg.append("Please check ebuild(5) for full details.")
12621 writemsg_level("".join("!!! %s\n" % line for line in msg),
12622 level=logging.ERROR, noiselevel=-1)
12625 if "--pretend" not in myopts:
12626 display_news_notification(root_config, myopts)
12627 retval = action_build(settings, trees, mtimedb,
12628 myopts, myaction, myfiles, spinner)
12629 root_config = trees[settings["ROOT"]]["root_config"]
12630 post_emerge(root_config, myopts, mtimedb, retval)